// SPDX-License-Identifier: GPL-2.0
// CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
// Dong Aisheng <[email protected]>
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
/* Bosch M_CAN user manual can be obtained from:
* https://github.com/linux-can/can-doc/tree/master/m_can
*/
#include <linux/bitfield.h>
#include <linux/can/dev.h>
#include <linux/ethtool.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "m_can.h"
/* registers definition */
enum m_can_reg {
M_CAN_CREL = 0x0,
M_CAN_ENDN = 0x4,
M_CAN_CUST = 0x8,
M_CAN_DBTP = 0xc,
M_CAN_TEST = 0x10,
M_CAN_RWD = 0x14,
M_CAN_CCCR = 0x18,
M_CAN_NBTP = 0x1c,
M_CAN_TSCC = 0x20,
M_CAN_TSCV = 0x24,
M_CAN_TOCC = 0x28,
M_CAN_TOCV = 0x2c,
M_CAN_ECR = 0x40,
M_CAN_PSR = 0x44,
/* TDCR Register only available for version >=3.1.x */
M_CAN_TDCR = 0x48,
M_CAN_IR = 0x50,
M_CAN_IE = 0x54,
M_CAN_ILS = 0x58,
M_CAN_ILE = 0x5c,
M_CAN_GFC = 0x80,
M_CAN_SIDFC = 0x84,
M_CAN_XIDFC = 0x88,
M_CAN_XIDAM = 0x90,
M_CAN_HPMS = 0x94,
M_CAN_NDAT1 = 0x98,
M_CAN_NDAT2 = 0x9c,
M_CAN_RXF0C = 0xa0,
M_CAN_RXF0S = 0xa4,
M_CAN_RXF0A = 0xa8,
M_CAN_RXBC = 0xac,
M_CAN_RXF1C = 0xb0,
M_CAN_RXF1S = 0xb4,
M_CAN_RXF1A = 0xb8,
M_CAN_RXESC = 0xbc,
M_CAN_TXBC = 0xc0,
M_CAN_TXFQS = 0xc4,
M_CAN_TXESC = 0xc8,
M_CAN_TXBRP = 0xcc,
M_CAN_TXBAR = 0xd0,
M_CAN_TXBCR = 0xd4,
M_CAN_TXBTO = 0xd8,
M_CAN_TXBCF = 0xdc,
M_CAN_TXBTIE = 0xe0,
M_CAN_TXBCIE = 0xe4,
M_CAN_TXEFC = 0xf0,
M_CAN_TXEFS = 0xf4,
M_CAN_TXEFA = 0xf8,
};
/* message ram configuration data length */
#define MRAM_CFG_LEN 8
/* Core Release Register (CREL) */
#define CREL_REL_MASK GENMASK(31, 28)
#define CREL_STEP_MASK GENMASK(27, 24)
#define CREL_SUBSTEP_MASK GENMASK(23, 20)
/* Data Bit Timing & Prescaler Register (DBTP) */
#define DBTP_TDC BIT(23)
#define DBTP_DBRP_MASK GENMASK(20, 16)
#define DBTP_DTSEG1_MASK GENMASK(12, 8)
#define DBTP_DTSEG2_MASK GENMASK(7, 4)
#define DBTP_DSJW_MASK GENMASK(3, 0)
/* Transmitter Delay Compensation Register (TDCR) */
#define TDCR_TDCO_MASK GENMASK(14, 8)
#define TDCR_TDCF_MASK GENMASK(6, 0)
/* Test Register (TEST) */
#define TEST_LBCK BIT(4)
/* CC Control Register (CCCR) */
#define CCCR_TXP BIT(14)
#define CCCR_TEST BIT(7)
#define CCCR_DAR BIT(6)
#define CCCR_MON BIT(5)
#define CCCR_CSR BIT(4)
#define CCCR_CSA BIT(3)
#define CCCR_ASM BIT(2)
#define CCCR_CCE BIT(1)
#define CCCR_INIT BIT(0)
/* for version 3.0.x */
#define CCCR_CMR_MASK GENMASK(11, 10)
#define CCCR_CMR_CANFD 0x1
#define CCCR_CMR_CANFD_BRS 0x2
#define CCCR_CMR_CAN 0x3
#define CCCR_CME_MASK GENMASK(9, 8)
#define CCCR_CME_CAN 0
#define CCCR_CME_CANFD 0x1
#define CCCR_CME_CANFD_BRS 0x2
/* for version >=3.1.x */
#define CCCR_EFBI BIT(13)
#define CCCR_PXHD BIT(12)
#define CCCR_BRSE BIT(9)
#define CCCR_FDOE BIT(8)
/* for version >=3.2.x */
#define CCCR_NISO BIT(15)
/* for version >=3.3.x */
#define CCCR_WMM BIT(11)
#define CCCR_UTSU BIT(10)
/* Nominal Bit Timing & Prescaler Register (NBTP) */
#define NBTP_NSJW_MASK GENMASK(31, 25)
#define NBTP_NBRP_MASK GENMASK(24, 16)
#define NBTP_NTSEG1_MASK GENMASK(15, 8)
#define NBTP_NTSEG2_MASK GENMASK(6, 0)
/* Timestamp Counter Configuration Register (TSCC) */
#define TSCC_TCP_MASK GENMASK(19, 16)
#define TSCC_TSS_MASK GENMASK(1, 0)
#define TSCC_TSS_DISABLE 0x0
#define TSCC_TSS_INTERNAL 0x1
#define TSCC_TSS_EXTERNAL 0x2
/* Timestamp Counter Value Register (TSCV) */
#define TSCV_TSC_MASK GENMASK(15, 0)
/* Error Counter Register (ECR) */
#define ECR_RP BIT(15)
#define ECR_REC_MASK GENMASK(14, 8)
#define ECR_TEC_MASK GENMASK(7, 0)
/* Protocol Status Register (PSR) */
#define PSR_BO BIT(7)
#define PSR_EW BIT(6)
#define PSR_EP BIT(5)
#define PSR_LEC_MASK GENMASK(2, 0)
#define PSR_DLEC_MASK GENMASK(10, 8)
/* Interrupt Register (IR) */
#define IR_ALL_INT 0xffffffff
/* Renamed bits for versions > 3.1.x */
#define IR_ARA BIT(29)
#define IR_PED BIT(28)
#define IR_PEA BIT(27)
/* Bits for version 3.0.x */
#define IR_STE BIT(31)
#define IR_FOE BIT(30)
#define IR_ACKE BIT(29)
#define IR_BE BIT(28)
#define IR_CRCE BIT(27)
#define IR_WDI BIT(26)
#define IR_BO BIT(25)
#define IR_EW BIT(24)
#define IR_EP BIT(23)
#define IR_ELO BIT(22)
#define IR_BEU BIT(21)
#define IR_BEC BIT(20)
#define IR_DRX BIT(19)
#define IR_TOO BIT(18)
#define IR_MRAF BIT(17)
#define IR_TSW BIT(16)
#define IR_TEFL BIT(15)
#define IR_TEFF BIT(14)
#define IR_TEFW BIT(13)
#define IR_TEFN BIT(12)
#define IR_TFE BIT(11)
#define IR_TCF BIT(10)
#define IR_TC BIT(9)
#define IR_HPM BIT(8)
#define IR_RF1L BIT(7)
#define IR_RF1F BIT(6)
#define IR_RF1W BIT(5)
#define IR_RF1N BIT(4)
#define IR_RF0L BIT(3)
#define IR_RF0F BIT(2)
#define IR_RF0W BIT(1)
#define IR_RF0N BIT(0)
#define IR_ERR_STATE (IR_BO | IR_EW | IR_EP)
/* Interrupts for version 3.0.x */
#define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
IR_RF0L)
#define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X (IR_PED | IR_PEA)
#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
IR_RF0L)
#define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)
/* Interrupt Line Select (ILS) */
#define ILS_ALL_INT0 0x0
#define ILS_ALL_INT1 0xFFFFFFFF
/* Interrupt Line Enable (ILE) */
#define ILE_EINT1 BIT(1)
#define ILE_EINT0 BIT(0)
/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
#define RXFC_FWM_MASK GENMASK(30, 24)
#define RXFC_FS_MASK GENMASK(22, 16)
/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
#define RXFS_RFL BIT(25)
#define RXFS_FF BIT(24)
#define RXFS_FPI_MASK GENMASK(21, 16)
#define RXFS_FGI_MASK GENMASK(13, 8)
#define RXFS_FFL_MASK GENMASK(6, 0)
/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
#define RXESC_RBDS_MASK GENMASK(10, 8)
#define RXESC_F1DS_MASK GENMASK(6, 4)
#define RXESC_F0DS_MASK GENMASK(2, 0)
#define RXESC_64B 0x7
/* Tx Buffer Configuration (TXBC) */
#define TXBC_TFQS_MASK GENMASK(29, 24)
#define TXBC_NDTB_MASK GENMASK(21, 16)
/* Tx FIFO/Queue Status (TXFQS) */
#define TXFQS_TFQF BIT(21)
#define TXFQS_TFQPI_MASK GENMASK(20, 16)
#define TXFQS_TFGI_MASK GENMASK(12, 8)
#define TXFQS_TFFL_MASK GENMASK(5, 0)
/* Tx Buffer Element Size Configuration (TXESC) */
#define TXESC_TBDS_MASK GENMASK(2, 0)
#define TXESC_TBDS_64B 0x7
/* Tx Event FIFO Configuration (TXEFC) */
#define TXEFC_EFS_MASK GENMASK(21, 16)
/* Tx Event FIFO Status (TXEFS) */
#define TXEFS_TEFL BIT(25)
#define TXEFS_EFF BIT(24)
#define TXEFS_EFGI_MASK GENMASK(12, 8)
#define TXEFS_EFFL_MASK GENMASK(5, 0)
/* Tx Event FIFO Acknowledge (TXEFA) */
#define TXEFA_EFAI_MASK GENMASK(4, 0)
/* Message RAM Configuration (in bytes) */
#define SIDF_ELEMENT_SIZE 4
#define XIDF_ELEMENT_SIZE 8
#define RXF0_ELEMENT_SIZE 72
#define RXF1_ELEMENT_SIZE 72
#define RXB_ELEMENT_SIZE 72
#define TXE_ELEMENT_SIZE 8
#define TXB_ELEMENT_SIZE 72
/* Message RAM Elements */
#define M_CAN_FIFO_ID 0x0
#define M_CAN_FIFO_DLC 0x4
#define M_CAN_FIFO_DATA 0x8
/* Rx Buffer Element */
/* R0 */
#define RX_BUF_ESI BIT(31)
#define RX_BUF_XTD BIT(30)
#define RX_BUF_RTR BIT(29)
/* R1 */
#define RX_BUF_ANMF BIT(31)
#define RX_BUF_FDF BIT(21)
#define RX_BUF_BRS BIT(20)
#define RX_BUF_RXTS_MASK GENMASK(15, 0)
/* Tx Buffer Element */
/* T0 */
#define TX_BUF_ESI BIT(31)
#define TX_BUF_XTD BIT(30)
#define TX_BUF_RTR BIT(29)
/* T1 */
#define TX_BUF_EFC BIT(23)
#define TX_BUF_FDF BIT(21)
#define TX_BUF_BRS BIT(20)
#define TX_BUF_MM_MASK GENMASK(31, 24)
#define TX_BUF_DLC_MASK GENMASK(19, 16)
/* Tx event FIFO Element */
/* E1 */
#define TX_EVENT_MM_MASK GENMASK(31, 24)
#define TX_EVENT_TXTS_MASK GENMASK(15, 0)
/* Hrtimer polling interval */
#define HRTIMER_POLL_INTERVAL_MS 1
/* The ID and DLC registers are adjacent in M_CAN FIFO memory,
* and we can save a (potentially slow) bus round trip by combining
* reads and writes to them.
*/
struct id_and_dlc {
u32 id;
u32 dlc;
};
static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
return cdev->ops->read_reg(cdev, reg);
}
static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg,
u32 val)
{
cdev->ops->write_reg(cdev, reg, val);
}
static int
m_can_fifo_read(struct m_can_classdev *cdev,
u32 fgi, unsigned int offset, void *val, size_t val_count)
{
u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
offset;
if (val_count == 0)
return 0;
return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
}
static int
m_can_fifo_write(struct m_can_classdev *cdev,
u32 fpi, unsigned int offset, const void *val, size_t val_count)
{
u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
offset;
if (val_count == 0)
return 0;
return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
}
static inline int m_can_fifo_write_no_off(struct m_can_classdev *cdev,
u32 fpi, u32 val)
{
return cdev->ops->write_fifo(cdev, fpi, &val, 1);
}
static int
m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
{
u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE +
offset;
return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}
static inline bool _m_can_tx_fifo_full(u32 txfqs)
{
return !!(txfqs & TXFQS_TFQF);
}
static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
{
return _m_can_tx_fifo_full(m_can_read(cdev, M_CAN_TXFQS));
}
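/* Enter (enable = true) or leave (enable = false) configuration mode by
* setting or clearing CCCR.INIT and CCCR.CCE, then poll CCCR until the
* change has taken effect (bounded by a short udelay() loop).
*/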
static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
{
u32 cccr = m_can_read(cdev, M_CAN_CCCR);
u32 timeout = 10;
u32 val = 0;
/* Clear the Clock stop request if it was set */
if (cccr & CCCR_CSR)
cccr &= ~CCCR_CSR;
if (enable) {
/* enable m_can configuration */
m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
udelay(5);
/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
} else {
m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
}
/* there's a delay for module initialization */
if (enable)
val = CCCR_INIT | CCCR_CCE;
while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
if (timeout == 0) {
netdev_warn(cdev->net, "Failed to init module\n");
return;
}
timeout--;
udelay(1);
}
}
static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
/* Only interrupt line 0 is used in this driver */
m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}
static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
m_can_write(cdev, M_CAN_ILE, 0x0);
}
/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
* width.
*/
static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
{
u32 tscv;
u32 tsc;
tscv = m_can_read(cdev, M_CAN_TSCV);
tsc = FIELD_GET(TSCV_TSC_MASK, tscv);
return (tsc << 16);
}
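/* Discard a TX skb that is still pending in its echo slot, e.g. when the
* controller is restarted after bus-off, and account it as a TX error.
*/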
static void m_can_clean(struct net_device *net)
{
struct m_can_classdev *cdev = netdev_priv(net);
if (cdev->tx_skb) {
int putidx = 0;
net->stats.tx_errors++;
if (cdev->version > 30)
putidx = FIELD_GET(TXFQS_TFQPI_MASK,
m_can_read(cdev, M_CAN_TXFQS));
can_free_echo_skb(cdev->net, putidx, NULL);
cdev->tx_skb = NULL;
}
}
/* For peripherals, pass skb to rx-offload, which will push skb from
* napi. For non-peripherals, RX is done in napi already, so push
* directly. timestamp is used to ensure good skb ordering in
* rx-offload and is ignored for non-peripherals.
*/
static void m_can_receive_skb(struct m_can_classdev *cdev,
struct sk_buff *skb,
u32 timestamp)
{
if (cdev->is_peripheral) {
struct net_device_stats *stats = &cdev->net->stats;
int err;
err = can_rx_offload_queue_timestamp(&cdev->offload, skb,
timestamp);
if (err)
stats->rx_fifo_errors++;
} else {
netif_receive_skb(skb);
}
}
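/* Read one element from RX FIFO 0 at get index @fgi, convert it into a
* CAN / CAN FD skb and hand it to the stack (or to rx-offload for
* peripheral devices).
*/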
static int m_can_read_fifo(struct net_device *dev, u32 fgi)
{
struct net_device_stats *stats = &dev->stats;
struct m_can_classdev *cdev = netdev_priv(dev);
struct canfd_frame *cf;
struct sk_buff *skb;
struct id_and_dlc fifo_header;
u32 timestamp = 0;
int err;
err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2);
if (err)
goto out_fail;
if (fifo_header.dlc & RX_BUF_FDF)
skb = alloc_canfd_skb(dev, &cf);
else
skb = alloc_can_skb(dev, (struct can_frame **)&cf);
if (!skb) {
stats->rx_dropped++;
return 0;
}
if (fifo_header.dlc & RX_BUF_FDF)
cf->len = can_fd_dlc2len((fifo_header.dlc >> 16) & 0x0F);
else
cf->len = can_cc_dlc2len((fifo_header.dlc >> 16) & 0x0F);
if (fifo_header.id & RX_BUF_XTD)
cf->can_id = (fifo_header.id & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
cf->can_id = (fifo_header.id >> 18) & CAN_SFF_MASK;
if (fifo_header.id & RX_BUF_ESI) {
cf->flags |= CANFD_ESI;
netdev_dbg(dev, "ESI Error\n");
}
if (!(fifo_header.dlc & RX_BUF_FDF) && (fifo_header.id & RX_BUF_RTR)) {
cf->can_id |= CAN_RTR_FLAG;
} else {
if (fifo_header.dlc & RX_BUF_BRS)
cf->flags |= CANFD_BRS;
err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
cf->data, DIV_ROUND_UP(cf->len, 4));
if (err)
goto out_free_skb;
stats->rx_bytes += cf->len;
}
stats->rx_packets++;
timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16;
m_can_receive_skb(cdev, skb, timestamp);
return 0;
out_free_skb:
kfree_skb(skb);
out_fail:
netdev_err(dev, "FIFO read returned %d\n", err);
return err;
}
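/* Drain up to @quota elements from RX FIFO 0. The last processed get
* index is acknowledged with a single write to RXF0A after the loop.
*/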
static int m_can_do_rx_poll(struct net_device *dev, int quota)
{
struct m_can_classdev *cdev = netdev_priv(dev);
u32 pkts = 0;
u32 rxfs;
u32 rx_count;
u32 fgi;
int ack_fgi = -1;
int i;
int err = 0;
rxfs = m_can_read(cdev, M_CAN_RXF0S);
if (!(rxfs & RXFS_FFL_MASK)) {
netdev_dbg(dev, "no messages in fifo0\n");
return 0;
}
rx_count = FIELD_GET(RXFS_FFL_MASK, rxfs);
fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
for (i = 0; i < rx_count && quota > 0; ++i) {
err = m_can_read_fifo(dev, fgi);
if (err)
break;
quota--;
pkts++;
ack_fgi = fgi;
fgi = (++fgi >= cdev->mcfg[MRAM_RXF0].num ? 0 : fgi);
}
if (ack_fgi != -1)
m_can_write(cdev, M_CAN_RXF0A, ack_fgi);
if (err)
return err;
return pkts;
}
static int m_can_handle_lost_msg(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
struct can_frame *frame;
u32 timestamp = 0;
netdev_err(dev, "msg lost in rxf0\n");
stats->rx_errors++;
stats->rx_over_errors++;
skb = alloc_can_err_skb(dev, &frame);
if (unlikely(!skb))
return 0;
frame->can_id |= CAN_ERR_CRTL;
frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
m_can_receive_skb(cdev, skb, timestamp);
return 1;
}
static int m_can_handle_lec_err(struct net_device *dev,
enum m_can_lec_type lec_type)
{
struct m_can_classdev *cdev = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
u32 timestamp = 0;
cdev->can.can_stats.bus_error++;
stats->rx_errors++;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
if (unlikely(!skb))
return 0;
/* check for 'last error code' which tells us the
* type of the last error to occur on the CAN bus
*/
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (lec_type) {
case LEC_STUFF_ERROR:
netdev_dbg(dev, "stuff error\n");
cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
case LEC_FORM_ERROR:
netdev_dbg(dev, "form error\n");
cf->data[2] |= CAN_ERR_PROT_FORM;
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
cf->data[2] |= CAN_ERR_PROT_BIT1;
break;
case LEC_BIT0_ERROR:
netdev_dbg(dev, "bit0 error\n");
cf->data[2] |= CAN_ERR_PROT_BIT0;
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
break;
default:
break;
}
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
m_can_receive_skb(cdev, skb, timestamp);
return 1;
}
static int __m_can_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
{
struct m_can_classdev *cdev = netdev_priv(dev);
unsigned int ecr;
ecr = m_can_read(cdev, M_CAN_ECR);
bec->rxerr = FIELD_GET(ECR_REC_MASK, ecr);
bec->txerr = FIELD_GET(ECR_TEC_MASK, ecr);
return 0;
}
static int m_can_clk_start(struct m_can_classdev *cdev)
{
if (cdev->pm_clock_support == 0)
return 0;
return pm_runtime_resume_and_get(cdev->dev);
}
static void m_can_clk_stop(struct m_can_classdev *cdev)
{
if (cdev->pm_clock_support)
pm_runtime_put_sync(cdev->dev);
}
static int m_can_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
{
struct m_can_classdev *cdev = netdev_priv(dev);
int err;
err = m_can_clk_start(cdev);
if (err)
return err;
__m_can_get_berr_counter(dev, bec);
m_can_clk_stop(cdev);
return 0;
}
static int m_can_handle_state_change(struct net_device *dev,
enum can_state new_state)
{
struct m_can_classdev *cdev = netdev_priv(dev);
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
unsigned int ecr;
u32 timestamp = 0;
switch (new_state) {
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cdev->can.can_stats.error_warning++;
cdev->can.state = CAN_STATE_ERROR_WARNING;
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
cdev->can.can_stats.error_passive++;
cdev->can.state = CAN_STATE_ERROR_PASSIVE;
break;
case CAN_STATE_BUS_OFF:
/* bus-off state */
cdev->can.state = CAN_STATE_BUS_OFF;
m_can_disable_all_interrupts(cdev);
cdev->can.can_stats.bus_off++;
can_bus_off(dev);
break;
default:
break;
}
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
if (unlikely(!skb))
return 0;
__m_can_get_berr_counter(dev, &bec);
switch (new_state) {
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
ecr = m_can_read(cdev, M_CAN_ECR);
if (ecr & ECR_RP)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if (bec.txerr > 127)
cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
break;
case CAN_STATE_BUS_OFF:
/* bus-off state */
cf->can_id |= CAN_ERR_BUSOFF;
break;
default:
break;
}
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
m_can_receive_skb(cdev, skb, timestamp);
return 1;
}
static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
{
struct m_can_classdev *cdev = netdev_priv(dev);
int work_done = 0;
if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
netdev_dbg(dev, "entered error warning state\n");
work_done += m_can_handle_state_change(dev,
CAN_STATE_ERROR_WARNING);
}
if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
netdev_dbg(dev, "entered error passive state\n");
work_done += m_can_handle_state_change(dev,
CAN_STATE_ERROR_PASSIVE);
}
if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
netdev_dbg(dev, "entered error bus off state\n");
work_done += m_can_handle_state_change(dev,
CAN_STATE_BUS_OFF);
}
return work_done;
}
static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
if (irqstatus & IR_WDI)
netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
if (irqstatus & IR_BEU)
netdev_err(dev, "Bit Error Uncorrected\n");
if (irqstatus & IR_BEC)
netdev_err(dev, "Bit Error Corrected\n");
if (irqstatus & IR_TOO)
netdev_err(dev, "Timeout reached\n");
if (irqstatus & IR_MRAF)
netdev_err(dev, "Message RAM access failure occurred\n");
}
static inline bool is_lec_err(u8 lec)
{
return lec != LEC_NO_ERROR && lec != LEC_NO_CHANGE;
}
static inline bool m_can_is_protocol_err(u32 irqstatus)
{
return irqstatus & IR_ERR_LEC_31X;
}
static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus)
{
struct net_device_stats *stats = &dev->stats;
struct m_can_classdev *cdev = netdev_priv(dev);
struct can_frame *cf;
struct sk_buff *skb;
u32 timestamp = 0;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
/* update tx error stats since there is protocol error */
stats->tx_errors++;
/* update arbitration lost status */
if (cdev->version >= 31 && (irqstatus & IR_PEA)) {
netdev_dbg(dev, "Protocol error in Arbitration fail\n");
cdev->can.can_stats.arbitration_lost++;
if (skb) {
cf->can_id |= CAN_ERR_LOSTARB;
cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC;
}
}
if (unlikely(!skb)) {
netdev_dbg(dev, "allocation of skb failed\n");
return 0;
}
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
m_can_receive_skb(cdev, skb, timestamp);
return 1;
}
static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
u32 psr)
{
struct m_can_classdev *cdev = netdev_priv(dev);
int work_done = 0;
if (irqstatus & IR_RF0L)
work_done += m_can_handle_lost_msg(dev);
/* handle lec errors on the bus */
if (cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
u8 lec = FIELD_GET(PSR_LEC_MASK, psr);
u8 dlec = FIELD_GET(PSR_DLEC_MASK, psr);
if (is_lec_err(lec)) {
netdev_dbg(dev, "Arbitration phase error detected\n");
work_done += m_can_handle_lec_err(dev, lec);
}
if (is_lec_err(dlec)) {
netdev_dbg(dev, "Data phase error detected\n");
work_done += m_can_handle_lec_err(dev, dlec);
}
}
/* handle protocol errors in arbitration phase */
if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
m_can_is_protocol_err(irqstatus))
work_done += m_can_handle_protocol_error(dev, irqstatus);
/* other unprocessed error interrupts */
m_can_handle_other_err(dev, irqstatus);
return work_done;
}
static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus)
{
struct m_can_classdev *cdev = netdev_priv(dev);
int rx_work_or_err;
int work_done = 0;
if (!irqstatus)
goto end;
/* Errata workaround for issue "Needless activation of MRAF irq"
* During frame reception while the MCAN is in Error Passive state
* and the Receive Error Counter has the value MCAN_ECR.REC = 127,
* it may happen that MCAN_IR.MRAF is set although there was no
* Message RAM access failure.
* If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is generated
* The Message RAM Access Failure interrupt routine needs to check
* whether MCAN_ECR.RP = ’1’ and MCAN_ECR.REC = 127.
* In this case, reset MCAN_IR.MRAF. No further action is required.
*/
if (cdev->version <= 31 && irqstatus & IR_MRAF &&
m_can_read(cdev, M_CAN_ECR) & ECR_RP) {
struct can_berr_counter bec;
__m_can_get_berr_counter(dev, &bec);
if (bec.rxerr == 127) {
m_can_write(cdev, M_CAN_IR, IR_MRAF);
irqstatus &= ~IR_MRAF;
}
}
if (irqstatus & IR_ERR_STATE)
work_done += m_can_handle_state_errors(dev,
m_can_read(cdev, M_CAN_PSR));
if (irqstatus & IR_ERR_BUS_30X)
work_done += m_can_handle_bus_errors(dev, irqstatus,
m_can_read(cdev, M_CAN_PSR));
if (irqstatus & IR_RF0N) {
rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done));
if (rx_work_or_err < 0)
return rx_work_or_err;
work_done += rx_work_or_err;
}
end:
return work_done;
}
static int m_can_rx_peripheral(struct net_device *dev, u32 irqstatus)
{
struct m_can_classdev *cdev = netdev_priv(dev);
int work_done;
work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, irqstatus);
/* Don't re-enable interrupts if the driver had a fatal error
* (e.g., FIFO read failure).
*/
if (work_done < 0)
m_can_disable_all_interrupts(cdev);
return work_done;
}
static int m_can_poll(struct napi_struct *napi, int quota)
{
struct net_device *dev = napi->dev;
struct m_can_classdev *cdev = netdev_priv(dev);
int work_done;
u32 irqstatus;
irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
work_done = m_can_rx_handler(dev, quota, irqstatus);
/* Don't re-enable interrupts if the driver had a fatal error
* (e.g., FIFO read failure).
*/
if (work_done >= 0 && work_done < quota) {
napi_complete_done(napi, work_done);
m_can_enable_all_interrupts(cdev);
}
return work_done;
}
/* Echo tx skb and update net stats. Peripherals use rx-offload for
* echo. timestamp is used for peripherals to ensure correct ordering
* by rx-offload, and is ignored for non-peripherals.
*/
static void m_can_tx_update_stats(struct m_can_classdev *cdev,
unsigned int msg_mark,
u32 timestamp)
{
struct net_device *dev = cdev->net;
struct net_device_stats *stats = &dev->stats;
if (cdev->is_peripheral)
stats->tx_bytes +=
can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload,
msg_mark,
timestamp,
NULL);
else
stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
stats->tx_packets++;
}
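/* Process all elements currently in the TX event FIFO: complete the
* matching echo skbs (identified by the message marker) and acknowledge
* the last processed element via TXEFA.
*/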
static int m_can_echo_tx_event(struct net_device *dev)
{
u32 txe_count = 0;
u32 m_can_txefs;
u32 fgi = 0;
int ack_fgi = -1;
int i = 0;
int err = 0;
unsigned int msg_mark;
struct m_can_classdev *cdev = netdev_priv(dev);
/* read tx event fifo status */
m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);
/* Get Tx Event fifo element count */
txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs);
fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_txefs);
/* Get and process all sent elements */
for (i = 0; i < txe_count; i++) {
u32 txe, timestamp = 0;
/* get message marker, timestamp */
err = m_can_txe_fifo_read(cdev, fgi, 4, &txe);
if (err) {
netdev_err(dev, "TXE FIFO read returned %d\n", err);
break;
}
msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16;
ack_fgi = fgi;
fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi);
/* update stats */
m_can_tx_update_stats(cdev, msg_mark, timestamp);
}
if (ack_fgi != -1)
m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
ack_fgi));
return err;
}
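/* Interrupt handler. Also invoked from hrtimer_callback() with irq == 0
* when the device has no interrupt line and is polled instead.
*/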
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct m_can_classdev *cdev = netdev_priv(dev);
u32 ir;
if (pm_runtime_suspended(cdev->dev))
return IRQ_NONE;
ir = m_can_read(cdev, M_CAN_IR);
if (!ir)
return IRQ_NONE;
/* ACK all irqs */
m_can_write(cdev, M_CAN_IR, ir);
if (cdev->ops->clear_interrupts)
cdev->ops->clear_interrupts(cdev);
/* schedule NAPI in case of
* - rx IRQ
* - state change IRQ
* - bus error IRQ and bus error reporting
*/
if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
cdev->irqstatus = ir;
if (!cdev->is_peripheral) {
m_can_disable_all_interrupts(cdev);
napi_schedule(&cdev->napi);
} else if (m_can_rx_peripheral(dev, ir) < 0) {
goto out_fail;
}
}
if (cdev->version == 30) {
if (ir & IR_TC) {
/* Transmission Complete Interrupt */
u32 timestamp = 0;
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
m_can_tx_update_stats(cdev, 0, timestamp);
netif_wake_queue(dev);
}
} else {
if (ir & IR_TEFN) {
/* New TX FIFO Element arrived */
if (m_can_echo_tx_event(dev) != 0)
goto out_fail;
if (netif_queue_stopped(dev) &&
!m_can_tx_fifo_full(cdev))
netif_wake_queue(dev);
}
}
if (cdev->is_peripheral)
can_rx_offload_threaded_irq_finish(&cdev->offload);
return IRQ_HANDLED;
out_fail:
m_can_disable_all_interrupts(cdev);
return IRQ_HANDLED;
}
static const struct can_bittiming_const m_can_bittiming_const_30X = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
.tseg1_max = 64,
.tseg2_min = 1, /* Time segment 2 = phase_seg2 */
.tseg2_max = 16,
.sjw_max = 16,
.brp_min = 1,
.brp_max = 1024,
.brp_inc = 1,
};
static const struct can_bittiming_const m_can_data_bittiming_const_30X = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
.tseg1_max = 16,
.tseg2_min = 1, /* Time segment 2 = phase_seg2 */
.tseg2_max = 8,
.sjw_max = 4,
.brp_min = 1,
.brp_max = 32,
.brp_inc = 1,
};
static const struct can_bittiming_const m_can_bittiming_const_31X = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
.tseg1_max = 256,
.tseg2_min = 2, /* Time segment 2 = phase_seg2 */
.tseg2_max = 128,
.sjw_max = 128,
.brp_min = 1,
.brp_max = 512,
.brp_inc = 1,
};
static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
.name = KBUILD_MODNAME,
.tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */
.tseg1_max = 32,
.tseg2_min = 1, /* Time segment 2 = phase_seg2 */
.tseg2_max = 16,
.sjw_max = 16,
.brp_min = 1,
.brp_max = 32,
.brp_inc = 1,
};
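/* Program NBTP from the nominal bit timing and, in CAN FD mode, DBTP and
* TDCR from the data bit timing. Values are written in hardware format,
* i.e. register field = configured value - 1.
*/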
static int m_can_set_bittiming(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
const struct can_bittiming *bt = &cdev->can.bittiming;
const struct can_bittiming *dbt = &cdev->can.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
u32 reg_btp;
brp = bt->brp - 1;
sjw = bt->sjw - 1;
tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
tseg2 = bt->phase_seg2 - 1;
reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
FIELD_PREP(NBTP_NSJW_MASK, sjw) |
FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
m_can_write(cdev, M_CAN_NBTP, reg_btp);
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
reg_btp = 0;
brp = dbt->brp - 1;
sjw = dbt->sjw - 1;
tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
tseg2 = dbt->phase_seg2 - 1;
/* TDC is only needed for bitrates beyond 2.5 MBit/s.
* This is mentioned in the "Bit Time Requirements for CAN FD"
* paper presented at the International CAN Conference 2013
*/
if (dbt->bitrate > 2500000) {
u32 tdco, ssp;
/* Use the same value of secondary sampling point
* as the data sampling point
*/
ssp = dbt->sample_point;
/* Equation based on Bosch's M_CAN User Manual's
* Transmitter Delay Compensation Section
*/
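/* Worked example (hypothetical values): 40 MHz CAN clock, 4 Mbit/s
* data bitrate, 75.0% secondary sample point (ssp = 750):
* tdco = 40000 * 750 / 4000000 = 7 clock periods
*/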
tdco = (cdev->can.clock.freq / 1000) *
ssp / dbt->bitrate;
/* Max valid TDCO value is 127 */
if (tdco > 127) {
netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
tdco);
tdco = 127;
}
reg_btp |= DBTP_TDC;
m_can_write(cdev, M_CAN_TDCR,
FIELD_PREP(TDCR_TDCO_MASK, tdco));
}
reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
FIELD_PREP(DBTP_DSJW_MASK, sjw) |
FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);
m_can_write(cdev, M_CAN_DBTP, reg_btp);
}
return 0;
}
/* Configure M_CAN chip:
* - set rx buffer/fifo element size
* - configure rx fifo
* - accept non-matching frame into fifo 0
* - configure tx buffer
* - >= v3.1.x: TX FIFO is used
* - configure mode
* - setup bittiming
* - configure timestamp generation
*/
static int m_can_chip_config(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
u32 interrupts = IR_ALL_INT;
u32 cccr, test;
int err;
err = m_can_init_ram(cdev);
if (err) {
dev_err(cdev->dev, "Message RAM configuration failed\n");
return err;
}
/* Disable unused interrupts */
interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TEFW | IR_TFE |
IR_TCF | IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N |
IR_RF0F | IR_RF0W);
m_can_config_endisable(cdev, true);
/* RX Buffer/FIFO Element Size 64 bytes data field */
m_can_write(cdev, M_CAN_RXESC,
FIELD_PREP(RXESC_RBDS_MASK, RXESC_64B) |
FIELD_PREP(RXESC_F1DS_MASK, RXESC_64B) |
FIELD_PREP(RXESC_F0DS_MASK, RXESC_64B));
/* Accept Non-matching Frames Into FIFO 0 */
m_can_write(cdev, M_CAN_GFC, 0x0);
if (cdev->version == 30) {
/* only support one Tx Buffer currently */
m_can_write(cdev, M_CAN_TXBC, FIELD_PREP(TXBC_NDTB_MASK, 1) |
cdev->mcfg[MRAM_TXB].off);
} else {
/* TX FIFO is used for newer IP Core versions */
m_can_write(cdev, M_CAN_TXBC,
FIELD_PREP(TXBC_TFQS_MASK,
cdev->mcfg[MRAM_TXB].num) |
cdev->mcfg[MRAM_TXB].off);
}
/* support 64 bytes payload */
m_can_write(cdev, M_CAN_TXESC,
FIELD_PREP(TXESC_TBDS_MASK, TXESC_TBDS_64B));
/* TX Event FIFO */
if (cdev->version == 30) {
m_can_write(cdev, M_CAN_TXEFC,
FIELD_PREP(TXEFC_EFS_MASK, 1) |
cdev->mcfg[MRAM_TXE].off);
} else {
/* Full TX Event FIFO is used */
m_can_write(cdev, M_CAN_TXEFC,
FIELD_PREP(TXEFC_EFS_MASK,
cdev->mcfg[MRAM_TXE].num) |
cdev->mcfg[MRAM_TXE].off);
}
/* rx fifo configuration, blocking mode, fifo size 1 */
m_can_write(cdev, M_CAN_RXF0C,
FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
cdev->mcfg[MRAM_RXF0].off);
m_can_write(cdev, M_CAN_RXF1C,
FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF1].num) |
cdev->mcfg[MRAM_RXF1].off);
cccr = m_can_read(cdev, M_CAN_CCCR);
test = m_can_read(cdev, M_CAN_TEST);
test &= ~TEST_LBCK;
if (cdev->version == 30) {
/* Version 3.0.x */
cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR |
FIELD_PREP(CCCR_CMR_MASK, FIELD_MAX(CCCR_CMR_MASK)) |
FIELD_PREP(CCCR_CME_MASK, FIELD_MAX(CCCR_CME_MASK)));
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
cccr |= FIELD_PREP(CCCR_CME_MASK, CCCR_CME_CANFD_BRS);
} else {
/* Version 3.1.x or 3.2.x */
cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
CCCR_NISO | CCCR_DAR);
/* Only 3.2.x has NISO Bit implemented */
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
cccr |= CCCR_NISO;
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
cccr |= (CCCR_BRSE | CCCR_FDOE);
}
/* Loopback Mode */
if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
cccr |= CCCR_TEST | CCCR_MON;
test |= TEST_LBCK;
}
/* Enable Monitoring (all versions) */
if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
cccr |= CCCR_MON;
/* Disable Auto Retransmission (all versions) */
if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
cccr |= CCCR_DAR;
/* Write config */
m_can_write(cdev, M_CAN_CCCR, cccr);
m_can_write(cdev, M_CAN_TEST, test);
/* Enable interrupts */
if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
if (cdev->version == 30)
interrupts &= ~(IR_ERR_LEC_30X);
else
interrupts &= ~(IR_ERR_LEC_31X);
}
m_can_write(cdev, M_CAN_IE, interrupts);
/* route all interrupts to INT0 */
m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);
/* set bittiming params */
m_can_set_bittiming(dev);
/* enable internal timestamp generation, with a prescaler of 16. The
* prescaler is applied to the nominal bit timing
*/
m_can_write(cdev, M_CAN_TSCC,
FIELD_PREP(TSCC_TCP_MASK, 0xf) |
FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));
m_can_config_endisable(cdev, false);
if (cdev->ops->init)
cdev->ops->init(cdev);
return 0;
}
static int m_can_start(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
int ret;
/* basic m_can configuration */
ret = m_can_chip_config(dev);
if (ret)
return ret;
cdev->can.state = CAN_STATE_ERROR_ACTIVE;
m_can_enable_all_interrupts(cdev);
if (!dev->irq) {
dev_dbg(cdev->dev, "Start hrtimer\n");
hrtimer_start(&cdev->hrtimer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
HRTIMER_MODE_REL_PINNED);
}
return 0;
}
static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
{
switch (mode) {
case CAN_MODE_START:
m_can_clean(dev);
m_can_start(dev);
netif_wake_queue(dev);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
/* Checks core release number of M_CAN
* returns 0 if an unsupported device is detected
* else it returns the release and step coded as:
* return value = 10 * <release> + 1 * <step>
*/
static int m_can_check_core_release(struct m_can_classdev *cdev)
{
u32 crel_reg;
u8 rel;
u8 step;
int res;
/* Read Core Release Version and split into version number
* Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
*/
crel_reg = m_can_read(cdev, M_CAN_CREL);
rel = (u8)FIELD_GET(CREL_REL_MASK, crel_reg);
step = (u8)FIELD_GET(CREL_STEP_MASK, crel_reg);
if (rel == 3) {
/* M_CAN v3.x.y: create return value */
res = 30 + step;
} else {
/* Unsupported M_CAN version */
res = 0;
}
return res;
}
/* Selectable Non ISO support only in version 3.2.x
* This function checks if the bit is writable.
*/
static bool m_can_niso_supported(struct m_can_classdev *cdev)
{
u32 cccr_reg, cccr_poll = 0;
int niso_timeout = -ETIMEDOUT;
int i;
m_can_config_endisable(cdev, true);
cccr_reg = m_can_read(cdev, M_CAN_CCCR);
cccr_reg |= CCCR_NISO;
m_can_write(cdev, M_CAN_CCCR, cccr_reg);
for (i = 0; i <= 10; i++) {
cccr_poll = m_can_read(cdev, M_CAN_CCCR);
if (cccr_poll == cccr_reg) {
niso_timeout = 0;
break;
}
usleep_range(1, 5);
}
/* Clear NISO */
cccr_reg &= ~(CCCR_NISO);
m_can_write(cdev, M_CAN_CCCR, cccr_reg);
m_can_config_endisable(cdev, false);
/* return false if timed out (-ETIMEDOUT), else return true */
return !niso_timeout;
}
static int m_can_dev_setup(struct m_can_classdev *cdev)
{
struct net_device *dev = cdev->net;
int m_can_version, err;
m_can_version = m_can_check_core_release(cdev);
/* return if unsupported version */
if (!m_can_version) {
dev_err(cdev->dev, "Unsupported version number: %2d",
m_can_version);
return -EINVAL;
}
if (!cdev->is_peripheral)
netif_napi_add(dev, &cdev->napi, m_can_poll);
/* Shared properties of all M_CAN versions */
cdev->version = m_can_version;
cdev->can.do_set_mode = m_can_set_mode;
cdev->can.do_get_berr_counter = m_can_get_berr_counter;
/* Set M_CAN supported operations */
cdev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD |
CAN_CTRLMODE_ONE_SHOT;
/* Set properties depending on M_CAN version */
switch (cdev->version) {
case 30:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
if (err)
return err;
cdev->can.bittiming_const = &m_can_bittiming_const_30X;
cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
if (err)
return err;
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
cdev->can.ctrlmode_supported |=
(m_can_niso_supported(cdev) ?
CAN_CTRLMODE_FD_NON_ISO : 0);
break;
default:
dev_err(cdev->dev, "Unsupported version number: %2d",
cdev->version);
return -EINVAL;
}
if (cdev->ops->init)
cdev->ops->init(cdev);
return 0;
}
static void m_can_stop(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
if (!dev->irq) {
dev_dbg(cdev->dev, "Stop hrtimer\n");
hrtimer_cancel(&cdev->hrtimer);
}
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
/* Set init mode to disengage from the network */
m_can_config_endisable(cdev, true);
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
}
static int m_can_close(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
netif_stop_queue(dev);
if (!cdev->is_peripheral)
napi_disable(&cdev->napi);
m_can_stop(dev);
m_can_clk_stop(cdev);
free_irq(dev->irq, dev);
if (cdev->is_peripheral) {
cdev->tx_skb = NULL;
destroy_workqueue(cdev->tx_wq);
cdev->tx_wq = NULL;
can_rx_offload_disable(&cdev->offload);
}
close_candev(dev);
phy_power_off(cdev->transceiver);
return 0;
}
static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
{
struct m_can_classdev *cdev = netdev_priv(dev);
/* get wrap-around for loopback skb index */
unsigned int wrap = cdev->can.echo_skb_max;
int next_idx;
/* calculate next index */
next_idx = (++putidx >= wrap ? 0 : putidx);
/* check if occupied */
return !!cdev->can.echo_skb[next_idx];
}
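/* Write the frame from cdev->tx_skb into the TX buffer/FIFO. For v3.0.x
* a single dedicated TX buffer is used; for >= v3.1.x the frame is queued
* at the FIFO put index, which also serves as the echo message marker.
*/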
static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
{
struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
struct net_device *dev = cdev->net;
struct sk_buff *skb = cdev->tx_skb;
struct id_and_dlc fifo_header;
u32 cccr, fdflags;
u32 txfqs;
int err;
int putidx;
cdev->tx_skb = NULL;
/* Generate ID field for TX buffer Element */
/* Common to all supported M_CAN versions */
if (cf->can_id & CAN_EFF_FLAG) {
fifo_header.id = cf->can_id & CAN_EFF_MASK;
fifo_header.id |= TX_BUF_XTD;
} else {
fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18);
}
if (cf->can_id & CAN_RTR_FLAG)
fifo_header.id |= TX_BUF_RTR;
if (cdev->version == 30) {
netif_stop_queue(dev);
fifo_header.dlc = can_fd_len2dlc(cf->len) << 16;
/* Write the frame ID, DLC, and payload to the FIFO element. */
err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2);
if (err)
goto out_fail;
err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
cf->data, DIV_ROUND_UP(cf->len, 4));
if (err)
goto out_fail;
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
cccr = m_can_read(cdev, M_CAN_CCCR);
cccr &= ~CCCR_CMR_MASK;
if (can_is_canfd_skb(skb)) {
if (cf->flags & CANFD_BRS)
cccr |= FIELD_PREP(CCCR_CMR_MASK,
CCCR_CMR_CANFD_BRS);
else
cccr |= FIELD_PREP(CCCR_CMR_MASK,
CCCR_CMR_CANFD);
} else {
cccr |= FIELD_PREP(CCCR_CMR_MASK, CCCR_CMR_CAN);
}
m_can_write(cdev, M_CAN_CCCR, cccr);
}
m_can_write(cdev, M_CAN_TXBTIE, 0x1);
can_put_echo_skb(skb, dev, 0, 0);
m_can_write(cdev, M_CAN_TXBAR, 0x1);
/* End of xmit function for version 3.0.x */
} else {
/* Transmit routine for version >= v3.1.x */
txfqs = m_can_read(cdev, M_CAN_TXFQS);
/* Check if FIFO full */
if (_m_can_tx_fifo_full(txfqs)) {
/* This shouldn't happen */
netif_stop_queue(dev);
netdev_warn(dev,
"TX queue active although FIFO is full.");
if (cdev->is_peripheral) {
kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
} else {
return NETDEV_TX_BUSY;
}
}
/* get put index for frame */
putidx = FIELD_GET(TXFQS_TFQPI_MASK, txfqs);
/* Construct DLC Field, with CAN-FD configuration.
* Use the put index of the fifo as the message marker,
* used in the TX interrupt for sending the correct echo frame.
*/
/* get CAN FD configuration of frame */
fdflags = 0;
if (can_is_canfd_skb(skb)) {
fdflags |= TX_BUF_FDF;
if (cf->flags & CANFD_BRS)
fdflags |= TX_BUF_BRS;
}
fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
fdflags | TX_BUF_EFC;
err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2);
if (err)
goto out_fail;
err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA,
cf->data, DIV_ROUND_UP(cf->len, 4));
if (err)
goto out_fail;
/* Push loopback echo.
* Will be looped back on TX interrupt based on message marker
*/
can_put_echo_skb(skb, dev, putidx, 0);
/* Enable TX FIFO element to start transfer */
m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
/* stop network queue if fifo full */
if (m_can_tx_fifo_full(cdev) ||
m_can_next_echo_skb_occupied(dev, putidx))
netif_stop_queue(dev);
}
return NETDEV_TX_OK;
out_fail:
netdev_err(dev, "FIFO write returned %d\n", err);
m_can_disable_all_interrupts(cdev);
return NETDEV_TX_BUSY;
}
static void m_can_tx_work_queue(struct work_struct *ws)
{
struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
tx_work);
m_can_tx_handler(cdev);
}
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
if (cdev->is_peripheral) {
if (cdev->tx_skb) {
netdev_err(dev, "hard_xmit called while tx busy\n");
return NETDEV_TX_BUSY;
}
if (cdev->can.state == CAN_STATE_BUS_OFF) {
m_can_clean(dev);
} else {
/* Need to stop the queue to avoid numerous requests
* from being sent. Suggested improvement is to create
* a queueing mechanism that will queue the skbs and
* process them in order.
*/
cdev->tx_skb = skb;
netif_stop_queue(cdev->net);
queue_work(cdev->tx_wq, &cdev->tx_work);
}
} else {
cdev->tx_skb = skb;
return m_can_tx_handler(cdev);
}
return NETDEV_TX_OK;
}
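/* Polling mode: when no interrupt line is available, this hrtimer invokes
* the ISR every HRTIMER_POLL_INTERVAL_MS milliseconds.
*/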
static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
{
struct m_can_classdev *cdev = container_of(timer, struct
m_can_classdev, hrtimer);
m_can_isr(0, cdev->net);
hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS));
return HRTIMER_RESTART;
}
static int m_can_open(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
int err;
err = phy_power_on(cdev->transceiver);
if (err)
return err;
err = m_can_clk_start(cdev);
if (err)
goto out_phy_power_off;
/* open the can device */
err = open_candev(dev);
if (err) {
netdev_err(dev, "failed to open can device\n");
goto exit_disable_clks;
}
if (cdev->is_peripheral)
can_rx_offload_enable(&cdev->offload);
/* register interrupt handler */
if (cdev->is_peripheral) {
cdev->tx_skb = NULL;
cdev->tx_wq = alloc_workqueue("mcan_wq",
WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
if (!cdev->tx_wq) {
err = -ENOMEM;
goto out_wq_fail;
}
INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
err = request_threaded_irq(dev->irq, NULL, m_can_isr,
IRQF_ONESHOT,
dev->name, dev);
} else if (dev->irq) {
err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
dev);
}
if (err < 0) {
netdev_err(dev, "failed to request interrupt\n");
goto exit_irq_fail;
}
/* start the m_can controller */
err = m_can_start(dev);
if (err)
goto exit_irq_fail;
if (!cdev->is_peripheral)
napi_enable(&cdev->napi);
netif_start_queue(dev);
return 0;
exit_irq_fail:
if (cdev->is_peripheral)
destroy_workqueue(cdev->tx_wq);
out_wq_fail:
if (cdev->is_peripheral)
can_rx_offload_disable(&cdev->offload);
close_candev(dev);
exit_disable_clks:
m_can_clk_stop(cdev);
out_phy_power_off:
phy_power_off(cdev->transceiver);
return err;
}
static const struct net_device_ops m_can_netdev_ops = {
.ndo_open = m_can_open,
.ndo_stop = m_can_close,
.ndo_start_xmit = m_can_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops m_can_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
};
static int register_m_can_dev(struct net_device *dev)
{
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &m_can_netdev_ops;
dev->ethtool_ops = &m_can_ethtool_ops;
return register_candev(dev);
}
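/* Verify that the configured Message RAM layout, from the start of the
* SIDF section to the end of the TX buffer section, fits into the
* available Message RAM of @mram_max_size bytes.
*/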
int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size)
{
u32 total_size;
total_size = cdev->mcfg[MRAM_TXB].off - cdev->mcfg[MRAM_SIDF].off +
cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
if (total_size > mram_max_size) {
dev_err(cdev->dev, "Total size of mram config(%u) exceeds mram(%u)\n",
total_size, mram_max_size);
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(m_can_check_mram_cfg);
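/* Lay out the Message RAM sections back to back: each section's offset is
* derived from the previous section's offset, element count and element
* size, starting from the SIDF offset given in "bosch,mram-cfg".
*/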
static void m_can_of_parse_mram(struct m_can_classdev *cdev,
const u32 *mram_config_vals)
{
cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0];
cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1];
cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off +
cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2];
cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off +
cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
FIELD_MAX(RXFC_FS_MASK);
cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off +
cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
FIELD_MAX(RXFC_FS_MASK);
cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off +
cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
cdev->mcfg[MRAM_RXB].num = mram_config_vals[5];
cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off +
cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
cdev->mcfg[MRAM_TXE].num = mram_config_vals[6];
cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off +
cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
FIELD_MAX(TXBC_NDTB_MASK);
dev_dbg(cdev->dev,
"sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}
int m_can_init_ram(struct m_can_classdev *cdev)
{
int end, i, start;
int err = 0;
/* initialize the entire Message RAM in use to avoid possible
* ECC/parity checksum errors when reading an uninitialized buffer
*/
start = cdev->mcfg[MRAM_SIDF].off;
end = cdev->mcfg[MRAM_TXB].off +
cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
for (i = start; i < end; i += 4) {
err = m_can_fifo_write_no_off(cdev, i, 0x0);
if (err)
break;
}
return err;
}
EXPORT_SYMBOL_GPL(m_can_init_ram);
int m_can_class_get_clocks(struct m_can_classdev *cdev)
{
int ret = 0;
cdev->hclk = devm_clk_get(cdev->dev, "hclk");
cdev->cclk = devm_clk_get(cdev->dev, "cclk");
if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) {
dev_err(cdev->dev, "no clock found\n");
ret = -ENODEV;
}
return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_get_clocks);
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
int sizeof_priv)
{
struct m_can_classdev *class_dev = NULL;
u32 mram_config_vals[MRAM_CFG_LEN];
struct net_device *net_dev;
u32 tx_fifo_size;
int ret;
ret = fwnode_property_read_u32_array(dev_fwnode(dev),
"bosch,mram-cfg",
mram_config_vals,
sizeof(mram_config_vals) / 4);
if (ret) {
dev_err(dev, "Could not get Message RAM configuration.");
goto out;
}
/* Get TX FIFO size
* Defines the total number of echo buffers for loopback
*/
tx_fifo_size = mram_config_vals[7];
/* allocate the m_can device */
net_dev = alloc_candev(sizeof_priv, tx_fifo_size);
if (!net_dev) {
dev_err(dev, "Failed to allocate CAN device");
goto out;
}
class_dev = netdev_priv(net_dev);
class_dev->net = net_dev;
class_dev->dev = dev;
SET_NETDEV_DEV(net_dev, dev);
m_can_of_parse_mram(class_dev, mram_config_vals);
out:
return class_dev;
}
EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);
void m_can_class_free_dev(struct net_device *net)
{
free_candev(net);
}
EXPORT_SYMBOL_GPL(m_can_class_free_dev);
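/* Registration entry point used by the bus-specific glue drivers: sets up
* rx-offload for peripheral devices, probes the core version and registers
* the CAN netdevice. Clocks are stopped again until the device is opened.
*/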
int m_can_class_register(struct m_can_classdev *cdev)
{
int ret;
if (cdev->pm_clock_support) {
ret = m_can_clk_start(cdev);
if (ret)
return ret;
}
if (cdev->is_peripheral) {
ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
NAPI_POLL_WEIGHT);
if (ret)
goto clk_disable;
}
if (!cdev->net->irq)
cdev->hrtimer.function = &hrtimer_callback;
ret = m_can_dev_setup(cdev);
if (ret)
goto rx_offload_del;
ret = register_m_can_dev(cdev->net);
if (ret) {
dev_err(cdev->dev, "registering %s failed (err=%d)\n",
cdev->net->name, ret);
goto rx_offload_del;
}
of_can_transceiver(cdev->net);
dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
KBUILD_MODNAME, cdev->net->irq, cdev->version);
/* Probe finished
* Stop clocks. They will be reactivated once the M_CAN device is opened
*/
m_can_clk_stop(cdev);
return 0;
rx_offload_del:
if (cdev->is_peripheral)
can_rx_offload_del(&cdev->offload);
clk_disable:
m_can_clk_stop(cdev);
return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
if (cdev->is_peripheral)
can_rx_offload_del(&cdev->offload);
unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
int m_can_class_suspend(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
m_can_stop(ndev);
m_can_clk_stop(cdev);
}
pinctrl_pm_select_sleep_state(dev);
cdev->can.state = CAN_STATE_SLEEPING;
return 0;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);
int m_can_class_resume(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
pinctrl_pm_select_default_state(dev);
cdev->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
int ret;
ret = m_can_clk_start(cdev);
if (ret)
return ret;
ret = m_can_start(ndev);
if (ret) {
m_can_clk_stop(cdev);
return ret;
}
netif_device_attach(ndev);
netif_start_queue(ndev);
}
return 0;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);
MODULE_AUTHOR("Dong Aisheng <[email protected]>");
MODULE_AUTHOR("Dan Murphy <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
/* ==== end of drivers/net/can/m_can/m_can.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <[email protected]>
*/
#include <linux/can/dev.h>
#include <linux/module.h>
#define MOD_DESC "CAN device driver interface"
MODULE_DESCRIPTION(MOD_DESC);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Wolfgang Grandegger <[email protected]>");
/* Local echo of CAN messages
*
* CAN network devices *should* support a local echo functionality
* (see Documentation/networking/can.rst). To test the handling of CAN
* interfaces that do not support the local echo both driver types are
* implemented. In the case that the driver does not support the echo
* the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
* to perform the echo as a fallback solution.
*/
void can_flush_echo_skb(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
int i;
for (i = 0; i < priv->echo_skb_max; i++) {
if (priv->echo_skb[i]) {
kfree_skb(priv->echo_skb[i]);
priv->echo_skb[i] = NULL;
stats->tx_dropped++;
stats->tx_aborted_errors++;
}
}
}
/* Put the skb on the stack to be looped back locally later on
*
* The function is typically called in the start_xmit function
* of the device driver. The driver must protect access to
* priv->echo_skb, if necessary.
*/
int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx, unsigned int frame_len)
{
struct can_priv *priv = netdev_priv(dev);
BUG_ON(idx >= priv->echo_skb_max);
/* check flag whether this packet has to be looped back */
if (!(dev->flags & IFF_ECHO) ||
(skb->protocol != htons(ETH_P_CAN) &&
skb->protocol != htons(ETH_P_CANFD) &&
skb->protocol != htons(ETH_P_CANXL))) {
kfree_skb(skb);
return 0;
}
if (!priv->echo_skb[idx]) {
skb = can_create_echo_skb(skb);
if (!skb)
return -ENOMEM;
/* make settings for echo to reduce code in irq context */
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->dev = dev;
/* save frame_len to reuse it when transmission is completed */
can_skb_prv(skb)->frame_len = frame_len;
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_tx_timestamp(skb);
/* save this skb for tx interrupt echo handling */
priv->echo_skb[idx] = skb;
} else {
/* locking problem with netif_stop_queue() ?? */
netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
kfree_skb(skb);
return -EBUSY;
}
return 0;
}
EXPORT_SYMBOL_GPL(can_put_echo_skb);
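/* Fetch the echo skb stored at @idx without looping it back. Returns the
* skb ready for loopback delivery, or NULL if the slot is empty, @idx is
* out of range, or the skb was not a loopback packet.
*/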
struct sk_buff *
__can_get_echo_skb(struct net_device *dev, unsigned int idx,
unsigned int *len_ptr, unsigned int *frame_len_ptr)
{
struct can_priv *priv = netdev_priv(dev);
if (idx >= priv->echo_skb_max) {
netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
__func__, idx, priv->echo_skb_max);
return NULL;
}
if (priv->echo_skb[idx]) {
/* Using "struct canfd_frame::len" for the frame
* length is supported on both CAN and CANFD frames.
*/
struct sk_buff *skb = priv->echo_skb[idx];
struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)
skb_tstamp_tx(skb, skb_hwtstamps(skb));
/* get the real payload length for netdev statistics */
*len_ptr = can_skb_get_data_len(skb);
if (frame_len_ptr)
*frame_len_ptr = can_skb_priv->frame_len;
priv->echo_skb[idx] = NULL;
if (skb->pkt_type == PACKET_LOOPBACK) {
skb->pkt_type = PACKET_BROADCAST;
} else {
dev_consume_skb_any(skb);
return NULL;
}
return skb;
}
return NULL;
}
/* Get the skb from the stack and loop it back locally
*
* The function is typically called when the TX done interrupt
* is handled in the device driver. The driver must protect
* access to priv->echo_skb, if necessary.
*/
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
unsigned int *frame_len_ptr)
{
struct sk_buff *skb;
unsigned int len;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
return 0;
skb_get(skb);
if (netif_rx(skb) == NET_RX_SUCCESS)
dev_consume_skb_any(skb);
else
dev_kfree_skb_any(skb);
return len;
}
EXPORT_SYMBOL_GPL(can_get_echo_skb);
/* Remove the skb from the stack and free it.
*
* The function is typically called when TX failed.
*/
void can_free_echo_skb(struct net_device *dev, unsigned int idx,
unsigned int *frame_len_ptr)
{
struct can_priv *priv = netdev_priv(dev);
if (idx >= priv->echo_skb_max) {
netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
__func__, idx, priv->echo_skb_max);
return;
}
if (priv->echo_skb[idx]) {
struct sk_buff *skb = priv->echo_skb[idx];
struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
if (frame_len_ptr)
*frame_len_ptr = can_skb_priv->frame_len;
dev_kfree_skb_any(skb);
priv->echo_skb[idx] = NULL;
}
}
EXPORT_SYMBOL_GPL(can_free_echo_skb);
/* fill common values for CAN sk_buffs */
static void init_can_skb_reserve(struct sk_buff *skb)
{
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
can_skb_reserve(skb);
can_skb_prv(skb)->skbcnt = 0;
}
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
sizeof(struct can_frame));
if (unlikely(!skb)) {
*cf = NULL;
return NULL;
}
skb->protocol = htons(ETH_P_CAN);
init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
*cf = skb_put_zero(skb, sizeof(struct can_frame));
return skb;
}
EXPORT_SYMBOL_GPL(alloc_can_skb);
struct sk_buff *alloc_canfd_skb(struct net_device *dev,
struct canfd_frame **cfd)
{
struct sk_buff *skb;
skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
sizeof(struct canfd_frame));
if (unlikely(!skb)) {
*cfd = NULL;
return NULL;
}
skb->protocol = htons(ETH_P_CANFD);
init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
*cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
/* set CAN FD flag by default */
(*cfd)->flags = CANFD_FDF;
return skb;
}
EXPORT_SYMBOL_GPL(alloc_canfd_skb);
struct sk_buff *alloc_canxl_skb(struct net_device *dev,
struct canxl_frame **cxl,
unsigned int data_len)
{
struct sk_buff *skb;
if (data_len < CANXL_MIN_DLEN || data_len > CANXL_MAX_DLEN)
goto out_error;
skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
CANXL_HDR_SIZE + data_len);
if (unlikely(!skb))
goto out_error;
skb->protocol = htons(ETH_P_CANXL);
init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
*cxl = skb_put_zero(skb, CANXL_HDR_SIZE + data_len);
/* set CAN XL flag and length information by default */
(*cxl)->flags = CANXL_XLF;
(*cxl)->len = data_len;
return skb;
out_error:
*cxl = NULL;
return NULL;
}
EXPORT_SYMBOL_GPL(alloc_canxl_skb);
struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
skb = alloc_can_skb(dev, cf);
if (unlikely(!skb))
return NULL;
(*cf)->can_id = CAN_ERR_FLAG;
(*cf)->len = CAN_ERR_DLC;
return skb;
}
EXPORT_SYMBOL_GPL(alloc_can_err_skb);
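/* Illustrative sketch (not part of the original file): reporting a bus
 * error with a frame allocated by alloc_can_err_skb(). The helper name
 * example_report_bus_error() is hypothetical; a real driver fills
 * cf->data[] from its controller-specific error registers.
 */
static void example_report_bus_error(struct net_device *dev)
{
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_err_skb(dev, &cf);
	if (!skb)
		return;

	/* can_id already carries CAN_ERR_FLAG; add the error class */
	cf->can_id |= CAN_ERR_BUSERROR;

	netif_rx(skb);
}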
/* Check for outgoing skbs that have not been created by the CAN subsystem */
static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
{
/* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
return false;
/* af_packet does not apply CAN skb specific settings */
if (skb->ip_summed == CHECKSUM_NONE) {
/* init headroom */
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* perform proper loopback on capable devices */
if (dev->flags & IFF_ECHO)
skb->pkt_type = PACKET_LOOPBACK;
else
skb->pkt_type = PACKET_HOST;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
/* set CANFD_FDF flag for CAN FD frames */
if (can_is_canfd_skb(skb)) {
struct canfd_frame *cfd;
cfd = (struct canfd_frame *)skb->data;
cfd->flags |= CANFD_FDF;
}
}
return true;
}
/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb)
{
switch (ntohs(skb->protocol)) {
case ETH_P_CAN:
if (!can_is_can_skb(skb))
goto inval_skb;
break;
case ETH_P_CANFD:
if (!can_is_canfd_skb(skb))
goto inval_skb;
break;
case ETH_P_CANXL:
if (!can_is_canxl_skb(skb))
goto inval_skb;
break;
default:
goto inval_skb;
}
if (!can_skb_headroom_valid(dev, skb))
goto inval_skb;
return false;
inval_skb:
kfree_skb(skb);
dev->stats.tx_dropped++;
return true;
}
EXPORT_SYMBOL_GPL(can_dropped_invalid_skb);
| linux-master | drivers/net/can/dev/skb.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2012, 2020 Oliver Hartkopp <[email protected]>
*/
#include <linux/can/dev.h>
/* CAN DLC to real data length conversion helpers */
static const u8 dlc2len[] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 12, 16, 20, 24, 32, 48, 64
};
/* get data length from raw data length code (DLC) */
u8 can_fd_dlc2len(u8 dlc)
{
return dlc2len[dlc & 0x0F];
}
EXPORT_SYMBOL_GPL(can_fd_dlc2len);
static const u8 len2dlc[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
9, 9, 9, 9, /* 9 - 12 */
10, 10, 10, 10, /* 13 - 16 */
11, 11, 11, 11, /* 17 - 20 */
12, 12, 12, 12, /* 21 - 24 */
13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
15, 15, 15, 15, 15, 15, 15, 15 /* 57 - 64 */
};
/* map the sanitized data length to an appropriate data length code */
u8 can_fd_len2dlc(u8 len)
{
/* check for length mapping table size at build time */
BUILD_BUG_ON(ARRAY_SIZE(len2dlc) != CANFD_MAX_DLEN + 1);
if (unlikely(len > CANFD_MAX_DLEN))
return CANFD_MAX_DLC;
return len2dlc[len];
}
EXPORT_SYMBOL_GPL(can_fd_len2dlc);
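/* Illustrative sketch (not part of the original file): the two helpers
 * above are inverses only after rounding up to the next valid CAN FD
 * size, e.g. a 13 byte payload maps to DLC 10, which decodes back to
 * 16 bytes. example_canfd_roundup_len() is a hypothetical helper.
 */
static inline u8 example_canfd_roundup_len(u8 len)
{
	/* 8 -> DLC 8 -> 8 bytes; 13 -> DLC 10 -> 16 bytes */
	return can_fd_dlc2len(can_fd_len2dlc(len));
}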
/**
* can_skb_get_frame_len() - Calculate the CAN Frame length in bytes
* of a given skb.
* @skb: socket buffer of a CAN message.
*
* Do a rough calculation: bit stuffing is ignored and length in bits
* is rounded up to a length in bytes.
*
* Rationale: this function is to be used for the BQL functions
* (netdev_sent_queue() and netdev_completed_queue()) which expect a
* value in bytes. Just using skb->len is insufficient because it will
* return the constant value of CAN(FD)_MTU. Doing the bit stuffing
 * calculation would be too expensive in terms of computing resources
* for no noticeable gain.
*
 * Remarks: The payload of CAN FD frames with the BRS flag is sent at a
 * different bitrate. Currently, the can-utils canbusload tool does
 * not support CAN FD yet, so we could not run any benchmark to
 * measure the impact. There might be room for improvement here.
*
* Return: length in bytes.
*/
unsigned int can_skb_get_frame_len(const struct sk_buff *skb)
{
const struct canfd_frame *cf = (const struct canfd_frame *)skb->data;
u8 len;
if (can_is_canfd_skb(skb))
len = canfd_sanitize_len(cf->len);
else if (cf->can_id & CAN_RTR_FLAG)
len = 0;
else
len = cf->len;
return can_frame_bytes(can_is_canfd_skb(skb), cf->can_id & CAN_EFF_FLAG,
false, len);
}
EXPORT_SYMBOL_GPL(can_skb_get_frame_len);
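/* Illustrative sketch (not part of the original file): feeding the value
 * computed above into BQL, as suggested by the kernel-doc. The helpers
 * example_bql_xmit()/example_bql_tx_done() are hypothetical; echo index
 * management and hardware access are omitted, and the four-argument
 * can_put_echo_skb() prototype is assumed.
 */
static void example_bql_xmit(struct net_device *dev, struct sk_buff *skb,
			     unsigned int idx)
{
	unsigned int frame_len = can_skb_get_frame_len(skb);

	can_put_echo_skb(skb, dev, idx, frame_len);
	netdev_sent_queue(dev, frame_len);
	/* ... hand the frame to the hardware ... */
}

static void example_bql_tx_done(struct net_device *dev, unsigned int idx)
{
	unsigned int frame_len;

	dev->stats.tx_bytes += can_get_echo_skb(dev, idx, &frame_len);
	dev->stats.tx_packets++;
	netdev_completed_queue(dev, 1, frame_len);
}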
| linux-master | drivers/net/can/dev/length.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <[email protected]>
*/
#include <linux/can/dev.h>
void can_sjw_set_default(struct can_bittiming *bt)
{
if (bt->sjw)
return;
/* If user space provides no sjw, use sane default of phase_seg2 / 2 */
bt->sjw = max(1U, min(bt->phase_seg1, bt->phase_seg2 / 2));
}
int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt,
const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
{
if (bt->sjw > btc->sjw_max) {
NL_SET_ERR_MSG_FMT(extack, "sjw: %u greater than max sjw: %u",
bt->sjw, btc->sjw_max);
return -EINVAL;
}
if (bt->sjw > bt->phase_seg1) {
NL_SET_ERR_MSG_FMT(extack,
"sjw: %u greater than phase-seg1: %u",
bt->sjw, bt->phase_seg1);
return -EINVAL;
}
if (bt->sjw > bt->phase_seg2) {
NL_SET_ERR_MSG_FMT(extack,
"sjw: %u greater than phase-seg2: %u",
bt->sjw, bt->phase_seg2);
return -EINVAL;
}
return 0;
}
/* Checks the validity of the specified bit-timing parameters prop_seg,
* phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
* prescaler value brp. You can find more information in the header
* file linux/can/netlink.h.
*/
static int can_fixup_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc,
struct netlink_ext_ack *extack)
{
const unsigned int tseg1 = bt->prop_seg + bt->phase_seg1;
const struct can_priv *priv = netdev_priv(dev);
u64 brp64;
int err;
if (tseg1 < btc->tseg1_min) {
NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u less than tseg1-min: %u",
tseg1, btc->tseg1_min);
return -EINVAL;
}
if (tseg1 > btc->tseg1_max) {
NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u greater than tseg1-max: %u",
tseg1, btc->tseg1_max);
return -EINVAL;
}
if (bt->phase_seg2 < btc->tseg2_min) {
NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u less than tseg2-min: %u",
bt->phase_seg2, btc->tseg2_min);
return -EINVAL;
}
if (bt->phase_seg2 > btc->tseg2_max) {
NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u greater than tseg2-max: %u",
bt->phase_seg2, btc->tseg2_max);
return -EINVAL;
}
can_sjw_set_default(bt);
err = can_sjw_check(dev, bt, btc, extack);
if (err)
return err;
brp64 = (u64)priv->clock.freq * (u64)bt->tq;
if (btc->brp_inc > 1)
do_div(brp64, btc->brp_inc);
brp64 += 500000000UL - 1;
do_div(brp64, 1000000000UL); /* the practicable BRP */
if (btc->brp_inc > 1)
brp64 *= btc->brp_inc;
bt->brp = (u32)brp64;
if (bt->brp < btc->brp_min) {
NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u less than brp-min: %u",
bt->brp, btc->brp_min);
return -EINVAL;
}
if (bt->brp > btc->brp_max) {
NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u greater than brp-max: %u",
bt->brp, btc->brp_max);
return -EINVAL;
}
bt->bitrate = priv->clock.freq / (bt->brp * can_bit_time(bt));
bt->sample_point = ((CAN_SYNC_SEG + tseg1) * 1000) / can_bit_time(bt);
bt->tq = DIV_U64_ROUND_CLOSEST(mul_u32_u32(bt->brp, NSEC_PER_SEC),
priv->clock.freq);
return 0;
}
/* Checks the validity of predefined bitrate settings */
static int
can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *bt,
const u32 *bitrate_const,
const unsigned int bitrate_const_cnt,
struct netlink_ext_ack *extack)
{
unsigned int i;
for (i = 0; i < bitrate_const_cnt; i++) {
if (bt->bitrate == bitrate_const[i])
return 0;
}
NL_SET_ERR_MSG_FMT(extack, "bitrate %u bps not supported",
			   bt->bitrate);
return -EINVAL;
}
int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc,
const u32 *bitrate_const,
const unsigned int bitrate_const_cnt,
struct netlink_ext_ack *extack)
{
/* Depending on the given can_bittiming parameter structure the CAN
* timing parameters are calculated based on the provided bitrate OR
* alternatively the CAN timing parameters (tq, prop_seg, etc.) are
* provided directly which are then checked and fixed up.
*/
if (!bt->tq && bt->bitrate && btc)
return can_calc_bittiming(dev, bt, btc, extack);
if (bt->tq && !bt->bitrate && btc)
return can_fixup_bittiming(dev, bt, btc, extack);
if (!bt->tq && bt->bitrate && bitrate_const)
return can_validate_bitrate(dev, bt, bitrate_const,
bitrate_const_cnt, extack);
return -EINVAL;
}
| linux-master | drivers/net/can/dev/bittiming.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <[email protected]>
*/
#include <linux/units.h>
#include <linux/can/dev.h>
#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
/* Bit-timing calculation derived from:
*
* Code based on LinCAN sources and H8S2638 project
* Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
* Copyright 2005 Stanislav Marek
* email: [email protected]
*
* Calculates proper bit-timing parameters for a specified bit-rate
* and sample-point, which can then be used to set the bit-timing
* registers of the CAN controller. You can find more information
* in the header file linux/can/netlink.h.
*/
static int
can_update_sample_point(const struct can_bittiming_const *btc,
const unsigned int sample_point_nominal, const unsigned int tseg,
unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
unsigned int *sample_point_error_ptr)
{
unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
unsigned int sample_point, best_sample_point = 0;
unsigned int tseg1, tseg2;
int i;
for (i = 0; i <= 1; i++) {
tseg2 = tseg + CAN_SYNC_SEG -
(sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
1000 - i;
tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
tseg1 = tseg - tseg2;
if (tseg1 > btc->tseg1_max) {
tseg1 = btc->tseg1_max;
tseg2 = tseg - tseg1;
}
sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
(tseg + CAN_SYNC_SEG);
sample_point_error = abs(sample_point_nominal - sample_point);
if (sample_point <= sample_point_nominal &&
sample_point_error < best_sample_point_error) {
best_sample_point = sample_point;
best_sample_point_error = sample_point_error;
*tseg1_ptr = tseg1;
*tseg2_ptr = tseg2;
}
}
if (sample_point_error_ptr)
*sample_point_error_ptr = best_sample_point_error;
return best_sample_point;
}
int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
{
struct can_priv *priv = netdev_priv(dev);
unsigned int bitrate; /* current bitrate */
unsigned int bitrate_error; /* difference between current and nominal value */
unsigned int best_bitrate_error = UINT_MAX;
unsigned int sample_point_error; /* difference between current and nominal value */
unsigned int best_sample_point_error = UINT_MAX;
unsigned int sample_point_nominal; /* nominal sample point */
unsigned int best_tseg = 0; /* current best value for tseg */
unsigned int best_brp = 0; /* current best value for brp */
unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
u64 v64;
int err;
/* Use CiA recommended sample points */
if (bt->sample_point) {
sample_point_nominal = bt->sample_point;
} else {
if (bt->bitrate > 800 * KILO /* BPS */)
sample_point_nominal = 750;
else if (bt->bitrate > 500 * KILO /* BPS */)
sample_point_nominal = 800;
else
sample_point_nominal = 875;
}
/* tseg even = round down, odd = round up */
for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
tsegall = CAN_SYNC_SEG + tseg / 2;
/* Compute all possible tseg choices (tseg=tseg1+tseg2) */
brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
/* choose brp step which is possible in system */
brp = (brp / btc->brp_inc) * btc->brp_inc;
if (brp < btc->brp_min || brp > btc->brp_max)
continue;
bitrate = priv->clock.freq / (brp * tsegall);
bitrate_error = abs(bt->bitrate - bitrate);
/* tseg brp biterror */
if (bitrate_error > best_bitrate_error)
continue;
/* reset sample point error if we have a better bitrate */
if (bitrate_error < best_bitrate_error)
best_sample_point_error = UINT_MAX;
can_update_sample_point(btc, sample_point_nominal, tseg / 2,
&tseg1, &tseg2, &sample_point_error);
if (sample_point_error >= best_sample_point_error)
continue;
best_sample_point_error = sample_point_error;
best_bitrate_error = bitrate_error;
best_tseg = tseg / 2;
best_brp = brp;
if (bitrate_error == 0 && sample_point_error == 0)
break;
}
if (best_bitrate_error) {
/* Error in one-tenth of a percent */
v64 = (u64)best_bitrate_error * 1000;
do_div(v64, bt->bitrate);
bitrate_error = (u32)v64;
if (bitrate_error > CAN_CALC_MAX_ERROR) {
NL_SET_ERR_MSG_FMT(extack,
"bitrate error: %u.%u%% too high",
bitrate_error / 10, bitrate_error % 10);
return -EINVAL;
}
NL_SET_ERR_MSG_FMT(extack,
"bitrate error: %u.%u%%",
bitrate_error / 10, bitrate_error % 10);
}
/* real sample point */
bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
best_tseg, &tseg1, &tseg2,
NULL);
v64 = (u64)best_brp * 1000 * 1000 * 1000;
do_div(v64, priv->clock.freq);
bt->tq = (u32)v64;
bt->prop_seg = tseg1 / 2;
bt->phase_seg1 = tseg1 - bt->prop_seg;
bt->phase_seg2 = tseg2;
can_sjw_set_default(bt);
err = can_sjw_check(dev, bt, btc, extack);
if (err)
return err;
bt->brp = best_brp;
/* real bitrate */
bt->bitrate = priv->clock.freq /
(bt->brp * can_bit_time(bt));
return 0;
}
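/* Worked example (not part of the original file, numbers illustrative):
 * with priv->clock.freq = 40 MHz, a requested bitrate of 500 kbit/s and
 * the default sample point of 87.5%, one consistent solution is brp = 5,
 * i.e. 8 MHz time quanta and 16 tq per bit:
 *
 *   CAN_SYNC_SEG (1) + prop_seg + phase_seg1 (13) + phase_seg2 (2) = 16
 *   bitrate      = 40 MHz / (5 * 16) = 500 kbit/s
 *   sample point = (1 + 13) / 16     = 87.5%
 *
 * Whether the loop above actually selects these values depends on the
 * controller's can_bittiming_const limits; it keeps the candidate with
 * the smallest bitrate error and, for equal bitrate error, the smallest
 * sample point error.
 */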
void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
const struct can_bittiming *dbt,
u32 *ctrlmode, u32 ctrlmode_supported)
{
if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
return;
*ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
/* As specified in ISO 11898-1 section 11.3.3 "Transmitter
* delay compensation" (TDC) is only applicable if data BRP is
* one or two.
*/
if (dbt->brp == 1 || dbt->brp == 2) {
/* Sample point in clock periods */
u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
dbt->phase_seg1) * dbt->brp;
if (sample_point_in_tc < tdc_const->tdco_min)
return;
tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
*ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
}
}
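/* Worked example (not part of the original file, numbers illustrative):
 * for a data phase with brp = 1, prop_seg = 1 and phase_seg1 = 8, the
 * sample point lies at (CAN_SYNC_SEG + 1 + 8) * 1 = 10 clock periods, so
 * tdc->tdco becomes min(10, tdc_const->tdco_max) and
 * CAN_CTRLMODE_TDC_AUTO is set, provided 10 >= tdc_const->tdco_min.
 */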
| linux-master | drivers/net/can/dev/calc_bittiming.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 Protonic Holland,
* David Jander
* Copyright (C) 2014-2021, 2023 Pengutronix,
* Marc Kleine-Budde <[email protected]>
*/
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
struct can_rx_offload_cb {
u32 timestamp;
};
static inline struct can_rx_offload_cb *
can_rx_offload_get_cb(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
return (struct can_rx_offload_cb *)skb->cb;
}
static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
unsigned int a, unsigned int b)
{
if (offload->inc)
return a <= b;
else
return a >= b;
}
static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
if (offload->inc)
return (*val)++;
else
return (*val)--;
}
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
struct can_rx_offload *offload = container_of(napi,
struct can_rx_offload,
napi);
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
int work_done = 0;
while ((work_done < quota) &&
(skb = skb_dequeue(&offload->skb_queue))) {
struct can_frame *cf = (struct can_frame *)skb->data;
work_done++;
if (!(cf->can_id & CAN_ERR_FLAG)) {
stats->rx_packets++;
if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
}
netif_receive_skb(skb);
}
if (work_done < quota) {
napi_complete_done(napi, work_done);
/* Check if there was another interrupt */
if (!skb_queue_empty(&offload->skb_queue))
napi_reschedule(&offload->napi);
}
return work_done;
}
static inline void
__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
struct sk_buff *pos, *insert = NULL;
skb_queue_reverse_walk(head, pos) {
const struct can_rx_offload_cb *cb_pos, *cb_new;
cb_pos = can_rx_offload_get_cb(pos);
cb_new = can_rx_offload_get_cb(new);
netdev_dbg(new->dev,
"%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
__func__,
cb_pos->timestamp, cb_new->timestamp,
cb_new->timestamp - cb_pos->timestamp,
skb_queue_len(head));
if (compare(pos, new) < 0)
continue;
insert = pos;
break;
}
if (!insert)
__skb_queue_head(head, new);
else
__skb_queue_after(head, insert, new);
}
static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
const struct can_rx_offload_cb *cb_a, *cb_b;
cb_a = can_rx_offload_get_cb(a);
cb_b = can_rx_offload_get_cb(b);
/* Subtract two u32 and return result as int, to keep
* difference steady around the u32 overflow.
*/
return cb_b->timestamp - cb_a->timestamp;
}
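/* Illustrative note (not part of the original file): the unsigned
 * subtraction keeps the ordering stable across the u32 wrap-around,
 * e.g. cb_a->timestamp = 0xfffffffe and cb_b->timestamp = 0x00000001
 * yield (int)0x00000003 = 3 > 0, so the older frame 'a' still sorts
 * before the newer frame 'b'.
 */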
/**
* can_rx_offload_offload_one() - Read one CAN frame from HW
* @offload: pointer to rx_offload context
* @n: number of mailbox to read
*
* The task of this function is to read a CAN frame from mailbox @n
* from the device and return the mailbox's content as a struct
* sk_buff.
*
* If the struct can_rx_offload::skb_queue exceeds the maximal queue
* length (struct can_rx_offload::skb_queue_len_max) or no skb can be
* allocated, the mailbox contents is discarded by reading it into an
* overflow buffer. This way the mailbox is marked as free by the
* driver.
*
* Return: A pointer to skb containing the CAN frame on success.
*
* NULL if the mailbox @n is empty.
*
* ERR_PTR() in case of an error
*/
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
struct sk_buff *skb;
struct can_rx_offload_cb *cb;
bool drop = false;
u32 timestamp;
/* If queue is full drop frame */
if (unlikely(skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max))
drop = true;
	skb = offload->mailbox_read(offload, n, &timestamp, drop);
/* Mailbox was empty. */
if (unlikely(!skb))
return NULL;
/* There was a problem reading the mailbox, propagate
* error value.
*/
if (IS_ERR(skb)) {
offload->dev->stats.rx_dropped++;
offload->dev->stats.rx_fifo_errors++;
return skb;
}
/* Mailbox was read. */
cb = can_rx_offload_get_cb(skb);
cb->timestamp = timestamp;
return skb;
}
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
u64 pending)
{
unsigned int i;
int received = 0;
for (i = offload->mb_first;
can_rx_offload_le(offload, i, offload->mb_last);
can_rx_offload_inc(offload, &i)) {
struct sk_buff *skb;
if (!(pending & BIT_ULL(i)))
continue;
skb = can_rx_offload_offload_one(offload, i);
if (IS_ERR_OR_NULL(skb))
continue;
__skb_queue_add_sort(&offload->skb_irq_queue, skb,
can_rx_offload_compare);
received++;
}
return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
struct sk_buff *skb;
int received = 0;
while (1) {
skb = can_rx_offload_offload_one(offload, 0);
if (IS_ERR(skb))
continue;
if (!skb)
break;
__skb_queue_tail(&offload->skb_irq_queue, skb);
received++;
}
return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
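/* Illustrative sketch (not part of the original file): a FIFO based
 * driver usually drains its hardware FIFO in the interrupt handler and
 * then kicks the NAPI poll via can_rx_offload_irq_finish(). The names
 * struct example_priv and example_can_isr() are hypothetical and
 * <linux/interrupt.h> is assumed to be available.
 */
struct example_priv {
	struct can_rx_offload offload;
	struct net_device *ndev;
};

static irqreturn_t example_can_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	/* ... acknowledge the interrupt in hardware ... */

	can_rx_offload_irq_offload_fifo(&priv->offload);
	can_rx_offload_irq_finish(&priv->offload);

	return IRQ_HANDLED;
}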
int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
struct sk_buff *skb, u32 timestamp)
{
struct can_rx_offload_cb *cb;
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max) {
dev_kfree_skb_any(skb);
return -ENOBUFS;
}
cb = can_rx_offload_get_cb(skb);
cb->timestamp = timestamp;
__skb_queue_add_sort(&offload->skb_irq_queue, skb,
can_rx_offload_compare);
return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp);
unsigned int
can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload,
unsigned int idx, u32 timestamp,
unsigned int *frame_len_ptr)
{
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
unsigned int len;
int err;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
return 0;
err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
if (err) {
stats->rx_errors++;
stats->tx_fifo_errors++;
}
return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_timestamp);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb)
{
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max) {
dev_kfree_skb_any(skb);
return -ENOBUFS;
}
__skb_queue_tail(&offload->skb_irq_queue, skb);
return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
unsigned int
can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload,
unsigned int idx,
unsigned int *frame_len_ptr)
{
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
unsigned int len;
int err;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
return 0;
err = can_rx_offload_queue_tail(offload, skb);
if (err) {
stats->rx_errors++;
stats->tx_fifo_errors++;
}
return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_tail);
void can_rx_offload_irq_finish(struct can_rx_offload *offload)
{
unsigned long flags;
int queue_len;
if (skb_queue_empty_lockless(&offload->skb_irq_queue))
return;
spin_lock_irqsave(&offload->skb_queue.lock, flags);
skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
queue_len = skb_queue_len(&offload->skb_queue);
if (queue_len > offload->skb_queue_len_max / 8)
netdev_dbg(offload->dev, "%s: queue_len=%d\n",
__func__, queue_len);
napi_schedule(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);
void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload)
{
unsigned long flags;
int queue_len;
if (skb_queue_empty_lockless(&offload->skb_irq_queue))
return;
spin_lock_irqsave(&offload->skb_queue.lock, flags);
skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
queue_len = skb_queue_len(&offload->skb_queue);
if (queue_len > offload->skb_queue_len_max / 8)
netdev_dbg(offload->dev, "%s: queue_len=%d\n",
__func__, queue_len);
local_bh_disable();
napi_schedule(&offload->napi);
local_bh_enable();
}
EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish);
static int can_rx_offload_init_queue(struct net_device *dev,
struct can_rx_offload *offload,
unsigned int weight)
{
offload->dev = dev;
/* Limit queue len to 4x the weight (rounded to next power of two) */
offload->skb_queue_len_max = 2 << fls(weight);
offload->skb_queue_len_max *= 4;
skb_queue_head_init(&offload->skb_queue);
__skb_queue_head_init(&offload->skb_irq_queue);
netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll,
weight);
dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
__func__, offload->skb_queue_len_max);
return 0;
}
int can_rx_offload_add_timestamp(struct net_device *dev,
struct can_rx_offload *offload)
{
unsigned int weight;
if (offload->mb_first > BITS_PER_LONG_LONG ||
offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
return -EINVAL;
if (offload->mb_first < offload->mb_last) {
offload->inc = true;
weight = offload->mb_last - offload->mb_first;
} else {
offload->inc = false;
weight = offload->mb_first - offload->mb_last;
}
return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
int can_rx_offload_add_fifo(struct net_device *dev,
struct can_rx_offload *offload, unsigned int weight)
{
if (!offload->mailbox_read)
return -EINVAL;
return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
int can_rx_offload_add_manual(struct net_device *dev,
struct can_rx_offload *offload,
unsigned int weight)
{
if (offload->mailbox_read)
return -EINVAL;
return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);
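/* Illustrative sketch (not part of the original file): before calling one
 * of the can_rx_offload_add_*() helpers, the driver fills in the
 * mailbox_read() callback (plus mb_first/mb_last for the timestamp
 * variant). example_mailbox_read() and example_setup_offload() are
 * hypothetical stubs.
 */
static struct sk_buff *example_mailbox_read(struct can_rx_offload *offload,
					    unsigned int n, u32 *timestamp,
					    bool drop)
{
	/* Read mailbox 'n' from the hardware: return NULL if it is empty,
	 * an ERR_PTR() on failure, or the filled skb on success. When
	 * 'drop' is set, discard the mailbox contents and return NULL.
	 */
	return NULL;
}

static int example_setup_offload(struct net_device *dev,
				 struct can_rx_offload *offload)
{
	offload->mailbox_read = example_mailbox_read;

	/* the weight sizes both the skb queue and the NAPI budget */
	return can_rx_offload_add_fifo(dev, offload, 32);
}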
void can_rx_offload_enable(struct can_rx_offload *offload)
{
napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);
void can_rx_offload_del(struct can_rx_offload *offload)
{
netif_napi_del(&offload->napi);
skb_queue_purge(&offload->skb_queue);
__skb_queue_purge(&offload->skb_irq_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);
| linux-master | drivers/net/can/dev/rx-offload.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <[email protected]>
* Copyright (C) 2021 Vincent Mailhol <[email protected]>
*/
#include <linux/can/dev.h>
#include <net/rtnetlink.h>
static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_STATE] = { .type = NLA_U32 },
[IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
[IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
[IFLA_CAN_RESTART] = { .type = NLA_U32 },
[IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
[IFLA_CAN_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
[IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
[IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
[IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
[IFLA_CAN_TDC] = { .type = NLA_NESTED },
[IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED },
};
static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
[IFLA_CAN_TDC_TDCV_MIN] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCV_MAX] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCO_MIN] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCO_MAX] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCF_MIN] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCF_MAX] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCV] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCO] = { .type = NLA_U32 },
[IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
static int can_validate_bittiming(const struct can_bittiming *bt,
struct netlink_ext_ack *extack)
{
/* sample point is in one-tenth of a percent */
if (bt->sample_point >= 1000) {
NL_SET_ERR_MSG(extack, "sample point must be between 0 and 100%");
return -EINVAL;
}
return 0;
}
static int can_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
bool is_can_fd = false;
int err;
/* Make sure that valid CAN FD configurations always consist of
* - nominal/arbitration bittiming
* - data bittiming
* - control mode with CAN_CTRLMODE_FD set
* - TDC parameters are coherent (details below)
*/
if (!data)
return 0;
if (data[IFLA_CAN_BITTIMING]) {
struct can_bittiming bt;
memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
err = can_validate_bittiming(&bt, extack);
if (err)
return err;
}
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
/* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */
if (tdc_flags == CAN_CTRLMODE_TDC_MASK)
return -EOPNOTSUPP;
/* If one of the CAN_CTRLMODE_TDC_* flag is set then
* TDC must be set and vice-versa
*/
if (!!tdc_flags != !!data[IFLA_CAN_TDC])
return -EOPNOTSUPP;
/* If providing TDC parameters, at least TDCO is
* needed. TDCV is needed if and only if
* CAN_CTRLMODE_TDC_MANUAL is set
*/
if (data[IFLA_CAN_TDC]) {
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
data[IFLA_CAN_TDC],
can_tdc_policy, extack);
if (err)
return err;
if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
if (tdc_flags & CAN_CTRLMODE_TDC_AUTO)
return -EOPNOTSUPP;
} else {
if (tdc_flags & CAN_CTRLMODE_TDC_MANUAL)
return -EOPNOTSUPP;
}
if (!tb_tdc[IFLA_CAN_TDC_TDCO])
return -EOPNOTSUPP;
}
}
if (is_can_fd) {
if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
return -EOPNOTSUPP;
}
if (data[IFLA_CAN_DATA_BITTIMING] || data[IFLA_CAN_TDC]) {
if (!is_can_fd)
return -EOPNOTSUPP;
}
if (data[IFLA_CAN_DATA_BITTIMING]) {
struct can_bittiming bt;
memcpy(&bt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(bt));
err = can_validate_bittiming(&bt, extack);
if (err)
return err;
}
return 0;
}
static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
struct can_tdc tdc = { 0 };
const struct can_tdc_const *tdc_const = priv->tdc_const;
int err;
if (!tdc_const || !can_tdc_is_enabled(priv))
return -EOPNOTSUPP;
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla,
can_tdc_policy, extack);
if (err)
return err;
if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
u32 tdcv = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCV]);
if (tdcv < tdc_const->tdcv_min || tdcv > tdc_const->tdcv_max)
return -EINVAL;
tdc.tdcv = tdcv;
}
if (tb_tdc[IFLA_CAN_TDC_TDCO]) {
u32 tdco = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCO]);
if (tdco < tdc_const->tdco_min || tdco > tdc_const->tdco_max)
return -EINVAL;
tdc.tdco = tdco;
}
if (tb_tdc[IFLA_CAN_TDC_TDCF]) {
u32 tdcf = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCF]);
if (tdcf < tdc_const->tdcf_min || tdcf > tdc_const->tdcf_max)
return -EINVAL;
tdc.tdcf = tdcf;
}
priv->tdc = tdc;
return 0;
}
static int can_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct can_priv *priv = netdev_priv(dev);
u32 tdc_mask = 0;
int err;
/* We need synchronization with dev->stop() */
ASSERT_RTNL();
if (data[IFLA_CAN_BITTIMING]) {
struct can_bittiming bt;
/* Do not allow changing bittiming while running */
if (dev->flags & IFF_UP)
return -EBUSY;
/* Calculate bittiming parameters based on
* bittiming_const if set, otherwise pass bitrate
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
if (!priv->bittiming_const && !priv->do_set_bittiming &&
!priv->bitrate_const)
return -EOPNOTSUPP;
memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
err = can_get_bittiming(dev, &bt,
priv->bittiming_const,
priv->bitrate_const,
priv->bitrate_const_cnt,
extack);
if (err)
return err;
if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
NL_SET_ERR_MSG_FMT(extack,
"arbitration bitrate %u bps surpasses transceiver capabilities of %u bps",
bt.bitrate, priv->bitrate_max);
return -EINVAL;
}
memcpy(&priv->bittiming, &bt, sizeof(bt));
if (priv->do_set_bittiming) {
/* Finally, set the bit-timing registers */
err = priv->do_set_bittiming(dev);
if (err)
return err;
}
}
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm;
u32 ctrlstatic;
u32 maskedflags;
/* Do not allow changing controller mode while running */
if (dev->flags & IFF_UP)
return -EBUSY;
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
ctrlstatic = can_get_static_ctrlmode(priv);
maskedflags = cm->flags & cm->mask;
/* check whether provided bits are allowed to be passed */
if (maskedflags & ~(priv->ctrlmode_supported | ctrlstatic))
return -EOPNOTSUPP;
/* do not check for static fd-non-iso if 'fd' is disabled */
if (!(maskedflags & CAN_CTRLMODE_FD))
ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
/* make sure static options are provided by configuration */
if ((maskedflags & ctrlstatic) != ctrlstatic)
return -EOPNOTSUPP;
/* clear bits to be modified and copy the flag values */
priv->ctrlmode &= ~cm->mask;
priv->ctrlmode |= maskedflags;
/* CAN_CTRLMODE_FD can only be set when driver supports FD */
if (priv->ctrlmode & CAN_CTRLMODE_FD) {
dev->mtu = CANFD_MTU;
} else {
dev->mtu = CAN_MTU;
memset(&priv->data_bittiming, 0,
sizeof(priv->data_bittiming));
priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
memset(&priv->tdc, 0, sizeof(priv->tdc));
}
tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK;
/* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually
* exclusive: make sure to turn the other one off
*/
if (tdc_mask)
priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK;
}
if (data[IFLA_CAN_RESTART_MS]) {
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
return -EBUSY;
priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
}
if (data[IFLA_CAN_RESTART]) {
/* Do not allow a restart while not running */
if (!(dev->flags & IFF_UP))
return -EINVAL;
err = can_restart_now(dev);
if (err)
return err;
}
if (data[IFLA_CAN_DATA_BITTIMING]) {
struct can_bittiming dbt;
/* Do not allow changing bittiming while running */
if (dev->flags & IFF_UP)
return -EBUSY;
/* Calculate bittiming parameters based on
* data_bittiming_const if set, otherwise pass bitrate
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
if (!priv->data_bittiming_const && !priv->do_set_data_bittiming &&
!priv->data_bitrate_const)
return -EOPNOTSUPP;
memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
sizeof(dbt));
err = can_get_bittiming(dev, &dbt,
priv->data_bittiming_const,
priv->data_bitrate_const,
priv->data_bitrate_const_cnt,
extack);
if (err)
return err;
if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
NL_SET_ERR_MSG_FMT(extack,
"CANFD data bitrate %u bps surpasses transceiver capabilities of %u bps",
dbt.bitrate, priv->bitrate_max);
return -EINVAL;
}
memset(&priv->tdc, 0, sizeof(priv->tdc));
if (data[IFLA_CAN_TDC]) {
/* TDC parameters are provided: use them */
err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
extack);
if (err) {
priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
return err;
}
} else if (!tdc_mask) {
/* Neither of TDC parameters nor TDC flags are
* provided: do calculation
*/
can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming,
&priv->ctrlmode, priv->ctrlmode_supported);
} /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
* turned off. TDC is disabled: do nothing
*/
memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
if (priv->do_set_data_bittiming) {
/* Finally, set the bit-timing registers */
err = priv->do_set_data_bittiming(dev);
if (err)
return err;
}
}
if (data[IFLA_CAN_TERMINATION]) {
const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
const unsigned int num_term = priv->termination_const_cnt;
unsigned int i;
if (!priv->do_set_termination)
return -EOPNOTSUPP;
/* check whether given value is supported by the interface */
for (i = 0; i < num_term; i++) {
if (termval == priv->termination_const[i])
break;
}
if (i >= num_term)
return -EINVAL;
/* Finally, set the termination value */
err = priv->do_set_termination(dev, termval);
if (err)
return err;
priv->termination = termval;
}
return 0;
}
static size_t can_tdc_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
size_t size;
if (!priv->tdc_const)
return 0;
size = nla_total_size(0); /* nest IFLA_CAN_TDC */
if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MAX */
}
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */
if (priv->tdc_const->tdcf_max) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
}
if (can_tdc_is_enabled(priv)) {
if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
priv->do_get_auto_tdcv)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */
if (priv->tdc_const->tdcf_max)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */
}
return size;
}
static size_t can_ctrlmode_ext_get_size(void)
{
return nla_total_size(0) + /* nest IFLA_CAN_CTRLMODE_EXT */
nla_total_size(sizeof(u32)); /* IFLA_CAN_CTRLMODE_SUPPORTED */
}
static size_t can_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
size_t size = 0;
if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
size += nla_total_size(sizeof(struct can_bittiming));
if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
size += nla_total_size(sizeof(struct can_bittiming_const));
size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
size += nla_total_size(sizeof(struct can_berr_counter));
if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
size += nla_total_size(sizeof(struct can_bittiming));
if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
size += nla_total_size(sizeof(struct can_bittiming_const));
if (priv->termination_const) {
size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
priv->termination_const_cnt);
}
if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
size += nla_total_size(sizeof(*priv->bitrate_const) *
priv->bitrate_const_cnt);
if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
size += nla_total_size(sizeof(*priv->data_bitrate_const) *
priv->data_bitrate_const_cnt);
size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */
size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */
return size;
}
static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct nlattr *nest;
struct can_priv *priv = netdev_priv(dev);
struct can_tdc *tdc = &priv->tdc;
const struct can_tdc_const *tdc_const = priv->tdc_const;
if (!tdc_const)
return 0;
nest = nla_nest_start(skb, IFLA_CAN_TDC);
if (!nest)
return -EMSGSIZE;
if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL &&
(nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MIN, tdc_const->tdcv_min) ||
nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MAX, tdc_const->tdcv_max)))
goto err_cancel;
if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MIN, tdc_const->tdco_min) ||
nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MAX, tdc_const->tdco_max))
goto err_cancel;
if (tdc_const->tdcf_max &&
(nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MIN, tdc_const->tdcf_min) ||
nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max)))
goto err_cancel;
if (can_tdc_is_enabled(priv)) {
u32 tdcv;
int err = -EINVAL;
if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
tdcv = tdc->tdcv;
err = 0;
} else if (priv->do_get_auto_tdcv) {
err = priv->do_get_auto_tdcv(dev, &tdcv);
}
if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv))
goto err_cancel;
if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO, tdc->tdco))
goto err_cancel;
if (tdc_const->tdcf_max &&
nla_put_u32(skb, IFLA_CAN_TDC_TDCF, tdc->tdcf))
goto err_cancel;
}
nla_nest_end(skb, nest);
return 0;
err_cancel:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
static int can_ctrlmode_ext_fill_info(struct sk_buff *skb,
const struct can_priv *priv)
{
struct nlattr *nest;
nest = nla_nest_start(skb, IFLA_CAN_CTRLMODE_EXT);
if (!nest)
return -EMSGSIZE;
if (nla_put_u32(skb, IFLA_CAN_CTRLMODE_SUPPORTED,
priv->ctrlmode_supported)) {
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
nla_nest_end(skb, nest);
return 0;
}
static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct can_ctrlmode cm = {.flags = priv->ctrlmode};
struct can_berr_counter bec = { };
enum can_state state = priv->state;
if (priv->do_get_state)
priv->do_get_state(dev, &state);
if ((priv->bittiming.bitrate != CAN_BITRATE_UNSET &&
priv->bittiming.bitrate != CAN_BITRATE_UNKNOWN &&
nla_put(skb, IFLA_CAN_BITTIMING,
sizeof(priv->bittiming), &priv->bittiming)) ||
(priv->bittiming_const &&
nla_put(skb, IFLA_CAN_BITTIMING_CONST,
sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
nla_put_u32(skb, IFLA_CAN_STATE, state) ||
nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
(priv->do_get_berr_counter &&
!priv->do_get_berr_counter(dev, &bec) &&
nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
(priv->data_bittiming.bitrate &&
nla_put(skb, IFLA_CAN_DATA_BITTIMING,
sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
(priv->data_bittiming_const &&
nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
sizeof(*priv->data_bittiming_const),
priv->data_bittiming_const)) ||
(priv->termination_const &&
(nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
nla_put(skb, IFLA_CAN_TERMINATION_CONST,
sizeof(*priv->termination_const) *
priv->termination_const_cnt,
priv->termination_const))) ||
(priv->bitrate_const &&
nla_put(skb, IFLA_CAN_BITRATE_CONST,
sizeof(*priv->bitrate_const) *
priv->bitrate_const_cnt,
priv->bitrate_const)) ||
(priv->data_bitrate_const &&
nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
sizeof(*priv->data_bitrate_const) *
priv->data_bitrate_const_cnt,
priv->data_bitrate_const)) ||
(nla_put(skb, IFLA_CAN_BITRATE_MAX,
sizeof(priv->bitrate_max),
&priv->bitrate_max)) ||
can_tdc_fill_info(skb, dev) ||
can_ctrlmode_ext_fill_info(skb, priv)
)
return -EMSGSIZE;
return 0;
}
static size_t can_get_xstats_size(const struct net_device *dev)
{
return sizeof(struct can_device_stats);
}
static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
if (nla_put(skb, IFLA_INFO_XSTATS,
sizeof(priv->can_stats), &priv->can_stats))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int can_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
static void can_dellink(struct net_device *dev, struct list_head *head)
{
}
struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
.netns_refund = true,
.maxtype = IFLA_CAN_MAX,
.policy = can_policy,
.setup = can_setup,
.validate = can_validate,
.newlink = can_newlink,
.changelink = can_changelink,
.dellink = can_dellink,
.get_size = can_get_size,
.fill_info = can_fill_info,
.get_xstats_size = can_get_xstats_size,
.fill_xstats = can_fill_xstats,
};
int can_netlink_register(void)
{
return rtnl_link_register(&can_link_ops);
}
void can_netlink_unregister(void)
{
rtnl_link_unregister(&can_link_ops);
}
| linux-master | drivers/net/can/dev/netlink.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/can-ml.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
static void can_update_state_error_stats(struct net_device *dev,
enum can_state new_state)
{
struct can_priv *priv = netdev_priv(dev);
if (new_state <= priv->state)
return;
switch (new_state) {
case CAN_STATE_ERROR_WARNING:
priv->can_stats.error_warning++;
break;
case CAN_STATE_ERROR_PASSIVE:
priv->can_stats.error_passive++;
break;
case CAN_STATE_BUS_OFF:
priv->can_stats.bus_off++;
break;
default:
break;
}
}
static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
{
switch (state) {
case CAN_STATE_ERROR_ACTIVE:
return CAN_ERR_CRTL_ACTIVE;
case CAN_STATE_ERROR_WARNING:
return CAN_ERR_CRTL_TX_WARNING;
case CAN_STATE_ERROR_PASSIVE:
return CAN_ERR_CRTL_TX_PASSIVE;
default:
return 0;
}
}
static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
{
switch (state) {
case CAN_STATE_ERROR_ACTIVE:
return CAN_ERR_CRTL_ACTIVE;
case CAN_STATE_ERROR_WARNING:
return CAN_ERR_CRTL_RX_WARNING;
case CAN_STATE_ERROR_PASSIVE:
return CAN_ERR_CRTL_RX_PASSIVE;
default:
return 0;
}
}
const char *can_get_state_str(const enum can_state state)
{
switch (state) {
case CAN_STATE_ERROR_ACTIVE:
return "Error Active";
case CAN_STATE_ERROR_WARNING:
return "Error Warning";
case CAN_STATE_ERROR_PASSIVE:
return "Error Passive";
case CAN_STATE_BUS_OFF:
return "Bus Off";
case CAN_STATE_STOPPED:
return "Stopped";
case CAN_STATE_SLEEPING:
return "Sleeping";
default:
return "<unknown>";
}
return "<unknown>";
}
EXPORT_SYMBOL_GPL(can_get_state_str);
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state)
{
struct can_priv *priv = netdev_priv(dev);
enum can_state new_state = max(tx_state, rx_state);
if (unlikely(new_state == priv->state)) {
netdev_warn(dev, "%s: oops, state did not change", __func__);
return;
}
netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
can_get_state_str(priv->state), priv->state,
can_get_state_str(new_state), new_state);
can_update_state_error_stats(dev, new_state);
priv->state = new_state;
if (!cf)
return;
if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
cf->can_id |= CAN_ERR_BUSOFF;
return;
}
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= tx_state >= rx_state ?
can_tx_state_to_frame(dev, tx_state) : 0;
cf->data[1] |= tx_state <= rx_state ?
can_rx_state_to_frame(dev, rx_state) : 0;
}
EXPORT_SYMBOL_GPL(can_change_state);
/* CAN device restart for bus-off recovery */
static void can_restart(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
struct can_frame *cf;
int err;
BUG_ON(netif_carrier_ok(dev));
/* No synchronization needed because the device is bus-off and
* no messages can come in or go out.
*/
can_flush_echo_skb(dev);
/* send restart message upstream */
skb = alloc_can_err_skb(dev, &cf);
if (!skb)
goto restart;
cf->can_id |= CAN_ERR_RESTARTED;
netif_rx(skb);
restart:
netdev_dbg(dev, "restarted\n");
priv->can_stats.restarts++;
/* Now restart the device */
err = priv->do_set_mode(dev, CAN_MODE_START);
netif_carrier_on(dev);
if (err)
netdev_err(dev, "Error %d during restart", err);
}
static void can_restart_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct can_priv *priv = container_of(dwork, struct can_priv,
restart_work);
can_restart(priv->dev);
}
int can_restart_now(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
/* A manual restart is only permitted if automatic restart is
* disabled and the device is in the bus-off state
*/
if (priv->restart_ms)
return -EINVAL;
if (priv->state != CAN_STATE_BUS_OFF)
return -EBUSY;
cancel_delayed_work_sync(&priv->restart_work);
can_restart(dev);
return 0;
}
/* CAN bus-off
*
 * This function should be called when the device goes bus-off to
* tell the netif layer that no more packets can be sent or received.
* If enabled, a timer is started to trigger bus-off recovery.
*/
void can_bus_off(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
if (priv->restart_ms)
netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
priv->restart_ms);
else
netdev_info(dev, "bus-off\n");
netif_carrier_off(dev);
if (priv->restart_ms)
schedule_delayed_work(&priv->restart_work,
msecs_to_jiffies(priv->restart_ms));
}
EXPORT_SYMBOL_GPL(can_bus_off);
void can_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
dev->mtu = CAN_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 10;
/* New-style flags. */
dev->flags = IFF_NOARP;
dev->features = NETIF_F_HW_CSUM;
}
/* Allocate and setup space for the CAN network device */
struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
unsigned int txqs, unsigned int rxqs)
{
struct can_ml_priv *can_ml;
struct net_device *dev;
struct can_priv *priv;
int size;
/* We put the driver's priv, the CAN mid layer priv and the
* echo skb into the netdevice's priv. The memory layout for
* the netdev_priv is like this:
*
* +-------------------------+
* | driver's priv |
* +-------------------------+
* | struct can_ml_priv |
* +-------------------------+
* | array of struct sk_buff |
* +-------------------------+
*/
size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
if (echo_skb_max)
size = ALIGN(size, sizeof(struct sk_buff *)) +
echo_skb_max * sizeof(struct sk_buff *);
dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
txqs, rxqs);
if (!dev)
return NULL;
priv = netdev_priv(dev);
priv->dev = dev;
can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
can_set_ml_priv(dev, can_ml);
if (echo_skb_max) {
priv->echo_skb_max = echo_skb_max;
priv->echo_skb = (void *)priv +
(size - echo_skb_max * sizeof(struct sk_buff *));
}
priv->state = CAN_STATE_STOPPED;
INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
return dev;
}
EXPORT_SYMBOL_GPL(alloc_candev_mqs);
/* Free space of the CAN network device */
void free_candev(struct net_device *dev)
{
free_netdev(dev);
}
EXPORT_SYMBOL_GPL(free_candev);
/* changing MTU and control mode for CAN/CANFD devices */
int can_change_mtu(struct net_device *dev, int new_mtu)
{
struct can_priv *priv = netdev_priv(dev);
u32 ctrlmode_static = can_get_static_ctrlmode(priv);
/* Do not allow changing the MTU while running */
if (dev->flags & IFF_UP)
return -EBUSY;
/* allow change of MTU according to the CANFD ability of the device */
switch (new_mtu) {
case CAN_MTU:
/* 'CANFD-only' controllers can not switch to CAN_MTU */
if (ctrlmode_static & CAN_CTRLMODE_FD)
return -EINVAL;
priv->ctrlmode &= ~CAN_CTRLMODE_FD;
break;
case CANFD_MTU:
/* check for potential CANFD ability */
if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
!(ctrlmode_static & CAN_CTRLMODE_FD))
return -EINVAL;
priv->ctrlmode |= CAN_CTRLMODE_FD;
break;
default:
return -EINVAL;
}
dev->mtu = new_mtu;
return 0;
}
EXPORT_SYMBOL_GPL(can_change_mtu);
/* generic implementation of netdev_ops::ndo_eth_ioctl for CAN devices
* supporting hardware timestamps
*/
int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct hwtstamp_config hwts_cfg = { 0 };
switch (cmd) {
case SIOCSHWTSTAMP: /* set */
if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
return -EFAULT;
if (hwts_cfg.tx_type == HWTSTAMP_TX_ON &&
hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
return 0;
return -ERANGE;
case SIOCGHWTSTAMP: /* get */
hwts_cfg.tx_type = HWTSTAMP_TX_ON;
hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
return -EFAULT;
return 0;
default:
return -EOPNOTSUPP;
}
}
EXPORT_SYMBOL(can_eth_ioctl_hwts);
/* generic implementation of ethtool_ops::get_ts_info for CAN devices
* supporting hardware timestamps
*/
int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
struct ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
EXPORT_SYMBOL(can_ethtool_op_get_ts_info_hwts);
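/* Illustrative sketch (not part of the original file): drivers with
 * hardware TX/RX timestamping can wire the two generic helpers above
 * straight into their ops tables. The example_* names are hypothetical
 * and <linux/ethtool.h> is assumed to be available.
 */
static const struct net_device_ops example_netdev_ops = {
	.ndo_eth_ioctl	= can_eth_ioctl_hwts,
	.ndo_change_mtu	= can_change_mtu,
	/* .ndo_open/.ndo_stop/.ndo_start_xmit are driver specific */
};

static const struct ethtool_ops example_ethtool_ops = {
	.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};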
/* Common open function when the device gets opened.
*
* This function should be called in the open function of the device
* driver.
*/
int open_candev(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
if (!priv->bittiming.bitrate) {
netdev_err(dev, "bit-timing not yet defined\n");
return -EINVAL;
}
/* For CAN FD the data bitrate has to be >= the arbitration bitrate */
if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
(!priv->data_bittiming.bitrate ||
priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
netdev_err(dev, "incorrect/missing data bit-timing\n");
return -EINVAL;
}
/* Switch carrier on if device was stopped while in bus-off state */
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
return 0;
}
EXPORT_SYMBOL_GPL(open_candev);
#ifdef CONFIG_OF
/* Common function that can be used to understand the limitations of
 * a transceiver when it provides no means to determine them
* at runtime.
*/
void of_can_transceiver(struct net_device *dev)
{
struct device_node *dn;
struct can_priv *priv = netdev_priv(dev);
struct device_node *np = dev->dev.parent->of_node;
int ret;
dn = of_get_child_by_name(np, "can-transceiver");
if (!dn)
return;
ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
of_node_put(dn);
if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
}
EXPORT_SYMBOL_GPL(of_can_transceiver);
#endif
/* Common close function for cleanup before the device gets closed.
*
* This function should be called in the close function of the device
* driver.
*/
void close_candev(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
cancel_delayed_work_sync(&priv->restart_work);
can_flush_echo_skb(dev);
}
EXPORT_SYMBOL_GPL(close_candev);
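/* Illustrative sketch (not part of the original file): open_candev() and
 * close_candev() bracket the driver specific controller start/stop in
 * the usual ndo_open/ndo_stop implementations. example_open() and
 * example_stop() are hypothetical.
 */
static int example_open(struct net_device *dev)
{
	int err;

	err = open_candev(dev);
	if (err)
		return err;

	/* ... enable the controller and its interrupts ... */

	netif_start_queue(dev);

	return 0;
}

static int example_stop(struct net_device *dev)
{
	netif_stop_queue(dev);

	/* ... disable the controller and its interrupts ... */

	close_candev(dev);

	return 0;
}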
static int can_set_termination(struct net_device *ndev, u16 term)
{
struct can_priv *priv = netdev_priv(ndev);
int set;
if (term == priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_ENABLED])
set = 1;
else
set = 0;
gpiod_set_value(priv->termination_gpio, set);
return 0;
}
static int can_get_termination(struct net_device *ndev)
{
struct can_priv *priv = netdev_priv(ndev);
struct device *dev = ndev->dev.parent;
struct gpio_desc *gpio;
u32 term;
int ret;
	/* Disabling termination by default is the safe choice: if many
	 * bus participants enable it, no communication is possible at all.
*/
gpio = devm_gpiod_get_optional(dev, "termination", GPIOD_OUT_LOW);
if (IS_ERR(gpio))
return dev_err_probe(dev, PTR_ERR(gpio),
"Cannot get termination-gpios\n");
if (!gpio)
return 0;
ret = device_property_read_u32(dev, "termination-ohms", &term);
if (ret) {
netdev_err(ndev, "Cannot get termination-ohms: %pe\n",
ERR_PTR(ret));
return ret;
}
if (term > U16_MAX) {
netdev_err(ndev, "Invalid termination-ohms value (%u > %u)\n",
term, U16_MAX);
return -EINVAL;
}
priv->termination_const_cnt = ARRAY_SIZE(priv->termination_gpio_ohms);
priv->termination_const = priv->termination_gpio_ohms;
priv->termination_gpio = gpio;
priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_DISABLED] =
CAN_TERMINATION_DISABLED;
priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_ENABLED] = term;
priv->do_set_termination = can_set_termination;
return 0;
}
static bool
can_bittiming_const_valid(const struct can_bittiming_const *btc)
{
if (!btc)
return true;
if (!btc->sjw_max)
return false;
return true;
}
/* Register the CAN network device */
int register_candev(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
int err;
/* Ensure termination_const, termination_const_cnt and
* do_set_termination consistency. All must be either set or
* unset.
*/
if ((!priv->termination_const != !priv->termination_const_cnt) ||
(!priv->termination_const != !priv->do_set_termination))
return -EINVAL;
if (!priv->bitrate_const != !priv->bitrate_const_cnt)
return -EINVAL;
if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
return -EINVAL;
/* We only support either fixed bit rates or bit timing const. */
if ((priv->bitrate_const || priv->data_bitrate_const) &&
(priv->bittiming_const || priv->data_bittiming_const))
return -EINVAL;
if (!can_bittiming_const_valid(priv->bittiming_const) ||
!can_bittiming_const_valid(priv->data_bittiming_const))
return -EINVAL;
if (!priv->termination_const) {
err = can_get_termination(dev);
if (err)
return err;
}
dev->rtnl_link_ops = &can_link_ops;
netif_carrier_off(dev);
return register_netdev(dev);
}
EXPORT_SYMBOL_GPL(register_candev);
/* Unregister the CAN network device */
void unregister_candev(struct net_device *dev)
{
unregister_netdev(dev);
}
EXPORT_SYMBOL_GPL(unregister_candev);
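/* Illustrative sketch (not part of the original file): the usual probe
 * sequence around alloc_candev_mqs()/register_candev(). The example_*
 * names, the clock frequency and the echo skb count are hypothetical;
 * netdev_ops/ethtool_ops and bittiming constants setup is omitted.
 */
static int example_probe(struct net_device **out)
{
	struct net_device *dev;
	struct can_priv *priv;
	int err;

	dev = alloc_candev_mqs(sizeof(struct can_priv), 16 /* echo skbs */,
			       1, 1);
	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->clock.freq = 40 * 1000 * 1000;	/* assumed 40 MHz */
	priv->ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
				   CAN_CTRLMODE_LISTENONLY;

	/* SET_NETDEV_DEV(dev, parent) would normally be called here */

	err = register_candev(dev);
	if (err) {
		free_candev(dev);
		return err;
	}

	*out = dev;
	return 0;
}

static void example_remove(struct net_device *dev)
{
	unregister_candev(dev);
	free_candev(dev);
}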
/* Test if a network device is a candev based device
* and return the can_priv* if so.
*/
struct can_priv *safe_candev_priv(struct net_device *dev)
{
if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
return NULL;
return netdev_priv(dev);
}
EXPORT_SYMBOL_GPL(safe_candev_priv);
static __init int can_dev_init(void)
{
int err;
err = can_netlink_register();
if (!err)
pr_info("CAN device driver interface\n");
return err;
}
module_init(can_dev_init);
static __exit void can_dev_exit(void)
{
can_netlink_unregister();
}
module_exit(can_dev_exit);
MODULE_ALIAS_RTNL_LINK("can");
| linux-master | drivers/net/can/dev/dev.c |
/*
* CAN bus driver for IFI CANFD controller
*
* Copyright (C) 2016 Marek Vasut <[email protected]>
*
* Details about this controller can be found at
* http://www.ifi-pld.de/IP/CANFD/canfd.html
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/can/dev.h>
#define IFI_CANFD_STCMD 0x0
#define IFI_CANFD_STCMD_HARDRESET 0xDEADCAFD
#define IFI_CANFD_STCMD_ENABLE BIT(0)
#define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2)
#define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3)
#define IFI_CANFD_STCMD_BUSOFF BIT(4)
#define IFI_CANFD_STCMD_ERROR_WARNING BIT(5)
#define IFI_CANFD_STCMD_BUSMONITOR BIT(16)
#define IFI_CANFD_STCMD_LOOPBACK BIT(18)
#define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24)
#define IFI_CANFD_STCMD_ENABLE_ISO BIT(25)
#define IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING BIT(26)
#define IFI_CANFD_STCMD_NORMAL_MODE ((u32)BIT(31))
#define IFI_CANFD_RXSTCMD 0x4
#define IFI_CANFD_RXSTCMD_REMOVE_MSG BIT(0)
#define IFI_CANFD_RXSTCMD_RESET BIT(7)
#define IFI_CANFD_RXSTCMD_EMPTY BIT(8)
#define IFI_CANFD_RXSTCMD_OVERFLOW BIT(13)
#define IFI_CANFD_TXSTCMD 0x8
#define IFI_CANFD_TXSTCMD_ADD_MSG BIT(0)
#define IFI_CANFD_TXSTCMD_HIGH_PRIO BIT(1)
#define IFI_CANFD_TXSTCMD_RESET BIT(7)
#define IFI_CANFD_TXSTCMD_EMPTY BIT(8)
#define IFI_CANFD_TXSTCMD_FULL BIT(12)
#define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13)
#define IFI_CANFD_INTERRUPT 0xc
#define IFI_CANFD_INTERRUPT_ERROR_BUSOFF BIT(0)
#define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1)
#define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG BIT(2)
#define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC BIT(3)
#define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10)
#define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16)
#define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22)
#define IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY BIT(24)
#define IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER BIT(25)
#define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31))
#define IFI_CANFD_IRQMASK 0x10
#define IFI_CANFD_IRQMASK_ERROR_BUSOFF BIT(0)
#define IFI_CANFD_IRQMASK_ERROR_WARNING BIT(1)
#define IFI_CANFD_IRQMASK_ERROR_STATE_CHG BIT(2)
#define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC BIT(3)
#define IFI_CANFD_IRQMASK_SET_ERR BIT(7)
#define IFI_CANFD_IRQMASK_SET_TS BIT(15)
#define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16)
#define IFI_CANFD_IRQMASK_SET_TX BIT(23)
#define IFI_CANFD_IRQMASK_RXFIFO_NEMPTY BIT(24)
#define IFI_CANFD_IRQMASK_SET_RX ((u32)BIT(31))
#define IFI_CANFD_TIME 0x14
#define IFI_CANFD_FTIME 0x18
#define IFI_CANFD_TIME_TIMEB_OFF 0
#define IFI_CANFD_TIME_TIMEA_OFF 8
#define IFI_CANFD_TIME_PRESCALE_OFF 16
#define IFI_CANFD_TIME_SJW_OFF_7_9_8_8 25
#define IFI_CANFD_TIME_SJW_OFF_4_12_6_6 28
#define IFI_CANFD_TIME_SET_SJW_4_12_6_6 BIT(6)
#define IFI_CANFD_TIME_SET_TIMEB_4_12_6_6 BIT(7)
#define IFI_CANFD_TIME_SET_PRESC_4_12_6_6 BIT(14)
#define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6 BIT(15)
#define IFI_CANFD_TDELAY 0x1c
#define IFI_CANFD_TDELAY_DEFAULT 0xb
#define IFI_CANFD_TDELAY_MASK 0x3fff
#define IFI_CANFD_TDELAY_ABS BIT(14)
#define IFI_CANFD_TDELAY_EN BIT(15)
#define IFI_CANFD_ERROR 0x20
#define IFI_CANFD_ERROR_TX_OFFSET 0
#define IFI_CANFD_ERROR_TX_MASK 0xff
#define IFI_CANFD_ERROR_RX_OFFSET 16
#define IFI_CANFD_ERROR_RX_MASK 0xff
#define IFI_CANFD_ERRCNT 0x24
#define IFI_CANFD_SUSPEND 0x28
#define IFI_CANFD_REPEAT 0x2c
#define IFI_CANFD_TRAFFIC 0x30
#define IFI_CANFD_TSCONTROL 0x34
#define IFI_CANFD_TSC 0x38
#define IFI_CANFD_TST 0x3c
#define IFI_CANFD_RES1 0x40
#define IFI_CANFD_ERROR_CTR 0x44
#define IFI_CANFD_ERROR_CTR_UNLOCK_MAGIC 0x21302899
#define IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST BIT(0)
#define IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST BIT(1)
#define IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST BIT(2)
#define IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST BIT(3)
#define IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST BIT(4)
#define IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST BIT(5)
#define IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST BIT(6)
#define IFI_CANFD_ERROR_CTR_OVERLOAD_ALL BIT(8)
#define IFI_CANFD_ERROR_CTR_ACK_ERROR_ALL BIT(9)
#define IFI_CANFD_ERROR_CTR_BIT0_ERROR_ALL BIT(10)
#define IFI_CANFD_ERROR_CTR_BIT1_ERROR_ALL BIT(11)
#define IFI_CANFD_ERROR_CTR_STUFF_ERROR_ALL BIT(12)
#define IFI_CANFD_ERROR_CTR_CRC_ERROR_ALL BIT(13)
#define IFI_CANFD_ERROR_CTR_FORM_ERROR_ALL BIT(14)
#define IFI_CANFD_ERROR_CTR_BITPOSITION_OFFSET 16
#define IFI_CANFD_ERROR_CTR_BITPOSITION_MASK 0xff
#define IFI_CANFD_ERROR_CTR_ER_RESET BIT(30)
#define IFI_CANFD_ERROR_CTR_ER_ENABLE ((u32)BIT(31))
#define IFI_CANFD_PAR 0x48
#define IFI_CANFD_CANCLOCK 0x4c
#define IFI_CANFD_SYSCLOCK 0x50
#define IFI_CANFD_VER 0x54
#define IFI_CANFD_VER_REV_MASK 0xff
#define IFI_CANFD_VER_REV_MIN_SUPPORTED 0x15
#define IFI_CANFD_IP_ID 0x58
#define IFI_CANFD_IP_ID_VALUE 0xD073CAFD
#define IFI_CANFD_TEST 0x5c
#define IFI_CANFD_RXFIFO_TS_63_32 0x60
#define IFI_CANFD_RXFIFO_TS_31_0 0x64
#define IFI_CANFD_RXFIFO_DLC 0x68
#define IFI_CANFD_RXFIFO_DLC_DLC_OFFSET 0
#define IFI_CANFD_RXFIFO_DLC_DLC_MASK 0xf
#define IFI_CANFD_RXFIFO_DLC_RTR BIT(4)
#define IFI_CANFD_RXFIFO_DLC_EDL BIT(5)
#define IFI_CANFD_RXFIFO_DLC_BRS BIT(6)
#define IFI_CANFD_RXFIFO_DLC_ESI BIT(7)
#define IFI_CANFD_RXFIFO_DLC_OBJ_OFFSET 8
#define IFI_CANFD_RXFIFO_DLC_OBJ_MASK 0x1ff
#define IFI_CANFD_RXFIFO_DLC_FNR_OFFSET 24
#define IFI_CANFD_RXFIFO_DLC_FNR_MASK 0xff
#define IFI_CANFD_RXFIFO_ID 0x6c
#define IFI_CANFD_RXFIFO_ID_ID_OFFSET 0
#define IFI_CANFD_RXFIFO_ID_ID_STD_MASK CAN_SFF_MASK
#define IFI_CANFD_RXFIFO_ID_ID_STD_OFFSET 0
#define IFI_CANFD_RXFIFO_ID_ID_STD_WIDTH 10
#define IFI_CANFD_RXFIFO_ID_ID_XTD_MASK CAN_EFF_MASK
#define IFI_CANFD_RXFIFO_ID_ID_XTD_OFFSET 11
#define IFI_CANFD_RXFIFO_ID_ID_XTD_WIDTH 18
#define IFI_CANFD_RXFIFO_ID_IDE BIT(29)
#define IFI_CANFD_RXFIFO_DATA 0x70 /* 0x70..0xac */
#define IFI_CANFD_TXFIFO_SUSPEND_US 0xb0
#define IFI_CANFD_TXFIFO_REPEATCOUNT 0xb4
#define IFI_CANFD_TXFIFO_DLC 0xb8
#define IFI_CANFD_TXFIFO_DLC_DLC_OFFSET 0
#define IFI_CANFD_TXFIFO_DLC_DLC_MASK 0xf
#define IFI_CANFD_TXFIFO_DLC_RTR BIT(4)
#define IFI_CANFD_TXFIFO_DLC_EDL BIT(5)
#define IFI_CANFD_TXFIFO_DLC_BRS BIT(6)
#define IFI_CANFD_TXFIFO_DLC_FNR_OFFSET 24
#define IFI_CANFD_TXFIFO_DLC_FNR_MASK 0xff
#define IFI_CANFD_TXFIFO_ID 0xbc
#define IFI_CANFD_TXFIFO_ID_ID_OFFSET 0
#define IFI_CANFD_TXFIFO_ID_ID_STD_MASK CAN_SFF_MASK
#define IFI_CANFD_TXFIFO_ID_ID_STD_OFFSET 0
#define IFI_CANFD_TXFIFO_ID_ID_STD_WIDTH 10
#define IFI_CANFD_TXFIFO_ID_ID_XTD_MASK CAN_EFF_MASK
#define IFI_CANFD_TXFIFO_ID_ID_XTD_OFFSET 11
#define IFI_CANFD_TXFIFO_ID_ID_XTD_WIDTH 18
#define IFI_CANFD_TXFIFO_ID_IDE BIT(29)
#define IFI_CANFD_TXFIFO_DATA 0xc0 /* 0xb0..0xfc */
#define IFI_CANFD_FILTER_MASK(n) (0x800 + ((n) * 8) + 0)
#define IFI_CANFD_FILTER_MASK_EXT BIT(29)
#define IFI_CANFD_FILTER_MASK_EDL BIT(30)
#define IFI_CANFD_FILTER_MASK_VALID ((u32)BIT(31))
#define IFI_CANFD_FILTER_IDENT(n) (0x800 + ((n) * 8) + 4)
#define IFI_CANFD_FILTER_IDENT_IDE BIT(29)
#define IFI_CANFD_FILTER_IDENT_CANFD BIT(30)
#define IFI_CANFD_FILTER_IDENT_VALID ((u32)BIT(31))
/* IFI CANFD private data structure */
struct ifi_canfd_priv {
struct can_priv can; /* must be the first member */
struct napi_struct napi;
struct net_device *ndev;
void __iomem *base;
};
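/* Interrupt mask handling: every write to IFI_CANFD_IRQMASK below carries
* the SET_ERR/SET_TS/SET_TX/SET_RX strobe bits alongside the per-source
* enable bits. Presumably the strobes select which groups of the mask the
* write actually updates; this is inferred from the register bit names, the
* IFI documentation is not quoted here.
*/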
static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
u32 enirq = 0;
if (enable) {
enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
IFI_CANFD_IRQMASK_RXFIFO_NEMPTY |
IFI_CANFD_IRQMASK_ERROR_STATE_CHG |
IFI_CANFD_IRQMASK_ERROR_WARNING |
IFI_CANFD_IRQMASK_ERROR_BUSOFF;
if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;
}
writel(IFI_CANFD_IRQMASK_SET_ERR |
IFI_CANFD_IRQMASK_SET_TS |
IFI_CANFD_IRQMASK_SET_TX |
IFI_CANFD_IRQMASK_SET_RX | enirq,
priv->base + IFI_CANFD_IRQMASK);
}
static void ifi_canfd_read_fifo(struct net_device *ndev)
{
struct net_device_stats *stats = &ndev->stats;
struct ifi_canfd_priv *priv = netdev_priv(ndev);
struct canfd_frame *cf;
struct sk_buff *skb;
const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER;
u32 rxdlc, rxid;
u32 dlc, id;
int i;
rxdlc = readl(priv->base + IFI_CANFD_RXFIFO_DLC);
if (rxdlc & IFI_CANFD_RXFIFO_DLC_EDL)
skb = alloc_canfd_skb(ndev, &cf);
else
skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
if (!skb) {
stats->rx_dropped++;
return;
}
dlc = (rxdlc >> IFI_CANFD_RXFIFO_DLC_DLC_OFFSET) &
IFI_CANFD_RXFIFO_DLC_DLC_MASK;
if (rxdlc & IFI_CANFD_RXFIFO_DLC_EDL)
cf->len = can_fd_dlc2len(dlc);
else
cf->len = can_cc_dlc2len(dlc);
rxid = readl(priv->base + IFI_CANFD_RXFIFO_ID);
id = (rxid >> IFI_CANFD_RXFIFO_ID_ID_OFFSET);
if (id & IFI_CANFD_RXFIFO_ID_IDE) {
id &= IFI_CANFD_RXFIFO_ID_ID_XTD_MASK;
/*
* When an extended ID frame is received, the standard and
* extended parts of the ID are swapped in the register,
* so swap them back to obtain the correct ID.
*/
id = (id >> IFI_CANFD_RXFIFO_ID_ID_XTD_OFFSET) |
((id & IFI_CANFD_RXFIFO_ID_ID_STD_MASK) <<
IFI_CANFD_RXFIFO_ID_ID_XTD_WIDTH);
id |= CAN_EFF_FLAG;
} else {
id &= IFI_CANFD_RXFIFO_ID_ID_STD_MASK;
}
cf->can_id = id;
if (rxdlc & IFI_CANFD_RXFIFO_DLC_ESI) {
cf->flags |= CANFD_ESI;
netdev_dbg(ndev, "ESI Error\n");
}
if (!(rxdlc & IFI_CANFD_RXFIFO_DLC_EDL) &&
(rxdlc & IFI_CANFD_RXFIFO_DLC_RTR)) {
cf->can_id |= CAN_RTR_FLAG;
} else {
if (rxdlc & IFI_CANFD_RXFIFO_DLC_BRS)
cf->flags |= CANFD_BRS;
for (i = 0; i < cf->len; i += 4) {
*(u32 *)(cf->data + i) =
readl(priv->base + IFI_CANFD_RXFIFO_DATA + i);
}
stats->rx_bytes += cf->len;
}
stats->rx_packets++;
/* Remove the packet from FIFO */
writel(IFI_CANFD_RXSTCMD_REMOVE_MSG, priv->base + IFI_CANFD_RXSTCMD);
writel(rx_irq_mask, priv->base + IFI_CANFD_INTERRUPT);
netif_receive_skb(skb);
}
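/* NAPI RX path: ifi_canfd_do_rx_poll() below drains one frame per loop
* iteration until the FIFO reports empty or the NAPI quota is used up;
* interrupts are only re-enabled by ifi_canfd_poll() once it completes
* with work_done < quota.
*/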
static int ifi_canfd_do_rx_poll(struct net_device *ndev, int quota)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
u32 pkts = 0;
u32 rxst;
rxst = readl(priv->base + IFI_CANFD_RXSTCMD);
if (rxst & IFI_CANFD_RXSTCMD_EMPTY) {
netdev_dbg(ndev, "No messages in RX FIFO\n");
return 0;
}
for (;;) {
if (rxst & IFI_CANFD_RXSTCMD_EMPTY)
break;
if (quota <= 0)
break;
ifi_canfd_read_fifo(ndev);
quota--;
pkts++;
rxst = readl(priv->base + IFI_CANFD_RXSTCMD);
}
return pkts;
}
static int ifi_canfd_handle_lost_msg(struct net_device *ndev)
{
struct net_device_stats *stats = &ndev->stats;
struct sk_buff *skb;
struct can_frame *frame;
netdev_err(ndev, "RX FIFO overflow, message(s) lost.\n");
stats->rx_errors++;
stats->rx_over_errors++;
skb = alloc_can_err_skb(ndev, &frame);
if (unlikely(!skb))
return 0;
frame->can_id |= CAN_ERR_CRTL;
frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
netif_receive_skb(skb);
return 1;
}
static int ifi_canfd_handle_lec_err(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |
IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |
IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST |
IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST |
IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST |
IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST |
IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST;
if (!(errctr & errmask)) /* No error happened. */
return 0;
priv->can.can_stats.bus_error++;
stats->rx_errors++;
/* Propagate the error condition to the CAN stack. */
skb = alloc_can_err_skb(ndev, &cf);
if (unlikely(!skb))
return 0;
/* Decode the error flags read from the error counter register above. */
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST)
cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST)
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST)
cf->data[2] |= CAN_ERR_PROT_BIT0;
if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST)
cf->data[2] |= CAN_ERR_PROT_BIT1;
if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST)
cf->data[2] |= CAN_ERR_PROT_STUFF;
if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST)
cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST)
cf->data[2] |= CAN_ERR_PROT_FORM;
/* Reset the error counter, ack the IRQ and re-enable the counter. */
writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
writel(IFI_CANFD_INTERRUPT_ERROR_COUNTER,
priv->base + IFI_CANFD_INTERRUPT);
writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
netif_receive_skb(skb);
return 1;
}
static int ifi_canfd_get_berr_counter(const struct net_device *ndev,
struct can_berr_counter *bec)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
u32 err;
err = readl(priv->base + IFI_CANFD_ERROR);
bec->rxerr = (err >> IFI_CANFD_ERROR_RX_OFFSET) &
IFI_CANFD_ERROR_RX_MASK;
bec->txerr = (err >> IFI_CANFD_ERROR_TX_OFFSET) &
IFI_CANFD_ERROR_TX_MASK;
return 0;
}
static int ifi_canfd_handle_state_change(struct net_device *ndev,
enum can_state new_state)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
switch (new_state) {
case CAN_STATE_ERROR_ACTIVE:
/* error active state */
priv->can.can_stats.error_warning++;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
break;
case CAN_STATE_ERROR_WARNING:
/* error warning state */
priv->can.can_stats.error_warning++;
priv->can.state = CAN_STATE_ERROR_WARNING;
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
priv->can.can_stats.error_passive++;
priv->can.state = CAN_STATE_ERROR_PASSIVE;
break;
case CAN_STATE_BUS_OFF:
/* bus-off state */
priv->can.state = CAN_STATE_BUS_OFF;
ifi_canfd_irq_enable(ndev, 0);
priv->can.can_stats.bus_off++;
can_bus_off(ndev);
break;
default:
break;
}
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(ndev, &cf);
if (unlikely(!skb))
return 0;
ifi_canfd_get_berr_counter(ndev, &bec);
switch (new_state) {
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if (bec.txerr > 127)
cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
break;
case CAN_STATE_BUS_OFF:
/* bus-off state */
cf->can_id |= CAN_ERR_BUSOFF;
break;
default:
break;
}
netif_receive_skb(skb);
return 1;
}
static int ifi_canfd_handle_state_errors(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
int work_done = 0;
if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) &&
(priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
netdev_dbg(ndev, "Error, entered active state\n");
work_done += ifi_canfd_handle_state_change(ndev,
CAN_STATE_ERROR_ACTIVE);
}
if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) &&
(priv->can.state != CAN_STATE_ERROR_WARNING)) {
netdev_dbg(ndev, "Error, entered warning state\n");
work_done += ifi_canfd_handle_state_change(ndev,
CAN_STATE_ERROR_WARNING);
}
if ((stcmd & IFI_CANFD_STCMD_ERROR_PASSIVE) &&
(priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
netdev_dbg(ndev, "Error, entered passive state\n");
work_done += ifi_canfd_handle_state_change(ndev,
CAN_STATE_ERROR_PASSIVE);
}
if ((stcmd & IFI_CANFD_STCMD_BUSOFF) &&
(priv->can.state != CAN_STATE_BUS_OFF)) {
netdev_dbg(ndev, "Error, entered bus-off state\n");
work_done += ifi_canfd_handle_state_change(ndev,
CAN_STATE_BUS_OFF);
}
return work_done;
}
static int ifi_canfd_poll(struct napi_struct *napi, int quota)
{
struct net_device *ndev = napi->dev;
struct ifi_canfd_priv *priv = netdev_priv(ndev);
u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
int work_done = 0;
/* Handle bus state changes */
work_done += ifi_canfd_handle_state_errors(ndev);
/* Handle lost messages on RX */
if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
work_done += ifi_canfd_handle_lost_msg(ndev);
/* Handle lec errors on the bus */
if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
work_done += ifi_canfd_handle_lec_err(ndev);
/* Handle normal messages on RX */
if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done);
if (work_done < quota) {
napi_complete_done(napi, work_done);
ifi_canfd_irq_enable(ndev, 1);
}
return work_done;
}
static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
{
struct net_device *ndev = (struct net_device *)dev_id;
struct ifi_canfd_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER |
IFI_CANFD_INTERRUPT_ERROR_COUNTER |
IFI_CANFD_INTERRUPT_ERROR_STATE_CHG |
IFI_CANFD_INTERRUPT_ERROR_WARNING |
IFI_CANFD_INTERRUPT_ERROR_BUSOFF;
const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
const u32 clr_irq_mask = ~((u32)IFI_CANFD_INTERRUPT_SET_IRQ);
u32 isr;
isr = readl(priv->base + IFI_CANFD_INTERRUPT);
/* No interrupt */
if (isr == 0)
return IRQ_NONE;
/* Clear all pending interrupts but ErrWarn */
writel(clr_irq_mask, priv->base + IFI_CANFD_INTERRUPT);
/* RX IRQ or bus warning, start NAPI */
if (isr & rx_irq_mask) {
ifi_canfd_irq_enable(ndev, 0);
napi_schedule(&priv->napi);
}
/* TX IRQ */
if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) {
stats->tx_bytes += can_get_echo_skb(ndev, 0, NULL);
stats->tx_packets++;
}
if (isr & tx_irq_mask)
netif_wake_queue(ndev);
return IRQ_HANDLED;
}
static const struct can_bittiming_const ifi_canfd_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */
.tseg1_max = 256,
.tseg2_min = 2, /* Time segment 2 = phase_seg2 */
.tseg2_max = 256,
.sjw_max = 128,
.brp_min = 2,
.brp_max = 512,
.brp_inc = 1,
};
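/* The TIME/FTIME registers take the bit-timing fields minus fixed offsets
* in the 7/9/8/8 layout enabled at start-up: the prescaler and phase_seg2
* are written minus 2, sjw and tseg1 minus 1. Illustrative example (numbers
* chosen here, not taken from the manual): brp=2, sjw=4,
* prop_seg+phase_seg1=13, phase_seg2=4 are written as 0, 3, 12 and 2.
*/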
static void ifi_canfd_set_bittiming(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.data_bittiming;
u16 brp, sjw, tseg1, tseg2, tdc;
/* Configure bit timing */
brp = bt->brp - 2;
sjw = bt->sjw - 1;
tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
tseg2 = bt->phase_seg2 - 2;
writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |
(tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |
(brp << IFI_CANFD_TIME_PRESCALE_OFF) |
(sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
priv->base + IFI_CANFD_TIME);
/* Configure data bit timing */
brp = dbt->brp - 2;
sjw = dbt->sjw - 1;
tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
tseg2 = dbt->phase_seg2 - 2;
writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |
(tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |
(brp << IFI_CANFD_TIME_PRESCALE_OFF) |
(sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
priv->base + IFI_CANFD_FTIME);
/* Configure transmitter delay */
tdc = dbt->brp * (dbt->prop_seg + dbt->phase_seg1);
tdc &= IFI_CANFD_TDELAY_MASK;
writel(IFI_CANFD_TDELAY_EN | tdc, priv->base + IFI_CANFD_TDELAY);
}
static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
const u32 mask, const u32 ident)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
writel(mask, priv->base + IFI_CANFD_FILTER_MASK(id));
writel(ident, priv->base + IFI_CANFD_FILTER_IDENT(id));
}
static void ifi_canfd_set_filters(struct net_device *ndev)
{
/* Receive all CAN frames (standard ID) */
ifi_canfd_set_filter(ndev, 0,
IFI_CANFD_FILTER_MASK_VALID |
IFI_CANFD_FILTER_MASK_EXT,
IFI_CANFD_FILTER_IDENT_VALID);
/* Receive all CAN frames (extended ID) */
ifi_canfd_set_filter(ndev, 1,
IFI_CANFD_FILTER_MASK_VALID |
IFI_CANFD_FILTER_MASK_EXT,
IFI_CANFD_FILTER_IDENT_VALID |
IFI_CANFD_FILTER_IDENT_IDE);
/* Receive all CANFD frames */
ifi_canfd_set_filter(ndev, 2,
IFI_CANFD_FILTER_MASK_VALID |
IFI_CANFD_FILTER_MASK_EDL |
IFI_CANFD_FILTER_MASK_EXT,
IFI_CANFD_FILTER_IDENT_VALID |
IFI_CANFD_FILTER_IDENT_CANFD |
IFI_CANFD_FILTER_IDENT_IDE);
}
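/* With the three catch-all filters set up above the controller accepts all
* standard, extended and CAN FD frames; hardware filtering is effectively
* disabled and any further filtering is left to the networking stack.
*/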
static void ifi_canfd_start(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
u32 stcmd;
/* Reset the IP */
writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD);
writel(IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING,
priv->base + IFI_CANFD_STCMD);
ifi_canfd_set_bittiming(ndev);
ifi_canfd_set_filters(ndev);
/* Reset FIFOs */
writel(IFI_CANFD_RXSTCMD_RESET, priv->base + IFI_CANFD_RXSTCMD);
writel(0, priv->base + IFI_CANFD_RXSTCMD);
writel(IFI_CANFD_TXSTCMD_RESET, priv->base + IFI_CANFD_TXSTCMD);
writel(0, priv->base + IFI_CANFD_TXSTCMD);
/* Repeat transmission until successful */
writel(0, priv->base + IFI_CANFD_REPEAT);
writel(0, priv->base + IFI_CANFD_SUSPEND);
/* Clear all pending interrupts */
writel((u32)(~IFI_CANFD_INTERRUPT_SET_IRQ),
priv->base + IFI_CANFD_INTERRUPT);
stcmd = IFI_CANFD_STCMD_ENABLE | IFI_CANFD_STCMD_NORMAL_MODE |
IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
stcmd |= IFI_CANFD_STCMD_BUSMONITOR;
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
stcmd |= IFI_CANFD_STCMD_LOOPBACK;
if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
stcmd |= IFI_CANFD_STCMD_ENABLE_ISO;
if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
stcmd |= IFI_CANFD_STCMD_DISABLE_CANFD;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
ifi_canfd_irq_enable(ndev, 1);
/* Unlock, reset and enable the error counter. */
writel(IFI_CANFD_ERROR_CTR_UNLOCK_MAGIC,
priv->base + IFI_CANFD_ERROR_CTR);
writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
/* Enable controller */
writel(stcmd, priv->base + IFI_CANFD_STCMD);
}
static void ifi_canfd_stop(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
/* Reset and disable the error counter. */
writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
writel(0, priv->base + IFI_CANFD_ERROR_CTR);
/* Reset the IP */
writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD);
/* Mask all interrupts */
writel(~0, priv->base + IFI_CANFD_IRQMASK);
/* Clear all pending interrupts */
writel((u32)(~IFI_CANFD_INTERRUPT_SET_IRQ),
priv->base + IFI_CANFD_INTERRUPT);
/* Set the state as STOPPED */
priv->can.state = CAN_STATE_STOPPED;
}
static int ifi_canfd_set_mode(struct net_device *ndev, enum can_mode mode)
{
switch (mode) {
case CAN_MODE_START:
ifi_canfd_start(ndev);
netif_wake_queue(ndev);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int ifi_canfd_open(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
int ret;
ret = open_candev(ndev);
if (ret) {
netdev_err(ndev, "Failed to open CAN device\n");
return ret;
}
/* Register interrupt handler */
ret = request_irq(ndev->irq, ifi_canfd_isr, IRQF_SHARED,
ndev->name, ndev);
if (ret < 0) {
netdev_err(ndev, "Failed to request interrupt\n");
goto err_irq;
}
ifi_canfd_start(ndev);
napi_enable(&priv->napi);
netif_start_queue(ndev);
return 0;
err_irq:
close_candev(ndev);
return ret;
}
static int ifi_canfd_close(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
napi_disable(&priv->napi);
ifi_canfd_stop(ndev);
free_irq(ndev->irq, ndev);
close_candev(ndev);
return 0;
}
static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
u32 txst, txid, txdlc;
int i;
if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
/* Check if the TX buffer is full */
txst = readl(priv->base + IFI_CANFD_TXSTCMD);
if (txst & IFI_CANFD_TXSTCMD_FULL) {
netif_stop_queue(ndev);
netdev_err(ndev, "BUG! TX FIFO full when queue awake!\n");
return NETDEV_TX_BUSY;
}
netif_stop_queue(ndev);
if (cf->can_id & CAN_EFF_FLAG) {
txid = cf->can_id & CAN_EFF_MASK;
/*
* When an extended ID frame is transmitted, the standard
* and extended parts of the ID are swapped in the
* register, so swap them back to send the correct ID.
*/
txid = (txid >> IFI_CANFD_TXFIFO_ID_ID_XTD_WIDTH) |
((txid & IFI_CANFD_TXFIFO_ID_ID_XTD_MASK) <<
IFI_CANFD_TXFIFO_ID_ID_XTD_OFFSET);
txid |= IFI_CANFD_TXFIFO_ID_IDE;
} else {
txid = cf->can_id & CAN_SFF_MASK;
}
txdlc = can_fd_len2dlc(cf->len);
if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) && can_is_canfd_skb(skb)) {
txdlc |= IFI_CANFD_TXFIFO_DLC_EDL;
if (cf->flags & CANFD_BRS)
txdlc |= IFI_CANFD_TXFIFO_DLC_BRS;
}
if (cf->can_id & CAN_RTR_FLAG)
txdlc |= IFI_CANFD_TXFIFO_DLC_RTR;
/* message ram configuration */
writel(txid, priv->base + IFI_CANFD_TXFIFO_ID);
writel(txdlc, priv->base + IFI_CANFD_TXFIFO_DLC);
for (i = 0; i < cf->len; i += 4) {
writel(*(u32 *)(cf->data + i),
priv->base + IFI_CANFD_TXFIFO_DATA + i);
}
writel(0, priv->base + IFI_CANFD_TXFIFO_REPEATCOUNT);
writel(0, priv->base + IFI_CANFD_TXFIFO_SUSPEND_US);
can_put_echo_skb(skb, ndev, 0, 0);
/* Start the transmission */
writel(IFI_CANFD_TXSTCMD_ADD_MSG, priv->base + IFI_CANFD_TXSTCMD);
return NETDEV_TX_OK;
}
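/* Note on the TX path above: the queue is stopped before every frame and
* only woken again from the TX interrupt (or on a restart), so at most one
* frame is in flight from the stack's point of view and echo slot 0 is
* always free when ifi_canfd_start_xmit() runs.
*/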
static const struct net_device_ops ifi_canfd_netdev_ops = {
.ndo_open = ifi_canfd_open,
.ndo_stop = ifi_canfd_close,
.ndo_start_xmit = ifi_canfd_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops ifi_canfd_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
};
static int ifi_canfd_plat_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct net_device *ndev;
struct ifi_canfd_priv *priv;
void __iomem *addr;
int irq, ret;
u32 id, rev;
addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr))
return PTR_ERR(addr);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
id = readl(addr + IFI_CANFD_IP_ID);
if (id != IFI_CANFD_IP_ID_VALUE) {
dev_err(dev, "This block is not IFI CANFD, id=%08x\n", id);
return -EINVAL;
}
rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK;
if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) {
dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n",
rev, IFI_CANFD_VER_REV_MIN_SUPPORTED);
return -EINVAL;
}
ndev = alloc_candev(sizeof(*priv), 1);
if (!ndev)
return -ENOMEM;
ndev->irq = irq;
ndev->flags |= IFF_ECHO; /* we support local echo */
ndev->netdev_ops = &ifi_canfd_netdev_ops;
ndev->ethtool_ops = &ifi_canfd_ethtool_ops;
priv = netdev_priv(ndev);
priv->ndev = ndev;
priv->base = addr;
netif_napi_add(ndev, &priv->napi, ifi_canfd_poll);
priv->can.state = CAN_STATE_STOPPED;
priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK);
priv->can.bittiming_const = &ifi_canfd_bittiming_const;
priv->can.data_bittiming_const = &ifi_canfd_bittiming_const;
priv->can.do_set_mode = ifi_canfd_set_mode;
priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
/* IFI CANFD can do both Bosch FD and ISO FD */
priv->can.ctrlmode = CAN_CTRLMODE_FD;
/* IFI CANFD can do both Bosch FD and ISO FD */
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO |
CAN_CTRLMODE_BERR_REPORTING;
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, dev);
ret = register_candev(ndev);
if (ret) {
dev_err(dev, "Failed to register (ret=%d)\n", ret);
goto err_reg;
}
dev_info(dev, "Driver registered: regs=%p, irq=%d, clock=%d\n",
priv->base, ndev->irq, priv->can.clock.freq);
return 0;
err_reg:
free_candev(ndev);
return ret;
}
static void ifi_canfd_plat_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
unregister_candev(ndev);
platform_set_drvdata(pdev, NULL);
free_candev(ndev);
}
static const struct of_device_id ifi_canfd_of_table[] = {
{ .compatible = "ifi,canfd-1.0", .data = NULL },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ifi_canfd_of_table);
static struct platform_driver ifi_canfd_plat_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = ifi_canfd_of_table,
},
.probe = ifi_canfd_plat_probe,
.remove_new = ifi_canfd_plat_remove,
};
module_platform_driver(ifi_canfd_plat_driver);
MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for IFI CANFD controller");
| linux-master | drivers/net/can/ifi_canfd/ifi_canfd.c |
// SPDX-License-Identifier: GPL-2.0+
/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
*
* Copyright (c) 2018 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* References:
*
* Dave Sawyer & Phil Weeks & Frank Itkowsky,
* "DEC FDDIcontroller 700 Port Specification",
* Revision 1.1, Digital Equipment Corporation
*/
/* ------------------------------------------------------------------------- */
/* FZA configurable parameters. */
/* The transmit ring size selector: 0 selects 512 descriptors, 1 selects 1024. */
#define FZA_RING_TX_MODE 0
/* The number of receive ring descriptors; from 2 up to 256. */
#define FZA_RING_RX_SIZE 256
/* End of FZA configurable parameters. No need to change anything below. */
/* ------------------------------------------------------------------------- */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/fddidevice.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/tc.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/barrier.h>
#include "defza.h"
#define DRV_NAME "defza"
#define DRV_VERSION "v.1.1.4"
#define DRV_RELDATE "Oct 6 2018"
static const char version[] =
DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n";
MODULE_AUTHOR("Maciej W. Rozycki <[email protected]>");
MODULE_DESCRIPTION("DEC FDDIcontroller 700 (DEFZA-xx) driver");
MODULE_LICENSE("GPL");
static int loopback;
module_param(loopback, int, 0644);
/* Ring Purger Multicast */
static u8 hw_addr_purger[8] = { 0x09, 0x00, 0x2b, 0x02, 0x01, 0x05 };
/* Directed Beacon Multicast */
static u8 hw_addr_beacon[8] = { 0x01, 0x80, 0xc2, 0x00, 0x01, 0x00 };
/* Shorthands for MMIO accesses that we require to be strongly ordered
* WRT preceding MMIO accesses.
*/
#define readw_o readw_relaxed
#define readl_o readl_relaxed
#define writew_o writew_relaxed
#define writel_o writel_relaxed
/* Shorthands for MMIO accesses that we are happy with being weakly ordered
* WRT preceding MMIO accesses.
*/
#define readw_u readw_relaxed
#define readl_u readl_relaxed
#define readq_u readq_relaxed
#define writew_u writew_relaxed
#define writel_u writel_relaxed
#define writeq_u writeq_relaxed
static inline struct sk_buff *fza_alloc_skb_irq(struct net_device *dev,
unsigned int length)
{
return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
static inline struct sk_buff *fza_alloc_skb(struct net_device *dev,
unsigned int length)
{
return __netdev_alloc_skb(dev, length, GFP_KERNEL);
}
static inline void fza_skb_align(struct sk_buff *skb, unsigned int v)
{
unsigned long x, y;
x = (unsigned long)skb->data;
y = ALIGN(x, v);
skb_reserve(skb, y - x);
}
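/* The helpers below copy packet data between host memory and the card's
* shared memory using 64-bit accesses where the architecture allows it and
* 32-bit accesses otherwise; only word-sized (or wider) transfers are
* permitted by the hardware, which is why lengths are rounded up to full
* 32-bit words.
*/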
static inline void fza_reads(const void __iomem *from, void *to,
unsigned long size)
{
if (sizeof(unsigned long) == 8) {
const u64 __iomem *src = from;
const u32 __iomem *src_trail;
u64 *dst = to;
u32 *dst_trail;
for (size = (size + 3) / 4; size > 1; size -= 2)
*dst++ = readq_u(src++);
if (size) {
src_trail = (u32 __iomem *)src;
dst_trail = (u32 *)dst;
*dst_trail = readl_u(src_trail);
}
} else {
const u32 __iomem *src = from;
u32 *dst = to;
for (size = (size + 3) / 4; size; size--)
*dst++ = readl_u(src++);
}
}
static inline void fza_writes(const void *from, void __iomem *to,
unsigned long size)
{
if (sizeof(unsigned long) == 8) {
const u64 *src = from;
const u32 *src_trail;
u64 __iomem *dst = to;
u32 __iomem *dst_trail;
for (size = (size + 3) / 4; size > 1; size -= 2)
writeq_u(*src++, dst++);
if (size) {
src_trail = (u32 *)src;
dst_trail = (u32 __iomem *)dst;
writel_u(*src_trail, dst_trail);
}
} else {
const u32 *src = from;
u32 __iomem *dst = to;
for (size = (size + 3) / 4; size; size--)
writel_u(*src++, dst++);
}
}
static inline void fza_moves(const void __iomem *from, void __iomem *to,
unsigned long size)
{
if (sizeof(unsigned long) == 8) {
const u64 __iomem *src = from;
const u32 __iomem *src_trail;
u64 __iomem *dst = to;
u32 __iomem *dst_trail;
for (size = (size + 3) / 4; size > 1; size -= 2)
writeq_u(readq_u(src++), dst++);
if (size) {
src_trail = (u32 __iomem *)src;
dst_trail = (u32 __iomem *)dst;
writel_u(readl_u(src_trail), dst_trail);
}
} else {
const u32 __iomem *src = from;
u32 __iomem *dst = to;
for (size = (size + 3) / 4; size; size--)
writel_u(readl_u(src++), dst++);
}
}
static inline void fza_zeros(void __iomem *to, unsigned long size)
{
if (sizeof(unsigned long) == 8) {
u64 __iomem *dst = to;
u32 __iomem *dst_trail;
for (size = (size + 3) / 4; size > 1; size -= 2)
writeq_u(0, dst++);
if (size) {
dst_trail = (u32 __iomem *)dst;
writel_u(0, dst_trail);
}
} else {
u32 __iomem *dst = to;
for (size = (size + 3) / 4; size; size--)
writel_u(0, dst++);
}
}
static inline void fza_regs_dump(struct fza_private *fp)
{
pr_debug("%s: iomem registers:\n", fp->name);
pr_debug(" reset: 0x%04x\n", readw_o(&fp->regs->reset));
pr_debug(" interrupt event: 0x%04x\n", readw_u(&fp->regs->int_event));
pr_debug(" status: 0x%04x\n", readw_u(&fp->regs->status));
pr_debug(" interrupt mask: 0x%04x\n", readw_u(&fp->regs->int_mask));
pr_debug(" control A: 0x%04x\n", readw_u(&fp->regs->control_a));
pr_debug(" control B: 0x%04x\n", readw_u(&fp->regs->control_b));
}
static inline void fza_do_reset(struct fza_private *fp)
{
/* Reset the board. */
writew_o(FZA_RESET_INIT, &fp->regs->reset);
readw_o(&fp->regs->reset); /* Synchronize. */
readw_o(&fp->regs->reset); /* Read it back for a small delay. */
writew_o(FZA_RESET_CLR, &fp->regs->reset);
/* Enable all interrupt events we handle. */
writew_o(fp->int_mask, &fp->regs->int_mask);
readw_o(&fp->regs->int_mask); /* Synchronize. */
}
static inline void fza_do_shutdown(struct fza_private *fp)
{
/* Disable the driver mode. */
writew_o(FZA_CONTROL_B_IDLE, &fp->regs->control_b);
/* And reset the board. */
writew_o(FZA_RESET_INIT, &fp->regs->reset);
readw_o(&fp->regs->reset); /* Synchronize. */
writew_o(FZA_RESET_CLR, &fp->regs->reset);
readw_o(&fp->regs->reset); /* Synchronize. */
}
static int fza_reset(struct fza_private *fp)
{
unsigned long flags;
uint status, state;
long t;
pr_info("%s: resetting the board...\n", fp->name);
spin_lock_irqsave(&fp->lock, flags);
fp->state_chg_flag = 0;
fza_do_reset(fp);
spin_unlock_irqrestore(&fp->lock, flags);
/* DEC says RESET needs up to 30 seconds to complete. My DEFZA-AA
* rev. C03 happily finishes in 9.7 seconds. :-) But we need to
* be on the safe side...
*/
t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
45 * HZ);
status = readw_u(&fp->regs->status);
state = FZA_STATUS_GET_STATE(status);
if (fp->state_chg_flag == 0) {
pr_err("%s: RESET timed out!, state %x\n", fp->name, state);
return -EIO;
}
if (state != FZA_STATE_UNINITIALIZED) {
pr_err("%s: RESET failed!, state %x, failure ID %x\n",
fp->name, state, FZA_STATUS_GET_TEST(status));
return -EIO;
}
pr_info("%s: OK\n", fp->name);
pr_debug("%s: RESET: %lums elapsed\n", fp->name,
(45 * HZ - t) * 1000 / HZ);
return 0;
}
static struct fza_ring_cmd __iomem *fza_cmd_send(struct net_device *dev,
int command)
{
struct fza_private *fp = netdev_priv(dev);
struct fza_ring_cmd __iomem *ring = fp->ring_cmd + fp->ring_cmd_index;
unsigned int old_mask, new_mask;
union fza_cmd_buf __iomem *buf;
struct netdev_hw_addr *ha;
int i;
old_mask = fp->int_mask;
new_mask = old_mask & ~FZA_MASK_STATE_CHG;
writew_u(new_mask, &fp->regs->int_mask);
readw_o(&fp->regs->int_mask); /* Synchronize. */
fp->int_mask = new_mask;
buf = fp->mmio + readl_u(&ring->buffer);
if ((readl_u(&ring->cmd_own) & FZA_RING_OWN_MASK) !=
FZA_RING_OWN_HOST) {
pr_warn("%s: command buffer full, command: %u!\n", fp->name,
command);
return NULL;
}
switch (command) {
case FZA_RING_CMD_INIT:
writel_u(FZA_RING_TX_MODE, &buf->init.tx_mode);
writel_u(FZA_RING_RX_SIZE, &buf->init.hst_rx_size);
fza_zeros(&buf->init.counters, sizeof(buf->init.counters));
break;
case FZA_RING_CMD_MODCAM:
i = 0;
fza_writes(&hw_addr_purger, &buf->cam.hw_addr[i++],
sizeof(*buf->cam.hw_addr));
fza_writes(&hw_addr_beacon, &buf->cam.hw_addr[i++],
sizeof(*buf->cam.hw_addr));
netdev_for_each_mc_addr(ha, dev) {
if (i >= FZA_CMD_CAM_SIZE)
break;
fza_writes(ha->addr, &buf->cam.hw_addr[i++],
sizeof(*buf->cam.hw_addr));
}
while (i < FZA_CMD_CAM_SIZE)
fza_zeros(&buf->cam.hw_addr[i++],
sizeof(*buf->cam.hw_addr));
break;
case FZA_RING_CMD_PARAM:
writel_u(loopback, &buf->param.loop_mode);
writel_u(fp->t_max, &buf->param.t_max);
writel_u(fp->t_req, &buf->param.t_req);
writel_u(fp->tvx, &buf->param.tvx);
writel_u(fp->lem_threshold, &buf->param.lem_threshold);
fza_writes(&fp->station_id, &buf->param.station_id,
sizeof(buf->param.station_id));
/* Convert to milliseconds due to buggy firmware. */
writel_u(fp->rtoken_timeout / 12500,
&buf->param.rtoken_timeout);
writel_u(fp->ring_purger, &buf->param.ring_purger);
break;
case FZA_RING_CMD_MODPROM:
if (dev->flags & IFF_PROMISC) {
writel_u(1, &buf->modprom.llc_prom);
writel_u(1, &buf->modprom.smt_prom);
} else {
writel_u(0, &buf->modprom.llc_prom);
writel_u(0, &buf->modprom.smt_prom);
}
if (dev->flags & IFF_ALLMULTI ||
netdev_mc_count(dev) > FZA_CMD_CAM_SIZE - 2)
writel_u(1, &buf->modprom.llc_multi);
else
writel_u(0, &buf->modprom.llc_multi);
writel_u(1, &buf->modprom.llc_bcast);
break;
}
/* Trigger the command. */
writel_u(FZA_RING_OWN_FZA | command, &ring->cmd_own);
writew_o(FZA_CONTROL_A_CMD_POLL, &fp->regs->control_a);
fp->ring_cmd_index = (fp->ring_cmd_index + 1) % FZA_RING_CMD_SIZE;
fp->int_mask = old_mask;
writew_u(fp->int_mask, &fp->regs->int_mask);
return ring;
}
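/* Command ring protocol as used by fza_cmd_send() above: the host fills in
* the buffer the descriptor points at, hands the descriptor over by writing
* FZA_RING_OWN_FZA together with the command code to cmd_own, and pokes
* CONTROL A with CMD_POLL. Completion is signalled by the CMD_DONE
* interrupt and by ownership returning to the host.
*/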
static int fza_init_send(struct net_device *dev,
struct fza_cmd_init *__iomem *init)
{
struct fza_private *fp = netdev_priv(dev);
struct fza_ring_cmd __iomem *ring;
unsigned long flags;
u32 stat;
long t;
spin_lock_irqsave(&fp->lock, flags);
fp->cmd_done_flag = 0;
ring = fza_cmd_send(dev, FZA_RING_CMD_INIT);
spin_unlock_irqrestore(&fp->lock, flags);
if (!ring)
/* This should never happen in the uninitialized state,
* so do not try to recover and just consider it fatal.
*/
return -ENOBUFS;
/* INIT may take quite a long time (160ms for my C03). */
t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
if (fp->cmd_done_flag == 0) {
pr_err("%s: INIT command timed out!, state %x\n", fp->name,
FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
return -EIO;
}
stat = readl_u(&ring->stat);
if (stat != FZA_RING_STAT_SUCCESS) {
pr_err("%s: INIT command failed!, status %02x, state %x\n",
fp->name, stat,
FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
return -EIO;
}
pr_debug("%s: INIT: %lums elapsed\n", fp->name,
(3 * HZ - t) * 1000 / HZ);
if (init)
*init = fp->mmio + readl_u(&ring->buffer);
return 0;
}
static void fza_rx_init(struct fza_private *fp)
{
int i;
/* Fill the host receive descriptor ring. */
for (i = 0; i < FZA_RING_RX_SIZE; i++) {
writel_o(0, &fp->ring_hst_rx[i].rmc);
writel_o((fp->rx_dma[i] + 0x1000) >> 9,
&fp->ring_hst_rx[i].buffer1);
writel_o(fp->rx_dma[i] >> 9 | FZA_RING_OWN_FZA,
&fp->ring_hst_rx[i].buf0_own);
}
}
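/* Receive buffers are 512-byte aligned because the descriptors only store
* bus address bits 9 and up (note the ">> 9" shifts above); each ring slot
* apparently hands the adapter two 4 KiB halves of the same buffer, with
* buffer 1 starting at dma + 0x1000.
*/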
static void fza_set_rx_mode(struct net_device *dev)
{
fza_cmd_send(dev, FZA_RING_CMD_MODCAM);
fza_cmd_send(dev, FZA_RING_CMD_MODPROM);
}
union fza_buffer_txp {
struct fza_buffer_tx *data_ptr;
struct fza_buffer_tx __iomem *mmio_ptr;
};
static int fza_do_xmit(union fza_buffer_txp ub, int len,
struct net_device *dev, int smt)
{
struct fza_private *fp = netdev_priv(dev);
struct fza_buffer_tx __iomem *rmc_tx_ptr;
int i, first, frag_len, left_len;
u32 own, rmc;
if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
FZA_TX_BUFFER_SIZE) < len)
return 1;
first = fp->ring_rmc_tx_index;
left_len = len;
frag_len = FZA_TX_BUFFER_SIZE;
/* First descriptor is relinquished last. */
own = FZA_RING_TX_OWN_HOST;
/* First descriptor carries frame length; we don't use cut-through. */
rmc = FZA_RING_TX_SOP | FZA_RING_TX_VBC | len;
do {
i = fp->ring_rmc_tx_index;
rmc_tx_ptr = &fp->buffer_tx[i];
if (left_len < FZA_TX_BUFFER_SIZE)
frag_len = left_len;
left_len -= frag_len;
/* Length must be a multiple of 4 as only word writes are
* permitted!
*/
frag_len = (frag_len + 3) & ~3;
if (smt)
fza_moves(ub.mmio_ptr, rmc_tx_ptr, frag_len);
else
fza_writes(ub.data_ptr, rmc_tx_ptr, frag_len);
if (left_len == 0)
rmc |= FZA_RING_TX_EOP; /* Mark last frag. */
writel_o(rmc, &fp->ring_rmc_tx[i].rmc);
writel_o(own, &fp->ring_rmc_tx[i].own);
ub.data_ptr++;
fp->ring_rmc_tx_index = (fp->ring_rmc_tx_index + 1) %
fp->ring_rmc_tx_size;
/* Settings for intermediate frags. */
own = FZA_RING_TX_OWN_RMC;
rmc = 0;
} while (left_len > 0);
if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
FZA_TX_BUFFER_SIZE) < dev->mtu + dev->hard_header_len) {
netif_stop_queue(dev);
pr_debug("%s: queue stopped\n", fp->name);
}
writel_o(FZA_RING_TX_OWN_RMC, &fp->ring_rmc_tx[first].own);
/* Go, go, go! */
writew_o(FZA_CONTROL_A_TX_POLL, &fp->regs->control_a);
return 0;
}
static int fza_do_recv_smt(struct fza_buffer_tx *data_ptr, int len,
u32 rmc, struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
struct fza_buffer_tx __iomem *smt_rx_ptr;
u32 own;
int i;
i = fp->ring_smt_rx_index;
own = readl_o(&fp->ring_smt_rx[i].own);
if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
return 1;
smt_rx_ptr = fp->mmio + readl_u(&fp->ring_smt_rx[i].buffer);
/* Length must be a multiple of 4 as only word writes are permitted! */
fza_writes(data_ptr, smt_rx_ptr, (len + 3) & ~3);
writel_o(rmc, &fp->ring_smt_rx[i].rmc);
writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_rx[i].own);
fp->ring_smt_rx_index =
(fp->ring_smt_rx_index + 1) % fp->ring_smt_rx_size;
/* Grab it! */
writew_o(FZA_CONTROL_A_SMT_RX_POLL, &fp->regs->control_a);
return 0;
}
static void fza_tx(struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
u32 own, rmc;
int i;
while (1) {
i = fp->ring_rmc_txd_index;
if (i == fp->ring_rmc_tx_index)
break;
own = readl_o(&fp->ring_rmc_tx[i].own);
if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC)
break;
rmc = readl_u(&fp->ring_rmc_tx[i].rmc);
/* Only process the first descriptor. */
if ((rmc & FZA_RING_TX_SOP) != 0) {
if ((rmc & FZA_RING_TX_DCC_MASK) ==
FZA_RING_TX_DCC_SUCCESS) {
int pkt_len = (rmc & FZA_RING_PBC_MASK) - 3;
/* Omit PRH. */
fp->stats.tx_packets++;
fp->stats.tx_bytes += pkt_len;
} else {
fp->stats.tx_errors++;
switch (rmc & FZA_RING_TX_DCC_MASK) {
case FZA_RING_TX_DCC_DTP_SOP:
case FZA_RING_TX_DCC_DTP:
case FZA_RING_TX_DCC_ABORT:
fp->stats.tx_aborted_errors++;
break;
case FZA_RING_TX_DCC_UNDRRUN:
fp->stats.tx_fifo_errors++;
break;
case FZA_RING_TX_DCC_PARITY:
default:
break;
}
}
}
fp->ring_rmc_txd_index = (fp->ring_rmc_txd_index + 1) %
fp->ring_rmc_tx_size;
}
if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
FZA_TX_BUFFER_SIZE) >= dev->mtu + dev->hard_header_len) {
if (fp->queue_active) {
netif_wake_queue(dev);
pr_debug("%s: queue woken\n", fp->name);
}
}
}
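/* The expression (ring_rmc_txd_index - 1 + size - ring_rmc_tx_index) % size
* used above counts the transmit descriptors still available to the host;
* one slot is deliberately left unused (the usual way to tell a full ring
* from an empty one). Multiplied by FZA_TX_BUFFER_SIZE it yields the free
* space in bytes, which is compared against a worst-case frame to decide
* when to stop or wake the queue.
*/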
static inline int fza_rx_err(struct fza_private *fp,
const u32 rmc, const u8 fc)
{
int len, min_len, max_len;
len = rmc & FZA_RING_PBC_MASK;
if (unlikely((rmc & FZA_RING_RX_BAD) != 0)) {
fp->stats.rx_errors++;
/* Check special status codes. */
if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
(FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_ALIAS)) {
if (len >= 8190)
fp->stats.rx_length_errors++;
return 1;
}
if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
(FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_CAM)) {
/* Halt the interface to trigger a reset. */
writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
readw_o(&fp->regs->control_a); /* Synchronize. */
return 1;
}
/* Check the MAC status. */
switch (rmc & FZA_RING_RX_RRR_MASK) {
case FZA_RING_RX_RRR_OK:
if ((rmc & FZA_RING_RX_CRC) != 0)
fp->stats.rx_crc_errors++;
else if ((rmc & FZA_RING_RX_FSC_MASK) == 0 ||
(rmc & FZA_RING_RX_FSB_ERR) != 0)
fp->stats.rx_frame_errors++;
return 1;
case FZA_RING_RX_RRR_SADDR:
case FZA_RING_RX_RRR_DADDR:
case FZA_RING_RX_RRR_ABORT:
/* Halt the interface to trigger a reset. */
writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
readw_o(&fp->regs->control_a); /* Synchronize. */
return 1;
case FZA_RING_RX_RRR_LENGTH:
fp->stats.rx_frame_errors++;
return 1;
default:
return 1;
}
}
/* Packet received successfully; validate the length. */
switch (fc & FDDI_FC_K_FORMAT_MASK) {
case FDDI_FC_K_FORMAT_MANAGEMENT:
if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_ASYNC)
min_len = 37;
else
min_len = 17;
break;
case FDDI_FC_K_FORMAT_LLC:
min_len = 20;
break;
default:
min_len = 17;
break;
}
max_len = 4495;
if (len < min_len || len > max_len) {
fp->stats.rx_errors++;
fp->stats.rx_length_errors++;
return 1;
}
return 0;
}
static void fza_rx(struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
struct sk_buff *skb, *newskb;
struct fza_fddihdr *frame;
dma_addr_t dma, newdma;
u32 own, rmc, buf;
int i, len;
u8 fc;
while (1) {
i = fp->ring_hst_rx_index;
own = readl_o(&fp->ring_hst_rx[i].buf0_own);
if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
break;
rmc = readl_u(&fp->ring_hst_rx[i].rmc);
skb = fp->rx_skbuff[i];
dma = fp->rx_dma[i];
/* The RMC doesn't count the preamble and the starting
* delimiter. We fix it up here for a total of 3 octets.
*/
dma_rmb();
len = (rmc & FZA_RING_PBC_MASK) + 3;
frame = (struct fza_fddihdr *)skb->data;
/* We need to get at real FC. */
dma_sync_single_for_cpu(fp->bdev,
dma +
((u8 *)&frame->hdr.fc - (u8 *)frame),
sizeof(frame->hdr.fc),
DMA_FROM_DEVICE);
fc = frame->hdr.fc;
if (fza_rx_err(fp, rmc, fc))
goto err_rx;
/* We have to 512-byte-align RX buffers... */
newskb = fza_alloc_skb_irq(dev, FZA_RX_BUFFER_SIZE + 511);
if (newskb) {
fza_skb_align(newskb, 512);
newdma = dma_map_single(fp->bdev, newskb->data,
FZA_RX_BUFFER_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(fp->bdev, newdma)) {
dev_kfree_skb_irq(newskb);
newskb = NULL;
}
}
if (newskb) {
int pkt_len = len - 7; /* Omit P, SD and FCS. */
int is_multi;
int rx_stat;
dma_unmap_single(fp->bdev, dma, FZA_RX_BUFFER_SIZE,
DMA_FROM_DEVICE);
/* Queue SMT frames to the SMT receive ring. */
if ((fc & (FDDI_FC_K_CLASS_MASK |
FDDI_FC_K_FORMAT_MASK)) ==
(FDDI_FC_K_CLASS_ASYNC |
FDDI_FC_K_FORMAT_MANAGEMENT) &&
(rmc & FZA_RING_RX_DA_MASK) !=
FZA_RING_RX_DA_PROM) {
if (fza_do_recv_smt((struct fza_buffer_tx *)
skb->data, len, rmc,
dev)) {
writel_o(FZA_CONTROL_A_SMT_RX_OVFL,
&fp->regs->control_a);
}
}
is_multi = ((frame->hdr.daddr[0] & 0x01) != 0);
skb_reserve(skb, 3); /* Skip over P and SD. */
skb_put(skb, pkt_len); /* And cut off FCS. */
skb->protocol = fddi_type_trans(skb, dev);
rx_stat = netif_rx(skb);
if (rx_stat != NET_RX_DROP) {
fp->stats.rx_packets++;
fp->stats.rx_bytes += pkt_len;
if (is_multi)
fp->stats.multicast++;
} else {
fp->stats.rx_dropped++;
}
skb = newskb;
dma = newdma;
fp->rx_skbuff[i] = skb;
fp->rx_dma[i] = dma;
} else {
fp->stats.rx_dropped++;
pr_notice("%s: memory squeeze, dropping packet\n",
fp->name);
}
err_rx:
writel_o(0, &fp->ring_hst_rx[i].rmc);
buf = (dma + 0x1000) >> 9;
writel_o(buf, &fp->ring_hst_rx[i].buffer1);
buf = dma >> 9 | FZA_RING_OWN_FZA;
writel_o(buf, &fp->ring_hst_rx[i].buf0_own);
fp->ring_hst_rx_index =
(fp->ring_hst_rx_index + 1) % fp->ring_hst_rx_size;
}
}
static void fza_tx_smt(struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
struct fza_buffer_tx __iomem *smt_tx_ptr;
int i, len;
u32 own;
while (1) {
i = fp->ring_smt_tx_index;
own = readl_o(&fp->ring_smt_tx[i].own);
if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
break;
smt_tx_ptr = fp->mmio + readl_u(&fp->ring_smt_tx[i].buffer);
len = readl_u(&fp->ring_smt_tx[i].rmc) & FZA_RING_PBC_MASK;
if (!netif_queue_stopped(dev)) {
if (dev_nit_active(dev)) {
struct fza_buffer_tx *skb_data_ptr;
struct sk_buff *skb;
/* Length must be a multiple of 4 as only word
* reads are permitted!
*/
skb = fza_alloc_skb_irq(dev, (len + 3) & ~3);
if (!skb)
goto err_no_skb; /* Drop. */
skb_data_ptr = (struct fza_buffer_tx *)
skb->data;
fza_reads(smt_tx_ptr, skb_data_ptr,
(len + 3) & ~3);
skb->dev = dev;
skb_reserve(skb, 3); /* Skip over PRH. */
skb_put(skb, len - 3);
skb_reset_network_header(skb);
dev_queue_xmit_nit(skb, dev);
dev_kfree_skb_irq(skb);
err_no_skb:
;
}
/* Queue the frame to the RMC transmit ring. */
fza_do_xmit((union fza_buffer_txp)
{ .mmio_ptr = smt_tx_ptr },
len, dev, 1);
}
writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
fp->ring_smt_tx_index =
(fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;
}
}
static void fza_uns(struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
u32 own;
int i;
while (1) {
i = fp->ring_uns_index;
own = readl_o(&fp->ring_uns[i].own);
if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
break;
if (readl_u(&fp->ring_uns[i].id) == FZA_RING_UNS_RX_OVER) {
fp->stats.rx_errors++;
fp->stats.rx_over_errors++;
}
writel_o(FZA_RING_OWN_FZA, &fp->ring_uns[i].own);
fp->ring_uns_index =
(fp->ring_uns_index + 1) % FZA_RING_UNS_SIZE;
}
}
static void fza_tx_flush(struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
u32 own;
int i;
/* Clean up the SMT TX ring. */
i = fp->ring_smt_tx_index;
do {
writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
fp->ring_smt_tx_index =
(fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;
} while (i != fp->ring_smt_tx_index);
/* Clean up the RMC TX ring. */
i = fp->ring_rmc_tx_index;
do {
own = readl_o(&fp->ring_rmc_tx[i].own);
if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC) {
u32 rmc = readl_u(&fp->ring_rmc_tx[i].rmc);
writel_u(rmc | FZA_RING_TX_DTP,
&fp->ring_rmc_tx[i].rmc);
}
fp->ring_rmc_tx_index =
(fp->ring_rmc_tx_index + 1) % fp->ring_rmc_tx_size;
} while (i != fp->ring_rmc_tx_index);
/* Done. */
writew_o(FZA_CONTROL_A_FLUSH_DONE, &fp->regs->control_a);
}
static irqreturn_t fza_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct fza_private *fp = netdev_priv(dev);
uint int_event;
/* Get interrupt events. */
int_event = readw_o(&fp->regs->int_event) & fp->int_mask;
if (int_event == 0)
return IRQ_NONE;
/* Clear the events. */
writew_u(int_event, &fp->regs->int_event);
/* Now handle the events. The order matters. */
/* Command finished interrupt. */
if ((int_event & FZA_EVENT_CMD_DONE) != 0) {
fp->irq_count_cmd_done++;
spin_lock(&fp->lock);
fp->cmd_done_flag = 1;
wake_up(&fp->cmd_done_wait);
spin_unlock(&fp->lock);
}
/* Transmit finished interrupt. */
if ((int_event & FZA_EVENT_TX_DONE) != 0) {
fp->irq_count_tx_done++;
fza_tx(dev);
}
/* Host receive interrupt. */
if ((int_event & FZA_EVENT_RX_POLL) != 0) {
fp->irq_count_rx_poll++;
fza_rx(dev);
}
/* SMT transmit interrupt. */
if ((int_event & FZA_EVENT_SMT_TX_POLL) != 0) {
fp->irq_count_smt_tx_poll++;
fza_tx_smt(dev);
}
/* Transmit ring flush request. */
if ((int_event & FZA_EVENT_FLUSH_TX) != 0) {
fp->irq_count_flush_tx++;
fza_tx_flush(dev);
}
/* Link status change interrupt. */
if ((int_event & FZA_EVENT_LINK_ST_CHG) != 0) {
uint status;
fp->irq_count_link_st_chg++;
status = readw_u(&fp->regs->status);
if (FZA_STATUS_GET_LINK(status) == FZA_LINK_ON) {
netif_carrier_on(dev);
pr_info("%s: link available\n", fp->name);
} else {
netif_carrier_off(dev);
pr_info("%s: link unavailable\n", fp->name);
}
}
/* Unsolicited event interrupt. */
if ((int_event & FZA_EVENT_UNS_POLL) != 0) {
fp->irq_count_uns_poll++;
fza_uns(dev);
}
/* State change interrupt. */
if ((int_event & FZA_EVENT_STATE_CHG) != 0) {
uint status, state;
fp->irq_count_state_chg++;
status = readw_u(&fp->regs->status);
state = FZA_STATUS_GET_STATE(status);
pr_debug("%s: state change: %x\n", fp->name, state);
switch (state) {
case FZA_STATE_RESET:
break;
case FZA_STATE_UNINITIALIZED:
netif_carrier_off(dev);
del_timer_sync(&fp->reset_timer);
fp->ring_cmd_index = 0;
fp->ring_uns_index = 0;
fp->ring_rmc_tx_index = 0;
fp->ring_rmc_txd_index = 0;
fp->ring_hst_rx_index = 0;
fp->ring_smt_tx_index = 0;
fp->ring_smt_rx_index = 0;
if (fp->state > state) {
pr_info("%s: OK\n", fp->name);
fza_cmd_send(dev, FZA_RING_CMD_INIT);
}
break;
case FZA_STATE_INITIALIZED:
if (fp->state > state) {
fza_set_rx_mode(dev);
fza_cmd_send(dev, FZA_RING_CMD_PARAM);
}
break;
case FZA_STATE_RUNNING:
case FZA_STATE_MAINTENANCE:
fp->state = state;
fza_rx_init(fp);
fp->queue_active = 1;
netif_wake_queue(dev);
pr_debug("%s: queue woken\n", fp->name);
break;
case FZA_STATE_HALTED:
fp->queue_active = 0;
netif_stop_queue(dev);
pr_debug("%s: queue stopped\n", fp->name);
del_timer_sync(&fp->reset_timer);
pr_warn("%s: halted, reason: %x\n", fp->name,
FZA_STATUS_GET_HALT(status));
fza_regs_dump(fp);
pr_info("%s: resetting the board...\n", fp->name);
fza_do_reset(fp);
fp->timer_state = 0;
fp->reset_timer.expires = jiffies + 45 * HZ;
add_timer(&fp->reset_timer);
break;
default:
pr_warn("%s: undefined state: %x\n", fp->name, state);
break;
}
spin_lock(&fp->lock);
fp->state_chg_flag = 1;
wake_up(&fp->state_chg_wait);
spin_unlock(&fp->lock);
}
return IRQ_HANDLED;
}
static void fza_reset_timer(struct timer_list *t)
{
struct fza_private *fp = from_timer(fp, t, reset_timer);
if (!fp->timer_state) {
pr_err("%s: RESET timed out!\n", fp->name);
pr_info("%s: trying harder...\n", fp->name);
/* Assert the board reset. */
writew_o(FZA_RESET_INIT, &fp->regs->reset);
readw_o(&fp->regs->reset); /* Synchronize. */
fp->timer_state = 1;
fp->reset_timer.expires = jiffies + HZ;
} else {
/* Clear the board reset. */
writew_u(FZA_RESET_CLR, &fp->regs->reset);
/* Enable all interrupt events we handle. */
writew_o(fp->int_mask, &fp->regs->int_mask);
readw_o(&fp->regs->int_mask); /* Synchronize. */
fp->timer_state = 0;
fp->reset_timer.expires = jiffies + 45 * HZ;
}
add_timer(&fp->reset_timer);
}
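/* The reset timer above alternates between two phases: with timer_state 0
* it re-asserts the board reset for one second, with timer_state 1 it
* releases the reset, re-enables the interrupt events and then waits up to
* 45 seconds for the resulting state change. A successful reset is handled
* in the STATE_CHG branch of the interrupt handler, which deletes the
* timer.
*/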
static int fza_set_mac_address(struct net_device *dev, void *addr)
{
return -EOPNOTSUPP;
}
static netdev_tx_t fza_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
unsigned int old_mask, new_mask;
int ret;
u8 fc;
skb_push(skb, 3); /* Make room for PRH. */
/* Decode FC to set PRH. */
fc = skb->data[3];
skb->data[0] = 0;
skb->data[1] = 0;
skb->data[2] = FZA_PRH2_NORMAL;
if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_SYNC)
skb->data[0] |= FZA_PRH0_FRAME_SYNC;
switch (fc & FDDI_FC_K_FORMAT_MASK) {
case FDDI_FC_K_FORMAT_MANAGEMENT:
if ((fc & FDDI_FC_K_CONTROL_MASK) == 0) {
/* Token. */
skb->data[0] |= FZA_PRH0_TKN_TYPE_IMM;
skb->data[1] |= FZA_PRH1_TKN_SEND_NONE;
} else {
/* SMT or MAC. */
skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
skb->data[1] |= FZA_PRH1_TKN_SEND_UNR;
}
skb->data[1] |= FZA_PRH1_CRC_NORMAL;
break;
case FDDI_FC_K_FORMAT_LLC:
case FDDI_FC_K_FORMAT_FUTURE:
skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
skb->data[1] |= FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR;
break;
case FDDI_FC_K_FORMAT_IMPLEMENTOR:
skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
skb->data[1] |= FZA_PRH1_TKN_SEND_ORIG;
break;
}
/* SMT transmit interrupts may sneak frames into the RMC
* transmit ring. We disable them while queueing a frame
* to maintain consistency.
*/
old_mask = fp->int_mask;
new_mask = old_mask & ~FZA_MASK_SMT_TX_POLL;
writew_u(new_mask, &fp->regs->int_mask);
readw_o(&fp->regs->int_mask); /* Synchronize. */
fp->int_mask = new_mask;
ret = fza_do_xmit((union fza_buffer_txp)
{ .data_ptr = (struct fza_buffer_tx *)skb->data },
skb->len, dev, 0);
fp->int_mask = old_mask;
writew_u(fp->int_mask, &fp->regs->int_mask);
if (ret) {
/* Probably an SMT packet filled the remaining space,
* so just stop the queue, but don't report it as an error.
*/
netif_stop_queue(dev);
pr_debug("%s: queue stopped\n", fp->name);
fp->stats.tx_dropped++;
}
dev_kfree_skb(skb);
return ret;
}
static int fza_open(struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
struct fza_ring_cmd __iomem *ring;
struct sk_buff *skb;
unsigned long flags;
dma_addr_t dma;
int ret, i;
u32 stat;
long t;
for (i = 0; i < FZA_RING_RX_SIZE; i++) {
/* We have to 512-byte-align RX buffers... */
skb = fza_alloc_skb(dev, FZA_RX_BUFFER_SIZE + 511);
if (skb) {
fza_skb_align(skb, 512);
dma = dma_map_single(fp->bdev, skb->data,
FZA_RX_BUFFER_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(fp->bdev, dma)) {
dev_kfree_skb(skb);
skb = NULL;
}
}
if (!skb) {
for (--i; i >= 0; i--) {
dma_unmap_single(fp->bdev, fp->rx_dma[i],
FZA_RX_BUFFER_SIZE,
DMA_FROM_DEVICE);
dev_kfree_skb(fp->rx_skbuff[i]);
fp->rx_dma[i] = 0;
fp->rx_skbuff[i] = NULL;
}
return -ENOMEM;
}
fp->rx_skbuff[i] = skb;
fp->rx_dma[i] = dma;
}
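	/* Issue the INIT command to bring the interface up. */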
ret = fza_init_send(dev, NULL);
if (ret != 0)
return ret;
/* Purger and Beacon multicasts need to be supplied before PARAM. */
fza_set_rx_mode(dev);
spin_lock_irqsave(&fp->lock, flags);
fp->cmd_done_flag = 0;
ring = fza_cmd_send(dev, FZA_RING_CMD_PARAM);
spin_unlock_irqrestore(&fp->lock, flags);
if (!ring)
return -ENOBUFS;
t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
if (fp->cmd_done_flag == 0) {
pr_err("%s: PARAM command timed out!, state %x\n", fp->name,
FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
return -EIO;
}
stat = readl_u(&ring->stat);
if (stat != FZA_RING_STAT_SUCCESS) {
pr_err("%s: PARAM command failed!, status %02x, state %x\n",
fp->name, stat,
FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
return -EIO;
}
pr_debug("%s: PARAM: %lums elapsed\n", fp->name,
(3 * HZ - t) * 1000 / HZ);
return 0;
}
static int fza_close(struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
unsigned long flags;
uint state;
long t;
int i;
netif_stop_queue(dev);
pr_debug("%s: queue stopped\n", fp->name);
del_timer_sync(&fp->reset_timer);
spin_lock_irqsave(&fp->lock, flags);
fp->state = FZA_STATE_UNINITIALIZED;
fp->state_chg_flag = 0;
/* Shut the interface down. */
writew_o(FZA_CONTROL_A_SHUT, &fp->regs->control_a);
readw_o(&fp->regs->control_a); /* Synchronize. */
spin_unlock_irqrestore(&fp->lock, flags);
/* DEC says SHUT needs up to 10 seconds to complete. */
t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
15 * HZ);
state = FZA_STATUS_GET_STATE(readw_o(&fp->regs->status));
if (fp->state_chg_flag == 0) {
pr_err("%s: SHUT timed out!, state %x\n", fp->name, state);
return -EIO;
}
if (state != FZA_STATE_UNINITIALIZED) {
pr_err("%s: SHUT failed!, state %x\n", fp->name, state);
return -EIO;
}
pr_debug("%s: SHUT: %lums elapsed\n", fp->name,
(15 * HZ - t) * 1000 / HZ);
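	/* Unmap and free the host receive ring buffers. */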
for (i = 0; i < FZA_RING_RX_SIZE; i++)
if (fp->rx_skbuff[i]) {
dma_unmap_single(fp->bdev, fp->rx_dma[i],
FZA_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb(fp->rx_skbuff[i]);
fp->rx_dma[i] = 0;
fp->rx_skbuff[i] = NULL;
}
return 0;
}
static struct net_device_stats *fza_get_stats(struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
return &fp->stats;
}
static int fza_probe(struct device *bdev)
{
static const struct net_device_ops netdev_ops = {
.ndo_open = fza_open,
.ndo_stop = fza_close,
.ndo_start_xmit = fza_start_xmit,
.ndo_set_rx_mode = fza_set_rx_mode,
.ndo_set_mac_address = fza_set_mac_address,
.ndo_get_stats = fza_get_stats,
};
static int version_printed;
char rom_rev[4], fw_rev[4], rmc_rev[4];
struct tc_dev *tdev = to_tc_dev(bdev);
struct fza_cmd_init __iomem *init;
resource_size_t start, len;
struct net_device *dev;
struct fza_private *fp;
uint smt_ver, pmd_type;
void __iomem *mmio;
uint hw_addr[2];
int ret, i;
if (!version_printed) {
pr_info("%s", version);
version_printed = 1;
}
dev = alloc_fddidev(sizeof(*fp));
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, bdev);
fp = netdev_priv(dev);
dev_set_drvdata(bdev, dev);
fp->bdev = bdev;
fp->name = dev_name(bdev);
/* Request the I/O MEM resource. */
start = tdev->resource.start;
len = tdev->resource.end - start + 1;
if (!request_mem_region(start, len, dev_name(bdev))) {
pr_err("%s: cannot reserve MMIO region\n", fp->name);
ret = -EBUSY;
goto err_out_kfree;
}
/* MMIO mapping setup. */
mmio = ioremap(start, len);
if (!mmio) {
pr_err("%s: cannot map MMIO\n", fp->name);
ret = -ENOMEM;
goto err_out_resource;
}
/* Initialize the new device structure. */
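	/* Check the requested loopback mode; fall back to normal operation
	 * if the value is not recognized.
	 */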
switch (loopback) {
case FZA_LOOP_NORMAL:
case FZA_LOOP_INTERN:
case FZA_LOOP_EXTERN:
break;
default:
loopback = FZA_LOOP_NORMAL;
}
fp->mmio = mmio;
dev->irq = tdev->interrupt;
pr_info("%s: DEC FDDIcontroller 700 or 700-C at 0x%08llx, irq %d\n",
fp->name, (long long)tdev->resource.start, dev->irq);
pr_debug("%s: mapped at: 0x%p\n", fp->name, mmio);
fp->regs = mmio + FZA_REG_BASE;
fp->ring_cmd = mmio + FZA_RING_CMD;
fp->ring_uns = mmio + FZA_RING_UNS;
init_waitqueue_head(&fp->state_chg_wait);
init_waitqueue_head(&fp->cmd_done_wait);
spin_lock_init(&fp->lock);
fp->int_mask = FZA_MASK_NORMAL;
timer_setup(&fp->reset_timer, fza_reset_timer, 0);
/* Sanitize the board. */
fza_regs_dump(fp);
fza_do_shutdown(fp);
ret = request_irq(dev->irq, fza_interrupt, IRQF_SHARED, fp->name, dev);
if (ret != 0) {
pr_err("%s: unable to get IRQ %d!\n", fp->name, dev->irq);
goto err_out_map;
}
/* Enable the driver mode. */
writew_o(FZA_CONTROL_B_DRIVER, &fp->regs->control_b);
/* For some reason transmit done interrupts can trigger during
* reset. This avoids a division error in the handler.
*/
fp->ring_rmc_tx_size = FZA_RING_TX_SIZE;
ret = fza_reset(fp);
if (ret != 0)
goto err_out_irq;
ret = fza_init_send(dev, &init);
if (ret != 0)
goto err_out_irq;
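	/* Fetch the hardware address and the revision strings from the INIT
	 * response block.
	 */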
fza_reads(&init->hw_addr, &hw_addr, sizeof(hw_addr));
dev_addr_set(dev, (u8 *)&hw_addr);
fza_reads(&init->rom_rev, &rom_rev, sizeof(rom_rev));
fza_reads(&init->fw_rev, &fw_rev, sizeof(fw_rev));
fza_reads(&init->rmc_rev, &rmc_rev, sizeof(rmc_rev));
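	/* Trim trailing spaces from the revision strings. */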
for (i = 3; i >= 0 && rom_rev[i] == ' '; i--)
rom_rev[i] = 0;
for (i = 3; i >= 0 && fw_rev[i] == ' '; i--)
fw_rev[i] = 0;
for (i = 3; i >= 0 && rmc_rev[i] == ' '; i--)
rmc_rev[i] = 0;
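	/* Record the ring locations and operating parameters returned by
	 * the INIT command.
	 */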
fp->ring_rmc_tx = mmio + readl_u(&init->rmc_tx);
fp->ring_rmc_tx_size = readl_u(&init->rmc_tx_size);
fp->ring_hst_rx = mmio + readl_u(&init->hst_rx);
fp->ring_hst_rx_size = readl_u(&init->hst_rx_size);
fp->ring_smt_tx = mmio + readl_u(&init->smt_tx);
fp->ring_smt_tx_size = readl_u(&init->smt_tx_size);
fp->ring_smt_rx = mmio + readl_u(&init->smt_rx);
fp->ring_smt_rx_size = readl_u(&init->smt_rx_size);
fp->buffer_tx = mmio + FZA_TX_BUFFER_ADDR(readl_u(&init->rmc_tx));
fp->t_max = readl_u(&init->def_t_max);
fp->t_req = readl_u(&init->def_t_req);
fp->tvx = readl_u(&init->def_tvx);
fp->lem_threshold = readl_u(&init->lem_threshold);
fza_reads(&init->def_station_id, &fp->station_id,
sizeof(fp->station_id));
fp->rtoken_timeout = readl_u(&init->rtoken_timeout);
fp->ring_purger = readl_u(&init->ring_purger);
smt_ver = readl_u(&init->smt_ver);
pmd_type = readl_u(&init->pmd_type);
pr_debug("%s: INIT parameters:\n", fp->name);
pr_debug(" tx_mode: %u\n", readl_u(&init->tx_mode));
pr_debug(" hst_rx_size: %u\n", readl_u(&init->hst_rx_size));
pr_debug(" rmc_rev: %.4s\n", rmc_rev);
pr_debug(" rom_rev: %.4s\n", rom_rev);
pr_debug(" fw_rev: %.4s\n", fw_rev);
pr_debug(" mop_type: %u\n", readl_u(&init->mop_type));
pr_debug(" hst_rx: 0x%08x\n", readl_u(&init->hst_rx));
pr_debug(" rmc_tx: 0x%08x\n", readl_u(&init->rmc_tx));
pr_debug(" rmc_tx_size: %u\n", readl_u(&init->rmc_tx_size));
pr_debug(" smt_tx: 0x%08x\n", readl_u(&init->smt_tx));
pr_debug(" smt_tx_size: %u\n", readl_u(&init->smt_tx_size));
pr_debug(" smt_rx: 0x%08x\n", readl_u(&init->smt_rx));
pr_debug(" smt_rx_size: %u\n", readl_u(&init->smt_rx_size));
/* TC systems are always LE, so don't bother swapping. */
pr_debug(" hw_addr: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
(readl_u(&init->hw_addr[0]) >> 0) & 0xff,
(readl_u(&init->hw_addr[0]) >> 8) & 0xff,
(readl_u(&init->hw_addr[0]) >> 16) & 0xff,
(readl_u(&init->hw_addr[0]) >> 24) & 0xff,
(readl_u(&init->hw_addr[1]) >> 0) & 0xff,
(readl_u(&init->hw_addr[1]) >> 8) & 0xff,
(readl_u(&init->hw_addr[1]) >> 16) & 0xff,
(readl_u(&init->hw_addr[1]) >> 24) & 0xff);
pr_debug(" def_t_req: %u\n", readl_u(&init->def_t_req));
pr_debug(" def_tvx: %u\n", readl_u(&init->def_tvx));
pr_debug(" def_t_max: %u\n", readl_u(&init->def_t_max));
pr_debug(" lem_threshold: %u\n", readl_u(&init->lem_threshold));
/* Don't bother swapping, see above. */
pr_debug(" def_station_id: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
(readl_u(&init->def_station_id[0]) >> 0) & 0xff,
(readl_u(&init->def_station_id[0]) >> 8) & 0xff,
(readl_u(&init->def_station_id[0]) >> 16) & 0xff,
(readl_u(&init->def_station_id[0]) >> 24) & 0xff,
(readl_u(&init->def_station_id[1]) >> 0) & 0xff,
(readl_u(&init->def_station_id[1]) >> 8) & 0xff,
(readl_u(&init->def_station_id[1]) >> 16) & 0xff,
(readl_u(&init->def_station_id[1]) >> 24) & 0xff);
pr_debug(" pmd_type_alt: %u\n", readl_u(&init->pmd_type_alt));
pr_debug(" smt_ver: %u\n", readl_u(&init->smt_ver));
pr_debug(" rtoken_timeout: %u\n", readl_u(&init->rtoken_timeout));
pr_debug(" ring_purger: %u\n", readl_u(&init->ring_purger));
pr_debug(" smt_ver_max: %u\n", readl_u(&init->smt_ver_max));
pr_debug(" smt_ver_min: %u\n", readl_u(&init->smt_ver_min));
pr_debug(" pmd_type: %u\n", readl_u(&init->pmd_type));
pr_info("%s: model %s, address %pMF\n",
fp->name,
pmd_type == FZA_PMD_TYPE_TW ?
"700-C (DEFZA-CA), ThinWire PMD selected" :
pmd_type == FZA_PMD_TYPE_STP ?
"700-C (DEFZA-CA), STP PMD selected" :
"700 (DEFZA-AA), MMF PMD",
dev->dev_addr);
pr_info("%s: ROM rev. %.4s, firmware rev. %.4s, RMC rev. %.4s, "
"SMT ver. %u\n", fp->name, rom_rev, fw_rev, rmc_rev, smt_ver);
/* Now that we fetched initial parameters just shut the interface
* until opened.
*/
ret = fza_close(dev);
if (ret != 0)
goto err_out_irq;
/* The FZA-specific entries in the device structure. */
dev->netdev_ops = &netdev_ops;
ret = register_netdev(dev);
if (ret != 0)
goto err_out_irq;
pr_info("%s: registered as %s\n", fp->name, dev->name);
fp->name = (const char *)dev->name;
get_device(bdev);
return 0;
err_out_irq:
del_timer_sync(&fp->reset_timer);
fza_do_shutdown(fp);
free_irq(dev->irq, dev);
err_out_map:
iounmap(mmio);
err_out_resource:
release_mem_region(start, len);
err_out_kfree:
pr_err("%s: initialization failure, aborting!\n", fp->name);
free_netdev(dev);
return ret;
}
static int fza_remove(struct device *bdev)
{
struct net_device *dev = dev_get_drvdata(bdev);
struct fza_private *fp = netdev_priv(dev);
struct tc_dev *tdev = to_tc_dev(bdev);
resource_size_t start, len;
put_device(bdev);
unregister_netdev(dev);
del_timer_sync(&fp->reset_timer);
fza_do_shutdown(fp);
free_irq(dev->irq, dev);
iounmap(fp->mmio);
start = tdev->resource.start;
len = tdev->resource.end - start + 1;
release_mem_region(start, len);
free_netdev(dev);
return 0;
}
static struct tc_device_id const fza_tc_table[] = {
{ "DEC ", "PMAF-AA " },
{ }
};
MODULE_DEVICE_TABLE(tc, fza_tc_table);
static struct tc_driver fza_driver = {
.id_table = fza_tc_table,
.driver = {
.name = "defza",
.bus = &tc_bus_type,
.probe = fza_probe,
.remove = fza_remove,
},
};
static int fza_init(void)
{
return tc_register_driver(&fza_driver);
}
static void fza_exit(void)
{
tc_unregister_driver(&fza_driver);
}
module_init(fza_init);
module_exit(fza_exit);
| linux-master | drivers/net/fddi/defza.c |
/*
* File Name:
* defxx.c
*
* Copyright Information:
* Copyright Digital Equipment Corporation 1996.
*
* This software may be used and distributed according to the terms of
* the GNU General Public License, incorporated herein by reference.
*
* Abstract:
* A Linux device driver supporting the Digital Equipment Corporation
* FDDI TURBOchannel, EISA and PCI controller families. Supported
* adapters include:
*
* DEC FDDIcontroller/TURBOchannel (DEFTA)
* DEC FDDIcontroller/EISA (DEFEA)
* DEC FDDIcontroller/PCI (DEFPA)
*
* The original author:
* LVS Lawrence V. Stefani <[email protected]>
*
* Maintainers:
* macro Maciej W. Rozycki <[email protected]>
*
* Credits:
* I'd like to thank Patricia Cross for helping me get started with
* Linux, David Davies for a lot of help upgrading and configuring
* my development system and for answering many OS and driver
* development questions, and Alan Cox for recommendations and
* integration help on getting FDDI support into Linux. LVS
*
* Driver Architecture:
* The driver architecture is largely based on previous driver work
* for other operating systems. The upper edge interface and
* functions were largely taken from existing Linux device drivers
* such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C
* driver.
*
* Adapter Probe -
* The driver scans for supported EISA adapters by reading the
* SLOT ID register for each EISA slot and making a match
* against the expected value.
*
* Bus-Specific Initialization -
* This driver currently supports both EISA and PCI controller
 *	families.  While the custom DMA chip and FDDI logic are similar
* or identical, the bus logic is very different. After
 *	initialization, the only bus-specific difference is in how the
* driver enables and disables interrupts. Other than that, the
* run-time critical code behaves the same on both families.
* It's important to note that both adapter families are configured
* to I/O map, rather than memory map, the adapter registers.
*
* Driver Open/Close -
* In the driver open routine, the driver ISR (interrupt service
* routine) is registered and the adapter is brought to an
* operational state. In the driver close routine, the opposite
* occurs; the driver ISR is deregistered and the adapter is
* brought to a safe, but closed state. Users may use consecutive
* commands to bring the adapter up and down as in the following
* example:
* ifconfig fddi0 up
* ifconfig fddi0 down
* ifconfig fddi0 up
*
* Driver Shutdown -
* Apparently, there is no shutdown or halt routine support under
* Linux. This routine would be called during "reboot" or
* "shutdown" to allow the driver to place the adapter in a safe
* state before a warm reboot occurs. To be really safe, the user
 *	should close the adapter before shutdown (e.g. ifconfig fddi0 down)
* to ensure that the adapter DMA engine is taken off-line. However,
* the current driver code anticipates this problem and always issues
* a soft reset of the adapter at the beginning of driver initialization.
* A future driver enhancement in this area may occur in 2.1.X where
* Alan indicated that a shutdown handler may be implemented.
*
* Interrupt Service Routine -
* The driver supports shared interrupts, so the ISR is registered for
* each board with the appropriate flag and the pointer to that board's
* device structure. This provides the context during interrupt
* processing to support shared interrupts and multiple boards.
*
* Interrupt enabling/disabling can occur at many levels. At the host
* end, you can disable system interrupts, or disable interrupts at the
* PIC (on Intel systems). Across the bus, both EISA and PCI adapters
* have a bus-logic chip interrupt enable/disable as well as a DMA
* controller interrupt enable/disable.
*
* The driver currently enables and disables adapter interrupts at the
* bus-logic chip and assumes that Linux will take care of clearing or
* acknowledging any host-based interrupt chips.
*
* Control Functions -
* Control functions are those used to support functions such as adding
* or deleting multicast addresses, enabling or disabling packet
* reception filters, or other custom/proprietary commands. Presently,
* the driver supports the "get statistics", "set multicast list", and
* "set mac address" functions defined by Linux. A list of possible
* enhancements include:
*
* - Custom ioctl interface for executing port interface commands
* - Custom ioctl interface for adding unicast addresses to
* adapter CAM (to support bridge functions).
* - Custom ioctl interface for supporting firmware upgrades.
*
* Hardware (port interface) Support Routines -
* The driver function names that start with "dfx_hw_" represent
* low-level port interface routines that are called frequently. They
* include issuing a DMA or port control command to the adapter,
* resetting the adapter, or reading the adapter state. Since the
* driver initialization and run-time code must make calls into the
* port interface, these routines were written to be as generic and
* usable as possible.
*
* Receive Path -
* The adapter DMA engine supports a 256 entry receive descriptor block
* of which up to 255 entries can be used at any given time. The
* architecture is a standard producer, consumer, completion model in
* which the driver "produces" receive buffers to the adapter, the
* adapter "consumes" the receive buffers by DMAing incoming packet data,
* and the driver "completes" the receive buffers by servicing the
* incoming packet, then "produces" a new buffer and starts the cycle
 *	again.  Receive buffers can be split into up to 16 fragments
* (descriptor entries). For simplicity, this driver posts
* single-fragment receive buffers of 4608 bytes, then allocates a
* sk_buff, copies the data, then reposts the buffer. To reduce CPU
* utilization, a better approach would be to pass up the receive
* buffer (no extra copy) then allocate and post a replacement buffer.
* This is a performance enhancement that should be looked into at
* some point.
*
* Transmit Path -
* Like the receive path, the adapter DMA engine supports a 256 entry
* transmit descriptor block of which up to 255 entries can be used at
 *	any given time.  Transmit buffers can be split into up to 255
* fragments (descriptor entries). This driver always posts one
* fragment per transmit packet request.
*
* The fragment contains the entire packet from FC to end of data.
* Before posting the buffer to the adapter, the driver sets a three-byte
* packet request header (PRH) which is required by the Motorola MAC chip
* used on the adapters. The PRH tells the MAC the type of token to
* receive/send, whether or not to generate and append the CRC, whether
* synchronous or asynchronous framing is used, etc. Since the PRH
* definition is not necessarily consistent across all FDDI chipsets,
* the driver, rather than the common FDDI packet handler routines,
* sets these bytes.
*
* To reduce the amount of descriptor fetches needed per transmit request,
* the driver takes advantage of the fact that there are at least three
* bytes available before the skb->data field on the outgoing transmit
* request. This is guaranteed by having fddi_setup() in net_init.c set
* dev->hard_header_len to 24 bytes. 21 bytes accounts for the largest
* header in an 802.2 SNAP frame. The other 3 bytes are the extra "pad"
* bytes which we'll use to store the PRH.
*
* There's a subtle advantage to adding these pad bytes to the
* hard_header_len, it ensures that the data portion of the packet for
* an 802.2 SNAP frame is longword aligned. Other FDDI driver
* implementations may not need the extra padding and can start copying
* or DMAing directly from the FC byte which starts at skb->data. Should
* another driver implementation need ADDITIONAL padding, the net_init.c
* module should be updated and dev->hard_header_len should be increased.
* NOTE: To maintain the alignment on the data portion of the packet,
* dev->hard_header_len should always be evenly divisible by 4 and at
* least 24 bytes in size.
*
* Modification History:
* Date Name Description
* 16-Aug-96 LVS Created.
* 20-Aug-96 LVS Updated dfx_probe so that version information
* string is only displayed if 1 or more cards are
* found. Changed dfx_rcv_queue_process to copy
* 3 NULL bytes before FC to ensure that data is
* longword aligned in receive buffer.
* 09-Sep-96 LVS Updated dfx_ctl_set_multicast_list to enable
* LLC group promiscuous mode if multicast list
* is too large. LLC individual/group promiscuous
* mode is now disabled if IFF_PROMISC flag not set.
* dfx_xmt_queue_pkt no longer checks for NULL skb
* on Alan Cox recommendation. Added node address
* override support.
* 12-Sep-96 LVS Reset current address to factory address during
* device open. Updated transmit path to post a
* single fragment which includes PRH->end of data.
* Mar 2000 AC Did various cleanups for 2.3.x
* Jun 2000 jgarzik PCI and resource alloc cleanups
* Jul 2000 tjeerd Much cleanup and some bug fixes
* Sep 2000 tjeerd Fix leak on unload, cosmetic code cleanup
* Feb 2001 Skb allocation fixes
* Feb 2001 davej PCI enable cleanups.
* 04 Aug 2003 macro Converted to the DMA API.
* 14 Aug 2004 macro Fix device names reported.
* 14 Jun 2005 macro Use irqreturn_t.
* 23 Oct 2006 macro Big-endian host support.
* 14 Dec 2006 macro TURBOchannel support.
* 01 Jul 2014 macro Fixes for DMA on 64-bit hosts.
* 10 Mar 2021 macro Dynamic MMIO vs port I/O.
*/
/* Include files */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/eisa.h>
#include <linux/errno.h>
#include <linux/fddidevice.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tc.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include "defxx.h"
/* Version information string should be updated prior to each new release! */
#define DRV_NAME "defxx"
#define DRV_VERSION "v1.12"
#define DRV_RELDATE "2021/03/10"
static const char version[] =
DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
" Lawrence V. Stefani and others\n";
#define DYNAMIC_BUFFERS 1
#define SKBUFF_RX_COPYBREAK 200
/*
* NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte
* alignment for compatibility with old EISA boards.
*/
#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
#ifdef CONFIG_EISA
#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
#else
#define DFX_BUS_EISA(dev) 0
#endif
#ifdef CONFIG_TC
#define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
#else
#define DFX_BUS_TC(dev) 0
#endif
#if defined(CONFIG_EISA) || defined(CONFIG_PCI)
#define dfx_use_mmio bp->mmio
#else
#define dfx_use_mmio true
#endif
/* Define module-wide (static) routines */
static void dfx_bus_init(struct net_device *dev);
static void dfx_bus_uninit(struct net_device *dev);
static void dfx_bus_config_check(DFX_board_t *bp);
static int dfx_driver_init(struct net_device *dev,
const char *print_name,
resource_size_t bar_start);
static int dfx_adap_init(DFX_board_t *bp, int get_buffers);
static int dfx_open(struct net_device *dev);
static int dfx_close(struct net_device *dev);
static void dfx_int_pr_halt_id(DFX_board_t *bp);
static void dfx_int_type_0_process(DFX_board_t *bp);
static void dfx_int_common(struct net_device *dev);
static irqreturn_t dfx_interrupt(int irq, void *dev_id);
static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
static void dfx_ctl_set_multicast_list(struct net_device *dev);
static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
static int dfx_ctl_update_cam(DFX_board_t *bp);
static int dfx_ctl_update_filters(DFX_board_t *bp);
static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
static int dfx_hw_adap_state_rd(DFX_board_t *bp);
static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
static void dfx_rcv_queue_process(DFX_board_t *bp);
#ifdef DYNAMIC_BUFFERS
static void dfx_rcv_flush(DFX_board_t *bp);
#else
static inline void dfx_rcv_flush(DFX_board_t *bp) {}
#endif
static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
struct net_device *dev);
static int dfx_xmt_done(DFX_board_t *bp);
static void dfx_xmt_flush(DFX_board_t *bp);
/* Define module-wide (static) variables */
static struct pci_driver dfx_pci_driver;
static struct eisa_driver dfx_eisa_driver;
static struct tc_driver dfx_tc_driver;
/*
* =======================
* = dfx_port_write_long =
* = dfx_port_read_long =
* =======================
*
* Overview:
* Routines for reading and writing values from/to adapter
*
* Returns:
* None
*
* Arguments:
* bp - pointer to board information
* offset - register offset from base I/O address
* data - for dfx_port_write_long, this is a value to write;
* for dfx_port_read_long, this is a pointer to store
* the read value
*
* Functional Description:
* These routines perform the correct operation to read or write
* the adapter register.
*
* EISA port block base addresses are based on the slot number in which the
* controller is installed. For example, if the EISA controller is installed
* in slot 4, the port block base address is 0x4000. If the controller is
* installed in slot 2, the port block base address is 0x2000, and so on.
* This port block can be used to access PDQ, ESIC, and DEFEA on-board
* registers using the register offsets defined in DEFXX.H.
*
* PCI port block base addresses are assigned by the PCI BIOS or system
* firmware. There is one 128 byte port block which can be accessed. It
* allows for I/O mapping of both PDQ and PFI registers using the register
* offsets defined in DEFXX.H.
*
* Return Codes:
* None
*
* Assumptions:
* bp->base is a valid base I/O address for this adapter.
* offset is a valid register offset for this adapter.
*
* Side Effects:
* Rather than produce macros for these functions, these routines
* are defined using "inline" to ensure that the compiler will
* generate inline code and not waste a procedure call and return.
* This provides all the benefits of macros, but with the
* advantage of strict data type checking.
*/
static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
{
writel(data, bp->base.mem + offset);
mb();
}
static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
{
outl(data, bp->base.port + offset);
}
static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
{
struct device __maybe_unused *bdev = bp->bus_dev;
if (dfx_use_mmio)
dfx_writel(bp, offset, data);
else
dfx_outl(bp, offset, data);
}
static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
{
mb();
*data = readl(bp->base.mem + offset);
}
static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
{
*data = inl(bp->base.port + offset);
}
static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
{
struct device __maybe_unused *bdev = bp->bus_dev;
if (dfx_use_mmio)
dfx_readl(bp, offset, data);
else
dfx_inl(bp, offset, data);
}
/*
* ================
* = dfx_get_bars =
* ================
*
* Overview:
* Retrieves the address ranges used to access control and status
* registers.
*
* Returns:
* None
*
* Arguments:
* bp - pointer to board information
* bar_start - pointer to store the start addresses
* bar_len - pointer to store the lengths of the areas
*
* Assumptions:
* I am sure there are some.
*
* Side Effects:
* None
*/
static void dfx_get_bars(DFX_board_t *bp,
resource_size_t *bar_start, resource_size_t *bar_len)
{
struct device *bdev = bp->bus_dev;
int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
if (dfx_bus_pci) {
int num = dfx_use_mmio ? 0 : 1;
bar_start[0] = pci_resource_start(to_pci_dev(bdev), num);
bar_len[0] = pci_resource_len(to_pci_dev(bdev), num);
bar_start[2] = bar_start[1] = 0;
bar_len[2] = bar_len[1] = 0;
}
if (dfx_bus_eisa) {
unsigned long base_addr = to_eisa_device(bdev)->base_addr;
resource_size_t bar_lo;
resource_size_t bar_hi;
if (dfx_use_mmio) {
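			/* The MMIO decode window is programmed into byte-wide
			 * address compare registers; assemble them into the low
			 * and high bounds of the window.
			 */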
bar_lo = inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_2);
bar_lo <<= 8;
bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_1);
bar_lo <<= 8;
bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_0);
bar_lo <<= 8;
bar_start[0] = bar_lo;
bar_hi = inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_2);
bar_hi <<= 8;
bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_1);
bar_hi <<= 8;
bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_0);
bar_hi <<= 8;
bar_len[0] = ((bar_hi - bar_lo) | PI_MEM_ADD_MASK_M) +
1;
} else {
bar_start[0] = base_addr;
bar_len[0] = PI_ESIC_K_CSR_IO_LEN;
}
bar_start[1] = base_addr + PI_DEFEA_K_BURST_HOLDOFF;
bar_len[1] = PI_ESIC_K_BURST_HOLDOFF_LEN;
bar_start[2] = base_addr + PI_ESIC_K_ESIC_CSR;
bar_len[2] = PI_ESIC_K_ESIC_CSR_LEN;
}
if (dfx_bus_tc) {
bar_start[0] = to_tc_dev(bdev)->resource.start +
PI_TC_K_CSR_OFFSET;
bar_len[0] = PI_TC_K_CSR_LEN;
bar_start[2] = bar_start[1] = 0;
bar_len[2] = bar_len[1] = 0;
}
}
static const struct net_device_ops dfx_netdev_ops = {
.ndo_open = dfx_open,
.ndo_stop = dfx_close,
.ndo_start_xmit = dfx_xmt_queue_pkt,
.ndo_get_stats = dfx_ctl_get_stats,
.ndo_set_rx_mode = dfx_ctl_set_multicast_list,
.ndo_set_mac_address = dfx_ctl_set_mac_address,
};
static void dfx_register_res_err(const char *print_name, bool mmio,
unsigned long start, unsigned long len)
{
pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
print_name, mmio ? "MMIO" : "I/O", len, start);
}
/*
* ================
* = dfx_register =
* ================
*
* Overview:
* Initializes a supported FDDI controller
*
* Returns:
* Condition code
*
* Arguments:
* bdev - pointer to device information
*
* Functional Description:
*
* Return Codes:
* 0 - This device (fddi0, fddi1, etc) configured successfully
* -EBUSY - Failed to get resources, or dfx_driver_init failed.
*
* Assumptions:
* It compiles so it should work :-( (PCI cards do :-)
*
* Side Effects:
* Device structures for FDDI adapters (fddi0, fddi1, etc) are
* initialized and the board resources are read and stored in
* the device structure.
*/
static int dfx_register(struct device *bdev)
{
static int version_disp;
int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
const char *print_name = dev_name(bdev);
struct net_device *dev;
DFX_board_t *bp; /* board pointer */
resource_size_t bar_start[3] = {0}; /* pointers to ports */
resource_size_t bar_len[3] = {0}; /* resource length */
int alloc_size; /* total buffer size used */
struct resource *region;
int err = 0;
if (!version_disp) { /* display version info if adapter is found */
version_disp = 1; /* set display flag to TRUE so that */
printk(version); /* we only display this string ONCE */
}
dev = alloc_fddidev(sizeof(*bp));
if (!dev) {
printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
print_name);
return -ENOMEM;
}
/* Enable PCI device. */
if (dfx_bus_pci) {
err = pci_enable_device(to_pci_dev(bdev));
if (err) {
pr_err("%s: Cannot enable PCI device, aborting\n",
print_name);
goto err_out;
}
}
SET_NETDEV_DEV(dev, bdev);
bp = netdev_priv(dev);
bp->bus_dev = bdev;
dev_set_drvdata(bdev, dev);
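	/* Prefer MMIO register access; fall back to port I/O if no usable
	 * MMIO decode range is available or its region cannot be claimed.
	 */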
bp->mmio = true;
dfx_get_bars(bp, bar_start, bar_len);
if (bar_len[0] == 0 ||
(dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) {
bp->mmio = false;
dfx_get_bars(bp, bar_start, bar_len);
}
if (dfx_use_mmio) {
region = request_mem_region(bar_start[0], bar_len[0],
bdev->driver->name);
if (!region && (dfx_bus_eisa || dfx_bus_pci)) {
bp->mmio = false;
dfx_get_bars(bp, bar_start, bar_len);
}
}
if (!dfx_use_mmio)
region = request_region(bar_start[0], bar_len[0],
bdev->driver->name);
if (!region) {
dfx_register_res_err(print_name, dfx_use_mmio,
bar_start[0], bar_len[0]);
err = -EBUSY;
goto err_out_disable;
}
if (bar_start[1] != 0) {
region = request_region(bar_start[1], bar_len[1],
bdev->driver->name);
if (!region) {
dfx_register_res_err(print_name, 0,
bar_start[1], bar_len[1]);
err = -EBUSY;
goto err_out_csr_region;
}
}
if (bar_start[2] != 0) {
region = request_region(bar_start[2], bar_len[2],
bdev->driver->name);
if (!region) {
dfx_register_res_err(print_name, 0,
bar_start[2], bar_len[2]);
err = -EBUSY;
goto err_out_bh_region;
}
}
/* Set up I/O base address. */
if (dfx_use_mmio) {
bp->base.mem = ioremap(bar_start[0], bar_len[0]);
if (!bp->base.mem) {
printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
err = -ENOMEM;
goto err_out_esic_region;
}
} else {
bp->base.port = bar_start[0];
dev->base_addr = bar_start[0];
}
/* Initialize new device structure */
dev->netdev_ops = &dfx_netdev_ops;
if (dfx_bus_pci)
pci_set_master(to_pci_dev(bdev));
if (dfx_driver_init(dev, print_name, bar_start[0]) != DFX_K_SUCCESS) {
err = -ENODEV;
goto err_out_unmap;
}
err = register_netdev(dev);
if (err)
goto err_out_kfree;
printk("%s: registered as %s\n", print_name, dev->name);
return 0;
err_out_kfree:
alloc_size = sizeof(PI_DESCR_BLOCK) +
PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
(bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
sizeof(PI_CONSUMER_BLOCK) +
(PI_ALIGN_K_DESC_BLK - 1);
if (bp->kmalloced)
dma_free_coherent(bdev, alloc_size,
bp->kmalloced, bp->kmalloced_dma);
err_out_unmap:
if (dfx_use_mmio)
iounmap(bp->base.mem);
err_out_esic_region:
if (bar_start[2] != 0)
release_region(bar_start[2], bar_len[2]);
err_out_bh_region:
if (bar_start[1] != 0)
release_region(bar_start[1], bar_len[1]);
err_out_csr_region:
if (dfx_use_mmio)
release_mem_region(bar_start[0], bar_len[0]);
else
release_region(bar_start[0], bar_len[0]);
err_out_disable:
if (dfx_bus_pci)
pci_disable_device(to_pci_dev(bdev));
err_out:
free_netdev(dev);
return err;
}
/*
* ================
* = dfx_bus_init =
* ================
*
* Overview:
* Initializes the bus-specific controller logic.
*
* Returns:
* None
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* Determine and save adapter IRQ in device table,
* then perform bus-specific logic initialization.
*
* Return Codes:
* None
*
* Assumptions:
* bp->base has already been set with the proper
* base I/O address for this device.
*
* Side Effects:
* Interrupts are enabled at the adapter bus-specific logic.
* Note: Interrupts at the DMA engine (PDQ chip) are not
* enabled yet.
*/
static void dfx_bus_init(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
u8 val;
DBG_printk("In dfx_bus_init...\n");
/* Initialize a pointer back to the net_device struct */
bp->dev = dev;
/* Initialize adapter based on bus type */
if (dfx_bus_tc)
dev->irq = to_tc_dev(bdev)->interrupt;
if (dfx_bus_eisa) {
unsigned long base_addr = to_eisa_device(bdev)->base_addr;
/* Disable the board before fiddling with the decoders. */
outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);
/* Get the interrupt level from the ESIC chip. */
val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
val &= PI_CONFIG_STAT_0_M_IRQ;
val >>= PI_CONFIG_STAT_0_V_IRQ;
switch (val) {
case PI_CONFIG_STAT_0_IRQ_K_9:
dev->irq = 9;
break;
case PI_CONFIG_STAT_0_IRQ_K_10:
dev->irq = 10;
break;
case PI_CONFIG_STAT_0_IRQ_K_11:
dev->irq = 11;
break;
case PI_CONFIG_STAT_0_IRQ_K_15:
dev->irq = 15;
break;
}
/*
* Enable memory decoding (MEMCS1) and/or port decoding
* (IOCS1/IOCS0) as appropriate in Function Control
* Register. MEMCS1 or IOCS0 is used for PDQ registers,
* taking 16 32-bit words, while IOCS1 is used for the
* Burst Holdoff register, taking a single 32-bit word
* only. We use the slot-specific I/O range as per the
* ESIC spec, that is set bits 15:12 in the mask registers
* to mask them out.
*/
/* Set the decode range of the board. */
val = 0;
outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1);
val = PI_DEFEA_K_CSR_IO;
outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0);
val = PI_IO_CMP_M_SLOT;
outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1);
val = (PI_ESIC_K_CSR_IO_LEN - 1) & ~3;
outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0);
val = 0;
outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1);
val = PI_DEFEA_K_BURST_HOLDOFF;
outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0);
val = PI_IO_CMP_M_SLOT;
outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1);
val = (PI_ESIC_K_BURST_HOLDOFF_LEN - 1) & ~3;
outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);
/* Enable the decoders. */
val = PI_FUNCTION_CNTRL_M_IOCS1;
if (dfx_use_mmio)
val |= PI_FUNCTION_CNTRL_M_MEMCS1;
else
val |= PI_FUNCTION_CNTRL_M_IOCS0;
outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
/*
* Enable access to the rest of the module
* (including PDQ and packet memory).
*/
val = PI_SLOT_CNTRL_M_ENB;
outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL);
/*
* Map PDQ registers into memory or port space. This is
* done with a bit in the Burst Holdoff register.
*/
val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
if (dfx_use_mmio)
val |= PI_BURST_HOLDOFF_M_MEM_MAP;
else
val &= ~PI_BURST_HOLDOFF_M_MEM_MAP;
outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF);
/* Enable interrupts at EISA bus interface chip (ESIC) */
val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
val |= PI_CONFIG_STAT_0_M_INT_ENB;
outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
}
if (dfx_bus_pci) {
struct pci_dev *pdev = to_pci_dev(bdev);
/* Get the interrupt level from the PCI Configuration Table */
dev->irq = pdev->irq;
/* Check Latency Timer and set if less than minimal */
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
if (val < PFI_K_LAT_TIMER_MIN) {
val = PFI_K_LAT_TIMER_DEF;
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
}
/* Enable interrupts at PCI bus interface chip (PFI) */
val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
}
}
/*
* ==================
* = dfx_bus_uninit =
* ==================
*
* Overview:
* Uninitializes the bus-specific controller logic.
*
* Returns:
* None
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* Perform bus-specific logic uninitialization.
*
* Return Codes:
* None
*
* Assumptions:
* bp->base has already been set with the proper
* base I/O address for this device.
*
* Side Effects:
* Interrupts are disabled at the adapter bus-specific logic.
*/
static void dfx_bus_uninit(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
u8 val;
DBG_printk("In dfx_bus_uninit...\n");
/* Uninitialize adapter based on bus type */
if (dfx_bus_eisa) {
unsigned long base_addr = to_eisa_device(bdev)->base_addr;
/* Disable interrupts at EISA bus interface chip (ESIC) */
val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
/* Disable the board. */
outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);
/* Disable memory and port decoders. */
outb(0, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
}
if (dfx_bus_pci) {
/* Disable interrupts at PCI bus interface chip (PFI) */
dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
}
}
/*
* ========================
* = dfx_bus_config_check =
* ========================
*
* Overview:
 *	Checks the configuration (burst size, full-duplex, etc.).  If any parameters
* are illegal, then this routine will set new defaults.
*
* Returns:
* None
*
* Arguments:
* bp - pointer to board information
*
* Functional Description:
* For Revision 1 FDDI EISA, Revision 2 or later FDDI EISA with rev E or later
* PDQ, and all FDDI PCI controllers, all values are legal.
*
* Return Codes:
* None
*
* Assumptions:
* dfx_adap_init has NOT been called yet so burst size and other items have
* not been set.
*
* Side Effects:
* None
*/
static void dfx_bus_config_check(DFX_board_t *bp)
{
struct device __maybe_unused *bdev = bp->bus_dev;
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int status; /* return code from adapter port control call */
u32 host_data; /* LW data returned from port control call */
DBG_printk("In dfx_bus_config_check...\n");
/* Configuration check only valid for EISA adapter */
if (dfx_bus_eisa) {
/*
* First check if revision 2 EISA controller. Rev. 1 cards used
* PDQ revision B, so no workaround needed in this case. Rev. 3
* cards used PDQ revision E, so no workaround needed in this
* case, either. Only Rev. 2 cards used either Rev. D or E
* chips, so we must verify the chip revision on Rev. 2 cards.
*/
if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
/*
* Revision 2 FDDI EISA controller found,
* so let's check PDQ revision of adapter.
*/
status = dfx_hw_port_ctrl_req(bp,
PI_PCTRL_M_SUB_CMD,
PI_SUB_CMD_K_PDQ_REV_GET,
0,
&host_data);
if ((status != DFX_K_SUCCESS) || (host_data == 2))
{
/*
* Either we couldn't determine the PDQ revision, or
* we determined that it is at revision D. In either case,
* we need to implement the workaround.
*/
/* Ensure that the burst size is set to 8 longwords or less */
switch (bp->burst_size)
{
case PI_PDATA_B_DMA_BURST_SIZE_32:
case PI_PDATA_B_DMA_BURST_SIZE_16:
bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
break;
default:
break;
}
/* Ensure that full-duplex mode is not enabled */
bp->full_duplex_enb = PI_SNMP_K_FALSE;
}
}
}
}
/*
* ===================
* = dfx_driver_init =
* ===================
*
* Overview:
* Initializes remaining adapter board structure information
* and makes sure adapter is in a safe state prior to dfx_open().
*
* Returns:
* Condition code
*
* Arguments:
* dev - pointer to device information
* print_name - printable device name
*
* Functional Description:
* This function allocates additional resources such as the host memory
* blocks needed by the adapter (eg. descriptor and consumer blocks).
* Remaining bus initialization steps are also completed. The adapter
* is also reset so that it is in the DMA_UNAVAILABLE state. The OS
* must call dfx_open() to open the adapter and bring it on-line.
*
* Return Codes:
* DFX_K_SUCCESS - initialization succeeded
* DFX_K_FAILURE - initialization failed - could not allocate memory
* or read adapter MAC address
*
* Assumptions:
* Memory allocated from dma_alloc_coherent() call is physically
* contiguous, locked memory.
*
* Side Effects:
* Adapter is reset and should be in DMA_UNAVAILABLE state before
* returning from this routine.
*/
static int dfx_driver_init(struct net_device *dev, const char *print_name,
resource_size_t bar_start)
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int alloc_size; /* total buffer size needed */
char *top_v, *curr_v; /* virtual addrs into memory block */
dma_addr_t top_p, curr_p; /* physical addrs into memory block */
u32 data; /* host data register value */
__le32 le32;
char *board_name = NULL;
DBG_printk("In dfx_driver_init...\n");
/* Initialize bus-specific hardware registers */
dfx_bus_init(dev);
/*
* Initialize default values for configurable parameters
*
* Note: All of these parameters are ones that a user may
* want to customize. It'd be nice to break these
* out into Space.c or someplace else that's more
* accessible/understandable than this file.
*/
bp->full_duplex_enb = PI_SNMP_K_FALSE;
bp->req_ttrt = 8 * 12500; /* 8ms in 80 nanosec units */
bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
bp->rcv_bufs_to_post = RCV_BUFS_DEF;
/*
* Ensure that HW configuration is OK
*
* Note: Depending on the hardware revision, we may need to modify
* some of the configurable parameters to workaround hardware
* limitations. We'll perform this configuration check AFTER
* setting the parameters to their default values.
*/
dfx_bus_config_check(bp);
/* Disable PDQ interrupts first */
dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
/* Read the factory MAC address from the adapter then save it */
if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
&data) != DFX_K_SUCCESS) {
printk("%s: Could not read adapter factory MAC address!\n",
print_name);
return DFX_K_FAILURE;
}
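	/* The address arrives in two longwords: the low longword holds the
	 * first four bytes and the high longword the remaining two.
	 */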
le32 = cpu_to_le32(data);
memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
&data) != DFX_K_SUCCESS) {
printk("%s: Could not read adapter factory MAC address!\n",
print_name);
return DFX_K_FAILURE;
}
le32 = cpu_to_le32(data);
memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
/*
* Set current address to factory address
*
* Note: Node address override support is handled through
* dfx_ctl_set_mac_address.
*/
dev_addr_set(dev, bp->factory_mac_addr);
if (dfx_bus_tc)
board_name = "DEFTA";
if (dfx_bus_eisa)
board_name = "DEFEA";
if (dfx_bus_pci)
board_name = "DEFPA";
pr_info("%s: %s at %s addr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
print_name, board_name, dfx_use_mmio ? "MMIO" : "I/O",
(long long)bar_start, dev->irq, dev->dev_addr);
/*
* Get memory for descriptor block, consumer block, and other buffers
* that need to be DMA read or written to by the adapter.
*/
alloc_size = sizeof(PI_DESCR_BLOCK) +
PI_CMD_REQ_K_SIZE_MAX +
PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
(bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
sizeof(PI_CONSUMER_BLOCK) +
(PI_ALIGN_K_DESC_BLK - 1);
bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
&bp->kmalloced_dma,
GFP_ATOMIC);
if (top_v == NULL)
return DFX_K_FAILURE;
top_p = bp->kmalloced_dma; /* get physical address of buffer */
/*
* To guarantee the 8K alignment required for the descriptor block, 8K - 1
* plus the amount of memory needed was allocated. The physical address
* is now 8K aligned. By carving up the memory in a specific order,
* we'll guarantee the alignment requirements for all other structures.
*
* Note: If the assumptions change regarding the non-paged, non-cached,
* physically contiguous nature of the memory block or the address
* alignments, then we'll need to implement a different algorithm
* for allocating the needed memory.
*/
curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
curr_v = top_v + (curr_p - top_p);
/* Reserve space for descriptor block */
bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
bp->descr_block_phys = curr_p;
curr_v += sizeof(PI_DESCR_BLOCK);
curr_p += sizeof(PI_DESCR_BLOCK);
/* Reserve space for command request buffer */
bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
bp->cmd_req_phys = curr_p;
curr_v += PI_CMD_REQ_K_SIZE_MAX;
curr_p += PI_CMD_REQ_K_SIZE_MAX;
/* Reserve space for command response buffer */
bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
bp->cmd_rsp_phys = curr_p;
curr_v += PI_CMD_RSP_K_SIZE_MAX;
curr_p += PI_CMD_RSP_K_SIZE_MAX;
/* Reserve space for the LLC host receive queue buffers */
bp->rcv_block_virt = curr_v;
bp->rcv_block_phys = curr_p;
#ifndef DYNAMIC_BUFFERS
curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
#endif
/* Reserve space for the consumer block */
bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
bp->cons_block_phys = curr_p;
/* Display virtual and physical addresses if debug driver */
DBG_printk("%s: Descriptor block virt = %p, phys = %pad\n",
print_name, bp->descr_block_virt, &bp->descr_block_phys);
DBG_printk("%s: Command Request buffer virt = %p, phys = %pad\n",
print_name, bp->cmd_req_virt, &bp->cmd_req_phys);
DBG_printk("%s: Command Response buffer virt = %p, phys = %pad\n",
print_name, bp->cmd_rsp_virt, &bp->cmd_rsp_phys);
DBG_printk("%s: Receive buffer block virt = %p, phys = %pad\n",
print_name, bp->rcv_block_virt, &bp->rcv_block_phys);
DBG_printk("%s: Consumer block virt = %p, phys = %pad\n",
print_name, bp->cons_block_virt, &bp->cons_block_phys);
return DFX_K_SUCCESS;
}
/*
* =================
* = dfx_adap_init =
* =================
*
* Overview:
* Brings the adapter to the link avail/link unavailable state.
*
* Returns:
* Condition code
*
* Arguments:
* bp - pointer to board information
* get_buffers - non-zero if buffers to be allocated
*
* Functional Description:
* Issues the low-level firmware/hardware calls necessary to bring
* the adapter up, or to properly reset and restore adapter during
* run-time.
*
* Return Codes:
* DFX_K_SUCCESS - Adapter brought up successfully
* DFX_K_FAILURE - Adapter initialization failed
*
* Assumptions:
* bp->reset_type should be set to a valid reset type value before
* calling this routine.
*
* Side Effects:
* Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
* upon a successful return of this routine.
*/
static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
{
DBG_printk("In dfx_adap_init...\n");
/* Disable PDQ interrupts first */
dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
{
printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
return DFX_K_FAILURE;
}
/*
* When the PDQ is reset, some false Type 0 interrupts may be pending,
* so we'll acknowledge all Type 0 interrupts now before continuing.
*/
dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
/*
* Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state
*
* Note: We only need to clear host copies of these registers. The PDQ reset
* takes care of the on-board register values.
*/
bp->cmd_req_reg.lword = 0;
bp->cmd_rsp_reg.lword = 0;
bp->rcv_xmt_reg.lword = 0;
/* Clear consumer block before going to DMA_AVAILABLE state */
memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
/* Initialize the DMA Burst Size */
if (dfx_hw_port_ctrl_req(bp,
PI_PCTRL_M_SUB_CMD,
PI_SUB_CMD_K_BURST_SIZE_SET,
bp->burst_size,
NULL) != DFX_K_SUCCESS)
{
printk("%s: Could not set adapter burst size!\n", bp->dev->name);
return DFX_K_FAILURE;
}
/*
* Set base address of Consumer Block
*
* Assumption: 32-bit physical address of consumer block is 64 byte
* aligned. That is, bits 0-5 of the address must be zero.
*/
if (dfx_hw_port_ctrl_req(bp,
PI_PCTRL_M_CONS_BLOCK,
bp->cons_block_phys,
0,
NULL) != DFX_K_SUCCESS)
{
printk("%s: Could not set consumer block address!\n", bp->dev->name);
return DFX_K_FAILURE;
}
/*
* Set the base address of Descriptor Block and bring adapter
* to DMA_AVAILABLE state.
*
* Note: We also set the literal and data swapping requirements
* in this command.
*
* Assumption: 32-bit physical address of descriptor block
* is 8Kbyte aligned.
*/
if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
(u32)(bp->descr_block_phys |
PI_PDATA_A_INIT_M_BSWAP_INIT),
0, NULL) != DFX_K_SUCCESS) {
printk("%s: Could not set descriptor block address!\n",
bp->dev->name);
return DFX_K_FAILURE;
}
/* Set transmit flush timeout value */
bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
bp->cmd_req_virt->char_set.item[0].value = 3; /* 3 seconds */
bp->cmd_req_virt->char_set.item[0].item_index = 0;
bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
{
printk("%s: DMA command request failed!\n", bp->dev->name);
return DFX_K_FAILURE;
}
/* Set the initial values for eFDXEnable and MACTReq MIB objects */
bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
{
printk("%s: DMA command request failed!\n", bp->dev->name);
return DFX_K_FAILURE;
}
/* Initialize adapter CAM */
if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
{
printk("%s: Adapter CAM update failed!\n", bp->dev->name);
return DFX_K_FAILURE;
}
/* Initialize adapter filters */
if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
{
printk("%s: Adapter filters update failed!\n", bp->dev->name);
return DFX_K_FAILURE;
}
/*
* Remove any existing dynamic buffers (i.e. if the adapter is being
* reinitialized)
*/
if (get_buffers)
dfx_rcv_flush(bp);
/* Initialize receive descriptor block and produce buffers */
if (dfx_rcv_init(bp, get_buffers))
{
printk("%s: Receive buffer allocation failed\n", bp->dev->name);
if (get_buffers)
dfx_rcv_flush(bp);
return DFX_K_FAILURE;
}
/* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
{
printk("%s: Start command failed\n", bp->dev->name);
if (get_buffers)
dfx_rcv_flush(bp);
return DFX_K_FAILURE;
}
/* Initialization succeeded, reenable PDQ interrupts */
dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
return DFX_K_SUCCESS;
}
/*
* ============
* = dfx_open =
* ============
*
* Overview:
* Opens the adapter
*
* Returns:
* Condition code
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* This function brings the adapter to an operational state.
*
* Return Codes:
* 0 - Adapter was successfully opened
* -EAGAIN - Could not register IRQ or adapter initialization failed
*
* Assumptions:
* This routine should only be called for a device that was
* initialized successfully.
*
* Side Effects:
* Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
* if the open is successful.
*/
static int dfx_open(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
int ret;
DBG_printk("In dfx_open...\n");
/* Register IRQ - support shared interrupts by passing device ptr */
ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
dev);
if (ret) {
printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
return ret;
}
/*
* Set current address to factory MAC address
*
* Note: We've already done this step in dfx_driver_init.
* However, it's possible that a user has set a node
* address override, then closed and reopened the
* adapter. Unless we reset the device address field
* now, we'll continue to use the existing modified
* address.
*/
dev_addr_set(dev, bp->factory_mac_addr);
/* Clear local unicast/multicast address tables and counts */
memset(bp->uc_table, 0, sizeof(bp->uc_table));
memset(bp->mc_table, 0, sizeof(bp->mc_table));
bp->uc_count = 0;
bp->mc_count = 0;
/* Disable promiscuous filter settings */
bp->ind_group_prom = PI_FSTATE_K_BLOCK;
bp->group_prom = PI_FSTATE_K_BLOCK;
spin_lock_init(&bp->lock);
/* Reset and initialize adapter */
bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST; /* skip self-test */
if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
{
printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
free_irq(dev->irq, dev);
return -EAGAIN;
}
/* Set device structure info */
netif_start_queue(dev);
return 0;
}
/*
* =============
* = dfx_close =
* =============
*
* Overview:
* Closes the device/module.
*
* Returns:
* Condition code
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* This routine closes the adapter and brings it to a safe state.
* The interrupt service routine is deregistered with the OS.
* The adapter can be opened again with another call to dfx_open().
*
* Return Codes:
* Always return 0.
*
* Assumptions:
* No further requests for this adapter are made after this routine is
* called. dfx_open() can be called to reset and reinitialize the
* adapter.
*
* Side Effects:
* Adapter should be in DMA_UNAVAILABLE state upon completion of this
* routine.
*/
static int dfx_close(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
DBG_printk("In dfx_close...\n");
/* Disable PDQ interrupts first */
dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
/*
* Flush any pending transmit buffers
*
* Note: It's important that we flush the transmit buffers
* BEFORE we clear our copy of the Type 2 register.
* Otherwise, we'll have no idea how many buffers
* we need to free.
*/
dfx_xmt_flush(bp);
/*
* Clear Type 1 and Type 2 registers after adapter reset
*
* Note: Even though we're closing the adapter, it's
* possible that an interrupt will occur after
* dfx_close is called. Without some assurance to
* the contrary we want to make sure that we don't
* process receive and transmit LLC frames and update
* the Type 2 register with bad information.
*/
bp->cmd_req_reg.lword = 0;
bp->cmd_rsp_reg.lword = 0;
bp->rcv_xmt_reg.lword = 0;
/* Clear consumer block for the same reason given above */
memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
	/* Release all dynamically allocated skbs in the receive ring. */
dfx_rcv_flush(bp);
/* Clear device structure flags */
netif_stop_queue(dev);
/* Deregister (free) IRQ */
free_irq(dev->irq, dev);
return 0;
}
/*
* ======================
* = dfx_int_pr_halt_id =
* ======================
*
* Overview:
* Displays halt id's in string form.
*
* Returns:
* None
*
* Arguments:
* bp - pointer to board information
*
* Functional Description:
* Determine current halt id and display appropriate string.
*
* Return Codes:
* None
*
* Assumptions:
* None
*
* Side Effects:
* None
*/
static void dfx_int_pr_halt_id(DFX_board_t *bp)
{
PI_UINT32 port_status; /* PDQ port status register value */
PI_UINT32 halt_id; /* PDQ port status halt ID */
/* Read the latest port status */
dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
/* Display halt state transition information */
halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
switch (halt_id)
{
case PI_HALT_ID_K_SELFTEST_TIMEOUT:
printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
break;
case PI_HALT_ID_K_PARITY_ERROR:
printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
break;
case PI_HALT_ID_K_HOST_DIR_HALT:
printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
break;
case PI_HALT_ID_K_SW_FAULT:
printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
break;
case PI_HALT_ID_K_HW_FAULT:
printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
break;
case PI_HALT_ID_K_PC_TRACE:
printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
break;
case PI_HALT_ID_K_DMA_ERROR:
printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
break;
case PI_HALT_ID_K_IMAGE_CRC_ERROR:
printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
break;
case PI_HALT_ID_K_BUS_EXCEPTION:
printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
break;
default:
printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
break;
}
}
/*
* ==========================
* = dfx_int_type_0_process =
* ==========================
*
* Overview:
* Processes Type 0 interrupts.
*
* Returns:
* None
*
* Arguments:
* bp - pointer to board information
*
* Functional Description:
* Processes all enabled Type 0 interrupts. If the reason for the interrupt
* is a serious fault on the adapter, then an error message is displayed
* and the adapter is reset.
*
* One tricky potential timing window is the rapid succession of "link avail"
* "link unavail" state change interrupts. The acknowledgement of the Type 0
* interrupt must be done before reading the state from the Port Status
* register. This is true because a state change could occur after reading
* the data, but before acknowledging the interrupt. If this state change
* does happen, it would be lost because the driver is using the old state,
* and it will never know about the new state because it subsequently
* acknowledges the state change interrupt.
*
* INCORRECT CORRECT
* read type 0 int reasons read type 0 int reasons
* read adapter state ack type 0 interrupts
* ack type 0 interrupts read adapter state
* ... process interrupt ... ... process interrupt ...
*
* Return Codes:
* None
*
* Assumptions:
* None
*
* Side Effects:
* An adapter reset may occur if the adapter has any Type 0 error interrupts
* or if the port status indicates that the adapter is halted. The driver
* is responsible for reinitializing the adapter with the current CAM
* contents and adapter filter settings.
*/
static void dfx_int_type_0_process(DFX_board_t *bp)
{
PI_UINT32 type_0_status; /* Host Interrupt Type 0 register */
PI_UINT32 state; /* current adap state (from port status) */
/*
* Read host interrupt Type 0 register to determine which Type 0
* interrupts are pending. Immediately write it back out to clear
* those interrupts.
*/
dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
/* Check for Type 0 error interrupts */
if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
PI_TYPE_0_STAT_M_PM_PAR_ERR |
PI_TYPE_0_STAT_M_BUS_PAR_ERR))
{
/* Check for Non-Existent Memory error */
if (type_0_status & PI_TYPE_0_STAT_M_NXM)
printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
/* Check for Packet Memory Parity error */
if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
printk("%s: Packet Memory Parity Error\n", bp->dev->name);
/* Check for Host Bus Parity error */
if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
printk("%s: Host Bus Parity Error\n", bp->dev->name);
/* Reset adapter and bring it back on-line */
bp->link_available = PI_K_FALSE; /* link is no longer available */
bp->reset_type = 0; /* rerun on-board diagnostics */
printk("%s: Resetting adapter...\n", bp->dev->name);
if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
{
printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
return;
}
printk("%s: Adapter reset successful!\n", bp->dev->name);
return;
}
/* Check for transmit flush interrupt */
if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
{
/* Flush any pending xmt's and acknowledge the flush interrupt */
bp->link_available = PI_K_FALSE; /* link is no longer available */
dfx_xmt_flush(bp); /* flush any outstanding packets */
(void) dfx_hw_port_ctrl_req(bp,
PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
0,
0,
NULL);
}
/* Check for adapter state change */
if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
{
/* Get latest adapter state */
state = dfx_hw_adap_state_rd(bp); /* get adapter state */
if (state == PI_STATE_K_HALTED)
{
/*
* Adapter has transitioned to HALTED state, try to reset
* adapter to bring it back on-line. If reset fails,
* leave the adapter in the broken state.
*/
printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
dfx_int_pr_halt_id(bp); /* display halt id as string */
/* Reset adapter and bring it back on-line */
bp->link_available = PI_K_FALSE; /* link is no longer available */
bp->reset_type = 0; /* rerun on-board diagnostics */
printk("%s: Resetting adapter...\n", bp->dev->name);
if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
{
printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
return;
}
printk("%s: Adapter reset successful!\n", bp->dev->name);
}
else if (state == PI_STATE_K_LINK_AVAIL)
{
bp->link_available = PI_K_TRUE; /* set link available flag */
}
}
}
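/*
 * Editor's sketch (not part of the original driver): the acknowledgement
 * ordering described above, expressed as code.  It only reuses helpers
 * already defined in this file and is deliberately kept out of the build.
 */
#if 0
static void type_0_ack_ordering_sketch(DFX_board_t *bp)
{
	PI_UINT32 reasons;
	PI_UINT32 state;

	/* CORRECT: read the interrupt reasons, acknowledge them, and only
	 * then sample the adapter state.  A state change that happens after
	 * the read is not lost, because its interrupt is still pending.
	 */
	dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &reasons);
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, reasons);
	state = dfx_hw_adap_state_rd(bp);
	if (state == PI_STATE_K_HALTED)
		dfx_int_pr_halt_id(bp);

	/* INCORRECT would be: read reasons, read state, then acknowledge.
	 * A change arriving between the last two steps is acknowledged
	 * without ever having been observed.
	 */
}
#endif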
/*
* ==================
* = dfx_int_common =
* ==================
*
* Overview:
* Interrupt service routine (ISR)
*
* Returns:
* None
*
* Arguments:
* bp - pointer to board information
*
* Functional Description:
* This is the ISR which processes incoming adapter interrupts.
*
* Return Codes:
* None
*
* Assumptions:
* This routine assumes PDQ interrupts have not been disabled.
* When interrupts are disabled at the PDQ, the Port Status register
* is automatically cleared. This routine uses the Port Status
* register value to determine whether a Type 0 interrupt occurred,
* so it's important that adapter interrupts are not normally
* enabled/disabled at the PDQ.
*
* It's vital that this routine is NOT reentered for the
* same board and that the OS is not in another section of
* code (eg. dfx_xmt_queue_pkt) for the same board on a
* different thread.
*
* Side Effects:
* Pending interrupts are serviced. Depending on the type of
* interrupt, acknowledging and clearing the interrupt at the
* PDQ involves writing a register to clear the interrupt bit
* or updating completion indices.
*/
static void dfx_int_common(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
PI_UINT32 port_status; /* Port Status register */
/* Process xmt interrupts - frequent case, so always call this routine */
if (dfx_xmt_done(bp)) /* free consumed xmt packets */
netif_wake_queue(dev);
/* Process rcv interrupts - frequent case, so always call this routine */
dfx_rcv_queue_process(bp); /* service received LLC frames */
/*
* Transmit and receive producer and completion indices are updated on the
* adapter by writing to the Type 2 Producer register. Since the frequent
* case is that we'll be processing either LLC transmit or receive buffers,
* we'll optimize I/O writes by doing a single register write here.
*/
dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
/* Read PDQ Port Status register to find out which interrupts need processing */
dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
/* Process Type 0 interrupts (if any) - infrequent, so only call when needed */
if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
dfx_int_type_0_process(bp); /* process Type 0 interrupts */
}
/*
* =================
* = dfx_interrupt =
* =================
*
* Overview:
* Interrupt processing routine
*
* Returns:
* Whether a valid interrupt was seen.
*
* Arguments:
* irq - interrupt vector
* dev_id - pointer to device information
*
* Functional Description:
* This routine calls the interrupt processing routine for this adapter. It
* disables and reenables adapter interrupts, as appropriate. We can support
* shared interrupts since the incoming dev_id pointer provides our device
* structure context.
*
* Return Codes:
* IRQ_HANDLED - an IRQ was handled.
* IRQ_NONE - no IRQ was handled.
*
* Assumptions:
* The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
* on Intel-based systems) is done by the operating system outside this
* routine.
*
* System interrupts are enabled through this call.
*
* Side Effects:
* Interrupts are disabled, then reenabled at the adapter.
*/
static irqreturn_t dfx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
/* Service adapter interrupts */
if (dfx_bus_pci) {
u32 status;
dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
if (!(status & PFI_STATUS_M_PDQ_INT))
return IRQ_NONE;
spin_lock(&bp->lock);
/* Disable PDQ-PFI interrupts at PFI */
dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
PFI_MODE_M_DMA_ENB);
/* Call interrupt service routine for this adapter */
dfx_int_common(dev);
/* Clear PDQ interrupt status bit and reenable interrupts */
dfx_port_write_long(bp, PFI_K_REG_STATUS,
PFI_STATUS_M_PDQ_INT);
dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
(PFI_MODE_M_PDQ_INT_ENB |
PFI_MODE_M_DMA_ENB));
spin_unlock(&bp->lock);
}
if (dfx_bus_eisa) {
unsigned long base_addr = to_eisa_device(bdev)->base_addr;
u8 status;
status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
if (!(status & PI_CONFIG_STAT_0_M_PEND))
return IRQ_NONE;
spin_lock(&bp->lock);
/* Disable interrupts at the ESIC */
status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
/* Call interrupt service routine for this adapter */
dfx_int_common(dev);
/* Reenable interrupts at the ESIC */
status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
status |= PI_CONFIG_STAT_0_M_INT_ENB;
outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
spin_unlock(&bp->lock);
}
if (dfx_bus_tc) {
u32 status;
dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
PI_PSTATUS_M_XMT_DATA_PENDING |
PI_PSTATUS_M_SMT_HOST_PENDING |
PI_PSTATUS_M_UNSOL_PENDING |
PI_PSTATUS_M_CMD_RSP_PENDING |
PI_PSTATUS_M_CMD_REQ_PENDING |
PI_PSTATUS_M_TYPE_0_PENDING)))
return IRQ_NONE;
spin_lock(&bp->lock);
/* Call interrupt service routine for this adapter */
dfx_int_common(dev);
spin_unlock(&bp->lock);
}
return IRQ_HANDLED;
}
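/*
 * Editor's sketch (assumption, not taken from this file): the shared
 * interrupt handling above relies on dev_id carrying the net_device
 * pointer, which implies a registration along these lines in the open
 * path and a matching free in the close path.
 */
#if 0
	if (request_irq(dev->irq, dfx_interrupt, IRQF_SHARED,
			dev->name, dev))
		return -EAGAIN;		/* interrupt line not available */
	/* ... */
	free_irq(dev->irq, dev);	/* dev_id must match request_irq() */
#endif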
/*
* =====================
* = dfx_ctl_get_stats =
* =====================
*
* Overview:
* Get statistics for FDDI adapter
*
* Returns:
* Pointer to FDDI statistics structure
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* Gets current MIB objects from adapter, then
* returns FDDI statistics structure as defined
* in if_fddi.h.
*
* Note: Since the FDDI statistics structure is
* still new and the device structure doesn't
* have an FDDI-specific get statistics handler,
* we'll return the FDDI statistics structure as
* a pointer to an Ethernet statistics structure.
* That way, at least the first part of the statistics
* structure can be decoded properly, and it allows
* "smart" applications to perform a second cast to
* decode the FDDI-specific statistics.
*
* We'll have to pay attention to this routine as the
* device structure becomes more mature and LAN media
* independent.
*
* Return Codes:
* None
*
* Assumptions:
* None
*
* Side Effects:
* None
*/
static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
/* Fill the bp->stats structure with driver-maintained counters */
bp->stats.gen.rx_packets = bp->rcv_total_frames;
bp->stats.gen.tx_packets = bp->xmt_total_frames;
bp->stats.gen.rx_bytes = bp->rcv_total_bytes;
bp->stats.gen.tx_bytes = bp->xmt_total_bytes;
bp->stats.gen.rx_errors = bp->rcv_crc_errors +
bp->rcv_frame_status_errors +
bp->rcv_length_errors;
bp->stats.gen.tx_errors = bp->xmt_length_errors;
bp->stats.gen.rx_dropped = bp->rcv_discards;
bp->stats.gen.tx_dropped = bp->xmt_discards;
bp->stats.gen.multicast = bp->rcv_multicast_frames;
bp->stats.gen.collisions = 0; /* always zero (0) for FDDI */
/* Get FDDI SMT MIB objects */
bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
return (struct net_device_stats *)&bp->stats;
/* Fill the bp->stats structure with the SMT MIB object values */
memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
/* Get FDDI counters */
bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
return (struct net_device_stats *)&bp->stats;
/* Fill the bp->stats structure with the FDDI counter values */
bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
return (struct net_device_stats *)&bp->stats;
}
/*
* ==============================
* = dfx_ctl_set_multicast_list =
* ==============================
*
* Overview:
* Enable/Disable LLC frame promiscuous mode reception
* on the adapter and/or update multicast address table.
*
* Returns:
* None
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* This routine follows a fairly simple algorithm for setting the
* adapter filters and CAM:
*
* if IFF_PROMISC flag is set
* enable LLC individual/group promiscuous mode
* else
* disable LLC individual/group promiscuous mode
* if number of incoming multicast addresses >
* (CAM max size - number of unicast addresses in CAM)
* enable LLC group promiscuous mode
* set driver-maintained multicast address count to zero
* else
* disable LLC group promiscuous mode
* set driver-maintained multicast address count to incoming count
* update adapter CAM
* update adapter filters
*
* Return Codes:
* None
*
* Assumptions:
* Multicast addresses are presented in canonical (LSB) format.
*
* Side Effects:
* On-board adapter CAM and filters are updated.
*/
static void dfx_ctl_set_multicast_list(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
int i; /* used as index in for loop */
struct netdev_hw_addr *ha;
/* Enable LLC frame promiscuous mode, if necessary */
if (dev->flags & IFF_PROMISC)
bp->ind_group_prom = PI_FSTATE_K_PASS; /* Enable LLC ind/group prom mode */
/* Else, update multicast address table */
else
{
bp->ind_group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC ind/group prom mode */
/*
* Check whether incoming multicast address count exceeds table size
*
* Note: The adapters utilize an on-board 64 entry CAM for
* supporting perfect filtering of multicast packets
* and bridge functions when adding unicast addresses.
* There is no hash function available. To support
* additional multicast addresses, the all multicast
* filter (LLC group promiscuous mode) must be enabled.
*
* The firmware reserves two CAM entries for SMT-related
* multicast addresses, which leaves 62 entries available.
* The following code ensures that we're not being asked
* to add more than 62 addresses to the CAM. If we are,
* the driver will enable the all multicast filter.
* Should the number of multicast addresses drop below
* the high water mark, the filter will be disabled and
* perfect filtering will be used.
*/
if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
{
bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
bp->mc_count = 0; /* Don't add mc addrs to CAM */
}
else
{
bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */
bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */
}
/* Copy addresses to multicast address table, then update adapter CAM */
i = 0;
netdev_for_each_mc_addr(ha, dev)
memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
ha->addr, FDDI_K_ALEN);
if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
{
DBG_printk("%s: Could not update multicast address table!\n", dev->name);
}
else
{
DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
}
}
/* Update adapter filters */
if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
{
DBG_printk("%s: Could not update adapter filters!\n", dev->name);
}
else
{
DBG_printk("%s: Adapter filters updated!\n", dev->name);
}
}
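/*
 * Editor's note (worked example, values assumed): with a 64-entry CAM and
 * two entries reserved by the firmware for SMT addresses,
 * PI_CMD_ADDR_FILTER_K_SIZE is effectively 62.  If one unicast override is
 * already present (uc_count == 1), up to 61 multicast addresses can still
 * be perfectly filtered; a 62nd multicast address makes the check above
 * true, so group_prom is set to PI_FSTATE_K_PASS and the adapter falls
 * back to all-multicast reception instead of the CAM.
 */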
/*
* ===========================
* = dfx_ctl_set_mac_address =
* ===========================
*
* Overview:
* Add node address override (unicast address) to adapter
* CAM and update dev_addr field in device table.
*
* Returns:
* None
*
* Arguments:
* dev - pointer to device information
* addr - pointer to sockaddr structure containing unicast address to add
*
* Functional Description:
* The adapter supports node address overrides by adding one or more
* unicast addresses to the adapter CAM. This is similar to adding
* multicast addresses. In this routine we'll update the driver and
* device structures with the new address, then update the adapter CAM
* to ensure that the adapter will copy and strip frames destined and
* sourced by that address.
*
* Return Codes:
* Always returns zero.
*
* Assumptions:
* The address pointed to by addr->sa_data is a valid unicast
* address and is presented in canonical (LSB) format.
*
* Side Effects:
* On-board adapter CAM is updated. On-board adapter filters
* may be updated.
*/
static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *p_sockaddr = (struct sockaddr *)addr;
DFX_board_t *bp = netdev_priv(dev);
/* Copy unicast address to driver-maintained structs and update count */
dev_addr_set(dev, p_sockaddr->sa_data); /* update device struct */
memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */
bp->uc_count = 1;
/*
* Verify we're not exceeding the CAM size by adding unicast address
*
* Note: It's possible that before entering this routine we've
* already filled the CAM with 62 multicast addresses.
* Since we need to place the node address override into
* the CAM, we have to check to see that we're not
* exceeding the CAM size. If we are, we have to enable
* the LLC group (multicast) promiscuous mode filter as
* in dfx_ctl_set_multicast_list.
*/
if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
{
bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
bp->mc_count = 0; /* Don't add mc addrs to CAM */
/* Update adapter filters */
if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
{
DBG_printk("%s: Could not update adapter filters!\n", dev->name);
}
else
{
DBG_printk("%s: Adapter filters updated!\n", dev->name);
}
}
/* Update adapter CAM with new unicast address */
if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
{
DBG_printk("%s: Could not set new MAC address!\n", dev->name);
}
else
{
DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
}
return 0; /* always return zero */
}
/*
* ======================
* = dfx_ctl_update_cam =
* ======================
*
* Overview:
* Procedure to update adapter CAM (Content Addressable Memory)
* with desired unicast and multicast address entries.
*
* Returns:
* Condition code
*
* Arguments:
* bp - pointer to board information
*
* Functional Description:
* Updates adapter CAM with current contents of board structure
* unicast and multicast address tables. Since there are only 62
* free entries in CAM, this routine ensures that the command
* request buffer is not overrun.
*
* Return Codes:
* DFX_K_SUCCESS - Request succeeded
* DFX_K_FAILURE - Request failed
*
* Assumptions:
* All addresses being added (unicast and multicast) are in canonical
* order.
*
* Side Effects:
* On-board adapter CAM is updated.
*/
static int dfx_ctl_update_cam(DFX_board_t *bp)
{
int i; /* used as index */
PI_LAN_ADDR *p_addr; /* pointer to CAM entry */
/*
* Fill in command request information
*
* Note: Even though both the unicast and multicast address
* table entries are stored as contiguous 6 byte entries,
* the firmware address filter set command expects each
* entry to be two longwords (8 bytes total). We must be
* careful to only copy the six bytes of each unicast and
* multicast table entry into each command entry. This
* is also why we must first clear the entire command
* request buffer.
*/
memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX); /* first clear buffer */
bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
/* Now add unicast addresses to command request buffer, if any */
for (i = 0; i < (int)bp->uc_count; i++)
{
if (i < PI_CMD_ADDR_FILTER_K_SIZE)
{
memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
p_addr++; /* point to next command entry */
}
}
/* Now add multicast addresses to command request buffer, if any */
for (i = 0; i < (int)bp->mc_count; i++)
{
if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
{
memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
p_addr++; /* point to next command entry */
}
}
/* Issue command to update adapter CAM, then return */
if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
return DFX_K_FAILURE;
return DFX_K_SUCCESS;
}
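/*
 * Editor's note: layout of one address-filter command entry as handled
 * above.  The driver tables store plain 6-byte (FDDI_K_ALEN) addresses,
 * while each PI_LAN_ADDR command entry spans two longwords (8 bytes); the
 * memcpy() fills the first six bytes and the initial memset() of the
 * request buffer guarantees the two remaining pad bytes are zero:
 *
 *	entry n:  | a0 a1 a2 a3 a4 a5 | pad pad |
 *	            6 address bytes     cleared by memset()
 */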
/*
* ==========================
* = dfx_ctl_update_filters =
* ==========================
*
* Overview:
* Procedure to update adapter filters with desired
* filter settings.
*
* Returns:
* Condition code
*
* Arguments:
* bp - pointer to board information
*
* Functional Description:
* Enables or disables filter using current filter settings.
*
* Return Codes:
* DFX_K_SUCCESS - Request succeeded.
* DFX_K_FAILURE - Request failed.
*
* Assumptions:
* We must always pass up packets destined to the broadcast
* address (FF-FF-FF-FF-FF-FF), so we'll always keep the
* broadcast filter enabled.
*
* Side Effects:
* On-board adapter filters are updated.
*/
static int dfx_ctl_update_filters(DFX_board_t *bp)
{
int i = 0; /* used as index */
/* Fill in command request information */
bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
/* Initialize Broadcast filter - * ALWAYS ENABLED * */
bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST;
bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS;
/* Initialize LLC Individual/Group Promiscuous filter */
bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM;
bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom;
/* Initialize LLC Group Promiscuous filter */
bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM;
bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom;
/* Terminate the item code list */
bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL;
/* Issue command to update adapter filters, then return */
if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
return DFX_K_FAILURE;
return DFX_K_SUCCESS;
}
/*
* ======================
* = dfx_hw_dma_cmd_req =
* ======================
*
* Overview:
* Sends PDQ DMA command to adapter firmware
*
* Returns:
* Condition code
*
* Arguments:
* bp - pointer to board information
*
* Functional Description:
* The command request and response buffers are posted to the adapter in the manner
* described in the PDQ Port Specification:
*
* 1. Command Response Buffer is posted to adapter.
* 2. Command Request Buffer is posted to adapter.
* 3. Command Request consumer index is polled until it indicates that request
* buffer has been DMA'd to adapter.
* 4. Command Response consumer index is polled until it indicates that response
* buffer has been DMA'd from adapter.
*
* This ordering ensures that a response buffer is already available for the firmware
* to use once it's done processing the request buffer.
*
* Return Codes:
* DFX_K_SUCCESS - DMA command succeeded
* DFX_K_OUTSTATE - Adapter is NOT in proper state
* DFX_K_HW_TIMEOUT - DMA command timed out
*
* Assumptions:
* Command request buffer has already been filled with desired DMA command.
*
* Side Effects:
* None
*/
static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
{
int status; /* adapter status */
int timeout_cnt; /* used in for loops */
/* Make sure the adapter is in a state that we can issue the DMA command in */
status = dfx_hw_adap_state_rd(bp);
if ((status == PI_STATE_K_RESET) ||
(status == PI_STATE_K_HALTED) ||
(status == PI_STATE_K_DMA_UNAVAIL) ||
(status == PI_STATE_K_UPGRADE))
return DFX_K_OUTSTATE;
/* Put response buffer on the command response queue */
bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
/* Bump (and wrap) the producer index and write out to register */
bp->cmd_rsp_reg.index.prod += 1;
bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
/* Put request buffer on the command request queue */
bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
/* Bump (and wrap) the producer index and write out to register */
bp->cmd_req_reg.index.prod += 1;
bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
/*
* Here we wait for the command request consumer index to be equal
* to the producer, indicating that the adapter has DMAed the request.
*/
for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
{
if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
break;
udelay(100); /* wait for 100 microseconds */
}
if (timeout_cnt == 0)
return DFX_K_HW_TIMEOUT;
/* Bump (and wrap) the completion index and write out to register */
bp->cmd_req_reg.index.comp += 1;
bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
/*
* Here we wait for the command response consumer index to be equal
* to the producer, indicating that the adapter has DMAed the response.
*/
for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
{
if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
break;
udelay(100); /* wait for 100 microseconds */
}
if (timeout_cnt == 0)
return DFX_K_HW_TIMEOUT;
/* Bump (and wrap) the completion index and write out to register */
bp->cmd_rsp_reg.index.comp += 1;
bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
return DFX_K_SUCCESS;
}
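/*
 * Editor's note: the "bump and wrap" index updates above assume the
 * command queue sizes are powers of two, so masking with (N - 1) behaves
 * like a modulo.  Minimal sketch with an assumed 8-entry ring; kept out
 * of the build.
 */
#if 0
	u8 idx = 7;		/* last slot of an 8-entry ring	*/

	idx += 1;		/* 8 ...			*/
	idx &= 8 - 1;		/* ... wraps back to slot 0	*/
#endif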
/*
* ========================
* = dfx_hw_port_ctrl_req =
* ========================
*
* Overview:
* Sends PDQ port control command to adapter firmware
*
* Returns:
* Host data register value in host_data if ptr is not NULL
*
* Arguments:
* bp - pointer to board information
* command - port control command
* data_a - port data A register value
* data_b - port data B register value
* host_data - ptr to host data register value
*
* Functional Description:
* Send generic port control command to adapter by writing
* to various PDQ port registers, then polling for completion.
*
* Return Codes:
* DFX_K_SUCCESS - port control command succeeded
* DFX_K_HW_TIMEOUT - port control command timed out
*
* Assumptions:
* None
*
* Side Effects:
* None
*/
static int dfx_hw_port_ctrl_req(
DFX_board_t *bp,
PI_UINT32 command,
PI_UINT32 data_a,
PI_UINT32 data_b,
PI_UINT32 *host_data
)
{
PI_UINT32 port_cmd; /* Port Control command register value */
int timeout_cnt; /* used in for loops */
/* Set Command Error bit in command longword */
port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
/* Issue port command to the adapter */
dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
/* Now wait for command to complete */
if (command == PI_PCTRL_M_BLAST_FLASH)
timeout_cnt = 600000; /* set command timeout count to 60 seconds */
else
timeout_cnt = 20000; /* set command timeout count to 2 seconds */
for (; timeout_cnt > 0; timeout_cnt--)
{
dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
break;
udelay(100); /* wait for 100 microseconds */
}
if (timeout_cnt == 0)
return DFX_K_HW_TIMEOUT;
/*
 * If the caller supplied a non-NULL host_data pointer, return the contents
 * of the HOST_DATA register through it.
*/
if (host_data != NULL)
dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
return DFX_K_SUCCESS;
}
/*
* =====================
* = dfx_hw_adap_reset =
* =====================
*
* Overview:
* Resets adapter
*
* Returns:
* None
*
* Arguments:
* bp - pointer to board information
* type - type of reset to perform
*
* Functional Description:
* Issue soft reset to adapter by writing to PDQ Port Reset
* register. Use incoming reset type to tell adapter what
* kind of reset operation to perform.
*
* Return Codes:
* None
*
* Assumptions:
* This routine merely issues a soft reset to the adapter.
* It is expected that after this routine returns, the caller
* will appropriately poll the Port Status register for the
* adapter to enter the proper state.
*
* Side Effects:
* Internal adapter registers are cleared.
*/
static void dfx_hw_adap_reset(
DFX_board_t *bp,
PI_UINT32 type
)
{
/* Set Reset type and assert reset */
dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type); /* tell adapter type of reset */
dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
/* Wait for at least 1 microsecond according to the spec. We wait 20 just to be safe */
udelay(20);
/* Deassert reset */
dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
}
/*
* ========================
* = dfx_hw_adap_state_rd =
* ========================
*
* Overview:
* Returns current adapter state
*
* Returns:
* Adapter state per PDQ Port Specification
*
* Arguments:
* bp - pointer to board information
*
* Functional Description:
* Reads PDQ Port Status register and returns adapter state.
*
* Return Codes:
* None
*
* Assumptions:
* None
*
* Side Effects:
* None
*/
static int dfx_hw_adap_state_rd(DFX_board_t *bp)
{
PI_UINT32 port_status; /* Port Status register value */
dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
}
/*
* =====================
* = dfx_hw_dma_uninit =
* =====================
*
* Overview:
* Brings adapter to DMA_UNAVAILABLE state
*
* Returns:
* Condition code
*
* Arguments:
* bp - pointer to board information
* type - type of reset to perform
*
* Functional Description:
* Bring adapter to DMA_UNAVAILABLE state by performing the following:
* 1. Set reset type bit in Port Data A Register then reset adapter.
* 2. Check that adapter is in DMA_UNAVAILABLE state.
*
* Return Codes:
* DFX_K_SUCCESS - adapter is in DMA_UNAVAILABLE state
* DFX_K_HW_TIMEOUT - adapter did not reset properly
*
* Assumptions:
* None
*
* Side Effects:
* Internal adapter registers are cleared.
*/
static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
{
int timeout_cnt; /* used in for loops */
/* Set reset type bit and reset adapter */
dfx_hw_adap_reset(bp, type);
/* Now wait for adapter to enter DMA_UNAVAILABLE state */
for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
{
if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
break;
udelay(100); /* wait for 100 microseconds */
}
if (timeout_cnt == 0)
return DFX_K_HW_TIMEOUT;
return DFX_K_SUCCESS;
}
/*
 * Align an sk_buff's data to a power-of-2 boundary
*
*/
#ifdef DYNAMIC_BUFFERS
static void my_skb_align(struct sk_buff *skb, int n)
{
unsigned long x = (unsigned long)skb->data;
unsigned long v;
v = ALIGN(x, n); /* Where we want to be */
skb_reserve(skb, v - x);
}
#endif
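/*
 * Editor's note (worked example): if skb->data happens to sit at address
 * 0x1036 and n is 128, ALIGN() rounds up to 0x1080, so skb_reserve()
 * advances the data pointer by 0x4a bytes and leaves it on a 128-byte
 * boundary, as the receive path below requires for the older EISA boards.
 */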
/*
* ================
* = dfx_rcv_init =
* ================
*
* Overview:
* Produces buffers to adapter LLC Host receive descriptor block
*
* Returns:
* None
*
* Arguments:
* bp - pointer to board information
* get_buffers - non-zero if buffers to be allocated
*
* Functional Description:
* This routine can be called during dfx_adap_init() or during an adapter
* reset. It initializes the descriptor block and produces all allocated
* LLC Host queue receive buffers.
*
* Return Codes:
* Return 0 on success or -ENOMEM if buffer allocation failed (when using
* dynamic buffer allocation). If the buffer allocation failed, the
* already allocated buffers will not be released and the caller should do
* this.
*
* Assumptions:
* The PDQ has been reset and the adapter and driver maintained Type 2
* register indices are cleared.
*
* Side Effects:
* Receive buffers are posted to the adapter LLC queue and the adapter
* is notified.
*/
static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
{
int i, j; /* used in for loop */
/*
* Since each receive buffer is a single fragment of same length, initialize
* first longword in each receive descriptor for entire LLC Host descriptor
* block. Also initialize second longword in each receive descriptor with
* physical address of receive buffer. We'll always allocate receive
* buffers in powers of 2 so that we can easily fill the 256 entry descriptor
* block and produce new receive buffers by simply updating the receive
* producer index.
*
* Assumptions:
* To support all shipping versions of PDQ, the receive buffer size
* must be mod 128 in length and the physical address must be 128 byte
* aligned. In other words, bits 0-6 of the length and address must
* be zero for the following descriptor field entries to be correct on
* all PDQ-based boards. We guaranteed both requirements during
* driver initialization when we allocated memory for the receive buffers.
*/
if (get_buffers) {
#ifdef DYNAMIC_BUFFERS
for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
{
struct sk_buff *newskb;
dma_addr_t dma_addr;
newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE,
GFP_NOIO);
if (!newskb)
return -ENOMEM;
/*
* align to 128 bytes for compatibility with
* the old EISA boards.
*/
my_skb_align(newskb, 128);
dma_addr = dma_map_single(bp->bus_dev,
newskb->data,
PI_RCV_DATA_K_SIZE_MAX,
DMA_FROM_DEVICE);
if (dma_mapping_error(bp->bus_dev, dma_addr)) {
dev_kfree_skb(newskb);
return -ENOMEM;
}
bp->descr_block_virt->rcv_data[i + j].long_0 =
(u32)(PI_RCV_DESCR_M_SOP |
((PI_RCV_DATA_K_SIZE_MAX /
PI_ALIGN_K_RCV_DATA_BUFF) <<
PI_RCV_DESCR_V_SEG_LEN));
bp->descr_block_virt->rcv_data[i + j].long_1 =
(u32)dma_addr;
/*
* p_rcv_buff_va is only used inside the
* kernel so we put the skb pointer here.
*/
bp->p_rcv_buff_va[i+j] = (char *) newskb;
}
#else
for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
{
bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
}
#endif
}
/* Update receive producer and Type 2 register */
bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
return 0;
}
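/*
 * Editor's note: the nested loops above fill the descriptor block in an
 * interleaved order.  With, say, rcv_bufs_to_post == 8 and
 * PI_RCV_DATA_K_NUM_ENTRIES == 256 (values assumed for illustration),
 * buffer i is posted to descriptor slots i, i + 8, i + 16, ..., so in the
 * statically allocated case the 8 buffers cover all 256 descriptor
 * entries as the producer index cycles; in the DYNAMIC_BUFFERS case every
 * slot instead receives its own freshly mapped sk_buff.
 */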
/*
* =========================
* = dfx_rcv_queue_process =
* =========================
*
* Overview:
* Process received LLC frames.
*
* Returns:
* None
*
* Arguments:
* bp - pointer to board information
*
* Functional Description:
* Received LLC frames are processed until there are no more consumed frames.
* Once all frames are processed, the receive buffers are returned to the
* adapter. Note that this algorithm fixes the length of time that can be spent
* in this routine, because there are a fixed number of receive buffers to
* process and buffers are not produced until this routine exits and returns
* to the ISR.
*
* Return Codes:
* None
*
* Assumptions:
* None
*
* Side Effects:
* None
*/
static void dfx_rcv_queue_process(
DFX_board_t *bp
)
{
PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
char *p_buff; /* ptr to start of packet receive buffer (FMC descriptor) */
u32 descr, pkt_len; /* FMC descriptor field and packet length */
struct sk_buff *skb = NULL; /* pointer to a sk_buff to hold incoming packet data */
/* Service all consumed LLC receive frames */
p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
{
/* Process any errors */
dma_addr_t dma_addr;
int entry;
entry = bp->rcv_xmt_reg.index.rcv_comp;
#ifdef DYNAMIC_BUFFERS
p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
#else
p_buff = bp->p_rcv_buff_va[entry];
#endif
dma_addr = bp->descr_block_virt->rcv_data[entry].long_1;
dma_sync_single_for_cpu(bp->bus_dev,
dma_addr + RCV_BUFF_K_DESCR,
sizeof(u32),
DMA_FROM_DEVICE);
memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
{
if (descr & PI_FMC_DESCR_M_RCC_CRC)
bp->rcv_crc_errors++;
else
bp->rcv_frame_status_errors++;
}
else
{
int rx_in_place = 0;
/* The frame was received without errors - verify packet length */
pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
pkt_len -= 4; /* subtract 4 byte CRC */
if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
bp->rcv_length_errors++;
else {
#ifdef DYNAMIC_BUFFERS
struct sk_buff *newskb = NULL;
if (pkt_len > SKBUFF_RX_COPYBREAK) {
dma_addr_t new_dma_addr;
newskb = netdev_alloc_skb(bp->dev,
NEW_SKB_SIZE);
if (newskb) {
my_skb_align(newskb, 128);
new_dma_addr = dma_map_single(
bp->bus_dev,
newskb->data,
PI_RCV_DATA_K_SIZE_MAX,
DMA_FROM_DEVICE);
if (dma_mapping_error(
bp->bus_dev,
new_dma_addr)) {
dev_kfree_skb(newskb);
newskb = NULL;
}
}
if (newskb) {
rx_in_place = 1;
skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
dma_unmap_single(bp->bus_dev,
dma_addr,
PI_RCV_DATA_K_SIZE_MAX,
DMA_FROM_DEVICE);
skb_reserve(skb, RCV_BUFF_K_PADDING);
bp->p_rcv_buff_va[entry] = (char *)newskb;
bp->descr_block_virt->rcv_data[entry].long_1 = (u32)new_dma_addr;
}
}
if (!newskb)
#endif
/* Alloc new buffer to pass up,
* add room for PRH. */
skb = netdev_alloc_skb(bp->dev,
pkt_len + 3);
if (skb == NULL)
{
printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name);
bp->rcv_discards++;
break;
}
else {
if (!rx_in_place) {
/* Receive buffer allocated, pass receive packet up */
dma_sync_single_for_cpu(
bp->bus_dev,
dma_addr +
RCV_BUFF_K_PADDING,
pkt_len + 3,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb,
p_buff + RCV_BUFF_K_PADDING,
pkt_len + 3);
}
skb_reserve(skb, 3); /* adjust data field so that it points to FC byte */
skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */
skb->protocol = fddi_type_trans(skb, bp->dev);
bp->rcv_total_bytes += skb->len;
netif_rx(skb);
/* Update the rcv counters */
bp->rcv_total_frames++;
if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
bp->rcv_multicast_frames++;
}
}
}
/*
* Advance the producer (for recycling) and advance the completion
* (for servicing received frames). Note that it is okay to
* advance the producer without checking that it passes the
* completion index because they are both advanced at the same
* rate.
*/
bp->rcv_xmt_reg.index.rcv_prod += 1;
bp->rcv_xmt_reg.index.rcv_comp += 1;
}
}
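/*
 * Editor's note: the receive loop above implements a copybreak scheme.
 * Frames longer than SKBUFF_RX_COPYBREAK are handed up in place: the
 * filled sk_buff is detached, unmapped and replaced by a freshly mapped
 * one (rx_in_place).  Shorter frames are copied into a small new sk_buff
 * so the original DMA buffer can be recycled to the adapter immediately.
 */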
/*
* =====================
* = dfx_xmt_queue_pkt =
* =====================
*
* Overview:
* Queues packets for transmission
*
* Returns:
* Condition code
*
* Arguments:
* skb - pointer to sk_buff to queue for transmission
* dev - pointer to device information
*
* Functional Description:
* Here we assume that an incoming skb transmit request
* is contained in a single physically contiguous buffer
* in which the virtual address of the start of packet
* (skb->data) can be converted to a physical address
* by using dma_map_single().
*
* Since the adapter architecture requires a three byte
* packet request header to prepend the start of packet,
* we'll write the three byte field immediately prior to
* the FC byte. This assumption is valid because we've
* ensured that dev->hard_header_len includes three pad
* bytes. By posting a single fragment to the adapter,
* we'll reduce the number of descriptor fetches and
* bus traffic needed to send the request.
*
* Also, we can't free the skb until after it's been DMA'd
* out by the adapter, so we'll queue it in the driver and
* return it in dfx_xmt_done.
*
* Return Codes:
 *   NETDEV_TX_OK - packet was queued to the adapter or dropped (bad length
 *                  or link unavailable)
 *   NETDEV_TX_BUSY - caller should requeue the sk_buff for later transmission
*
* Assumptions:
* First and foremost, we assume the incoming skb pointer
* is NOT NULL and is pointing to a valid sk_buff structure.
*
* The outgoing packet is complete, starting with the
* frame control byte including the last byte of data,
* but NOT including the 4 byte CRC. We'll let the
* adapter hardware generate and append the CRC.
*
* The entire packet is stored in one physically
* contiguous buffer which is not cached and whose
* 32-bit physical address can be determined.
*
* It's vital that this routine is NOT reentered for the
* same board and that the OS is not in another section of
* code (eg. dfx_int_common) for the same board on a
* different thread.
*
* Side Effects:
* None
*/
static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
u8 prod; /* local transmit producer index */
PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */
XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
dma_addr_t dma_addr;
unsigned long flags;
netif_stop_queue(dev);
/*
* Verify that incoming transmit request is OK
*
* Note: The packet size check is consistent with other
* Linux device drivers, although the correct packet
* size should be verified before calling the
* transmit routine.
*/
if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
{
printk("%s: Invalid packet length - %u bytes\n",
dev->name, skb->len);
bp->xmt_length_errors++; /* bump error counter */
netif_wake_queue(dev);
dev_kfree_skb(skb);
return NETDEV_TX_OK; /* return "success" */
}
/*
* See if adapter link is available, if not, free buffer
*
 * Note: If the link isn't available, free the buffer and return NETDEV_TX_OK
* rather than tell the upper layer to requeue the packet.
* The methodology here is that by the time the link
* becomes available, the packet to be sent will be
* fairly stale. By simply dropping the packet, the
* higher layer protocols will eventually time out
* waiting for response packets which it won't receive.
*/
if (bp->link_available == PI_K_FALSE)
{
if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL) /* is link really available? */
bp->link_available = PI_K_TRUE; /* if so, set flag and continue */
else
{
bp->xmt_discards++; /* bump error counter */
dev_kfree_skb(skb); /* free sk_buff now */
netif_wake_queue(dev);
return NETDEV_TX_OK; /* return "success" */
}
}
/* Write the three PRH bytes immediately before the FC byte */
skb_push(skb, 3);
skb->data[0] = DFX_PRH0_BYTE; /* these byte values are defined */
skb->data[1] = DFX_PRH1_BYTE; /* in the Motorola FDDI MAC chip */
skb->data[2] = DFX_PRH2_BYTE; /* specification */
dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(bp->bus_dev, dma_addr)) {
skb_pull(skb, 3);
return NETDEV_TX_BUSY;
}
spin_lock_irqsave(&bp->lock, flags);
/* Get the current producer and the next free xmt data descriptor */
prod = bp->rcv_xmt_reg.index.xmt_prod;
p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);
/*
* Get pointer to auxiliary queue entry to contain information
* for this packet.
*
* Note: The current xmt producer index will become the
* current xmt completion index when we complete this
* packet later on. So, we'll get the pointer to the
* next auxiliary queue entry now before we bump the
* producer index.
*/
p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]); /* also bump producer index */
/*
* Write the descriptor with buffer info and bump producer
*
* Note: Since we need to start DMA from the packet request
* header, we'll add 3 bytes to the DMA buffer length,
* and we'll determine the physical address of the
* buffer from the PRH, not skb->data.
*
* Assumptions:
* 1. Packet starts with the frame control (FC) byte
* at skb->data.
* 2. The 4-byte CRC is not appended to the buffer or
* included in the length.
* 3. Packet length (skb->len) is from FC to end of
* data, inclusive.
* 4. The packet length does not exceed the maximum
* FDDI LLC frame length of 4491 bytes.
* 5. The entire packet is contained in a physically
* contiguous, non-cached, locked memory space
* comprised of a single buffer pointed to by
* skb->data.
* 6. The physical address of the start of packet
* can be determined from the virtual address
* by using dma_map_single() and is only 32-bits
* wide.
*/
p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
p_xmt_descr->long_1 = (u32)dma_addr;
/*
* Verify that descriptor is actually available
*
 * Note: If the descriptor isn't available, return NETDEV_TX_BUSY,
 * which tells the upper layer to requeue the packet for later
 * transmission.
*
* We need to ensure that the producer never reaches the
* completion, except to indicate that the queue is empty.
*/
if (prod == bp->rcv_xmt_reg.index.xmt_comp)
{
skb_pull(skb, 3);
spin_unlock_irqrestore(&bp->lock, flags);
return NETDEV_TX_BUSY; /* requeue packet for later */
}
/*
* Save info for this packet for xmt done indication routine
*
* Normally, we'd save the producer index in the p_xmt_drv_descr
* structure so that we'd have it handy when we complete this
* packet later (in dfx_xmt_done). However, since the current
* transmit architecture guarantees a single fragment for the
* entire packet, we can simply bump the completion index by
* one (1) for each completed packet.
*
* Note: If this assumption changes and we're presented with
* an inconsistent number of transmit fragments for packet
* data, we'll need to modify this code to save the current
* transmit producer index.
*/
p_xmt_drv_descr->p_skb = skb;
/* Update Type 2 register */
bp->rcv_xmt_reg.index.xmt_prod = prod;
dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
spin_unlock_irqrestore(&bp->lock, flags);
netif_wake_queue(dev);
return NETDEV_TX_OK; /* packet queued to adapter */
}
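/*
 * Editor's note: layout of the buffer handed to the adapter by the
 * routine above, after skb_push(skb, 3) has prepended the packet request
 * header (PRH) in front of the frame control byte:
 *
 *	skb->data:  | PRH0 | PRH1 | PRH2 | FC | DA SA ... LLC data |
 *	                                   ^ start of the FDDI LLC frame
 *
 * The 4-byte CRC is not part of the buffer; the adapter appends it in
 * hardware, as noted in the assumptions above.
 */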
/*
* ================
* = dfx_xmt_done =
* ================
*
* Overview:
* Processes all frames that have been transmitted.
*
* Returns:
* None
*
* Arguments:
* bp - pointer to board information
*
* Functional Description:
* For all consumed transmit descriptors that have not
* yet been completed, we'll free the skb we were holding
* onto using dev_kfree_skb and bump the appropriate
* counters.
*
* Return Codes:
* None
*
* Assumptions:
* The Type 2 register is not updated in this routine. It is
* assumed that it will be updated in the ISR when dfx_xmt_done
* returns.
*
* Side Effects:
* None
*/
static int dfx_xmt_done(DFX_board_t *bp)
{
XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
u8 comp; /* local transmit completion index */
int freed = 0; /* buffers freed */
/* Service all consumed transmit frames */
p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
{
/* Get pointer to the transmit driver descriptor block information */
p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
/* Increment transmit counters */
bp->xmt_total_frames++;
bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
/* Return skb to operating system */
comp = bp->rcv_xmt_reg.index.xmt_comp;
dma_unmap_single(bp->bus_dev,
bp->descr_block_virt->xmt_data[comp].long_1,
p_xmt_drv_descr->p_skb->len,
DMA_TO_DEVICE);
dev_consume_skb_irq(p_xmt_drv_descr->p_skb);
/*
* Move to start of next packet by updating completion index
*
* Here we assume that a transmit packet request is always
* serviced by posting one fragment. We can therefore
* simplify the completion code by incrementing the
* completion index by one. This code will need to be
* modified if this assumption changes. See comments
* in dfx_xmt_queue_pkt for more details.
*/
bp->rcv_xmt_reg.index.xmt_comp += 1;
freed++;
}
return freed;
}
/*
* =================
* = dfx_rcv_flush =
* =================
*
* Overview:
* Remove all skb's in the receive ring.
*
* Returns:
* None
*
* Arguments:
* bp - pointer to board information
*
* Functional Description:
 *   Frees all the dynamically allocated skb's that are
* currently attached to the device receive ring. This
* function is typically only used when the device is
* initialized or reinitialized.
*
* Return Codes:
* None
*
* Side Effects:
* None
*/
#ifdef DYNAMIC_BUFFERS
static void dfx_rcv_flush( DFX_board_t *bp )
{
int i, j;
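	/*
	 * The two loops together visit every receive ring entry exactly
	 * once (indices 0..PI_RCV_DATA_K_NUM_ENTRIES-1, walked in strides
	 * of rcv_bufs_to_post) and release any skb still attached to it.
	 */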
for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
{
struct sk_buff *skb;
skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
if (skb) {
dma_unmap_single(bp->bus_dev,
bp->descr_block_virt->rcv_data[i+j].long_1,
PI_RCV_DATA_K_SIZE_MAX,
DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
bp->p_rcv_buff_va[i+j] = NULL;
}
}
#endif /* DYNAMIC_BUFFERS */
/*
* =================
* = dfx_xmt_flush =
* =================
*
* Overview:
* Processes all frames whether they've been transmitted
* or not.
*
* Returns:
* None
*
* Arguments:
* bp - pointer to board information
*
* Functional Description:
* For all produced transmit descriptors that have not
* yet been completed, we'll free the skb we were holding
* onto using dev_kfree_skb and bump the appropriate
* counters. Of course, it's possible that some of
* these transmit requests actually did go out, but we
* won't make that distinction here. Finally, we'll
* update the consumer index to match the producer.
*
* Return Codes:
* None
*
* Assumptions:
* This routine does NOT update the Type 2 register. It
* is assumed that this routine is being called during a
* transmit flush interrupt, or a shutdown or close routine.
*
* Side Effects:
* None
*/
static void dfx_xmt_flush( DFX_board_t *bp )
{
u32 prod_cons; /* rcv/xmt consumer block longword */
XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
u8 comp; /* local transmit completion index */
/* Flush all outstanding transmit frames */
while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
{
/* Get pointer to the transmit driver descriptor block information */
p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
/* Return skb to operating system */
comp = bp->rcv_xmt_reg.index.xmt_comp;
dma_unmap_single(bp->bus_dev,
bp->descr_block_virt->xmt_data[comp].long_1,
p_xmt_drv_descr->p_skb->len,
DMA_TO_DEVICE);
dev_kfree_skb(p_xmt_drv_descr->p_skb);
/* Increment transmit error counter */
bp->xmt_discards++;
/*
* Move to start of next packet by updating completion index
*
* Here we assume that a transmit packet request is always
* serviced by posting one fragment. We can therefore
* simplify the completion code by incrementing the
* completion index by one. This code will need to be
* modified if this assumption changes. See comments
* in dfx_xmt_queue_pkt for more details.
*/
bp->rcv_xmt_reg.index.xmt_comp += 1;
}
/* Update the transmit consumer index in the consumer block */
prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
bp->cons_block_virt->xmt_rcv_data = prod_cons;
}
/*
* ==================
* = dfx_unregister =
* ==================
*
* Overview:
* Shuts down an FDDI controller
*
* Returns:
 *   None
*
* Arguments:
* bdev - pointer to device information
*
* Functional Description:
 *   Unregisters the network device, frees the shared DMA memory block,
 *   uninitializes the bus interface and releases the I/O and memory
 *   regions that were claimed when the adapter was registered.
* Return Codes:
* None
*
* Assumptions:
* It compiles so it should work :-( (PCI cards do :-)
*
* Side Effects:
* Device structures for FDDI adapters (fddi0, fddi1, etc) are
* freed.
*/
static void dfx_unregister(struct device *bdev)
{
struct net_device *dev = dev_get_drvdata(bdev);
DFX_board_t *bp = netdev_priv(dev);
int dfx_bus_pci = dev_is_pci(bdev);
resource_size_t bar_start[3] = {0}; /* pointers to ports */
resource_size_t bar_len[3] = {0}; /* resource lengths */
int alloc_size; /* total buffer size used */
unregister_netdev(dev);
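	/*
	 * This size must mirror the size of the shared DMA block that was
	 * allocated when the adapter was brought up; it is recomputed here
	 * so the whole block can be returned with dma_free_coherent().
	 */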
alloc_size = sizeof(PI_DESCR_BLOCK) +
PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
(bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
sizeof(PI_CONSUMER_BLOCK) +
(PI_ALIGN_K_DESC_BLK - 1);
if (bp->kmalloced)
dma_free_coherent(bdev, alloc_size,
bp->kmalloced, bp->kmalloced_dma);
dfx_bus_uninit(dev);
dfx_get_bars(bp, bar_start, bar_len);
if (bar_start[2] != 0)
release_region(bar_start[2], bar_len[2]);
if (bar_start[1] != 0)
release_region(bar_start[1], bar_len[1]);
if (dfx_use_mmio) {
iounmap(bp->base.mem);
release_mem_region(bar_start[0], bar_len[0]);
} else
release_region(bar_start[0], bar_len[0]);
if (dfx_bus_pci)
pci_disable_device(to_pci_dev(bdev));
free_netdev(dev);
}
static int __maybe_unused dfx_dev_register(struct device *);
static int __maybe_unused dfx_dev_unregister(struct device *);
#ifdef CONFIG_PCI
static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
static void dfx_pci_unregister(struct pci_dev *);
static const struct pci_device_id dfx_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
{ }
};
MODULE_DEVICE_TABLE(pci, dfx_pci_table);
static struct pci_driver dfx_pci_driver = {
.name = DRV_NAME,
.id_table = dfx_pci_table,
.probe = dfx_pci_register,
.remove = dfx_pci_unregister,
};
static int dfx_pci_register(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
return dfx_register(&pdev->dev);
}
static void dfx_pci_unregister(struct pci_dev *pdev)
{
dfx_unregister(&pdev->dev);
}
#endif /* CONFIG_PCI */
#ifdef CONFIG_EISA
static const struct eisa_device_id dfx_eisa_table[] = {
{ "DEC3001", DEFEA_PROD_ID_1 },
{ "DEC3002", DEFEA_PROD_ID_2 },
{ "DEC3003", DEFEA_PROD_ID_3 },
{ "DEC3004", DEFEA_PROD_ID_4 },
{ }
};
MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);
static struct eisa_driver dfx_eisa_driver = {
.id_table = dfx_eisa_table,
.driver = {
.name = DRV_NAME,
.bus = &eisa_bus_type,
.probe = dfx_dev_register,
.remove = dfx_dev_unregister,
},
};
#endif /* CONFIG_EISA */
#ifdef CONFIG_TC
static struct tc_device_id const dfx_tc_table[] = {
{ "DEC ", "PMAF-FA " },
{ "DEC ", "PMAF-FD " },
{ "DEC ", "PMAF-FS " },
{ "DEC ", "PMAF-FU " },
{ }
};
MODULE_DEVICE_TABLE(tc, dfx_tc_table);
static struct tc_driver dfx_tc_driver = {
.id_table = dfx_tc_table,
.driver = {
.name = DRV_NAME,
.bus = &tc_bus_type,
.probe = dfx_dev_register,
.remove = dfx_dev_unregister,
},
};
#endif /* CONFIG_TC */
static int __maybe_unused dfx_dev_register(struct device *dev)
{
int status;
status = dfx_register(dev);
if (!status)
get_device(dev);
return status;
}
static int __maybe_unused dfx_dev_unregister(struct device *dev)
{
put_device(dev);
dfx_unregister(dev);
return 0;
}
static int dfx_init(void)
{
int status;
status = pci_register_driver(&dfx_pci_driver);
if (status)
goto err_pci_register;
status = eisa_driver_register(&dfx_eisa_driver);
if (status)
goto err_eisa_register;
status = tc_register_driver(&dfx_tc_driver);
if (status)
goto err_tc_register;
return 0;
err_tc_register:
eisa_driver_unregister(&dfx_eisa_driver);
err_eisa_register:
pci_unregister_driver(&dfx_pci_driver);
err_pci_register:
return status;
}
static void dfx_cleanup(void)
{
tc_unregister_driver(&dfx_tc_driver);
eisa_driver_unregister(&dfx_eisa_driver);
pci_unregister_driver(&dfx_pci_driver);
}
module_init(dfx_init);
module_exit(dfx_cleanup);
MODULE_AUTHOR("Lawrence V. Stefani");
MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
DRV_VERSION " " DRV_RELDATE);
MODULE_LICENSE("GPL");
| linux-master | drivers/net/fddi/defxx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
* FBI board dependent Driver for SMT and LLC
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/supern_2.h"
#include "h/skfbiinc.h"
#include <linux/bitrev.h>
#include <linux/pci.h>
/*
* PCM active state
*/
#define PC8_ACTIVE 8
#define LED_Y_ON 0x11 /* Used for ring up/down indication */
#define LED_Y_OFF 0x10
#define MS2BCLK(x) ((x)*12500L)
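/* milliseconds to bus clocks: 12500 clocks per ms, i.e. a 12.5 MHz reference */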
/*
* valid configuration values are:
*/
/*
* xPOS_ID:xxxx
* | \ /
* | \/
* | --------------------- the patched POS_ID of the Adapter
* | xxxx = (Vendor ID low byte,
* | Vendor ID high byte,
* | Device ID low byte,
* | Device ID high byte)
* +------------------------------ the patched oem_id must be
* 'S' for SK or 'I' for IBM
* this is a short id for the driver.
*/
#ifndef MULT_OEM
#ifndef OEM_CONCEPT
const u_char oem_id[] = "xPOS_ID:xxxx" ;
#else /* OEM_CONCEPT */
const u_char oem_id[] = OEM_ID ;
#endif /* OEM_CONCEPT */
#define ID_BYTE0 8
#define OEMID(smc,i) oem_id[ID_BYTE0 + i]
#else /* MULT_OEM */
const struct s_oem_ids oem_ids[] = {
#include "oemids.h"
{0}
};
#define OEMID(smc,i) smc->hw.oem_id->oi_id[i]
#endif /* MULT_OEM */
/* Prototypes of external functions */
#ifdef AIX
extern int AIX_vpdReadByte() ;
#endif
/* Prototype of a local function. */
static void smt_stop_watchdog(struct s_smc *smc);
/*
* FDDI card reset
*/
static void card_start(struct s_smc *smc)
{
int i ;
#ifdef PCI
u_char rev_id ;
u_short word;
#endif
smt_stop_watchdog(smc) ;
#ifdef PCI
/*
* make sure no transfer activity is pending
*/
outpw(FM_A(FM_MDREG1),FM_MINIT) ;
outp(ADDR(B0_CTRL), CTRL_HPI_SET) ;
hwt_wait_time(smc,hwt_quick_read(smc),MS2BCLK(10)) ;
/*
* now reset everything
*/
outp(ADDR(B0_CTRL),CTRL_RST_SET) ; /* reset for all chips */
i = (int) inp(ADDR(B0_CTRL)) ; /* do dummy read */
SK_UNUSED(i) ; /* Make LINT happy. */
outp(ADDR(B0_CTRL), CTRL_RST_CLR) ;
/*
* Reset all bits in the PCI STATUS register
*/
outp(ADDR(B0_TST_CTRL), TST_CFG_WRITE_ON) ; /* enable for writes */
word = inpw(PCI_C(PCI_STATUS)) ;
outpw(PCI_C(PCI_STATUS), word | PCI_STATUS_ERROR_BITS);
outp(ADDR(B0_TST_CTRL), TST_CFG_WRITE_OFF) ; /* disable writes */
/*
* Release the reset of all the State machines
* Release Master_Reset
* Release HPI_SM_Reset
*/
outp(ADDR(B0_CTRL), CTRL_MRST_CLR|CTRL_HPI_CLR) ;
/*
* determine the adapter type
* Note: Do it here, because some drivers may call card_start() once
 *	 at the very beginning, before any other initialization function
 *	 is executed.
*/
rev_id = inp(PCI_C(PCI_REVISION_ID)) ;
if ((rev_id & 0xf0) == SK_ML_ID_1 || (rev_id & 0xf0) == SK_ML_ID_2) {
smc->hw.hw_is_64bit = TRUE ;
} else {
smc->hw.hw_is_64bit = FALSE ;
}
/*
* Watermark initialization
*/
if (!smc->hw.hw_is_64bit) {
outpd(ADDR(B4_R1_F), RX_WATERMARK) ;
outpd(ADDR(B5_XA_F), TX_WATERMARK) ;
outpd(ADDR(B5_XS_F), TX_WATERMARK) ;
}
outp(ADDR(B0_CTRL),CTRL_RST_CLR) ; /* clear the reset chips */
	outp(ADDR(B0_LED),LED_GA_OFF|LED_MY_ON|LED_GB_OFF) ;	/* yellow LED on */
	/* init the timer value for the watchdog: 2.5 minutes */
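	/*
	 * 0x6FC23AC0 = 1,875,000,000 bus clocks = 150 s at 12.5 MHz
	 * (cf. MS2BCLK()), i.e. 2.5 minutes.
	 */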
outpd(ADDR(B2_WDOG_INI),0x6FC23AC0) ;
/* initialize the ISR mask */
smc->hw.is_imask = ISR_MASK ;
smc->hw.hw_state = STOPPED ;
#endif
GET_PAGE(0) ; /* necessary for BOOT */
}
void card_stop(struct s_smc *smc)
{
smt_stop_watchdog(smc) ;
smc->hw.mac_ring_is_up = 0 ; /* ring down */
#ifdef PCI
/*
* make sure no transfer activity is pending
*/
outpw(FM_A(FM_MDREG1),FM_MINIT) ;
outp(ADDR(B0_CTRL), CTRL_HPI_SET) ;
hwt_wait_time(smc,hwt_quick_read(smc),MS2BCLK(10)) ;
/*
* now reset everything
*/
outp(ADDR(B0_CTRL),CTRL_RST_SET) ; /* reset for all chips */
outp(ADDR(B0_CTRL),CTRL_RST_CLR) ; /* reset for all chips */
outp(ADDR(B0_LED),LED_GA_OFF|LED_MY_OFF|LED_GB_OFF) ; /* all LEDs off */
smc->hw.hw_state = STOPPED ;
#endif
}
/*--------------------------- ISR handling ----------------------------------*/
void mac1_irq(struct s_smc *smc, u_short stu, u_short stl)
{
int restart_tx = 0 ;
again:
/*
* parity error: note encoding error is not possible in tag mode
*/
if (stl & (FM_SPCEPDS | /* parity err. syn.q.*/
FM_SPCEPDA0 | /* parity err. a.q.0 */
FM_SPCEPDA1)) { /* parity err. a.q.1 */
SMT_PANIC(smc,SMT_E0134, SMT_E0134_MSG) ;
}
/*
* buffer underrun: can only occur if a tx threshold is specified
*/
if (stl & (FM_STBURS | /* tx buffer underrun syn.q.*/
FM_STBURA0 | /* tx buffer underrun a.q.0 */
		   FM_STBURA1)) {	/* tx buffer underrun a.q.1 */
SMT_PANIC(smc,SMT_E0133, SMT_E0133_MSG) ;
}
if ( (stu & (FM_SXMTABT | /* transmit abort */
FM_STXABRS | /* syn. tx abort */
FM_STXABRA0)) || /* asyn. tx abort */
(stl & (FM_SQLCKS | /* lock for syn. q. */
FM_SQLCKA0)) ) { /* lock for asyn. q. */
formac_tx_restart(smc) ; /* init tx */
restart_tx = 1 ;
stu = inpw(FM_A(FM_ST1U)) ;
stl = inpw(FM_A(FM_ST1L)) ;
stu &= ~ (FM_STECFRMA0 | FM_STEFRMA0 | FM_STEFRMS) ;
if (stu || stl)
goto again ;
}
if (stu & (FM_STEFRMA0 | /* end of asyn tx */
FM_STEFRMS)) { /* end of sync tx */
restart_tx = 1 ;
}
if (restart_tx)
llc_restart_tx(smc) ;
}
/*
* interrupt source= plc1
* this function is called in nwfbisr.asm
*/
void plc1_irq(struct s_smc *smc)
{
u_short st = inpw(PLC(PB,PL_INTR_EVENT)) ;
plc_irq(smc,PB,st) ;
}
/*
* interrupt source= plc2
* this function is called in nwfbisr.asm
*/
void plc2_irq(struct s_smc *smc)
{
u_short st = inpw(PLC(PA,PL_INTR_EVENT)) ;
plc_irq(smc,PA,st) ;
}
/*
* interrupt source= timer
*/
void timer_irq(struct s_smc *smc)
{
hwt_restart(smc);
smc->hw.t_stop = smc->hw.t_start;
smt_timer_done(smc) ;
}
/*
* return S-port (PA or PB)
*/
int pcm_get_s_port(struct s_smc *smc)
{
SK_UNUSED(smc) ;
return PS;
}
/*
* Station Label = "FDDI-XYZ" where
*
* X = connector type
* Y = PMD type
* Z = port type
*/
#define STATION_LABEL_CONNECTOR_OFFSET 5
#define STATION_LABEL_PMD_OFFSET 6
#define STATION_LABEL_PORT_OFFSET 7
void read_address(struct s_smc *smc, u_char *mac_addr)
{
char ConnectorType ;
char PmdType ;
int i ;
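	/*
	 * bitrev8() flips the bit order of each address byte:
	 * fddi_phys_addr/fddi_home_addr hold the bit-reversed form of what
	 * the board (or the caller's mac_addr) provides, while
	 * fddi_canon_addr keeps the canonical byte values.
	 */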
#ifdef PCI
for (i = 0; i < 6; i++) { /* read mac address from board */
smc->hw.fddi_phys_addr.a[i] =
bitrev8(inp(ADDR(B2_MAC_0+i)));
}
#endif
ConnectorType = inp(ADDR(B2_CONN_TYP)) ;
PmdType = inp(ADDR(B2_PMD_TYP)) ;
smc->y[PA].pmd_type[PMD_SK_CONN] =
smc->y[PB].pmd_type[PMD_SK_CONN] = ConnectorType ;
smc->y[PA].pmd_type[PMD_SK_PMD ] =
smc->y[PB].pmd_type[PMD_SK_PMD ] = PmdType ;
if (mac_addr) {
for (i = 0; i < 6 ;i++) {
smc->hw.fddi_canon_addr.a[i] = mac_addr[i] ;
smc->hw.fddi_home_addr.a[i] = bitrev8(mac_addr[i]);
}
return ;
}
smc->hw.fddi_home_addr = smc->hw.fddi_phys_addr ;
for (i = 0; i < 6 ;i++) {
smc->hw.fddi_canon_addr.a[i] =
bitrev8(smc->hw.fddi_phys_addr.a[i]);
}
}
/*
* FDDI card soft reset
*/
void init_board(struct s_smc *smc, u_char *mac_addr)
{
card_start(smc) ;
read_address(smc,mac_addr) ;
if (!(inp(ADDR(B0_DAS)) & DAS_AVAIL))
smc->s.sas = SMT_SAS ; /* Single att. station */
else
smc->s.sas = SMT_DAS ; /* Dual att. station */
if (!(inp(ADDR(B0_DAS)) & DAS_BYP_ST))
smc->mib.fddiSMTBypassPresent = 0 ;
/* without opt. bypass */
else
smc->mib.fddiSMTBypassPresent = 1 ;
/* with opt. bypass */
}
/*
* insert or deinsert optical bypass (called by ECM)
*/
void sm_pm_bypass_req(struct s_smc *smc, int mode)
{
DB_ECMN(1, "ECM : sm_pm_bypass_req(%s)",
mode == BP_INSERT ? "BP_INSERT" : "BP_DEINSERT");
if (smc->s.sas != SMT_DAS)
return ;
#ifdef PCI
switch(mode) {
case BP_INSERT :
outp(ADDR(B0_DAS),DAS_BYP_INS) ; /* insert station */
break ;
case BP_DEINSERT :
outp(ADDR(B0_DAS),DAS_BYP_RMV) ; /* bypass station */
break ;
}
#endif
}
/*
* check if bypass connected
*/
int sm_pm_bypass_present(struct s_smc *smc)
{
return (inp(ADDR(B0_DAS)) & DAS_BYP_ST) ? TRUE : FALSE;
}
void plc_clear_irq(struct s_smc *smc, int p)
{
SK_UNUSED(p) ;
SK_UNUSED(smc) ;
}
/*
* led_indication called by rmt_indication() and
* pcm_state_change()
*
* Input:
* smc: SMT context
* led_event:
* 0 Only switch green LEDs according to their respective PCM state
* LED_Y_OFF just switch yellow LED off
 *	LED_Y_ON	just switch yellow LED on
*/
static void led_indication(struct s_smc *smc, int led_event)
{
/* use smc->hw.mac_ring_is_up == TRUE
* as indication for Ring Operational
*/
u_short led_state ;
struct s_phy *phy ;
struct fddi_mib_p *mib_a ;
struct fddi_mib_p *mib_b ;
phy = &smc->y[PA] ;
mib_a = phy->mib ;
phy = &smc->y[PB] ;
mib_b = phy->mib ;
#ifdef PCI
led_state = 0 ;
/* Ring up = yellow led OFF*/
if (led_event == LED_Y_ON) {
led_state |= LED_MY_ON ;
}
else if (led_event == LED_Y_OFF) {
led_state |= LED_MY_OFF ;
}
else { /* PCM state changed */
/* Link at Port A/S = green led A ON */
if (mib_a->fddiPORTPCMState == PC8_ACTIVE) {
led_state |= LED_GA_ON ;
}
else {
led_state |= LED_GA_OFF ;
}
/* Link at Port B = green led B ON */
if (mib_b->fddiPORTPCMState == PC8_ACTIVE) {
led_state |= LED_GB_ON ;
}
else {
led_state |= LED_GB_OFF ;
}
}
outp(ADDR(B0_LED), led_state) ;
#endif /* PCI */
}
void pcm_state_change(struct s_smc *smc, int plc, int p_state)
{
/*
* the current implementation of pcm_state_change() in the driver
* parts must be renamed to drv_pcm_state_change() which will be called
* now after led_indication.
*/
DRV_PCM_STATE_CHANGE(smc,plc,p_state) ;
led_indication(smc,0) ;
}
void rmt_indication(struct s_smc *smc, int i)
{
/* Call a driver special function if defined */
DRV_RMT_INDICATION(smc,i) ;
led_indication(smc, i ? LED_Y_OFF : LED_Y_ON) ;
}
/*
* llc_recover_tx called by init_tx (fplus.c)
*/
void llc_recover_tx(struct s_smc *smc)
{
#ifdef LOAD_GEN
extern int load_gen_flag ;
load_gen_flag = 0 ;
#endif
#ifndef SYNC
smc->hw.n_a_send= 0 ;
#else
SK_UNUSED(smc) ;
#endif
}
#ifdef MULT_OEM
static int is_equal_num(char comp1[], char comp2[], int num)
{
int i ;
for (i = 0 ; i < num ; i++) {
if (comp1[i] != comp2[i])
return 0;
}
return 1;
} /* is_equal_num */
/*
* set the OEM ID defaults, and test the contents of the OEM data base
* The default OEM is the first ACTIVE entry in the OEM data base
*
* returns: 0 success
* 1 error in data base
* 2 data base empty
* 3 no active entry
*/
int set_oi_id_def(struct s_smc *smc)
{
int sel_id ;
int i ;
int act_entries ;
i = 0 ;
sel_id = -1 ;
act_entries = FALSE ;
smc->hw.oem_id = 0 ;
smc->hw.oem_min_status = OI_STAT_ACTIVE ;
/* check OEM data base */
while (oem_ids[i].oi_status) {
switch (oem_ids[i].oi_status) {
case OI_STAT_ACTIVE:
act_entries = TRUE ; /* we have active IDs */
if (sel_id == -1)
sel_id = i ; /* save the first active ID */
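			/* fall through: active entries are also valid */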
case OI_STAT_VALID:
case OI_STAT_PRESENT:
i++ ;
break ; /* entry ok */
default:
return 1; /* invalid oi_status */
}
}
if (i == 0)
return 2;
if (!act_entries)
return 3;
/* ok, we have a valid OEM data base with an active entry */
smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[sel_id] ;
return 0;
}
#endif /* MULT_OEM */
void driver_get_bia(struct s_smc *smc, struct fddi_addr *bia_addr)
{
int i ;
for (i = 0 ; i < 6 ; i++)
bia_addr->a[i] = bitrev8(smc->hw.fddi_phys_addr.a[i]);
}
void smt_start_watchdog(struct s_smc *smc)
{
SK_UNUSED(smc) ; /* Make LINT happy. */
#ifndef DEBUG
#ifdef PCI
if (smc->hw.wdog_used) {
outpw(ADDR(B2_WDOG_CRTL),TIM_START) ; /* Start timer. */
}
#endif
#endif /* DEBUG */
}
static void smt_stop_watchdog(struct s_smc *smc)
{
SK_UNUSED(smc) ; /* Make LINT happy. */
#ifndef DEBUG
#ifdef PCI
if (smc->hw.wdog_used) {
outpw(ADDR(B2_WDOG_CRTL),TIM_STOP) ; /* Stop timer. */
}
#endif
#endif /* DEBUG */
}
#ifdef PCI
void mac_do_pci_fix(struct s_smc *smc)
{
SK_UNUSED(smc) ;
}
#endif /* PCI */
| linux-master | drivers/net/fddi/skfp/drvfbi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
SMT 7.2 Status Response Frame Implementation
SRF state machine and frame generation
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/smt_p.h"
#define KERNEL
#include "h/smtstate.h"
#ifndef SLIM_SMT
#ifndef BOOT
/*
* function declarations
*/
static void clear_all_rep(struct s_smc *smc);
static void clear_reported(struct s_smc *smc);
static void smt_send_srf(struct s_smc *smc);
static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index);
#define MAX_EVCS ARRAY_SIZE(smc->evcs)
struct evc_init {
u_char code ;
u_char index ;
u_char n ;
u_short para ;
} ;
static const struct evc_init evc_inits[] = {
{ SMT_COND_SMT_PEER_WRAP, 0,1,SMT_P1048 } ,
{ SMT_COND_MAC_DUP_ADDR, INDEX_MAC, NUMMACS,SMT_P208C } ,
{ SMT_COND_MAC_FRAME_ERROR, INDEX_MAC, NUMMACS,SMT_P208D } ,
{ SMT_COND_MAC_NOT_COPIED, INDEX_MAC, NUMMACS,SMT_P208E } ,
{ SMT_EVENT_MAC_NEIGHBOR_CHANGE, INDEX_MAC, NUMMACS,SMT_P208F } ,
{ SMT_EVENT_MAC_PATH_CHANGE, INDEX_MAC, NUMMACS,SMT_P2090 } ,
{ SMT_COND_PORT_LER, INDEX_PORT,NUMPHYS,SMT_P4050 } ,
{ SMT_COND_PORT_EB_ERROR, INDEX_PORT,NUMPHYS,SMT_P4052 } ,
{ SMT_EVENT_PORT_CONNECTION, INDEX_PORT,NUMPHYS,SMT_P4051 } ,
{ SMT_EVENT_PORT_PATH_CHANGE, INDEX_PORT,NUMPHYS,SMT_P4053 } ,
} ;
#define MAX_INIT_EVC ARRAY_SIZE(evc_inits)
void smt_init_evc(struct s_smc *smc)
{
struct s_srf_evc *evc ;
const struct evc_init *init ;
unsigned int i ;
int index ;
int offset ;
static u_char fail_safe = FALSE ;
memset((char *)smc->evcs,0,sizeof(smc->evcs)) ;
evc = smc->evcs ;
init = evc_inits ;
for (i = 0 ; i < MAX_INIT_EVC ; i++) {
for (index = 0 ; index < init->n ; index++) {
evc->evc_code = init->code ;
evc->evc_para = init->para ;
evc->evc_index = init->index + index ;
#ifndef DEBUG
evc->evc_multiple = &fail_safe ;
evc->evc_cond_state = &fail_safe ;
#endif
evc++ ;
}
init++ ;
}
if ((unsigned int) (evc - smc->evcs) > MAX_EVCS) {
SMT_PANIC(smc,SMT_E0127, SMT_E0127_MSG) ;
}
/*
* conditions
*/
smc->evcs[0].evc_cond_state = &smc->mib.fddiSMTPeerWrapFlag ;
smc->evcs[1].evc_cond_state =
&smc->mib.m[MAC0].fddiMACDuplicateAddressCond ;
smc->evcs[2].evc_cond_state =
&smc->mib.m[MAC0].fddiMACFrameErrorFlag ;
smc->evcs[3].evc_cond_state =
&smc->mib.m[MAC0].fddiMACNotCopiedFlag ;
/*
* events
*/
smc->evcs[4].evc_multiple = &smc->mib.m[MAC0].fddiMACMultiple_N ;
smc->evcs[5].evc_multiple = &smc->mib.m[MAC0].fddiMACMultiple_P ;
offset = 6 ;
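	/*
	 * The per-port entries start at evcs[6]: for each of the four
	 * port codes in evc_inits[] the entries of all NUMPHYS ports lie
	 * consecutively, hence the "offset + n*NUMPHYS" indexing below.
	 */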
for (i = 0 ; i < NUMPHYS ; i++) {
/*
* conditions
*/
smc->evcs[offset + 0*NUMPHYS].evc_cond_state =
&smc->mib.p[i].fddiPORTLerFlag ;
smc->evcs[offset + 1*NUMPHYS].evc_cond_state =
&smc->mib.p[i].fddiPORTEB_Condition ;
/*
* events
*/
smc->evcs[offset + 2*NUMPHYS].evc_multiple =
&smc->mib.p[i].fddiPORTMultiple_U ;
smc->evcs[offset + 3*NUMPHYS].evc_multiple =
&smc->mib.p[i].fddiPORTMultiple_P ;
offset++ ;
}
#ifdef DEBUG
for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (SMT_IS_CONDITION(evc->evc_code)) {
if (!evc->evc_cond_state) {
SMT_PANIC(smc,SMT_E0128, SMT_E0128_MSG) ;
}
evc->evc_multiple = &fail_safe ;
}
else {
if (!evc->evc_multiple) {
SMT_PANIC(smc,SMT_E0129, SMT_E0129_MSG) ;
}
evc->evc_cond_state = &fail_safe ;
}
}
#endif
smc->srf.TSR = smt_get_time() ;
smc->srf.sr_state = SR0_WAIT ;
}
static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index)
{
unsigned int i ;
struct s_srf_evc *evc ;
for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (evc->evc_code == code && evc->evc_index == index)
return evc;
}
return NULL;
}
#define THRESHOLD_2 (2*TICKS_PER_SECOND)
#define THRESHOLD_32 (32*TICKS_PER_SECOND)
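/*
 * Unsolicited SRF report holdoff: the threshold starts at 2 s and is
 * doubled after each timer-driven report, up to a maximum of 32 s.
 */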
static const char * const srf_names[] = {
"None","MACPathChangeEvent", "MACNeighborChangeEvent",
"PORTPathChangeEvent", "PORTUndesiredConnectionAttemptEvent",
"SMTPeerWrapCondition", "SMTHoldCondition",
"MACFrameErrorCondition", "MACDuplicateAddressCondition",
"MACNotCopiedCondition", "PORTEBErrorCondition",
"PORTLerCondition"
} ;
void smt_srf_event(struct s_smc *smc, int code, int index, int cond)
{
struct s_srf_evc *evc ;
int cond_asserted = 0 ;
int cond_deasserted = 0 ;
int event_occurred = 0 ;
int tsr ;
int T_Limit = 2*TICKS_PER_SECOND ;
if (code == SMT_COND_MAC_DUP_ADDR && cond) {
RS_SET(smc,RS_DUPADDR) ;
}
if (code) {
DB_SMT("SRF: %s index %d", srf_names[code], index);
if (!(evc = smt_get_evc(smc,code,index))) {
DB_SMT("SRF : smt_get_evc() failed");
return ;
}
/*
* ignore condition if no change
*/
if (SMT_IS_CONDITION(code)) {
if (*evc->evc_cond_state == cond)
return ;
}
/*
* set transition time stamp
*/
smt_set_timestamp(smc,smc->mib.fddiSMTTransitionTimeStamp) ;
if (SMT_IS_CONDITION(code)) {
DB_SMT("SRF: condition is %s", cond ? "ON" : "OFF");
if (cond) {
*evc->evc_cond_state = TRUE ;
evc->evc_rep_required = TRUE ;
smc->srf.any_report = TRUE ;
cond_asserted = TRUE ;
}
else {
*evc->evc_cond_state = FALSE ;
cond_deasserted = TRUE ;
}
}
else {
if (evc->evc_rep_required) {
*evc->evc_multiple = TRUE ;
}
else {
evc->evc_rep_required = TRUE ;
*evc->evc_multiple = FALSE ;
}
smc->srf.any_report = TRUE ;
event_occurred = TRUE ;
}
#ifdef FDDI_MIB
snmp_srf_event(smc,evc) ;
#endif /* FDDI_MIB */
}
tsr = smt_get_time() - smc->srf.TSR ;
switch (smc->srf.sr_state) {
case SR0_WAIT :
/* SR01a */
if (cond_asserted && tsr < T_Limit) {
smc->srf.SRThreshold = THRESHOLD_2 ;
smc->srf.sr_state = SR1_HOLDOFF ;
break ;
}
/* SR01b */
if (cond_deasserted && tsr < T_Limit) {
smc->srf.sr_state = SR1_HOLDOFF ;
break ;
}
/* SR01c */
if (event_occurred && tsr < T_Limit) {
smc->srf.sr_state = SR1_HOLDOFF ;
break ;
}
/* SR00b */
if (cond_asserted && tsr >= T_Limit) {
smc->srf.SRThreshold = THRESHOLD_2 ;
smc->srf.TSR = smt_get_time() ;
smt_send_srf(smc) ;
break ;
}
/* SR00c */
if (cond_deasserted && tsr >= T_Limit) {
smc->srf.TSR = smt_get_time() ;
smt_send_srf(smc) ;
break ;
}
/* SR00d */
if (event_occurred && tsr >= T_Limit) {
smc->srf.TSR = smt_get_time() ;
smt_send_srf(smc) ;
break ;
}
/* SR00e */
if (smc->srf.any_report && (u_long) tsr >=
smc->srf.SRThreshold) {
smc->srf.SRThreshold *= 2 ;
if (smc->srf.SRThreshold > THRESHOLD_32)
smc->srf.SRThreshold = THRESHOLD_32 ;
smc->srf.TSR = smt_get_time() ;
smt_send_srf(smc) ;
break ;
}
/* SR02 */
if (!smc->mib.fddiSMTStatRptPolicy) {
smc->srf.sr_state = SR2_DISABLED ;
break ;
}
break ;
case SR1_HOLDOFF :
/* SR10b */
if (tsr >= T_Limit) {
smc->srf.sr_state = SR0_WAIT ;
smc->srf.TSR = smt_get_time() ;
smt_send_srf(smc) ;
break ;
}
/* SR11a */
if (cond_asserted) {
smc->srf.SRThreshold = THRESHOLD_2 ;
}
/* SR11b */
/* SR11c */
/* handled above */
/* SR12 */
if (!smc->mib.fddiSMTStatRptPolicy) {
smc->srf.sr_state = SR2_DISABLED ;
break ;
}
break ;
case SR2_DISABLED :
if (smc->mib.fddiSMTStatRptPolicy) {
smc->srf.sr_state = SR0_WAIT ;
smc->srf.TSR = smt_get_time() ;
smc->srf.SRThreshold = THRESHOLD_2 ;
clear_all_rep(smc) ;
break ;
}
break ;
}
}
static void clear_all_rep(struct s_smc *smc)
{
struct s_srf_evc *evc ;
unsigned int i ;
for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
evc->evc_rep_required = FALSE ;
if (SMT_IS_CONDITION(evc->evc_code))
*evc->evc_cond_state = FALSE ;
}
smc->srf.any_report = FALSE ;
}
static void clear_reported(struct s_smc *smc)
{
struct s_srf_evc *evc ;
unsigned int i ;
smc->srf.any_report = FALSE ;
for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (SMT_IS_CONDITION(evc->evc_code)) {
if (*evc->evc_cond_state == FALSE)
evc->evc_rep_required = FALSE ;
else
smc->srf.any_report = TRUE ;
}
else {
evc->evc_rep_required = FALSE ;
*evc->evc_multiple = FALSE ;
}
}
}
/*
* build and send SMT SRF frame
*/
static void smt_send_srf(struct s_smc *smc)
{
struct smt_header *smt ;
struct s_srf_evc *evc ;
SK_LOC_DECL(struct s_pcon,pcon) ;
SMbuf *mb ;
unsigned int i ;
static const struct fddi_addr SMT_SRF_DA = {
{ 0x80, 0x01, 0x43, 0x00, 0x80, 0x08 }
} ;
/*
* build SMT header
*/
if (!smc->r.sm_ma_avail)
return ;
if (!(mb = smt_build_frame(smc,SMT_SRF,SMT_ANNOUNCE,0)))
return ;
RS_SET(smc,RS_SOFTERROR) ;
smt = smtod(mb, struct smt_header *) ;
smt->smt_dest = SMT_SRF_DA ; /* DA == SRF multicast */
/*
* setup parameter status
*/
pcon.pc_len = SMT_MAX_INFO_LEN ; /* max para length */
pcon.pc_err = 0 ; /* no error */
pcon.pc_badset = 0 ; /* no bad set count */
pcon.pc_p = (void *) (smt + 1) ; /* paras start here */
smt_add_para(smc,&pcon,(u_short) SMT_P1033,0,0) ;
smt_add_para(smc,&pcon,(u_short) SMT_P1034,0,0) ;
for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
if (evc->evc_rep_required) {
smt_add_para(smc,&pcon,evc->evc_para,
(int)evc->evc_index,0) ;
}
}
smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
DB_SMT("SRF: sending SRF at %p, len %d", smt, mb->sm_len);
DB_SMT("SRF: state SR%d Threshold %lu",
smc->srf.sr_state, smc->srf.SRThreshold / TICKS_PER_SECOND);
#ifdef DEBUG
dump_smt(smc,smt,"SRF Send") ;
#endif
smt_send_frame(smc,mb,FC_SMT_INFO,0) ;
clear_reported(smc) ;
}
#endif /* no BOOT */
#endif /* no SLIM_SMT */
| linux-master | drivers/net/fddi/skfp/srf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
SMT ECM
Entity Coordination Management
Hardware independent state machine
*/
/*
 * Hardware independent state machine implementation
* The following external SMT functions are referenced :
*
* queue_event()
* smt_timer_start()
* smt_timer_stop()
*
* The following external HW dependent functions are referenced :
* sm_pm_bypass_req()
* sm_pm_get_ls()
*
* The following HW dependent events are required :
* NONE
*
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#define KERNEL
#include "h/smtstate.h"
/*
* FSM Macros
*/
#define AFLAG 0x10
#define GO_STATE(x) (smc->mib.fddiSMTECMState = (x)|AFLAG)
#define ACTIONS_DONE() (smc->mib.fddiSMTECMState &= ~AFLAG)
#define ACTIONS(x) (x|AFLAG)
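/*
 * A state value with AFLAG set means the entry actions for that state are
 * still pending: GO_STATE() sets the flag, the ACTIONS(x) case of the FSM
 * runs the entry actions once and ACTIONS_DONE() clears it.
 */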
#define EC0_OUT 0 /* not inserted */
#define EC1_IN 1 /* inserted */
#define EC2_TRACE 2 /* tracing */
#define EC3_LEAVE 3 /* leaving the ring */
#define EC4_PATH_TEST 4 /* performing path test */
#define EC5_INSERT 5 /* bypass being turned on */
#define EC6_CHECK 6 /* checking bypass */
#define EC7_DEINSERT	7		/* bypass being turned off */
/*
* symbolic state names
*/
static const char * const ecm_states[] = {
"EC0_OUT","EC1_IN","EC2_TRACE","EC3_LEAVE","EC4_PATH_TEST",
"EC5_INSERT","EC6_CHECK","EC7_DEINSERT"
} ;
/*
* symbolic event names
*/
static const char * const ecm_events[] = {
"NONE","EC_CONNECT","EC_DISCONNECT","EC_TRACE_PROP","EC_PATH_TEST",
"EC_TIMEOUT_TD","EC_TIMEOUT_TMAX",
"EC_TIMEOUT_IMAX","EC_TIMEOUT_INMAX","EC_TEST_DONE"
} ;
/*
* all Globals are defined in smc.h
* struct s_ecm
*/
/*
* function declarations
*/
static void ecm_fsm(struct s_smc *smc, int cmd);
static void start_ecm_timer(struct s_smc *smc, u_long value, int event);
static void stop_ecm_timer(struct s_smc *smc);
static void prop_actions(struct s_smc *smc);
/*
init ECM state machine
clear all ECM vars and flags
*/
void ecm_init(struct s_smc *smc)
{
smc->e.path_test = PT_PASSED ;
smc->e.trace_prop = 0 ;
smc->e.sb_flag = 0 ;
smc->mib.fddiSMTECMState = ACTIONS(EC0_OUT) ;
smc->e.ecm_line_state = FALSE ;
}
/*
ECM state machine
called by dispatcher
do
display state change
process event
until SM is stable
*/
void ecm(struct s_smc *smc, int event)
{
int state ;
do {
DB_ECM("ECM : state %s%s event %s",
smc->mib.fddiSMTECMState & AFLAG ? "ACTIONS " : "",
ecm_states[smc->mib.fddiSMTECMState & ~AFLAG],
ecm_events[event]);
state = smc->mib.fddiSMTECMState ;
ecm_fsm(smc,event) ;
event = 0 ;
} while (state != smc->mib.fddiSMTECMState) ;
ecm_state_change(smc,(int)smc->mib.fddiSMTECMState) ;
}
/*
process ECM event
*/
static void ecm_fsm(struct s_smc *smc, int cmd)
{
int ls_a ; /* current line state PHY A */
int ls_b ; /* current line state PHY B */
int p ; /* ports */
smc->mib.fddiSMTBypassPresent = sm_pm_bypass_present(smc) ;
if (cmd == EC_CONNECT)
smc->mib.fddiSMTRemoteDisconnectFlag = FALSE ;
/* For AIX event notification: */
/* Is a disconnect command remotely issued ? */
if (cmd == EC_DISCONNECT &&
smc->mib.fddiSMTRemoteDisconnectFlag == TRUE) {
AIX_EVENT (smc, (u_long) CIO_HARD_FAIL, (u_long)
FDDI_REMOTE_DISCONNECT, smt_get_event_word(smc),
smt_get_error_word(smc) );
}
/*jd 05-Aug-1999 Bug #10419 "Port Disconnect fails at Dup MAc Cond."*/
if (cmd == EC_CONNECT) {
smc->e.DisconnectFlag = FALSE ;
}
else if (cmd == EC_DISCONNECT) {
smc->e.DisconnectFlag = TRUE ;
}
switch(smc->mib.fddiSMTECMState) {
case ACTIONS(EC0_OUT) :
/*
* We do not perform a path test
*/
smc->e.path_test = PT_PASSED ;
smc->e.ecm_line_state = FALSE ;
stop_ecm_timer(smc) ;
ACTIONS_DONE() ;
break ;
case EC0_OUT:
/*EC01*/
if (cmd == EC_CONNECT && !smc->mib.fddiSMTBypassPresent
&& smc->e.path_test==PT_PASSED) {
GO_STATE(EC1_IN) ;
break ;
}
/*EC05*/
else if (cmd == EC_CONNECT && (smc->e.path_test==PT_PASSED) &&
smc->mib.fddiSMTBypassPresent &&
(smc->s.sas == SMT_DAS)) {
GO_STATE(EC5_INSERT) ;
break ;
}
break;
case ACTIONS(EC1_IN) :
stop_ecm_timer(smc) ;
smc->e.trace_prop = 0 ;
sm_ma_control(smc,MA_TREQ) ;
for (p = 0 ; p < NUMPHYS ; p++)
if (smc->mib.p[p].fddiPORTHardwarePresent)
queue_event(smc,EVENT_PCMA+p,PC_START) ;
ACTIONS_DONE() ;
break ;
case EC1_IN:
/*EC12*/
if (cmd == EC_TRACE_PROP) {
prop_actions(smc) ;
GO_STATE(EC2_TRACE) ;
break ;
}
/*EC13*/
else if (cmd == EC_DISCONNECT) {
GO_STATE(EC3_LEAVE) ;
break ;
}
break;
case ACTIONS(EC2_TRACE) :
start_ecm_timer(smc,MIB2US(smc->mib.fddiSMTTrace_MaxExpiration),
EC_TIMEOUT_TMAX) ;
ACTIONS_DONE() ;
break ;
case EC2_TRACE :
/*EC22*/
if (cmd == EC_TRACE_PROP) {
prop_actions(smc) ;
GO_STATE(EC2_TRACE) ;
break ;
}
/*EC23a*/
else if (cmd == EC_DISCONNECT) {
smc->e.path_test = PT_EXITING ;
GO_STATE(EC3_LEAVE) ;
break ;
}
/*EC23b*/
else if (smc->e.path_test == PT_PENDING) {
GO_STATE(EC3_LEAVE) ;
break ;
}
/*EC23c*/
else if (cmd == EC_TIMEOUT_TMAX) {
/* Trace_Max is expired */
/* -> send AIX_EVENT */
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS,
(u_long) FDDI_SMT_ERROR, (u_long)
FDDI_TRACE_MAX, smt_get_error_word(smc));
smc->e.path_test = PT_PENDING ;
GO_STATE(EC3_LEAVE) ;
break ;
}
break ;
case ACTIONS(EC3_LEAVE) :
start_ecm_timer(smc,smc->s.ecm_td_min,EC_TIMEOUT_TD) ;
for (p = 0 ; p < NUMPHYS ; p++)
queue_event(smc,EVENT_PCMA+p,PC_STOP) ;
ACTIONS_DONE() ;
break ;
case EC3_LEAVE:
/*EC30*/
if (cmd == EC_TIMEOUT_TD && !smc->mib.fddiSMTBypassPresent &&
(smc->e.path_test != PT_PENDING)) {
GO_STATE(EC0_OUT) ;
break ;
}
/*EC34*/
else if (cmd == EC_TIMEOUT_TD &&
(smc->e.path_test == PT_PENDING)) {
GO_STATE(EC4_PATH_TEST) ;
break ;
}
/*EC31*/
else if (cmd == EC_CONNECT && smc->e.path_test == PT_PASSED) {
GO_STATE(EC1_IN) ;
break ;
}
/*EC33*/
else if (cmd == EC_DISCONNECT &&
smc->e.path_test == PT_PENDING) {
smc->e.path_test = PT_EXITING ;
/*
* stay in state - state will be left via timeout
*/
}
/*EC37*/
else if (cmd == EC_TIMEOUT_TD &&
smc->mib.fddiSMTBypassPresent &&
smc->e.path_test != PT_PENDING) {
GO_STATE(EC7_DEINSERT) ;
break ;
}
break ;
case ACTIONS(EC4_PATH_TEST) :
stop_ecm_timer(smc) ;
smc->e.path_test = PT_TESTING ;
start_ecm_timer(smc,smc->s.ecm_test_done,EC_TEST_DONE) ;
/* now perform path test ... just a simulation */
ACTIONS_DONE() ;
break ;
case EC4_PATH_TEST :
/* path test done delay */
if (cmd == EC_TEST_DONE)
smc->e.path_test = PT_PASSED ;
if (smc->e.path_test == PT_FAILED)
RS_SET(smc,RS_PATHTEST) ;
/*EC40a*/
if (smc->e.path_test == PT_FAILED &&
!smc->mib.fddiSMTBypassPresent) {
GO_STATE(EC0_OUT) ;
break ;
}
/*EC40b*/
else if (cmd == EC_DISCONNECT &&
!smc->mib.fddiSMTBypassPresent) {
GO_STATE(EC0_OUT) ;
break ;
}
/*EC41*/
else if (smc->e.path_test == PT_PASSED) {
GO_STATE(EC1_IN) ;
break ;
}
/*EC47a*/
else if (smc->e.path_test == PT_FAILED &&
smc->mib.fddiSMTBypassPresent) {
GO_STATE(EC7_DEINSERT) ;
break ;
}
/*EC47b*/
else if (cmd == EC_DISCONNECT &&
smc->mib.fddiSMTBypassPresent) {
GO_STATE(EC7_DEINSERT) ;
break ;
}
break ;
case ACTIONS(EC5_INSERT) :
sm_pm_bypass_req(smc,BP_INSERT);
start_ecm_timer(smc,smc->s.ecm_in_max,EC_TIMEOUT_INMAX) ;
ACTIONS_DONE() ;
break ;
case EC5_INSERT :
/*EC56*/
if (cmd == EC_TIMEOUT_INMAX) {
GO_STATE(EC6_CHECK) ;
break ;
}
/*EC57*/
else if (cmd == EC_DISCONNECT) {
GO_STATE(EC7_DEINSERT) ;
break ;
}
break ;
case ACTIONS(EC6_CHECK) :
/*
* in EC6_CHECK, we *POLL* the line state !
* check whether both bypass switches have switched.
*/
start_ecm_timer(smc,smc->s.ecm_check_poll,0) ;
smc->e.ecm_line_state = TRUE ; /* flag to pcm: report Q/HLS */
ACTIONS_DONE() ;
break ;
case EC6_CHECK :
ls_a = sm_pm_get_ls(smc,PA) ;
ls_b = sm_pm_get_ls(smc,PB) ;
/*EC61*/
if (((ls_a == PC_QLS) || (ls_a == PC_HLS)) &&
((ls_b == PC_QLS) || (ls_b == PC_HLS)) ) {
smc->e.sb_flag = FALSE ;
smc->e.ecm_line_state = FALSE ;
GO_STATE(EC1_IN) ;
break ;
}
/*EC66*/
else if (!smc->e.sb_flag &&
(((ls_a == PC_ILS) && (ls_b == PC_QLS)) ||
((ls_a == PC_QLS) && (ls_b == PC_ILS)))){
smc->e.sb_flag = TRUE ;
DB_ECMN(1, "ECM : EC6_CHECK - stuck bypass");
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
FDDI_SMT_ERROR, (u_long) FDDI_BYPASS_STUCK,
smt_get_error_word(smc));
}
/*EC67*/
else if (cmd == EC_DISCONNECT) {
smc->e.ecm_line_state = FALSE ;
GO_STATE(EC7_DEINSERT) ;
break ;
}
else {
/*
* restart poll
*/
start_ecm_timer(smc,smc->s.ecm_check_poll,0) ;
}
break ;
case ACTIONS(EC7_DEINSERT) :
sm_pm_bypass_req(smc,BP_DEINSERT);
start_ecm_timer(smc,smc->s.ecm_i_max,EC_TIMEOUT_IMAX) ;
ACTIONS_DONE() ;
break ;
case EC7_DEINSERT:
/*EC70*/
if (cmd == EC_TIMEOUT_IMAX) {
GO_STATE(EC0_OUT) ;
break ;
}
/*EC75*/
else if (cmd == EC_CONNECT && smc->e.path_test == PT_PASSED) {
GO_STATE(EC5_INSERT) ;
break ;
}
break;
default:
SMT_PANIC(smc,SMT_E0107, SMT_E0107_MSG) ;
break;
}
}
#ifndef CONCENTRATOR
/*
* trace propagation actions for SAS & DAS
*/
static void prop_actions(struct s_smc *smc)
{
int port_in = 0 ;
int port_out = 0 ;
RS_SET(smc,RS_EVENT) ;
switch (smc->s.sas) {
case SMT_SAS :
port_in = port_out = pcm_get_s_port(smc) ;
break ;
case SMT_DAS :
port_in = cfm_get_mac_input(smc) ; /* PA or PB */
port_out = cfm_get_mac_output(smc) ; /* PA or PB */
break ;
case SMT_NAC :
SMT_PANIC(smc,SMT_E0108, SMT_E0108_MSG) ;
return ;
}
DB_ECM("ECM : prop_actions - trace_prop %lu", smc->e.trace_prop);
DB_ECM("ECM : prop_actions - in %d out %d", port_in, port_out);
if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) {
/* trace initiatior */
DB_ECM("ECM : initiate TRACE on PHY %c", 'A' + port_in - PA);
queue_event(smc,EVENT_PCM+port_in,PC_TRACE) ;
}
else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PA))) &&
port_out != PA) {
/* trace propagate upstream */
DB_ECM("ECM : propagate TRACE on PHY B");
queue_event(smc,EVENT_PCMB,PC_TRACE) ;
}
else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PB))) &&
port_out != PB) {
/* trace propagate upstream */
DB_ECM("ECM : propagate TRACE on PHY A");
queue_event(smc,EVENT_PCMA,PC_TRACE) ;
}
else {
/* signal trace termination */
DB_ECM("ECM : TRACE terminated");
smc->e.path_test = PT_PENDING ;
}
smc->e.trace_prop = 0 ;
}
#else
/*
* trace propagation actions for Concentrator
*/
static void prop_actions(struct s_smc *smc)
{
int initiator ;
int upstream ;
int p ;
RS_SET(smc,RS_EVENT) ;
while (smc->e.trace_prop) {
DB_ECM("ECM : prop_actions - trace_prop %d",
smc->e.trace_prop);
if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) {
initiator = ENTITY_MAC ;
smc->e.trace_prop &= ~ENTITY_BIT(ENTITY_MAC) ;
DB_ECM("ECM: MAC initiates trace");
}
else {
for (p = NUMPHYS-1 ; p >= 0 ; p--) {
if (smc->e.trace_prop &
ENTITY_BIT(ENTITY_PHY(p)))
break ;
}
initiator = ENTITY_PHY(p) ;
smc->e.trace_prop &= ~ENTITY_BIT(ENTITY_PHY(p)) ;
}
upstream = cem_get_upstream(smc,initiator) ;
if (upstream == ENTITY_MAC) {
/* signal trace termination */
DB_ECM("ECM : TRACE terminated");
smc->e.path_test = PT_PENDING ;
}
else {
/* trace propagate upstream */
DB_ECM("ECM : propagate TRACE on PHY %d", upstream);
queue_event(smc,EVENT_PCM+upstream,PC_TRACE) ;
}
}
}
#endif
/*
* SMT timer interface
* start ECM timer
*/
static void start_ecm_timer(struct s_smc *smc, u_long value, int event)
{
smt_timer_start(smc,&smc->e.ecm_timer,value,EV_TOKEN(EVENT_ECM,event));
}
/*
* SMT timer interface
* stop ECM timer
*/
static void stop_ecm_timer(struct s_smc *smc)
{
if (smc->e.ecm_timer.tm_active)
smt_timer_stop(smc,&smc->e.ecm_timer) ;
}
| linux-master | drivers/net/fddi/skfp/ecm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
SMT CFM
Configuration Management
DAS with single MAC
*/
/*
 * Hardware independent state machine implementation
* The following external SMT functions are referenced :
*
* queue_event()
*
* The following external HW dependent functions are referenced :
* config_mux()
*
* The following HW dependent events are required :
* NONE
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#define KERNEL
#include "h/smtstate.h"
/*
* FSM Macros
*/
#define AFLAG 0x10
#define GO_STATE(x) (smc->mib.fddiSMTCF_State = (x)|AFLAG)
#define ACTIONS_DONE() (smc->mib.fddiSMTCF_State &= ~AFLAG)
#define ACTIONS(x) (x|AFLAG)
/*
* symbolic state names
*/
static const char * const cfm_states[] = {
"SC0_ISOLATED","CF1","CF2","CF3","CF4",
"SC1_WRAP_A","SC2_WRAP_B","SC5_TRHU_B","SC7_WRAP_S",
"SC9_C_WRAP_A","SC10_C_WRAP_B","SC11_C_WRAP_S","SC4_THRU_A"
} ;
/*
* symbolic event names
*/
static const char * const cfm_events[] = {
"NONE","CF_LOOP_A","CF_LOOP_B","CF_JOIN_A","CF_JOIN_B"
} ;
/*
* map from state to downstream port type
*/
static const unsigned char cf_to_ptype[] = {
TNONE,TNONE,TNONE,TNONE,TNONE,
TNONE,TB,TB,TS,
TA,TB,TS,TB
} ;
/*
* CEM port states
*/
#define CEM_PST_DOWN 0
#define CEM_PST_UP 1
#define CEM_PST_HOLD 2
/* define portstate array only for A and B port */
/* Do this within the smc structure (use in multiple cards) */
/*
* all Globals are defined in smc.h
* struct s_cfm
*/
/*
* function declarations
*/
static void cfm_fsm(struct s_smc *smc, int cmd);
/*
init CFM state machine
clear all CFM vars and flags
*/
void cfm_init(struct s_smc *smc)
{
smc->mib.fddiSMTCF_State = ACTIONS(SC0_ISOLATED) ;
smc->r.rm_join = 0 ;
smc->r.rm_loop = 0 ;
smc->y[PA].scrub = 0 ;
smc->y[PB].scrub = 0 ;
smc->y[PA].cem_pst = CEM_PST_DOWN ;
smc->y[PB].cem_pst = CEM_PST_DOWN ;
}
/* Some conditions used by the selection criteria */
#define THRU_ENABLED(smc) (smc->y[PA].pc_mode != PM_TREE && \
smc->y[PB].pc_mode != PM_TREE)
/* Selection criteria for the ports */
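/*
 * wc_flag == TRUE withholds the connection on that port; cem_priv_state()
 * uses it to put a port on hold or restart it (dual-homing optimization).
 */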
static void selection_criteria (struct s_smc *smc, struct s_phy *phy)
{
switch (phy->mib->fddiPORTMy_Type) {
case TA:
if ( !THRU_ENABLED(smc) && smc->y[PB].cf_join ) {
phy->wc_flag = TRUE ;
} else {
phy->wc_flag = FALSE ;
}
break;
case TB:
/* take precedence over PA */
phy->wc_flag = FALSE ;
break;
case TS:
phy->wc_flag = FALSE ;
break;
case TM:
phy->wc_flag = FALSE ;
break;
}
}
void all_selection_criteria(struct s_smc *smc)
{
struct s_phy *phy ;
int p ;
for ( p = 0,phy = smc->y ; p < NUMPHYS; p++, phy++ ) {
/* Do the selection criteria */
selection_criteria (smc,phy);
}
}
static void cem_priv_state(struct s_smc *smc, int event)
/* State machine for private PORT states: used to optimize dual homing */
{
int np; /* Number of the port */
int i;
/* Do this only in a DAS */
if (smc->s.sas != SMT_DAS )
return ;
np = event - CF_JOIN;
if (np != PA && np != PB) {
return ;
}
/* Change the port state according to the event (portnumber) */
if (smc->y[np].cf_join) {
smc->y[np].cem_pst = CEM_PST_UP ;
} else if (!smc->y[np].wc_flag) {
/* set the port to done only if it is not withheld */
smc->y[np].cem_pst = CEM_PST_DOWN ;
}
	/* A port that is on hold is not forced down here */
	/* Check all ports for restart conditions */
for (i = 0 ; i < 2 ; i ++ ) {
		/* If the port is on hold and no longer withheld, restart it */
if ( smc->y[i].cem_pst == CEM_PST_HOLD && !smc->y[i].wc_flag ) {
smc->y[i].cem_pst = CEM_PST_DOWN;
queue_event(smc,(int)(EVENT_PCM+i),PC_START) ;
}
if ( smc->y[i].cem_pst == CEM_PST_UP && smc->y[i].wc_flag ) {
smc->y[i].cem_pst = CEM_PST_HOLD;
queue_event(smc,(int)(EVENT_PCM+i),PC_START) ;
}
if ( smc->y[i].cem_pst == CEM_PST_DOWN && smc->y[i].wc_flag ) {
/*
* The port must be restarted when the wc_flag
* will be reset. So set the port on hold.
*/
smc->y[i].cem_pst = CEM_PST_HOLD;
}
}
return ;
}
/*
CFM state machine
called by dispatcher
do
display state change
process event
until SM is stable
*/
void cfm(struct s_smc *smc, int event)
{
int state ; /* remember last state */
int cond ;
/* We will do the following: */
/* - compute the variable WC_Flag for every port (This is where */
/* we can extend the requested path checking !!) */
/* - do the old (SMT 6.2 like) state machine */
/* - do the resulting station states */
all_selection_criteria (smc);
/* We will check now whether a state transition is allowed or not */
/* - change the portstates */
cem_priv_state (smc, event);
do {
DB_CFM("CFM : state %s%s event %s",
smc->mib.fddiSMTCF_State & AFLAG ? "ACTIONS " : "",
cfm_states[smc->mib.fddiSMTCF_State & ~AFLAG],
cfm_events[event]);
state = smc->mib.fddiSMTCF_State ;
cfm_fsm(smc,event) ;
event = 0 ;
} while (state != smc->mib.fddiSMTCF_State) ;
#ifndef SLIM_SMT
/*
* check peer wrap condition
*/
cond = FALSE ;
if ( (smc->mib.fddiSMTCF_State == SC9_C_WRAP_A &&
smc->y[PA].pc_mode == PM_PEER) ||
(smc->mib.fddiSMTCF_State == SC10_C_WRAP_B &&
smc->y[PB].pc_mode == PM_PEER) ||
(smc->mib.fddiSMTCF_State == SC11_C_WRAP_S &&
smc->y[PS].pc_mode == PM_PEER &&
smc->y[PS].mib->fddiPORTNeighborType != TS ) ) {
cond = TRUE ;
}
if (cond != smc->mib.fddiSMTPeerWrapFlag)
smt_srf_event(smc,SMT_COND_SMT_PEER_WRAP,0,cond) ;
/*
* Don't ever send MAC_PATH_CHANGE events. Our MAC is hard-wired
* to the primary path.
*/
#endif /* no SLIM_SMT */
/*
* set MAC port type
*/
smc->mib.m[MAC0].fddiMACDownstreamPORTType =
cf_to_ptype[smc->mib.fddiSMTCF_State] ;
cfm_state_change(smc,(int)smc->mib.fddiSMTCF_State) ;
}
/*
process CFM event
*/
/*ARGSUSED1*/
static void cfm_fsm(struct s_smc *smc, int cmd)
{
switch(smc->mib.fddiSMTCF_State) {
case ACTIONS(SC0_ISOLATED) :
smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_SEPA ;
config_mux(smc,MUX_ISOLATE) ; /* configure PHY Mux */
smc->r.rm_loop = FALSE ;
smc->r.rm_join = FALSE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
/* Don't do the WC-Flag changing here */
ACTIONS_DONE() ;
DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
break;
case SC0_ISOLATED :
/*SC07*/
/*SAS port can be PA or PB ! */
if (smc->s.sas && (smc->y[PA].cf_join || smc->y[PA].cf_loop ||
smc->y[PB].cf_join || smc->y[PB].cf_loop)) {
GO_STATE(SC11_C_WRAP_S) ;
break ;
}
/*SC01*/
if ((smc->y[PA].cem_pst == CEM_PST_UP && smc->y[PA].cf_join &&
!smc->y[PA].wc_flag) || smc->y[PA].cf_loop) {
GO_STATE(SC9_C_WRAP_A) ;
break ;
}
/*SC02*/
if ((smc->y[PB].cem_pst == CEM_PST_UP && smc->y[PB].cf_join &&
!smc->y[PB].wc_flag) || smc->y[PB].cf_loop) {
GO_STATE(SC10_C_WRAP_B) ;
break ;
}
break ;
case ACTIONS(SC9_C_WRAP_A) :
smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
smc->mib.p[PA].fddiPORTMACPlacement = INDEX_MAC ;
smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_CON ;
config_mux(smc,MUX_WRAPA) ; /* configure PHY mux */
if (smc->y[PA].cf_loop) {
smc->r.rm_join = FALSE ;
smc->r.rm_loop = TRUE ;
queue_event(smc,EVENT_RMT,RM_LOOP) ;/* signal RMT */
}
if (smc->y[PA].cf_join) {
smc->r.rm_loop = FALSE ;
smc->r.rm_join = TRUE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
}
ACTIONS_DONE() ;
DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
break ;
case SC9_C_WRAP_A :
/*SC10*/
if ( (smc->y[PA].wc_flag || !smc->y[PA].cf_join) &&
!smc->y[PA].cf_loop ) {
GO_STATE(SC0_ISOLATED) ;
break ;
}
/*SC12*/
else if ( (smc->y[PB].cf_loop && smc->y[PA].cf_join &&
smc->y[PA].cem_pst == CEM_PST_UP) ||
((smc->y[PB].cf_loop ||
(smc->y[PB].cf_join &&
smc->y[PB].cem_pst == CEM_PST_UP)) &&
(smc->y[PA].pc_mode == PM_TREE ||
smc->y[PB].pc_mode == PM_TREE))) {
smc->y[PA].scrub = TRUE ;
GO_STATE(SC10_C_WRAP_B) ;
break ;
}
/*SC14*/
else if (!smc->s.attach_s &&
smc->y[PA].cf_join &&
smc->y[PA].cem_pst == CEM_PST_UP &&
smc->y[PA].pc_mode == PM_PEER && smc->y[PB].cf_join &&
smc->y[PB].cem_pst == CEM_PST_UP &&
smc->y[PB].pc_mode == PM_PEER) {
smc->y[PA].scrub = TRUE ;
smc->y[PB].scrub = TRUE ;
GO_STATE(SC4_THRU_A) ;
break ;
}
/*SC15*/
else if ( smc->s.attach_s &&
smc->y[PA].cf_join &&
smc->y[PA].cem_pst == CEM_PST_UP &&
smc->y[PA].pc_mode == PM_PEER &&
smc->y[PB].cf_join &&
smc->y[PB].cem_pst == CEM_PST_UP &&
smc->y[PB].pc_mode == PM_PEER) {
smc->y[PA].scrub = TRUE ;
smc->y[PB].scrub = TRUE ;
GO_STATE(SC5_THRU_B) ;
break ;
}
break ;
case ACTIONS(SC10_C_WRAP_B) :
smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
smc->mib.p[PB].fddiPORTMACPlacement = INDEX_MAC ;
smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_CON ;
config_mux(smc,MUX_WRAPB) ; /* configure PHY mux */
if (smc->y[PB].cf_loop) {
smc->r.rm_join = FALSE ;
smc->r.rm_loop = TRUE ;
queue_event(smc,EVENT_RMT,RM_LOOP) ;/* signal RMT */
}
if (smc->y[PB].cf_join) {
smc->r.rm_loop = FALSE ;
smc->r.rm_join = TRUE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
}
ACTIONS_DONE() ;
DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
break ;
case SC10_C_WRAP_B :
/*SC20*/
if ( !smc->y[PB].cf_join && !smc->y[PB].cf_loop ) {
GO_STATE(SC0_ISOLATED) ;
break ;
}
/*SC21*/
else if ( smc->y[PA].cf_loop && smc->y[PA].pc_mode == PM_PEER &&
smc->y[PB].cf_join && smc->y[PB].pc_mode == PM_PEER) {
smc->y[PB].scrub = TRUE ;
GO_STATE(SC9_C_WRAP_A) ;
break ;
}
/*SC24*/
else if (!smc->s.attach_s &&
smc->y[PA].cf_join && smc->y[PA].pc_mode == PM_PEER &&
smc->y[PB].cf_join && smc->y[PB].pc_mode == PM_PEER) {
smc->y[PA].scrub = TRUE ;
smc->y[PB].scrub = TRUE ;
GO_STATE(SC4_THRU_A) ;
break ;
}
/*SC25*/
else if ( smc->s.attach_s &&
smc->y[PA].cf_join && smc->y[PA].pc_mode == PM_PEER &&
smc->y[PB].cf_join && smc->y[PB].pc_mode == PM_PEER) {
smc->y[PA].scrub = TRUE ;
smc->y[PB].scrub = TRUE ;
GO_STATE(SC5_THRU_B) ;
break ;
}
break ;
case ACTIONS(SC4_THRU_A) :
smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_THRU ;
smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_THRU ;
smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
smc->mib.p[PB].fddiPORTMACPlacement = INDEX_MAC ;
smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_THRU ;
config_mux(smc,MUX_THRUA) ; /* configure PHY mux */
smc->r.rm_loop = FALSE ;
smc->r.rm_join = TRUE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
ACTIONS_DONE() ;
DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
break ;
case SC4_THRU_A :
/*SC41*/
if (smc->y[PB].wc_flag || !smc->y[PB].cf_join) {
smc->y[PA].scrub = TRUE ;
GO_STATE(SC9_C_WRAP_A) ;
break ;
}
/*SC42*/
else if (!smc->y[PA].cf_join || smc->y[PA].wc_flag) {
smc->y[PB].scrub = TRUE ;
GO_STATE(SC10_C_WRAP_B) ;
break ;
}
/*SC45*/
else if (smc->s.attach_s) {
smc->y[PB].scrub = TRUE ;
GO_STATE(SC5_THRU_B) ;
break ;
}
break ;
case ACTIONS(SC5_THRU_B) :
smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_THRU ;
smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_THRU ;
smc->mib.p[PA].fddiPORTMACPlacement = INDEX_MAC ;
smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_THRU ;
config_mux(smc,MUX_THRUB) ; /* configure PHY mux */
smc->r.rm_loop = FALSE ;
smc->r.rm_join = TRUE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
ACTIONS_DONE() ;
DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
break ;
case SC5_THRU_B :
/*SC51*/
if (!smc->y[PB].cf_join || smc->y[PB].wc_flag) {
smc->y[PA].scrub = TRUE ;
GO_STATE(SC9_C_WRAP_A) ;
break ;
}
/*SC52*/
else if (!smc->y[PA].cf_join || smc->y[PA].wc_flag) {
smc->y[PB].scrub = TRUE ;
GO_STATE(SC10_C_WRAP_B) ;
break ;
}
/*SC54*/
else if (!smc->s.attach_s) {
smc->y[PA].scrub = TRUE ;
GO_STATE(SC4_THRU_A) ;
break ;
}
break ;
case ACTIONS(SC11_C_WRAP_S) :
smc->mib.p[PS].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
smc->mib.p[PS].fddiPORTMACPlacement = INDEX_MAC ;
smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_CON ;
config_mux(smc,MUX_WRAPS) ; /* configure PHY mux */
if (smc->y[PA].cf_loop || smc->y[PB].cf_loop) {
smc->r.rm_join = FALSE ;
smc->r.rm_loop = TRUE ;
queue_event(smc,EVENT_RMT,RM_LOOP) ;/* signal RMT */
}
if (smc->y[PA].cf_join || smc->y[PB].cf_join) {
smc->r.rm_loop = FALSE ;
smc->r.rm_join = TRUE ;
queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
}
ACTIONS_DONE() ;
DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
break ;
case SC11_C_WRAP_S :
/*SC70*/
if ( !smc->y[PA].cf_join && !smc->y[PA].cf_loop &&
!smc->y[PB].cf_join && !smc->y[PB].cf_loop) {
GO_STATE(SC0_ISOLATED) ;
break ;
}
break ;
default:
SMT_PANIC(smc,SMT_E0106, SMT_E0106_MSG) ;
break;
}
}
/*
* get MAC's input Port
* return :
* PA or PB
*/
int cfm_get_mac_input(struct s_smc *smc)
{
return (smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
smc->mib.fddiSMTCF_State == SC5_THRU_B) ? PB : PA;
}
/*
* get MAC's output Port
* return :
* PA or PB
*/
int cfm_get_mac_output(struct s_smc *smc)
{
return (smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
smc->mib.fddiSMTCF_State == SC4_THRU_A) ? PB : PA;
}
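/*
 * Path descriptors used by cem_build_path(): each entity is encoded as
 * four big-endian 16-bit words - reserved, resource type (RES_PORT or
 * RES_MAC), resource index and the path it is placed on.
 */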
static char path_iso[] = {
0,0, 0,RES_PORT, 0,PA + INDEX_PORT, 0,PATH_ISO,
0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_ISO,
0,0, 0,RES_PORT, 0,PB + INDEX_PORT, 0,PATH_ISO
} ;
static char path_wrap_a[] = {
0,0, 0,RES_PORT, 0,PA + INDEX_PORT, 0,PATH_PRIM,
0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_PRIM,
0,0, 0,RES_PORT, 0,PB + INDEX_PORT, 0,PATH_ISO
} ;
static char path_wrap_b[] = {
0,0, 0,RES_PORT, 0,PB + INDEX_PORT, 0,PATH_PRIM,
0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_PRIM,
0,0, 0,RES_PORT, 0,PA + INDEX_PORT, 0,PATH_ISO
} ;
static char path_thru[] = {
0,0, 0,RES_PORT, 0,PA + INDEX_PORT, 0,PATH_PRIM,
0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_PRIM,
0,0, 0,RES_PORT, 0,PB + INDEX_PORT, 0,PATH_PRIM
} ;
static char path_wrap_s[] = {
0,0, 0,RES_PORT, 0,PS + INDEX_PORT, 0,PATH_PRIM,
0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_PRIM,
} ;
static char path_iso_s[] = {
0,0, 0,RES_PORT, 0,PS + INDEX_PORT, 0,PATH_ISO,
0,0, 0,RES_MAC, 0,INDEX_MAC, 0,PATH_ISO,
} ;
int cem_build_path(struct s_smc *smc, char *to, int path_index)
{
char *path ;
int len ;
switch (smc->mib.fddiSMTCF_State) {
default :
case SC0_ISOLATED :
path = smc->s.sas ? path_iso_s : path_iso ;
len = smc->s.sas ? sizeof(path_iso_s) : sizeof(path_iso) ;
break ;
case SC9_C_WRAP_A :
path = path_wrap_a ;
len = sizeof(path_wrap_a) ;
break ;
case SC10_C_WRAP_B :
path = path_wrap_b ;
len = sizeof(path_wrap_b) ;
break ;
case SC4_THRU_A :
path = path_thru ;
len = sizeof(path_thru) ;
break ;
case SC11_C_WRAP_S :
path = path_wrap_s ;
len = sizeof(path_wrap_s) ;
break ;
}
memcpy(to,path,len) ;
LINT_USE(path_index);
return len;
}
| linux-master | drivers/net/fddi/skfp/cfm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
PCM
Physical Connection Management
*/
/*
 * Hardware independent state machine implementation
* The following external SMT functions are referenced :
*
* queue_event()
* smt_timer_start()
* smt_timer_stop()
*
* The following external HW dependent functions are referenced :
* sm_pm_control()
* sm_ph_linestate()
*
* The following HW dependent events are required :
* PC_QLS
* PC_ILS
* PC_HLS
* PC_MLS
* PC_NSE
* PC_LEM
*
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/supern_2.h"
#define KERNEL
#include "h/smtstate.h"
#ifdef FDDI_MIB
extern int snmp_fddi_trap(
#ifdef ANSIC
struct s_smc * smc, int type, int index
#endif
);
#endif
#ifdef CONCENTRATOR
extern int plc_is_installed(
#ifdef ANSIC
struct s_smc *smc ,
int p
#endif
) ;
#endif
/*
* FSM Macros
*/
#define AFLAG (0x20)
#define GO_STATE(x) (mib->fddiPORTPCMState = (x)|AFLAG)
#define ACTIONS_DONE() (mib->fddiPORTPCMState &= ~AFLAG)
#define ACTIONS(x) (x|AFLAG)
/*
* PCM states
*/
#define PC0_OFF 0
#define PC1_BREAK 1
#define PC2_TRACE 2
#define PC3_CONNECT 3
#define PC4_NEXT 4
#define PC5_SIGNAL 5
#define PC6_JOIN 6
#define PC7_VERIFY 7
#define PC8_ACTIVE 8
#define PC9_MAINT 9
/*
* symbolic state names
*/
static const char * const pcm_states[] = {
"PC0_OFF","PC1_BREAK","PC2_TRACE","PC3_CONNECT","PC4_NEXT",
"PC5_SIGNAL","PC6_JOIN","PC7_VERIFY","PC8_ACTIVE","PC9_MAINT"
} ;
/*
* symbolic event names
*/
static const char * const pcm_events[] = {
"NONE","PC_START","PC_STOP","PC_LOOP","PC_JOIN","PC_SIGNAL",
"PC_REJECT","PC_MAINT","PC_TRACE","PC_PDR",
"PC_ENABLE","PC_DISABLE",
"PC_QLS","PC_ILS","PC_MLS","PC_HLS","PC_LS_PDR","PC_LS_NONE",
"PC_TIMEOUT_TB_MAX","PC_TIMEOUT_TB_MIN",
"PC_TIMEOUT_C_MIN","PC_TIMEOUT_T_OUT",
"PC_TIMEOUT_TL_MIN","PC_TIMEOUT_T_NEXT","PC_TIMEOUT_LCT",
"PC_NSE","PC_LEM"
} ;
#ifdef MOT_ELM
/*
* PLC-S control register
* this register in the PLC-S controls the scrambling parameters
*/
#define PLCS_CONTROL_C_U 0
#define PLCS_CONTROL_C_S (PL_C_SDOFF_ENABLE | PL_C_SDON_ENABLE | \
PL_C_CIPHER_ENABLE)
#define PLCS_FASSERT_U 0
#define PLCS_FASSERT_S 0xFd76 /* 52.0 us */
#define PLCS_FDEASSERT_U 0
#define PLCS_FDEASSERT_S 0
#else /* nMOT_ELM */
/*
* PLC-S control register
* this register in the PLC-S controls the scrambling parameters;
* it can be patched for ANSI compliance if the standard changes
*/
static const u_char plcs_control_c_u[17] = "PLC_CNTRL_C_U=\0\0" ;
static const u_char plcs_control_c_s[17] = "PLC_CNTRL_C_S=\01\02" ;
#define PLCS_CONTROL_C_U (plcs_control_c_u[14] | (plcs_control_c_u[15]<<8))
#define PLCS_CONTROL_C_S (plcs_control_c_s[14] | (plcs_control_c_s[15]<<8))
#endif /* nMOT_ELM */
/*
* external vars
*/
/* struct definition see 'cmtdef.h' (also used by CFM) */
#define PS_OFF 0
#define PS_BIT3 1
#define PS_BIT4 2
#define PS_BIT7 3
#define PS_LCT 4
#define PS_BIT8 5
#define PS_JOIN 6
#define PS_ACTIVE 7
#define LCT_LEM_MAX 255
/*
* PLC timing parameter
*/
#define PLC_MS(m) ((int)((0x10000L-(m*100000L/2048))))
#define SLOW_TL_MIN PLC_MS(6)
#define SLOW_C_MIN PLC_MS(10)
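/*
 * Worked example (illustrative, not from the original source): the PLC
 * timers count in steps of 2048/100000 ms (~20.48 us), so PLC_MS(m) loads
 * the register with 0x10000 minus the number of ticks in m milliseconds:
 *   PLC_MS(6)  = 0x10000 - 600000/2048  = 0x10000 - 292 = 0xFEDC
 *   PLC_MS(10) = 0x10000 - 1000000/2048 = 0x10000 - 488 = 0xFE18
 */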
static const struct plt {
int timer ; /* relative plc timer address */
int para ; /* default timing parameters */
} pltm[] = {
{ PL_C_MIN, SLOW_C_MIN }, /* min t. to remain Connect State */
{ PL_TL_MIN, SLOW_TL_MIN }, /* min t. to transmit a Line State */
{ PL_TB_MIN, TP_TB_MIN }, /* min break time */
{ PL_T_OUT, TP_T_OUT }, /* Signaling timeout */
{ PL_LC_LENGTH, TP_LC_LENGTH }, /* Link Confidence Test Time */
{ PL_T_SCRUB, TP_T_SCRUB }, /* Scrub Time == MAC TVX time ! */
{ PL_NS_MAX, TP_NS_MAX }, /* max t. that noise is tolerated */
{ 0,0 }
} ;
/*
* interrupt mask
*/
#ifdef SUPERNET_3
/*
* Do we need the EBUF error during signaling, too, to detect the
* SUPERNET_3 PLL bug?
*/
static const int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR;
#else /* SUPERNET_3 */
/*
* We do NOT need the elasticity buffer error during signaling.
*/
static int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
PL_PCM_ENABLED | PL_SELF_TEST ;
#endif /* SUPERNET_3 */
static const int plc_imsk_act = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR;
/* internal functions */
static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd);
static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy);
static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy);
static void reset_lem_struct(struct s_phy *phy);
static void plc_init(struct s_smc *smc, int p);
static void sm_ph_lem_start(struct s_smc *smc, int np, int threshold);
static void sm_ph_lem_stop(struct s_smc *smc, int np);
static void sm_ph_linestate(struct s_smc *smc, int phy, int ls);
static void real_init_plc(struct s_smc *smc);
/*
* SMT timer interface
* start PCM timer 0
*/
static void start_pcm_timer0(struct s_smc *smc, u_long value, int event,
struct s_phy *phy)
{
phy->timer0_exp = FALSE ; /* clear timer event flag */
smt_timer_start(smc,&phy->pcm_timer0,value,
EV_TOKEN(EVENT_PCM+phy->np,event)) ;
}
/*
* SMT timer interface
* stop PCM timer 0
*/
static void stop_pcm_timer0(struct s_smc *smc, struct s_phy *phy)
{
if (phy->pcm_timer0.tm_active)
smt_timer_stop(smc,&phy->pcm_timer0) ;
}
/*
init PCM state machine (called by driver)
clear all PCM vars and flags
*/
void pcm_init(struct s_smc *smc)
{
int i ;
int np ;
struct s_phy *phy ;
struct fddi_mib_p *mib ;
for (np = 0,phy = smc->y ; np < NUMPHYS ; np++,phy++) {
/* Indicates the type of PHY being used */
mib = phy->mib ;
mib->fddiPORTPCMState = ACTIONS(PC0_OFF) ;
phy->np = np ;
switch (smc->s.sas) {
#ifdef CONCENTRATOR
case SMT_SAS :
mib->fddiPORTMy_Type = (np == PS) ? TS : TM ;
break ;
case SMT_DAS :
mib->fddiPORTMy_Type = (np == PA) ? TA :
(np == PB) ? TB : TM ;
break ;
case SMT_NAC :
mib->fddiPORTMy_Type = TM ;
break;
#else
case SMT_SAS :
mib->fddiPORTMy_Type = (np == PS) ? TS : TNONE ;
mib->fddiPORTHardwarePresent = (np == PS) ? TRUE :
FALSE ;
#ifndef SUPERNET_3
smc->y[PA].mib->fddiPORTPCMState = PC0_OFF ;
#else
smc->y[PB].mib->fddiPORTPCMState = PC0_OFF ;
#endif
break ;
case SMT_DAS :
mib->fddiPORTMy_Type = (np == PB) ? TB : TA ;
break ;
#endif
}
/*
* set PMD-type
*/
phy->pmd_scramble = 0 ;
switch (phy->pmd_type[PMD_SK_PMD]) {
case 'P' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_MULTI ;
break ;
case 'L' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_LCF ;
break ;
case 'D' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
break ;
case 'S' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
phy->pmd_scramble = TRUE ;
break ;
case 'U' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
phy->pmd_scramble = TRUE ;
break ;
case '1' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE1 ;
break ;
case '2' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE2 ;
break ;
case '3' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE2 ;
break ;
case '4' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE1 ;
break ;
case 'H' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_UNKNOWN ;
break ;
case 'I' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
break ;
case 'G' :
mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
break ;
default:
mib->fddiPORTPMDClass = MIB_PMDCLASS_UNKNOWN ;
break ;
}
/*
* A and B port can be on primary and secondary path
*/
switch (mib->fddiPORTMy_Type) {
case TA :
mib->fddiPORTAvailablePaths |= MIB_PATH_S ;
mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
mib->fddiPORTRequestedPaths[2] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_CON_ALTER |
MIB_P_PATH_SEC_PREFER ;
mib->fddiPORTRequestedPaths[3] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_CON_ALTER |
MIB_P_PATH_SEC_PREFER |
MIB_P_PATH_THRU ;
break ;
case TB :
mib->fddiPORTAvailablePaths |= MIB_PATH_S ;
mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
mib->fddiPORTRequestedPaths[2] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_PRIM_PREFER ;
mib->fddiPORTRequestedPaths[3] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_PRIM_PREFER |
MIB_P_PATH_CON_PREFER |
MIB_P_PATH_THRU ;
break ;
case TS :
mib->fddiPORTAvailablePaths |= MIB_PATH_S ;
mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
mib->fddiPORTRequestedPaths[2] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_CON_ALTER |
MIB_P_PATH_PRIM_PREFER ;
mib->fddiPORTRequestedPaths[3] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_CON_ALTER |
MIB_P_PATH_PRIM_PREFER ;
break ;
case TM :
mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
mib->fddiPORTRequestedPaths[2] =
MIB_P_PATH_LOCAL |
MIB_P_PATH_SEC_ALTER |
MIB_P_PATH_PRIM_ALTER ;
mib->fddiPORTRequestedPaths[3] = 0 ;
break ;
}
phy->pc_lem_fail = FALSE ;
mib->fddiPORTPCMStateX = mib->fddiPORTPCMState ;
mib->fddiPORTLCTFail_Ct = 0 ;
mib->fddiPORTBS_Flag = 0 ;
mib->fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
mib->fddiPORTNeighborType = TNONE ;
phy->ls_flag = 0 ;
phy->rc_flag = 0 ;
phy->tc_flag = 0 ;
phy->td_flag = 0 ;
if (np >= PM)
phy->phy_name = '0' + np - PM ;
else
phy->phy_name = 'A' + np ;
phy->wc_flag = FALSE ; /* set by SMT */
memset((char *)&phy->lem,0,sizeof(struct lem_counter)) ;
reset_lem_struct(phy) ;
memset((char *)&phy->plc,0,sizeof(struct s_plc)) ;
phy->plc.p_state = PS_OFF ;
for (i = 0 ; i < NUMBITS ; i++) {
phy->t_next[i] = 0 ;
}
}
real_init_plc(smc) ;
}
void init_plc(struct s_smc *smc)
{
SK_UNUSED(smc) ;
/*
* dummy
* This is an obsolete public entry point that has to remain
* for compatibility; it is used by various drivers.
* The work is now done in real_init_plc(),
* which is called from pcm_init().
*/
}
static void real_init_plc(struct s_smc *smc)
{
int p ;
for (p = 0 ; p < NUMPHYS ; p++)
plc_init(smc,p) ;
}
static void plc_init(struct s_smc *smc, int p)
{
int i ;
#ifndef MOT_ELM
int rev ; /* Revision of PLC-x */
#endif /* MOT_ELM */
/* transit PCM state machine to MAINT state */
outpw(PLC(p,PL_CNTRL_B),0) ;
outpw(PLC(p,PL_CNTRL_B),PL_PCM_STOP) ;
outpw(PLC(p,PL_CNTRL_A),0) ;
/*
* if PLC-S then set control register C
*/
#ifndef MOT_ELM
rev = inpw(PLC(p,PL_STATUS_A)) & PLC_REV_MASK ;
if (rev != PLC_REVISION_A)
#endif /* MOT_ELM */
{
if (smc->y[p].pmd_scramble) {
outpw(PLC(p,PL_CNTRL_C),PLCS_CONTROL_C_S) ;
#ifdef MOT_ELM
outpw(PLC(p,PL_T_FOT_ASS),PLCS_FASSERT_S) ;
outpw(PLC(p,PL_T_FOT_DEASS),PLCS_FDEASSERT_S) ;
#endif /* MOT_ELM */
}
else {
outpw(PLC(p,PL_CNTRL_C),PLCS_CONTROL_C_U) ;
#ifdef MOT_ELM
outpw(PLC(p,PL_T_FOT_ASS),PLCS_FASSERT_U) ;
outpw(PLC(p,PL_T_FOT_DEASS),PLCS_FDEASSERT_U) ;
#endif /* MOT_ELM */
}
}
/*
* set timer register
*/
for ( i = 0 ; pltm[i].timer; i++) /* set timer parameter reg */
outpw(PLC(p,pltm[i].timer),pltm[i].para) ;
(void)inpw(PLC(p,PL_INTR_EVENT)) ; /* clear interrupt event reg */
plc_clear_irq(smc,p) ;
outpw(PLC(p,PL_INTR_MASK),plc_imsk_na); /* enable non active irq's */
/*
* If PCM is configured for class S, it will NOT go to the
* REMOVE state when offline (page 3-36).
* In the concentrator, all inactive PHYs must always be in
* the REMOVE state, so there is no real need to use this
* feature at all.
*/
#ifndef CONCENTRATOR
if ((smc->s.sas == SMT_SAS) && (p == PS)) {
outpw(PLC(p,PL_CNTRL_B),PL_CLASS_S) ;
}
#endif
}
/*
* control PCM state machine
*/
static void plc_go_state(struct s_smc *smc, int p, int state)
{
HW_PTR port ;
int val ;
SK_UNUSED(smc) ;
port = (HW_PTR) (PLC(p,PL_CNTRL_B)) ;
val = inpw(port) & ~(PL_PCM_CNTRL | PL_MAINT) ;
outpw(port,val) ;
outpw(port,val | state) ;
}
/*
* read current line state (called by ECM & PCM)
*/
int sm_pm_get_ls(struct s_smc *smc, int phy)
{
int state ;
#ifdef CONCENTRATOR
if (!plc_is_installed(smc,phy))
return PC_QLS;
#endif
state = inpw(PLC(phy,PL_STATUS_A)) & PL_LINE_ST ;
switch(state) {
case PL_L_QLS:
state = PC_QLS ;
break ;
case PL_L_MLS:
state = PC_MLS ;
break ;
case PL_L_HLS:
state = PC_HLS ;
break ;
case PL_L_ILS4:
case PL_L_ILS16:
state = PC_ILS ;
break ;
case PL_L_ALS:
state = PC_LS_PDR ;
break ;
default :
state = PC_LS_NONE ;
}
return state;
}
static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len)
{
int np = phy->np ; /* PHY index */
int n ;
int i ;
SK_UNUSED(smc) ;
/* create bit vector */
for (i = len-1,n = 0 ; i >= 0 ; i--) {
n = (n<<1) | phy->t_val[phy->bitn+i] ;
}
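/*
 * Illustrative example (not in the original source): with phy->bitn = 4,
 * len = 3 and t_val[4..6] = {1,0,1} the loop above yields
 * n = (1<<2) | (0<<1) | 1 = 5, i.e. bit i of the XMIT vector
 * corresponds to t_val[bitn + i].
 */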
if (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL) {
#if 0
printf("PL_PCM_SIGNAL is set\n") ;
#endif
return 1;
}
/* write the bit vector and its length - 1 to the PLC registers */
outpw(PLC(np,PL_VECTOR_LEN),len-1) ; /* len=nr-1 */
outpw(PLC(np,PL_XMIT_VECTOR),n) ;
#ifdef DEBUG
#if 1
#ifdef DEBUG_BRD
if (smc->debug.d_plc & 0x80)
#else
if (debug.d_plc & 0x80)
#endif
printf("SIGNALING bit %d .. %d\n",phy->bitn,phy->bitn+len-1) ;
#endif
#endif
return 0;
}
/*
* config plc muxes
*/
void plc_config_mux(struct s_smc *smc, int mux)
{
if (smc->s.sas != SMT_DAS)
return ;
if (mux == MUX_WRAPB) {
SETMASK(PLC(PA,PL_CNTRL_B),PL_CONFIG_CNTRL,PL_CONFIG_CNTRL) ;
SETMASK(PLC(PA,PL_CNTRL_A),PL_SC_REM_LOOP,PL_SC_REM_LOOP) ;
}
else {
CLEAR(PLC(PA,PL_CNTRL_B),PL_CONFIG_CNTRL) ;
CLEAR(PLC(PA,PL_CNTRL_A),PL_SC_REM_LOOP) ;
}
CLEAR(PLC(PB,PL_CNTRL_B),PL_CONFIG_CNTRL) ;
CLEAR(PLC(PB,PL_CNTRL_A),PL_SC_REM_LOOP) ;
}
/*
PCM state machine
called by dispatcher & fddi_init() (driver)
do
display state change
process event
until SM is stable
*/
void pcm(struct s_smc *smc, const int np, int event)
{
int state ;
int oldstate ;
struct s_phy *phy ;
struct fddi_mib_p *mib ;
#ifndef CONCENTRATOR
/*
* ignore 2nd PHY if SAS
*/
if ((np != PS) && (smc->s.sas == SMT_SAS))
return ;
#endif
phy = &smc->y[np] ;
mib = phy->mib ;
oldstate = mib->fddiPORTPCMState ;
do {
DB_PCM("PCM %c: state %s%s, event %s",
phy->phy_name,
mib->fddiPORTPCMState & AFLAG ? "ACTIONS " : "",
pcm_states[mib->fddiPORTPCMState & ~AFLAG],
pcm_events[event]);
state = mib->fddiPORTPCMState ;
pcm_fsm(smc,phy,event) ;
event = 0 ;
} while (state != mib->fddiPORTPCMState) ;
/*
* because the PLC does the bit signaling for us,
* we're always in SIGNAL state
* the MIB wants to see CONNECT
* we therefore fake an entry in the MIB
*/
if (state == PC5_SIGNAL)
mib->fddiPORTPCMStateX = PC3_CONNECT ;
else
mib->fddiPORTPCMStateX = state ;
#ifndef SLIM_SMT
/*
* path change
*/
if ( mib->fddiPORTPCMState != oldstate &&
((oldstate == PC8_ACTIVE) || (mib->fddiPORTPCMState == PC8_ACTIVE))) {
smt_srf_event(smc,SMT_EVENT_PORT_PATH_CHANGE,
(int) (INDEX_PORT+ phy->np),0) ;
}
#endif
#ifdef FDDI_MIB
/* check whether a snmp-trap has to be sent */
if ( mib->fddiPORTPCMState != oldstate ) {
/* a real state change took place */
DB_SNMP ("PCM from %d to %d\n", oldstate, mib->fddiPORTPCMState);
if ( mib->fddiPORTPCMState == PC0_OFF ) {
/* send first trap */
snmp_fddi_trap (smc, 1, (int) mib->fddiPORTIndex );
} else if ( oldstate == PC0_OFF ) {
/* send second trap */
snmp_fddi_trap (smc, 2, (int) mib->fddiPORTIndex );
} else if ( mib->fddiPORTPCMState != PC2_TRACE &&
oldstate == PC8_ACTIVE ) {
/* send third trap */
snmp_fddi_trap (smc, 3, (int) mib->fddiPORTIndex );
} else if ( mib->fddiPORTPCMState == PC8_ACTIVE ) {
/* send fourth trap */
snmp_fddi_trap (smc, 4, (int) mib->fddiPORTIndex );
}
}
#endif
pcm_state_change(smc,np,state) ;
}
/*
* PCM state machine
*/
static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd)
{
int i ;
int np = phy->np ; /* PHY index */
struct s_plc *plc ;
struct fddi_mib_p *mib ;
#ifndef MOT_ELM
u_short plc_rev ; /* Revision of the plc */
#endif /* nMOT_ELM */
plc = &phy->plc ;
mib = phy->mib ;
/*
* general transitions independent of state
*/
switch (cmd) {
case PC_STOP :
/*PC00-PC80*/
if (mib->fddiPORTPCMState != PC9_MAINT) {
GO_STATE(PC0_OFF) ;
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
FDDI_PORT_EVENT, (u_long) FDDI_PORT_STOP,
smt_get_port_event_word(smc));
}
return ;
case PC_START :
/*PC01-PC81*/
if (mib->fddiPORTPCMState != PC9_MAINT)
GO_STATE(PC1_BREAK) ;
return ;
case PC_DISABLE :
/* PC09-PC99 */
GO_STATE(PC9_MAINT) ;
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
FDDI_PORT_EVENT, (u_long) FDDI_PORT_DISABLED,
smt_get_port_event_word(smc));
return ;
case PC_TIMEOUT_LCT :
/* if long or extended LCT */
stop_pcm_timer0(smc,phy) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
/* end of LCT is indicated by PCM_CODE (initiates a PCM event) */
return ;
}
switch(mib->fddiPORTPCMState) {
case ACTIONS(PC0_OFF) :
stop_pcm_timer0(smc,phy) ;
outpw(PLC(np,PL_CNTRL_A),0) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
sm_ph_lem_stop(smc,np) ; /* disable LEM */
phy->cf_loop = FALSE ;
phy->cf_join = FALSE ;
queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
plc_go_state(smc,np,PL_PCM_STOP) ;
mib->fddiPORTConnectState = PCM_DISABLED ;
ACTIONS_DONE() ;
break ;
case PC0_OFF:
/*PC09*/
if (cmd == PC_MAINT) {
GO_STATE(PC9_MAINT) ;
break ;
}
break ;
case ACTIONS(PC1_BREAK) :
/* Stop the LCT timer if we came from Signal state */
stop_pcm_timer0(smc,phy) ;
ACTIONS_DONE() ;
plc_go_state(smc,np,0) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
sm_ph_lem_stop(smc,np) ; /* disable LEM */
/*
* if vector is already loaded, go to OFF to clear PCM_SIGNAL
*/
#if 0
if (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL) {
plc_go_state(smc,np,PL_PCM_STOP) ;
/* TB_MIN ? */
}
#endif
/*
* Go to OFF state in any case.
*/
plc_go_state(smc,np,PL_PCM_STOP) ;
if (mib->fddiPORTPC_Withhold == PC_WH_NONE)
mib->fddiPORTConnectState = PCM_CONNECTING ;
phy->cf_loop = FALSE ;
phy->cf_join = FALSE ;
queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
phy->ls_flag = FALSE ;
phy->pc_mode = PM_NONE ; /* needed by CFM */
phy->bitn = 0 ; /* bit signaling start bit */
for (i = 0 ; i < 3 ; i++)
pc_tcode_actions(smc,i,phy) ;
/* Set the non-active interrupt mask register */
outpw(PLC(np,PL_INTR_MASK),plc_imsk_na) ;
/*
* If the LCT was stopped, there might be a
* PCM_CODE interrupt event pending.
* This must be cleared.
*/
(void)inpw(PLC(np,PL_INTR_EVENT)) ;
#ifndef MOT_ELM
/* Get the plc revision for revision dependent code */
plc_rev = inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK ;
if (plc_rev != PLC_REV_SN3)
#endif /* MOT_ELM */
{
/*
* No SUPERNET III PLC, so set the XMIT vector and
* length BEFORE starting the state machine.
*/
if (plc_send_bits(smc,phy,3)) {
return ;
}
}
/*
* Now give the Start command.
* - The start command shall be issued BEFORE setting the bits
* to be signaled (per the PLC-S description and the PLCS in SN3).
* - The start command shall be issued AFTER setting the
* XMIT vector and the XMIT length register.
*
* We do it exactly according to these specs for the old PLC and
* the new PLCS inside the SN3.
* For the usual PLCS we try it the way it is done for the
* old PLC and set the XMIT registers again if the PLC is
* not in SIGNAL state. This is done according to a PLCS
* errata workaround.
*/
plc_go_state(smc,np,PL_PCM_START) ;
/*
* workaround for PLC-S eng. sample errata
*/
#ifdef MOT_ELM
if (!(inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL))
#else /* nMOT_ELM */
if (((inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK) !=
PLC_REVISION_A) &&
!(inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL))
#endif /* nMOT_ELM */
{
/*
* Set the registers again (PLCS errata) or for the first time
* (new SN3 PLCS).
*/
(void) plc_send_bits(smc,phy,3) ;
}
/*
* end of workaround
*/
GO_STATE(PC5_SIGNAL) ;
plc->p_state = PS_BIT3 ;
plc->p_bits = 3 ;
plc->p_start = 0 ;
break ;
case PC1_BREAK :
break ;
case ACTIONS(PC2_TRACE) :
plc_go_state(smc,np,PL_PCM_TRACE) ;
ACTIONS_DONE() ;
break ;
case PC2_TRACE :
break ;
case PC3_CONNECT : /* these states are done by hardware */
case PC4_NEXT :
break ;
case ACTIONS(PC5_SIGNAL) :
ACTIONS_DONE() ;
fallthrough;
case PC5_SIGNAL :
if ((cmd != PC_SIGNAL) && (cmd != PC_TIMEOUT_LCT))
break ;
switch (plc->p_state) {
case PS_BIT3 :
for (i = 0 ; i <= 2 ; i++)
pc_rcode_actions(smc,i,phy) ;
pc_tcode_actions(smc,3,phy) ;
plc->p_state = PS_BIT4 ;
plc->p_bits = 1 ;
plc->p_start = 3 ;
phy->bitn = 3 ;
if (plc_send_bits(smc,phy,1)) {
return ;
}
break ;
case PS_BIT4 :
pc_rcode_actions(smc,3,phy) ;
for (i = 4 ; i <= 6 ; i++)
pc_tcode_actions(smc,i,phy) ;
plc->p_state = PS_BIT7 ;
plc->p_bits = 3 ;
plc->p_start = 4 ;
phy->bitn = 4 ;
if (plc_send_bits(smc,phy,3)) {
return ;
}
break ;
case PS_BIT7 :
for (i = 3 ; i <= 6 ; i++)
pc_rcode_actions(smc,i,phy) ;
plc->p_state = PS_LCT ;
plc->p_bits = 0 ;
plc->p_start = 7 ;
phy->bitn = 7 ;
sm_ph_lem_start(smc,np,(int)smc->s.lct_short) ; /* enable LEM */
/* start LCT */
i = inpw(PLC(np,PL_CNTRL_B)) & ~PL_PC_LOOP ;
outpw(PLC(np,PL_CNTRL_B),i) ; /* must be cleared */
outpw(PLC(np,PL_CNTRL_B),i | PL_RLBP) ;
break ;
case PS_LCT :
/* check for local LCT failure */
pc_tcode_actions(smc,7,phy) ;
/*
* set tval[7]
*/
plc->p_state = PS_BIT8 ;
plc->p_bits = 1 ;
plc->p_start = 7 ;
phy->bitn = 7 ;
if (plc_send_bits(smc,phy,1)) {
return ;
}
break ;
case PS_BIT8 :
/* check for remote LCT failure */
pc_rcode_actions(smc,7,phy) ;
if (phy->t_val[7] || phy->r_val[7]) {
plc_go_state(smc,np,PL_PCM_STOP) ;
GO_STATE(PC1_BREAK) ;
break ;
}
for (i = 8 ; i <= 9 ; i++)
pc_tcode_actions(smc,i,phy) ;
plc->p_state = PS_JOIN ;
plc->p_bits = 2 ;
plc->p_start = 8 ;
phy->bitn = 8 ;
if (plc_send_bits(smc,phy,2)) {
return ;
}
break ;
case PS_JOIN :
for (i = 8 ; i <= 9 ; i++)
pc_rcode_actions(smc,i,phy) ;
plc->p_state = PS_ACTIVE ;
GO_STATE(PC6_JOIN) ;
break ;
}
break ;
case ACTIONS(PC6_JOIN) :
/*
* prevent mux error when going from WRAP_A to WRAP_B
*/
if (smc->s.sas == SMT_DAS && np == PB &&
(smc->y[PA].pc_mode == PM_TREE ||
smc->y[PB].pc_mode == PM_TREE)) {
SETMASK(PLC(np,PL_CNTRL_A),
PL_SC_REM_LOOP,PL_SC_REM_LOOP) ;
SETMASK(PLC(np,PL_CNTRL_B),
PL_CONFIG_CNTRL,PL_CONFIG_CNTRL) ;
}
SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ;
SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ;
ACTIONS_DONE() ;
cmd = 0 ;
fallthrough;
case PC6_JOIN :
switch (plc->p_state) {
case PS_ACTIVE:
/*PC88b*/
if (!phy->cf_join) {
phy->cf_join = TRUE ;
queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
}
if (cmd == PC_JOIN)
GO_STATE(PC8_ACTIVE) ;
/*PC82*/
if (cmd == PC_TRACE) {
GO_STATE(PC2_TRACE) ;
break ;
}
break ;
}
break ;
case PC7_VERIFY :
break ;
case ACTIONS(PC8_ACTIVE) :
/*
* start LEM for SMT
*/
sm_ph_lem_start(smc,(int)phy->np,LCT_LEM_MAX) ;
phy->tr_flag = FALSE ;
mib->fddiPORTConnectState = PCM_ACTIVE ;
/* Set the active interrupt mask register */
outpw(PLC(np,PL_INTR_MASK),plc_imsk_act) ;
ACTIONS_DONE() ;
break ;
case PC8_ACTIVE :
/*PC81 is done by PL_TNE_EXPIRED irq */
/*PC82*/
if (cmd == PC_TRACE) {
GO_STATE(PC2_TRACE) ;
break ;
}
/*PC88c: is done by TRACE_PROP irq */
break ;
case ACTIONS(PC9_MAINT) :
stop_pcm_timer0(smc,phy) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
CLEAR(PLC(np,PL_INTR_MASK),PL_LE_CTR) ; /* disable LEM int. */
sm_ph_lem_stop(smc,np) ; /* disable LEM */
phy->cf_loop = FALSE ;
phy->cf_join = FALSE ;
queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
plc_go_state(smc,np,PL_PCM_STOP) ;
mib->fddiPORTConnectState = PCM_DISABLED ;
SETMASK(PLC(np,PL_CNTRL_B),PL_MAINT,PL_MAINT) ;
sm_ph_linestate(smc,np,(int) MIB2LS(mib->fddiPORTMaint_LS)) ;
outpw(PLC(np,PL_CNTRL_A),PL_SC_BYPASS) ;
ACTIONS_DONE() ;
break ;
case PC9_MAINT :
DB_PCMN(1, "PCM %c : MAINT", phy->phy_name);
/*PC90*/
if (cmd == PC_ENABLE) {
GO_STATE(PC0_OFF) ;
break ;
}
break ;
default:
SMT_PANIC(smc,SMT_E0118, SMT_E0118_MSG) ;
break ;
}
}
/*
* force line state on a PHY output (only in MAINT state)
*/
static void sm_ph_linestate(struct s_smc *smc, int phy, int ls)
{
int cntrl ;
SK_UNUSED(smc) ;
cntrl = (inpw(PLC(phy,PL_CNTRL_B)) & ~PL_MAINT_LS) |
PL_PCM_STOP | PL_MAINT ;
switch(ls) {
case PC_QLS: /* Force Quiet */
cntrl |= PL_M_QUI0 ;
break ;
case PC_MLS: /* Force Master */
cntrl |= PL_M_MASTR ;
break ;
case PC_HLS: /* Force Halt */
cntrl |= PL_M_HALT ;
break ;
default :
case PC_ILS: /* Force Idle */
cntrl |= PL_M_IDLE ;
break ;
case PC_LS_PDR: /* Enable repeat filter */
cntrl |= PL_M_TPDR ;
break ;
}
outpw(PLC(phy,PL_CNTRL_B),cntrl) ;
}
static void reset_lem_struct(struct s_phy *phy)
{
struct lem_counter *lem = &phy->lem ;
phy->mib->fddiPORTLer_Estimate = 15 ;
lem->lem_float_ber = 15 * 100 ;
}
/*
* link error monitor
*/
static void lem_evaluate(struct s_smc *smc, struct s_phy *phy)
{
int ber ;
u_long errors ;
struct lem_counter *lem = &phy->lem ;
struct fddi_mib_p *mib ;
int cond ;
mib = phy->mib ;
if (!lem->lem_on)
return ;
errors = inpw(PLC(((int) phy->np),PL_LINK_ERR_CTR)) ;
lem->lem_errors += errors ;
mib->fddiPORTLem_Ct += errors ;
errors = lem->lem_errors ;
/*
* The calculation is done on an interval of 8 seconds.
* -> This means that one error in 8 seconds is one error in
* 8*125*10E6 bits, i.e. a BER of 10E-9.
* Please note:
* -> 9 errors in 8 seconds mean:
* BER = 9 * 10E-9 and this is
* < 10E-8, so the limit of 10E-8 is not reached!
*/
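/*
 * Worked example (illustrative, not from the original comment): with a
 * previous estimate of 10E-15 (lem_float_ber = 1500) and 9 errors in the
 * current interval (ber = 9, scaled to 900), the weighted average below
 * gives (1500*7 + 900*3)/10 = 1320, i.e. a reported
 * fddiPORTLer_Estimate of 13.
 */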
if (!errors) ber = 15 ;
else if (errors <= 9) ber = 9 ;
else if (errors <= 99) ber = 8 ;
else if (errors <= 999) ber = 7 ;
else if (errors <= 9999) ber = 6 ;
else if (errors <= 99999) ber = 5 ;
else if (errors <= 999999) ber = 4 ;
else if (errors <= 9999999) ber = 3 ;
else if (errors <= 99999999) ber = 2 ;
else if (errors <= 999999999) ber = 1 ;
else ber = 0 ;
/*
* weighted average
*/
ber *= 100 ;
lem->lem_float_ber = lem->lem_float_ber * 7 + ber * 3 ;
lem->lem_float_ber /= 10 ;
mib->fddiPORTLer_Estimate = lem->lem_float_ber / 100 ;
if (mib->fddiPORTLer_Estimate < 4) {
mib->fddiPORTLer_Estimate = 4 ;
}
if (lem->lem_errors) {
DB_PCMN(1, "LEM %c :", phy->np == PB ? 'B' : 'A');
DB_PCMN(1, "errors : %ld", lem->lem_errors);
DB_PCMN(1, "sum_errors : %ld", mib->fddiPORTLem_Ct);
DB_PCMN(1, "current BER : 10E-%d", ber / 100);
DB_PCMN(1, "float BER : 10E-(%d/100)", lem->lem_float_ber);
DB_PCMN(1, "avg. BER : 10E-%d", mib->fddiPORTLer_Estimate);
}
lem->lem_errors = 0L ;
#ifndef SLIM_SMT
cond = (mib->fddiPORTLer_Estimate <= mib->fddiPORTLer_Alarm) ?
TRUE : FALSE ;
#ifdef SMT_EXT_CUTOFF
smt_ler_alarm_check(smc,phy,cond) ;
#endif /* nSMT_EXT_CUTOFF */
if (cond != mib->fddiPORTLerFlag) {
smt_srf_event(smc,SMT_COND_PORT_LER,
(int) (INDEX_PORT+ phy->np) ,cond) ;
}
#endif
if ( mib->fddiPORTLer_Estimate <= mib->fddiPORTLer_Cutoff) {
phy->pc_lem_fail = TRUE ; /* flag */
mib->fddiPORTLem_Reject_Ct++ ;
/*
* "forgive 10e-2" if we cutoff so we can come
* up again ..
*/
lem->lem_float_ber += 2*100 ;
/*PC81b*/
#ifdef CONCENTRATOR
DB_PCMN(1, "PCM: LER cutoff on port %d cutoff %d",
phy->np, mib->fddiPORTLer_Cutoff);
#endif
#ifdef SMT_EXT_CUTOFF
smt_port_off_event(smc,phy->np);
#else /* nSMT_EXT_CUTOFF */
queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ;
#endif /* nSMT_EXT_CUTOFF */
}
}
/*
* called by SMT to calculate LEM bit error rate
*/
void sm_lem_evaluate(struct s_smc *smc)
{
int np ;
for (np = 0 ; np < NUMPHYS ; np++)
lem_evaluate(smc,&smc->y[np]) ;
}
static void lem_check_lct(struct s_smc *smc, struct s_phy *phy)
{
struct lem_counter *lem = &phy->lem ;
struct fddi_mib_p *mib ;
int errors ;
mib = phy->mib ;
phy->pc_lem_fail = FALSE ; /* flag */
errors = inpw(PLC(((int)phy->np),PL_LINK_ERR_CTR)) ;
lem->lem_errors += errors ;
mib->fddiPORTLem_Ct += errors ;
if (lem->lem_errors) {
switch(phy->lc_test) {
case LC_SHORT:
if (lem->lem_errors >= smc->s.lct_short)
phy->pc_lem_fail = TRUE ;
break ;
case LC_MEDIUM:
if (lem->lem_errors >= smc->s.lct_medium)
phy->pc_lem_fail = TRUE ;
break ;
case LC_LONG:
if (lem->lem_errors >= smc->s.lct_long)
phy->pc_lem_fail = TRUE ;
break ;
case LC_EXTENDED:
if (lem->lem_errors >= smc->s.lct_extended)
phy->pc_lem_fail = TRUE ;
break ;
}
DB_PCMN(1, " >>errors : %lu", lem->lem_errors);
}
if (phy->pc_lem_fail) {
mib->fddiPORTLCTFail_Ct++ ;
mib->fddiPORTLem_Reject_Ct++ ;
}
else
mib->fddiPORTLCTFail_Ct = 0 ;
}
/*
* LEM functions
*/
static void sm_ph_lem_start(struct s_smc *smc, int np, int threshold)
{
struct lem_counter *lem = &smc->y[np].lem ;
lem->lem_on = 1 ;
lem->lem_errors = 0L ;
/* Do NOT reset mib->fddiPORTLer_Estimate here. It is called too
* often.
*/
outpw(PLC(np,PL_LE_THRESHOLD),threshold) ;
(void)inpw(PLC(np,PL_LINK_ERR_CTR)) ; /* clear error counter */
/* enable LE INT */
SETMASK(PLC(np,PL_INTR_MASK),PL_LE_CTR,PL_LE_CTR) ;
}
static void sm_ph_lem_stop(struct s_smc *smc, int np)
{
struct lem_counter *lem = &smc->y[np].lem ;
lem->lem_on = 0 ;
CLEAR(PLC(np,PL_INTR_MASK),PL_LE_CTR) ;
}
/*
* PCM pseudo code
* receive actions are called AFTER the bit n is received,
* i.e. if pc_rcode_actions(5) is called, bit 6 is the next bit to be received
*/
/*
* PCM pseudo code 5.1 .. 6.1
*/
static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
{
struct fddi_mib_p *mib ;
mib = phy->mib ;
DB_PCMN(1, "SIG rec %x %x:", bit, phy->r_val[bit]);
bit++ ;
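/*
 * Note (comment added for clarity): after the increment the switch index
 * is the "next bit to be received", so e.g. a call with bit == 2 lands in
 * case 3 and evaluates the already received r_val[1..2].
 */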
switch(bit) {
case 0:
case 1:
case 2:
break ;
case 3 :
if (phy->r_val[1] == 0 && phy->r_val[2] == 0)
mib->fddiPORTNeighborType = TA ;
else if (phy->r_val[1] == 0 && phy->r_val[2] == 1)
mib->fddiPORTNeighborType = TB ;
else if (phy->r_val[1] == 1 && phy->r_val[2] == 0)
mib->fddiPORTNeighborType = TS ;
else if (phy->r_val[1] == 1 && phy->r_val[2] == 1)
mib->fddiPORTNeighborType = TM ;
break ;
case 4:
if (mib->fddiPORTMy_Type == TM &&
mib->fddiPORTNeighborType == TM) {
DB_PCMN(1, "PCM %c : E100 withhold M-M",
phy->phy_name);
mib->fddiPORTPC_Withhold = PC_WH_M_M ;
RS_SET(smc,RS_EVENT) ;
}
else if (phy->t_val[3] || phy->r_val[3]) {
mib->fddiPORTPC_Withhold = PC_WH_NONE ;
if (mib->fddiPORTMy_Type == TM ||
mib->fddiPORTNeighborType == TM)
phy->pc_mode = PM_TREE ;
else
phy->pc_mode = PM_PEER ;
/* reevaluate the selection criteria (wc_flag) */
all_selection_criteria (smc);
if (phy->wc_flag) {
mib->fddiPORTPC_Withhold = PC_WH_PATH ;
}
}
else {
mib->fddiPORTPC_Withhold = PC_WH_OTHER ;
RS_SET(smc,RS_EVENT) ;
DB_PCMN(1, "PCM %c : E101 withhold other",
phy->phy_name);
}
phy->twisted = ((mib->fddiPORTMy_Type != TS) &&
(mib->fddiPORTMy_Type != TM) &&
(mib->fddiPORTNeighborType ==
mib->fddiPORTMy_Type)) ;
if (phy->twisted) {
DB_PCMN(1, "PCM %c : E102 !!! TWISTED !!!",
phy->phy_name);
}
break ;
case 5 :
break ;
case 6:
if (phy->t_val[4] || phy->r_val[4]) {
if ((phy->t_val[4] && phy->t_val[5]) ||
(phy->r_val[4] && phy->r_val[5]) )
phy->lc_test = LC_EXTENDED ;
else
phy->lc_test = LC_LONG ;
}
else if (phy->t_val[5] || phy->r_val[5])
phy->lc_test = LC_MEDIUM ;
else
phy->lc_test = LC_SHORT ;
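/*
 * Summary of the selection above (comment added for clarity):
 *   bit 4 set (locally or remotely) and bits 4 and 5 both set
 *   on the same station                          -> LC_EXTENDED
 *   bit 4 set, but not 4 and 5 on the same station -> LC_LONG
 *   bit 4 clear on both sides, bit 5 set on either -> LC_MEDIUM
 *   neither bit 4 nor bit 5 set                    -> LC_SHORT
 */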
switch (phy->lc_test) {
case LC_SHORT : /* 50ms */
outpw(PLC((int)phy->np,PL_LC_LENGTH), TP_LC_LENGTH ) ;
phy->t_next[7] = smc->s.pcm_lc_short ;
break ;
case LC_MEDIUM : /* 500ms */
outpw(PLC((int)phy->np,PL_LC_LENGTH), TP_LC_LONGLN ) ;
phy->t_next[7] = smc->s.pcm_lc_medium ;
break ;
case LC_LONG :
SETMASK(PLC((int)phy->np,PL_CNTRL_B),PL_LONG,PL_LONG) ;
phy->t_next[7] = smc->s.pcm_lc_long ;
break ;
case LC_EXTENDED :
SETMASK(PLC((int)phy->np,PL_CNTRL_B),PL_LONG,PL_LONG) ;
phy->t_next[7] = smc->s.pcm_lc_extended ;
break ;
}
if (phy->t_next[7] > smc->s.pcm_lc_medium) {
start_pcm_timer0(smc,phy->t_next[7],PC_TIMEOUT_LCT,phy);
}
DB_PCMN(1, "LCT timer = %ld us", phy->t_next[7]);
phy->t_next[9] = smc->s.pcm_t_next_9 ;
break ;
case 7:
if (phy->t_val[6]) {
phy->cf_loop = TRUE ;
}
phy->td_flag = TRUE ;
break ;
case 8:
if (phy->t_val[7] || phy->r_val[7]) {
DB_PCMN(1, "PCM %c : E103 LCT fail %s",
phy->phy_name,
phy->t_val[7] ? "local" : "remote");
queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ;
}
break ;
case 9:
if (phy->t_val[8] || phy->r_val[8]) {
if (phy->t_val[8])
phy->cf_loop = TRUE ;
phy->td_flag = TRUE ;
}
break ;
case 10:
if (phy->r_val[9]) {
/* neighbor intends to have MAC on output */ ;
mib->fddiPORTMacIndicated.R_val = TRUE ;
}
else {
/* neighbor does not intend to have MAC on output */ ;
mib->fddiPORTMacIndicated.R_val = FALSE ;
}
break ;
}
}
/*
* PCM pseudo code 5.1 .. 6.1
*/
static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy)
{
int np = phy->np ;
struct fddi_mib_p *mib ;
mib = phy->mib ;
switch(bit) {
case 0:
phy->t_val[0] = 0 ; /* no escape used */
break ;
case 1:
if (mib->fddiPORTMy_Type == TS || mib->fddiPORTMy_Type == TM)
phy->t_val[1] = 1 ;
else
phy->t_val[1] = 0 ;
break ;
case 2 :
if (mib->fddiPORTMy_Type == TB || mib->fddiPORTMy_Type == TM)
phy->t_val[2] = 1 ;
else
phy->t_val[2] = 0 ;
break ;
case 3:
{
int type,ne ;
int policy ;
type = mib->fddiPORTMy_Type ;
ne = mib->fddiPORTNeighborType ;
policy = smc->mib.fddiSMTConnectionPolicy ;
phy->t_val[3] = 1 ; /* Accept connection */
switch (type) {
case TA :
if (
((policy & POLICY_AA) && ne == TA) ||
((policy & POLICY_AB) && ne == TB) ||
((policy & POLICY_AS) && ne == TS) ||
((policy & POLICY_AM) && ne == TM) )
phy->t_val[3] = 0 ; /* Reject */
break ;
case TB :
if (
((policy & POLICY_BA) && ne == TA) ||
((policy & POLICY_BB) && ne == TB) ||
((policy & POLICY_BS) && ne == TS) ||
((policy & POLICY_BM) && ne == TM) )
phy->t_val[3] = 0 ; /* Reject */
break ;
case TS :
if (
((policy & POLICY_SA) && ne == TA) ||
((policy & POLICY_SB) && ne == TB) ||
((policy & POLICY_SS) && ne == TS) ||
((policy & POLICY_SM) && ne == TM) )
phy->t_val[3] = 0 ; /* Reject */
break ;
case TM :
if ( ne == TM ||
((policy & POLICY_MA) && ne == TA) ||
((policy & POLICY_MB) && ne == TB) ||
((policy & POLICY_MS) && ne == TS) ||
((policy & POLICY_MM) && ne == TM) )
phy->t_val[3] = 0 ; /* Reject */
break ;
}
#ifndef SLIM_SMT
/*
* detect undesirable connection attempt event
*/
if ( (type == TA && ne == TA ) ||
(type == TA && ne == TS ) ||
(type == TB && ne == TB ) ||
(type == TB && ne == TS ) ||
(type == TS && ne == TA ) ||
(type == TS && ne == TB ) ) {
smt_srf_event(smc,SMT_EVENT_PORT_CONNECTION,
(int) (INDEX_PORT+ phy->np) ,0) ;
}
#endif
}
break ;
case 4:
if (mib->fddiPORTPC_Withhold == PC_WH_NONE) {
if (phy->pc_lem_fail) {
phy->t_val[4] = 1 ; /* long */
phy->t_val[5] = 0 ;
}
else {
phy->t_val[4] = 0 ;
if (mib->fddiPORTLCTFail_Ct > 0)
phy->t_val[5] = 1 ; /* medium */
else
phy->t_val[5] = 0 ; /* short */
/*
* Implementer's choice: use medium
* instead of short when an undesired
* connection attempt is made.
*/
if (phy->wc_flag)
phy->t_val[5] = 1 ; /* medium */
}
mib->fddiPORTConnectState = PCM_CONNECTING ;
}
else {
mib->fddiPORTConnectState = PCM_STANDBY ;
phy->t_val[4] = 1 ; /* extended */
phy->t_val[5] = 1 ;
}
break ;
case 5:
break ;
case 6:
/* we do NOT have a MAC for LCT */
phy->t_val[6] = 0 ;
break ;
case 7:
phy->cf_loop = FALSE ;
lem_check_lct(smc,phy) ;
if (phy->pc_lem_fail) {
DB_PCMN(1, "PCM %c : E104 LCT failed", phy->phy_name);
phy->t_val[7] = 1 ;
}
else
phy->t_val[7] = 0 ;
break ;
case 8:
phy->t_val[8] = 0 ; /* Don't request MAC loopback */
break ;
case 9:
phy->cf_loop = 0 ;
if ((mib->fddiPORTPC_Withhold != PC_WH_NONE) ||
((smc->s.sas == SMT_DAS) && (phy->wc_flag))) {
queue_event(smc,EVENT_PCM+np,PC_START) ;
break ;
}
phy->t_val[9] = FALSE ;
switch (smc->s.sas) {
case SMT_DAS :
/*
* MAC intended on output
*/
if (phy->pc_mode == PM_TREE) {
if ((np == PB) || ((np == PA) &&
(smc->y[PB].mib->fddiPORTConnectState !=
PCM_ACTIVE)))
phy->t_val[9] = TRUE ;
}
else {
if (np == PB)
phy->t_val[9] = TRUE ;
}
break ;
case SMT_SAS :
if (np == PS)
phy->t_val[9] = TRUE ;
break ;
#ifdef CONCENTRATOR
case SMT_NAC :
/*
* MAC intended on output
*/
if (np == PB)
phy->t_val[9] = TRUE ;
break ;
#endif
}
mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ;
break ;
}
DB_PCMN(1, "SIG snd %x %x:", bit, phy->t_val[bit]);
}
/*
* return status twisted (called by SMT)
*/
int pcm_status_twisted(struct s_smc *smc)
{
int twist = 0 ;
if (smc->s.sas != SMT_DAS)
return 0;
if (smc->y[PA].twisted && (smc->y[PA].mib->fddiPORTPCMState == PC8_ACTIVE))
twist |= 1 ;
if (smc->y[PB].twisted && (smc->y[PB].mib->fddiPORTPCMState == PC8_ACTIVE))
twist |= 2 ;
return twist;
}
/*
* return status (called by SMT)
* type
* state
* remote phy type
* remote mac yes/no
*/
void pcm_status_state(struct s_smc *smc, int np, int *type, int *state,
int *remote, int *mac)
{
struct s_phy *phy = &smc->y[np] ;
struct fddi_mib_p *mib ;
mib = phy->mib ;
/* remote PHY type and MAC - set only if active */
*mac = 0 ;
*type = mib->fddiPORTMy_Type ; /* our PHY type */
*state = mib->fddiPORTConnectState ;
*remote = mib->fddiPORTNeighborType ;
switch(mib->fddiPORTPCMState) {
case PC8_ACTIVE :
*mac = mib->fddiPORTMacIndicated.R_val ;
break ;
}
}
/*
* return rooted station status (called by SMT)
*/
int pcm_rooted_station(struct s_smc *smc)
{
int n ;
for (n = 0 ; n < NUMPHYS ; n++) {
if (smc->y[n].mib->fddiPORTPCMState == PC8_ACTIVE &&
smc->y[n].mib->fddiPORTNeighborType == TM)
return 0;
}
return 1;
}
/*
* Interrupt actions for PLC & PCM events
*/
void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
/* int np; PHY index */
{
struct s_phy *phy = &smc->y[np] ;
struct s_plc *plc = &phy->plc ;
int n ;
#ifdef SUPERNET_3
int corr_mask ;
#endif /* SUPERNET_3 */
int i ;
if (np >= smc->s.numphys) {
plc->soft_err++ ;
return ;
}
if (cmd & PL_EBUF_ERR) { /* elastic buff. det. over-|underflow*/
/*
* Check whether the SRF Condition occurred.
*/
if (!plc->ebuf_cont && phy->mib->fddiPORTPCMState == PC8_ACTIVE){
/*
* This is the real Elasticity Error.
* More than one in a row is treated as a
* single one.
* Only count this in the active state.
*/
phy->mib->fddiPORTEBError_Ct ++ ;
}
plc->ebuf_err++ ;
if (plc->ebuf_cont <= 1000) {
/*
* Prevent the counter from wrapping if this
* interrupt stays asserted for a very long time.
*/
plc->ebuf_cont++ ; /* Ebuf continuous error */
}
#ifdef SUPERNET_3
if (plc->ebuf_cont == 1000 &&
((inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK) ==
PLC_REV_SN3)) {
/*
* This interrupt remained high for at least
* 1000 consecutive interrupt calls.
*
* This is caused by a hardware error in the
* ORION part of the Supernet III chipset.
*
* Disable this bit in the mask.
*/
corr_mask = (plc_imsk_na & ~PL_EBUF_ERR) ;
outpw(PLC(np,PL_INTR_MASK),corr_mask);
/*
* Disconnect from the ring.
* Call the driver with the reset indication.
*/
queue_event(smc,EVENT_ECM,EC_DISCONNECT) ;
/*
* Make an error log entry.
*/
SMT_ERR_LOG(smc,SMT_E0136, SMT_E0136_MSG) ;
/*
* Indicate the Reset.
*/
drv_reset_indication(smc) ;
}
#endif /* SUPERNET_3 */
} else {
/* Reset the continuous error variable */
plc->ebuf_cont = 0 ; /* reset Ebuf continuous error */
}
if (cmd & PL_PHYINV) { /* physical layer invalid signal */
plc->phyinv++ ;
}
if (cmd & PL_VSYM_CTR) { /* violation symbol counter has incr.*/
plc->vsym_ctr++ ;
}
if (cmd & PL_MINI_CTR) { /* dep. on PLC_CNTRL_A's MINI_CTR_INT*/
plc->mini_ctr++ ;
}
if (cmd & PL_LE_CTR) { /* link error event counter */
int j ;
/*
* note: PL_LINK_ERR_CTR MUST be read to clear it
*/
j = inpw(PLC(np,PL_LE_THRESHOLD)) ;
i = inpw(PLC(np,PL_LINK_ERR_CTR)) ;
if (i < j) {
/* wrapped around */
i += 256 ;
}
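/*
 * Note (added for clarity, assuming an 8-bit PL_LINK_ERR_CTR as the wrap
 * handling above implies): a read value below the programmed threshold
 * means the counter passed 255 and wrapped, so one full period of 256 is
 * added back. E.g. threshold 200 and a read value of 5 is accounted as
 * 5 + 256 = 261 link errors.
 */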
if (phy->lem.lem_on) {
/* Note: LEM errors shall only be counted when the
* link is ACTIVE or the LCT is active.
*/
phy->lem.lem_errors += i ;
phy->mib->fddiPORTLem_Ct += i ;
}
}
if (cmd & PL_TPC_EXPIRED) { /* TPC timer reached zero */
if (plc->p_state == PS_LCT) {
/*
* end of LCT
*/
;
}
plc->tpc_exp++ ;
}
if (cmd & PL_LS_MATCH) { /* LS == LS in PLC_CNTRL_B's MATCH_LS*/
switch (inpw(PLC(np,PL_CNTRL_B)) & PL_MATCH_LS) {
case PL_I_IDLE : phy->curr_ls = PC_ILS ; break ;
case PL_I_HALT : phy->curr_ls = PC_HLS ; break ;
case PL_I_MASTR : phy->curr_ls = PC_MLS ; break ;
case PL_I_QUIET : phy->curr_ls = PC_QLS ; break ;
}
}
if (cmd & PL_PCM_BREAK) { /* PCM has entered the BREAK state */
int reason;
reason = inpw(PLC(np,PL_STATUS_B)) & PL_BREAK_REASON ;
switch (reason) {
case PL_B_PCS : plc->b_pcs++ ; break ;
case PL_B_TPC : plc->b_tpc++ ; break ;
case PL_B_TNE : plc->b_tne++ ; break ;
case PL_B_QLS : plc->b_qls++ ; break ;
case PL_B_ILS : plc->b_ils++ ; break ;
case PL_B_HLS : plc->b_hls++ ; break ;
}
/*jd 05-Aug-1999 changed: Bug #10419 */
DB_PCMN(1, "PLC %d: MDcF = %x", np, smc->e.DisconnectFlag);
if (smc->e.DisconnectFlag == FALSE) {
DB_PCMN(1, "PLC %d: restart (reason %x)", np, reason);
queue_event(smc,EVENT_PCM+np,PC_START) ;
}
else {
DB_PCMN(1, "PLC %d: NO!! restart (reason %x)",
np, reason);
}
return ;
}
/*
* If both CODE & ENABLE are set ignore enable
*/
if (cmd & PL_PCM_CODE) { /* receive last sign.-bit | LCT complete */
queue_event(smc,EVENT_PCM+np,PC_SIGNAL) ;
n = inpw(PLC(np,PL_RCV_VECTOR)) ;
for (i = 0 ; i < plc->p_bits ; i++) {
phy->r_val[plc->p_start+i] = n & 1 ;
n >>= 1 ;
}
}
else if (cmd & PL_PCM_ENABLED) { /* asserted SC_JOIN, scrub.completed*/
queue_event(smc,EVENT_PCM+np,PC_JOIN) ;
}
if (cmd & PL_TRACE_PROP) { /* MLS while PC8_ACTIV || PC2_TRACE */
/*PC22b*/
if (!phy->tr_flag) {
DB_PCMN(1, "PCM : irq TRACE_PROP %d %d",
np, smc->mib.fddiSMTECMState);
phy->tr_flag = TRUE ;
smc->e.trace_prop |= ENTITY_BIT(ENTITY_PHY(np)) ;
queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ;
}
}
/*
* filter PLC glitch ???
* QLS || HLS only while in PC2_TRACE state
*/
if ((cmd & PL_SELF_TEST) && (phy->mib->fddiPORTPCMState == PC2_TRACE)) {
/*PC22a*/
if (smc->e.path_test == PT_PASSED) {
DB_PCMN(1, "PCM : state = %s %d",
get_pcmstate(smc, np),
phy->mib->fddiPORTPCMState);
smc->e.path_test = PT_PENDING ;
queue_event(smc,EVENT_ECM,EC_PATH_TEST) ;
}
}
if (cmd & PL_TNE_EXPIRED) { /* TNE: length of noise events */
/* break_required (TNE > NS_Max) */
if (phy->mib->fddiPORTPCMState == PC8_ACTIVE) {
if (!phy->tr_flag) {
DB_PCMN(1, "PCM %c : PC81 %s",
phy->phy_name, "NSE");
queue_event(smc, EVENT_PCM + np, PC_START);
return;
}
}
}
#if 0
if (cmd & PL_NP_ERR) { /* NP has requested to r/w an inv reg*/
/*
* It's a bug by AMD
*/
plc->np_err++ ;
}
/* pin inactive (GND) */
if (cmd & PL_PARITY_ERR) { /* parity error detected on TX9-0 inputs */
plc->parity_err++ ;
}
if (cmd & PL_LSDO) { /* carrier detected */
;
}
#endif
}
#ifdef DEBUG
/*
* fill state struct
*/
void pcm_get_state(struct s_smc *smc, struct smt_state *state)
{
struct s_phy *phy ;
struct pcm_state *pcs ;
int i ;
int ii ;
short rbits ;
short tbits ;
struct fddi_mib_p *mib ;
for (i = 0, phy = smc->y, pcs = state->pcm_state ; i < NUMPHYS ;
i++ , phy++, pcs++ ) {
mib = phy->mib ;
pcs->pcm_type = (u_char) mib->fddiPORTMy_Type ;
pcs->pcm_state = (u_char) mib->fddiPORTPCMState ;
pcs->pcm_mode = phy->pc_mode ;
pcs->pcm_neighbor = (u_char) mib->fddiPORTNeighborType ;
pcs->pcm_bsf = mib->fddiPORTBS_Flag ;
pcs->pcm_lsf = phy->ls_flag ;
pcs->pcm_lct_fail = (u_char) mib->fddiPORTLCTFail_Ct ;
pcs->pcm_ls_rx = LS2MIB(sm_pm_get_ls(smc,i)) ;
for (ii = 0, rbits = tbits = 0 ; ii < NUMBITS ; ii++) {
rbits <<= 1 ;
tbits <<= 1 ;
if (phy->r_val[NUMBITS-1-ii])
rbits |= 1 ;
if (phy->t_val[NUMBITS-1-ii])
tbits |= 1 ;
}
pcs->pcm_r_val = rbits ;
pcs->pcm_t_val = tbits ;
}
}
int get_pcm_state(struct s_smc *smc, int np)
{
int pcs ;
SK_UNUSED(smc) ;
switch (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_STATE) {
case PL_PC0 : pcs = PC_STOP ; break ;
case PL_PC1 : pcs = PC_START ; break ;
case PL_PC2 : pcs = PC_TRACE ; break ;
case PL_PC3 : pcs = PC_SIGNAL ; break ;
case PL_PC4 : pcs = PC_SIGNAL ; break ;
case PL_PC5 : pcs = PC_SIGNAL ; break ;
case PL_PC6 : pcs = PC_JOIN ; break ;
case PL_PC7 : pcs = PC_JOIN ; break ;
case PL_PC8 : pcs = PC_ENABLE ; break ;
case PL_PC9 : pcs = PC_MAINT ; break ;
default : pcs = PC_DISABLE ; break ;
}
return pcs;
}
char *get_linestate(struct s_smc *smc, int np)
{
char *ls = "" ;
SK_UNUSED(smc) ;
switch (inpw(PLC(np,PL_STATUS_A)) & PL_LINE_ST) {
case PL_L_NLS : ls = "NOISE" ; break ;
case PL_L_ALS : ls = "ACTIV" ; break ;
case PL_L_UND : ls = "UNDEF" ; break ;
case PL_L_ILS4: ls = "ILS 4" ; break ;
case PL_L_QLS : ls = "QLS" ; break ;
case PL_L_MLS : ls = "MLS" ; break ;
case PL_L_HLS : ls = "HLS" ; break ;
case PL_L_ILS16:ls = "ILS16" ; break ;
#ifdef lint
default: ls = "unknown" ; break ;
#endif
}
return ls;
}
char *get_pcmstate(struct s_smc *smc, int np)
{
char *pcs ;
SK_UNUSED(smc) ;
switch (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_STATE) {
case PL_PC0 : pcs = "OFF" ; break ;
case PL_PC1 : pcs = "BREAK" ; break ;
case PL_PC2 : pcs = "TRACE" ; break ;
case PL_PC3 : pcs = "CONNECT"; break ;
case PL_PC4 : pcs = "NEXT" ; break ;
case PL_PC5 : pcs = "SIGNAL" ; break ;
case PL_PC6 : pcs = "JOIN" ; break ;
case PL_PC7 : pcs = "VERIFY" ; break ;
case PL_PC8 : pcs = "ACTIV" ; break ;
case PL_PC9 : pcs = "MAINT" ; break ;
default : pcs = "UNKNOWN" ; break ;
}
return pcs;
}
void list_phy(struct s_smc *smc)
{
struct s_plc *plc ;
int np ;
for (np = 0 ; np < NUMPHYS ; np++) {
plc = &smc->y[np].plc ;
printf("PHY %d:\tERRORS\t\t\tBREAK_REASONS\t\tSTATES:\n",np) ;
printf("\tsoft_error: %ld \t\tPC_Start : %ld\n",
plc->soft_err,plc->b_pcs);
printf("\tparity_err: %ld \t\tTPC exp. : %ld\t\tLine: %s\n",
plc->parity_err,plc->b_tpc,get_linestate(smc,np)) ;
printf("\tebuf_error: %ld \t\tTNE exp. : %ld\n",
plc->ebuf_err,plc->b_tne) ;
printf("\tphyinvalid: %ld \t\tQLS det. : %ld\t\tPCM : %s\n",
plc->phyinv,plc->b_qls,get_pcmstate(smc,np)) ;
printf("\tviosym_ctr: %ld \t\tILS det. : %ld\n",
plc->vsym_ctr,plc->b_ils) ;
printf("\tmingap_ctr: %ld \t\tHLS det. : %ld\n",
plc->mini_ctr,plc->b_hls) ;
printf("\tnodepr_err: %ld\n",plc->np_err) ;
printf("\tTPC_exp : %ld\n",plc->tpc_exp) ;
printf("\tLEM_err : %ld\n",smc->y[np].lem.lem_errors) ;
}
}
#ifdef CONCENTRATOR
void pcm_lem_dump(struct s_smc *smc)
{
int i ;
struct s_phy *phy ;
struct fddi_mib_p *mib ;
char *entostring() ;
printf("PHY errors BER\n") ;
printf("----------------------\n") ;
for (i = 0,phy = smc->y ; i < NUMPHYS ; i++,phy++) {
if (!plc_is_installed(smc,i))
continue ;
mib = phy->mib ;
printf("%s\t%ld\t10E-%d\n",
entostring(smc,ENTITY_PHY(i)),
mib->fddiPORTLem_Ct,
mib->fddiPORTLer_Estimate) ;
}
}
#endif
#endif
| linux-master | drivers/net/fddi/skfp/pcmplc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
* *******************************************************************
* This SBA code implements the Synchronous Bandwidth Allocation
* functions described in the "FDDI Synchronous Forum Implementer's
* Agreement" dated December 1th, 1993.
* *******************************************************************
*
* PURPOSE: The purpose of this function is to control
* synchronous allocations on a single FDDI segment.
* Allocations are limited to the primary FDDI ring.
* The SBM provides recovery mechanisms to recover
* unused bandwidth and also resolves T_Neg and
* reconfiguration changes. Many of the SBM state
* machine inputs are sourced by the underlying
* FDDI sub-system supporting the SBA application.
*
* *******************************************************************
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/smt_p.h"
#ifndef SLIM_SMT
#ifdef ESS
#ifndef lint
#define LINT_USE(x)
#else
#define LINT_USE(x) (x)=(x)
#endif
#define MS2BCLK(x) ((x)*12500L)
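/*
 * Illustrative example (not in the original source): byte clocks run at
 * 12.5 MHz (80 ns per BCLK), i.e. 12500 BCLKs per millisecond, so
 * MS2BCLK(8) = 8 * 12500 = 100000 byte clocks for 8 ms.
 */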
/*
-------------------------------------------------------------
LOCAL VARIABLES:
-------------------------------------------------------------
*/
static const u_short plist_raf_alc_res[] = { SMT_P0012, SMT_P320B, SMT_P320F,
SMT_P3210, SMT_P0019, SMT_P001A,
SMT_P001D, 0 } ;
static const u_short plist_raf_chg_req[] = { SMT_P320B, SMT_P320F, SMT_P3210,
SMT_P001A, 0 } ;
static const struct fddi_addr smt_sba_da = {{0x80,0x01,0x43,0x00,0x80,0x0C}} ;
static const struct fddi_addr null_addr = {{0,0,0,0,0,0}} ;
/*
-------------------------------------------------------------
GLOBAL VARIABLES:
-------------------------------------------------------------
*/
/*
-------------------------------------------------------------
LOCAL FUNCTIONS:
-------------------------------------------------------------
*/
static void ess_send_response(struct s_smc *smc, struct smt_header *sm,
int sba_cmd);
static void ess_config_fifo(struct s_smc *smc);
static void ess_send_alc_req(struct s_smc *smc);
static void ess_send_frame(struct s_smc *smc, SMbuf *mb);
/*
-------------------------------------------------------------
EXTERNAL FUNCTIONS:
-------------------------------------------------------------
*/
/*
-------------------------------------------------------------
PUBLIC FUNCTIONS:
-------------------------------------------------------------
*/
void ess_timer_poll(struct s_smc *smc);
void ess_para_change(struct s_smc *smc);
int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
int fs);
static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhead);
/*
* --------------------------------------------------------------------------
* End Station Support (ESS)
* --------------------------------------------------------------------------
*/
/*
* evaluate the RAF frame
*/
int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
int fs)
{
void *p ; /* universal pointer */
struct smt_p_0016 *cmd ; /* para: command for the ESS */
SMbuf *db ;
u_long msg_res_type ; /* resource type */
u_long payload, overhead ;
int local ;
int i ;
/*
* Message Processing Code
*/
local = ((fs & L_INDICATOR) != 0) ;
/*
* get the resource type
*/
if (!(p = (void *) sm_to_para(smc,sm,SMT_P0015))) {
DB_ESS("ESS: RAF frame error, parameter type not found");
return fs;
}
msg_res_type = ((struct smt_p_0015 *)p)->res_type ;
/*
* get the pointer to the ESS command
*/
if (!(cmd = (struct smt_p_0016 *) sm_to_para(smc,sm,SMT_P0016))) {
/*
* error in frame: para ESS command was not found
*/
DB_ESS("ESS: RAF frame error, parameter command not found");
return fs;
}
DB_ESSN(2, "fc %x ft %x", sm->smt_class, sm->smt_type);
DB_ESSN(2, "ver %x tran %x", sm->smt_version, sm->smt_tid);
DB_ESSN(2, "stn_id %pM", &sm->smt_source);
DB_ESSN(2, "infolen %x res %lx", sm->smt_len, msg_res_type);
DB_ESSN(2, "sbacmd %x", cmd->sba_cmd);
/*
* evaluate the ESS command
*/
switch (cmd->sba_cmd) {
/*
* Process an ESS Allocation Request
*/
case REQUEST_ALLOCATION :
/*
* check for an RAF Request (Allocation Request)
*/
if (sm->smt_type == SMT_REQUEST) {
/*
* process the Allocation request only if the frame is
* local and no static allocation is used
*/
if (!local || smc->mib.fddiESSPayload)
return fs;
p = (void *) sm_to_para(smc,sm,SMT_P0019) ;
for (i = 0; i < 5; i++) {
if (((struct smt_p_0019 *)p)->alloc_addr.a[i]) {
return fs;
}
}
/*
* Note: The Application should send a LAN_LOC_FRAME.
* The ESS does not send the frame to the network!
*/
smc->ess.alloc_trans_id = sm->smt_tid ;
DB_ESS("ESS: save Alloc Req Trans ID %x", sm->smt_tid);
p = (void *) sm_to_para(smc,sm,SMT_P320F) ;
((struct smt_p_320f *)p)->mib_payload =
smc->mib.a[PATH0].fddiPATHSbaPayload ;
p = (void *) sm_to_para(smc,sm,SMT_P3210) ;
((struct smt_p_3210 *)p)->mib_overhead =
smc->mib.a[PATH0].fddiPATHSbaOverhead ;
sm->smt_dest = smt_sba_da ;
if (smc->ess.local_sba_active)
return fs | I_INDICATOR;
if (!(db = smt_get_mbuf(smc)))
return fs;
db->sm_len = mb->sm_len ;
db->sm_off = mb->sm_off ;
memcpy(((char *)(db->sm_data+db->sm_off)),(char *)sm,
(int)db->sm_len) ;
dump_smt(smc,
(struct smt_header *)(db->sm_data+db->sm_off),
"RAF") ;
smt_send_frame(smc,db,FC_SMT_INFO,0) ;
return fs;
}
/*
* The RAF frame is an Allocation Response !
* check the parameters
*/
if (smt_check_para(smc,sm,plist_raf_alc_res)) {
DB_ESS("ESS: RAF with para problem, ignoring");
return fs;
}
/*
* VERIFY THE FRAME IS WELL BUILT:
*
* 1. path index = primary ring only
* 2. resource type = sync bw only
* 3. transaction id = alloc_trans_id
* 4. reason code = success
*
* If any are violated, discard the RAF frame
*/
if ((((struct smt_p_320b *)sm_to_para(smc,sm,SMT_P320B))->path_index
!= PRIMARY_RING) ||
(msg_res_type != SYNC_BW) ||
(((struct smt_p_reason *)sm_to_para(smc,sm,SMT_P0012))->rdf_reason
!= SMT_RDF_SUCCESS) ||
(sm->smt_tid != smc->ess.alloc_trans_id)) {
DB_ESS("ESS: Allocation Response not accepted");
return fs;
}
/*
* Extract message parameters
*/
p = (void *) sm_to_para(smc,sm,SMT_P320F) ;
if (!p) {
printk(KERN_ERR "ESS: sm_to_para failed");
return fs;
}
payload = ((struct smt_p_320f *)p)->mib_payload ;
p = (void *) sm_to_para(smc,sm,SMT_P3210) ;
if (!p) {
printk(KERN_ERR "ESS: sm_to_para failed");
return fs;
}
overhead = ((struct smt_p_3210 *)p)->mib_overhead ;
DB_ESSN(2, "payload= %lx overhead= %lx",
payload, overhead);
/*
* process the bandwidth allocation
*/
(void)process_bw_alloc(smc,(long)payload,(long)overhead) ;
return fs;
/* end of Process Allocation Request */
/*
* Process an ESS Change Request
*/
case CHANGE_ALLOCATION :
/*
* accept requests only; Change Responses are not processed
*/
if (sm->smt_type != SMT_REQUEST) {
DB_ESS("ESS: Do not process Change Responses");
return fs;
}
/*
* check the para for the Change Request
*/
if (smt_check_para(smc,sm,plist_raf_chg_req)) {
DB_ESS("ESS: RAF with para problem, ignoring");
return fs;
}
/*
* Verify the path index and resource
* type are correct. If any of
* these are false, don't process this
* change request frame.
*/
if ((((struct smt_p_320b *)sm_to_para(smc,sm,SMT_P320B))->path_index
!= PRIMARY_RING) || (msg_res_type != SYNC_BW)) {
DB_ESS("ESS: RAF frame with para problem, ignoring");
return fs;
}
/*
* Extract message queue parameters
*/
p = (void *) sm_to_para(smc,sm,SMT_P320F) ;
payload = ((struct smt_p_320f *)p)->mib_payload ;
p = (void *) sm_to_para(smc,sm,SMT_P3210) ;
overhead = ((struct smt_p_3210 *)p)->mib_overhead ;
DB_ESSN(2, "ESS: Change Request from %pM",
&sm->smt_source);
DB_ESSN(2, "payload= %lx overhead= %lx",
payload, overhead);
/*
* process the bandwidth allocation
*/
if(!process_bw_alloc(smc,(long)payload,(long)overhead))
return fs;
/*
* send an RAF Change Reply
*/
ess_send_response(smc,sm,CHANGE_ALLOCATION) ;
return fs;
/* end of Process Change Request */
/*
* Process Report Response
*/
case REPORT_ALLOCATION :
/*
* accept requests only; a Report Reply is not processed
*/
if (sm->smt_type != SMT_REQUEST) {
DB_ESS("ESS: Do not process a Report Reply");
return fs;
}
DB_ESSN(2, "ESS: Report Request from %pM",
&sm->smt_source);
/*
* verify that the resource type is sync bw only
*/
if (msg_res_type != SYNC_BW) {
DB_ESS("ESS: ignoring RAF with para problem");
return fs;
}
/*
* send an RAF Change Reply
*/
ess_send_response(smc,sm,REPORT_ALLOCATION) ;
return fs;
/* end of Process Report Request */
default:
/*
* error in frame
*/
DB_ESS("ESS: ignoring RAF with bad sba_cmd");
break ;
}
return fs;
}
/*
* determines the synchronous bandwidth, set the TSYNC register and the
* mib variables SBAPayload, SBAOverhead and fddiMACT-NEG.
*/
static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhead)
{
/*
* Determine the synchronous bandwidth (sync_bw) in bytes per T-NEG
* if the payload is greater than zero.
* SBAPayload and SBAOverhead carry the following units:
*
*   SBAPayload  [8000 bytes/s]
*   SBAOverhead [bytes/T-NEG]
*
* T-NEG is described by the equation:
*
*   T-NEG = (-) fddiMACT-NEG / (12500000 1/s)
*
* The number of bytes we are able to send is the payload
* plus the overhead:
*
*   sync_bw [bytes/T-NEG] = SBAOverhead + T-NEG * SBAPayload * 8000 bytes/s
*
*                         = SBAOverhead + (1/1562) * (-)fddiMACT-NEG * SBAPayload
*
*/
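/*
 * Worked example (illustrative, not from the original source): for
 * T-NEG = 10 ms, fddiMACT_Neg is stored as -125000 (10 ms * 12.5 MHz,
 * negated), so a payload of 1000 (i.e. 8,000,000 bytes/s) and an
 * overhead of 50 bytes give
 *   sync_bw = 50 + 125000 * 1000 / 1562 = 50 + 80025
 * bytes per T-NEG, which matches 8,000,000 bytes/s * 10 ms = 80,000
 * bytes up to the approximation of 1562.5 by 1562.
 */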
/*
* set the mib attributes fddiPATHSbaOverhead, fddiPATHSbaPayload
*/
/* if (smt_set_obj(smc,SMT_P320F,payload,S_SET)) {
DB_ESS("ESS: SMT does not accept the payload value");
return FALSE;
}
if (smt_set_obj(smc,SMT_P3210,overhead,S_SET)) {
DB_ESS("ESS: SMT does not accept the overhead value");
return FALSE;
} */
/* preliminary */
if (payload > MAX_PAYLOAD || overhead > 5000) {
DB_ESS("ESS: payload / overhead not accepted");
return FALSE;
}
/*
* start the iterative allocation process if the payload or the overhead
* differ from the parsed (configured) values
*/
if (smc->mib.fddiESSPayload &&
((u_long)payload != smc->mib.fddiESSPayload ||
(u_long)overhead != smc->mib.fddiESSOverhead)) {
smc->ess.raf_act_timer_poll = TRUE ;
smc->ess.timer_count = 0 ;
}
/*
* evaluate the payload
*/
if (payload) {
DB_ESSN(2, "ESS: turn SMT_ST_SYNC_SERVICE bit on");
smc->ess.sync_bw_available = TRUE ;
smc->ess.sync_bw = overhead -
(long)smc->mib.m[MAC0].fddiMACT_Neg *
payload / 1562 ;
}
else {
DB_ESSN(2, "ESS: turn SMT_ST_SYNC_SERVICE bit off");
smc->ess.sync_bw_available = FALSE ;
smc->ess.sync_bw = 0 ;
overhead = 0 ;
}
smc->mib.a[PATH0].fddiPATHSbaPayload = payload ;
smc->mib.a[PATH0].fddiPATHSbaOverhead = overhead ;
DB_ESSN(2, "tsync = %lx", smc->ess.sync_bw);
ess_config_fifo(smc) ;
set_formac_tsync(smc,smc->ess.sync_bw) ;
return TRUE;
}
static void ess_send_response(struct s_smc *smc, struct smt_header *sm,
int sba_cmd)
{
struct smt_sba_chg *chg ;
SMbuf *mb ;
void *p ;
/*
* get and initialize the response frame
*/
if (sba_cmd == CHANGE_ALLOCATION) {
if (!(mb=smt_build_frame(smc,SMT_RAF,SMT_REPLY,
sizeof(struct smt_sba_chg))))
return ;
}
else {
if (!(mb=smt_build_frame(smc,SMT_RAF,SMT_REPLY,
sizeof(struct smt_sba_rep_res))))
return ;
}
chg = smtod(mb,struct smt_sba_chg *) ;
chg->smt.smt_tid = sm->smt_tid ;
chg->smt.smt_dest = sm->smt_source ;
/* set P15 */
chg->s_type.para.p_type = SMT_P0015 ;
chg->s_type.para.p_len = sizeof(struct smt_p_0015) - PARA_LEN ;
chg->s_type.res_type = SYNC_BW ;
/* set P16 */
chg->cmd.para.p_type = SMT_P0016 ;
chg->cmd.para.p_len = sizeof(struct smt_p_0016) - PARA_LEN ;
chg->cmd.sba_cmd = sba_cmd ;
/* set P320B */
chg->path.para.p_type = SMT_P320B ;
chg->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ;
chg->path.mib_index = SBAPATHINDEX ;
chg->path.path_pad = 0;
chg->path.path_index = PRIMARY_RING ;
/* set P320F */
chg->payload.para.p_type = SMT_P320F ;
chg->payload.para.p_len = sizeof(struct smt_p_320f) - PARA_LEN ;
chg->payload.mib_index = SBAPATHINDEX ;
chg->payload.mib_payload = smc->mib.a[PATH0].fddiPATHSbaPayload ;
/* set P3210 */
chg->overhead.para.p_type = SMT_P3210 ;
chg->overhead.para.p_len = sizeof(struct smt_p_3210) - PARA_LEN ;
chg->overhead.mib_index = SBAPATHINDEX ;
chg->overhead.mib_overhead = smc->mib.a[PATH0].fddiPATHSbaOverhead ;
if (sba_cmd == CHANGE_ALLOCATION) {
/* set P1A */
chg->cat.para.p_type = SMT_P001A ;
chg->cat.para.p_len = sizeof(struct smt_p_001a) - PARA_LEN ;
p = (void *) sm_to_para(smc,sm,SMT_P001A) ;
chg->cat.category = ((struct smt_p_001a *)p)->category ;
}
dump_smt(smc,(struct smt_header *)chg,"RAF") ;
ess_send_frame(smc,mb) ;
}
void ess_timer_poll(struct s_smc *smc)
{
if (!smc->ess.raf_act_timer_poll)
return ;
DB_ESSN(2, "ESS: timer_poll");
smc->ess.timer_count++ ;
if (smc->ess.timer_count == 10) {
smc->ess.timer_count = 0 ;
ess_send_alc_req(smc) ;
}
}
static void ess_send_alc_req(struct s_smc *smc)
{
struct smt_sba_alc_req *req ;
SMbuf *mb ;
/*
* never send an allocation request in which the requested payload and
* overhead are zero; deallocate the bandwidth when no bandwidth is
* parsed
*/
if (!smc->mib.fddiESSPayload) {
smc->mib.fddiESSOverhead = 0 ;
}
else {
if (!smc->mib.fddiESSOverhead)
smc->mib.fddiESSOverhead = DEFAULT_OV ;
}
if (smc->mib.fddiESSOverhead ==
smc->mib.a[PATH0].fddiPATHSbaOverhead &&
smc->mib.fddiESSPayload ==
smc->mib.a[PATH0].fddiPATHSbaPayload){
smc->ess.raf_act_timer_poll = FALSE ;
smc->ess.timer_count = 7 ; /* next RAF alc req after 3 s */
return ;
}
/*
	 * get and initialize the request frame
*/
if (!(mb=smt_build_frame(smc,SMT_RAF,SMT_REQUEST,
sizeof(struct smt_sba_alc_req))))
return ;
req = smtod(mb,struct smt_sba_alc_req *) ;
req->smt.smt_tid = smc->ess.alloc_trans_id = smt_get_tid(smc) ;
req->smt.smt_dest = smt_sba_da ;
/* set P15 */
req->s_type.para.p_type = SMT_P0015 ;
req->s_type.para.p_len = sizeof(struct smt_p_0015) - PARA_LEN ;
req->s_type.res_type = SYNC_BW ;
/* set P16 */
req->cmd.para.p_type = SMT_P0016 ;
req->cmd.para.p_len = sizeof(struct smt_p_0016) - PARA_LEN ;
req->cmd.sba_cmd = REQUEST_ALLOCATION ;
/*
* set the parameter type and parameter length of all used
* parameters
*/
/* set P320B */
req->path.para.p_type = SMT_P320B ;
req->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ;
req->path.mib_index = SBAPATHINDEX ;
req->path.path_pad = 0;
req->path.path_index = PRIMARY_RING ;
/* set P0017 */
req->pl_req.para.p_type = SMT_P0017 ;
req->pl_req.para.p_len = sizeof(struct smt_p_0017) - PARA_LEN ;
req->pl_req.sba_pl_req = smc->mib.fddiESSPayload -
smc->mib.a[PATH0].fddiPATHSbaPayload ;
/* set P0018 */
req->ov_req.para.p_type = SMT_P0018 ;
req->ov_req.para.p_len = sizeof(struct smt_p_0018) - PARA_LEN ;
req->ov_req.sba_ov_req = smc->mib.fddiESSOverhead -
smc->mib.a[PATH0].fddiPATHSbaOverhead ;
/* set P320F */
req->payload.para.p_type = SMT_P320F ;
req->payload.para.p_len = sizeof(struct smt_p_320f) - PARA_LEN ;
req->payload.mib_index = SBAPATHINDEX ;
req->payload.mib_payload = smc->mib.a[PATH0].fddiPATHSbaPayload ;
/* set P3210 */
req->overhead.para.p_type = SMT_P3210 ;
req->overhead.para.p_len = sizeof(struct smt_p_3210) - PARA_LEN ;
req->overhead.mib_index = SBAPATHINDEX ;
req->overhead.mib_overhead = smc->mib.a[PATH0].fddiPATHSbaOverhead ;
/* set P19 */
req->a_addr.para.p_type = SMT_P0019 ;
req->a_addr.para.p_len = sizeof(struct smt_p_0019) - PARA_LEN ;
req->a_addr.sba_pad = 0;
req->a_addr.alloc_addr = null_addr ;
/* set P1A */
req->cat.para.p_type = SMT_P001A ;
req->cat.para.p_len = sizeof(struct smt_p_001a) - PARA_LEN ;
req->cat.category = smc->mib.fddiESSCategory ;
/* set P1B */
req->tneg.para.p_type = SMT_P001B ;
req->tneg.para.p_len = sizeof(struct smt_p_001b) - PARA_LEN ;
req->tneg.max_t_neg = smc->mib.fddiESSMaxTNeg ;
/* set P1C */
req->segm.para.p_type = SMT_P001C ;
req->segm.para.p_len = sizeof(struct smt_p_001c) - PARA_LEN ;
req->segm.min_seg_siz = smc->mib.fddiESSMinSegmentSize ;
dump_smt(smc,(struct smt_header *)req,"RAF") ;
ess_send_frame(smc,mb) ;
}
static void ess_send_frame(struct s_smc *smc, SMbuf *mb)
{
/*
	 * check if the frame must be sent to the local ESS
*/
if (smc->ess.local_sba_active) {
/*
* Send the Change Reply to the local SBA
*/
DB_ESS("ESS:Send to the local SBA");
if (!smc->ess.sba_reply_pend)
smc->ess.sba_reply_pend = mb ;
else {
DB_ESS("Frame is lost - another frame was pending");
smt_free_mbuf(smc,mb) ;
}
}
else {
/*
* Send the SBA RAF Change Reply to the network
*/
DB_ESS("ESS:Send to the network");
smt_send_frame(smc,mb,FC_SMT_INFO,0) ;
}
}
void ess_para_change(struct s_smc *smc)
{
(void)process_bw_alloc(smc,(long)smc->mib.a[PATH0].fddiPATHSbaPayload,
(long)smc->mib.a[PATH0].fddiPATHSbaOverhead) ;
}
static void ess_config_fifo(struct s_smc *smc)
{
/*
* if nothing to do exit
*/
if (smc->mib.a[PATH0].fddiPATHSbaPayload) {
if (smc->hw.fp.fifo.fifo_config_mode & SYNC_TRAFFIC_ON &&
(smc->hw.fp.fifo.fifo_config_mode&SEND_ASYNC_AS_SYNC) ==
smc->mib.fddiESSSynchTxMode) {
return ;
}
}
else {
if (!(smc->hw.fp.fifo.fifo_config_mode & SYNC_TRAFFIC_ON)) {
return ;
}
}
/*
* split up the FIFO and reinitialize the queues
*/
formac_reinit_tx(smc) ;
}
#endif /* ESS */
#endif /* no SLIM_SMT */
| linux-master | drivers/net/fddi/skfp/ess.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
SMT/CMT defaults
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#ifndef OEM_USER_DATA
#define OEM_USER_DATA "SK-NET FDDI V2.0 Userdata"
#endif
/*
* defaults
*/
#define TTMS(x) ((u_long)(x)*1000L)
#define TTS(x) ((u_long)(x)*1000000L)
#define TTUS(x) ((u_long)(x))
#define DEFAULT_TB_MIN TTMS(5)
#define DEFAULT_TB_MAX TTMS(50)
#define DEFAULT_C_MIN TTUS(1600)
#define DEFAULT_T_OUT TTMS(100+5)
#define DEFAULT_TL_MIN TTUS(30)
#define DEFAULT_LC_SHORT TTMS(50+5)
#define DEFAULT_LC_MEDIUM TTMS(500+20)
#define DEFAULT_LC_LONG TTS(5)+TTMS(50)
#define DEFAULT_LC_EXTENDED TTS(50)+TTMS(50)
#define DEFAULT_T_NEXT_9 TTMS(200+10)
#define DEFAULT_NS_MAX TTUS(1310)
#define DEFAULT_I_MAX TTMS(25)
#define DEFAULT_IN_MAX TTMS(40)
#define DEFAULT_TD_MIN TTMS(5)
#define DEFAULT_T_NON_OP TTS(1)
#define DEFAULT_T_STUCK TTS(8)
#define DEFAULT_T_DIRECT TTMS(370)
#define DEFAULT_T_JAM TTMS(370)
#define DEFAULT_T_ANNOUNCE TTMS(2500)
#define DEFAULT_D_MAX TTUS(1617)
#define DEFAULT_LEM_ALARM (8)
#define DEFAULT_LEM_CUTOFF (7)
#define DEFAULT_TEST_DONE TTS(1)
#define DEFAULT_CHECK_POLL TTS(1)
#define DEFAULT_POLL TTMS(50)
/*
* LCT errors threshold
*/
#define DEFAULT_LCT_SHORT 1
#define DEFAULT_LCT_MEDIUM 3
#define DEFAULT_LCT_LONG 5
#define DEFAULT_LCT_EXTEND 50
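/*
 * Aside, a minimal sketch (not compiled): the TTMS/TTS/TTUS macros above
 * express all SMT/CMT defaults in microseconds, e.g. DEFAULT_T_OUT is
 * 105 ms stored as 105000. The function name below is only for
 * illustration.
 */
#if 0
static u_long example_default_in_us(void)
{
	/* TTMS(100+5) == (u_long)(100+5) * 1000L == 105000 microseconds */
	return TTMS(100 + 5) ;
}
#endif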
/* Forward declarations */
void smt_reset_defaults(struct s_smc *smc, int level);
static void smt_init_mib(struct s_smc *smc, int level);
static int set_min_max(int maxflag, u_long mib, u_long limit, u_long *oper);
#define MS2BCLK(x) ((x)*12500L)
#define US2BCLK(x) ((x)*1250L)
void smt_reset_defaults(struct s_smc *smc, int level)
{
struct smt_config *smt ;
int i ;
u_long smt_boot_time;
smt_init_mib(smc,level) ;
smc->os.smc_version = SMC_VERSION ;
smt_boot_time = smt_get_time();
for( i = 0; i < NUMMACS; i++ )
smc->sm.last_tok_time[i] = smt_boot_time ;
smt = &smc->s ;
smt->attach_s = 0 ;
smt->build_ring_map = 1 ;
smt->sas = SMT_DAS ;
smt->numphys = NUMPHYS ;
smt->pcm_tb_min = DEFAULT_TB_MIN ;
smt->pcm_tb_max = DEFAULT_TB_MAX ;
smt->pcm_c_min = DEFAULT_C_MIN ;
smt->pcm_t_out = DEFAULT_T_OUT ;
smt->pcm_tl_min = DEFAULT_TL_MIN ;
smt->pcm_lc_short = DEFAULT_LC_SHORT ;
smt->pcm_lc_medium = DEFAULT_LC_MEDIUM ;
smt->pcm_lc_long = DEFAULT_LC_LONG ;
smt->pcm_lc_extended = DEFAULT_LC_EXTENDED ;
smt->pcm_t_next_9 = DEFAULT_T_NEXT_9 ;
smt->pcm_ns_max = DEFAULT_NS_MAX ;
smt->ecm_i_max = DEFAULT_I_MAX ;
smt->ecm_in_max = DEFAULT_IN_MAX ;
smt->ecm_td_min = DEFAULT_TD_MIN ;
smt->ecm_test_done = DEFAULT_TEST_DONE ;
smt->ecm_check_poll = DEFAULT_CHECK_POLL ;
smt->rmt_t_non_op = DEFAULT_T_NON_OP ;
smt->rmt_t_stuck = DEFAULT_T_STUCK ;
smt->rmt_t_direct = DEFAULT_T_DIRECT ;
smt->rmt_t_jam = DEFAULT_T_JAM ;
smt->rmt_t_announce = DEFAULT_T_ANNOUNCE ;
smt->rmt_t_poll = DEFAULT_POLL ;
smt->rmt_dup_mac_behavior = FALSE ; /* See Struct smt_config */
smt->mac_d_max = DEFAULT_D_MAX ;
smt->lct_short = DEFAULT_LCT_SHORT ;
smt->lct_medium = DEFAULT_LCT_MEDIUM ;
smt->lct_long = DEFAULT_LCT_LONG ;
smt->lct_extended = DEFAULT_LCT_EXTEND ;
#ifndef SLIM_SMT
#ifdef ESS
if (level == 0) {
smc->ess.sync_bw_available = FALSE ;
smc->mib.fddiESSPayload = 0 ;
smc->mib.fddiESSOverhead = 0 ;
smc->mib.fddiESSMaxTNeg = (u_long)(- MS2BCLK(25)) ;
smc->mib.fddiESSMinSegmentSize = 1 ;
smc->mib.fddiESSCategory = SB_STATIC ;
smc->mib.fddiESSSynchTxMode = FALSE ;
smc->ess.raf_act_timer_poll = FALSE ;
smc->ess.timer_count = 7 ; /* first RAF alc req after 3s */
}
smc->ess.local_sba_active = FALSE ;
smc->ess.sba_reply_pend = NULL ;
#endif
#ifdef SBA
smt_init_sba(smc,level) ;
#endif
#endif /* no SLIM_SMT */
#ifdef TAG_MODE
if (level == 0) {
smc->hw.pci_fix_value = 0 ;
}
#endif
}
/*
* manufacturer data
*/
static const char man_data[32] =
/* 01234567890123456789012345678901 */
"xxxSK-NET FDDI SMT 7.3 - V2.8.8" ;
static void smt_init_mib(struct s_smc *smc, int level)
{
struct fddi_mib *mib ;
struct fddi_mib_p *pm ;
int port ;
int path ;
mib = &smc->mib ;
if (level == 0) {
/*
* set EVERYTHING to ZERO
* EXCEPT hw and os
*/
memset(((char *)smc)+
sizeof(struct s_smt_os)+sizeof(struct s_smt_hw), 0,
sizeof(struct s_smc) -
sizeof(struct s_smt_os) - sizeof(struct s_smt_hw)) ;
}
else {
mib->fddiSMTRemoteDisconnectFlag = 0 ;
mib->fddiSMTPeerWrapFlag = 0 ;
}
mib->fddiSMTOpVersionId = 2 ;
mib->fddiSMTHiVersionId = 2 ;
mib->fddiSMTLoVersionId = 2 ;
memcpy((char *) mib->fddiSMTManufacturerData,man_data,32) ;
if (level == 0) {
strcpy(mib->fddiSMTUserData,OEM_USER_DATA) ;
}
mib->fddiSMTMIBVersionId = 1 ;
mib->fddiSMTMac_Ct = NUMMACS ;
mib->fddiSMTConnectionPolicy = POLICY_MM | POLICY_AA | POLICY_BB ;
/*
* fddiSMTNonMaster_Ct and fddiSMTMaster_Ct are set in smt_fixup_mib
* s.sas is not set yet (is set in init driver)
*/
mib->fddiSMTAvailablePaths = MIB_PATH_P | MIB_PATH_S ;
mib->fddiSMTConfigCapabilities = 0 ; /* no hold,no wrap_ab*/
mib->fddiSMTTT_Notify = 10 ;
mib->fddiSMTStatRptPolicy = TRUE ;
mib->fddiSMTTrace_MaxExpiration = SEC2MIB(7) ;
mib->fddiSMTMACIndexes = INDEX_MAC ;
mib->fddiSMTStationStatus = MIB_SMT_STASTA_SEPA ; /* separated */
mib->m[MAC0].fddiMACIndex = INDEX_MAC ;
mib->m[MAC0].fddiMACFrameStatusFunctions = FSC_TYPE0 ;
mib->m[MAC0].fddiMACRequestedPaths =
MIB_P_PATH_LOCAL |
MIB_P_PATH_SEC_ALTER |
MIB_P_PATH_PRIM_ALTER ;
mib->m[MAC0].fddiMACAvailablePaths = MIB_PATH_P ;
mib->m[MAC0].fddiMACCurrentPath = MIB_PATH_PRIMARY ;
mib->m[MAC0].fddiMACT_MaxCapabilitiy = (u_long)(- MS2BCLK(165)) ;
mib->m[MAC0].fddiMACTVXCapabilitiy = (u_long)(- US2BCLK(52)) ;
if (level == 0) {
mib->m[MAC0].fddiMACTvxValue = (u_long)(- US2BCLK(27)) ;
mib->m[MAC0].fddiMACTvxValueMIB = (u_long)(- US2BCLK(27)) ;
mib->m[MAC0].fddiMACT_Req = (u_long)(- MS2BCLK(165)) ;
mib->m[MAC0].fddiMACT_ReqMIB = (u_long)(- MS2BCLK(165)) ;
mib->m[MAC0].fddiMACT_Max = (u_long)(- MS2BCLK(165)) ;
mib->m[MAC0].fddiMACT_MaxMIB = (u_long)(- MS2BCLK(165)) ;
mib->m[MAC0].fddiMACT_Min = (u_long)(- MS2BCLK(4)) ;
}
mib->m[MAC0].fddiMACHardwarePresent = TRUE ;
mib->m[MAC0].fddiMACMA_UnitdataEnable = TRUE ;
mib->m[MAC0].fddiMACFrameErrorThreshold = 1 ;
mib->m[MAC0].fddiMACNotCopiedThreshold = 1 ;
/*
* Path attributes
*/
for (path = 0 ; path < NUMPATHS ; path++) {
mib->a[path].fddiPATHIndex = INDEX_PATH + path ;
if (level == 0) {
mib->a[path].fddiPATHTVXLowerBound =
(u_long)(- US2BCLK(27)) ;
mib->a[path].fddiPATHT_MaxLowerBound =
(u_long)(- MS2BCLK(165)) ;
mib->a[path].fddiPATHMaxT_Req =
(u_long)(- MS2BCLK(165)) ;
}
}
/*
* Port attributes
*/
pm = mib->p ;
for (port = 0 ; port < NUMPHYS ; port++) {
/*
* set MIB pointer in phy
*/
/* Attention: don't initialize mib pointer here! */
/* It must be initialized during phase 2 */
smc->y[port].mib = NULL;
mib->fddiSMTPORTIndexes[port] = port+INDEX_PORT ;
pm->fddiPORTIndex = port+INDEX_PORT ;
pm->fddiPORTHardwarePresent = TRUE ;
if (level == 0) {
pm->fddiPORTLer_Alarm = DEFAULT_LEM_ALARM ;
pm->fddiPORTLer_Cutoff = DEFAULT_LEM_CUTOFF ;
}
/*
* fddiPORTRequestedPaths are set in pcmplc.c
* we don't know the port type yet !
*/
pm->fddiPORTRequestedPaths[1] = 0 ;
pm->fddiPORTRequestedPaths[2] = 0 ;
pm->fddiPORTRequestedPaths[3] = 0 ;
pm->fddiPORTAvailablePaths = MIB_PATH_P ;
pm->fddiPORTPMDClass = MIB_PMDCLASS_MULTI ;
pm++ ;
}
(void) smt_set_mac_opvalues(smc) ;
}
int smt_set_mac_opvalues(struct s_smc *smc)
{
int st ;
int st2 ;
st = set_min_max(1,smc->mib.m[MAC0].fddiMACTvxValueMIB,
smc->mib.a[PATH0].fddiPATHTVXLowerBound,
&smc->mib.m[MAC0].fddiMACTvxValue) ;
st |= set_min_max(0,smc->mib.m[MAC0].fddiMACT_MaxMIB,
smc->mib.a[PATH0].fddiPATHT_MaxLowerBound,
&smc->mib.m[MAC0].fddiMACT_Max) ;
st |= (st2 = set_min_max(0,smc->mib.m[MAC0].fddiMACT_ReqMIB,
smc->mib.a[PATH0].fddiPATHMaxT_Req,
&smc->mib.m[MAC0].fddiMACT_Req)) ;
if (st2) {
/* Treq attribute changed remotely. So send an AIX_EVENT to the
* user
*/
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_T_REQ,
smt_get_event_word(smc));
}
return st;
}
void smt_fixup_mib(struct s_smc *smc)
{
#ifdef CONCENTRATOR
switch (smc->s.sas) {
case SMT_SAS :
smc->mib.fddiSMTNonMaster_Ct = 1 ;
break ;
case SMT_DAS :
smc->mib.fddiSMTNonMaster_Ct = 2 ;
break ;
case SMT_NAC :
smc->mib.fddiSMTNonMaster_Ct = 0 ;
break ;
}
smc->mib.fddiSMTMaster_Ct = NUMPHYS - smc->mib.fddiSMTNonMaster_Ct ;
#else
switch (smc->s.sas) {
case SMT_SAS :
smc->mib.fddiSMTNonMaster_Ct = 1 ;
break ;
case SMT_DAS :
smc->mib.fddiSMTNonMaster_Ct = 2 ;
break ;
}
smc->mib.fddiSMTMaster_Ct = 0 ;
#endif
}
/*
* determine new setting for operational value
* if limit is lower than mib
* use limit
* else
* use mib
* NOTE : numbers are negative, negate comparison !
*/
static int set_min_max(int maxflag, u_long mib, u_long limit, u_long *oper)
{
u_long old ;
old = *oper ;
if ((limit > mib) ^ maxflag)
*oper = limit ;
else
*oper = mib ;
return old != *oper;
}
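/*
 * Illustrative sketch only (not compiled): timer values in the MIB are
 * stored as negated BCLK counts, so the longer real time ends up as the
 * numerically smaller stored value -- which is why the comparison in
 * set_min_max() above is negated. MS2BCLK() is the macro defined above;
 * the function below is a stand-alone example, not driver code.
 */
#if 0
static int example_stored_time_ordering(void)
{
	u_long t_long = (u_long)(- MS2BCLK(165)) ;	/* 165 ms */
	u_long t_short = (u_long)(- MS2BCLK(4)) ;	/* 4 ms */

	/* longer real time => smaller stored value: returns 1 */
	return t_long < t_short ;
}
#endif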
| linux-master | drivers/net/fddi/skfp/smtdef.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
SMT Event Queue Management
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#define PRINTF(a,b,c)
/*
* init event queue management
*/
void ev_init(struct s_smc *smc)
{
smc->q.ev_put = smc->q.ev_get = smc->q.ev_queue ;
}
/*
* add event to queue
*/
void queue_event(struct s_smc *smc, int class, int event)
{
PRINTF("queue class %d event %d\n",class,event) ;
smc->q.ev_put->class = class ;
smc->q.ev_put->event = event ;
if (++smc->q.ev_put == &smc->q.ev_queue[MAX_EVENT])
smc->q.ev_put = smc->q.ev_queue ;
if (smc->q.ev_put == smc->q.ev_get) {
SMT_ERR_LOG(smc,SMT_E0137, SMT_E0137_MSG) ;
}
}
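/*
 * Illustrative sketch (stand-alone, not part of the driver): the same
 * fixed-size ring convention used by queue_event() above -- the put
 * index wraps at the end of the array and an overrun is detected when
 * put catches up with get. Array size, names and types here are
 * hypothetical.
 */
#if 0
#define EXAMPLE_MAX_EVENT	8

struct example_ev { int class ; int event ; } ;

static struct example_ev ex_queue[EXAMPLE_MAX_EVENT] ;
static int ex_put, ex_get ;

static int example_queue_event(int class, int event)
{
	ex_queue[ex_put].class = class ;
	ex_queue[ex_put].event = event ;
	if (++ex_put == EXAMPLE_MAX_EVENT)
		ex_put = 0 ;
	/* returns 1 on overrun, the SMT_ERR_LOG() case above */
	return ex_put == ex_get ;
}
#endif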
/*
* timer_event is called from HW timer package.
*/
void timer_event(struct s_smc *smc, u_long token)
{
PRINTF("timer event class %d token %d\n",
EV_T_CLASS(token),
EV_T_EVENT(token)) ;
queue_event(smc,EV_T_CLASS(token),EV_T_EVENT(token));
}
/*
* event dispatcher
* while event queue is not empty
* get event from queue
* send command to state machine
* end
*/
void ev_dispatcher(struct s_smc *smc)
{
struct event_queue *ev ; /* pointer into queue */
int class ;
ev = smc->q.ev_get ;
PRINTF("dispatch get %x put %x\n",ev,smc->q.ev_put) ;
while (ev != smc->q.ev_put) {
PRINTF("dispatch class %d event %d\n",ev->class,ev->event) ;
switch(class = ev->class) {
		case EVENT_ECM :	/* Entity Coordination Man. */
ecm(smc,(int)ev->event) ;
break ;
case EVENT_CFM : /* Configuration Man. */
cfm(smc,(int)ev->event) ;
break ;
case EVENT_RMT : /* Ring Man. */
rmt(smc,(int)ev->event) ;
break ;
case EVENT_SMT :
smt_event(smc,(int)ev->event) ;
break ;
#ifdef CONCENTRATOR
case 99 :
timer_test_event(smc,(int)ev->event) ;
break ;
#endif
case EVENT_PCMA : /* PHY A */
case EVENT_PCMB : /* PHY B */
default :
if (class >= EVENT_PCMA &&
class < EVENT_PCMA + NUMPHYS) {
pcm(smc,class - EVENT_PCMA,(int)ev->event) ;
break ;
}
SMT_PANIC(smc,SMT_E0121, SMT_E0121_MSG) ;
return ;
}
if (++ev == &smc->q.ev_queue[MAX_EVENT])
ev = smc->q.ev_queue ;
		/* Renew get: it is used in queue_event to detect overruns */
smc->q.ev_get = ev;
}
}
/*
* smt_online connects to or disconnects from the ring
* MUST be called to initiate connection establishment
*
* on 0 disconnect
* on 1 connect
*/
u_short smt_online(struct s_smc *smc, int on)
{
queue_event(smc,EVENT_ECM,on ? EC_CONNECT : EC_DISCONNECT) ;
ev_dispatcher(smc) ;
return smc->mib.fddiSMTCF_State;
}
/*
* set SMT flag to value
* flag flag name
* value flag value
* dump current flag setting
*/
#ifdef CONCENTRATOR
void do_smt_flag(struct s_smc *smc, char *flag, int value)
{
#ifdef DEBUG
struct smt_debug *deb;
SK_UNUSED(smc) ;
#ifdef DEBUG_BRD
deb = &smc->debug;
#else
deb = &debug;
#endif
if (!strcmp(flag,"smt"))
deb->d_smt = value ;
else if (!strcmp(flag,"smtf"))
deb->d_smtf = value ;
else if (!strcmp(flag,"pcm"))
deb->d_pcm = value ;
else if (!strcmp(flag,"rmt"))
deb->d_rmt = value ;
else if (!strcmp(flag,"cfm"))
deb->d_cfm = value ;
else if (!strcmp(flag,"ecm"))
deb->d_ecm = value ;
printf("smt %d\n",deb->d_smt) ;
printf("smtf %d\n",deb->d_smtf) ;
printf("pcm %d\n",deb->d_pcm) ;
printf("rmt %d\n",deb->d_rmt) ;
printf("cfm %d\n",deb->d_cfm) ;
printf("ecm %d\n",deb->d_ecm) ;
#endif /* DEBUG */
}
#endif
| linux-master | drivers/net/fddi/skfp/queue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
SMT timer
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
static void timer_done(struct s_smc *smc, int restart);
void smt_timer_init(struct s_smc *smc)
{
smc->t.st_queue = NULL;
smc->t.st_fast.tm_active = FALSE ;
smc->t.st_fast.tm_next = NULL;
hwt_init(smc) ;
}
void smt_timer_stop(struct s_smc *smc, struct smt_timer *timer)
{
struct smt_timer **prev ;
struct smt_timer *tm ;
/*
* remove timer from queue
*/
timer->tm_active = FALSE ;
if (smc->t.st_queue == timer && !timer->tm_next) {
hwt_stop(smc) ;
}
for (prev = &smc->t.st_queue ; (tm = *prev) ; prev = &tm->tm_next ) {
if (tm == timer) {
*prev = tm->tm_next ;
if (tm->tm_next) {
tm->tm_next->tm_delta += tm->tm_delta ;
}
return ;
}
}
}
void smt_timer_start(struct s_smc *smc, struct smt_timer *timer, u_long time,
u_long token)
{
struct smt_timer **prev ;
struct smt_timer *tm ;
u_long delta = 0 ;
time /= 16 ; /* input is uS, clock ticks are 16uS */
if (!time)
time = 1 ;
smt_timer_stop(smc,timer) ;
timer->tm_smc = smc ;
timer->tm_token = token ;
timer->tm_active = TRUE ;
if (!smc->t.st_queue) {
smc->t.st_queue = timer ;
timer->tm_next = NULL;
timer->tm_delta = time ;
hwt_start(smc,time) ;
return ;
}
/*
* timer correction
*/
timer_done(smc,0) ;
/*
* find position in queue
*/
delta = 0 ;
for (prev = &smc->t.st_queue ; (tm = *prev) ; prev = &tm->tm_next ) {
if (delta + tm->tm_delta > time) {
break ;
}
delta += tm->tm_delta ;
}
/* insert in queue */
*prev = timer ;
timer->tm_next = tm ;
timer->tm_delta = time - delta ;
if (tm)
tm->tm_delta -= timer->tm_delta ;
/*
* start new with first
*/
hwt_start(smc,smc->t.st_queue->tm_delta) ;
}
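/*
 * Illustrative sketch (stand-alone): the delta-list insertion performed
 * by smt_timer_start() above. Each queued entry only stores the time
 * relative to its predecessor, so walking the list accumulates deltas
 * until the insertion point is found and the successor's delta is
 * shortened. This is a simplified model with hypothetical types, not
 * driver code.
 */
#if 0
struct example_tm { struct example_tm *next ; unsigned long delta ; } ;

static void example_delta_insert(struct example_tm **head,
				 struct example_tm *timer,
				 unsigned long time)
{
	struct example_tm **prev ;
	struct example_tm *tm ;
	unsigned long delta = 0 ;

	for (prev = head ; (tm = *prev) ; prev = &tm->next) {
		if (delta + tm->delta > time)
			break ;
		delta += tm->delta ;
	}
	*prev = timer ;			/* link in before tm */
	timer->next = tm ;
	timer->delta = time - delta ;
	if (tm)				/* successor keeps its absolute expiry */
		tm->delta -= timer->delta ;
}
#endif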
void smt_force_irq(struct s_smc *smc)
{
smt_timer_start(smc,&smc->t.st_fast,32L, EV_TOKEN(EVENT_SMT,SM_FAST));
}
void smt_timer_done(struct s_smc *smc)
{
timer_done(smc,1) ;
}
static void timer_done(struct s_smc *smc, int restart)
{
u_long delta ;
struct smt_timer *tm ;
struct smt_timer *next ;
struct smt_timer **last ;
int done = 0 ;
delta = hwt_read(smc) ;
last = &smc->t.st_queue ;
tm = smc->t.st_queue ;
while (tm && !done) {
if (delta >= tm->tm_delta) {
tm->tm_active = FALSE ;
delta -= tm->tm_delta ;
last = &tm->tm_next ;
tm = tm->tm_next ;
}
else {
tm->tm_delta -= delta ;
delta = 0 ;
done = 1 ;
}
}
*last = NULL;
next = smc->t.st_queue ;
smc->t.st_queue = tm ;
for ( tm = next ; tm ; tm = next) {
next = tm->tm_next ;
timer_event(smc,tm->tm_token) ;
}
if (restart && smc->t.st_queue)
hwt_start(smc,smc->t.st_queue->tm_delta) ;
}
| linux-master | drivers/net/fddi/skfp/smttimer.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
Init SMT
call all module level initialization routines
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
void init_fddi_driver(struct s_smc *smc, const u_char *mac_addr);
/* define global debug variable */
#if defined(DEBUG) && !defined(DEBUG_BRD)
struct smt_debug debug;
#endif
#ifndef MULT_OEM
#define OEMID(smc,i) oem_id[i]
extern u_char oem_id[] ;
#else /* MULT_OEM */
#define OEMID(smc,i) smc->hw.oem_id->oi_mark[i]
extern struct s_oem_ids oem_ids[] ;
#endif /* MULT_OEM */
/*
* Set OEM specific values
*
 * Cannot be called in smt_reset_defaults, because the OEM ID may not
 * be defined yet at that point.
*/
static void set_oem_spec_val(struct s_smc *smc)
{
struct fddi_mib *mib ;
mib = &smc->mib ;
/*
* set IBM specific values
*/
if (OEMID(smc,0) == 'I') {
mib->fddiSMTConnectionPolicy = POLICY_MM ;
}
}
/*
* Init SMT
*/
int init_smt(struct s_smc *smc, const u_char *mac_addr)
/* u_char *mac_addr; canonical address or NULL */
{
int p ;
#if defined(DEBUG) && !defined(DEBUG_BRD)
debug.d_smt = 0 ;
debug.d_smtf = 0 ;
debug.d_rmt = 0 ;
debug.d_ecm = 0 ;
debug.d_pcm = 0 ;
debug.d_cfm = 0 ;
debug.d_plc = 0 ;
#ifdef ESS
debug.d_ess = 0 ;
#endif
#ifdef SBA
debug.d_sba = 0 ;
#endif
#endif /* DEBUG && !DEBUG_BRD */
/* First initialize the ports mib->pointers */
for ( p = 0; p < NUMPHYS; p ++ ) {
smc->y[p].mib = & smc->mib.p[p] ;
}
set_oem_spec_val(smc) ;
(void) smt_set_mac_opvalues(smc) ;
init_fddi_driver(smc,mac_addr) ; /* HW driver */
smt_fixup_mib(smc) ; /* update values that depend on s.sas */
ev_init(smc) ; /* event queue */
#ifndef SLIM_SMT
smt_init_evc(smc) ; /* evcs in MIB */
#endif /* no SLIM_SMT */
smt_timer_init(smc) ; /* timer package */
smt_agent_init(smc) ; /* SMT frame manager */
pcm_init(smc) ; /* PCM state machine */
ecm_init(smc) ; /* ECM state machine */
cfm_init(smc) ; /* CFM state machine */
rmt_init(smc) ; /* RMT state machine */
for (p = 0 ; p < NUMPHYS ; p++) {
pcm(smc,p,0) ; /* PCM A state machine */
}
ecm(smc,0) ; /* ECM state machine */
cfm(smc,0) ; /* CFM state machine */
rmt(smc,0) ; /* RMT state machine */
smt_agent_task(smc) ; /* NIF FSM etc */
PNMI_INIT(smc) ; /* PNMI initialization */
return 0;
}
| linux-master | drivers/net/fddi/skfp/smtinit.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
* FORMAC+ Driver for tag mode
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/supern_2.h"
#include <linux/bitrev.h>
#include <linux/etherdevice.h>
#ifndef UNUSED
#ifdef lint
#define UNUSED(x) (x) = (x)
#else
#define UNUSED(x)
#endif
#endif
#define FM_ADDRX (FM_ADDET|FM_EXGPA0|FM_EXGPA1)
#define MS2BCLK(x) ((x)*12500L)
#define US2BCLK(x) ((x)*1250L)
/*
* prototypes for static function
*/
static void build_claim_beacon(struct s_smc *smc, u_long t_request);
static int init_mac(struct s_smc *smc, int all);
static void rtm_init(struct s_smc *smc);
static void smt_split_up_fifo(struct s_smc *smc);
#if (!defined(NO_SMT_PANIC) || defined(DEBUG))
static char write_mdr_warning [] = "E350 write_mdr() FM_SNPPND is set\n";
static char cam_warning [] = "E_SMT_004: CAM still busy\n";
#endif
#define DUMMY_READ() smc->hw.mc_dummy = (u_short) inp(ADDR(B0_RAP))
#define CHECK_NPP() { unsigned int k = 10000 ;\
while ((inpw(FM_A(FM_STMCHN)) & FM_SNPPND) && k) k--;\
if (!k) { \
SMT_PANIC(smc,SMT_E0130, SMT_E0130_MSG) ; \
} \
}
#define CHECK_CAM() { unsigned int k = 10 ;\
while (!(inpw(FM_A(FM_AFSTAT)) & FM_DONE) && k) k--;\
if (!k) { \
SMT_PANIC(smc,SMT_E0131, SMT_E0131_MSG) ; \
} \
}
const struct fddi_addr fddi_broadcast = {{0xff,0xff,0xff,0xff,0xff,0xff}};
static const struct fddi_addr null_addr = {{0,0,0,0,0,0}};
static const struct fddi_addr dbeacon_multi = {{0x01,0x80,0xc2,0x00,0x01,0x00}};
static const u_short my_said = 0xffff ; /* short address (n.u.) */
static const u_short my_sagp = 0xffff ; /* short group address (n.u.) */
/*
* define my address
*/
#ifdef USE_CAN_ADDR
#define MA smc->hw.fddi_canon_addr
#else
#define MA smc->hw.fddi_home_addr
#endif
/*
* useful interrupt bits
*/
static const int mac_imsk1u = FM_STXABRS | FM_STXABRA0 | FM_SXMTABT ;
static const int mac_imsk1l = FM_SQLCKS | FM_SQLCKA0 | FM_SPCEPDS | FM_SPCEPDA0|
FM_STBURS | FM_STBURA0 ;
/* delete FM_SRBFL after tests */
static const int mac_imsk2u = FM_SERRSF | FM_SNFSLD | FM_SRCVOVR | FM_SRBFL |
FM_SMYCLM ;
static const int mac_imsk2l = FM_STRTEXR | FM_SDUPCLM | FM_SFRMCTR |
FM_SERRCTR | FM_SLSTCTR |
FM_STRTEXP | FM_SMULTDA | FM_SRNGOP ;
static const int mac_imsk3u = FM_SRCVOVR2 | FM_SRBFL2 ;
static const int mac_imsk3l = FM_SRPERRQ2 | FM_SRPERRQ1 ;
static const int mac_beacon_imsk2u = FM_SOTRBEC | FM_SMYBEC | FM_SBEC |
FM_SLOCLM | FM_SHICLM | FM_SMYCLM | FM_SCLM ;
static u_long mac_get_tneg(struct s_smc *smc)
{
u_long tneg ;
tneg = (u_long)((long)inpw(FM_A(FM_TNEG))<<5) ;
return (u_long)((tneg + ((inpw(FM_A(FM_TMRS))>>10)&0x1f)) |
0xffe00000L) ;
}
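/*
 * Illustrative note (not compiled): mac_get_tneg() above rebuilds the
 * negative T_Neg value from two registers -- the FM_TNEG contents
 * shifted left by 5 plus the low 5 bits taken from FM_TMRS -- and
 * forces the upper bits to one (0xffe00000) to sign-extend the BCLK
 * count. The stand-alone helper below only restates that bit packing
 * with hypothetical register values.
 */
#if 0
static u_long example_assemble_tneg(u_short tneg_reg, u_short tmrs_reg)
{
	u_long tneg = (u_long)((long)tneg_reg << 5) ;

	return (u_long)((tneg + ((tmrs_reg >> 10) & 0x1f)) | 0xffe00000L) ;
}
#endif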
void mac_update_counter(struct s_smc *smc)
{
smc->mib.m[MAC0].fddiMACFrame_Ct =
(smc->mib.m[MAC0].fddiMACFrame_Ct & 0xffff0000L)
+ (u_short) inpw(FM_A(FM_FCNTR)) ;
smc->mib.m[MAC0].fddiMACLost_Ct =
(smc->mib.m[MAC0].fddiMACLost_Ct & 0xffff0000L)
+ (u_short) inpw(FM_A(FM_LCNTR)) ;
smc->mib.m[MAC0].fddiMACError_Ct =
(smc->mib.m[MAC0].fddiMACError_Ct & 0xffff0000L)
+ (u_short) inpw(FM_A(FM_ECNTR)) ;
smc->mib.m[MAC0].fddiMACT_Neg = mac_get_tneg(smc) ;
#ifdef SMT_REAL_TOKEN_CT
/*
* If the token counter is emulated it is updated in smt_event.
*/
TBD
#else
smt_emulate_token_ct( smc, MAC0 );
#endif
}
/*
* write long value into buffer memory over memory data register (MDR),
*/
static void write_mdr(struct s_smc *smc, u_long val)
{
CHECK_NPP() ;
MDRW(val) ;
}
#if 0
/*
* read long value from buffer memory over memory data register (MDR),
*/
static u_long read_mdr(struct s_smc *smc, unsigned int addr)
{
long p ;
CHECK_NPP() ;
MARR(addr) ;
outpw(FM_A(FM_CMDREG1),FM_IRMEMWO) ;
	CHECK_NPP() ;	/* needed for PCI to prevent timing violations */
/* p = MDRR() ; */ /* bad read values if the workaround */
/* smc->hw.mc_dummy = *((short volatile far *)(addr)))*/
/* is used */
p = (u_long)inpw(FM_A(FM_MDRU))<<16 ;
p += (u_long)inpw(FM_A(FM_MDRL)) ;
return p;
}
#endif
/*
* clear buffer memory
*/
static void init_ram(struct s_smc *smc)
{
u_short i ;
smc->hw.fp.fifo.rbc_ram_start = 0 ;
smc->hw.fp.fifo.rbc_ram_end =
smc->hw.fp.fifo.rbc_ram_start + RBC_MEM_SIZE ;
CHECK_NPP() ;
MARW(smc->hw.fp.fifo.rbc_ram_start) ;
for (i = smc->hw.fp.fifo.rbc_ram_start;
i < (u_short) (smc->hw.fp.fifo.rbc_ram_end-1); i++)
write_mdr(smc,0L) ;
/* Erase the last byte too */
write_mdr(smc,0L) ;
}
/*
* set receive FIFO pointer
*/
static void set_recvptr(struct s_smc *smc)
{
/*
* initialize the pointer for receive queue 1
*/
outpw(FM_A(FM_RPR1),smc->hw.fp.fifo.rx1_fifo_start) ; /* RPR1 */
outpw(FM_A(FM_SWPR1),smc->hw.fp.fifo.rx1_fifo_start) ; /* SWPR1 */
outpw(FM_A(FM_WPR1),smc->hw.fp.fifo.rx1_fifo_start) ; /* WPR1 */
outpw(FM_A(FM_EARV1),smc->hw.fp.fifo.tx_s_start-1) ; /* EARV1 */
/*
* initialize the pointer for receive queue 2
*/
if (smc->hw.fp.fifo.rx2_fifo_size) {
outpw(FM_A(FM_RPR2),smc->hw.fp.fifo.rx2_fifo_start) ;
outpw(FM_A(FM_SWPR2),smc->hw.fp.fifo.rx2_fifo_start) ;
outpw(FM_A(FM_WPR2),smc->hw.fp.fifo.rx2_fifo_start) ;
outpw(FM_A(FM_EARV2),smc->hw.fp.fifo.rbc_ram_end-1) ;
}
else {
outpw(FM_A(FM_RPR2),smc->hw.fp.fifo.rbc_ram_end-1) ;
outpw(FM_A(FM_SWPR2),smc->hw.fp.fifo.rbc_ram_end-1) ;
outpw(FM_A(FM_WPR2),smc->hw.fp.fifo.rbc_ram_end-1) ;
outpw(FM_A(FM_EARV2),smc->hw.fp.fifo.rbc_ram_end-1) ;
}
}
/*
* set transmit FIFO pointer
*/
static void set_txptr(struct s_smc *smc)
{
outpw(FM_A(FM_CMDREG2),FM_IRSTQ) ; /* reset transmit queues */
/*
* initialize the pointer for asynchronous transmit queue
*/
outpw(FM_A(FM_RPXA0),smc->hw.fp.fifo.tx_a0_start) ; /* RPXA0 */
outpw(FM_A(FM_SWPXA0),smc->hw.fp.fifo.tx_a0_start) ; /* SWPXA0 */
outpw(FM_A(FM_WPXA0),smc->hw.fp.fifo.tx_a0_start) ; /* WPXA0 */
outpw(FM_A(FM_EAA0),smc->hw.fp.fifo.rx2_fifo_start-1) ; /* EAA0 */
/*
* initialize the pointer for synchronous transmit queue
*/
if (smc->hw.fp.fifo.tx_s_size) {
outpw(FM_A(FM_RPXS),smc->hw.fp.fifo.tx_s_start) ;
outpw(FM_A(FM_SWPXS),smc->hw.fp.fifo.tx_s_start) ;
outpw(FM_A(FM_WPXS),smc->hw.fp.fifo.tx_s_start) ;
outpw(FM_A(FM_EAS),smc->hw.fp.fifo.tx_a0_start-1) ;
}
else {
outpw(FM_A(FM_RPXS),smc->hw.fp.fifo.tx_a0_start-1) ;
outpw(FM_A(FM_SWPXS),smc->hw.fp.fifo.tx_a0_start-1) ;
outpw(FM_A(FM_WPXS),smc->hw.fp.fifo.tx_a0_start-1) ;
outpw(FM_A(FM_EAS),smc->hw.fp.fifo.tx_a0_start-1) ;
}
}
/*
* init memory buffer management registers
*/
static void init_rbc(struct s_smc *smc)
{
u_short rbc_ram_addr ;
/*
* set unused pointers or permanent pointers
*/
rbc_ram_addr = smc->hw.fp.fifo.rx2_fifo_start - 1 ;
outpw(FM_A(FM_RPXA1),rbc_ram_addr) ; /* a1-send pointer */
outpw(FM_A(FM_WPXA1),rbc_ram_addr) ;
outpw(FM_A(FM_SWPXA1),rbc_ram_addr) ;
outpw(FM_A(FM_EAA1),rbc_ram_addr) ;
set_recvptr(smc) ;
set_txptr(smc) ;
}
/*
* init rx pointer
*/
static void init_rx(struct s_smc *smc)
{
struct s_smt_rx_queue *queue ;
/*
* init all tx data structures for receive queue 1
*/
smc->hw.fp.rx[QUEUE_R1] = queue = &smc->hw.fp.rx_q[QUEUE_R1] ;
queue->rx_bmu_ctl = (HW_PTR) ADDR(B0_R1_CSR) ;
queue->rx_bmu_dsc = (HW_PTR) ADDR(B4_R1_DA) ;
/*
* init all tx data structures for receive queue 2
*/
smc->hw.fp.rx[QUEUE_R2] = queue = &smc->hw.fp.rx_q[QUEUE_R2] ;
queue->rx_bmu_ctl = (HW_PTR) ADDR(B0_R2_CSR) ;
queue->rx_bmu_dsc = (HW_PTR) ADDR(B4_R2_DA) ;
}
/*
* set the TSYNC register of the FORMAC to regulate synchronous transmission
*/
void set_formac_tsync(struct s_smc *smc, long sync_bw)
{
outpw(FM_A(FM_TSYNC),(unsigned int) (((-sync_bw) >> 5) & 0xffff) ) ;
}
/*
* init all tx data structures
*/
static void init_tx(struct s_smc *smc)
{
struct s_smt_tx_queue *queue ;
/*
* init all tx data structures for the synchronous queue
*/
smc->hw.fp.tx[QUEUE_S] = queue = &smc->hw.fp.tx_q[QUEUE_S] ;
queue->tx_bmu_ctl = (HW_PTR) ADDR(B0_XS_CSR) ;
queue->tx_bmu_dsc = (HW_PTR) ADDR(B5_XS_DA) ;
#ifdef ESS
set_formac_tsync(smc,smc->ess.sync_bw) ;
#endif
/*
* init all tx data structures for the asynchronous queue 0
*/
smc->hw.fp.tx[QUEUE_A0] = queue = &smc->hw.fp.tx_q[QUEUE_A0] ;
queue->tx_bmu_ctl = (HW_PTR) ADDR(B0_XA_CSR) ;
queue->tx_bmu_dsc = (HW_PTR) ADDR(B5_XA_DA) ;
llc_recover_tx(smc) ;
}
static void mac_counter_init(struct s_smc *smc)
{
int i ;
u_long *ec ;
/*
* clear FORMAC+ frame-, lost- and error counter
*/
outpw(FM_A(FM_FCNTR),0) ;
outpw(FM_A(FM_LCNTR),0) ;
outpw(FM_A(FM_ECNTR),0) ;
/*
* clear internal error counter structure
*/
ec = (u_long *)&smc->hw.fp.err_stats ;
for (i = (sizeof(struct err_st)/sizeof(long)) ; i ; i--)
*ec++ = 0L ;
smc->mib.m[MAC0].fddiMACRingOp_Ct = 0 ;
}
/*
* set FORMAC address, and t_request
*/
static void set_formac_addr(struct s_smc *smc)
{
long t_requ = smc->mib.m[MAC0].fddiMACT_Req ;
outpw(FM_A(FM_SAID),my_said) ; /* set short address */
outpw(FM_A(FM_LAIL),(unsigned short)((smc->hw.fddi_home_addr.a[4]<<8) +
smc->hw.fddi_home_addr.a[5])) ;
outpw(FM_A(FM_LAIC),(unsigned short)((smc->hw.fddi_home_addr.a[2]<<8) +
smc->hw.fddi_home_addr.a[3])) ;
outpw(FM_A(FM_LAIM),(unsigned short)((smc->hw.fddi_home_addr.a[0]<<8) +
smc->hw.fddi_home_addr.a[1])) ;
outpw(FM_A(FM_SAGP),my_sagp) ; /* set short group address */
outpw(FM_A(FM_LAGL),(unsigned short)((smc->hw.fp.group_addr.a[4]<<8) +
smc->hw.fp.group_addr.a[5])) ;
outpw(FM_A(FM_LAGC),(unsigned short)((smc->hw.fp.group_addr.a[2]<<8) +
smc->hw.fp.group_addr.a[3])) ;
outpw(FM_A(FM_LAGM),(unsigned short)((smc->hw.fp.group_addr.a[0]<<8) +
smc->hw.fp.group_addr.a[1])) ;
/* set r_request regs. (MSW & LSW of TRT ) */
outpw(FM_A(FM_TREQ1),(unsigned short)(t_requ>>16)) ;
outpw(FM_A(FM_TREQ0),(unsigned short)t_requ) ;
}
static void set_int(char *p, int l)
{
p[0] = (char)(l >> 24) ;
p[1] = (char)(l >> 16) ;
p[2] = (char)(l >> 8) ;
p[3] = (char)(l >> 0) ;
}
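/*
 * set_int() above serializes a 32-bit value MSB first (big endian) into
 * the frame buffer. For illustration only, a matching decoder could
 * look like the stand-alone sketch below; it is not used by the driver
 * and uses unsigned char deliberately to avoid sign extension.
 */
#if 0
static unsigned int example_get_int(const unsigned char *p)
{
	return ((unsigned int)p[0] << 24) |
	       ((unsigned int)p[1] << 16) |
	       ((unsigned int)p[2] << 8) |
		(unsigned int)p[3] ;
}
#endif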
/*
* copy TX descriptor to buffer mem
* append FC field and MAC frame
* if more bit is set in descr
* append pointer to descriptor (endless loop)
* else
* append 'end of chain' pointer
*/
static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
unsigned int off, int len)
/* u_long td; transmit descriptor */
/* struct fddi_mac *mac; mac frame pointer */
/* unsigned int off; start address within buffer memory */
/* int len ; length of the frame including the FC */
{
int i ;
__le32 *p ;
CHECK_NPP() ;
MARW(off) ; /* set memory address reg for writes */
p = (__le32 *) mac ;
for (i = (len + 3)/4 ; i ; i--) {
if (i == 1) {
/* last word, set the tag bit */
outpw(FM_A(FM_CMDREG2),FM_ISTTB) ;
}
write_mdr(smc,le32_to_cpu(*p)) ;
p++ ;
}
outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; /* set the tag bit */
write_mdr(smc,td) ; /* write over memory data reg to buffer */
}
/*
BEGIN_MANUAL_ENTRY(module;tests;3)
How to test directed beacon frames
----------------------------------------------------------------
o Insert a break point in the function build_claim_beacon()
before calling copy_tx_mac() for building the claim frame.
o Modify the RM3_DETECT case so that the RM6_DETECT state
	  will always be entered from the RM3_DETECT state (function rmt_fsm(),
rmt.c)
o Compile the driver.
o Set the parameter TREQ in the protocol.ini or net.cfg to a
small value to make sure your station will win the claim
process.
o Start the driver.
o When you reach the break point, modify the SA and DA address
of the claim frame (e.g. SA = DA = 10005affffff).
o When you see RM3_DETECT and RM6_DETECT, observe the direct
beacon frames on the UPPSLANA.
END_MANUAL_ENTRY
*/
static void directed_beacon(struct s_smc *smc)
{
SK_LOC_DECL(__le32,a[2]) ;
/*
* set UNA in frame
* enable FORMAC to send endless queue of directed beacon
* important: the UNA starts at byte 1 (not at byte 0)
*/
* (char *) a = (char) ((long)DBEACON_INFO<<24L) ;
a[1] = 0 ;
memcpy((char *)a+1, (char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr, ETH_ALEN);
CHECK_NPP() ;
/* set memory address reg for writes */
MARW(smc->hw.fp.fifo.rbc_ram_start+DBEACON_FRAME_OFF+4) ;
write_mdr(smc,le32_to_cpu(a[0])) ;
outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; /* set the tag bit */
write_mdr(smc,le32_to_cpu(a[1])) ;
outpw(FM_A(FM_SABC),smc->hw.fp.fifo.rbc_ram_start + DBEACON_FRAME_OFF) ;
}
/*
setup claim & beacon pointer
NOTE :
special frame packets end with a pointer to their own
descriptor, and the MORE bit is set in the descriptor
*/
static void build_claim_beacon(struct s_smc *smc, u_long t_request)
{
u_int td ;
int len ;
struct fddi_mac_sf *mac ;
/*
* build claim packet
*/
len = 17 ;
td = TX_DESCRIPTOR | ((((u_int)len-1)&3)<<27) ;
mac = &smc->hw.fp.mac_sfb ;
mac->mac_fc = FC_CLAIM ;
/* DA == SA in claim frame */
mac->mac_source = mac->mac_dest = MA ;
/* 2's complement */
set_int((char *)mac->mac_info,(int)t_request) ;
copy_tx_mac(smc,td,(struct fddi_mac *)mac,
smc->hw.fp.fifo.rbc_ram_start + CLAIM_FRAME_OFF,len) ;
/* set CLAIM start pointer */
outpw(FM_A(FM_SACL),smc->hw.fp.fifo.rbc_ram_start + CLAIM_FRAME_OFF) ;
/*
* build beacon packet
*/
len = 17 ;
td = TX_DESCRIPTOR | ((((u_int)len-1)&3)<<27) ;
mac->mac_fc = FC_BEACON ;
mac->mac_source = MA ;
mac->mac_dest = null_addr ; /* DA == 0 in beacon frame */
set_int((char *) mac->mac_info,((int)BEACON_INFO<<24) + 0 ) ;
copy_tx_mac(smc,td,(struct fddi_mac *)mac,
smc->hw.fp.fifo.rbc_ram_start + BEACON_FRAME_OFF,len) ;
/* set beacon start pointer */
outpw(FM_A(FM_SABC),smc->hw.fp.fifo.rbc_ram_start + BEACON_FRAME_OFF) ;
/*
* build directed beacon packet
* contains optional UNA
*/
len = 23 ;
td = TX_DESCRIPTOR | ((((u_int)len-1)&3)<<27) ;
mac->mac_fc = FC_BEACON ;
mac->mac_source = MA ;
mac->mac_dest = dbeacon_multi ; /* multicast */
set_int((char *) mac->mac_info,((int)DBEACON_INFO<<24) + 0 ) ;
set_int((char *) mac->mac_info+4,0) ;
set_int((char *) mac->mac_info+8,0) ;
copy_tx_mac(smc,td,(struct fddi_mac *)mac,
smc->hw.fp.fifo.rbc_ram_start + DBEACON_FRAME_OFF,len) ;
/* end of claim/beacon queue */
outpw(FM_A(FM_EACB),smc->hw.fp.fifo.rx1_fifo_start-1) ;
outpw(FM_A(FM_WPXSF),0) ;
outpw(FM_A(FM_RPXSF),0) ;
}
static void formac_rcv_restart(struct s_smc *smc)
{
/* enable receive function */
SETMASK(FM_A(FM_MDREG1),smc->hw.fp.rx_mode,FM_ADDRX) ;
outpw(FM_A(FM_CMDREG1),FM_ICLLR) ; /* clear receive lock */
}
void formac_tx_restart(struct s_smc *smc)
{
outpw(FM_A(FM_CMDREG1),FM_ICLLS) ; /* clear s-frame lock */
outpw(FM_A(FM_CMDREG1),FM_ICLLA0) ; /* clear a-frame lock */
}
static void enable_formac(struct s_smc *smc)
{
/* set formac IMSK : 0 enables irq */
outpw(FM_A(FM_IMSK1U),(unsigned short)~mac_imsk1u);
outpw(FM_A(FM_IMSK1L),(unsigned short)~mac_imsk1l);
outpw(FM_A(FM_IMSK2U),(unsigned short)~mac_imsk2u);
outpw(FM_A(FM_IMSK2L),(unsigned short)~mac_imsk2l);
outpw(FM_A(FM_IMSK3U),(unsigned short)~mac_imsk3u);
outpw(FM_A(FM_IMSK3L),(unsigned short)~mac_imsk3l);
}
#if 0 /* Removed because the driver should use the ASICs TX complete IRQ. */
/* The FORMAC's tx complete IRQ should not be used any longer */
/*
BEGIN_MANUAL_ENTRY(if,func;others;4)
void enable_tx_irq(smc, queue)
struct s_smc *smc ;
u_short queue ;
Function DOWNCALL (SMT, fplustm.c)
enable_tx_irq() enables the FORMACs transmit complete
interrupt of the queue.
Para queue = QUEUE_S: synchronous queue
= QUEUE_A0: asynchronous queue
Note After any ring operational change the transmit complete
interrupts are disabled.
The operating system dependent module must enable
the transmit complete interrupt of a queue,
- when it queues the first frame,
			because no transmit resources are
			available, and
- when it escapes from the function llc_restart_tx
while some frames are still queued.
END_MANUAL_ENTRY
*/
void enable_tx_irq(struct s_smc *smc, u_short queue)
/* u_short queue; 0 = synchronous queue, 1 = asynchronous queue 0 */
{
u_short imask ;
imask = ~(inpw(FM_A(FM_IMSK1U))) ;
if (queue == 0) {
outpw(FM_A(FM_IMSK1U),~(imask|FM_STEFRMS)) ;
}
if (queue == 1) {
outpw(FM_A(FM_IMSK1U),~(imask|FM_STEFRMA0)) ;
}
}
/*
BEGIN_MANUAL_ENTRY(if,func;others;4)
void disable_tx_irq(smc, queue)
struct s_smc *smc ;
u_short queue ;
Function DOWNCALL (SMT, fplustm.c)
disable_tx_irq disables the FORMACs transmit complete
interrupt of the queue
Para queue = QUEUE_S: synchronous queue
= QUEUE_A0: asynchronous queue
Note The operating system dependent module should disable
the transmit complete interrupts if it escapes from the
function llc_restart_tx and no frames are queued.
END_MANUAL_ENTRY
*/
void disable_tx_irq(struct s_smc *smc, u_short queue)
/* u_short queue; 0 = synchronous queue, 1 = asynchronous queue 0 */
{
u_short imask ;
imask = ~(inpw(FM_A(FM_IMSK1U))) ;
if (queue == 0) {
outpw(FM_A(FM_IMSK1U),~(imask&~FM_STEFRMS)) ;
}
if (queue == 1) {
outpw(FM_A(FM_IMSK1U),~(imask&~FM_STEFRMA0)) ;
}
}
#endif
static void disable_formac(struct s_smc *smc)
{
/* clear formac IMSK : 1 disables irq */
outpw(FM_A(FM_IMSK1U),MW) ;
outpw(FM_A(FM_IMSK1L),MW) ;
outpw(FM_A(FM_IMSK2U),MW) ;
outpw(FM_A(FM_IMSK2L),MW) ;
outpw(FM_A(FM_IMSK3U),MW) ;
outpw(FM_A(FM_IMSK3L),MW) ;
}
static void mac_ring_up(struct s_smc *smc, int up)
{
if (up) {
formac_rcv_restart(smc) ; /* enable receive function */
smc->hw.mac_ring_is_up = TRUE ;
llc_restart_tx(smc) ; /* TX queue */
}
else {
/* disable receive function */
SETMASK(FM_A(FM_MDREG1),FM_MDISRCV,FM_ADDET) ;
/* abort current transmit activity */
outpw(FM_A(FM_CMDREG2),FM_IACTR) ;
smc->hw.mac_ring_is_up = FALSE ;
}
}
/*--------------------------- ISR handling ----------------------------------*/
/*
* mac1_irq is in drvfbi.c
*/
/*
* mac2_irq: status bits for the receive queue 1, and ring status
* ring status indication bits
*/
void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l)
{
u_short change_s2l ;
u_short change_s2u ;
/* (jd) 22-Feb-1999
* Restart 2_DMax Timer after end of claiming or beaconing
*/
if (code_s2u & (FM_SCLM|FM_SHICLM|FM_SBEC|FM_SOTRBEC)) {
queue_event(smc,EVENT_RMT,RM_TX_STATE_CHANGE) ;
}
else if (code_s2l & (FM_STKISS)) {
queue_event(smc,EVENT_RMT,RM_TX_STATE_CHANGE) ;
}
/*
* XOR current st bits with the last to avoid useless RMT event queuing
*/
change_s2l = smc->hw.fp.s2l ^ code_s2l ;
change_s2u = smc->hw.fp.s2u ^ code_s2u ;
if ((change_s2l & FM_SRNGOP) ||
(!smc->hw.mac_ring_is_up && ((code_s2l & FM_SRNGOP)))) {
if (code_s2l & FM_SRNGOP) {
mac_ring_up(smc,1) ;
queue_event(smc,EVENT_RMT,RM_RING_OP) ;
smc->mib.m[MAC0].fddiMACRingOp_Ct++ ;
}
else {
mac_ring_up(smc,0) ;
queue_event(smc,EVENT_RMT,RM_RING_NON_OP) ;
}
goto mac2_end ;
}
if (code_s2l & FM_SMISFRM) { /* missed frame */
smc->mib.m[MAC0].fddiMACNotCopied_Ct++ ;
}
if (code_s2u & (FM_SRCVOVR | /* recv. FIFO overflow */
FM_SRBFL)) { /* recv. buffer full */
smc->hw.mac_ct.mac_r_restart_counter++ ;
/* formac_rcv_restart(smc) ; */
smt_stat_counter(smc,1) ;
/* goto mac2_end ; */
}
if (code_s2u & FM_SOTRBEC)
queue_event(smc,EVENT_RMT,RM_OTHER_BEACON) ;
if (code_s2u & FM_SMYBEC)
queue_event(smc,EVENT_RMT,RM_MY_BEACON) ;
if (change_s2u & code_s2u & FM_SLOCLM) {
DB_RMTN(2, "RMT : lower claim received");
}
if ((code_s2u & FM_SMYCLM) && !(code_s2l & FM_SDUPCLM)) {
/*
* This is my claim and that claim is not detected as a
* duplicate one.
*/
queue_event(smc,EVENT_RMT,RM_MY_CLAIM) ;
}
if (code_s2l & FM_SDUPCLM) {
/*
* If a duplicate claim frame (same SA but T_Bid != T_Req)
* this flag will be set.
* In the RMT state machine we need a RM_VALID_CLAIM event
* to do the appropriate state change.
* RM(34c)
*/
queue_event(smc,EVENT_RMT,RM_VALID_CLAIM) ;
}
if (change_s2u & code_s2u & FM_SHICLM) {
DB_RMTN(2, "RMT : higher claim received");
}
if ( (code_s2l & FM_STRTEXP) ||
(code_s2l & FM_STRTEXR) )
queue_event(smc,EVENT_RMT,RM_TRT_EXP) ;
if (code_s2l & FM_SMULTDA) {
/*
		 * The MAC has found a second MAC with the same address.
* Signal dup_addr_test = failed to RMT state machine.
* RM(25)
*/
smc->r.dup_addr_test = DA_FAILED ;
queue_event(smc,EVENT_RMT,RM_DUP_ADDR) ;
}
if (code_s2u & FM_SBEC)
smc->hw.fp.err_stats.err_bec_stat++ ;
if (code_s2u & FM_SCLM)
smc->hw.fp.err_stats.err_clm_stat++ ;
if (code_s2l & FM_STVXEXP)
smc->mib.m[MAC0].fddiMACTvxExpired_Ct++ ;
if ((code_s2u & (FM_SBEC|FM_SCLM))) {
if (!(change_s2l & FM_SRNGOP) && (smc->hw.fp.s2l & FM_SRNGOP)) {
mac_ring_up(smc,0) ;
queue_event(smc,EVENT_RMT,RM_RING_NON_OP) ;
mac_ring_up(smc,1) ;
queue_event(smc,EVENT_RMT,RM_RING_OP) ;
smc->mib.m[MAC0].fddiMACRingOp_Ct++ ;
}
}
if (code_s2l & FM_SPHINV)
smc->hw.fp.err_stats.err_phinv++ ;
if (code_s2l & FM_SSIFG)
smc->hw.fp.err_stats.err_sifg_det++ ;
if (code_s2l & FM_STKISS)
smc->hw.fp.err_stats.err_tkiss++ ;
if (code_s2l & FM_STKERR)
smc->hw.fp.err_stats.err_tkerr++ ;
if (code_s2l & FM_SFRMCTR)
smc->mib.m[MAC0].fddiMACFrame_Ct += 0x10000L ;
if (code_s2l & FM_SERRCTR)
smc->mib.m[MAC0].fddiMACError_Ct += 0x10000L ;
if (code_s2l & FM_SLSTCTR)
smc->mib.m[MAC0].fddiMACLost_Ct += 0x10000L ;
if (code_s2u & FM_SERRSF) {
SMT_PANIC(smc,SMT_E0114, SMT_E0114_MSG) ;
}
mac2_end:
/* notice old status */
smc->hw.fp.s2l = code_s2l ;
smc->hw.fp.s2u = code_s2u ;
outpw(FM_A(FM_IMSK2U),~mac_imsk2u) ;
}
/*
* mac3_irq: receive queue 2 bits and address detection bits
*/
void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l)
{
UNUSED(code_s3l) ;
if (code_s3u & (FM_SRCVOVR2 | /* recv. FIFO overflow */
FM_SRBFL2)) { /* recv. buffer full */
smc->hw.mac_ct.mac_r_restart_counter++ ;
smt_stat_counter(smc,1);
}
if (code_s3u & FM_SRPERRQ2) { /* parity error receive queue 2 */
SMT_PANIC(smc,SMT_E0115, SMT_E0115_MSG) ;
}
	if (code_s3u & FM_SRPERRQ1) {	/* parity error receive queue 1 */
SMT_PANIC(smc,SMT_E0116, SMT_E0116_MSG) ;
}
}
/*
* take formac offline
*/
static void formac_offline(struct s_smc *smc)
{
outpw(FM_A(FM_CMDREG2),FM_IACTR) ;/* abort current transmit activity */
/* disable receive function */
SETMASK(FM_A(FM_MDREG1),FM_MDISRCV,FM_ADDET) ;
/* FORMAC+ 'Initialize Mode' */
SETMASK(FM_A(FM_MDREG1),FM_MINIT,FM_MMODE) ;
disable_formac(smc) ;
smc->hw.mac_ring_is_up = FALSE ;
smc->hw.hw_state = STOPPED ;
}
/*
* bring formac online
*/
static void formac_online(struct s_smc *smc)
{
enable_formac(smc) ;
SETMASK(FM_A(FM_MDREG1),FM_MONLINE | FM_SELRA | MDR1INIT |
smc->hw.fp.rx_mode, FM_MMODE | FM_SELRA | FM_ADDRX) ;
}
/*
* FORMAC+ full init. (tx, rx, timer, counter, claim & beacon)
*/
int init_fplus(struct s_smc *smc)
{
smc->hw.fp.nsa_mode = FM_MRNNSAFNMA ;
smc->hw.fp.rx_mode = FM_MDAMA ;
smc->hw.fp.group_addr = fddi_broadcast ;
smc->hw.fp.func_addr = 0 ;
smc->hw.fp.frselreg_init = 0 ;
init_driver_fplus(smc) ;
if (smc->s.sas == SMT_DAS)
smc->hw.fp.mdr3init |= FM_MENDAS ;
smc->hw.mac_ct.mac_nobuf_counter = 0 ;
smc->hw.mac_ct.mac_r_restart_counter = 0 ;
smc->hw.fp.fm_st1u = (HW_PTR) ADDR(B0_ST1U) ;
smc->hw.fp.fm_st1l = (HW_PTR) ADDR(B0_ST1L) ;
smc->hw.fp.fm_st2u = (HW_PTR) ADDR(B0_ST2U) ;
smc->hw.fp.fm_st2l = (HW_PTR) ADDR(B0_ST2L) ;
smc->hw.fp.fm_st3u = (HW_PTR) ADDR(B0_ST3U) ;
smc->hw.fp.fm_st3l = (HW_PTR) ADDR(B0_ST3L) ;
smc->hw.fp.s2l = smc->hw.fp.s2u = 0 ;
smc->hw.mac_ring_is_up = 0 ;
mac_counter_init(smc) ;
/* convert BCKL units to symbol time */
smc->hw.mac_pa.t_neg = (u_long)0 ;
smc->hw.mac_pa.t_pri = (u_long)0 ;
/* make sure all PCI settings are correct */
mac_do_pci_fix(smc) ;
return init_mac(smc, 1);
/* enable_formac(smc) ; */
}
static int init_mac(struct s_smc *smc, int all)
{
u_short t_max,x ;
u_long time=0 ;
/*
* clear memory
*/
outpw(FM_A(FM_MDREG1),FM_MINIT) ; /* FORMAC+ init mode */
set_formac_addr(smc) ;
	outpw(FM_A(FM_MDREG1),FM_MMEMACT) ;	/* FORMAC+ memory active mode */
	/* Note: Mode register 2 is set here, in case parity is enabled. */
outpw(FM_A(FM_MDREG2),smc->hw.fp.mdr2init) ;
if (all) {
init_ram(smc) ;
}
else {
/*
* reset the HPI, the Master and the BMUs
*/
outp(ADDR(B0_CTRL), CTRL_HPI_SET) ;
time = hwt_quick_read(smc) ;
}
/*
* set all pointers, frames etc
*/
smt_split_up_fifo(smc) ;
init_tx(smc) ;
init_rx(smc) ;
init_rbc(smc) ;
build_claim_beacon(smc,smc->mib.m[MAC0].fddiMACT_Req) ;
/* set RX threshold */
/* see Errata #SN2 Phantom receive overflow */
outpw(FM_A(FM_FRMTHR),14<<12) ; /* switch on */
/* set formac work mode */
outpw(FM_A(FM_MDREG1),MDR1INIT | FM_SELRA | smc->hw.fp.rx_mode) ;
outpw(FM_A(FM_MDREG2),smc->hw.fp.mdr2init) ;
outpw(FM_A(FM_MDREG3),smc->hw.fp.mdr3init) ;
outpw(FM_A(FM_FRSELREG),smc->hw.fp.frselreg_init) ;
/* set timer */
/*
* errata #22 fplus:
* T_MAX must not be FFFE
* or one of FFDF, FFB8, FF91 (-0x27 etc..)
*/
t_max = (u_short)(smc->mib.m[MAC0].fddiMACT_Max/32) ;
x = t_max/0x27 ;
x *= 0x27 ;
if ((t_max == 0xfffe) || (t_max - x == 0x16))
t_max-- ;
outpw(FM_A(FM_TMAX),(u_short)t_max) ;
/* BugFix for report #10204 */
if (smc->mib.m[MAC0].fddiMACTvxValue < (u_long) (- US2BCLK(52))) {
outpw(FM_A(FM_TVX), (u_short) (- US2BCLK(52))/255 & MB) ;
} else {
outpw(FM_A(FM_TVX),
(u_short)((smc->mib.m[MAC0].fddiMACTvxValue/255) & MB)) ;
}
outpw(FM_A(FM_CMDREG1),FM_ICLLS) ; /* clear s-frame lock */
outpw(FM_A(FM_CMDREG1),FM_ICLLA0) ; /* clear a-frame lock */
outpw(FM_A(FM_CMDREG1),FM_ICLLR); /* clear receive lock */
	/* Auto unlock receive threshold for receive queues 1 and 2 */
outpw(FM_A(FM_UNLCKDLY),(0xff|(0xff<<8))) ;
rtm_init(smc) ; /* RT-Monitor */
if (!all) {
/*
* after 10ms, reset the BMUs and repair the rings
*/
hwt_wait_time(smc,time,MS2BCLK(10)) ;
outpd(ADDR(B0_R1_CSR),CSR_SET_RESET) ;
outpd(ADDR(B0_XA_CSR),CSR_SET_RESET) ;
outpd(ADDR(B0_XS_CSR),CSR_SET_RESET) ;
outp(ADDR(B0_CTRL), CTRL_HPI_CLR) ;
outpd(ADDR(B0_R1_CSR),CSR_CLR_RESET) ;
outpd(ADDR(B0_XA_CSR),CSR_CLR_RESET) ;
outpd(ADDR(B0_XS_CSR),CSR_CLR_RESET) ;
if (!smc->hw.hw_is_64bit) {
outpd(ADDR(B4_R1_F), RX_WATERMARK) ;
outpd(ADDR(B5_XA_F), TX_WATERMARK) ;
outpd(ADDR(B5_XS_F), TX_WATERMARK) ;
}
smc->hw.hw_state = STOPPED ;
mac_drv_repair_descr(smc) ;
}
smc->hw.hw_state = STARTED ;
return 0;
}
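/*
 * Illustrative sketch (not compiled) of the T_MAX errata #22 handling
 * in init_mac() above: a value of 0xfffe, or a value that leaves a
 * remainder of 0x16 when divided by 0x27 (0xffdf, 0xffb8, 0xff91, ...),
 * is avoided by decrementing T_MAX by one tick. The helper name below
 * is hypothetical.
 */
#if 0
static u_short example_fix_tmax(u_short t_max)
{
	u_short x = (u_short)((t_max / 0x27) * 0x27) ;

	if (t_max == 0xfffe || (u_short)(t_max - x) == 0x16)
		t_max-- ;
	return t_max ;
}
#endif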
/*
* called by CFM
*/
void config_mux(struct s_smc *smc, int mux)
{
plc_config_mux(smc,mux) ;
SETMASK(FM_A(FM_MDREG1),FM_SELRA,FM_SELRA) ;
}
/*
* called by RMT
* enable CLAIM/BEACON interrupts
* (only called if these events are of interest, e.g. in DETECT state
* the interrupt must not be permanently enabled
* RMT calls this function periodically (timer driven polling)
*/
void sm_mac_check_beacon_claim(struct s_smc *smc)
{
/* set formac IMSK : 0 enables irq */
outpw(FM_A(FM_IMSK2U),~(mac_imsk2u | mac_beacon_imsk2u)) ;
/* the driver must receive the directed beacons */
formac_rcv_restart(smc) ;
process_receive(smc) ;
}
/*-------------------------- interface functions ----------------------------*/
/*
* control MAC layer (called by RMT)
*/
void sm_ma_control(struct s_smc *smc, int mode)
{
switch(mode) {
case MA_OFFLINE :
/* Add to make the MAC offline in RM0_ISOLATED state */
formac_offline(smc) ;
break ;
case MA_RESET :
(void)init_mac(smc,0) ;
break ;
case MA_BEACON :
formac_online(smc) ;
break ;
case MA_DIRECTED :
directed_beacon(smc) ;
break ;
case MA_TREQ :
/*
* no actions necessary, TREQ is already set
*/
break ;
}
}
int sm_mac_get_tx_state(struct s_smc *smc)
{
return (inpw(FM_A(FM_STMCHN))>>4) & 7;
}
/*
* multicast functions
*/
static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
struct fddi_addr *user,
struct fddi_addr *own,
int del, int can)
{
struct s_fpmc *tb ;
struct s_fpmc *slot ;
u_char *p ;
int i ;
/*
* set own = can(user)
*/
*own = *user ;
if (can) {
p = own->a ;
for (i = 0 ; i < 6 ; i++, p++)
*p = bitrev8(*p);
}
slot = NULL;
for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
if (!tb->n) { /* not used */
if (!del && !slot) /* if !del save first free */
slot = tb ;
continue ;
}
if (!ether_addr_equal((char *)&tb->a, (char *)own))
continue ;
return tb;
}
return slot; /* return first free or NULL */
}
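/*
 * Illustrative sketch (not compiled): mac_get_mc_table() above converts
 * between the canonical and the on-ring bit order by reversing the bits
 * of every address byte with bitrev8(). A stand-alone equivalent of
 * that per-byte reversal, for illustration only:
 */
#if 0
static u_char example_bitrev8(u_char b)
{
	u_char r = 0 ;
	int i ;

	for (i = 0 ; i < 8 ; i++) {
		r = (u_char)((r << 1) | (b & 1)) ;
		b >>= 1 ;
	}
	return r ;
}
#endif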
/*
BEGIN_MANUAL_ENTRY(if,func;others;2)
void mac_clear_multicast(smc)
struct s_smc *smc ;
Function DOWNCALL (SMT, fplustm.c)
Clear all multicast entries
END_MANUAL_ENTRY()
*/
void mac_clear_multicast(struct s_smc *smc)
{
struct s_fpmc *tb ;
int i ;
smc->hw.fp.os_slots_used = 0 ; /* note the SMT addresses */
/* will not be deleted */
for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
if (!tb->perm) {
tb->n = 0 ;
}
}
}
/*
BEGIN_MANUAL_ENTRY(if,func;others;2)
int mac_add_multicast(smc,addr,can)
struct s_smc *smc ;
struct fddi_addr *addr ;
int can ;
Function DOWNCALL (SMC, fplustm.c)
Add an entry to the multicast table
Para addr pointer to a multicast address
can = 0: the multicast address has the physical format
= 1: the multicast address has the canonical format
| 0x80 permanent
Returns 0: success
1: address table full
Note After a 'driver reset' or a 'station set address' all
entries of the multicast table are cleared.
In this case the driver has to fill the multicast table again.
After the operating system dependent module filled
the multicast table it must call mac_update_multicast
to activate the new multicast addresses!
END_MANUAL_ENTRY()
*/
int mac_add_multicast(struct s_smc *smc, struct fddi_addr *addr, int can)
{
SK_LOC_DECL(struct fddi_addr,own) ;
struct s_fpmc *tb ;
/*
* check if there are free table entries
*/
if (can & 0x80) {
if (smc->hw.fp.smt_slots_used >= SMT_MAX_MULTI) {
return 1;
}
}
else {
if (smc->hw.fp.os_slots_used >= FPMAX_MULTICAST-SMT_MAX_MULTI) {
return 1;
}
}
/*
* find empty slot
*/
if (!(tb = mac_get_mc_table(smc,addr,&own,0,can & ~0x80)))
return 1;
tb->n++ ;
tb->a = own ;
tb->perm = (can & 0x80) ? 1 : 0 ;
if (can & 0x80)
smc->hw.fp.smt_slots_used++ ;
else
smc->hw.fp.os_slots_used++ ;
return 0;
}
/*
* mode
*/
#define RX_MODE_PROM 0x1
#define RX_MODE_ALL_MULTI 0x2
/*
BEGIN_MANUAL_ENTRY(if,func;others;2)
void mac_update_multicast(smc)
struct s_smc *smc ;
Function DOWNCALL (SMT, fplustm.c)
Update FORMAC multicast registers
END_MANUAL_ENTRY()
*/
void mac_update_multicast(struct s_smc *smc)
{
struct s_fpmc *tb ;
u_char *fu ;
int i ;
/*
* invalidate the CAM
*/
outpw(FM_A(FM_AFCMD),FM_IINV_CAM) ;
/*
* set the functional address
*/
if (smc->hw.fp.func_addr) {
fu = (u_char *) &smc->hw.fp.func_addr ;
outpw(FM_A(FM_AFMASK2),0xffff) ;
outpw(FM_A(FM_AFMASK1),(u_short) ~((fu[0] << 8) + fu[1])) ;
outpw(FM_A(FM_AFMASK0),(u_short) ~((fu[2] << 8) + fu[3])) ;
outpw(FM_A(FM_AFPERS),FM_VALID|FM_DA) ;
outpw(FM_A(FM_AFCOMP2), 0xc000) ;
outpw(FM_A(FM_AFCOMP1), 0x0000) ;
outpw(FM_A(FM_AFCOMP0), 0x0000) ;
outpw(FM_A(FM_AFCMD),FM_IWRITE_CAM) ;
}
/*
* set the mask and the personality register(s)
*/
outpw(FM_A(FM_AFMASK0),0xffff) ;
outpw(FM_A(FM_AFMASK1),0xffff) ;
outpw(FM_A(FM_AFMASK2),0xffff) ;
outpw(FM_A(FM_AFPERS),FM_VALID|FM_DA) ;
for (i = 0, tb = smc->hw.fp.mc.table; i < FPMAX_MULTICAST; i++, tb++) {
if (tb->n) {
CHECK_CAM() ;
/*
* write the multicast address into the CAM
*/
outpw(FM_A(FM_AFCOMP2),
(u_short)((tb->a.a[0]<<8)+tb->a.a[1])) ;
outpw(FM_A(FM_AFCOMP1),
(u_short)((tb->a.a[2]<<8)+tb->a.a[3])) ;
outpw(FM_A(FM_AFCOMP0),
(u_short)((tb->a.a[4]<<8)+tb->a.a[5])) ;
outpw(FM_A(FM_AFCMD),FM_IWRITE_CAM) ;
}
}
}
/*
BEGIN_MANUAL_ENTRY(if,func;others;3)
void mac_set_rx_mode(smc,mode)
struct s_smc *smc ;
int mode ;
Function DOWNCALL/INTERN (SMT, fplustm.c)
			This function enables / disables the selected receive mode.
			Don't call this function if the hardware module is
			used -- use mac_drv_rx_mode() instead.
Para mode = 1 RX_ENABLE_ALLMULTI enable all multicasts
2 RX_DISABLE_ALLMULTI disable "enable all multicasts"
3 RX_ENABLE_PROMISC enable promiscuous
4 RX_DISABLE_PROMISC disable promiscuous
5 RX_ENABLE_NSA enable reception of NSA frames
6 RX_DISABLE_NSA disable reception of NSA frames
Note The selected receive modes will be lost after 'driver reset'
or 'set station address'
END_MANUAL_ENTRY
*/
void mac_set_rx_mode(struct s_smc *smc, int mode)
{
switch (mode) {
case RX_ENABLE_ALLMULTI :
smc->hw.fp.rx_prom |= RX_MODE_ALL_MULTI ;
break ;
case RX_DISABLE_ALLMULTI :
smc->hw.fp.rx_prom &= ~RX_MODE_ALL_MULTI ;
break ;
case RX_ENABLE_PROMISC :
smc->hw.fp.rx_prom |= RX_MODE_PROM ;
break ;
case RX_DISABLE_PROMISC :
smc->hw.fp.rx_prom &= ~RX_MODE_PROM ;
break ;
case RX_ENABLE_NSA :
smc->hw.fp.nsa_mode = FM_MDAMA ;
smc->hw.fp.rx_mode = (smc->hw.fp.rx_mode & ~FM_ADDET) |
smc->hw.fp.nsa_mode ;
break ;
case RX_DISABLE_NSA :
smc->hw.fp.nsa_mode = FM_MRNNSAFNMA ;
smc->hw.fp.rx_mode = (smc->hw.fp.rx_mode & ~FM_ADDET) |
smc->hw.fp.nsa_mode ;
break ;
}
if (smc->hw.fp.rx_prom & RX_MODE_PROM) {
smc->hw.fp.rx_mode = FM_MLIMPROM ;
}
else if (smc->hw.fp.rx_prom & RX_MODE_ALL_MULTI) {
smc->hw.fp.rx_mode = smc->hw.fp.nsa_mode | FM_EXGPA0 ;
}
else
smc->hw.fp.rx_mode = smc->hw.fp.nsa_mode ;
SETMASK(FM_A(FM_MDREG1),smc->hw.fp.rx_mode,FM_ADDRX) ;
mac_update_multicast(smc) ;
}
/*
BEGIN_MANUAL_ENTRY(module;tests;3)
How to test the Restricted Token Monitor
----------------------------------------------------------------
o Insert a break point in the function rtm_irq()
o Remove all stations with a restricted token monitor from the
network.
o Connect a UPPS ISA or EISA station to the network.
	o Give the FORMAC of the UPPS station the command to send
	  restricted tokens until the ring becomes unstable.
o Now connect your test client.
o The restricted token monitor should detect the restricted token,
and your break point will be reached.
	o You can observe how the station will clean the ring.
END_MANUAL_ENTRY
*/
void rtm_irq(struct s_smc *smc)
{
outpw(ADDR(B2_RTM_CRTL),TIM_CL_IRQ) ; /* clear IRQ */
if (inpw(ADDR(B2_RTM_CRTL)) & TIM_RES_TOK) {
outpw(FM_A(FM_CMDREG1),FM_ICL) ; /* force claim */
DB_RMT("RMT: fddiPATHT_Rmode expired");
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS,
(u_long) FDDI_SMT_EVENT,
(u_long) FDDI_RTT, smt_get_event_word(smc));
}
outpw(ADDR(B2_RTM_CRTL),TIM_START) ; /* enable RTM monitoring */
}
static void rtm_init(struct s_smc *smc)
{
outpd(ADDR(B2_RTM_INI),0) ; /* timer = 0 */
outpw(ADDR(B2_RTM_CRTL),TIM_START) ; /* enable IRQ */
}
void rtm_set_timer(struct s_smc *smc)
{
/*
* MIB timer and hardware timer have the same resolution of 80nS
*/
DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns",
(int)smc->mib.a[PATH0].fddiPATHT_Rmode);
outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ;
}
static void smt_split_up_fifo(struct s_smc *smc)
{
/*
BEGIN_MANUAL_ENTRY(module;mem;1)
-------------------------------------------------------------
RECEIVE BUFFER MEMORY DIVERSION
-------------------------------------------------------------
R1_RxD == SMT_R1_RXD_COUNT
R2_RxD == SMT_R2_RXD_COUNT
SMT_R1_RXD_COUNT must be unequal zero
| R1_RxD R2_RxD |R1_RxD R2_RxD | R1_RxD R2_RxD
	| x 0 | x 1-3 | x > 3
----------------------------------------------------------------------
| 63,75 kB | 54,75 | R1_RxD
rx queue 1 | RX_FIFO_SPACE | RX_LARGE_FIFO| ------------- * 63,75 kB
| | | R1_RxD+R2_RxD
----------------------------------------------------------------------
| | 9 kB | R2_RxD
rx queue 2 | 0 kB | RX_SMALL_FIFO| ------------- * 63,75 kB
| (not used) | | R1_RxD+R2_RxD
END_MANUAL_ENTRY
*/
if (SMT_R1_RXD_COUNT == 0) {
SMT_PANIC(smc,SMT_E0117, SMT_E0117_MSG) ;
}
switch(SMT_R2_RXD_COUNT) {
case 0:
smc->hw.fp.fifo.rx1_fifo_size = RX_FIFO_SPACE ;
smc->hw.fp.fifo.rx2_fifo_size = 0 ;
break ;
case 1:
case 2:
case 3:
smc->hw.fp.fifo.rx1_fifo_size = RX_LARGE_FIFO ;
smc->hw.fp.fifo.rx2_fifo_size = RX_SMALL_FIFO ;
break ;
	default:	/* this is not the real default */
smc->hw.fp.fifo.rx1_fifo_size = RX_FIFO_SPACE *
SMT_R1_RXD_COUNT/(SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT) ;
smc->hw.fp.fifo.rx2_fifo_size = RX_FIFO_SPACE *
SMT_R2_RXD_COUNT/(SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT) ;
break ;
}
/*
BEGIN_MANUAL_ENTRY(module;mem;1)
-------------------------------------------------------------
TRANSMIT BUFFER MEMORY DIVERSION
-------------------------------------------------------------
| no sync bw | sync bw available and | sync bw available and
| available | SynchTxMode = SPLIT | SynchTxMode = ALL
-----------------------------------------------------------------------
sync tx | 0 kB | 32 kB | 55 kB
queue | | TX_MEDIUM_FIFO | TX_LARGE_FIFO
-----------------------------------------------------------------------
async tx | 64 kB | 32 kB | 9 k
queue | TX_FIFO_SPACE| TX_MEDIUM_FIFO | TX_SMALL_FIFO
END_MANUAL_ENTRY
*/
/*
* set the tx mode bits
*/
if (smc->mib.a[PATH0].fddiPATHSbaPayload) {
#ifdef ESS
smc->hw.fp.fifo.fifo_config_mode |=
smc->mib.fddiESSSynchTxMode | SYNC_TRAFFIC_ON ;
#endif
}
else {
smc->hw.fp.fifo.fifo_config_mode &=
~(SEND_ASYNC_AS_SYNC|SYNC_TRAFFIC_ON) ;
}
/*
* split up the FIFO
*/
if (smc->hw.fp.fifo.fifo_config_mode & SYNC_TRAFFIC_ON) {
if (smc->hw.fp.fifo.fifo_config_mode & SEND_ASYNC_AS_SYNC) {
smc->hw.fp.fifo.tx_s_size = TX_LARGE_FIFO ;
smc->hw.fp.fifo.tx_a0_size = TX_SMALL_FIFO ;
}
else {
smc->hw.fp.fifo.tx_s_size = TX_MEDIUM_FIFO ;
smc->hw.fp.fifo.tx_a0_size = TX_MEDIUM_FIFO ;
}
}
else {
smc->hw.fp.fifo.tx_s_size = 0 ;
smc->hw.fp.fifo.tx_a0_size = TX_FIFO_SPACE ;
}
smc->hw.fp.fifo.rx1_fifo_start = smc->hw.fp.fifo.rbc_ram_start +
RX_FIFO_OFF ;
smc->hw.fp.fifo.tx_s_start = smc->hw.fp.fifo.rx1_fifo_start +
smc->hw.fp.fifo.rx1_fifo_size ;
smc->hw.fp.fifo.tx_a0_start = smc->hw.fp.fifo.tx_s_start +
smc->hw.fp.fifo.tx_s_size ;
smc->hw.fp.fifo.rx2_fifo_start = smc->hw.fp.fifo.tx_a0_start +
smc->hw.fp.fifo.tx_a0_size ;
DB_SMT("FIFO split: mode = %x", smc->hw.fp.fifo.fifo_config_mode);
DB_SMT("rbc_ram_start = %x rbc_ram_end = %x",
smc->hw.fp.fifo.rbc_ram_start, smc->hw.fp.fifo.rbc_ram_end);
DB_SMT("rx1_fifo_start = %x tx_s_start = %x",
smc->hw.fp.fifo.rx1_fifo_start, smc->hw.fp.fifo.tx_s_start);
DB_SMT("tx_a0_start = %x rx2_fifo_start = %x",
smc->hw.fp.fifo.tx_a0_start, smc->hw.fp.fifo.rx2_fifo_start);
}
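/*
 * Illustrative sketch (added for clarity, not part of the driver): the
 * proportional receive-FIFO split used above for SMT_R2_RXD_COUNT > 3,
 * computed with example descriptor counts.  The 63.75 kB value for
 * RX_FIFO_SPACE is an assumption taken from the table above; the real
 * value comes from the hardware headers.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const unsigned int rx_fifo_space = 65280;	/* assumed 63.75 kB */
	unsigned int r1 = 8, r2 = 4;			/* example RxD counts */
	unsigned int rx1 = rx_fifo_space * r1 / (r1 + r2);
	unsigned int rx2 = rx_fifo_space * r2 / (r1 + r2);

	printf("rx1_fifo_size = %u bytes\n", rx1);
	printf("rx2_fifo_size = %u bytes\n", rx2);
	return 0;
}
#endif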
void formac_reinit_tx(struct s_smc *smc)
{
/*
* Split up the FIFO and reinitialize the MAC if synchronous
* bandwidth becomes available but no synchronous queue is
* configured.
*/
if (!smc->hw.fp.fifo.tx_s_size && smc->mib.a[PATH0].fddiPATHSbaPayload){
(void)init_mac(smc,0) ;
}
}
| linux-master | drivers/net/fddi/skfp/fplustm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
#define HWMTM
#ifndef FDDI
#define FDDI
#endif
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/supern_2.h"
#include "h/skfbiinc.h"
/*
-------------------------------------------------------------
DOCUMENTATION
-------------------------------------------------------------
BEGIN_MANUAL_ENTRY(DOCUMENTATION)
T B D
END_MANUAL_ENTRY
*/
/*
-------------------------------------------------------------
LOCAL VARIABLES:
-------------------------------------------------------------
*/
#ifdef COMMON_MB_POOL
static SMbuf *mb_start;
static SMbuf *mb_free;
static int mb_init = FALSE ;
static int call_count;
#endif
/*
-------------------------------------------------------------
	EXTERNAL VARIABLES:
-------------------------------------------------------------
*/
#ifdef DEBUG
#ifndef DEBUG_BRD
extern struct smt_debug debug ;
#endif
#endif
#ifdef NDIS_OS2
extern u_char offDepth ;
extern u_char force_irq_pending ;
#endif
/*
-------------------------------------------------------------
LOCAL FUNCTIONS:
-------------------------------------------------------------
*/
static void queue_llc_rx(struct s_smc *smc, SMbuf *mb);
static void smt_to_llc(struct s_smc *smc, SMbuf *mb);
static void init_txd_ring(struct s_smc *smc);
static void init_rxd_ring(struct s_smc *smc);
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb);
static u_long init_descr_ring(struct s_smc *smc, union s_fp_descr volatile *start,
int count);
static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
static SMbuf* get_llc_rx(struct s_smc *smc);
static SMbuf* get_txd_mb(struct s_smc *smc);
static void mac_drv_clear_txd(struct s_smc *smc);
/*
-------------------------------------------------------------
EXTERNAL FUNCTIONS:
-------------------------------------------------------------
*/
/* The external SMT functions are listed in cmtdef.h */
extern void* mac_drv_get_space(struct s_smc *smc, unsigned int size);
extern void* mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size);
extern void mac_drv_fill_rxd(struct s_smc *smc);
extern void mac_drv_tx_complete(struct s_smc *smc,
volatile struct s_smt_fp_txd *txd);
extern void mac_drv_rx_complete(struct s_smc *smc,
volatile struct s_smt_fp_rxd *rxd,
int frag_count, int len);
extern void mac_drv_requeue_rxd(struct s_smc *smc,
volatile struct s_smt_fp_rxd *rxd,
int frag_count);
extern void mac_drv_clear_rxd(struct s_smc *smc,
volatile struct s_smt_fp_rxd *rxd, int frag_count);
#ifdef USE_OS_CPY
extern void hwm_cpy_rxd2mb(void);
extern void hwm_cpy_txd2mb(void);
#endif
#ifdef ALL_RX_COMPLETE
extern void mac_drv_all_receives_complete(void);
#endif
extern u_long mac_drv_virt2phys(struct s_smc *smc, void *virt);
extern u_long dma_master(struct s_smc *smc, void *virt, int len, int flag);
#ifdef NDIS_OS2
extern void post_proc(void);
#else
extern void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
int flag);
#endif
extern int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
int la_len);
/*
-------------------------------------------------------------
PUBLIC FUNCTIONS:
-------------------------------------------------------------
*/
void process_receive(struct s_smc *smc);
void fddi_isr(struct s_smc *smc);
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb);
void init_driver_fplus(struct s_smc *smc);
void mac_drv_rx_mode(struct s_smc *smc, int mode);
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
void mac_drv_clear_tx_queue(struct s_smc *smc);
void mac_drv_clear_rx_queue(struct s_smc *smc);
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
int frame_status);
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
int frame_status);
int mac_drv_init(struct s_smc *smc);
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
int frame_status);
u_int mac_drv_check_space(void);
SMbuf* smt_get_mbuf(struct s_smc *smc);
#ifdef DEBUG
void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev);
#endif
/*
-------------------------------------------------------------
MACROS:
-------------------------------------------------------------
*/
#ifndef UNUSED
#ifdef lint
#define UNUSED(x) (x) = (x)
#else
#define UNUSED(x)
#endif
#endif
#ifdef USE_CAN_ADDR
#define MA smc->hw.fddi_canon_addr.a
#define GROUP_ADDR_BIT 0x01
#else
#define MA smc->hw.fddi_home_addr.a
#define GROUP_ADDR_BIT 0x80
#endif
#define RXD_TXD_COUNT (HWM_ASYNC_TXD_COUNT+HWM_SYNC_TXD_COUNT+\
SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT)
#ifdef MB_OUTSIDE_SMC
#define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd) +\
MAX_MBUF*sizeof(SMbuf))
#define EXT_VIRT_MEM_2 ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#else
#define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#endif
/*
* define critical read for 16 Bit drivers
*/
#if defined(NDIS_OS2) || defined(ODI2)
#define CR_READ(var) ((var) & 0xffff0000 | ((var) & 0xffff))
#else
#define CR_READ(var) (__le32)(var)
#endif
#define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
IS_MINTR1 | IS_MINTR2 | IS_MINTR3 | IS_R1_P | \
IS_R1_C | IS_XA_C | IS_XS_C)
/*
-------------------------------------------------------------
INIT- AND SMT FUNCTIONS:
-------------------------------------------------------------
*/
/*
* BEGIN_MANUAL_ENTRY(mac_drv_check_space)
* u_int mac_drv_check_space()
*
* function DOWNCALL (drvsr.c)
 *		This function calculates the non-virtual memory needed
 *		by the driver for MBufs, RxD and TxD descriptors etc.
*
* return u_int memory in bytes
*
* END_MANUAL_ENTRY
*/
u_int mac_drv_check_space(void)
{
#ifdef MB_OUTSIDE_SMC
#ifdef COMMON_MB_POOL
call_count++ ;
if (call_count == 1) {
return EXT_VIRT_MEM;
}
else {
return EXT_VIRT_MEM_2;
}
#else
return EXT_VIRT_MEM;
#endif
#else
return 0;
#endif
}
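/*
 * Illustrative sketch (added for clarity, not part of the driver): the kind
 * of memory budget mac_drv_check_space() returns, computed with example
 * descriptor and MBuf sizes.  The counts and structure sizes below are
 * assumptions, not the real RXD_TXD_COUNT/MAX_MBUF values.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int rxd_txd_count = 8 + 16 + 8 + 4;	/* example counts */
	unsigned int txd_size = 32, mbuf_size = 4500;	/* example sizes */
	unsigned int max_mbuf = 4;
	unsigned int ext_virt_mem =
		(rxd_txd_count + 1) * txd_size + max_mbuf * mbuf_size;

	printf("external virtual memory needed: %u bytes\n", ext_virt_mem);
	return 0;
}
#endif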
/*
* BEGIN_MANUAL_ENTRY(mac_drv_init)
* void mac_drv_init(smc)
*
* function DOWNCALL (drvsr.c)
 *		In this function the hardware module allocates its
* memory.
* The operating system dependent module should call
 *		mac_drv_init once, after the adapter is detected.
* END_MANUAL_ENTRY
*/
int mac_drv_init(struct s_smc *smc)
{
if (sizeof(struct s_smt_fp_rxd) % 16) {
SMT_PANIC(smc,HWM_E0001,HWM_E0001_MSG) ;
}
if (sizeof(struct s_smt_fp_txd) % 16) {
SMT_PANIC(smc,HWM_E0002,HWM_E0002_MSG) ;
}
/*
* get the required memory for the RxDs and TxDs
*/
if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *)
mac_drv_get_desc_mem(smc,(u_int)
(RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) {
		return 1;	/* no space; the hwm module can't work */
}
/*
* get the memory for the SMT MBufs
*/
#ifndef MB_OUTSIDE_SMC
smc->os.hwm.mbuf_pool.mb_start=(SMbuf *)(&smc->os.hwm.mbuf_pool.mb[0]) ;
#else
#ifndef COMMON_MB_POOL
if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc,
MAX_MBUF*sizeof(SMbuf)))) {
		return 1;	/* no space; the hwm module can't work */
}
#else
if (!mb_start) {
if (!(mb_start = (SMbuf *) mac_drv_get_space(smc,
MAX_MBUF*sizeof(SMbuf)))) {
			return 1;	/* no space; the hwm module can't work */
}
}
#endif
#endif
return 0;
}
/*
* BEGIN_MANUAL_ENTRY(init_driver_fplus)
* init_driver_fplus(smc)
*
 *	Sets hardware module specific values for mode register 2
* (e.g. the byte alignment for the received frames, the position of the
* least significant byte etc.)
* END_MANUAL_ENTRY
*/
void init_driver_fplus(struct s_smc *smc)
{
smc->hw.fp.mdr2init = FM_LSB | FM_BMMODE | FM_ENNPRQ | FM_ENHSRQ | 3 ;
#ifdef PCI
smc->hw.fp.mdr2init |= FM_CHKPAR | FM_PARITY ;
#endif
smc->hw.fp.mdr3init = FM_MENRQAUNLCK | FM_MENRS ;
#ifdef USE_CAN_ADDR
/* enable address bit swapping */
smc->hw.fp.frselreg_init = FM_ENXMTADSWAP | FM_ENRCVADSWAP ;
#endif
}
static u_long init_descr_ring(struct s_smc *smc,
union s_fp_descr volatile *start,
int count)
{
int i ;
union s_fp_descr volatile *d1 ;
union s_fp_descr volatile *d2 ;
u_long phys ;
DB_GEN(3, "descr ring starts at = %p", start);
for (i=count-1, d1=start; i ; i--) {
d2 = d1 ;
d1++ ; /* descr is owned by the host */
d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
d2->r.rxd_next = &d1->r ;
phys = mac_drv_virt2phys(smc,(void *)d1) ;
d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
}
DB_GEN(3, "descr ring ends at = %p", d1);
d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
d1->r.rxd_next = &start->r ;
phys = mac_drv_virt2phys(smc,(void *)start) ;
d1->r.rxd_nrdadr = cpu_to_le32(phys) ;
for (i=count, d1=start; i ; i--) {
DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
d1++;
}
return phys;
}
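/*
 * Illustrative sketch (added for clarity, not part of the driver): the
 * circular linking pattern used by init_descr_ring() above, reduced to a
 * plain next-pointer ring without DMA addresses or cache flushes.
 */
#if 0
#include <stdio.h>

struct desc {
	struct desc *next;
	int idx;
};

static void link_ring(struct desc *d, int count)
{
	int i;

	for (i = 0; i < count - 1; i++) {
		d[i].idx = i;
		d[i].next = &d[i + 1];
	}
	d[count - 1].idx = count - 1;
	d[count - 1].next = &d[0];	/* close the ring */
}

int main(void)
{
	struct desc ring[4];
	struct desc *d;
	int i;

	link_ring(ring, 4);
	for (i = 0, d = &ring[0]; i < 6; i++, d = d->next)
		printf("visit descriptor %d\n", d->idx);
	return 0;
}
#endif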
static void init_txd_ring(struct s_smc *smc)
{
struct s_smt_fp_txd volatile *ds ;
struct s_smt_tx_queue *queue ;
u_long phys ;
/*
* initialize the transmit descriptors
*/
ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p +
SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ;
queue = smc->hw.fp.tx[QUEUE_A0] ;
DB_GEN(3, "Init async TxD ring, %d TxDs", HWM_ASYNC_TXD_COUNT);
(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
HWM_ASYNC_TXD_COUNT) ;
phys = le32_to_cpu(ds->txd_ntdadr) ;
ds++ ;
queue->tx_curr_put = queue->tx_curr_get = ds ;
ds-- ;
queue->tx_free = HWM_ASYNC_TXD_COUNT ;
queue->tx_used = 0 ;
outpd(ADDR(B5_XA_DA),phys) ;
ds = (struct s_smt_fp_txd volatile *) ((char *)ds +
HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ;
queue = smc->hw.fp.tx[QUEUE_S] ;
DB_GEN(3, "Init sync TxD ring, %d TxDs", HWM_SYNC_TXD_COUNT);
(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
HWM_SYNC_TXD_COUNT) ;
phys = le32_to_cpu(ds->txd_ntdadr) ;
ds++ ;
queue->tx_curr_put = queue->tx_curr_get = ds ;
queue->tx_free = HWM_SYNC_TXD_COUNT ;
queue->tx_used = 0 ;
outpd(ADDR(B5_XS_DA),phys) ;
}
static void init_rxd_ring(struct s_smc *smc)
{
struct s_smt_fp_rxd volatile *ds ;
struct s_smt_rx_queue *queue ;
u_long phys ;
/*
* initialize the receive descriptors
*/
ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ;
queue = smc->hw.fp.rx[QUEUE_R1] ;
DB_GEN(3, "Init RxD ring, %d RxDs", SMT_R1_RXD_COUNT);
(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
SMT_R1_RXD_COUNT) ;
phys = le32_to_cpu(ds->rxd_nrdadr) ;
ds++ ;
queue->rx_curr_put = queue->rx_curr_get = ds ;
queue->rx_free = SMT_R1_RXD_COUNT ;
queue->rx_used = 0 ;
outpd(ADDR(B4_R1_DA),phys) ;
}
/*
* BEGIN_MANUAL_ENTRY(init_fddi_driver)
* void init_fddi_driver(smc,mac_addr)
*
 *	initializes the driver and its variables
*
* END_MANUAL_ENTRY
*/
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr)
{
SMbuf *mb ;
int i ;
init_board(smc,mac_addr) ;
(void)init_fplus(smc) ;
/*
* initialize the SMbufs for the SMT
*/
#ifndef COMMON_MB_POOL
mb = smc->os.hwm.mbuf_pool.mb_start ;
smc->os.hwm.mbuf_pool.mb_free = (SMbuf *)NULL ;
for (i = 0; i < MAX_MBUF; i++) {
mb->sm_use_count = 1 ;
smt_free_mbuf(smc,mb) ;
mb++ ;
}
#else
mb = mb_start ;
if (!mb_init) {
mb_free = 0 ;
for (i = 0; i < MAX_MBUF; i++) {
mb->sm_use_count = 1 ;
smt_free_mbuf(smc,mb) ;
mb++ ;
}
mb_init = TRUE ;
}
#endif
/*
* initialize the other variables
*/
smc->os.hwm.llc_rx_pipe = smc->os.hwm.llc_rx_tail = (SMbuf *)NULL ;
smc->os.hwm.txd_tx_pipe = smc->os.hwm.txd_tx_tail = NULL ;
smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = smc->os.hwm.pass_DB = 0 ;
smc->os.hwm.pass_llc_promisc = TRUE ;
smc->os.hwm.queued_rx_frames = smc->os.hwm.queued_txd_mb = 0 ;
smc->os.hwm.detec_count = 0 ;
smc->os.hwm.rx_break = 0 ;
smc->os.hwm.rx_len_error = 0 ;
smc->os.hwm.isr_flag = FALSE ;
/*
* make sure that the start pointer is 16 byte aligned
*/
i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ;
if (i != 16) {
DB_GEN(3, "i = %d", i);
smc->os.hwm.descr_p = (union s_fp_descr volatile *)
((char *)smc->os.hwm.descr_p+i) ;
}
DB_GEN(3, "pt to descr area = %p", smc->os.hwm.descr_p);
init_txd_ring(smc) ;
init_rxd_ring(smc) ;
mac_drv_fill_rxd(smc) ;
init_plc(smc) ;
}
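/*
 * Illustrative sketch (added for clarity, not part of the driver): the
 * 16-byte alignment adjustment performed on descr_p in init_fddi_driver()
 * above, shown on a plain buffer.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	char buf[64];
	char *p = buf + 5;			/* deliberately misaligned */
	unsigned int i = 16 - ((uintptr_t)p & 0xf);

	if (i != 16)				/* already aligned if i == 16 */
		p += i;
	printf("aligned pointer %% 16 = %lu\n",
	       (unsigned long)((uintptr_t)p & 0xf));
	return 0;
}
#endif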
SMbuf *smt_get_mbuf(struct s_smc *smc)
{
register SMbuf *mb ;
#ifndef COMMON_MB_POOL
mb = smc->os.hwm.mbuf_pool.mb_free ;
#else
mb = mb_free ;
#endif
if (mb) {
#ifndef COMMON_MB_POOL
smc->os.hwm.mbuf_pool.mb_free = mb->sm_next ;
#else
mb_free = mb->sm_next ;
#endif
mb->sm_off = 8 ;
mb->sm_use_count = 1 ;
}
DB_GEN(3, "get SMbuf: mb = %p", mb);
return mb; /* May be NULL */
}
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
{
if (mb) {
mb->sm_use_count-- ;
DB_GEN(3, "free_mbuf: sm_use_count = %d", mb->sm_use_count);
/*
* If the use_count is != zero the MBuf is queued
	 * more than once and must not be queued into the
* free MBuf queue
*/
if (!mb->sm_use_count) {
DB_GEN(3, "free SMbuf: mb = %p", mb);
#ifndef COMMON_MB_POOL
mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ;
smc->os.hwm.mbuf_pool.mb_free = mb ;
#else
mb->sm_next = mb_free ;
mb_free = mb ;
#endif
}
}
else
SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ;
}
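/*
 * Illustrative sketch (added for clarity, not part of the driver): the
 * singly linked free list that smt_get_mbuf()/smt_free_mbuf() maintain,
 * with a use count guarding against double insertion.  The struct and
 * helper names are hypothetical.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

struct buf {
	struct buf *next;
	int use_count;
};

static struct buf *free_list;

static struct buf *buf_get(void)
{
	struct buf *b = free_list;

	if (b) {
		free_list = b->next;
		b->use_count = 1;
	}
	return b;			/* may be NULL */
}

static void buf_put(struct buf *b)
{
	if (--b->use_count == 0) {	/* only queue when the last user is done */
		b->next = free_list;
		free_list = b;
	}
}

int main(void)
{
	struct buf pool[2];
	struct buf *b;

	pool[0].next = pool[1].next = NULL;
	pool[0].use_count = pool[1].use_count = 1;
	buf_put(&pool[0]);
	buf_put(&pool[1]);
	b = buf_get();
	printf("got %p, free head %p\n", (void *)b, (void *)free_list);
	return 0;
}
#endif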
/*
* BEGIN_MANUAL_ENTRY(mac_drv_repair_descr)
* void mac_drv_repair_descr(smc)
*
* function called from SMT (HWM / hwmtm.c)
* The BMU is idle when this function is called.
* Mac_drv_repair_descr sets up the physical address
* for all receive and transmit queues where the BMU
* should continue.
 *	It may be that the BMU was reset during a fragmented
 *	transfer. In this case there are some fragments which will
 *	never be completed by the BMU. The OWN bit of these fragments
 *	must be switched so that they are owned by the host.
*
* Give a start command to the receive BMU.
* Start the transmit BMUs if transmit frames pending.
*
* END_MANUAL_ENTRY
*/
void mac_drv_repair_descr(struct s_smc *smc)
{
u_long phys ;
if (smc->hw.hw_state != STOPPED) {
SK_BREAK() ;
SMT_PANIC(smc,HWM_E0013,HWM_E0013_MSG) ;
return ;
}
/*
* repair tx queues: don't start
*/
phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_A0]) ;
outpd(ADDR(B5_XA_DA),phys) ;
if (smc->hw.fp.tx_q[QUEUE_A0].tx_used) {
outpd(ADDR(B0_XA_CSR),CSR_START) ;
}
phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_S]) ;
outpd(ADDR(B5_XS_DA),phys) ;
if (smc->hw.fp.tx_q[QUEUE_S].tx_used) {
outpd(ADDR(B0_XS_CSR),CSR_START) ;
}
/*
* repair rx queues
*/
phys = repair_rxd_ring(smc,smc->hw.fp.rx[QUEUE_R1]) ;
outpd(ADDR(B4_R1_DA),phys) ;
outpd(ADDR(B0_R1_CSR),CSR_START) ;
}
static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
{
int i ;
int tx_used ;
u_long phys ;
u_long tbctrl ;
struct s_smt_fp_txd volatile *t ;
SK_UNUSED(smc) ;
t = queue->tx_curr_get ;
tx_used = queue->tx_used ;
for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
t = t->txd_next ;
}
phys = le32_to_cpu(t->txd_ntdadr) ;
t = queue->tx_curr_get ;
while (tx_used) {
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
tbctrl = le32_to_cpu(t->txd_tbctrl) ;
if (tbctrl & BMU_OWN) {
if (tbctrl & BMU_STF) {
break ; /* exit the loop */
}
else {
/*
* repair the descriptor
*/
t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
}
}
phys = le32_to_cpu(t->txd_ntdadr) ;
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
t = t->txd_next ;
tx_used-- ;
}
return phys;
}
/*
* Repairs the receive descriptor ring and returns the physical address
* where the BMU should continue working.
*
* o The physical address where the BMU was stopped has to be
* determined. This is the next RxD after rx_curr_get with an OWN
* bit set.
 * o The BMU should start working at the beginning of the next frame.
* RxDs with an OWN bit set but with a reset STF bit should be
* skipped and owned by the driver (OWN = 0).
*/
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
{
int i ;
int rx_used ;
u_long phys ;
u_long rbctrl ;
struct s_smt_fp_rxd volatile *r ;
SK_UNUSED(smc) ;
r = queue->rx_curr_get ;
rx_used = queue->rx_used ;
for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
r = r->rxd_next ;
}
phys = le32_to_cpu(r->rxd_nrdadr) ;
r = queue->rx_curr_get ;
while (rx_used) {
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
rbctrl = le32_to_cpu(r->rxd_rbctrl) ;
if (rbctrl & BMU_OWN) {
if (rbctrl & BMU_STF) {
break ; /* exit the loop */
}
else {
/*
* repair the descriptor
*/
r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
}
}
phys = le32_to_cpu(r->rxd_nrdadr) ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
r = r->rxd_next ;
rx_used-- ;
}
return phys;
}
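/*
 * Illustrative sketch (added for clarity, not part of the driver): the
 * repair scan used by repair_txd_ring()/repair_rxd_ring() above.  OWN and
 * STF are assumed bit masks; descriptors owned by the BMU but without a
 * set STF bit are handed back to the host, and the scan stops at the
 * first frame start.
 */
#if 0
#include <stdio.h>

#define OWN	0x1
#define STF	0x2

int main(void)
{
	unsigned int ring[5] = { 0, OWN, OWN, OWN | STF, OWN };
	int i;

	for (i = 0; i < 5; i++) {
		if (ring[i] & OWN) {
			if (ring[i] & STF)
				break;		/* BMU restarts at this frame */
			ring[i] &= ~OWN;	/* orphaned fragment -> host */
		}
	}
	printf("BMU continues at descriptor %d\n", i);
	return 0;
}
#endif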
/*
-------------------------------------------------------------
INTERRUPT SERVICE ROUTINE:
-------------------------------------------------------------
*/
/*
* BEGIN_MANUAL_ENTRY(fddi_isr)
* void fddi_isr(smc)
*
* function DOWNCALL (drvsr.c)
* interrupt service routine, handles the interrupt requests
* generated by the FDDI adapter.
*
* NOTE: The operating system dependent module must guarantee that the
* interrupts of the adapter are disabled when it calls fddi_isr.
*
 *	About the USE_BREAK_ISR mechanism:
 *
 *	The main requirement of this mechanism is to force a timer IRQ when
 *	leaving process_receive() with leave_isr set. process_receive() may
 *	be called at any time from anywhere!
 *	To be sure we don't miss such an event we set 'force_irq' by default.
 *	We have to force a timer IRQ if 'smc->os.hwm.leave_isr' AND
 *	'force_irq' are set. 'force_irq' may be reset if a receive complete
* IRQ is pending.
*
* END_MANUAL_ENTRY
*/
void fddi_isr(struct s_smc *smc)
{
u_long is ; /* ISR source */
u_short stu, stl ;
SMbuf *mb ;
#ifdef USE_BREAK_ISR
int force_irq ;
#endif
#ifdef ODI2
if (smc->os.hwm.rx_break) {
mac_drv_fill_rxd(smc) ;
if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) {
smc->os.hwm.rx_break = 0 ;
process_receive(smc) ;
}
else {
smc->os.hwm.detec_count = 0 ;
smt_force_irq(smc) ;
}
}
#endif
smc->os.hwm.isr_flag = TRUE ;
#ifdef USE_BREAK_ISR
force_irq = TRUE ;
if (smc->os.hwm.leave_isr) {
smc->os.hwm.leave_isr = FALSE ;
process_receive(smc) ;
}
#endif
while ((is = GET_ISR() & ISR_MASK)) {
NDD_TRACE("CH0B",is,0,0) ;
DB_GEN(7, "ISA = 0x%lx", is);
if (is & IMASK_SLOW) {
NDD_TRACE("CH1b",is,0,0) ;
if (is & IS_PLINT1) { /* PLC1 */
plc1_irq(smc) ;
}
if (is & IS_PLINT2) { /* PLC2 */
plc2_irq(smc) ;
}
if (is & IS_MINTR1) { /* FORMAC+ STU1(U/L) */
stu = inpw(FM_A(FM_ST1U)) ;
stl = inpw(FM_A(FM_ST1L)) ;
DB_GEN(6, "Slow transmit complete");
mac1_irq(smc,stu,stl) ;
}
if (is & IS_MINTR2) { /* FORMAC+ STU2(U/L) */
stu= inpw(FM_A(FM_ST2U)) ;
stl= inpw(FM_A(FM_ST2L)) ;
DB_GEN(6, "Slow receive complete");
DB_GEN(7, "stl = %x : stu = %x", stl, stu);
mac2_irq(smc,stu,stl) ;
}
if (is & IS_MINTR3) { /* FORMAC+ STU3(U/L) */
stu= inpw(FM_A(FM_ST3U)) ;
stl= inpw(FM_A(FM_ST3L)) ;
DB_GEN(6, "FORMAC Mode Register 3");
mac3_irq(smc,stu,stl) ;
}
if (is & IS_TIMINT) { /* Timer 82C54-2 */
timer_irq(smc) ;
#ifdef NDIS_OS2
force_irq_pending = 0 ;
#endif
/*
* out of RxD detection
*/
if (++smc->os.hwm.detec_count > 4) {
/*
* check out of RxD condition
*/
process_receive(smc) ;
}
}
if (is & IS_TOKEN) { /* Restricted Token Monitor */
rtm_irq(smc) ;
}
if (is & IS_R1_P) { /* Parity error rx queue 1 */
/* clear IRQ */
outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_P) ;
SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ;
}
if (is & IS_R1_C) { /* Encoding error rx queue 1 */
/* clear IRQ */
outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_C) ;
SMT_PANIC(smc,HWM_E0005,HWM_E0005_MSG) ;
}
if (is & IS_XA_C) { /* Encoding error async tx q */
/* clear IRQ */
outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_C) ;
SMT_PANIC(smc,HWM_E0006,HWM_E0006_MSG) ;
}
if (is & IS_XS_C) { /* Encoding error sync tx q */
/* clear IRQ */
outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_C) ;
SMT_PANIC(smc,HWM_E0007,HWM_E0007_MSG) ;
}
}
/*
* Fast Tx complete Async/Sync Queue (BMU service)
*/
if (is & (IS_XS_F|IS_XA_F)) {
DB_GEN(6, "Fast tx complete queue");
/*
* clear IRQ, Note: no IRQ is lost, because
* we always service both queues
*/
outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_F) ;
outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_F) ;
mac_drv_clear_txd(smc) ;
llc_restart_tx(smc) ;
}
/*
* Fast Rx Complete (BMU service)
*/
if (is & IS_R1_F) {
DB_GEN(6, "Fast receive complete");
/* clear IRQ */
#ifndef USE_BREAK_ISR
outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
process_receive(smc) ;
#else
process_receive(smc) ;
if (smc->os.hwm.leave_isr) {
force_irq = FALSE ;
} else {
outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
process_receive(smc) ;
}
#endif
}
#ifndef NDIS_OS2
while ((mb = get_llc_rx(smc))) {
smt_to_llc(smc,mb) ;
}
#else
if (offDepth)
post_proc() ;
while (!offDepth && (mb = get_llc_rx(smc))) {
smt_to_llc(smc,mb) ;
}
if (!offDepth && smc->os.hwm.rx_break) {
process_receive(smc) ;
}
#endif
if (smc->q.ev_get != smc->q.ev_put) {
NDD_TRACE("CH2a",0,0,0) ;
ev_dispatcher(smc) ;
}
#ifdef NDIS_OS2
post_proc() ;
if (offDepth) { /* leave fddi_isr because */
break ; /* indications not allowed */
}
#endif
#ifdef USE_BREAK_ISR
if (smc->os.hwm.leave_isr) {
break ; /* leave fddi_isr */
}
#endif
/* NOTE: when the isr is left, no rx is pending */
} /* end of interrupt source polling loop */
#ifdef USE_BREAK_ISR
if (smc->os.hwm.leave_isr && force_irq) {
smt_force_irq(smc) ;
}
#endif
smc->os.hwm.isr_flag = FALSE ;
NDD_TRACE("CH0E",0,0,0) ;
}
/*
-------------------------------------------------------------
RECEIVE FUNCTIONS:
-------------------------------------------------------------
*/
#ifndef NDIS_OS2
/*
* BEGIN_MANUAL_ENTRY(mac_drv_rx_mode)
* void mac_drv_rx_mode(smc,mode)
*
* function DOWNCALL (fplus.c)
* Corresponding to the parameter mode, the operating system
* dependent module can activate several receive modes.
*
* para mode = 1: RX_ENABLE_ALLMULTI enable all multicasts
* = 2: RX_DISABLE_ALLMULTI disable "enable all multicasts"
* = 3: RX_ENABLE_PROMISC enable promiscuous
* = 4: RX_DISABLE_PROMISC disable promiscuous
* = 5: RX_ENABLE_NSA enable rec. of all NSA frames
* (disabled after 'driver reset' & 'set station address')
* = 6: RX_DISABLE_NSA disable rec. of all NSA frames
*
* = 21: RX_ENABLE_PASS_SMT ( see description )
* = 22: RX_DISABLE_PASS_SMT ( " " )
* = 23: RX_ENABLE_PASS_NSA ( " " )
* = 24: RX_DISABLE_PASS_NSA ( " " )
* = 25: RX_ENABLE_PASS_DB ( " " )
* = 26: RX_DISABLE_PASS_DB ( " " )
* = 27: RX_DISABLE_PASS_ALL ( " " )
* = 28: RX_DISABLE_LLC_PROMISC ( " " )
* = 29: RX_ENABLE_LLC_PROMISC ( " " )
*
*
* RX_ENABLE_PASS_SMT / RX_DISABLE_PASS_SMT
*
* If the operating system dependent module activates the
* mode RX_ENABLE_PASS_SMT, the hardware module
* duplicates all SMT frames with the frame control
* FC_SMT_INFO and passes them to the LLC receive channel
* by calling mac_drv_rx_init.
* The SMT Frames which are sent by the local SMT and the NSA
* frames whose A- and C-Indicator is not set are also duplicated
* and passed.
* The receive mode RX_DISABLE_PASS_SMT disables the passing
* of SMT frames.
*
* RX_ENABLE_PASS_NSA / RX_DISABLE_PASS_NSA
*
* If the operating system dependent module activates the
* mode RX_ENABLE_PASS_NSA, the hardware module
* duplicates all NSA frames with frame control FC_SMT_NSA
 *	and a set A-Indicator and passes them to the LLC
* receive channel by calling mac_drv_rx_init.
* All NSA Frames which are sent by the local SMT
* are also duplicated and passed.
* The receive mode RX_DISABLE_PASS_NSA disables the passing
* of NSA frames with the A- or C-Indicator set.
*
 *	NOTE:	So that the hardware module receives NSA frames with
 *		a reset A-Indicator, the operating system dependent module
 *		has to call mac_drv_rx_mode with the mode RX_ENABLE_NSA
 *		before activating the RX_ENABLE_PASS_NSA mode and after every
 *		'driver reset' and 'set station address'.
*
* RX_ENABLE_PASS_DB / RX_DISABLE_PASS_DB
*
* If the operating system dependent module activates the
* mode RX_ENABLE_PASS_DB, direct BEACON frames
* (FC_BEACON frame control) are passed to the LLC receive
* channel by mac_drv_rx_init.
* The receive mode RX_DISABLE_PASS_DB disables the passing
* of direct BEACON frames.
*
* RX_DISABLE_PASS_ALL
*
 *		Disables all special receive modes. It is equivalent to
 *		calling mac_drv_set_rx_mode successively with the
* parameters RX_DISABLE_NSA, RX_DISABLE_PASS_SMT,
* RX_DISABLE_PASS_NSA and RX_DISABLE_PASS_DB.
*
* RX_ENABLE_LLC_PROMISC
*
* (default) all received LLC frames and all SMT/NSA/DBEACON
 *		frames depending on the setting of the flags
* PASS_SMT/PASS_NSA/PASS_DBEACON will be delivered to the
* LLC layer
*
* RX_DISABLE_LLC_PROMISC
*
* all received SMT/NSA/DBEACON frames depending on the
 *		setting of the flags PASS_SMT/PASS_NSA/PASS_DBEACON
* will be delivered to the LLC layer.
* all received LLC frames with a directed address, Multicast
* or Broadcast address will be delivered to the LLC
* layer too.
*
* END_MANUAL_ENTRY
*/
void mac_drv_rx_mode(struct s_smc *smc, int mode)
{
switch(mode) {
case RX_ENABLE_PASS_SMT:
smc->os.hwm.pass_SMT = TRUE ;
break ;
case RX_DISABLE_PASS_SMT:
smc->os.hwm.pass_SMT = FALSE ;
break ;
case RX_ENABLE_PASS_NSA:
smc->os.hwm.pass_NSA = TRUE ;
break ;
case RX_DISABLE_PASS_NSA:
smc->os.hwm.pass_NSA = FALSE ;
break ;
case RX_ENABLE_PASS_DB:
smc->os.hwm.pass_DB = TRUE ;
break ;
case RX_DISABLE_PASS_DB:
smc->os.hwm.pass_DB = FALSE ;
break ;
case RX_DISABLE_PASS_ALL:
smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = FALSE ;
smc->os.hwm.pass_DB = FALSE ;
smc->os.hwm.pass_llc_promisc = TRUE ;
mac_set_rx_mode(smc,RX_DISABLE_NSA) ;
break ;
case RX_DISABLE_LLC_PROMISC:
smc->os.hwm.pass_llc_promisc = FALSE ;
break ;
case RX_ENABLE_LLC_PROMISC:
smc->os.hwm.pass_llc_promisc = TRUE ;
break ;
case RX_ENABLE_ALLMULTI:
case RX_DISABLE_ALLMULTI:
case RX_ENABLE_PROMISC:
case RX_DISABLE_PROMISC:
case RX_ENABLE_NSA:
case RX_DISABLE_NSA:
default:
mac_set_rx_mode(smc,mode) ;
break ;
}
}
#endif /* ifndef NDIS_OS2 */
/*
* process receive queue
*/
void process_receive(struct s_smc *smc)
{
int i ;
int n ;
int frag_count ; /* number of RxDs of the curr rx buf */
int used_frags ; /* number of RxDs of the curr frame */
struct s_smt_rx_queue *queue ; /* points to the queue ctl struct */
struct s_smt_fp_rxd volatile *r ; /* rxd pointer */
struct s_smt_fp_rxd volatile *rxd ; /* first rxd of rx frame */
u_long rbctrl ; /* receive buffer control word */
u_long rfsw ; /* receive frame status word */
u_short rx_used ;
u_char far *virt ;
char far *data ;
SMbuf *mb ;
u_char fc ; /* Frame control */
int len ; /* Frame length */
smc->os.hwm.detec_count = 0 ;
queue = smc->hw.fp.rx[QUEUE_R1] ;
NDD_TRACE("RHxB",0,0,0) ;
for ( ; ; ) {
r = queue->rx_curr_get ;
rx_used = queue->rx_used ;
frag_count = 0 ;
#ifdef USE_BREAK_ISR
if (smc->os.hwm.leave_isr) {
goto rx_end ;
}
#endif
#ifdef NDIS_OS2
if (offDepth) {
smc->os.hwm.rx_break = 1 ;
goto rx_end ;
}
smc->os.hwm.rx_break = 0 ;
#endif
#ifdef ODI2
if (smc->os.hwm.rx_break) {
goto rx_end ;
}
#endif
n = 0 ;
do {
DB_RX(5, "Check RxD %p for OWN and EOF", r);
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));
if (rbctrl & BMU_OWN) {
NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
DB_RX(4, "End of RxDs");
goto rx_end ;
}
/*
* out of RxD detection
*/
if (!rx_used) {
SK_BREAK() ;
SMT_PANIC(smc,HWM_E0009,HWM_E0009_MSG) ;
/* Either we don't have an RxD or all
* RxDs are filled. Therefore it's allowed
			 * to set the STOPPED flag */
smc->hw.hw_state = STOPPED ;
mac_drv_clear_rx_queue(smc) ;
smc->hw.hw_state = STARTED ;
mac_drv_fill_rxd(smc) ;
smc->os.hwm.detec_count = 0 ;
goto rx_end ;
}
rfsw = le32_to_cpu(r->rxd_rfsw) ;
if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
/*
* The BMU_STF bit is deleted, 1 frame is
* placed into more than 1 rx buffer
*
* skip frame by setting the rx len to 0
*
* if fragment count == 0
* The missing STF bit belongs to the
* current frame, search for the
* EOF bit to complete the frame
* else
* the fragment belongs to the next frame,
* exit the loop and process the frame
*/
SK_BREAK() ;
rfsw = 0 ;
if (frag_count) {
break ;
}
}
n += rbctrl & 0xffff ;
r = r->rxd_next ;
frag_count++ ;
rx_used-- ;
} while (!(rbctrl & BMU_EOF)) ;
used_frags = frag_count ;
DB_RX(5, "EOF set in RxD, used_frags = %d", used_frags);
/* may be next 2 DRV_BUF_FLUSH() can be skipped, because */
/* BMU_ST_BUF will not be changed by the ASIC */
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
DB_RX(5, "Check STF bit in %p", r);
r = r->rxd_next ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
frag_count++ ;
rx_used-- ;
}
DB_RX(5, "STF bit found");
/*
* The received frame is finished for the process receive
*/
rxd = queue->rx_curr_get ;
queue->rx_curr_get = r ;
queue->rx_free += frag_count ;
queue->rx_used = rx_used ;
/*
* ASIC Errata no. 7 (STF - Bit Bug)
*/
rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;
for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
DB_RX(5, "dma_complete for RxD %p", r);
dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
}
smc->hw.fp.err_stats.err_valid++ ;
smc->mib.m[MAC0].fddiMACCopied_Ct++ ;
/* the length of the data including the FC */
len = (rfsw & RD_LENGTH) - 4 ;
DB_RX(4, "frame length = %d", len);
/*
* check the frame_length and all error flags
*/
if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
if (rfsw & RD_S_MSRABT) {
DB_RX(2, "Frame aborted by the FORMAC");
smc->hw.fp.err_stats.err_abort++ ;
}
/*
* check frame status
*/
if (rfsw & RD_S_SEAC2) {
DB_RX(2, "E-Indicator set");
smc->hw.fp.err_stats.err_e_indicator++ ;
}
if (rfsw & RD_S_SFRMERR) {
DB_RX(2, "CRC error");
smc->hw.fp.err_stats.err_crc++ ;
}
if (rfsw & RX_FS_IMPL) {
DB_RX(2, "Implementer frame");
smc->hw.fp.err_stats.err_imp_frame++ ;
}
goto abort_frame ;
}
if (len > FDDI_RAW_MTU-4) {
DB_RX(2, "Frame too long error");
smc->hw.fp.err_stats.err_too_long++ ;
goto abort_frame ;
}
/*
* SUPERNET 3 Bug: FORMAC delivers status words
* of aborted frames to the BMU
*/
if (len <= 4) {
DB_RX(2, "Frame length = 0");
goto abort_frame ;
}
if (len != (n-4)) {
DB_RX(4, "BMU: rx len differs: [%d:%d]", len, n);
smc->os.hwm.rx_len_error++ ;
goto abort_frame ;
}
/*
* Check SA == MA
*/
virt = (u_char far *) rxd->rxd_virt ;
DB_RX(2, "FC = %x", *virt);
if (virt[12] == MA[5] &&
virt[11] == MA[4] &&
virt[10] == MA[3] &&
virt[9] == MA[2] &&
virt[8] == MA[1] &&
(virt[7] & ~GROUP_ADDR_BIT) == MA[0]) {
goto abort_frame ;
}
/*
* test if LLC frame
*/
if (rfsw & RX_FS_LLC) {
/*
* if pass_llc_promisc is disable
* if DA != Multicast or Broadcast or DA!=MA
* abort the frame
*/
if (!smc->os.hwm.pass_llc_promisc) {
if(!(virt[1] & GROUP_ADDR_BIT)) {
if (virt[6] != MA[5] ||
virt[5] != MA[4] ||
virt[4] != MA[3] ||
virt[3] != MA[2] ||
virt[2] != MA[1] ||
virt[1] != MA[0]) {
DB_RX(2, "DA != MA and not multi- or broadcast");
goto abort_frame ;
}
}
}
/*
* LLC frame received
*/
DB_RX(4, "LLC - receive");
mac_drv_rx_complete(smc,rxd,frag_count,len) ;
}
else {
if (!(mb = smt_get_mbuf(smc))) {
smc->hw.fp.err_stats.err_no_buf++ ;
DB_RX(4, "No SMbuf; receive terminated");
goto abort_frame ;
}
data = smtod(mb,char *) - 1 ;
/*
* copy the frame into a SMT_MBuf
*/
#ifdef USE_OS_CPY
hwm_cpy_rxd2mb(rxd,data,len) ;
#else
for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
DB_RX(6, "cp SMT frame to mb: len = %d", n);
memcpy(data,r->rxd_virt,n) ;
data += n ;
}
data = smtod(mb,char *) - 1 ;
#endif
fc = *(char *)mb->sm_data = *data ;
mb->sm_len = len - 1 ; /* len - fc */
data++ ;
/*
* SMT frame received
*/
switch(fc) {
case FC_SMT_INFO :
smc->hw.fp.err_stats.err_smt_frame++ ;
DB_RX(5, "SMT frame received");
if (smc->os.hwm.pass_SMT) {
DB_RX(5, "pass SMT frame");
mac_drv_rx_complete(smc, rxd,
frag_count,len) ;
}
else {
DB_RX(5, "requeue RxD");
mac_drv_requeue_rxd(smc,rxd,frag_count);
}
smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
break ;
case FC_SMT_NSA :
smc->hw.fp.err_stats.err_smt_frame++ ;
DB_RX(5, "SMT frame received");
/* if pass_NSA set pass the NSA frame or */
/* pass_SMT set and the A-Indicator */
/* is not set, pass the NSA frame */
if (smc->os.hwm.pass_NSA ||
(smc->os.hwm.pass_SMT &&
!(rfsw & A_INDIC))) {
DB_RX(5, "pass SMT frame");
mac_drv_rx_complete(smc, rxd,
frag_count,len) ;
}
else {
DB_RX(5, "requeue RxD");
mac_drv_requeue_rxd(smc,rxd,frag_count);
}
smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
break ;
case FC_BEACON :
if (smc->os.hwm.pass_DB) {
DB_RX(5, "pass DB frame");
mac_drv_rx_complete(smc, rxd,
frag_count,len) ;
}
else {
DB_RX(5, "requeue RxD");
mac_drv_requeue_rxd(smc,rxd,frag_count);
}
smt_free_mbuf(smc,mb) ;
break ;
default :
/*
* unknown FC abort the frame
*/
DB_RX(2, "unknown FC error");
smt_free_mbuf(smc,mb) ;
DB_RX(5, "requeue RxD");
mac_drv_requeue_rxd(smc,rxd,frag_count) ;
if ((fc & 0xf0) == FC_MAC)
smc->hw.fp.err_stats.err_mac_frame++ ;
else
smc->hw.fp.err_stats.err_imp_frame++ ;
break ;
}
}
DB_RX(3, "next RxD is %p", queue->rx_curr_get);
NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;
continue ;
/*--------------------------------------------------------------------*/
abort_frame:
DB_RX(5, "requeue RxD");
mac_drv_requeue_rxd(smc,rxd,frag_count) ;
DB_RX(3, "next RxD is %p", queue->rx_curr_get);
NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
}
rx_end:
#ifdef ALL_RX_COMPLETE
mac_drv_all_receives_complete(smc) ;
#endif
return ; /* lint bug: needs return detect end of function */
}
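/*
 * Illustrative sketch (added for clarity, not part of the driver): the
 * "SA == MA" check performed in process_receive() above, with the
 * group-address bit masked out of the first address byte.  The address
 * bytes, their ordering and the group bit value are example data; the
 * real check compares the bytes in the received buffer layout.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define GROUP_BIT	0x01	/* assumed canonical group-address bit */

static int is_own_source(const unsigned char *sa, const unsigned char *ma)
{
	unsigned char first = sa[0] & (unsigned char)~GROUP_BIT;

	return first == ma[0] && memcmp(sa + 1, ma + 1, 5) == 0;
}

int main(void)
{
	unsigned char ma[6] = { 0x00, 0x40, 0xab, 0x12, 0x34, 0x56 };
	unsigned char sa[6] = { 0x01, 0x40, 0xab, 0x12, 0x34, 0x56 };

	printf("own frame: %d\n", is_own_source(sa, ma));
	return 0;
}
#endif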
static void smt_to_llc(struct s_smc *smc, SMbuf *mb)
{
u_char fc ;
DB_RX(4, "send a queued frame to the llc layer");
smc->os.hwm.r.len = mb->sm_len ;
smc->os.hwm.r.mb_pos = smtod(mb,char *) ;
fc = *smc->os.hwm.r.mb_pos ;
(void)mac_drv_rx_init(smc,(int)mb->sm_len,(int)fc,
smc->os.hwm.r.mb_pos,(int)mb->sm_len) ;
smt_free_mbuf(smc,mb) ;
}
/*
* BEGIN_MANUAL_ENTRY(hwm_rx_frag)
* void hwm_rx_frag(smc,virt,phys,len,frame_status)
*
* function MACRO (hardware module, hwmtm.h)
* This function calls dma_master for preparing the
* system hardware for the DMA transfer and initializes
* the current RxD with the length and the physical and
* virtual address of the fragment. Furthermore, it sets the
* STF and EOF bits depending on the frame status byte,
* switches the OWN flag of the RxD, so that it is owned by the
* adapter and issues an rx_start.
*
* para virt virtual pointer to the fragment
* len the length of the fragment
* frame_status status of the frame, see design description
*
* NOTE: It is possible to call this function with a fragment length
* of zero.
*
* END_MANUAL_ENTRY
*/
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
int frame_status)
{
struct s_smt_fp_rxd volatile *r ;
__le32 rbctrl;
NDD_TRACE("RHfB",virt,len,frame_status) ;
DB_RX(2, "hwm_rx_frag: len = %d, frame_status = %x", len, frame_status);
r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
r->rxd_virt = virt ;
r->rxd_rbadr = cpu_to_le32(phys) ;
rbctrl = cpu_to_le32( (((__u32)frame_status &
(FIRST_FRAG|LAST_FRAG))<<26) |
(((u_long) frame_status & FIRST_FRAG) << 21) |
BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
r->rxd_rbctrl = rbctrl ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
outpd(ADDR(B0_R1_CSR),CSR_START) ;
smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
}
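/*
 * Illustrative sketch (added for clarity, not part of the driver): packing
 * a buffer-control word the way hwm_rx_frag() does above - frame-status
 * flags shifted into the upper bits, the fragment length in the lower
 * bits.  FIRST/LAST are assumed stand-ins for FIRST_FRAG/LAST_FRAG, and
 * the OWN/CHECK/IRQ bits of the real control word are left out.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define FIRST	0x1
#define LAST	0x2

int main(void)
{
	unsigned int frame_status = FIRST | LAST;
	unsigned int len = 1514;
	uint32_t ctrl = ((uint32_t)(frame_status & (FIRST | LAST)) << 26) | len;

	printf("control word = 0x%08x (len back = %u)\n",
	       (unsigned int)ctrl, (unsigned int)(ctrl & 0xffff));
	return 0;
}
#endif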
/*
 *	BEGIN_MANUAL_ENTRY(mac_drv_clear_rx_queue)
*
* void mac_drv_clear_rx_queue(smc)
* struct s_smc *smc ;
*
* function DOWNCALL (hardware module, hwmtm.c)
* mac_drv_clear_rx_queue is called by the OS-specific module
* after it has issued a card_stop.
* In this case, the frames in the receive queue are obsolete and
 *	should be removed. To remove them, mac_drv_clear_rx_queue
* calls dma_master for each RxD and mac_drv_clear_rxd for each
* receive buffer.
*
* NOTE: calling sequence card_stop:
* CLI_FBI(), card_stop(),
* mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(),
*
* NOTE: The caller is responsible that the BMUs are idle
* when this function is called.
*
* END_MANUAL_ENTRY
*/
void mac_drv_clear_rx_queue(struct s_smc *smc)
{
struct s_smt_fp_rxd volatile *r ;
struct s_smt_fp_rxd volatile *next_rxd ;
struct s_smt_rx_queue *queue ;
int frag_count ;
int i ;
if (smc->hw.hw_state != STOPPED) {
SK_BREAK() ;
SMT_PANIC(smc,HWM_E0012,HWM_E0012_MSG) ;
return ;
}
queue = smc->hw.fp.rx[QUEUE_R1] ;
DB_RX(5, "clear_rx_queue");
/*
* dma_complete and mac_drv_clear_rxd for all RxDs / receive buffers
*/
r = queue->rx_curr_get ;
while (queue->rx_used) {
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
DB_RX(5, "switch OWN bit of RxD 0x%p", r);
r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
frag_count = 1 ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
r = r->rxd_next ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
while (r != queue->rx_curr_put &&
!(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
DB_RX(5, "Check STF bit in %p", r);
r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
r = r->rxd_next ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
frag_count++ ;
}
DB_RX(5, "STF bit found");
next_rxd = r ;
for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
DB_RX(5, "dma_complete for RxD %p", r);
dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
}
DB_RX(5, "mac_drv_clear_rxd: RxD %p frag_count %d",
queue->rx_curr_get, frag_count);
mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;
queue->rx_curr_get = next_rxd ;
queue->rx_used -= frag_count ;
queue->rx_free += frag_count ;
}
}
/*
-------------------------------------------------------------
SEND FUNCTIONS:
-------------------------------------------------------------
*/
/*
* BEGIN_MANUAL_ENTRY(hwm_tx_init)
* int hwm_tx_init(smc,fc,frag_count,frame_len,frame_status)
*
* function DOWN_CALL (hardware module, hwmtm.c)
* hwm_tx_init checks if the frame can be sent through the
* corresponding send queue.
*
* para fc the frame control. To determine through which
* send queue the frame should be transmitted.
* 0x50 - 0x57: asynchronous LLC frame
* 0xD0 - 0xD7: synchronous LLC frame
* 0x41, 0x4F: SMT frame to the network
* 0x42: SMT frame to the network and to the local SMT
* 0x43: SMT frame to the local SMT
* frag_count count of the fragments for this frame
* frame_len length of the frame
* frame_status status of the frame, the send queue bit is already
* specified
*
* return frame_status
*
* END_MANUAL_ENTRY
*/
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
int frame_status)
{
NDD_TRACE("THiB",fc,frag_count,frame_len) ;
smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ;
smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ;
smc->os.hwm.tx_len = frame_len ;
DB_TX(3, "hwm_tx_init: fc = %x, len = %d", fc, frame_len);
if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
frame_status |= LAN_TX ;
}
else {
switch (fc) {
case FC_SMT_INFO :
case FC_SMT_NSA :
frame_status |= LAN_TX ;
break ;
case FC_SMT_LOC :
frame_status |= LOC_TX ;
break ;
case FC_SMT_LAN_LOC :
frame_status |= LAN_TX | LOC_TX ;
break ;
default :
SMT_PANIC(smc,HWM_E0010,HWM_E0010_MSG) ;
}
}
if (!smc->hw.mac_ring_is_up) {
frame_status &= ~LAN_TX ;
frame_status |= RING_DOWN ;
DB_TX(2, "Ring is down: terminate LAN_TX");
}
if (frag_count > smc->os.hwm.tx_p->tx_free) {
#ifndef NDIS_OS2
mac_drv_clear_txd(smc) ;
if (frag_count > smc->os.hwm.tx_p->tx_free) {
DB_TX(2, "Out of TxDs, terminate LAN_TX");
frame_status &= ~LAN_TX ;
frame_status |= OUT_OF_TXD ;
}
#else
DB_TX(2, "Out of TxDs, terminate LAN_TX");
frame_status &= ~LAN_TX ;
frame_status |= OUT_OF_TXD ;
#endif
}
DB_TX(3, "frame_status = %x", frame_status);
NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
return frame_status;
}
/*
* BEGIN_MANUAL_ENTRY(hwm_tx_frag)
* void hwm_tx_frag(smc,virt,phys,len,frame_status)
*
* function DOWNCALL (hardware module, hwmtm.c)
* If the frame should be sent to the LAN, this function calls
* dma_master, fills the current TxD with the virtual and the
* physical address, sets the STF and EOF bits dependent on
* the frame status, and requests the BMU to start the
* transmit.
* If the frame should be sent to the local SMT, an SMT_MBuf
* is allocated if the FIRST_FRAG bit is set in the frame_status.
* The fragment of the frame is copied into the SMT MBuf.
* The function smt_received_pack is called if the LAST_FRAG
* bit is set in the frame_status word.
*
* para virt virtual pointer to the fragment
* len the length of the fragment
* frame_status status of the frame, see design description
*
* return nothing returned, no parameter is modified
*
* NOTE: It is possible to invoke this macro with a fragment length
* of zero.
*
* END_MANUAL_ENTRY
*/
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
int frame_status)
{
struct s_smt_fp_txd volatile *t ;
struct s_smt_tx_queue *queue ;
__le32 tbctrl ;
queue = smc->os.hwm.tx_p ;
NDD_TRACE("THfB",virt,len,frame_status) ;
/* Bug fix: AF / May 31 1999 (#missing)
* snmpinfo problem reported by IBM is caused by invalid
* t-pointer (txd) if LAN_TX is not set but LOC_TX only.
* Set: t = queue->tx_curr_put here !
*/
t = queue->tx_curr_put ;
DB_TX(2, "hwm_tx_frag: len = %d, frame_status = %x", len, frame_status);
if (frame_status & LAN_TX) {
/* '*t' is already defined */
DB_TX(3, "LAN_TX: TxD = %p, virt = %p", t, virt);
t->txd_virt = virt ;
t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
t->txd_tbadr = cpu_to_le32(phys) ;
tbctrl = cpu_to_le32((((__u32)frame_status &
(FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
BMU_OWN|BMU_CHECK |len) ;
t->txd_tbctrl = tbctrl ;
#ifndef AIX
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
outpd(queue->tx_bmu_ctl,CSR_START) ;
#else /* ifndef AIX */
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
if (frame_status & QUEUE_A0) {
outpd(ADDR(B0_XA_CSR),CSR_START) ;
}
else {
outpd(ADDR(B0_XS_CSR),CSR_START) ;
}
#endif
queue->tx_free-- ;
queue->tx_used++ ;
queue->tx_curr_put = t->txd_next ;
if (frame_status & LAST_FRAG) {
smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
}
}
if (frame_status & LOC_TX) {
DB_TX(3, "LOC_TX:");
if (frame_status & FIRST_FRAG) {
if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) {
smc->hw.fp.err_stats.err_no_buf++ ;
DB_TX(4, "No SMbuf; transmit terminated");
}
else {
smc->os.hwm.tx_data =
smtod(smc->os.hwm.tx_mb,char *) - 1 ;
#ifdef USE_OS_CPY
#ifdef PASS_1ST_TXD_2_TX_COMP
hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
smc->os.hwm.tx_len) ;
#endif
#endif
}
}
if (smc->os.hwm.tx_mb) {
#ifndef USE_OS_CPY
DB_TX(3, "copy fragment into MBuf");
memcpy(smc->os.hwm.tx_data,virt,len) ;
smc->os.hwm.tx_data += len ;
#endif
if (frame_status & LAST_FRAG) {
#ifdef USE_OS_CPY
#ifndef PASS_1ST_TXD_2_TX_COMP
/*
* hwm_cpy_txd2mb(txd,data,len) copies 'len'
* bytes from the virtual pointer in 'rxd'
* to 'data'. The virtual pointer of the
* os-specific tx-buffer should be written
* in the LAST txd.
*/
hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
smc->os.hwm.tx_len) ;
#endif /* nPASS_1ST_TXD_2_TX_COMP */
#endif /* USE_OS_CPY */
smc->os.hwm.tx_data =
smtod(smc->os.hwm.tx_mb,char *) - 1 ;
*(char *)smc->os.hwm.tx_mb->sm_data =
*smc->os.hwm.tx_data ;
smc->os.hwm.tx_data++ ;
smc->os.hwm.tx_mb->sm_len =
smc->os.hwm.tx_len - 1 ;
DB_TX(3, "pass LLC frame to SMT");
smt_received_pack(smc,smc->os.hwm.tx_mb,
RD_FS_LOCAL) ;
}
}
}
NDD_TRACE("THfE",t,queue->tx_free,0) ;
}
/*
* queues a receive for later send
*/
static void queue_llc_rx(struct s_smc *smc, SMbuf *mb)
{
DB_GEN(4, "queue_llc_rx: mb = %p", mb);
smc->os.hwm.queued_rx_frames++ ;
mb->sm_next = (SMbuf *)NULL ;
if (smc->os.hwm.llc_rx_pipe == NULL) {
smc->os.hwm.llc_rx_pipe = mb ;
}
else {
smc->os.hwm.llc_rx_tail->sm_next = mb ;
}
smc->os.hwm.llc_rx_tail = mb ;
/*
	 * force a timer IRQ to receive the data
*/
if (!smc->os.hwm.isr_flag) {
smt_force_irq(smc) ;
}
}
/*
* get a SMbuf from the llc_rx_queue
*/
static SMbuf *get_llc_rx(struct s_smc *smc)
{
SMbuf *mb ;
if ((mb = smc->os.hwm.llc_rx_pipe)) {
smc->os.hwm.queued_rx_frames-- ;
smc->os.hwm.llc_rx_pipe = mb->sm_next ;
}
DB_GEN(4, "get_llc_rx: mb = 0x%p", mb);
return mb;
}
/*
 * queues a transmit SMT MBuf for the time during which the MBuf is
 * queued in the TxD ring
*/
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb)
{
DB_GEN(4, "_rx: queue_txd_mb = %p", mb);
smc->os.hwm.queued_txd_mb++ ;
mb->sm_next = (SMbuf *)NULL ;
if (smc->os.hwm.txd_tx_pipe == NULL) {
smc->os.hwm.txd_tx_pipe = mb ;
}
else {
smc->os.hwm.txd_tx_tail->sm_next = mb ;
}
smc->os.hwm.txd_tx_tail = mb ;
}
/*
* get a SMbuf from the txd_tx_queue
*/
static SMbuf *get_txd_mb(struct s_smc *smc)
{
SMbuf *mb ;
if ((mb = smc->os.hwm.txd_tx_pipe)) {
smc->os.hwm.queued_txd_mb-- ;
smc->os.hwm.txd_tx_pipe = mb->sm_next ;
}
DB_GEN(4, "get_txd_mb: mb = 0x%p", mb);
return mb;
}
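/*
 * Illustrative sketch (added for clarity, not part of the driver): the
 * head/tail FIFO used by the queue_llc_rx()/get_llc_rx() and
 * queue_txd_mb()/get_txd_mb() helpers above, reduced to a plain singly
 * linked list.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

struct node {
	struct node *next;
	int id;
};

static struct node *head, *tail;

static void fifo_put(struct node *n)
{
	n->next = NULL;
	if (!head)
		head = n;
	else
		tail->next = n;
	tail = n;
}

static struct node *fifo_get(void)
{
	struct node *n = head;

	if (n)
		head = n->next;
	return n;
}

int main(void)
{
	struct node a = { NULL, 1 }, b = { NULL, 2 };
	struct node *n;

	fifo_put(&a);
	fifo_put(&b);
	while ((n = fifo_get()))
		printf("dequeued %d\n", n->id);
	return 0;
}
#endif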
/*
* SMT Send function
*/
void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
{
char far *data ;
int len ;
int n ;
int i ;
int frag_count ;
int frame_status ;
SK_LOC_DECL(char far,*virt[3]) ;
int frag_len[3] ;
struct s_smt_tx_queue *queue ;
struct s_smt_fp_txd volatile *t ;
u_long phys ;
__le32 tbctrl;
NDD_TRACE("THSB",mb,fc,0) ;
DB_TX(4, "smt_send_mbuf: mb = 0x%p, fc = 0x%x", mb, fc);
mb->sm_off-- ; /* set to fc */
mb->sm_len++ ; /* + fc */
data = smtod(mb,char *) ;
*data = fc ;
if (fc == FC_SMT_LOC)
*data = FC_SMT_INFO ;
/*
* determine the frag count and the virt addresses of the frags
*/
frag_count = 0 ;
len = mb->sm_len ;
while (len) {
n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ;
if (n >= len) {
n = len ;
}
DB_TX(5, "frag: virt/len = 0x%p/%d", data, n);
virt[frag_count] = data ;
frag_len[frag_count] = n ;
frag_count++ ;
len -= n ;
data += n ;
}
/*
* determine the frame status
*/
queue = smc->hw.fp.tx[QUEUE_A0] ;
if (fc == FC_BEACON || fc == FC_SMT_LOC) {
frame_status = LOC_TX ;
}
else {
frame_status = LAN_TX ;
if ((smc->os.hwm.pass_NSA &&(fc == FC_SMT_NSA)) ||
(smc->os.hwm.pass_SMT &&(fc == FC_SMT_INFO)))
frame_status |= LOC_TX ;
}
if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
frame_status &= ~LAN_TX;
if (frame_status) {
DB_TX(2, "Ring is down: terminate LAN_TX");
}
else {
DB_TX(2, "Ring is down: terminate transmission");
smt_free_mbuf(smc,mb) ;
return ;
}
}
DB_TX(5, "frame_status = 0x%x", frame_status);
if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) {
mb->sm_use_count = 2 ;
}
if (frame_status & LAN_TX) {
t = queue->tx_curr_put ;
frame_status |= FIRST_FRAG ;
for (i = 0; i < frag_count; i++) {
DB_TX(5, "init TxD = 0x%p", t);
if (i == frag_count-1) {
frame_status |= LAST_FRAG ;
t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
(((__u32)(mb->sm_len-1)&3) << 27)) ;
}
t->txd_virt = virt[i] ;
phys = dma_master(smc, (void far *)virt[i],
frag_len[i], DMA_RD|SMT_BUF) ;
t->txd_tbadr = cpu_to_le32(phys) ;
tbctrl = cpu_to_le32((((__u32)frame_status &
(FIRST_FRAG|LAST_FRAG)) << 26) |
BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
t->txd_tbctrl = tbctrl ;
#ifndef AIX
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
outpd(queue->tx_bmu_ctl,CSR_START) ;
#else
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
outpd(ADDR(B0_XA_CSR),CSR_START) ;
#endif
frame_status &= ~FIRST_FRAG ;
queue->tx_curr_put = t = t->txd_next ;
queue->tx_free-- ;
queue->tx_used++ ;
}
smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
queue_txd_mb(smc,mb) ;
}
if (frame_status & LOC_TX) {
DB_TX(5, "pass Mbuf to LLC queue");
queue_llc_rx(smc,mb) ;
}
/*
* We need to unqueue the free SMT_MBUFs here, because it may
	 * be that the SMT wants to send more than 1 frame for one down call
*/
mac_drv_clear_txd(smc) ;
NDD_TRACE("THSE",t,queue->tx_free,frag_count) ;
}
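/*
 * Illustrative sketch (added for clarity, not part of the driver): the
 * page-boundary fragmentation loop used at the top of smt_send_mbuf().
 * PAGE is an example page size standing in for SMT_PAGESIZE, and the
 * start address and frame length are example data.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define PAGE	4096u

int main(void)
{
	uintptr_t addr = 0x1000u + 300;		/* example buffer address */
	unsigned int len = 5000;		/* example frame length */
	int frag = 0;

	while (len) {
		unsigned int n = PAGE - (unsigned int)(addr & (PAGE - 1));

		if (n > len)
			n = len;
		printf("fragment %d: %u bytes\n", frag++, n);
		len -= n;
		addr += n;
	}
	return 0;
}
#endif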
/* BEGIN_MANUAL_ENTRY(mac_drv_clear_txd)
* void mac_drv_clear_txd(smc)
*
* function DOWNCALL (hardware module, hwmtm.c)
* mac_drv_clear_txd searches in both send queues for TxD's
* which were finished by the adapter. It calls dma_complete
* for each TxD. If the last fragment of an LLC frame is
* reached, it calls mac_drv_tx_complete to release the
* send buffer.
*
* return nothing
*
* END_MANUAL_ENTRY
*/
static void mac_drv_clear_txd(struct s_smc *smc)
{
struct s_smt_tx_queue *queue ;
struct s_smt_fp_txd volatile *t1 ;
struct s_smt_fp_txd volatile *t2 = NULL ;
SMbuf *mb ;
u_long tbctrl ;
int i ;
int frag_count ;
int n ;
NDD_TRACE("THcB",0,0,0) ;
for (i = QUEUE_S; i <= QUEUE_A0; i++) {
queue = smc->hw.fp.tx[i] ;
t1 = queue->tx_curr_get ;
DB_TX(5, "clear_txd: QUEUE = %d (0=sync/1=async)", i);
for ( ; ; ) {
frag_count = 0 ;
do {
DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
DB_TX(5, "check OWN/EOF bit of TxD 0x%p", t1);
tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));
if (tbctrl & BMU_OWN || !queue->tx_used){
DB_TX(4, "End of TxDs queue %d", i);
goto free_next_queue ; /* next queue */
}
t1 = t1->txd_next ;
frag_count++ ;
} while (!(tbctrl & BMU_EOF)) ;
t1 = queue->tx_curr_get ;
for (n = frag_count; n; n--) {
tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
dma_complete(smc,
(union s_fp_descr volatile *) t1,
(int) (DMA_RD |
((tbctrl & BMU_SMT_TX) >> 18))) ;
t2 = t1 ;
t1 = t1->txd_next ;
}
if (tbctrl & BMU_SMT_TX) {
mb = get_txd_mb(smc) ;
smt_free_mbuf(smc,mb) ;
}
else {
#ifndef PASS_1ST_TXD_2_TX_COMP
DB_TX(4, "mac_drv_tx_comp for TxD 0x%p", t2);
mac_drv_tx_complete(smc,t2) ;
#else
DB_TX(4, "mac_drv_tx_comp for TxD 0x%x",
queue->tx_curr_get);
mac_drv_tx_complete(smc,queue->tx_curr_get) ;
#endif
}
queue->tx_curr_get = t1 ;
queue->tx_free += frag_count ;
queue->tx_used -= frag_count ;
}
free_next_queue: ;
}
NDD_TRACE("THcE",0,0,0) ;
}
/*
 *	BEGIN_MANUAL_ENTRY(mac_drv_clear_tx_queue)
*
* void mac_drv_clear_tx_queue(smc)
* struct s_smc *smc ;
*
* function DOWNCALL (hardware module, hwmtm.c)
* mac_drv_clear_tx_queue is called from the SMT when
* the RMT state machine has entered the ISOLATE state.
* This function is also called by the os-specific module
* after it has called the function card_stop().
* In this case, the frames in the send queues are obsolete and
* should be removed.
*
* note calling sequence:
* CLI_FBI(), card_stop(),
* mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(),
*
* NOTE: The caller is responsible that the BMUs are idle
* when this function is called.
*
* END_MANUAL_ENTRY
*/
void mac_drv_clear_tx_queue(struct s_smc *smc)
{
struct s_smt_fp_txd volatile *t ;
struct s_smt_tx_queue *queue ;
int tx_used ;
int i ;
if (smc->hw.hw_state != STOPPED) {
SK_BREAK() ;
SMT_PANIC(smc,HWM_E0011,HWM_E0011_MSG) ;
return ;
}
for (i = QUEUE_S; i <= QUEUE_A0; i++) {
queue = smc->hw.fp.tx[i] ;
DB_TX(5, "clear_tx_queue: QUEUE = %d (0=sync/1=async)", i);
/*
* switch the OWN bit of all pending frames to the host
*/
t = queue->tx_curr_get ;
tx_used = queue->tx_used ;
while (tx_used) {
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
DB_TX(5, "switch OWN bit of TxD 0x%p", t);
t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
t = t->txd_next ;
tx_used-- ;
}
}
/*
* release all TxD's for both send queues
*/
mac_drv_clear_txd(smc) ;
for (i = QUEUE_S; i <= QUEUE_A0; i++) {
queue = smc->hw.fp.tx[i] ;
t = queue->tx_curr_get ;
/*
* write the phys pointer of the NEXT descriptor into the
* BMU's current address descriptor pointer and set
* tx_curr_get and tx_curr_put to this position
*/
if (i == QUEUE_S) {
outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
}
else {
outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
}
queue->tx_curr_put = queue->tx_curr_get->txd_next ;
queue->tx_curr_get = queue->tx_curr_put ;
}
}
/*
-------------------------------------------------------------
TEST FUNCTIONS:
-------------------------------------------------------------
*/
#ifdef DEBUG
/*
* BEGIN_MANUAL_ENTRY(mac_drv_debug_lev)
* void mac_drv_debug_lev(smc,flag,lev)
*
* function DOWNCALL (drvsr.c)
* To get a special debug info the user can assign a debug level
* to any debug flag.
*
* para flag debug flag, possible values are:
* = 0: reset all debug flags (the defined level is
* ignored)
* = 1: debug.d_smtf
* = 2: debug.d_smt
* = 3: debug.d_ecm
* = 4: debug.d_rmt
* = 5: debug.d_cfm
* = 6: debug.d_pcm
*
* = 10: debug.d_os.hwm_rx (hardware module receive path)
* = 11: debug.d_os.hwm_tx(hardware module transmit path)
* = 12: debug.d_os.hwm_gen(hardware module general flag)
*
* lev debug level
*
* END_MANUAL_ENTRY
*/
void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev)
{
switch(flag) {
case (int)NULL:
DB_P.d_smtf = DB_P.d_smt = DB_P.d_ecm = DB_P.d_rmt = 0 ;
DB_P.d_cfm = 0 ;
DB_P.d_os.hwm_rx = DB_P.d_os.hwm_tx = DB_P.d_os.hwm_gen = 0 ;
#ifdef SBA
DB_P.d_sba = 0 ;
#endif
#ifdef ESS
DB_P.d_ess = 0 ;
#endif
break ;
case DEBUG_SMTF:
DB_P.d_smtf = lev ;
break ;
case DEBUG_SMT:
DB_P.d_smt = lev ;
break ;
case DEBUG_ECM:
DB_P.d_ecm = lev ;
break ;
case DEBUG_RMT:
DB_P.d_rmt = lev ;
break ;
case DEBUG_CFM:
DB_P.d_cfm = lev ;
break ;
case DEBUG_PCM:
DB_P.d_pcm = lev ;
break ;
case DEBUG_SBA:
#ifdef SBA
DB_P.d_sba = lev ;
#endif
break ;
case DEBUG_ESS:
#ifdef ESS
DB_P.d_ess = lev ;
#endif
break ;
case DB_HWM_RX:
DB_P.d_os.hwm_rx = lev ;
break ;
case DB_HWM_TX:
DB_P.d_os.hwm_tx = lev ;
break ;
case DB_HWM_GEN:
DB_P.d_os.hwm_gen = lev ;
break ;
default:
break ;
}
}
#endif
| linux-master | drivers/net/fddi/skfp/hwmtm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* File Name:
* skfddi.c
*
* Copyright Information:
* Copyright SysKonnect 1998,1999.
*
* The information in this file is provided "AS IS" without warranty.
*
* Abstract:
* A Linux device driver supporting the SysKonnect FDDI PCI controller
 *	family.
*
* Maintainers:
* CG Christoph Goos ([email protected])
*
* Contributors:
* DM David S. Miller
*
 * Address all questions to:
* [email protected]
*
* The technical manual for the adapters is available from SysKonnect's
* web pages: www.syskonnect.com
 *	Go to "Support" and search the Knowledge Base for "manual".
*
* Driver Architecture:
* The driver architecture is based on the DEC FDDI driver by
* Lawrence V. Stefani and several ethernet drivers.
* I also used an existing Windows NT miniport driver.
* All hardware dependent functions are handled by the SysKonnect
* Hardware Module.
 *	The only header files that are directly related to this source
* are skfddi.c, h/types.h, h/osdef1st.h, h/targetos.h.
* The others belong to the SysKonnect FDDI Hardware Module and
 *	should preferably not be changed.
*
* Modification History:
* Date Name Description
* 02-Mar-98 CG Created.
*
* 10-Mar-99 CG Support for 2.2.x added.
* 25-Mar-99 CG Corrected IRQ routing for SMP (APIC)
* 26-Oct-99 CG Fixed compilation error on 2.2.13
* 12-Nov-99 CG Source code release
* 22-Nov-99 CG Included in kernel source.
* 07-May-00 DM 64 bit fixes, new dma interface
* 31-Jul-03 DB Audit copy_*_user in skfp_ioctl
* Daniele Bellucci <[email protected]>
* 03-Dec-03 SH Convert to PCI device model
*
* Compilation options (-Dxxx):
* DRIVERDEBUG print lots of messages to log file
* DUMPPACKETS print received/transmitted packets to logfile
*
* Tested cpu architectures:
* - i386
* - sparc64
*/
/* Version information string - should be updated prior to */
/* each new release!!! */
#define VERSION "2.07"
static const char * const boot_msg =
"SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
" SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
/* Include files */
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include "h/types.h"
#undef ADDR // undo Linux definition
#include "h/skfbi.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/smtstate.h"
// Define module-wide (static) routines
static int skfp_driver_init(struct net_device *dev);
static int skfp_open(struct net_device *dev);
static int skfp_close(struct net_device *dev);
static irqreturn_t skfp_interrupt(int irq, void *dev_id);
static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
static void skfp_ctl_set_multicast_list(struct net_device *dev);
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq,
void __user *data, int cmd);
static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
struct net_device *dev);
static void send_queued_packets(struct s_smc *smc);
static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
static void ResetAdapter(struct s_smc *smc);
// Functions needed by the hardware module
void *mac_drv_get_space(struct s_smc *smc, u_int size);
void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
int flag);
void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
void llc_restart_tx(struct s_smc *smc);
void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
int frag_count, int len);
void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
int frag_count);
void mac_drv_fill_rxd(struct s_smc *smc);
void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
int frag_count);
int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
int la_len);
void dump_data(unsigned char *Data, int length);
// External functions from the hardware module
extern u_int mac_drv_check_space(void);
extern int mac_drv_init(struct s_smc *smc);
extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
int len, int frame_status);
extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
int frame_len, int frame_status);
extern void fddi_isr(struct s_smc *smc);
extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
int len, int frame_status);
extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
extern void mac_drv_clear_rx_queue(struct s_smc *smc);
extern void enable_tx_irq(struct s_smc *smc, u_short queue);
static const struct pci_device_id skfddi_pci_tbl[] = {
{ PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mirko Lindner <[email protected]>");
// Define module-wide (static) variables
static int num_boards; /* total number of adapters configured */
static const struct net_device_ops skfp_netdev_ops = {
.ndo_open = skfp_open,
.ndo_stop = skfp_close,
.ndo_start_xmit = skfp_send_pkt,
.ndo_get_stats = skfp_ctl_get_stats,
.ndo_set_rx_mode = skfp_ctl_set_multicast_list,
.ndo_set_mac_address = skfp_ctl_set_mac_address,
.ndo_siocdevprivate = skfp_siocdevprivate,
};
/*
* =================
* = skfp_init_one =
* =================
*
* Overview:
* Probes for supported FDDI PCI controllers
*
* Returns:
* Condition code
*
* Arguments:
* pdev - pointer to PCI device information
*
* Functional Description:
* This is now called by PCI driver registration process
* for each board found.
*
* Return Codes:
* 0 - This device (fddi0, fddi1, etc) configured successfully
* -ENODEV - No devices present, or no SysKonnect FDDI PCI device
* present for this device name
*
*
* Side Effects:
* Device structures for FDDI adapters (fddi0, fddi1, etc) are
* initialized and the board resources are read and stored in
* the device structure.
*/
static int skfp_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *dev;
struct s_smc *smc; /* board pointer */
void __iomem *mem;
int err;
pr_debug("entering skfp_init_one\n");
if (num_boards == 0)
printk("%s\n", boot_msg);
err = pci_enable_device(pdev);
if (err)
return err;
err = pci_request_regions(pdev, "skfddi");
if (err)
goto err_out1;
pci_set_master(pdev);
#ifdef MEM_MAPPED_IO
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
printk(KERN_ERR "skfp: region is not an MMIO resource\n");
err = -EIO;
goto err_out2;
}
mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
#else
	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
printk(KERN_ERR "skfp: region is not PIO resource\n");
err = -EIO;
goto err_out2;
}
mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
#endif
if (!mem) {
printk(KERN_ERR "skfp: Unable to map register, "
"FDDI adapter will be disabled.\n");
err = -EIO;
goto err_out2;
}
dev = alloc_fddidev(sizeof(struct s_smc));
if (!dev) {
printk(KERN_ERR "skfp: Unable to allocate fddi device, "
"FDDI adapter will be disabled.\n");
err = -ENOMEM;
goto err_out3;
}
dev->irq = pdev->irq;
dev->netdev_ops = &skfp_netdev_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
/* Initialize board structure with bus-specific info */
smc = netdev_priv(dev);
smc->os.dev = dev;
smc->os.bus_type = SK_BUS_TYPE_PCI;
smc->os.pdev = *pdev;
smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
smc->os.MaxFrameSize = MAX_FRAME_SIZE;
smc->os.dev = dev;
smc->hw.slot = -1;
smc->hw.iop = mem;
smc->os.ResetRequested = FALSE;
skb_queue_head_init(&smc->os.SendSkbQueue);
dev->base_addr = (unsigned long)mem;
err = skfp_driver_init(dev);
if (err)
goto err_out4;
err = register_netdev(dev);
if (err)
goto err_out5;
++num_boards;
pci_set_drvdata(pdev, dev);
if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
(pdev->subsystem_device & 0xff00) == 0x5800)
printk("%s: SysKonnect FDDI PCI adapter"
" found (SK-%04X)\n", dev->name,
pdev->subsystem_device);
else
printk("%s: FDDI PCI adapter found\n", dev->name);
return 0;
err_out5:
if (smc->os.SharedMemAddr)
dma_free_coherent(&pdev->dev, smc->os.SharedMemSize,
smc->os.SharedMemAddr,
smc->os.SharedMemDMA);
dma_free_coherent(&pdev->dev, MAX_FRAME_SIZE,
smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
err_out4:
free_netdev(dev);
err_out3:
#ifdef MEM_MAPPED_IO
iounmap(mem);
#else
ioport_unmap(mem);
#endif
err_out2:
pci_release_regions(pdev);
err_out1:
pci_disable_device(pdev);
return err;
}
/*
* Called for each adapter board from pci_unregister_driver
*/
static void skfp_remove_one(struct pci_dev *pdev)
{
struct net_device *p = pci_get_drvdata(pdev);
struct s_smc *lp = netdev_priv(p);
unregister_netdev(p);
if (lp->os.SharedMemAddr) {
dma_free_coherent(&pdev->dev,
lp->os.SharedMemSize,
lp->os.SharedMemAddr,
lp->os.SharedMemDMA);
lp->os.SharedMemAddr = NULL;
}
if (lp->os.LocalRxBuffer) {
dma_free_coherent(&pdev->dev,
MAX_FRAME_SIZE,
lp->os.LocalRxBuffer,
lp->os.LocalRxBufferDMA);
lp->os.LocalRxBuffer = NULL;
}
#ifdef MEM_MAPPED_IO
iounmap(lp->hw.iop);
#else
ioport_unmap(lp->hw.iop);
#endif
pci_release_regions(pdev);
free_netdev(p);
pci_disable_device(pdev);
}
/*
* ====================
* = skfp_driver_init =
* ====================
*
* Overview:
* Initializes remaining adapter board structure information
* and makes sure adapter is in a safe state prior to skfp_open().
*
* Returns:
* Condition code
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* This function allocates additional resources such as the host memory
* blocks needed by the adapter.
* The adapter is also reset. The OS must call skfp_open() to open
* the adapter and bring it on-line.
*
* Return Codes:
* 0 - initialization succeeded
 *   -EIO - initialization failed
*/
static int skfp_driver_init(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *bp = &smc->os;
int err = -EIO;
pr_debug("entering skfp_driver_init\n");
// set the io address in private structures
bp->base_addr = dev->base_addr;
// Get the interrupt level from the PCI Configuration Table
smc->hw.irq = dev->irq;
spin_lock_init(&bp->DriverLock);
// Allocate invalid frame
bp->LocalRxBuffer = dma_alloc_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
&bp->LocalRxBufferDMA,
GFP_ATOMIC);
if (!bp->LocalRxBuffer) {
printk("could not allocate mem for ");
printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
goto fail;
}
// Determine the required size of the 'shared' memory area.
bp->SharedMemSize = mac_drv_check_space();
pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize);
if (bp->SharedMemSize > 0) {
bp->SharedMemSize += 16; // for descriptor alignment
bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
bp->SharedMemSize,
&bp->SharedMemDMA,
GFP_ATOMIC);
if (!bp->SharedMemAddr) {
printk("could not allocate mem for ");
printk("hardware module: %ld byte\n",
bp->SharedMemSize);
goto fail;
}
} else {
bp->SharedMemAddr = NULL;
}
bp->SharedMemHeap = 0;
card_stop(smc); // Reset adapter.
pr_debug("mac_drv_init()..\n");
if (mac_drv_init(smc) != 0) {
pr_debug("mac_drv_init() failed\n");
goto fail;
}
read_address(smc, NULL);
pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
eth_hw_addr_set(dev, smc->hw.fddi_canon_addr.a);
smt_reset_defaults(smc, 0);
return 0;
fail:
if (bp->SharedMemAddr) {
dma_free_coherent(&bp->pdev.dev,
bp->SharedMemSize,
bp->SharedMemAddr,
bp->SharedMemDMA);
bp->SharedMemAddr = NULL;
}
if (bp->LocalRxBuffer) {
dma_free_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
bp->LocalRxBuffer, bp->LocalRxBufferDMA);
bp->LocalRxBuffer = NULL;
}
return err;
} // skfp_driver_init
/*
* =============
* = skfp_open =
* =============
*
* Overview:
* Opens the adapter
*
* Returns:
* Condition code
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* This function brings the adapter to an operational state.
*
* Return Codes:
* 0 - Adapter was successfully opened
* -EAGAIN - Could not register IRQ
*/
static int skfp_open(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
int err;
pr_debug("entering skfp_open\n");
/* Register IRQ - support shared interrupts by passing device ptr */
err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
dev->name, dev);
if (err)
return err;
/*
* Set current address to factory MAC address
*
* Note: We've already done this step in skfp_driver_init.
* However, it's possible that a user has set a node
* address override, then closed and reopened the
* adapter. Unless we reset the device address field
* now, we'll continue to use the existing modified
* address.
*/
read_address(smc, NULL);
eth_hw_addr_set(dev, smc->hw.fddi_canon_addr.a);
init_smt(smc, NULL);
smt_online(smc, 1);
STI_FBI();
/* Clear local multicast address tables */
mac_clear_multicast(smc);
/* Disable promiscuous filter settings */
mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
netif_start_queue(dev);
return 0;
} // skfp_open
/*
* ==============
* = skfp_close =
* ==============
*
* Overview:
* Closes the device/module.
*
* Returns:
* Condition code
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* This routine closes the adapter and brings it to a safe state.
* The interrupt service routine is deregistered with the OS.
* The adapter can be opened again with another call to skfp_open().
*
* Return Codes:
* Always return 0.
*
* Assumptions:
* No further requests for this adapter are made after this routine is
* called. skfp_open() can be called to reset and reinitialize the
* adapter.
*/
static int skfp_close(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *bp = &smc->os;
CLI_FBI();
smt_reset_defaults(smc, 1);
card_stop(smc);
mac_drv_clear_tx_queue(smc);
mac_drv_clear_rx_queue(smc);
netif_stop_queue(dev);
/* Deregister (free) IRQ */
free_irq(dev->irq, dev);
skb_queue_purge(&bp->SendSkbQueue);
bp->QueueSkb = MAX_TX_QUEUE_LEN;
return 0;
} // skfp_close
/*
* ==================
* = skfp_interrupt =
* ==================
*
* Overview:
* Interrupt processing routine
*
* Returns:
* None
*
* Arguments:
* irq - interrupt vector
* dev_id - pointer to device information
*
* Functional Description:
* This routine calls the interrupt processing routine for this adapter. It
* disables and reenables adapter interrupts, as appropriate. We can support
* shared interrupts since the incoming dev_id pointer provides our device
* structure context. All the real work is done in the hardware module.
*
* Return Codes:
* None
*
* Assumptions:
* The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
* on Intel-based systems) is done by the operating system outside this
* routine.
*
* System interrupts are enabled through this call.
*
* Side Effects:
* Interrupts are disabled, then reenabled at the adapter.
*/
static irqreturn_t skfp_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct s_smc *smc; /* private board structure pointer */
skfddi_priv *bp;
smc = netdev_priv(dev);
bp = &smc->os;
// IRQs enabled or disabled ?
if (inpd(ADDR(B0_IMSK)) == 0) {
// IRQs are disabled: must be shared interrupt
return IRQ_NONE;
}
// Note: At this point, IRQs are enabled.
if ((inpd(ISR_A) & smc->hw.is_imask) == 0) { // IRQ?
// Adapter did not issue an IRQ: must be shared interrupt
return IRQ_NONE;
}
CLI_FBI(); // Disable IRQs from our adapter.
spin_lock(&bp->DriverLock);
// Call interrupt handler in hardware module (HWM).
fddi_isr(smc);
if (smc->os.ResetRequested) {
ResetAdapter(smc);
smc->os.ResetRequested = FALSE;
}
spin_unlock(&bp->DriverLock);
STI_FBI(); // Enable IRQs from our adapter.
return IRQ_HANDLED;
} // skfp_interrupt
/*
* ======================
* = skfp_ctl_get_stats =
* ======================
*
* Overview:
* Get statistics for FDDI adapter
*
* Returns:
* Pointer to FDDI statistics structure
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* Gets current MIB objects from adapter, then
* returns FDDI statistics structure as defined
* in if_fddi.h.
*
* Note: Since the FDDI statistics structure is
* still new and the device structure doesn't
* have an FDDI-specific get statistics handler,
* we'll return the FDDI statistics structure as
* a pointer to an Ethernet statistics structure.
* That way, at least the first part of the statistics
* structure can be decoded properly.
* We'll have to pay attention to this routine as the
* device structure becomes more mature and LAN media
* independent.
*
*/
static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
{
struct s_smc *bp = netdev_priv(dev);
/* Fill the bp->stats structure with driver-maintained counters */
bp->os.MacStat.port_bs_flag[0] = 0x1234;
bp->os.MacStat.port_bs_flag[1] = 0x5678;
	// goos: need to fill out fddi statistics
#if 0
/* Get FDDI SMT MIB objects */
/* Fill the bp->stats structure with the SMT MIB object values */
memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
/* Fill the bp->stats structure with the FDDI counter values */
bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
#endif
return (struct net_device_stats *)&bp->os.MacStat;
} // ctl_get_stat
/*
* ==============================
* = skfp_ctl_set_multicast_list =
* ==============================
*
* Overview:
* Enable/Disable LLC frame promiscuous mode reception
* on the adapter and/or update multicast address table.
*
* Returns:
* None
*
* Arguments:
* dev - pointer to device information
*
* Functional Description:
* This function acquires the driver lock and only calls
* skfp_ctl_set_multicast_list_wo_lock then.
* This routine follows a fairly simple algorithm for setting the
* adapter filters and CAM:
*
* if IFF_PROMISC flag is set
* enable promiscuous mode
* else
* disable promiscuous mode
* if number of multicast addresses <= max. multicast number
* add mc addresses to adapter table
* else
* enable promiscuous mode
* update adapter filters
*
* Assumptions:
* Multicast addresses are presented in canonical (LSB) format.
*
* Side Effects:
* On-board adapter filters are updated.
*/
static void skfp_ctl_set_multicast_list(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *bp = &smc->os;
unsigned long Flags;
spin_lock_irqsave(&bp->DriverLock, Flags);
skfp_ctl_set_multicast_list_wo_lock(dev);
spin_unlock_irqrestore(&bp->DriverLock, Flags);
} // skfp_ctl_set_multicast_list
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
struct netdev_hw_addr *ha;
/* Enable promiscuous mode, if necessary */
if (dev->flags & IFF_PROMISC) {
mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
pr_debug("PROMISCUOUS MODE ENABLED\n");
}
/* Else, update multicast address table */
else {
mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
pr_debug("PROMISCUOUS MODE DISABLED\n");
// Reset all MC addresses
mac_clear_multicast(smc);
mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);
if (dev->flags & IFF_ALLMULTI) {
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
pr_debug("ENABLE ALL MC ADDRESSES\n");
} else if (!netdev_mc_empty(dev)) {
if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
/* use exact filtering */
// point to first multicast addr
netdev_for_each_mc_addr(ha, dev) {
mac_add_multicast(smc,
(struct fddi_addr *)ha->addr,
1);
pr_debug("ENABLE MC ADDRESS: %pMF\n",
ha->addr);
}
} else { // more MC addresses than HW supports
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
pr_debug("ENABLE ALL MC ADDRESSES\n");
}
} else { // no MC addresses
pr_debug("DISABLE ALL MC ADDRESSES\n");
}
/* Update adapter filters */
mac_update_multicast(smc);
}
} // skfp_ctl_set_multicast_list_wo_lock
/*
* ===========================
* = skfp_ctl_set_mac_address =
* ===========================
*
* Overview:
 *   Set a new MAC address on the adapter and update the dev_addr field in the device table.
*
* Returns:
* None
*
* Arguments:
* dev - pointer to device information
* addr - pointer to sockaddr structure containing unicast address to set
*
* Assumptions:
* The address pointed to by addr->sa_data is a valid unicast
* address and is presented in canonical (LSB) format.
*/
static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
{
struct s_smc *smc = netdev_priv(dev);
struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
skfddi_priv *bp = &smc->os;
unsigned long Flags;
dev_addr_set(dev, p_sockaddr->sa_data);
spin_lock_irqsave(&bp->DriverLock, Flags);
ResetAdapter(smc);
spin_unlock_irqrestore(&bp->DriverLock, Flags);
return 0; /* always return zero */
} // skfp_ctl_set_mac_address
/*
* =======================
* = skfp_siocdevprivate =
* =======================
*
* Overview:
*
* Perform IOCTL call functions here. Some are privileged operations and the
* effective uid is checked in those cases.
*
* Returns:
* status value
* 0 - success
* other - failure
*
* Arguments:
* dev - pointer to device information
* rq - pointer to ioctl request structure
 *   data - user-space pointer to the struct s_skfp_ioctl argument
 *   cmd  - ioctl command number (the private command code is read from ioc.cmd)
*
*/
static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *lp = &smc->os;
struct s_skfp_ioctl ioc;
int status = 0;
if (copy_from_user(&ioc, data, sizeof(struct s_skfp_ioctl)))
return -EFAULT;
if (in_compat_syscall())
return -EOPNOTSUPP;
switch (ioc.cmd) {
case SKFP_GET_STATS: /* Get the driver statistics */
ioc.len = sizeof(lp->MacStat);
status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
? -EFAULT : 0;
break;
case SKFP_CLR_STATS: /* Zero out the driver statistics */
if (!capable(CAP_NET_ADMIN)) {
status = -EPERM;
} else {
memset(&lp->MacStat, 0, sizeof(lp->MacStat));
}
break;
default:
printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd);
status = -EOPNOTSUPP;
} // switch
return status;
} // skfp_siocdevprivate
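/*
 * Illustrative user-space sketch (not driver code): one plausible way to
 * reach the SKFP_GET_STATS branch above via SIOCDEVPRIVATE. It assumes the
 * driver's private header defining struct s_skfp_ioctl (with the cmd/len/data
 * members referenced by skfp_siocdevprivate) and the SKFP_GET_STATS command
 * code is available to the application; the buffer must be large enough for
 * the driver's MacStat structure, since the driver sets the length itself.
 */
#if 0	/* user-space example only, never compiled into the driver */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <string.h>
#include <unistd.h>

static int skfp_read_stats(const char *ifname, void *buf, unsigned int buflen)
{
	struct s_skfp_ioctl ioc;	/* layout taken from the driver header */
	struct ifreq ifr;
	int fd, ret;

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = SKFP_GET_STATS;	/* handled in the switch above */
	ioc.len = buflen;
	ioc.data = buf;			/* driver copies MacStat here */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ioc;	/* arrives as 'data' in the handler */

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, SIOCDEVPRIVATE, &ifr);
	close(fd);
	return ret;
}
#endif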
/*
* =====================
* = skfp_send_pkt =
* =====================
*
* Overview:
 *   Queues a packet for transmission and tries to transmit it.
*
* Returns:
* Condition code
*
* Arguments:
* skb - pointer to sk_buff to queue for transmission
* dev - pointer to device information
*
* Functional Description:
* Here we assume that an incoming skb transmit request
* is contained in a single physically contiguous buffer
* in which the virtual address of the start of packet
* (skb->data) can be converted to a physical address
* by using dma_map_single().
*
* We have an internal queue for packets we can not send
* immediately. Packets in this queue can be given to the
* adapter if transmit buffers are freed.
*
* We can't free the skb until after it's been DMA'd
* out by the adapter, so we'll keep it in the driver and
* return it in mac_drv_tx_complete.
*
* Return Codes:
* 0 - driver has queued and/or sent packet
* 1 - caller should requeue the sk_buff for later transmission
*
* Assumptions:
* The entire packet is stored in one physically
* contiguous buffer which is not cached and whose
* 32-bit physical address can be determined.
*
* It's vital that this routine is NOT reentered for the
* same board and that the OS is not in another section of
* code (eg. skfp_interrupt) for the same board on a
* different thread.
*
* Side Effects:
* None
*/
static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *bp = &smc->os;
pr_debug("skfp_send_pkt\n");
/*
* Verify that incoming transmit request is OK
*
* Note: The packet size check is consistent with other
* Linux device drivers, although the correct packet
* size should be verified before calling the
* transmit routine.
*/
if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
bp->MacStat.gen.tx_errors++; /* bump error counter */
// dequeue packets from xmt queue and send them
netif_start_queue(dev);
dev_kfree_skb(skb);
return NETDEV_TX_OK; /* return "success" */
}
if (bp->QueueSkb == 0) { // return with tbusy set: queue full
netif_stop_queue(dev);
return NETDEV_TX_BUSY;
}
bp->QueueSkb--;
skb_queue_tail(&bp->SendSkbQueue, skb);
send_queued_packets(netdev_priv(dev));
if (bp->QueueSkb == 0) {
netif_stop_queue(dev);
}
return NETDEV_TX_OK;
} // skfp_send_pkt
/*
* =======================
* = send_queued_packets =
* =======================
*
* Overview:
* Send packets from the driver queue as long as there are some and
* transmit resources are available.
*
* Returns:
* None
*
* Arguments:
* smc - pointer to smc (adapter) structure
*
* Functional Description:
* Take a packet from queue if there is any. If not, then we are done.
* Check if there are resources to send the packet. If not, requeue it
* and exit.
* Set packet descriptor flags and give packet to adapter.
* Check if any send resources can be freed (we do not use the
* transmit complete interrupt).
*/
static void send_queued_packets(struct s_smc *smc)
{
skfddi_priv *bp = &smc->os;
struct sk_buff *skb;
unsigned char fc;
int queue;
struct s_smt_fp_txd *txd; // Current TxD.
dma_addr_t dma_address;
unsigned long Flags;
int frame_status; // HWM tx frame status.
pr_debug("send queued packets\n");
for (;;) {
// send first buffer from queue
skb = skb_dequeue(&bp->SendSkbQueue);
if (!skb) {
pr_debug("queue empty\n");
return;
} // queue empty !
spin_lock_irqsave(&bp->DriverLock, Flags);
fc = skb->data[0];
queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
#ifdef ESS
// Check if the frame may/must be sent as a synchronous frame.
if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
// It's an LLC frame.
if (!smc->ess.sync_bw_available)
fc &= ~FC_SYNC_BIT; // No bandwidth available.
else { // Bandwidth is available.
if (smc->mib.fddiESSSynchTxMode) {
// Send as sync. frame.
fc |= FC_SYNC_BIT;
}
}
}
#endif // ESS
frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);
if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
// Unable to send the frame.
if ((frame_status & RING_DOWN) != 0) {
// Ring is down.
pr_debug("Tx attempt while ring down.\n");
} else if ((frame_status & OUT_OF_TXD) != 0) {
pr_debug("%s: out of TXDs.\n", bp->dev->name);
} else {
pr_debug("%s: out of transmit resources",
bp->dev->name);
}
// Note: We will retry the operation as soon as
// transmit resources become available.
skb_queue_head(&bp->SendSkbQueue, skb);
spin_unlock_irqrestore(&bp->DriverLock, Flags);
return; // Packet has been queued.
} // if (unable to send frame)
bp->QueueSkb++; // one packet less in local queue
// source address in packet ?
CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);
txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);
dma_address = dma_map_single(&(&bp->pdev)->dev, skb->data,
skb->len, DMA_TO_DEVICE);
if (frame_status & LAN_TX) {
txd->txd_os.skb = skb; // save skb
txd->txd_os.dma_addr = dma_address; // save dma mapping
}
hwm_tx_frag(smc, skb->data, dma_address, skb->len,
frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);
if (!(frame_status & LAN_TX)) { // local only frame
dma_unmap_single(&(&bp->pdev)->dev, dma_address,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_irq(skb);
}
spin_unlock_irqrestore(&bp->DriverLock, Flags);
} // for
return; // never reached
} // send_queued_packets
/************************
*
* CheckSourceAddress
*
* Verify if the source address is set. Insert it if necessary.
*
************************/
static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
{
unsigned char SRBit;
if ((((unsigned long) frame[1 + 6]) & ~0x01) != 0) // source routing bit
return;
if ((unsigned short) frame[1 + 10] != 0)
return;
SRBit = frame[1 + 6] & 0x01;
memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
frame[8] |= SRBit;
} // CheckSourceAddress
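/*
 * Byte-offset sketch of the header indexed above (see the FDDI frame format
 * comment in mac_drv_rx_complete below): frame[0] is the Frame Control
 * field, frame[1..6] the destination address and frame[7..12] the source
 * address, so frame[1 + 6] is the first source-address byte, whose least
 * significant bit is saved in SRBit before the station address is copied in.
 */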
/************************
*
* ResetAdapter
*
* Reset the adapter and bring it back to operational mode.
* Args
* smc - A pointer to the SMT context struct.
* Out
* Nothing.
*
************************/
static void ResetAdapter(struct s_smc *smc)
{
pr_debug("[fddi: ResetAdapter]\n");
// Stop the adapter.
card_stop(smc); // Stop all activity.
// Clear the transmit and receive descriptor queues.
mac_drv_clear_tx_queue(smc);
mac_drv_clear_rx_queue(smc);
// Restart the adapter.
smt_reset_defaults(smc, 1); // Initialize the SMT module.
init_smt(smc, (smc->os.dev)->dev_addr); // Initialize the hardware.
smt_online(smc, 1); // Insert into the ring again.
STI_FBI();
// Restore original receive mode (multicasts, promiscuous, etc.).
skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
} // ResetAdapter
//--------------- functions called by hardware module ----------------
/************************
*
* llc_restart_tx
*
* The hardware driver calls this routine when the transmit complete
* interrupt bits (end of frame) for the synchronous or asynchronous
* queue is set.
*
* NOTE The hardware driver calls this function also if no packets are queued.
* The routine must be able to handle this case.
* Args
* smc - A pointer to the SMT context struct.
* Out
* Nothing.
*
************************/
void llc_restart_tx(struct s_smc *smc)
{
skfddi_priv *bp = &smc->os;
pr_debug("[llc_restart_tx]\n");
// Try to send queued packets
spin_unlock(&bp->DriverLock);
send_queued_packets(smc);
spin_lock(&bp->DriverLock);
netif_start_queue(bp->dev);// system may send again if it was blocked
} // llc_restart_tx
/************************
*
* mac_drv_get_space
*
* The hardware module calls this function to allocate the memory
* for the SMT MBufs if the define MB_OUTSIDE_SMC is specified.
* Args
* smc - A pointer to the SMT context struct.
*
* size - Size of memory in bytes to allocate.
* Out
* != 0 A pointer to the virtual address of the allocated memory.
* == 0 Allocation error.
*
************************/
void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
{
void *virt;
pr_debug("mac_drv_get_space (%d bytes), ", size);
virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
printk("Unexpected SMT memory size requested: %d\n", size);
return NULL;
}
smc->os.SharedMemHeap += size; // Move heap pointer.
pr_debug("mac_drv_get_space end\n");
pr_debug("virt addr: %lx\n", (ulong) virt);
pr_debug("bus addr: %lx\n", (ulong)
(smc->os.SharedMemDMA +
((char *) virt - (char *)smc->os.SharedMemAddr)));
return virt;
} // mac_drv_get_space
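/*
 * Shape of the allocator above, for illustration only (not driver code):
 * SharedMemHeap is a simple bump pointer into the coherent block allocated
 * in skfp_driver_init(); memory handed out this way is never freed
 * individually, the whole block is released in skfp_remove_one().
 */
#if 0	/* minimal stand-alone sketch of the bump-allocation pattern */
struct bump_pool {
	char *base;		/* start of the shared area */
	unsigned long heap;	/* next free offset */
	unsigned long size;	/* total size of the area */
};

static void *bump_alloc(struct bump_pool *p, unsigned int sz)
{
	void *virt = p->base + p->heap;

	if (p->heap + sz > p->size)	/* mirrors the overflow check above */
		return NULL;
	p->heap += sz;			/* move heap pointer */
	return virt;
}
#endif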
/************************
*
* mac_drv_get_desc_mem
*
* This function is called by the hardware dependent module.
* It allocates the memory for the RxD and TxD descriptors.
*
* This memory must be non-cached, non-movable and non-swappable.
* This memory should start at a physical page boundary.
* Args
* smc - A pointer to the SMT context struct.
*
* size - Size of memory in bytes to allocate.
* Out
* != 0 A pointer to the virtual address of the allocated memory.
* == 0 Allocation error.
*
************************/
void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
{
char *virt;
pr_debug("mac_drv_get_desc_mem\n");
// Descriptor memory must be aligned on 16-byte boundary.
virt = mac_drv_get_space(smc, size);
size = (u_int) (16 - (((unsigned long) virt) & 15UL));
size = size % 16;
pr_debug("Allocate %u bytes alignment gap ", size);
pr_debug("for descriptor memory.\n");
if (!mac_drv_get_space(smc, size)) {
printk("fddi: Unable to align descriptor memory.\n");
return NULL;
}
return virt + size;
} // mac_drv_get_desc_mem
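/*
 * The alignment gap computed above is simply the distance to the next
 * 16-byte boundary. A minimal stand-alone illustration (assumption: plain
 * C helper, not part of the driver):
 */
#if 0
static unsigned int align16_gap(unsigned long addr)
{
	/* addr already aligned -> 0; addr ending in ...9 -> 7; etc. */
	return (16 - (addr & 15UL)) % 16;
}
#endif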
/************************
*
* mac_drv_virt2phys
*
* Get the physical address of a given virtual address.
* Args
* smc - A pointer to the SMT context struct.
*
* virt - A (virtual) pointer into our 'shared' memory area.
* Out
* Physical address of the given virtual address.
*
************************/
unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
{
return smc->os.SharedMemDMA +
((char *) virt - (char *)smc->os.SharedMemAddr);
} // mac_drv_virt2phys
/************************
*
* dma_master
*
 *	The HWM calls this function when the driver carries out a DMA
 *	transfer. If the OS-specific module must prepare the system hardware
 *	for the DMA transfer, it should do so in this function.
*
* The hardware module calls this dma_master if it wants to send an SMT
* frame. This means that the virt address passed in here is part of
* the 'shared' memory area.
* Args
* smc - A pointer to the SMT context struct.
*
* virt - The virtual address of the data.
*
* len - The length in bytes of the data.
*
* flag - Indicates the transmit direction and the buffer type:
* DMA_RD (0x01) system RAM ==> adapter buffer memory
* DMA_WR (0x02) adapter buffer memory ==> system RAM
* SMT_BUF (0x80) SMT buffer
*
* >> NOTE: SMT_BUF and DMA_RD are always set for PCI. <<
* Out
 *	Returns the physical address for the DMA transfer.
*
************************/
u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
{
return smc->os.SharedMemDMA +
((char *) virt - (char *)smc->os.SharedMemAddr);
} // dma_master
/************************
*
* dma_complete
*
* The hardware module calls this routine when it has completed a DMA
* transfer. If the operating system dependent module has set up the DMA
* channel via dma_master() (e.g. Windows NT or AIX) it should clean up
* the DMA channel.
* Args
* smc - A pointer to the SMT context struct.
*
* descr - A pointer to a TxD or RxD, respectively.
*
* flag - Indicates the DMA transfer direction / SMT buffer:
* DMA_RD (0x01) system RAM ==> adapter buffer memory
* DMA_WR (0x02) adapter buffer memory ==> system RAM
* SMT_BUF (0x80) SMT buffer (managed by HWM)
* Out
* Nothing.
*
************************/
void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
{
/* For TX buffers, there are two cases. If it is an SMT transmit
* buffer, there is nothing to do since we use consistent memory
* for the 'shared' memory area. The other case is for normal
* transmit packets given to us by the networking stack, and in
* that case we cleanup the PCI DMA mapping in mac_drv_tx_complete
* below.
*
* For RX buffers, we have to unmap dynamic PCI DMA mappings here
* because the hardware module is about to potentially look at
* the contents of the buffer. If we did not call the PCI DMA
* unmap first, the hardware module could read inconsistent data.
*/
if (flag & DMA_WR) {
skfddi_priv *bp = &smc->os;
volatile struct s_smt_fp_rxd *r = &descr->r;
/* If SKB is NULL, we used the local buffer. */
if (r->rxd_os.skb && r->rxd_os.dma_addr) {
int MaxFrameSize = bp->MaxFrameSize;
dma_unmap_single(&(&bp->pdev)->dev,
r->rxd_os.dma_addr, MaxFrameSize,
DMA_FROM_DEVICE);
r->rxd_os.dma_addr = 0;
}
}
} // dma_complete
/************************
*
* mac_drv_tx_complete
*
* Transmit of a packet is complete. Release the tx staging buffer.
*
* Args
* smc - A pointer to the SMT context struct.
*
* txd - A pointer to the last TxD which is used by the frame.
* Out
* Returns nothing.
*
************************/
void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
{
struct sk_buff *skb;
pr_debug("entering mac_drv_tx_complete\n");
// Check if this TxD points to a skb
if (!(skb = txd->txd_os.skb)) {
pr_debug("TXD with no skb assigned.\n");
return;
}
txd->txd_os.skb = NULL;
// release the DMA mapping
dma_unmap_single(&(&smc->os.pdev)->dev, txd->txd_os.dma_addr,
skb->len, DMA_TO_DEVICE);
txd->txd_os.dma_addr = 0;
smc->os.MacStat.gen.tx_packets++; // Count transmitted packets.
smc->os.MacStat.gen.tx_bytes+=skb->len; // Count bytes
// free the skb
dev_kfree_skb_irq(skb);
pr_debug("leaving mac_drv_tx_complete\n");
} // mac_drv_tx_complete
/************************
*
* dump packets to logfile
*
************************/
#ifdef DUMPPACKETS
void dump_data(unsigned char *Data, int length)
{
printk(KERN_INFO "---Packet start---\n");
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, Data, min_t(size_t, length, 64), false);
printk(KERN_INFO "------------------\n");
} // dump_data
#else
#define dump_data(data,len)
#endif // DUMPPACKETS
/************************
*
* mac_drv_rx_complete
*
* The hardware module calls this function if an LLC frame is received
* in a receive buffer. Also the SMT, NSA, and directed beacon frames
* from the network will be passed to the LLC layer by this function
* if passing is enabled.
*
* mac_drv_rx_complete forwards the frame to the LLC layer if it should
* be received. It also fills the RxD ring with new receive buffers if
* some can be queued.
* Args
* smc - A pointer to the SMT context struct.
*
* rxd - A pointer to the first RxD which is used by the receive frame.
*
* frag_count - Count of RxDs used by the received frame.
*
* len - Frame length.
* Out
* Nothing.
*
************************/
void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
int frag_count, int len)
{
skfddi_priv *bp = &smc->os;
struct sk_buff *skb;
unsigned char *virt, *cp;
unsigned short ri;
u_int RifLength;
pr_debug("entering mac_drv_rx_complete (len=%d)\n", len);
if (frag_count != 1) { // This is not allowed to happen.
printk("fddi: Multi-fragment receive!\n");
goto RequeueRxd; // Re-use the given RXD(s).
}
skb = rxd->rxd_os.skb;
if (!skb) {
pr_debug("No skb in rxd\n");
smc->os.MacStat.gen.rx_errors++;
goto RequeueRxd;
}
virt = skb->data;
// The DMA mapping was released in dma_complete above.
dump_data(skb->data, len);
/*
* FDDI Frame format:
* +-------+-------+-------+------------+--------+------------+
* | FC[1] | DA[6] | SA[6] | RIF[0..18] | LLC[3] | Data[0..n] |
* +-------+-------+-------+------------+--------+------------+
*
* FC = Frame Control
* DA = Destination Address
* SA = Source Address
* RIF = Routing Information Field
* LLC = Logical Link Control
*/
// Remove Routing Information Field (RIF), if present.
if ((virt[1 + 6] & FDDI_RII) == 0)
RifLength = 0;
else {
int n;
		// goos: RIF removal still has to be tested
pr_debug("RIF found\n");
// Get RIF length from Routing Control (RC) field.
cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header.
ri = ntohs(*((__be16 *) cp));
RifLength = ri & FDDI_RCF_LEN_MASK;
if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
printk("fddi: Invalid RIF.\n");
goto RequeueRxd; // Discard the frame.
}
virt[1 + 6] &= ~FDDI_RII; // Clear RII bit.
// regions overlap
virt = cp + RifLength;
for (n = FDDI_MAC_HDR_LEN; n; n--)
*--virt = *--cp;
		// adjust skb->data pointer
skb_pull(skb, RifLength);
len -= RifLength;
RifLength = 0;
}
// Count statistics.
smc->os.MacStat.gen.rx_packets++; // Count indicated receive
// packets.
smc->os.MacStat.gen.rx_bytes+=len; // Count bytes.
// virt points to header again
if (virt[1] & 0x01) { // Check group (multicast) bit.
smc->os.MacStat.gen.multicast++;
}
// deliver frame to system
rxd->rxd_os.skb = NULL;
skb_trim(skb, len);
skb->protocol = fddi_type_trans(skb, bp->dev);
netif_rx(skb);
HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
return;
RequeueRxd:
pr_debug("Rx: re-queue RXD.\n");
mac_drv_requeue_rxd(smc, rxd, frag_count);
smc->os.MacStat.gen.rx_errors++; // Count receive packets
// not indicated.
} // mac_drv_rx_complete
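/*
 * RIF-removal sketch (illustrative): the MAC header is FC + DA + SA =
 * 13 bytes (FDDI_MAC_HDR_LEN). If the Routing Control word read behind it
 * reports e.g. a 6-byte RIF, the 13 header bytes are copied forward over
 * those 6 bytes, skb_pull(skb, 6) advances skb->data, and the LLC header
 * once again directly follows the MAC header.
 */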
/************************
*
* mac_drv_requeue_rxd
*
* The hardware module calls this function to request the OS-specific
* module to queue the receive buffer(s) represented by the pointer
* to the RxD and the frag_count into the receive queue again. This
* buffer was filled with an invalid frame or an SMT frame.
* Args
* smc - A pointer to the SMT context struct.
*
* rxd - A pointer to the first RxD which is used by the receive frame.
*
* frag_count - Count of RxDs used by the received frame.
* Out
* Nothing.
*
************************/
void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
int frag_count)
{
volatile struct s_smt_fp_rxd *next_rxd;
volatile struct s_smt_fp_rxd *src_rxd;
struct sk_buff *skb;
int MaxFrameSize;
unsigned char *v_addr;
dma_addr_t b_addr;
if (frag_count != 1) // This is not allowed to happen.
printk("fddi: Multi-fragment requeue!\n");
MaxFrameSize = smc->os.MaxFrameSize;
src_rxd = rxd;
for (; frag_count > 0; frag_count--) {
next_rxd = src_rxd->rxd_next;
rxd = HWM_GET_CURR_RXD(smc);
skb = src_rxd->rxd_os.skb;
if (skb == NULL) { // this should not happen
pr_debug("Requeue with no skb in rxd!\n");
skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
if (skb) {
// we got a skb
rxd->rxd_os.skb = skb;
skb_reserve(skb, 3);
skb_put(skb, MaxFrameSize);
v_addr = skb->data;
b_addr = dma_map_single(&(&smc->os.pdev)->dev,
v_addr, MaxFrameSize,
DMA_FROM_DEVICE);
rxd->rxd_os.dma_addr = b_addr;
} else {
// no skb available, use local buffer
pr_debug("Queueing invalid buffer!\n");
rxd->rxd_os.skb = NULL;
v_addr = smc->os.LocalRxBuffer;
b_addr = smc->os.LocalRxBufferDMA;
}
} else {
// we use skb from old rxd
rxd->rxd_os.skb = skb;
v_addr = skb->data;
b_addr = dma_map_single(&(&smc->os.pdev)->dev, v_addr,
MaxFrameSize, DMA_FROM_DEVICE);
rxd->rxd_os.dma_addr = b_addr;
}
hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
FIRST_FRAG | LAST_FRAG);
src_rxd = next_rxd;
}
} // mac_drv_requeue_rxd
/************************
*
* mac_drv_fill_rxd
*
* The hardware module calls this function at initialization time
* to fill the RxD ring with receive buffers. It is also called by
* mac_drv_rx_complete if rx_free is large enough to queue some new
* receive buffers into the RxD ring. mac_drv_fill_rxd queues new
* receive buffers as long as enough RxDs and receive buffers are
* available.
* Args
* smc - A pointer to the SMT context struct.
* Out
* Nothing.
*
************************/
void mac_drv_fill_rxd(struct s_smc *smc)
{
int MaxFrameSize;
unsigned char *v_addr;
unsigned long b_addr;
struct sk_buff *skb;
volatile struct s_smt_fp_rxd *rxd;
pr_debug("entering mac_drv_fill_rxd\n");
// Walk through the list of free receive buffers, passing receive
// buffers to the HWM as long as RXDs are available.
MaxFrameSize = smc->os.MaxFrameSize;
// Check if there is any RXD left.
while (HWM_GET_RX_FREE(smc) > 0) {
pr_debug(".\n");
rxd = HWM_GET_CURR_RXD(smc);
skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
if (skb) {
// we got a skb
skb_reserve(skb, 3);
skb_put(skb, MaxFrameSize);
v_addr = skb->data;
b_addr = dma_map_single(&(&smc->os.pdev)->dev, v_addr,
MaxFrameSize, DMA_FROM_DEVICE);
rxd->rxd_os.dma_addr = b_addr;
} else {
// no skb available, use local buffer
// System has run out of buffer memory, but we want to
// keep the receiver running in hope of better times.
// Multiple descriptors may point to this local buffer,
// so data in it must be considered invalid.
pr_debug("Queueing invalid buffer!\n");
v_addr = smc->os.LocalRxBuffer;
b_addr = smc->os.LocalRxBufferDMA;
}
rxd->rxd_os.skb = skb;
// Pass receive buffer to HWM.
hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
FIRST_FRAG | LAST_FRAG);
}
pr_debug("leaving mac_drv_fill_rxd\n");
} // mac_drv_fill_rxd
/************************
*
* mac_drv_clear_rxd
*
* The hardware module calls this function to release unused
* receive buffers.
* Args
* smc - A pointer to the SMT context struct.
*
* rxd - A pointer to the first RxD which is used by the receive buffer.
*
* frag_count - Count of RxDs used by the receive buffer.
* Out
* Nothing.
*
************************/
void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
int frag_count)
{
struct sk_buff *skb;
pr_debug("entering mac_drv_clear_rxd\n");
if (frag_count != 1) // This is not allowed to happen.
printk("fddi: Multi-fragment clear!\n");
for (; frag_count > 0; frag_count--) {
skb = rxd->rxd_os.skb;
if (skb != NULL) {
skfddi_priv *bp = &smc->os;
int MaxFrameSize = bp->MaxFrameSize;
dma_unmap_single(&(&bp->pdev)->dev,
rxd->rxd_os.dma_addr, MaxFrameSize,
DMA_FROM_DEVICE);
dev_kfree_skb(skb);
rxd->rxd_os.skb = NULL;
}
rxd = rxd->rxd_next; // Next RXD.
}
} // mac_drv_clear_rxd
/************************
*
* mac_drv_rx_init
*
* The hardware module calls this routine when an SMT or NSA frame of the
* local SMT should be delivered to the LLC layer.
*
* It is necessary to have this function, because there is no other way to
* copy the contents of SMT MBufs into receive buffers.
*
* mac_drv_rx_init allocates the required target memory for this frame,
* and receives the frame fragment by fragment by calling mac_drv_rx_frag.
* Args
* smc - A pointer to the SMT context struct.
*
* len - The length (in bytes) of the received frame (FC, DA, SA, Data).
*
* fc - The Frame Control field of the received frame.
*
* look_ahead - A pointer to the lookahead data buffer (may be NULL).
*
* la_len - The length of the lookahead data stored in the lookahead
* buffer (may be zero).
* Out
* Always returns zero (0).
*
************************/
int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
char *look_ahead, int la_len)
{
struct sk_buff *skb;
pr_debug("entering mac_drv_rx_init(len=%d)\n", len);
// "Received" a SMT or NSA frame of the local SMT.
if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
pr_debug("fddi: Discard invalid local SMT frame\n");
pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
len, la_len, (unsigned long) look_ahead);
return 0;
}
skb = alloc_skb(len + 3, GFP_ATOMIC);
if (!skb) {
pr_debug("fddi: Local SMT: skb memory exhausted.\n");
return 0;
}
skb_reserve(skb, 3);
skb_put(skb, len);
skb_copy_to_linear_data(skb, look_ahead, len);
// deliver frame to system
skb->protocol = fddi_type_trans(skb, smc->os.dev);
netif_rx(skb);
return 0;
} // mac_drv_rx_init
/************************
*
* smt_timer_poll
*
* This routine is called periodically by the SMT module to clean up the
* driver.
*
* Return any queued frames back to the upper protocol layers if the ring
* is down.
* Args
* smc - A pointer to the SMT context struct.
* Out
* Nothing.
*
************************/
void smt_timer_poll(struct s_smc *smc)
{
} // smt_timer_poll
/************************
*
* ring_status_indication
*
* This function indicates a change of the ring state.
* Args
* smc - A pointer to the SMT context struct.
*
* status - The current ring status.
* Out
* Nothing.
*
************************/
void ring_status_indication(struct s_smc *smc, u_long status)
{
pr_debug("ring_status_indication( ");
if (status & RS_RES15)
pr_debug("RS_RES15 ");
if (status & RS_HARDERROR)
pr_debug("RS_HARDERROR ");
if (status & RS_SOFTERROR)
pr_debug("RS_SOFTERROR ");
if (status & RS_BEACON)
pr_debug("RS_BEACON ");
if (status & RS_PATHTEST)
pr_debug("RS_PATHTEST ");
if (status & RS_SELFTEST)
pr_debug("RS_SELFTEST ");
if (status & RS_RES9)
pr_debug("RS_RES9 ");
if (status & RS_DISCONNECT)
pr_debug("RS_DISCONNECT ");
if (status & RS_RES7)
pr_debug("RS_RES7 ");
if (status & RS_DUPADDR)
pr_debug("RS_DUPADDR ");
if (status & RS_NORINGOP)
pr_debug("RS_NORINGOP ");
if (status & RS_VERSION)
pr_debug("RS_VERSION ");
if (status & RS_STUCKBYPASSS)
pr_debug("RS_STUCKBYPASSS ");
if (status & RS_EVENT)
pr_debug("RS_EVENT ");
if (status & RS_RINGOPCHANGE)
pr_debug("RS_RINGOPCHANGE ");
if (status & RS_RES0)
pr_debug("RS_RES0 ");
pr_debug("]\n");
} // ring_status_indication
/************************
*
* smt_get_time
*
* Gets the current time from the system.
* Args
* None.
* Out
* The current time in TICKS_PER_SECOND.
*
* TICKS_PER_SECOND has the unit 'count of timer ticks per second'. It is
* defined in "targetos.h". The definition of TICKS_PER_SECOND must comply
* to the time returned by smt_get_time().
*
************************/
unsigned long smt_get_time(void)
{
return jiffies;
} // smt_get_time
/************************
*
* smt_stat_counter
*
* Status counter update (ring_op, fifo full).
* Args
* smc - A pointer to the SMT context struct.
*
* stat - = 0: A ring operational change occurred.
* = 1: The FORMAC FIFO buffer is full / FIFO overflow.
* Out
* Nothing.
*
************************/
void smt_stat_counter(struct s_smc *smc, int stat)
{
// BOOLEAN RingIsUp ;
pr_debug("smt_stat_counter\n");
switch (stat) {
case 0:
pr_debug("Ring operational change.\n");
break;
case 1:
pr_debug("Receive fifo overflow.\n");
smc->os.MacStat.gen.rx_errors++;
break;
default:
pr_debug("Unknown status (%d).\n", stat);
break;
}
} // smt_stat_counter
/************************
*
* cfm_state_change
*
* Sets CFM state in custom statistics.
* Args
* smc - A pointer to the SMT context struct.
*
* c_state - Possible values are:
*
 *	SC0_ISOLATED, SC1_WRAP_A, SC2_WRAP_B, SC4_THRU_A, SC5_THRU_B,
 *	SC7_WRAP_S, SC9_C_WRAP_A, SC10_C_WRAP_B, SC11_C_WRAP_S
* Out
* Nothing.
*
************************/
void cfm_state_change(struct s_smc *smc, int c_state)
{
#ifdef DRIVERDEBUG
char *s;
switch (c_state) {
case SC0_ISOLATED:
s = "SC0_ISOLATED";
break;
case SC1_WRAP_A:
s = "SC1_WRAP_A";
break;
case SC2_WRAP_B:
s = "SC2_WRAP_B";
break;
case SC4_THRU_A:
s = "SC4_THRU_A";
break;
case SC5_THRU_B:
s = "SC5_THRU_B";
break;
case SC7_WRAP_S:
s = "SC7_WRAP_S";
break;
case SC9_C_WRAP_A:
s = "SC9_C_WRAP_A";
break;
case SC10_C_WRAP_B:
s = "SC10_C_WRAP_B";
break;
case SC11_C_WRAP_S:
s = "SC11_C_WRAP_S";
break;
default:
pr_debug("cfm_state_change: unknown %d\n", c_state);
return;
}
pr_debug("cfm_state_change: %s\n", s);
#endif // DRIVERDEBUG
} // cfm_state_change
/************************
*
* ecm_state_change
*
* Sets ECM state in custom statistics.
* Args
* smc - A pointer to the SMT context struct.
*
* e_state - Possible values are:
*
 *	EC0_OUT, EC1_IN, EC2_TRACE, EC3_LEAVE, EC4_PATH_TEST,
 *	EC5_INSERT, EC6_CHECK, EC7_DEINSERT
* Out
* Nothing.
*
************************/
void ecm_state_change(struct s_smc *smc, int e_state)
{
#ifdef DRIVERDEBUG
char *s;
switch (e_state) {
case EC0_OUT:
s = "EC0_OUT";
break;
case EC1_IN:
s = "EC1_IN";
break;
case EC2_TRACE:
s = "EC2_TRACE";
break;
case EC3_LEAVE:
s = "EC3_LEAVE";
break;
case EC4_PATH_TEST:
s = "EC4_PATH_TEST";
break;
case EC5_INSERT:
s = "EC5_INSERT";
break;
case EC6_CHECK:
s = "EC6_CHECK";
break;
case EC7_DEINSERT:
s = "EC7_DEINSERT";
break;
default:
s = "unknown";
break;
}
pr_debug("ecm_state_change: %s\n", s);
#endif //DRIVERDEBUG
} // ecm_state_change
/************************
*
* rmt_state_change
*
* Sets RMT state in custom statistics.
* Args
* smc - A pointer to the SMT context struct.
*
* r_state - Possible values are:
*
* RM0_ISOLATED, RM1_NON_OP, RM2_RING_OP, RM3_DETECT,
* RM4_NON_OP_DUP, RM5_RING_OP_DUP, RM6_DIRECTED, RM7_TRACE
* Out
* Nothing.
*
************************/
void rmt_state_change(struct s_smc *smc, int r_state)
{
#ifdef DRIVERDEBUG
char *s;
switch (r_state) {
case RM0_ISOLATED:
s = "RM0_ISOLATED";
break;
case RM1_NON_OP:
s = "RM1_NON_OP - not operational";
break;
case RM2_RING_OP:
s = "RM2_RING_OP - ring operational";
break;
case RM3_DETECT:
s = "RM3_DETECT - detect dupl addresses";
break;
case RM4_NON_OP_DUP:
s = "RM4_NON_OP_DUP - dupl. addr detected";
break;
case RM5_RING_OP_DUP:
s = "RM5_RING_OP_DUP - ring oper. with dupl. addr";
break;
case RM6_DIRECTED:
s = "RM6_DIRECTED - sending directed beacons";
break;
case RM7_TRACE:
s = "RM7_TRACE - trace initiated";
break;
default:
s = "unknown";
break;
}
pr_debug("[rmt_state_change: %s]\n", s);
#endif // DRIVERDEBUG
} // rmt_state_change
/************************
*
* drv_reset_indication
*
* This function is called by the SMT when it has detected a severe
* hardware problem. The driver should perform a reset on the adapter
* as soon as possible, but not from within this function.
* Args
* smc - A pointer to the SMT context struct.
* Out
* Nothing.
*
************************/
void drv_reset_indication(struct s_smc *smc)
{
pr_debug("entering drv_reset_indication\n");
smc->os.ResetRequested = TRUE; // Set flag.
} // drv_reset_indication
static struct pci_driver skfddi_pci_driver = {
.name = "skfddi",
.id_table = skfddi_pci_tbl,
.probe = skfp_init_one,
.remove = skfp_remove_one,
};
module_pci_driver(skfddi_pci_driver);
| linux-master | drivers/net/fddi/skfp/skfddi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
Parameter Management Frame processing for SMT 7.2
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/smt_p.h"
#define KERNEL
#include "h/smtstate.h"
#ifndef SLIM_SMT
static int smt_authorize(struct s_smc *smc, struct smt_header *sm);
static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm);
static const struct s_p_tab* smt_get_ptab(u_short para);
static int smt_mib_phys(struct s_smc *smc);
static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
int local, int set);
void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
int index, int local);
static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
int set, int local);
static int port_to_mib(struct s_smc *smc, int p);
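/* byte offsets into the SMT, MAC, PATH and PORT sections of the MIB */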
#define MOFFSS(e) offsetof(struct fddi_mib, e)
#define MOFFMS(e) offsetof(struct fddi_mib_m, e)
#define MOFFAS(e) offsetof(struct fddi_mib_a, e)
#define MOFFPS(e) offsetof(struct fddi_mib_p, e)
#define AC_G 0x01 /* Get */
#define AC_GR 0x02 /* Get/Set */
#define AC_S 0x04 /* Set */
#define AC_NA 0x08
#define AC_GROUP 0x10 /* Group */
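/* convert milliseconds to 80 ns byte clock units, e.g. MS2BCLK(165) == 2062500 (165 ms / 80 ns) */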
#define MS2BCLK(x) ((x)*12500L)
/*
	F	Flag (byte)
B byte
S u_short 16 bit
C Counter 32 bit
L Long 32 bit
T Timer_2 32 bit
	P	TimeStamp (8 byte, not swapped)
A LongAddress (6 byte)
E Enum 16 bit
R ResId 16 Bit
*/
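/*
 * The lower-case prefix characters 'b', 'w' and 'l' in a format string
 * select the staging variable used by smt_set_para() (byte_val, word_val
 * or long_val) and are ignored by smt_add_para(); the following
 * upper-case character selects the on-wire encoding.  "wS", for example,
 * stages a 16 bit value in word_val before the range checks are applied.
 */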
static const struct s_p_tab {
u_short p_num ; /* parameter code */
u_char p_access ; /* access rights */
u_short p_offset ; /* offset in mib */
char p_swap[3] ; /* format string */
} p_tab[] = {
/* StationIdGrp */
{ SMT_P100A,AC_GROUP } ,
{ SMT_P100B,AC_G, MOFFSS(fddiSMTStationId), "8" } ,
{ SMT_P100D,AC_G, MOFFSS(fddiSMTOpVersionId), "S" } ,
{ SMT_P100E,AC_G, MOFFSS(fddiSMTHiVersionId), "S" } ,
{ SMT_P100F,AC_G, MOFFSS(fddiSMTLoVersionId), "S" } ,
{ SMT_P1010,AC_G, MOFFSS(fddiSMTManufacturerData), "D" } ,
{ SMT_P1011,AC_GR, MOFFSS(fddiSMTUserData), "D" } ,
{ SMT_P1012,AC_G, MOFFSS(fddiSMTMIBVersionId), "S" } ,
/* StationConfigGrp */
{ SMT_P1014,AC_GROUP } ,
{ SMT_P1015,AC_G, MOFFSS(fddiSMTMac_Ct), "B" } ,
{ SMT_P1016,AC_G, MOFFSS(fddiSMTNonMaster_Ct), "B" } ,
{ SMT_P1017,AC_G, MOFFSS(fddiSMTMaster_Ct), "B" } ,
{ SMT_P1018,AC_G, MOFFSS(fddiSMTAvailablePaths), "B" } ,
{ SMT_P1019,AC_G, MOFFSS(fddiSMTConfigCapabilities),"S" } ,
{ SMT_P101A,AC_GR, MOFFSS(fddiSMTConfigPolicy), "wS" } ,
{ SMT_P101B,AC_GR, MOFFSS(fddiSMTConnectionPolicy),"wS" } ,
{ SMT_P101D,AC_GR, MOFFSS(fddiSMTTT_Notify), "wS" } ,
{ SMT_P101E,AC_GR, MOFFSS(fddiSMTStatRptPolicy), "bB" } ,
{ SMT_P101F,AC_GR, MOFFSS(fddiSMTTrace_MaxExpiration),"lL" } ,
{ SMT_P1020,AC_G, MOFFSS(fddiSMTPORTIndexes), "II" } ,
{ SMT_P1021,AC_G, MOFFSS(fddiSMTMACIndexes), "I" } ,
{ SMT_P1022,AC_G, MOFFSS(fddiSMTBypassPresent), "F" } ,
/* StatusGrp */
{ SMT_P1028,AC_GROUP } ,
{ SMT_P1029,AC_G, MOFFSS(fddiSMTECMState), "E" } ,
{ SMT_P102A,AC_G, MOFFSS(fddiSMTCF_State), "E" } ,
{ SMT_P102C,AC_G, MOFFSS(fddiSMTRemoteDisconnectFlag),"F" } ,
{ SMT_P102D,AC_G, MOFFSS(fddiSMTStationStatus), "E" } ,
{ SMT_P102E,AC_G, MOFFSS(fddiSMTPeerWrapFlag), "F" } ,
/* MIBOperationGrp */
{ SMT_P1032,AC_GROUP } ,
{ SMT_P1033,AC_G, MOFFSS(fddiSMTTimeStamp),"P" } ,
{ SMT_P1034,AC_G, MOFFSS(fddiSMTTransitionTimeStamp),"P" } ,
/* NOTE : SMT_P1035 is already swapped ! SMT_P_SETCOUNT */
{ SMT_P1035,AC_G, MOFFSS(fddiSMTSetCount),"4P" } ,
{ SMT_P1036,AC_G, MOFFSS(fddiSMTLastSetStationId),"8" } ,
{ SMT_P103C,AC_S, 0, "wS" } ,
/*
* PRIVATE EXTENSIONS
* only accessible locally to get/set passwd
*/
{ SMT_P10F0,AC_GR, MOFFSS(fddiPRPMFPasswd), "8" } ,
{ SMT_P10F1,AC_GR, MOFFSS(fddiPRPMFStation), "8" } ,
#ifdef ESS
{ SMT_P10F2,AC_GR, MOFFSS(fddiESSPayload), "lL" } ,
{ SMT_P10F3,AC_GR, MOFFSS(fddiESSOverhead), "lL" } ,
{ SMT_P10F4,AC_GR, MOFFSS(fddiESSMaxTNeg), "lL" } ,
{ SMT_P10F5,AC_GR, MOFFSS(fddiESSMinSegmentSize), "lL" } ,
{ SMT_P10F6,AC_GR, MOFFSS(fddiESSCategory), "lL" } ,
{ SMT_P10F7,AC_GR, MOFFSS(fddiESSSynchTxMode), "wS" } ,
#endif
#ifdef SBA
{ SMT_P10F8,AC_GR, MOFFSS(fddiSBACommand), "bF" } ,
{ SMT_P10F9,AC_GR, MOFFSS(fddiSBAAvailable), "bF" } ,
#endif
/* MAC Attributes */
{ SMT_P200A,AC_GROUP } ,
{ SMT_P200B,AC_G, MOFFMS(fddiMACFrameStatusFunctions),"S" } ,
{ SMT_P200D,AC_G, MOFFMS(fddiMACT_MaxCapabilitiy),"T" } ,
{ SMT_P200E,AC_G, MOFFMS(fddiMACTVXCapabilitiy),"T" } ,
/* ConfigGrp */
{ SMT_P2014,AC_GROUP } ,
{ SMT_P2016,AC_G, MOFFMS(fddiMACAvailablePaths), "B" } ,
{ SMT_P2017,AC_G, MOFFMS(fddiMACCurrentPath), "S" } ,
{ SMT_P2018,AC_G, MOFFMS(fddiMACUpstreamNbr), "A" } ,
{ SMT_P2019,AC_G, MOFFMS(fddiMACDownstreamNbr), "A" } ,
{ SMT_P201A,AC_G, MOFFMS(fddiMACOldUpstreamNbr), "A" } ,
{ SMT_P201B,AC_G, MOFFMS(fddiMACOldDownstreamNbr),"A" } ,
{ SMT_P201D,AC_G, MOFFMS(fddiMACDupAddressTest), "E" } ,
{ SMT_P2020,AC_GR, MOFFMS(fddiMACRequestedPaths), "wS" } ,
{ SMT_P2021,AC_G, MOFFMS(fddiMACDownstreamPORTType),"E" } ,
{ SMT_P2022,AC_G, MOFFMS(fddiMACIndex), "S" } ,
/* AddressGrp */
{ SMT_P2028,AC_GROUP } ,
{ SMT_P2029,AC_G, MOFFMS(fddiMACSMTAddress), "A" } ,
/* OperationGrp */
{ SMT_P2032,AC_GROUP } ,
{ SMT_P2033,AC_G, MOFFMS(fddiMACT_Req), "T" } ,
{ SMT_P2034,AC_G, MOFFMS(fddiMACT_Neg), "T" } ,
{ SMT_P2035,AC_G, MOFFMS(fddiMACT_Max), "T" } ,
{ SMT_P2036,AC_G, MOFFMS(fddiMACTvxValue), "T" } ,
{ SMT_P2038,AC_G, MOFFMS(fddiMACT_Pri0), "T" } ,
{ SMT_P2039,AC_G, MOFFMS(fddiMACT_Pri1), "T" } ,
{ SMT_P203A,AC_G, MOFFMS(fddiMACT_Pri2), "T" } ,
{ SMT_P203B,AC_G, MOFFMS(fddiMACT_Pri3), "T" } ,
{ SMT_P203C,AC_G, MOFFMS(fddiMACT_Pri4), "T" } ,
{ SMT_P203D,AC_G, MOFFMS(fddiMACT_Pri5), "T" } ,
{ SMT_P203E,AC_G, MOFFMS(fddiMACT_Pri6), "T" } ,
/* CountersGrp */
{ SMT_P2046,AC_GROUP } ,
{ SMT_P2047,AC_G, MOFFMS(fddiMACFrame_Ct), "C" } ,
{ SMT_P2048,AC_G, MOFFMS(fddiMACCopied_Ct), "C" } ,
{ SMT_P2049,AC_G, MOFFMS(fddiMACTransmit_Ct), "C" } ,
{ SMT_P204A,AC_G, MOFFMS(fddiMACToken_Ct), "C" } ,
{ SMT_P2051,AC_G, MOFFMS(fddiMACError_Ct), "C" } ,
{ SMT_P2052,AC_G, MOFFMS(fddiMACLost_Ct), "C" } ,
{ SMT_P2053,AC_G, MOFFMS(fddiMACTvxExpired_Ct), "C" } ,
{ SMT_P2054,AC_G, MOFFMS(fddiMACNotCopied_Ct), "C" } ,
{ SMT_P2056,AC_G, MOFFMS(fddiMACRingOp_Ct), "C" } ,
/* FrameErrorConditionGrp */
{ SMT_P205A,AC_GROUP } ,
{ SMT_P205F,AC_GR, MOFFMS(fddiMACFrameErrorThreshold),"wS" } ,
{ SMT_P2060,AC_G, MOFFMS(fddiMACFrameErrorRatio), "S" } ,
/* NotCopiedConditionGrp */
{ SMT_P2064,AC_GROUP } ,
{ SMT_P2067,AC_GR, MOFFMS(fddiMACNotCopiedThreshold),"wS" } ,
{ SMT_P2069,AC_G, MOFFMS(fddiMACNotCopiedRatio), "S" } ,
/* StatusGrp */
{ SMT_P206E,AC_GROUP } ,
{ SMT_P206F,AC_G, MOFFMS(fddiMACRMTState), "S" } ,
{ SMT_P2070,AC_G, MOFFMS(fddiMACDA_Flag), "F" } ,
{ SMT_P2071,AC_G, MOFFMS(fddiMACUNDA_Flag), "F" } ,
{ SMT_P2072,AC_G, MOFFMS(fddiMACFrameErrorFlag), "F" } ,
{ SMT_P2073,AC_G, MOFFMS(fddiMACNotCopiedFlag), "F" } ,
{ SMT_P2074,AC_G, MOFFMS(fddiMACMA_UnitdataAvailable),"F" } ,
{ SMT_P2075,AC_G, MOFFMS(fddiMACHardwarePresent), "F" } ,
{ SMT_P2076,AC_GR, MOFFMS(fddiMACMA_UnitdataEnable),"bF" } ,
/*
* PRIVATE EXTENSIONS
* only accessible locally to get/set TMIN
*/
{ SMT_P20F0,AC_NA } ,
{ SMT_P20F1,AC_GR, MOFFMS(fddiMACT_Min), "lT" } ,
/* Path Attributes */
/*
 * DON'T swap 320B, 320F, 3210: they are already swapped in swap_para()
*/
{ SMT_P320A,AC_GROUP } ,
{ SMT_P320B,AC_G, MOFFAS(fddiPATHIndex), "r" } ,
{ SMT_P320F,AC_GR, MOFFAS(fddiPATHSbaPayload), "l4" } ,
{ SMT_P3210,AC_GR, MOFFAS(fddiPATHSbaOverhead), "l4" } ,
/* fddiPATHConfiguration */
{ SMT_P3212,AC_G, 0, "" } ,
{ SMT_P3213,AC_GR, MOFFAS(fddiPATHT_Rmode), "lT" } ,
{ SMT_P3214,AC_GR, MOFFAS(fddiPATHSbaAvailable), "lL" } ,
{ SMT_P3215,AC_GR, MOFFAS(fddiPATHTVXLowerBound), "lT" } ,
{ SMT_P3216,AC_GR, MOFFAS(fddiPATHT_MaxLowerBound),"lT" } ,
{ SMT_P3217,AC_GR, MOFFAS(fddiPATHMaxT_Req), "lT" } ,
/* Port Attributes */
/* ConfigGrp */
{ SMT_P400A,AC_GROUP } ,
{ SMT_P400C,AC_G, MOFFPS(fddiPORTMy_Type), "E" } ,
{ SMT_P400D,AC_G, MOFFPS(fddiPORTNeighborType), "E" } ,
{ SMT_P400E,AC_GR, MOFFPS(fddiPORTConnectionPolicies),"bB" } ,
{ SMT_P400F,AC_G, MOFFPS(fddiPORTMacIndicated), "2" } ,
{ SMT_P4010,AC_G, MOFFPS(fddiPORTCurrentPath), "E" } ,
{ SMT_P4011,AC_GR, MOFFPS(fddiPORTRequestedPaths), "l4" } ,
{ SMT_P4012,AC_G, MOFFPS(fddiPORTMACPlacement), "S" } ,
{ SMT_P4013,AC_G, MOFFPS(fddiPORTAvailablePaths), "B" } ,
{ SMT_P4016,AC_G, MOFFPS(fddiPORTPMDClass), "E" } ,
{ SMT_P4017,AC_G, MOFFPS(fddiPORTConnectionCapabilities), "B"} ,
{ SMT_P401D,AC_G, MOFFPS(fddiPORTIndex), "R" } ,
/* OperationGrp */
{ SMT_P401E,AC_GROUP } ,
{ SMT_P401F,AC_GR, MOFFPS(fddiPORTMaint_LS), "wE" } ,
{ SMT_P4021,AC_G, MOFFPS(fddiPORTBS_Flag), "F" } ,
{ SMT_P4022,AC_G, MOFFPS(fddiPORTPC_LS), "E" } ,
/* ErrorCtrsGrp */
{ SMT_P4028,AC_GROUP } ,
{ SMT_P4029,AC_G, MOFFPS(fddiPORTEBError_Ct), "C" } ,
{ SMT_P402A,AC_G, MOFFPS(fddiPORTLCTFail_Ct), "C" } ,
/* LerGrp */
{ SMT_P4032,AC_GROUP } ,
{ SMT_P4033,AC_G, MOFFPS(fddiPORTLer_Estimate), "F" } ,
{ SMT_P4034,AC_G, MOFFPS(fddiPORTLem_Reject_Ct), "C" } ,
{ SMT_P4035,AC_G, MOFFPS(fddiPORTLem_Ct), "C" } ,
{ SMT_P403A,AC_GR, MOFFPS(fddiPORTLer_Cutoff), "bB" } ,
{ SMT_P403B,AC_GR, MOFFPS(fddiPORTLer_Alarm), "bB" } ,
/* StatusGrp */
{ SMT_P403C,AC_GROUP } ,
{ SMT_P403D,AC_G, MOFFPS(fddiPORTConnectState), "E" } ,
{ SMT_P403E,AC_G, MOFFPS(fddiPORTPCMStateX), "E" } ,
{ SMT_P403F,AC_G, MOFFPS(fddiPORTPC_Withhold), "E" } ,
{ SMT_P4040,AC_G, MOFFPS(fddiPORTLerFlag), "F" } ,
{ SMT_P4041,AC_G, MOFFPS(fddiPORTHardwarePresent),"F" } ,
{ SMT_P4046,AC_S, 0, "wS" } ,
{ 0, AC_GROUP } ,
{ 0 }
} ;
void smt_pmf_received_pack(struct s_smc *smc, SMbuf *mb, int local)
{
struct smt_header *sm ;
SMbuf *reply ;
sm = smtod(mb,struct smt_header *) ;
DB_SMT("SMT: processing PMF frame at %p len %d", sm, mb->sm_len);
#ifdef DEBUG
dump_smt(smc,sm,"PMF Received") ;
#endif
/*
 * Start the watchdog: processing a long frame may take a while,
 * and the watchdog could otherwise expire in the meantime.
*/
smt_start_watchdog(smc) ;
if (sm->smt_class == SMT_PMF_GET ||
sm->smt_class == SMT_PMF_SET) {
reply = smt_build_pmf_response(smc,sm,
sm->smt_class == SMT_PMF_SET,local) ;
if (reply) {
sm = smtod(reply,struct smt_header *) ;
#ifdef DEBUG
dump_smt(smc,sm,"PMF Reply") ;
#endif
smt_send_frame(smc,reply,FC_SMT_INFO,local) ;
}
}
}
static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
int set, int local)
{
SMbuf *mb ;
struct smt_header *smt ;
struct smt_para *pa ;
struct smt_p_reason *res ;
const struct s_p_tab *pt ;
int len ;
int index ;
int idx_end ;
int error ;
int range ;
SK_LOC_DECL(struct s_pcon,pcon) ;
SK_LOC_DECL(struct s_pcon,set_pcon) ;
/*
* build SMT header
*/
if (!(mb = smt_get_mbuf(smc)))
return mb;
smt = smtod(mb, struct smt_header *) ;
smt->smt_dest = req->smt_source ; /* DA == source of request */
smt->smt_class = req->smt_class ; /* same class (GET/SET) */
smt->smt_type = SMT_REPLY ;
smt->smt_version = SMT_VID_2 ;
smt->smt_tid = req->smt_tid ; /* same TID */
smt->smt_pad = 0 ;
smt->smt_len = 0 ;
/*
* setup parameter status
*/
pcon.pc_len = SMT_MAX_INFO_LEN ; /* max para length */
pcon.pc_err = 0 ; /* no error */
pcon.pc_badset = 0 ; /* no bad set count */
pcon.pc_p = (void *) (smt + 1) ; /* paras start here */
/*
 * check authorization and set count
*/
error = 0 ;
if (set) {
if (!local && smt_authorize(smc,req))
error = SMT_RDF_AUTHOR ;
else if (smt_check_set_count(smc,req))
pcon.pc_badset = SMT_RDF_BADSET ;
}
/*
* add reason code and all mandatory parameters
*/
res = (struct smt_p_reason *) pcon.pc_p ;
smt_add_para(smc,&pcon,(u_short) SMT_P_REASON,0,0) ;
smt_add_para(smc,&pcon,(u_short) SMT_P1033,0,0) ;
/* update 1035 and 1036 later if set */
set_pcon = pcon ;
smt_add_para(smc,&pcon,(u_short) SMT_P1035,0,0) ;
smt_add_para(smc,&pcon,(u_short) SMT_P1036,0,0) ;
pcon.pc_err = error ;
len = req->smt_len ;
pa = (struct smt_para *) (req + 1) ;
/*
* process list of paras
*/
while (!pcon.pc_err && len > 0 ) {
if (((u_short)len < pa->p_len + PARA_LEN) || (pa->p_len & 3)) {
pcon.pc_err = SMT_RDF_LENGTH ;
break ;
}
if (((range = (pa->p_type & 0xf000)) == 0x2000) ||
range == 0x3000 || range == 0x4000) {
/*
 * get index for the PORT, MAC and PATH groups
*/
index = *((u_char *)pa + PARA_LEN + 3) ;/* index */
idx_end = index ;
if (!set && (pa->p_len != 4)) {
pcon.pc_err = SMT_RDF_LENGTH ;
break ;
}
if (!index && !set) {
switch (range) {
case 0x2000 :
index = INDEX_MAC ;
idx_end = index - 1 + NUMMACS ;
break ;
case 0x3000 :
index = INDEX_PATH ;
idx_end = index - 1 + NUMPATHS ;
break ;
case 0x4000 :
index = INDEX_PORT ;
idx_end = index - 1 + NUMPHYS ;
#ifndef CONCENTRATOR
if (smc->s.sas == SMT_SAS)
idx_end = INDEX_PORT ;
#endif
break ;
}
}
}
else {
/*
* smt group has no index
*/
if (!set && (pa->p_len != 0)) {
pcon.pc_err = SMT_RDF_LENGTH ;
break ;
}
index = 0 ;
idx_end = 0 ;
}
while (index <= idx_end) {
/*
* if group
* add all paras of group
*/
pt = smt_get_ptab(pa->p_type) ;
if (pt && pt->p_access == AC_GROUP && !set) {
pt++ ;
while (pt->p_access == AC_G ||
pt->p_access == AC_GR) {
smt_add_para(smc,&pcon,pt->p_num,
index,local);
pt++ ;
}
}
/*
* ignore
* AUTHORIZATION in get/set
* SET COUNT in set
*/
else if (pa->p_type != SMT_P_AUTHOR &&
(!set || (pa->p_type != SMT_P1035))) {
int st ;
if (pcon.pc_badset) {
smt_add_para(smc,&pcon,pa->p_type,
index,local) ;
}
else if (set) {
st = smt_set_para(smc,pa,index,local,1);
/*
* return para even if error
*/
smt_add_para(smc,&pcon,pa->p_type,
index,local) ;
pcon.pc_err = st ;
}
else {
if (pt && pt->p_access == AC_S) {
pcon.pc_err =
SMT_RDF_ILLEGAL ;
}
smt_add_para(smc,&pcon,pa->p_type,
index,local) ;
}
}
if (pcon.pc_err)
break ;
index++ ;
}
len -= pa->p_len + PARA_LEN ;
pa = (struct smt_para *) ((char *)pa + pa->p_len + PARA_LEN) ;
}
smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
/* update reason code */
res->rdf_reason = pcon.pc_badset ? pcon.pc_badset :
pcon.pc_err ? pcon.pc_err : SMT_RDF_SUCCESS ;
if (set && (res->rdf_reason == SMT_RDF_SUCCESS)) {
/*
* increment set count
* set time stamp
* store station id of last set
*/
smc->mib.fddiSMTSetCount.count++ ;
smt_set_timestamp(smc,smc->mib.fddiSMTSetCount.timestamp) ;
smc->mib.fddiSMTLastSetStationId = req->smt_sid ;
smt_add_para(smc,&set_pcon,(u_short) SMT_P1035,0,0) ;
smt_add_para(smc,&set_pcon,(u_short) SMT_P1036,0,0) ;
}
return mb;
}
static int smt_authorize(struct s_smc *smc, struct smt_header *sm)
{
struct smt_para *pa ;
int i ;
char *p ;
/*
* check source station id if not zero
*/
p = (char *) &smc->mib.fddiPRPMFStation ;
for (i = 0 ; i < 8 && !p[i] ; i++)
;
if (i != 8) {
if (memcmp((char *) &sm->smt_sid,
(char *) &smc->mib.fddiPRPMFStation,8))
return 1;
}
/*
 * check the authorization parameter if the password is not zero
*/
p = (char *) smc->mib.fddiPRPMFPasswd ;
for (i = 0 ; i < 8 && !p[i] ; i++)
;
if (i != 8) {
pa = (struct smt_para *) sm_to_para(smc,sm,SMT_P_AUTHOR) ;
if (!pa)
return 1;
if (pa->p_len != 8)
return 1;
if (memcmp((char *)(pa+1),(char *)smc->mib.fddiPRPMFPasswd,8))
return 1;
}
return 0;
}
static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm)
{
struct smt_para *pa ;
struct smt_p_setcount *sc ;
pa = (struct smt_para *) sm_to_para(smc,sm,SMT_P1035) ;
if (pa) {
sc = (struct smt_p_setcount *) pa ;
if ((smc->mib.fddiSMTSetCount.count != sc->count) ||
memcmp((char *) smc->mib.fddiSMTSetCount.timestamp,
(char *)sc->timestamp,8))
return 1;
}
return 0;
}
void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
int index, int local)
{
struct smt_para *pa ;
const struct s_p_tab *pt ;
struct fddi_mib_m *mib_m = NULL;
struct fddi_mib_p *mib_p = NULL;
int len ;
int plen ;
char *from ;
char *to ;
const char *swap ;
char c ;
int range ;
char *mib_addr ;
int mac ;
int path ;
int port ;
int sp_len ;
/*
* skip if error
*/
if (pcon->pc_err)
return ;
/*
* actions don't have a value
*/
pt = smt_get_ptab(para) ;
if (pt && pt->p_access == AC_S)
return ;
to = (char *) (pcon->pc_p) ; /* destination pointer */
len = pcon->pc_len ; /* free space */
plen = len ; /* remember start length */
pa = (struct smt_para *) to ; /* type/length pointer */
to += PARA_LEN ; /* skip smt_para */
len -= PARA_LEN ;
/*
* set index if required
*/
if (((range = (para & 0xf000)) == 0x2000) ||
range == 0x3000 || range == 0x4000) {
if (len < 4)
goto wrong_error ;
to[0] = 0 ;
to[1] = 0 ;
to[2] = 0 ;
to[3] = index ;
len -= 4 ;
to += 4 ;
}
mac = index - INDEX_MAC ;
path = index - INDEX_PATH ;
port = index - INDEX_PORT ;
/*
* get pointer to mib
*/
switch (range) {
case 0x1000 :
default :
mib_addr = (char *) (&smc->mib) ;
break ;
case 0x2000 :
if (mac < 0 || mac >= NUMMACS) {
pcon->pc_err = SMT_RDF_NOPARAM ;
return ;
}
mib_addr = (char *) (&smc->mib.m[mac]) ;
mib_m = (struct fddi_mib_m *) mib_addr ;
break ;
case 0x3000 :
if (path < 0 || path >= NUMPATHS) {
pcon->pc_err = SMT_RDF_NOPARAM ;
return ;
}
mib_addr = (char *) (&smc->mib.a[path]) ;
break ;
case 0x4000 :
if (port < 0 || port >= smt_mib_phys(smc)) {
pcon->pc_err = SMT_RDF_NOPARAM ;
return ;
}
mib_addr = (char *) (&smc->mib.p[port_to_mib(smc,port)]) ;
mib_p = (struct fddi_mib_p *) mib_addr ;
break ;
}
/*
 * handle special parameters: some are assembled directly into the
 * frame (sp_done), others only need preparation before the copy loop
*/
swap = NULL;
switch (para) {
case SMT_P10F0 :
case SMT_P10F1 :
#ifdef ESS
case SMT_P10F2 :
case SMT_P10F3 :
case SMT_P10F4 :
case SMT_P10F5 :
case SMT_P10F6 :
case SMT_P10F7 :
#endif
#ifdef SBA
case SMT_P10F8 :
case SMT_P10F9 :
#endif
case SMT_P20F1 :
if (!local) {
pcon->pc_err = SMT_RDF_NOPARAM ;
return ;
}
break ;
case SMT_P2034 :
case SMT_P2046 :
case SMT_P2047 :
case SMT_P204A :
case SMT_P2051 :
case SMT_P2052 :
mac_update_counter(smc) ;
break ;
case SMT_P4022:
mib_p->fddiPORTPC_LS = LS2MIB(
sm_pm_get_ls(smc,port_to_mib(smc,port))) ;
break ;
case SMT_P_REASON :
*(u32 *)to = 0 ;
sp_len = 4 ;
goto sp_done ;
case SMT_P1033 : /* time stamp */
smt_set_timestamp(smc,smc->mib.fddiSMTTimeStamp) ;
break ;
case SMT_P1020: /* port indexes */
#if NUMPHYS == 12
swap = "IIIIIIIIIIII" ;
#else
#if NUMPHYS == 2
if (smc->s.sas == SMT_SAS)
swap = "I" ;
else
swap = "II" ;
#else
#if NUMPHYS == 24
swap = "IIIIIIIIIIIIIIIIIIIIIIII" ;
#else
????
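			/* unsupported NUMPHYS value: the stray tokens above force a compile error */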
#endif
#endif
#endif
break ;
case SMT_P3212 :
{
sp_len = cem_build_path(smc,to,path) ;
goto sp_done ;
}
case SMT_P1048 : /* peer wrap condition */
{
struct smt_p_1048 *sp ;
sp = (struct smt_p_1048 *) to ;
sp->p1048_flag = smc->mib.fddiSMTPeerWrapFlag ;
sp->p1048_cf_state = smc->mib.fddiSMTCF_State ;
sp_len = sizeof(struct smt_p_1048) ;
goto sp_done ;
}
case SMT_P208C :
{
struct smt_p_208c *sp ;
sp = (struct smt_p_208c *) to ;
sp->p208c_flag =
smc->mib.m[MAC0].fddiMACDuplicateAddressCond ;
sp->p208c_dupcondition =
(mib_m->fddiMACDA_Flag ? SMT_ST_MY_DUPA : 0) |
(mib_m->fddiMACUNDA_Flag ? SMT_ST_UNA_DUPA : 0);
sp->p208c_fddilong =
mib_m->fddiMACSMTAddress ;
sp->p208c_fddiunalong =
mib_m->fddiMACUpstreamNbr ;
sp->p208c_pad = 0 ;
sp_len = sizeof(struct smt_p_208c) ;
goto sp_done ;
}
case SMT_P208D : /* frame error condition */
{
struct smt_p_208d *sp ;
sp = (struct smt_p_208d *) to ;
sp->p208d_flag =
mib_m->fddiMACFrameErrorFlag ;
sp->p208d_frame_ct =
mib_m->fddiMACFrame_Ct ;
sp->p208d_error_ct =
mib_m->fddiMACError_Ct ;
sp->p208d_lost_ct =
mib_m->fddiMACLost_Ct ;
sp->p208d_ratio =
mib_m->fddiMACFrameErrorRatio ;
sp_len = sizeof(struct smt_p_208d) ;
goto sp_done ;
}
case SMT_P208E : /* not copied condition */
{
struct smt_p_208e *sp ;
sp = (struct smt_p_208e *) to ;
sp->p208e_flag =
mib_m->fddiMACNotCopiedFlag ;
sp->p208e_not_copied =
mib_m->fddiMACNotCopied_Ct ;
sp->p208e_copied =
mib_m->fddiMACCopied_Ct ;
sp->p208e_not_copied_ratio =
mib_m->fddiMACNotCopiedRatio ;
sp_len = sizeof(struct smt_p_208e) ;
goto sp_done ;
}
case SMT_P208F : /* neighbor change event */
{
struct smt_p_208f *sp ;
sp = (struct smt_p_208f *) to ;
sp->p208f_multiple =
mib_m->fddiMACMultiple_N ;
sp->p208f_nacondition =
mib_m->fddiMACDuplicateAddressCond ;
sp->p208f_old_una =
mib_m->fddiMACOldUpstreamNbr ;
sp->p208f_new_una =
mib_m->fddiMACUpstreamNbr ;
sp->p208f_old_dna =
mib_m->fddiMACOldDownstreamNbr ;
sp->p208f_new_dna =
mib_m->fddiMACDownstreamNbr ;
sp->p208f_curren_path =
mib_m->fddiMACCurrentPath ;
sp->p208f_smt_address =
mib_m->fddiMACSMTAddress ;
sp_len = sizeof(struct smt_p_208f) ;
goto sp_done ;
}
case SMT_P2090 :
{
struct smt_p_2090 *sp ;
sp = (struct smt_p_2090 *) to ;
sp->p2090_multiple =
mib_m->fddiMACMultiple_P ;
sp->p2090_availablepaths =
mib_m->fddiMACAvailablePaths ;
sp->p2090_currentpath =
mib_m->fddiMACCurrentPath ;
sp->p2090_requestedpaths =
mib_m->fddiMACRequestedPaths ;
sp_len = sizeof(struct smt_p_2090) ;
goto sp_done ;
}
case SMT_P4050 :
{
struct smt_p_4050 *sp ;
sp = (struct smt_p_4050 *) to ;
sp->p4050_flag =
mib_p->fddiPORTLerFlag ;
sp->p4050_pad = 0 ;
sp->p4050_cutoff =
mib_p->fddiPORTLer_Cutoff ;
sp->p4050_alarm =
mib_p->fddiPORTLer_Alarm ;
sp->p4050_estimate =
mib_p->fddiPORTLer_Estimate ;
sp->p4050_reject_ct =
mib_p->fddiPORTLem_Reject_Ct ;
sp->p4050_ct =
mib_p->fddiPORTLem_Ct ;
sp_len = sizeof(struct smt_p_4050) ;
goto sp_done ;
}
case SMT_P4051 :
{
struct smt_p_4051 *sp ;
sp = (struct smt_p_4051 *) to ;
sp->p4051_multiple =
mib_p->fddiPORTMultiple_U ;
sp->p4051_porttype =
mib_p->fddiPORTMy_Type ;
sp->p4051_connectstate =
mib_p->fddiPORTConnectState ;
sp->p4051_pc_neighbor =
mib_p->fddiPORTNeighborType ;
sp->p4051_pc_withhold =
mib_p->fddiPORTPC_Withhold ;
sp_len = sizeof(struct smt_p_4051) ;
goto sp_done ;
}
case SMT_P4052 :
{
struct smt_p_4052 *sp ;
sp = (struct smt_p_4052 *) to ;
sp->p4052_flag =
mib_p->fddiPORTEB_Condition ;
sp->p4052_eberrorcount =
mib_p->fddiPORTEBError_Ct ;
sp_len = sizeof(struct smt_p_4052) ;
goto sp_done ;
}
case SMT_P4053 :
{
struct smt_p_4053 *sp ;
sp = (struct smt_p_4053 *) to ;
sp->p4053_multiple =
mib_p->fddiPORTMultiple_P ;
sp->p4053_availablepaths =
mib_p->fddiPORTAvailablePaths ;
sp->p4053_currentpath =
mib_p->fddiPORTCurrentPath ;
memcpy( (char *) &sp->p4053_requestedpaths,
(char *) mib_p->fddiPORTRequestedPaths,4) ;
sp->p4053_mytype =
mib_p->fddiPORTMy_Type ;
sp->p4053_neighbortype =
mib_p->fddiPORTNeighborType ;
sp_len = sizeof(struct smt_p_4053) ;
goto sp_done ;
}
default :
break ;
}
/*
* in table ?
*/
if (!pt) {
pcon->pc_err = (para & 0xff00) ? SMT_RDF_NOPARAM :
SMT_RDF_ILLEGAL ;
return ;
}
/*
* check access rights
*/
switch (pt->p_access) {
case AC_G :
case AC_GR :
break ;
default :
pcon->pc_err = SMT_RDF_ILLEGAL ;
return ;
}
from = mib_addr + pt->p_offset ;
if (!swap)
swap = pt->p_swap ; /* pointer to swap string */
/*
* copy values
*/
while ((c = *swap++)) {
switch(c) {
case 'b' :
case 'w' :
case 'l' :
break ;
case 'S' :
case 'E' :
case 'R' :
case 'r' :
if (len < 4)
goto len_error ;
to[0] = 0 ;
to[1] = 0 ;
#ifdef LITTLE_ENDIAN
if (c == 'r') {
to[2] = *from++ ;
to[3] = *from++ ;
}
else {
to[3] = *from++ ;
to[2] = *from++ ;
}
#else
to[2] = *from++ ;
to[3] = *from++ ;
#endif
to += 4 ;
len -= 4 ;
break ;
case 'I' : /* for SET of port indexes */
if (len < 2)
goto len_error ;
#ifdef LITTLE_ENDIAN
to[1] = *from++ ;
to[0] = *from++ ;
#else
to[0] = *from++ ;
to[1] = *from++ ;
#endif
to += 2 ;
len -= 2 ;
break ;
case 'F' :
case 'B' :
if (len < 4)
goto len_error ;
len -= 4 ;
to[0] = 0 ;
to[1] = 0 ;
to[2] = 0 ;
to[3] = *from++ ;
to += 4 ;
break ;
case 'C' :
case 'T' :
case 'L' :
if (len < 4)
goto len_error ;
#ifdef LITTLE_ENDIAN
to[3] = *from++ ;
to[2] = *from++ ;
to[1] = *from++ ;
to[0] = *from++ ;
#else
to[0] = *from++ ;
to[1] = *from++ ;
to[2] = *from++ ;
to[3] = *from++ ;
#endif
len -= 4 ;
to += 4 ;
break ;
case '2' : /* PortMacIndicated */
if (len < 4)
goto len_error ;
to[0] = 0 ;
to[1] = 0 ;
to[2] = *from++ ;
to[3] = *from++ ;
len -= 4 ;
to += 4 ;
break ;
case '4' :
if (len < 4)
goto len_error ;
to[0] = *from++ ;
to[1] = *from++ ;
to[2] = *from++ ;
to[3] = *from++ ;
len -= 4 ;
to += 4 ;
break ;
case 'A' :
if (len < 8)
goto len_error ;
to[0] = 0 ;
to[1] = 0 ;
memcpy((char *) to+2,(char *) from,6) ;
to += 8 ;
from += 8 ;
len -= 8 ;
break ;
case '8' :
if (len < 8)
goto len_error ;
memcpy((char *) to,(char *) from,8) ;
to += 8 ;
from += 8 ;
len -= 8 ;
break ;
case 'D' :
if (len < 32)
goto len_error ;
memcpy((char *) to,(char *) from,32) ;
to += 32 ;
from += 32 ;
len -= 32 ;
break ;
case 'P' : /* timestamp is NOT swapped */
if (len < 8)
goto len_error ;
to[0] = *from++ ;
to[1] = *from++ ;
to[2] = *from++ ;
to[3] = *from++ ;
to[4] = *from++ ;
to[5] = *from++ ;
to[6] = *from++ ;
to[7] = *from++ ;
to += 8 ;
len -= 8 ;
break ;
default :
SMT_PANIC(smc,SMT_E0119, SMT_E0119_MSG) ;
break ;
}
}
done:
/*
 * pad the parameter value to a multiple of four bytes (needed for the 'I' encoding)
* note: len is DECREMENTED
*/
if (len & 3) {
to[0] = 0 ;
to[1] = 0 ;
to += 4 - (len & 3 ) ;
len = len & ~ 3 ;
}
/* set type and length */
pa->p_type = para ;
pa->p_len = plen - len - PARA_LEN ;
/* return values */
pcon->pc_p = (void *) to ;
pcon->pc_len = len ;
return ;
sp_done:
len -= sp_len ;
to += sp_len ;
goto done ;
len_error:
/* parameter does not fit in frame */
pcon->pc_err = SMT_RDF_TOOLONG ;
return ;
wrong_error:
pcon->pc_err = SMT_RDF_LENGTH ;
}
/*
* set parameter
*/
static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
int local, int set)
{
#define IFSET(x) if (set) (x)
const struct s_p_tab *pt ;
int len ;
char *from ;
char *to ;
const char *swap ;
char c ;
char *mib_addr ;
struct fddi_mib *mib ;
struct fddi_mib_m *mib_m = NULL;
struct fddi_mib_a *mib_a = NULL;
struct fddi_mib_p *mib_p = NULL;
int mac ;
int path ;
int port ;
SK_LOC_DECL(u_char,byte_val) ;
SK_LOC_DECL(u_short,word_val) ;
SK_LOC_DECL(u_long,long_val) ;
mac = index - INDEX_MAC ;
path = index - INDEX_PATH ;
port = index - INDEX_PORT ;
len = pa->p_len ;
from = (char *) (pa + 1 ) ;
mib = &smc->mib ;
switch (pa->p_type & 0xf000) {
case 0x1000 :
default :
mib_addr = (char *) mib ;
break ;
case 0x2000 :
if (mac < 0 || mac >= NUMMACS) {
return SMT_RDF_NOPARAM;
}
mib_m = &smc->mib.m[mac] ;
mib_addr = (char *) mib_m ;
from += 4 ; /* skip index */
len -= 4 ;
break ;
case 0x3000 :
if (path < 0 || path >= NUMPATHS) {
return SMT_RDF_NOPARAM;
}
mib_a = &smc->mib.a[path] ;
mib_addr = (char *) mib_a ;
from += 4 ; /* skip index */
len -= 4 ;
break ;
case 0x4000 :
if (port < 0 || port >= smt_mib_phys(smc)) {
return SMT_RDF_NOPARAM;
}
mib_p = &smc->mib.p[port_to_mib(smc,port)] ;
mib_addr = (char *) mib_p ;
from += 4 ; /* skip index */
len -= 4 ;
break ;
}
switch (pa->p_type) {
case SMT_P10F0 :
case SMT_P10F1 :
#ifdef ESS
case SMT_P10F2 :
case SMT_P10F3 :
case SMT_P10F4 :
case SMT_P10F5 :
case SMT_P10F6 :
case SMT_P10F7 :
#endif
#ifdef SBA
case SMT_P10F8 :
case SMT_P10F9 :
#endif
case SMT_P20F1 :
if (!local)
return SMT_RDF_NOPARAM;
break ;
}
pt = smt_get_ptab(pa->p_type) ;
if (!pt)
return (pa->p_type & 0xff00) ? SMT_RDF_NOPARAM :
SMT_RDF_ILLEGAL;
switch (pt->p_access) {
case AC_GR :
case AC_S :
break ;
default :
return SMT_RDF_ILLEGAL;
}
to = mib_addr + pt->p_offset ;
swap = pt->p_swap ; /* pointer to swap string */
while (swap && (c = *swap++)) {
switch(c) {
case 'b' :
to = (char *) &byte_val ;
break ;
case 'w' :
to = (char *) &word_val ;
break ;
case 'l' :
to = (char *) &long_val ;
break ;
case 'S' :
case 'E' :
case 'R' :
case 'r' :
if (len < 4) {
goto len_error ;
}
if (from[0] | from[1])
goto val_error ;
#ifdef LITTLE_ENDIAN
if (c == 'r') {
to[0] = from[2] ;
to[1] = from[3] ;
}
else {
to[1] = from[2] ;
to[0] = from[3] ;
}
#else
to[0] = from[2] ;
to[1] = from[3] ;
#endif
from += 4 ;
to += 2 ;
len -= 4 ;
break ;
case 'F' :
case 'B' :
if (len < 4) {
goto len_error ;
}
if (from[0] | from[1] | from[2])
goto val_error ;
to[0] = from[3] ;
len -= 4 ;
from += 4 ;
to += 4 ;
break ;
case 'C' :
case 'T' :
case 'L' :
if (len < 4) {
goto len_error ;
}
#ifdef LITTLE_ENDIAN
to[3] = *from++ ;
to[2] = *from++ ;
to[1] = *from++ ;
to[0] = *from++ ;
#else
to[0] = *from++ ;
to[1] = *from++ ;
to[2] = *from++ ;
to[3] = *from++ ;
#endif
len -= 4 ;
to += 4 ;
break ;
case 'A' :
if (len < 8)
goto len_error ;
if (set)
memcpy(to,from+2,6) ;
to += 8 ;
from += 8 ;
len -= 8 ;
break ;
case '4' :
if (len < 4)
goto len_error ;
if (set)
memcpy(to,from,4) ;
to += 4 ;
from += 4 ;
len -= 4 ;
break ;
case '8' :
if (len < 8)
goto len_error ;
if (set)
memcpy(to,from,8) ;
to += 8 ;
from += 8 ;
len -= 8 ;
break ;
case 'D' :
if (len < 32)
goto len_error ;
if (set)
memcpy(to,from,32) ;
to += 32 ;
from += 32 ;
len -= 32 ;
break ;
case 'P' : /* timestamp is NOT swapped */
if (set) {
to[0] = *from++ ;
to[1] = *from++ ;
to[2] = *from++ ;
to[3] = *from++ ;
to[4] = *from++ ;
to[5] = *from++ ;
to[6] = *from++ ;
to[7] = *from++ ;
}
to += 8 ;
len -= 8 ;
break ;
default :
SMT_PANIC(smc,SMT_E0120, SMT_E0120_MSG) ;
return SMT_RDF_ILLEGAL;
}
}
/*
* actions and internal updates
*/
switch (pa->p_type) {
case SMT_P101A: /* fddiSMTConfigPolicy */
if (word_val & ~1)
goto val_error ;
IFSET(mib->fddiSMTConfigPolicy = word_val) ;
break ;
case SMT_P101B : /* fddiSMTConnectionPolicy */
if (!(word_val & POLICY_MM))
goto val_error ;
IFSET(mib->fddiSMTConnectionPolicy = word_val) ;
break ;
case SMT_P101D : /* fddiSMTTT_Notify */
if (word_val < 2 || word_val > 30)
goto val_error ;
IFSET(mib->fddiSMTTT_Notify = word_val) ;
break ;
case SMT_P101E : /* fddiSMTStatRptPolicy */
if (byte_val & ~1)
goto val_error ;
IFSET(mib->fddiSMTStatRptPolicy = byte_val) ;
break ;
case SMT_P101F : /* fddiSMTTrace_MaxExpiration */
/*
* note: lower limit trace_max = 6.001773... s
* NO upper limit
*/
if (long_val < (long)0x478bf51L)
goto val_error ;
IFSET(mib->fddiSMTTrace_MaxExpiration = long_val) ;
break ;
#ifdef ESS
case SMT_P10F2 : /* fddiESSPayload */
if (long_val > 1562)
goto val_error ;
if (set && smc->mib.fddiESSPayload != long_val) {
smc->ess.raf_act_timer_poll = TRUE ;
smc->mib.fddiESSPayload = long_val ;
}
break ;
case SMT_P10F3 : /* fddiESSOverhead */
if (long_val < 50 || long_val > 5000)
goto val_error ;
if (set && smc->mib.fddiESSPayload &&
smc->mib.fddiESSOverhead != long_val) {
smc->ess.raf_act_timer_poll = TRUE ;
smc->mib.fddiESSOverhead = long_val ;
}
break ;
case SMT_P10F4 : /* fddiESSMaxTNeg */
if (long_val > -MS2BCLK(5) || long_val < -MS2BCLK(165))
goto val_error ;
IFSET(mib->fddiESSMaxTNeg = long_val) ;
break ;
case SMT_P10F5 : /* fddiESSMinSegmentSize */
if (long_val < 1 || long_val > 4478)
goto val_error ;
IFSET(mib->fddiESSMinSegmentSize = long_val) ;
break ;
case SMT_P10F6 : /* fddiESSCategory */
if ((long_val & 0xffff) != 1)
goto val_error ;
IFSET(mib->fddiESSCategory = long_val) ;
break ;
case SMT_P10F7 : /* fddiESSSyncTxMode */
if (word_val > 1)
goto val_error ;
IFSET(mib->fddiESSSynchTxMode = word_val) ;
break ;
#endif
#ifdef SBA
case SMT_P10F8 : /* fddiSBACommand */
if (byte_val != SB_STOP && byte_val != SB_START)
goto val_error ;
IFSET(mib->fddiSBACommand = byte_val) ;
break ;
case SMT_P10F9 : /* fddiSBAAvailable */
if (byte_val > 100)
goto val_error ;
IFSET(mib->fddiSBAAvailable = byte_val) ;
break ;
#endif
case SMT_P2020 : /* fddiMACRequestedPaths */
if ((word_val & (MIB_P_PATH_PRIM_PREFER |
MIB_P_PATH_PRIM_ALTER)) == 0 )
goto val_error ;
IFSET(mib_m->fddiMACRequestedPaths = word_val) ;
break ;
case SMT_P205F : /* fddiMACFrameErrorThreshold */
/* 0 .. ffff acceptable */
IFSET(mib_m->fddiMACFrameErrorThreshold = word_val) ;
break ;
case SMT_P2067 : /* fddiMACNotCopiedThreshold */
/* 0 .. ffff acceptable */
IFSET(mib_m->fddiMACNotCopiedThreshold = word_val) ;
break ;
case SMT_P2076: /* fddiMACMA_UnitdataEnable */
if (byte_val & ~1)
goto val_error ;
if (set) {
mib_m->fddiMACMA_UnitdataEnable = byte_val ;
queue_event(smc,EVENT_RMT,RM_ENABLE_FLAG) ;
}
break ;
case SMT_P20F1 : /* fddiMACT_Min */
IFSET(mib_m->fddiMACT_Min = long_val) ;
break ;
case SMT_P320F :
if (long_val > 1562)
goto val_error ;
IFSET(mib_a->fddiPATHSbaPayload = long_val) ;
#ifdef ESS
if (set)
ess_para_change(smc) ;
#endif
break ;
case SMT_P3210 :
if (long_val > 5000)
goto val_error ;
if (long_val != 0 && mib_a->fddiPATHSbaPayload == 0)
goto val_error ;
IFSET(mib_a->fddiPATHSbaOverhead = long_val) ;
#ifdef ESS
if (set)
ess_para_change(smc) ;
#endif
break ;
case SMT_P3213: /* fddiPATHT_Rmode */
/* no limit :
		 * 0 .. 343.597 s => 0 .. 2^32 * 80 ns
*/
if (set) {
mib_a->fddiPATHT_Rmode = long_val ;
rtm_set_timer(smc) ;
}
break ;
case SMT_P3214 : /* fddiPATHSbaAvailable */
if (long_val > 0x00BEBC20L)
goto val_error ;
#ifdef SBA
if (set && mib->fddiSBACommand == SB_STOP)
goto val_error ;
#endif
IFSET(mib_a->fddiPATHSbaAvailable = long_val) ;
break ;
case SMT_P3215 : /* fddiPATHTVXLowerBound */
IFSET(mib_a->fddiPATHTVXLowerBound = long_val) ;
goto change_mac_para ;
case SMT_P3216 : /* fddiPATHT_MaxLowerBound */
IFSET(mib_a->fddiPATHT_MaxLowerBound = long_val) ;
goto change_mac_para ;
case SMT_P3217 : /* fddiPATHMaxT_Req */
IFSET(mib_a->fddiPATHMaxT_Req = long_val) ;
change_mac_para:
if (set && smt_set_mac_opvalues(smc)) {
RS_SET(smc,RS_EVENT) ;
smc->sm.please_reconnect = 1 ;
queue_event(smc,EVENT_ECM,EC_DISCONNECT) ;
}
break ;
case SMT_P400E : /* fddiPORTConnectionPolicies */
if (byte_val > 1)
goto val_error ;
IFSET(mib_p->fddiPORTConnectionPolicies = byte_val) ;
break ;
case SMT_P4011 : /* fddiPORTRequestedPaths */
/* all 3*8 bits allowed */
IFSET(memcpy((char *)mib_p->fddiPORTRequestedPaths,
(char *)&long_val,4)) ;
break ;
case SMT_P401F: /* fddiPORTMaint_LS */
if (word_val > 4)
goto val_error ;
IFSET(mib_p->fddiPORTMaint_LS = word_val) ;
break ;
case SMT_P403A : /* fddiPORTLer_Cutoff */
if (byte_val < 4 || byte_val > 15)
goto val_error ;
IFSET(mib_p->fddiPORTLer_Cutoff = byte_val) ;
break ;
case SMT_P403B : /* fddiPORTLer_Alarm */
if (byte_val < 4 || byte_val > 15)
goto val_error ;
IFSET(mib_p->fddiPORTLer_Alarm = byte_val) ;
break ;
/*
* Actions
*/
case SMT_P103C : /* fddiSMTStationAction */
if (smt_action(smc,SMT_STATION_ACTION, (int) word_val, 0))
goto val_error ;
break ;
case SMT_P4046: /* fddiPORTAction */
if (smt_action(smc,SMT_PORT_ACTION, (int) word_val,
port_to_mib(smc,port)))
goto val_error ;
break ;
default :
break ;
}
return 0;
val_error:
/* parameter value in frame is out of range */
return SMT_RDF_RANGE;
len_error:
/* parameter value in frame is too short */
return SMT_RDF_LENGTH;
#if 0
no_author_error:
	/* parameter not settable because the SBA is not active.
	 * Please note: we return the code 'not authorized' because
	 * 'SBA denied' is not a valid return code in the PMF protocol.
*/
return SMT_RDF_AUTHOR;
#endif
}
static const struct s_p_tab *smt_get_ptab(u_short para)
{
const struct s_p_tab *pt ;
for (pt = p_tab ; pt->p_num && pt->p_num != para ; pt++)
;
return pt->p_num ? pt : NULL;
}
static int smt_mib_phys(struct s_smc *smc)
{
#ifdef CONCENTRATOR
SK_UNUSED(smc) ;
return NUMPHYS;
#else
if (smc->s.sas == SMT_SAS)
return 1;
return NUMPHYS;
#endif
}
static int port_to_mib(struct s_smc *smc, int p)
{
#ifdef CONCENTRATOR
SK_UNUSED(smc) ;
return p;
#else
if (smc->s.sas == SMT_SAS)
return PS;
return p;
#endif
}
#ifdef DEBUG
#ifndef BOOT
void dump_smt(struct s_smc *smc, struct smt_header *sm, char *text)
{
int len ;
struct smt_para *pa ;
char *c ;
int n ;
int nn ;
#ifdef LITTLE_ENDIAN
int smtlen ;
#endif
SK_UNUSED(smc) ;
#ifdef DEBUG_BRD
if (smc->debug.d_smtf < 2)
#else
if (debug.d_smtf < 2)
#endif
return ;
#ifdef LITTLE_ENDIAN
smtlen = sm->smt_len + sizeof(struct smt_header) ;
#endif
printf("SMT Frame [%s]:\nDA ",text) ;
dump_hex((char *) &sm->smt_dest,6) ;
printf("\tSA ") ;
dump_hex((char *) &sm->smt_source,6) ;
printf(" Class %x Type %x Version %x\n",
sm->smt_class,sm->smt_type,sm->smt_version) ;
printf("TID %x\t\tSID ", sm->smt_tid);
dump_hex((char *) &sm->smt_sid,8) ;
printf(" LEN %x\n",sm->smt_len) ;
len = sm->smt_len ;
pa = (struct smt_para *) (sm + 1) ;
while (len > 0 ) {
int plen ;
#ifdef UNIX
printf("TYPE %x LEN %x VALUE\t",pa->p_type,pa->p_len) ;
#else
printf("TYPE %04x LEN %2x VALUE\t",pa->p_type,pa->p_len) ;
#endif
n = pa->p_len ;
if ( (n < 0 ) || (n > (int)(len - PARA_LEN))) {
n = len - PARA_LEN ;
printf(" BAD LENGTH\n") ;
break ;
}
#ifdef LITTLE_ENDIAN
smt_swap_para(sm,smtlen,0) ;
#endif
if (n < 24) {
dump_hex((char *)(pa+1),(int) n) ;
printf("\n") ;
}
else {
int first = 0 ;
c = (char *)(pa+1) ;
dump_hex(c,16) ;
printf("\n") ;
n -= 16 ;
c += 16 ;
while (n > 0) {
nn = (n > 16) ? 16 : n ;
if (n > 64) {
if (first == 0)
printf("\t\t\t...\n") ;
first = 1 ;
}
else {
printf("\t\t\t") ;
dump_hex(c,nn) ;
printf("\n") ;
}
n -= nn ;
c += 16 ;
}
}
#ifdef LITTLE_ENDIAN
smt_swap_para(sm,smtlen,1) ;
#endif
plen = (pa->p_len + PARA_LEN + 3) & ~3 ;
len -= plen ;
pa = (struct smt_para *)((char *)pa + plen) ;
}
printf("-------------------------------------------------\n\n") ;
}
void dump_hex(char *p, int len)
{
int n = 0 ;
while (len--) {
n++ ;
#ifdef UNIX
printf("%x%s",*p++ & 0xff,len ? ( (n & 7) ? " " : "-") : "") ;
#else
printf("%02x%s",*p++ & 0xff,len ? ( (n & 7) ? " " : "-") : "") ;
#endif
}
}
#endif /* no BOOT */
#endif /* DEBUG */
#endif /* no SLIM_SMT */
| linux-master | drivers/net/fddi/skfp/pmf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
* Timer Driver for FBI board (timer chip 82C54)
*/
/*
* Modifications:
*
* 28-Jun-1994 sw Edit v1.6.
* MCA: Added support for the SK-NET FDDI-FM2 adapter. The
* following functions have been added(+) or modified(*):
* hwt_start(*), hwt_stop(*), hwt_restart(*), hwt_read(*)
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
/*
* Prototypes of local functions.
*/
/* 28-Jun-1994 sw - Note: hwt_restart() is also used in module 'drvfbi.c'. */
/*static void hwt_restart() ; */
/************************
*
* hwt_start
*
* Start hardware timer (clock ticks are 16us).
*
* void hwt_start(
* struct s_smc *smc,
* u_long time) ;
* In
* smc - A pointer to the SMT Context structure.
*
* time - The time in units of 16us to load the timer with.
* Out
* Nothing.
*
************************/
#define HWT_MAX (65000)
void hwt_start(struct s_smc *smc, u_long time)
{
u_short cnt ;
if (time > HWT_MAX)
time = HWT_MAX ;
smc->hw.t_start = time ;
smc->hw.t_stop = 0L ;
cnt = (u_short)time ;
/*
* if time < 16 us
* time = 16 us
*/
if (!cnt)
cnt++ ;
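	/*
	 * The timer counts in 80 ns steps, so a 16 us tick scales by 200;
	 * e.g. hwt_start(smc, 625) arms the timer for 625 * 16 us = 10 ms.
	 */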
outpd(ADDR(B2_TI_INI), (u_long) cnt * 200) ; /* Load timer value. */
outpw(ADDR(B2_TI_CRTL), TIM_START) ; /* Start timer. */
smc->hw.timer_activ = TRUE ;
}
/************************
*
* hwt_stop
*
* Stop hardware timer.
*
* void hwt_stop(
* struct s_smc *smc) ;
* In
* smc - A pointer to the SMT Context structure.
* Out
* Nothing.
*
************************/
void hwt_stop(struct s_smc *smc)
{
outpw(ADDR(B2_TI_CRTL), TIM_STOP) ;
outpw(ADDR(B2_TI_CRTL), TIM_CL_IRQ) ;
smc->hw.timer_activ = FALSE ;
}
/************************
*
* hwt_init
*
* Initialize hardware timer.
*
* void hwt_init(
* struct s_smc *smc) ;
* In
* smc - A pointer to the SMT Context structure.
* Out
* Nothing.
*
************************/
void hwt_init(struct s_smc *smc)
{
smc->hw.t_start = 0 ;
smc->hw.t_stop = 0 ;
smc->hw.timer_activ = FALSE ;
hwt_restart(smc) ;
}
/************************
*
* hwt_restart
*
* Clear timer interrupt.
*
* void hwt_restart(
* struct s_smc *smc) ;
* In
* smc - A pointer to the SMT Context structure.
* Out
* Nothing.
*
************************/
void hwt_restart(struct s_smc *smc)
{
hwt_stop(smc) ;
}
/************************
*
* hwt_read
*
* Stop hardware timer and read time elapsed since last start.
*
* u_long hwt_read(smc) ;
* In
* smc - A pointer to the SMT Context structure.
* Out
* The elapsed time since last start in units of 16us.
*
************************/
u_long hwt_read(struct s_smc *smc)
{
u_short tr ;
u_long is ;
if (smc->hw.timer_activ) {
hwt_stop(smc) ;
tr = (u_short)((inpd(ADDR(B2_TI_VAL))/200) & 0xffff) ;
is = GET_ISR() ;
/* Check if timer expired (or wraparound). */
if ((tr > smc->hw.t_start) || (is & IS_TIMINT)) {
hwt_restart(smc) ;
smc->hw.t_stop = smc->hw.t_start ;
}
else
smc->hw.t_stop = smc->hw.t_start - tr ;
}
return smc->hw.t_stop;
}
#ifdef PCI
/************************
*
* hwt_quick_read
*
 *	Stop the hardware timer, read its current value, and restart it.
 *
 *	u_long hwt_quick_read(smc) ;
 * In
 *	smc - A pointer to the SMT Context structure.
 * Out
 *	The current timer value in units of 80 ns.
*
************************/
u_long hwt_quick_read(struct s_smc *smc)
{
u_long interval ;
u_long time ;
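	/*
	 * Save the programmed interval, stop the timer and read its current
	 * value, write that value back as the restart value, restart the
	 * timer and finally restore the original interval register.
	 */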
interval = inpd(ADDR(B2_TI_INI)) ;
outpw(ADDR(B2_TI_CRTL), TIM_STOP) ;
time = inpd(ADDR(B2_TI_VAL)) ;
outpd(ADDR(B2_TI_INI),time) ;
outpw(ADDR(B2_TI_CRTL), TIM_START) ;
outpd(ADDR(B2_TI_INI),interval) ;
return time;
}
/************************
*
* hwt_wait_time(smc,start,duration)
*
 *	This function returns after 'duration' timer units have elapsed
 *	since the start time.
 *
 *	para	start		start time
 *		duration	time to wait
 *
 * NOTE: The function returns immediately if the timer is not
 *	 running.
************************/
void hwt_wait_time(struct s_smc *smc, u_long start, long int duration)
{
long diff ;
long interval ;
int wrapped ;
/*
* check if timer is running
*/
if (smc->hw.timer_activ == FALSE ||
hwt_quick_read(smc) == hwt_quick_read(smc)) {
return ;
}
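	/*
	 * Busy-wait until 'duration' timer units have elapsed since 'start'.
	 * The difference (start - current) goes negative when the
	 * down-counting timer wraps, so one reload interval is added per
	 * wrap in the loops below.
	 */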
interval = inpd(ADDR(B2_TI_INI)) ;
if (interval > duration) {
do {
diff = (long)(start - hwt_quick_read(smc)) ;
if (diff < 0) {
diff += interval ;
}
} while (diff <= duration) ;
}
else {
diff = interval ;
wrapped = 0 ;
do {
if (!wrapped) {
if (hwt_quick_read(smc) >= start) {
diff += interval ;
wrapped = 1 ;
}
}
else {
if (hwt_quick_read(smc) < start) {
wrapped = 0 ;
}
}
} while (diff <= duration) ;
}
}
#endif
| linux-master | drivers/net/fddi/skfp/hwt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/smt_p.h"
#include <linux/bitrev.h>
#include <linux/kernel.h>
#define KERNEL
#include "h/smtstate.h"
/*
* FC in SMbuf
*/
#define m_fc(mb) ((mb)->sm_data[0])
#define SMT_TID_MAGIC 0x1f0a7b3c
static const char *const smt_type_name[] = {
"SMT_00??", "SMT_INFO", "SMT_02??", "SMT_03??",
"SMT_04??", "SMT_05??", "SMT_06??", "SMT_07??",
"SMT_08??", "SMT_09??", "SMT_0A??", "SMT_0B??",
"SMT_0C??", "SMT_0D??", "SMT_0E??", "SMT_NSA"
} ;
static const char *const smt_class_name[] = {
"UNKNOWN","NIF","SIF_CONFIG","SIF_OPER","ECF","RAF","RDF",
"SRF","PMF_GET","PMF_SET","ESF"
} ;
#define LAST_CLASS (SMT_PMF_SET)
static const struct fddi_addr SMT_Unknown = {
{ 0,0,0x1f,0,0,0 }
} ;
/*
* function prototypes
*/
#ifdef LITTLE_ENDIAN
static int smt_swap_short(u_short s);
#endif
static int mac_index(struct s_smc *smc, int mac);
static int phy_index(struct s_smc *smc, int phy);
static int mac_con_resource_index(struct s_smc *smc, int mac);
static int phy_con_resource_index(struct s_smc *smc, int phy);
static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
int local);
static void smt_send_nif(struct s_smc *smc, const struct fddi_addr *dest,
int fc, u_long tid, int type, int local);
static void smt_send_ecf(struct s_smc *smc, struct fddi_addr *dest, int fc,
u_long tid, int type, int len);
static void smt_echo_test(struct s_smc *smc, int dna);
static void smt_send_sif_config(struct s_smc *smc, struct fddi_addr *dest,
u_long tid, int local);
static void smt_send_sif_operation(struct s_smc *smc, struct fddi_addr *dest,
u_long tid, int local);
#ifdef LITTLE_ENDIAN
static void smt_string_swap(char *data, const char *format, int len);
#endif
static void smt_add_frame_len(SMbuf *mb, int len);
static void smt_fill_una(struct s_smc *smc, struct smt_p_una *una);
static void smt_fill_sde(struct s_smc *smc, struct smt_p_sde *sde);
static void smt_fill_state(struct s_smc *smc, struct smt_p_state *state);
static void smt_fill_timestamp(struct s_smc *smc, struct smt_p_timestamp *ts);
static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy);
static void smt_fill_latency(struct s_smc *smc, struct smt_p_latency *latency);
static void smt_fill_neighbor(struct s_smc *smc, struct smt_p_neighbor *neighbor);
static int smt_fill_path(struct s_smc *smc, struct smt_p_path *path);
static void smt_fill_mac_status(struct s_smc *smc, struct smt_p_mac_status *st);
static void smt_fill_lem(struct s_smc *smc, struct smt_p_lem *lem, int phy);
static void smt_fill_version(struct s_smc *smc, struct smt_p_version *vers);
static void smt_fill_fsc(struct s_smc *smc, struct smt_p_fsc *fsc);
static void smt_fill_mac_counter(struct s_smc *smc, struct smt_p_mac_counter *mc);
static void smt_fill_mac_fnc(struct s_smc *smc, struct smt_p_mac_fnc *fnc);
static void smt_fill_manufacturer(struct s_smc *smc,
struct smp_p_manufacturer *man);
static void smt_fill_user(struct s_smc *smc, struct smp_p_user *user);
static void smt_fill_setcount(struct s_smc *smc, struct smt_p_setcount *setcount);
static void smt_fill_echo(struct s_smc *smc, struct smt_p_echo *echo, u_long seed,
int len);
static void smt_clear_una_dna(struct s_smc *smc);
static void smt_clear_old_una_dna(struct s_smc *smc);
#ifdef CONCENTRATOR
static int entity_to_index(void);
#endif
static void update_dac(struct s_smc *smc, int report);
static int div_ratio(u_long upper, u_long lower);
#ifdef USE_CAN_ADDR
static void hwm_conv_can(struct s_smc *smc, char *data, int len);
#else
#define hwm_conv_can(smc,data,len)
#endif
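/* the address helpers below compare FDDI addresses as three 16 bit words */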
static inline int is_my_addr(const struct s_smc *smc,
const struct fddi_addr *addr)
{
return(*(short *)(&addr->a[0]) ==
*(short *)(&smc->mib.m[MAC0].fddiMACSMTAddress.a[0])
&& *(short *)(&addr->a[2]) ==
*(short *)(&smc->mib.m[MAC0].fddiMACSMTAddress.a[2])
&& *(short *)(&addr->a[4]) ==
*(short *)(&smc->mib.m[MAC0].fddiMACSMTAddress.a[4])) ;
}
static inline int is_broadcast(const struct fddi_addr *addr)
{
return *(u_short *)(&addr->a[0]) == 0xffff &&
*(u_short *)(&addr->a[2]) == 0xffff &&
*(u_short *)(&addr->a[4]) == 0xffff;
}
static inline int is_individual(const struct fddi_addr *addr)
{
return !(addr->a[0] & GROUP_ADDR);
}
static inline int is_equal(const struct fddi_addr *addr1,
const struct fddi_addr *addr2)
{
return *(u_short *)(&addr1->a[0]) == *(u_short *)(&addr2->a[0]) &&
*(u_short *)(&addr1->a[2]) == *(u_short *)(&addr2->a[2]) &&
*(u_short *)(&addr1->a[4]) == *(u_short *)(&addr2->a[4]);
}
/*
* list of mandatory paras in frames
*/
static const u_short plist_nif[] = { SMT_P_UNA,SMT_P_SDE,SMT_P_STATE,0 } ;
/*
* init SMT agent
*/
void smt_agent_init(struct s_smc *smc)
{
int i ;
/*
* get MAC address
*/
smc->mib.m[MAC0].fddiMACSMTAddress = smc->hw.fddi_home_addr ;
/*
* get OUI address from driver (bia == built-in-address)
*/
smc->mib.fddiSMTStationId.sid_oem[0] = 0 ;
smc->mib.fddiSMTStationId.sid_oem[1] = 0 ;
driver_get_bia(smc,&smc->mib.fddiSMTStationId.sid_node) ;
for (i = 0 ; i < 6 ; i ++) {
smc->mib.fddiSMTStationId.sid_node.a[i] =
bitrev8(smc->mib.fddiSMTStationId.sid_node.a[i]);
}
smc->mib.fddiSMTManufacturerData[0] =
smc->mib.fddiSMTStationId.sid_node.a[0] ;
smc->mib.fddiSMTManufacturerData[1] =
smc->mib.fddiSMTStationId.sid_node.a[1] ;
smc->mib.fddiSMTManufacturerData[2] =
smc->mib.fddiSMTStationId.sid_node.a[2] ;
smc->sm.smt_tid = 0 ;
smc->mib.m[MAC0].fddiMACDupAddressTest = DA_NONE ;
smc->mib.m[MAC0].fddiMACUNDA_Flag = FALSE ;
#ifndef SLIM_SMT
smt_clear_una_dna(smc) ;
smt_clear_old_una_dna(smc) ;
#endif
for (i = 0 ; i < SMT_MAX_TEST ; i++)
smc->sm.pend[i] = 0 ;
smc->sm.please_reconnect = 0 ;
smc->sm.uniq_ticks = 0 ;
}
/*
* SMT task
* forever
* delay 30 seconds
* send NIF
* check tvu & tvd
* end
*/
void smt_agent_task(struct s_smc *smc)
{
smt_timer_start(smc,&smc->sm.smt_timer, (u_long)1000000L,
EV_TOKEN(EVENT_SMT,SM_TIMER)) ;
DB_SMT("SMT agent task");
}
#ifndef SMT_REAL_TOKEN_CT
void smt_emulate_token_ct(struct s_smc *smc, int mac_index)
{
u_long count;
u_long time;
time = smt_get_time();
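	/*
	 * The emulation credits roughly 100 token passes per second of
	 * elapsed time; the count is only added while the ring is up.
	 */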
count = ((time - smc->sm.last_tok_time[mac_index]) *
100)/TICKS_PER_SECOND;
/*
* Only when ring is up we will have a token count. The
* flag is unfortunately a single instance value. This
* doesn't matter now, because we currently have only
* one MAC instance.
*/
if (smc->hw.mac_ring_is_up){
smc->mib.m[mac_index].fddiMACToken_Ct += count;
}
/* Remember current time */
smc->sm.last_tok_time[mac_index] = time;
}
#endif
/*ARGSUSED1*/
void smt_event(struct s_smc *smc, int event)
{
u_long time ;
#ifndef SMT_REAL_TOKEN_CT
int i ;
#endif
if (smc->sm.please_reconnect) {
smc->sm.please_reconnect -- ;
if (smc->sm.please_reconnect == 0) {
/* Counted down */
queue_event(smc,EVENT_ECM,EC_CONNECT) ;
}
}
if (event == SM_FAST)
return ;
/*
* timer for periodic cleanup in driver
* reset and start the watchdog (FM2)
* ESS timer
* SBA timer
*/
smt_timer_poll(smc) ;
smt_start_watchdog(smc) ;
#ifndef SLIM_SMT
#ifndef BOOT
#ifdef ESS
ess_timer_poll(smc) ;
#endif
#endif
#ifdef SBA
sba_timer_poll(smc) ;
#endif
smt_srf_event(smc,0,0,0) ;
#endif /* no SLIM_SMT */
time = smt_get_time() ;
if (time - smc->sm.smt_last_lem >= TICKS_PER_SECOND*8) {
/*
		 * Use 8 seconds for the time interval; it simplifies the
		 * LER estimation.
*/
struct fddi_mib_m *mib ;
u_long upper ;
u_long lower ;
int cond ;
int port;
struct s_phy *phy ;
/*
* calculate LEM bit error rate
*/
sm_lem_evaluate(smc) ;
smc->sm.smt_last_lem = time ;
/*
* check conditions
*/
#ifndef SLIM_SMT
mac_update_counter(smc) ;
mib = smc->mib.m ;
upper =
(mib->fddiMACLost_Ct - mib->fddiMACOld_Lost_Ct) +
(mib->fddiMACError_Ct - mib->fddiMACOld_Error_Ct) ;
lower =
(mib->fddiMACFrame_Ct - mib->fddiMACOld_Frame_Ct) +
(mib->fddiMACLost_Ct - mib->fddiMACOld_Lost_Ct) ;
mib->fddiMACFrameErrorRatio = div_ratio(upper,lower) ;
cond =
((!mib->fddiMACFrameErrorThreshold &&
mib->fddiMACError_Ct != mib->fddiMACOld_Error_Ct) ||
(mib->fddiMACFrameErrorRatio >
mib->fddiMACFrameErrorThreshold)) ;
if (cond != mib->fddiMACFrameErrorFlag)
smt_srf_event(smc,SMT_COND_MAC_FRAME_ERROR,
INDEX_MAC,cond) ;
upper =
(mib->fddiMACNotCopied_Ct - mib->fddiMACOld_NotCopied_Ct) ;
lower =
upper +
(mib->fddiMACCopied_Ct - mib->fddiMACOld_Copied_Ct) ;
mib->fddiMACNotCopiedRatio = div_ratio(upper,lower) ;
cond =
((!mib->fddiMACNotCopiedThreshold &&
mib->fddiMACNotCopied_Ct !=
mib->fddiMACOld_NotCopied_Ct)||
(mib->fddiMACNotCopiedRatio >
mib->fddiMACNotCopiedThreshold)) ;
if (cond != mib->fddiMACNotCopiedFlag)
smt_srf_event(smc,SMT_COND_MAC_NOT_COPIED,
INDEX_MAC,cond) ;
/*
* set old values
*/
mib->fddiMACOld_Frame_Ct = mib->fddiMACFrame_Ct ;
mib->fddiMACOld_Copied_Ct = mib->fddiMACCopied_Ct ;
mib->fddiMACOld_Error_Ct = mib->fddiMACError_Ct ;
mib->fddiMACOld_Lost_Ct = mib->fddiMACLost_Ct ;
mib->fddiMACOld_NotCopied_Ct = mib->fddiMACNotCopied_Ct ;
/*
* Check port EBError Condition
*/
for (port = 0; port < NUMPHYS; port ++) {
phy = &smc->y[port] ;
if (!phy->mib->fddiPORTHardwarePresent) {
continue;
}
cond = (phy->mib->fddiPORTEBError_Ct -
phy->mib->fddiPORTOldEBError_Ct > 5) ;
			/* Set the condition if more than 5 elasticity buffer
			 * errors occurred within the last 8 seconds.
*/
smt_srf_event(smc,SMT_COND_PORT_EB_ERROR,
(int) (INDEX_PORT+ phy->np) ,cond) ;
/*
* set old values
*/
phy->mib->fddiPORTOldEBError_Ct =
phy->mib->fddiPORTEBError_Ct ;
}
#endif /* no SLIM_SMT */
}
#ifndef SLIM_SMT
if (time - smc->sm.smt_last_notify >= (u_long)
(smc->mib.fddiSMTTT_Notify * TICKS_PER_SECOND) ) {
/*
* we can either send an announcement or a request
* a request will trigger a reply so that we can update
* our dna
* note: same tid must be used until reply is received
*/
if (!smc->sm.pend[SMT_TID_NIF])
smc->sm.pend[SMT_TID_NIF] = smt_get_tid(smc) ;
smt_send_nif(smc,&fddi_broadcast, FC_SMT_NSA,
smc->sm.pend[SMT_TID_NIF], SMT_REQUEST,0) ;
smc->sm.smt_last_notify = time ;
}
/*
* check timer
*/
if (smc->sm.smt_tvu &&
time - smc->sm.smt_tvu > 228*TICKS_PER_SECOND) {
DB_SMT("SMT : UNA expired");
smc->sm.smt_tvu = 0 ;
if (!is_equal(&smc->mib.m[MAC0].fddiMACUpstreamNbr,
&SMT_Unknown)){
/* Do not update unknown address */
smc->mib.m[MAC0].fddiMACOldUpstreamNbr=
smc->mib.m[MAC0].fddiMACUpstreamNbr ;
}
smc->mib.m[MAC0].fddiMACUpstreamNbr = SMT_Unknown ;
smc->mib.m[MAC0].fddiMACUNDA_Flag = FALSE ;
/*
* Make sure the fddiMACUNDA_Flag = FALSE is
* included in the SRF so we don't generate
* a separate SRF for the deassertion of this
* condition
*/
update_dac(smc,0) ;
smt_srf_event(smc, SMT_EVENT_MAC_NEIGHBOR_CHANGE,
INDEX_MAC,0) ;
}
if (smc->sm.smt_tvd &&
time - smc->sm.smt_tvd > 228*TICKS_PER_SECOND) {
DB_SMT("SMT : DNA expired");
smc->sm.smt_tvd = 0 ;
if (!is_equal(&smc->mib.m[MAC0].fddiMACDownstreamNbr,
&SMT_Unknown)){
/* Do not update unknown address */
smc->mib.m[MAC0].fddiMACOldDownstreamNbr=
smc->mib.m[MAC0].fddiMACDownstreamNbr ;
}
smc->mib.m[MAC0].fddiMACDownstreamNbr = SMT_Unknown ;
smt_srf_event(smc, SMT_EVENT_MAC_NEIGHBOR_CHANGE,
INDEX_MAC,0) ;
}
#endif /* no SLIM_SMT */
#ifndef SMT_REAL_TOKEN_CT
/*
* Token counter emulation section. If hardware supports the token
* count, the token counter will be updated in mac_update_counter.
*/
for (i = MAC0; i < NUMMACS; i++ ){
if (time - smc->sm.last_tok_time[i] > 2*TICKS_PER_SECOND ){
smt_emulate_token_ct( smc, i );
}
}
#endif
smt_timer_start(smc,&smc->sm.smt_timer, (u_long)1000000L,
EV_TOKEN(EVENT_SMT,SM_TIMER)) ;
}
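/*
 * Return upper/lower as a fixed point ratio scaled by 2^16; the
 * numerator is saturated to avoid overflow and 0 is returned when
 * lower is zero.
 */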
static int div_ratio(u_long upper, u_long lower)
{
if ((upper<<16L) < upper)
upper = 0xffff0000L ;
else
upper <<= 16L ;
if (!lower)
return 0;
return (int)(upper/lower) ;
}
#ifndef SLIM_SMT
/*
* receive packet handler
*/
void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
/* int fs; frame status */
{
struct smt_header *sm ;
int local ;
int illegal = 0 ;
switch (m_fc(mb)) {
case FC_SMT_INFO :
case FC_SMT_LAN_LOC :
case FC_SMT_LOC :
case FC_SMT_NSA :
break ;
default :
smt_free_mbuf(smc,mb) ;
return ;
}
smc->mib.m[MAC0].fddiMACSMTCopied_Ct++ ;
sm = smtod(mb,struct smt_header *) ;
local = ((fs & L_INDICATOR) != 0) ;
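	/* with USE_CAN_ADDR the 12 header address bytes (DA + SA) are bit order converted */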
hwm_conv_can(smc,(char *)sm,12) ;
/* check destination address */
if (is_individual(&sm->smt_dest) && !is_my_addr(smc,&sm->smt_dest)) {
smt_free_mbuf(smc,mb) ;
return ;
}
#if 0 /* for DUP recognition, do NOT filter them */
/* ignore loop back packets */
if (is_my_addr(smc,&sm->smt_source) && !local) {
smt_free_mbuf(smc,mb) ;
return ;
}
#endif
smt_swap_para(sm,(int) mb->sm_len,1) ;
DB_SMT("SMT : received packet [%s] at 0x%p",
smt_type_name[m_fc(mb) & 0xf], sm);
DB_SMT("SMT : version %d, class %s",
sm->smt_version,
smt_class_name[sm->smt_class > LAST_CLASS ? 0 : sm->smt_class]);
#ifdef SBA
/*
* check if NSA frame
*/
if (m_fc(mb) == FC_SMT_NSA && sm->smt_class == SMT_NIF &&
(sm->smt_type == SMT_ANNOUNCE || sm->smt_type == SMT_REQUEST)) {
smc->sba.sm = sm ;
sba(smc,NIF) ;
}
#endif
/*
* ignore any packet with NSA and A-indicator set
*/
if ( (fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) {
DB_SMT("SMT : ignoring NSA with A-indicator set from %pM",
&sm->smt_source);
smt_free_mbuf(smc,mb) ;
return ;
}
/*
* ignore frames with illegal length
*/
if (((sm->smt_class == SMT_ECF) && (sm->smt_len > SMT_MAX_ECHO_LEN)) ||
((sm->smt_class != SMT_ECF) && (sm->smt_len > SMT_MAX_INFO_LEN))) {
smt_free_mbuf(smc,mb) ;
return ;
}
/*
* check SMT version
*/
switch (sm->smt_class) {
case SMT_NIF :
case SMT_SIF_CONFIG :
case SMT_SIF_OPER :
case SMT_ECF :
if (sm->smt_version != SMT_VID)
illegal = 1;
break ;
default :
if (sm->smt_version != SMT_VID_2)
illegal = 1;
break ;
}
if (illegal) {
DB_SMT("SMT : version = %d, dest = %pM",
sm->smt_version, &sm->smt_source);
smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_VERSION,local) ;
smt_free_mbuf(smc,mb) ;
return ;
}
if ((sm->smt_len > mb->sm_len - sizeof(struct smt_header)) ||
((sm->smt_len & 3) && (sm->smt_class != SMT_ECF))) {
DB_SMT("SMT: info length error, len = %d", sm->smt_len);
smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH,local) ;
smt_free_mbuf(smc,mb) ;
return ;
}
switch (sm->smt_class) {
case SMT_NIF :
if (smt_check_para(smc,sm,plist_nif)) {
DB_SMT("SMT: NIF with para problem, ignoring");
break ;
}
switch (sm->smt_type) {
case SMT_ANNOUNCE :
case SMT_REQUEST :
if (!(fs & C_INDICATOR) && m_fc(mb) == FC_SMT_NSA
&& is_broadcast(&sm->smt_dest)) {
struct smt_p_state *st ;
/* set my UNA */
if (!is_equal(
&smc->mib.m[MAC0].fddiMACUpstreamNbr,
&sm->smt_source)) {
DB_SMT("SMT : updated my UNA = %pM",
&sm->smt_source);
if (!is_equal(&smc->mib.m[MAC0].
fddiMACUpstreamNbr,&SMT_Unknown)){
/* Do not update unknown address */
smc->mib.m[MAC0].fddiMACOldUpstreamNbr=
smc->mib.m[MAC0].fddiMACUpstreamNbr ;
}
smc->mib.m[MAC0].fddiMACUpstreamNbr =
sm->smt_source ;
smt_srf_event(smc,
SMT_EVENT_MAC_NEIGHBOR_CHANGE,
INDEX_MAC,0) ;
smt_echo_test(smc,0) ;
}
smc->sm.smt_tvu = smt_get_time() ;
st = (struct smt_p_state *)
sm_to_para(smc,sm,SMT_P_STATE) ;
if (st) {
smc->mib.m[MAC0].fddiMACUNDA_Flag =
(st->st_dupl_addr & SMT_ST_MY_DUPA) ?
TRUE : FALSE ;
update_dac(smc,1) ;
}
}
if ((sm->smt_type == SMT_REQUEST) &&
is_individual(&sm->smt_source) &&
((!(fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) ||
(m_fc(mb) != FC_SMT_NSA))) {
DB_SMT("SMT : replying to NIF request %pM",
&sm->smt_source);
smt_send_nif(smc,&sm->smt_source,
FC_SMT_INFO,
sm->smt_tid,
SMT_REPLY,local) ;
}
break ;
case SMT_REPLY :
DB_SMT("SMT : received NIF response from %pM",
&sm->smt_source);
if (fs & A_INDICATOR) {
smc->sm.pend[SMT_TID_NIF] = 0 ;
DB_SMT("SMT : duplicate address");
smc->mib.m[MAC0].fddiMACDupAddressTest =
DA_FAILED ;
smc->r.dup_addr_test = DA_FAILED ;
queue_event(smc,EVENT_RMT,RM_DUP_ADDR) ;
smc->mib.m[MAC0].fddiMACDA_Flag = TRUE ;
update_dac(smc,1) ;
break ;
}
if (sm->smt_tid == smc->sm.pend[SMT_TID_NIF]) {
smc->sm.pend[SMT_TID_NIF] = 0 ;
/* set my DNA */
if (!is_equal(
&smc->mib.m[MAC0].fddiMACDownstreamNbr,
&sm->smt_source)) {
DB_SMT("SMT : updated my DNA");
if (!is_equal(&smc->mib.m[MAC0].
fddiMACDownstreamNbr, &SMT_Unknown)){
/* Do not update unknown address */
smc->mib.m[MAC0].fddiMACOldDownstreamNbr =
smc->mib.m[MAC0].fddiMACDownstreamNbr ;
}
smc->mib.m[MAC0].fddiMACDownstreamNbr =
sm->smt_source ;
smt_srf_event(smc,
SMT_EVENT_MAC_NEIGHBOR_CHANGE,
INDEX_MAC,0) ;
smt_echo_test(smc,1) ;
}
smc->mib.m[MAC0].fddiMACDA_Flag = FALSE ;
update_dac(smc,1) ;
smc->sm.smt_tvd = smt_get_time() ;
smc->mib.m[MAC0].fddiMACDupAddressTest =
DA_PASSED ;
if (smc->r.dup_addr_test != DA_PASSED) {
smc->r.dup_addr_test = DA_PASSED ;
queue_event(smc,EVENT_RMT,RM_DUP_ADDR) ;
}
}
else if (sm->smt_tid ==
smc->sm.pend[SMT_TID_NIF_TEST]) {
DB_SMT("SMT : NIF test TID ok");
}
else {
DB_SMT("SMT : expected TID %lx, got %x",
smc->sm.pend[SMT_TID_NIF], sm->smt_tid);
}
break ;
default :
illegal = 2 ;
break ;
}
break ;
case SMT_SIF_CONFIG : /* station information */
if (sm->smt_type != SMT_REQUEST)
break ;
DB_SMT("SMT : replying to SIF Config request from %pM",
&sm->smt_source);
smt_send_sif_config(smc,&sm->smt_source,sm->smt_tid,local) ;
break ;
case SMT_SIF_OPER : /* station information */
if (sm->smt_type != SMT_REQUEST)
break ;
DB_SMT("SMT : replying to SIF Operation request from %pM",
&sm->smt_source);
smt_send_sif_operation(smc,&sm->smt_source,sm->smt_tid,local) ;
break ;
case SMT_ECF : /* echo frame */
switch (sm->smt_type) {
case SMT_REPLY :
smc->mib.priv.fddiPRIVECF_Reply_Rx++ ;
DB_SMT("SMT: received ECF reply from %pM",
&sm->smt_source);
if (sm_to_para(smc,sm,SMT_P_ECHODATA) == NULL) {
DB_SMT("SMT: ECHODATA missing");
break ;
}
if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF]) {
DB_SMT("SMT : ECF test TID ok");
}
else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_UNA]) {
DB_SMT("SMT : ECF test UNA ok");
}
else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_DNA]) {
DB_SMT("SMT : ECF test DNA ok");
}
else {
DB_SMT("SMT : expected TID %lx, got %x",
smc->sm.pend[SMT_TID_ECF],
sm->smt_tid);
}
break ;
case SMT_REQUEST :
smc->mib.priv.fddiPRIVECF_Req_Rx++ ;
{
if (sm->smt_len && !sm_to_para(smc,sm,SMT_P_ECHODATA)) {
DB_SMT("SMT: ECF with para problem,sending RDF");
smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH,
local) ;
break ;
}
DB_SMT("SMT - sending ECF reply to %pM",
&sm->smt_source);
/* set destination addr. & reply */
sm->smt_dest = sm->smt_source ;
sm->smt_type = SMT_REPLY ;
dump_smt(smc,sm,"ECF REPLY") ;
smc->mib.priv.fddiPRIVECF_Reply_Tx++ ;
smt_send_frame(smc,mb,FC_SMT_INFO,local) ;
return ; /* DON'T free mbuf */
}
default :
illegal = 1 ;
break ;
}
break ;
#ifndef BOOT
case SMT_RAF : /* resource allocation */
#ifdef ESS
DB_ESSN(2, "ESS: RAF frame received");
fs = ess_raf_received_pack(smc,mb,sm,fs) ;
#endif
#ifdef SBA
DB_SBAN(2, "SBA: RAF frame received") ;
sba_raf_received_pack(smc,sm,fs) ;
#endif
break ;
case SMT_RDF : /* request denied */
smc->mib.priv.fddiPRIVRDF_Rx++ ;
break ;
case SMT_ESF : /* extended service - not supported */
if (sm->smt_type == SMT_REQUEST) {
DB_SMT("SMT - received ESF, sending RDF");
smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ;
}
break ;
case SMT_PMF_GET :
case SMT_PMF_SET :
if (sm->smt_type != SMT_REQUEST)
break ;
/* update statistics */
if (sm->smt_class == SMT_PMF_GET)
smc->mib.priv.fddiPRIVPMF_Get_Rx++ ;
else
smc->mib.priv.fddiPRIVPMF_Set_Rx++ ;
/*
* ignore PMF SET with I/G set
*/
if ((sm->smt_class == SMT_PMF_SET) &&
!is_individual(&sm->smt_dest)) {
DB_SMT("SMT: ignoring PMF-SET with I/G set");
break ;
}
smt_pmf_received_pack(smc,mb, local) ;
break ;
case SMT_SRF :
dump_smt(smc,sm,"SRF received") ;
break ;
default :
if (sm->smt_type != SMT_REQUEST)
break ;
/*
* For frames with unknown class:
* we need to send a RDF frame according to 8.1.3.1.1,
* only if it is a REQUEST.
*/
DB_SMT("SMT : class = %d, send RDF to %pM",
sm->smt_class, &sm->smt_source);
smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ;
break ;
#endif
}
if (illegal) {
DB_SMT("SMT: discarding invalid frame, reason = %d", illegal);
}
smt_free_mbuf(smc,mb) ;
}
static void update_dac(struct s_smc *smc, int report)
{
int cond ;
cond = ( smc->mib.m[MAC0].fddiMACUNDA_Flag |
smc->mib.m[MAC0].fddiMACDA_Flag) != 0 ;
if (report && (cond != smc->mib.m[MAC0].fddiMACDuplicateAddressCond))
smt_srf_event(smc, SMT_COND_MAC_DUP_ADDR,INDEX_MAC,cond) ;
else
smc->mib.m[MAC0].fddiMACDuplicateAddressCond = cond ;
}
/*
* send SMT frame
* set source address
* set station ID
* send frame
*/
void smt_send_frame(struct s_smc *smc, SMbuf *mb, int fc, int local)
/* SMbuf *mb; buffer to send */
/* int fc; FC value */
{
struct smt_header *sm ;
if (!smc->r.sm_ma_avail && !local) {
smt_free_mbuf(smc,mb) ;
return ;
}
sm = smtod(mb,struct smt_header *) ;
sm->smt_source = smc->mib.m[MAC0].fddiMACSMTAddress ;
sm->smt_sid = smc->mib.fddiSMTStationId ;
smt_swap_para(sm,(int) mb->sm_len,0) ; /* swap para & header */
hwm_conv_can(smc,(char *)sm,12) ; /* convert SA and DA */
smc->mib.m[MAC0].fddiMACSMTTransmit_Ct++ ;
smt_send_mbuf(smc,mb,local ? FC_SMT_LOC : fc) ;
}
/*
* generate and send RDF
*/
static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
int local)
/* SMbuf *rej; mbuf of offending frame */
/* int fc; FC of denied frame */
/* int reason; reason code */
{
SMbuf *mb ;
struct smt_header *sm ; /* header of offending frame */
struct smt_rdf *rdf ;
int len ;
int frame_len ;
sm = smtod(rej,struct smt_header *) ;
if (sm->smt_type != SMT_REQUEST)
return ;
DB_SMT("SMT: sending RDF to %pM,reason = 0x%x",
&sm->smt_source, reason);
/*
 * note: get the frame length from the MAC length, NOT from the SMT
 * header; the SMT header length is included in sm_len
*/
frame_len = rej->sm_len ;
if (!(mb=smt_build_frame(smc,SMT_RDF,SMT_REPLY,sizeof(struct smt_rdf))))
return ;
rdf = smtod(mb,struct smt_rdf *) ;
rdf->smt.smt_tid = sm->smt_tid ; /* use TID from sm */
rdf->smt.smt_dest = sm->smt_source ; /* set dest = source */
/* set P12 */
rdf->reason.para.p_type = SMT_P_REASON ;
rdf->reason.para.p_len = sizeof(struct smt_p_reason) - PARA_LEN ;
rdf->reason.rdf_reason = reason ;
/* set P14 */
rdf->version.para.p_type = SMT_P_VERSION ;
rdf->version.para.p_len = sizeof(struct smt_p_version) - PARA_LEN ;
rdf->version.v_pad = 0 ;
rdf->version.v_n = 1 ;
rdf->version.v_index = 1 ;
rdf->version.v_version[0] = SMT_VID_2 ;
rdf->version.v_pad2 = 0 ;
/* set P13 */
if ((unsigned int) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) +
2*sizeof(struct smt_header))
len = frame_len ;
else
len = SMT_MAX_INFO_LEN - sizeof(*rdf) +
2*sizeof(struct smt_header) ;
/* make length multiple of 4 */
len &= ~3 ;
rdf->refused.para.p_type = SMT_P_REFUSED ;
/* length of para is smt_frame + ref_fc */
rdf->refused.para.p_len = len + 4 ;
rdf->refused.ref_fc = fc ;
/* swap it back */
smt_swap_para(sm,frame_len,0) ;
memcpy((char *) &rdf->refused.ref_header,(char *) sm,len) ;
len -= sizeof(struct smt_header) ;
mb->sm_len += len ;
rdf->smt.smt_len += len ;
dump_smt(smc,(struct smt_header *)rdf,"RDF") ;
smc->mib.priv.fddiPRIVRDF_Tx++ ;
smt_send_frame(smc,mb,FC_SMT_INFO,local) ;
}
/*
* generate and send NIF
*/
static void smt_send_nif(struct s_smc *smc, const struct fddi_addr *dest,
int fc, u_long tid, int type, int local)
/* struct fddi_addr *dest; dest address */
/* int fc; frame control */
/* u_long tid; transaction id */
/* int type; frame type */
{
struct smt_nif *nif ;
SMbuf *mb ;
if (!(mb = smt_build_frame(smc,SMT_NIF,type,sizeof(struct smt_nif))))
return ;
nif = smtod(mb, struct smt_nif *) ;
smt_fill_una(smc,&nif->una) ; /* set UNA */
smt_fill_sde(smc,&nif->sde) ; /* set station descriptor */
smt_fill_state(smc,&nif->state) ; /* set state information */
#ifdef SMT6_10
smt_fill_fsc(smc,&nif->fsc) ; /* set frame status cap. */
#endif
nif->smt.smt_dest = *dest ; /* destination address */
nif->smt.smt_tid = tid ; /* transaction ID */
dump_smt(smc,(struct smt_header *)nif,"NIF") ;
smt_send_frame(smc,mb,fc,local) ;
}
#ifdef DEBUG
/*
* send NIF request (test purpose)
*/
static void smt_send_nif_request(struct s_smc *smc, struct fddi_addr *dest)
{
smc->sm.pend[SMT_TID_NIF_TEST] = smt_get_tid(smc) ;
smt_send_nif(smc,dest, FC_SMT_INFO, smc->sm.pend[SMT_TID_NIF_TEST],
SMT_REQUEST,0) ;
}
/*
* send ECF request (test purpose)
*/
static void smt_send_ecf_request(struct s_smc *smc, struct fddi_addr *dest,
int len)
{
smc->sm.pend[SMT_TID_ECF] = smt_get_tid(smc) ;
smt_send_ecf(smc,dest, FC_SMT_INFO, smc->sm.pend[SMT_TID_ECF],
SMT_REQUEST,len) ;
}
#endif
/*
* echo test
*/
static void smt_echo_test(struct s_smc *smc, int dna)
{
u_long tid ;
smc->sm.pend[dna ? SMT_TID_ECF_DNA : SMT_TID_ECF_UNA] =
tid = smt_get_tid(smc) ;
smt_send_ecf(smc, dna ?
&smc->mib.m[MAC0].fddiMACDownstreamNbr :
&smc->mib.m[MAC0].fddiMACUpstreamNbr,
FC_SMT_INFO,tid, SMT_REQUEST, (SMT_TEST_ECHO_LEN & ~3)-8) ;
}
/*
* generate and send ECF
*/
static void smt_send_ecf(struct s_smc *smc, struct fddi_addr *dest, int fc,
u_long tid, int type, int len)
/* struct fddi_addr *dest; dest address */
/* int fc; frame control */
/* u_long tid; transaction id */
/* int type; frame type */
/* int len; frame length */
{
struct smt_ecf *ecf ;
SMbuf *mb ;
if (!(mb = smt_build_frame(smc,SMT_ECF,type,SMT_ECF_LEN + len)))
return ;
ecf = smtod(mb, struct smt_ecf *) ;
smt_fill_echo(smc,&ecf->ec_echo,tid,len) ; /* set ECHO */
ecf->smt.smt_dest = *dest ; /* destination address */
ecf->smt.smt_tid = tid ; /* transaction ID */
smc->mib.priv.fddiPRIVECF_Req_Tx++ ;
smt_send_frame(smc,mb,fc,0) ;
}
/*
* generate and send SIF config response
*/
static void smt_send_sif_config(struct s_smc *smc, struct fddi_addr *dest,
u_long tid, int local)
/* struct fddi_addr *dest; dest address */
/* u_long tid; transaction id */
{
struct smt_sif_config *sif ;
SMbuf *mb ;
int len ;
if (!(mb = smt_build_frame(smc,SMT_SIF_CONFIG,SMT_REPLY,
SIZEOF_SMT_SIF_CONFIG)))
return ;
sif = smtod(mb, struct smt_sif_config *) ;
smt_fill_timestamp(smc,&sif->ts) ; /* set time stamp */
smt_fill_sde(smc,&sif->sde) ; /* set station descriptor */
smt_fill_version(smc,&sif->version) ; /* set version information */
smt_fill_state(smc,&sif->state) ; /* set state information */
smt_fill_policy(smc,&sif->policy) ; /* set station policy */
smt_fill_latency(smc,&sif->latency); /* set station latency */
smt_fill_neighbor(smc,&sif->neighbor); /* set station neighbor */
smt_fill_setcount(smc,&sif->setcount) ; /* set count */
len = smt_fill_path(smc,&sif->path); /* set station path descriptor*/
sif->smt.smt_dest = *dest ; /* destination address */
sif->smt.smt_tid = tid ; /* transaction ID */
smt_add_frame_len(mb,len) ; /* adjust length fields */
dump_smt(smc,(struct smt_header *)sif,"SIF Configuration Reply") ;
smt_send_frame(smc,mb,FC_SMT_INFO,local) ;
}
/*
* generate and send SIF operation response
*/
static void smt_send_sif_operation(struct s_smc *smc, struct fddi_addr *dest,
u_long tid, int local)
/* struct fddi_addr *dest; dest address */
/* u_long tid; transaction id */
{
struct smt_sif_operation *sif ;
SMbuf *mb ;
int ports ;
int i ;
ports = NUMPHYS ;
#ifndef CONCENTRATOR
if (smc->s.sas == SMT_SAS)
ports = 1 ;
#endif
if (!(mb = smt_build_frame(smc,SMT_SIF_OPER,SMT_REPLY,
struct_size(sif, lem, ports))))
return ;
sif = smtod(mb, typeof(sif));
smt_fill_timestamp(smc,&sif->ts) ; /* set time stamp */
smt_fill_mac_status(smc,&sif->status) ; /* set mac status */
smt_fill_mac_counter(smc,&sif->mc) ; /* set mac counter field */
smt_fill_mac_fnc(smc,&sif->fnc) ; /* set frame not copied counter */
smt_fill_manufacturer(smc,&sif->man) ; /* set manufacturer field */
smt_fill_user(smc,&sif->user) ; /* set user field */
smt_fill_setcount(smc,&sif->setcount) ; /* set count */
/*
* set link error mon information
*/
if (ports == 1) {
smt_fill_lem(smc,sif->lem,PS) ;
}
else {
for (i = 0 ; i < ports ; i++) {
smt_fill_lem(smc,&sif->lem[i],i) ;
}
}
sif->smt.smt_dest = *dest ; /* destination address */
sif->smt.smt_tid = tid ; /* transaction ID */
dump_smt(smc,(struct smt_header *)sif,"SIF Operation Reply") ;
smt_send_frame(smc,mb,FC_SMT_INFO,local) ;
}
/*
* get and initialize SMT frame
*/
SMbuf *smt_build_frame(struct s_smc *smc, int class, int type,
int length)
{
SMbuf *mb ;
struct smt_header *smt ;
#if 0
if (!smc->r.sm_ma_avail) {
return 0;
}
#endif
if (!(mb = smt_get_mbuf(smc)))
return mb;
mb->sm_len = length ;
smt = smtod(mb, struct smt_header *) ;
smt->smt_dest = fddi_broadcast ; /* set dest = broadcast */
smt->smt_class = class ;
smt->smt_type = type ;
switch (class) {
case SMT_NIF :
case SMT_SIF_CONFIG :
case SMT_SIF_OPER :
case SMT_ECF :
smt->smt_version = SMT_VID ;
break ;
default :
smt->smt_version = SMT_VID_2 ;
break ;
}
smt->smt_tid = smt_get_tid(smc) ; /* set transaction ID */
smt->smt_pad = 0 ;
smt->smt_len = length - sizeof(struct smt_header) ;
return mb;
}
static void smt_add_frame_len(SMbuf *mb, int len)
{
struct smt_header *smt ;
smt = smtod(mb, struct smt_header *) ;
smt->smt_len += len ;
mb->sm_len += len ;
}
/*
* fill values in UNA parameter
*/
static void smt_fill_una(struct s_smc *smc, struct smt_p_una *una)
{
SMTSETPARA(una,SMT_P_UNA) ;
una->una_pad = 0 ;
una->una_node = smc->mib.m[MAC0].fddiMACUpstreamNbr ;
}
/*
* fill values in SDE parameter
*/
static void smt_fill_sde(struct s_smc *smc, struct smt_p_sde *sde)
{
SMTSETPARA(sde,SMT_P_SDE) ;
sde->sde_non_master = smc->mib.fddiSMTNonMaster_Ct ;
sde->sde_master = smc->mib.fddiSMTMaster_Ct ;
sde->sde_mac_count = NUMMACS ; /* only 1 MAC */
#ifdef CONCENTRATOR
sde->sde_type = SMT_SDE_CONCENTRATOR ;
#else
sde->sde_type = SMT_SDE_STATION ;
#endif
}
/*
* fill in values in station state parameter
*/
static void smt_fill_state(struct s_smc *smc, struct smt_p_state *state)
{
int top ;
int twist ;
SMTSETPARA(state,SMT_P_STATE) ;
state->st_pad = 0 ;
/* determine topology */
top = 0 ;
if (smc->mib.fddiSMTPeerWrapFlag) {
top |= SMT_ST_WRAPPED ; /* state wrapped */
}
#ifdef CONCENTRATOR
if (cfm_status_unattached(smc)) {
top |= SMT_ST_UNATTACHED ; /* unattached concentrator */
}
#endif
if ((twist = pcm_status_twisted(smc)) & 1) {
top |= SMT_ST_TWISTED_A ; /* twisted cable */
}
if (twist & 2) {
top |= SMT_ST_TWISTED_B ; /* twisted cable */
}
#ifdef OPT_SRF
top |= SMT_ST_SRF ;
#endif
if (pcm_rooted_station(smc))
top |= SMT_ST_ROOTED_S ;
if (smc->mib.a[0].fddiPATHSbaPayload != 0)
top |= SMT_ST_SYNC_SERVICE ;
state->st_topology = top ;
state->st_dupl_addr =
((smc->mib.m[MAC0].fddiMACDA_Flag ? SMT_ST_MY_DUPA : 0 ) |
(smc->mib.m[MAC0].fddiMACUNDA_Flag ? SMT_ST_UNA_DUPA : 0)) ;
}
/*
* fill values in timestamp parameter
*/
static void smt_fill_timestamp(struct s_smc *smc, struct smt_p_timestamp *ts)
{
SMTSETPARA(ts,SMT_P_TIMESTAMP) ;
smt_set_timestamp(smc,ts->ts_time) ;
}
void smt_set_timestamp(struct s_smc *smc, u_char *p)
{
u_long time ;
u_long utime ;
/*
 * timestamp is 64 bits long ; resolution is 80 ns
 * our clock resolution is 10 ms
 * 10 ms / 80 ns = 125000 ~ 2^17 = 131072
*/
utime = smt_get_time() ;
time = utime * 100 ;
time /= TICKS_PER_SECOND ;
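/*
 * time now holds 10 ms units; since 10 ms ~ 2^17 * 80 ns it is stored
 * shifted left by 17 bits into the big-endian 64 bit timestamp: bytes
 * 1..5 hold time << 1, bytes 6..7 hold uniq_ticks to keep timestamps
 * generated within the same tick unique.
 */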
p[0] = 0 ;
p[1] = (u_char)((time>>(8+8+8+8-1)) & 1) ;
p[2] = (u_char)(time>>(8+8+8-1)) ;
p[3] = (u_char)(time>>(8+8-1)) ;
p[4] = (u_char)(time>>(8-1)) ;
p[5] = (u_char)(time<<1) ;
p[6] = (u_char)(smc->sm.uniq_ticks>>8) ;
p[7] = (u_char)smc->sm.uniq_ticks ;
/*
* make sure we don't wrap: restart whenever the upper digits change
*/
if (utime != smc->sm.uniq_time) {
smc->sm.uniq_ticks = 0 ;
}
smc->sm.uniq_ticks++ ;
smc->sm.uniq_time = utime ;
}
/*
* fill values in station policy parameter
*/
static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy)
{
int i ;
const u_char *map ;
u_short in ;
u_short out ;
/*
* MIB para 101b (fddiSMTConnectionPolicy) coding
* is different from 0005 coding
*/
static const u_char ansi_weirdness[16] = {
0,7,5,3,8,1,6,4,9,10,2,11,12,13,14,15
} ;
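/*
 * ansi_weirdness[i] gives the bit position in the 0005 (parameter)
 * coding that corresponds to bit i of the MIB coding of the
 * connection policy.
 */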
SMTSETPARA(policy,SMT_P_POLICY) ;
out = 0 ;
in = smc->mib.fddiSMTConnectionPolicy ;
for (i = 0, map = ansi_weirdness ; i < 16 ; i++) {
if (in & 1)
out |= (1<<*map) ;
in >>= 1 ;
map++ ;
}
policy->pl_config = smc->mib.fddiSMTConfigPolicy ;
policy->pl_connect = out ;
}
/*
* fill values in latency equivalent parameter
*/
static void smt_fill_latency(struct s_smc *smc, struct smt_p_latency *latency)
{
SMTSETPARA(latency,SMT_P_LATENCY) ;
latency->lt_phyout_idx1 = phy_index(smc,0) ;
latency->lt_latency1 = 10 ; /* in octets (byte clock) */
/*
* note: latency has two phy entries by definition
* for a SAS, the 2nd one is null
*/
if (smc->s.sas == SMT_DAS) {
latency->lt_phyout_idx2 = phy_index(smc,1) ;
latency->lt_latency2 = 10 ; /* in octets (byte clock) */
}
else {
latency->lt_phyout_idx2 = 0 ;
latency->lt_latency2 = 0 ;
}
}
/*
* fill values in MAC neighbors parameter
*/
static void smt_fill_neighbor(struct s_smc *smc, struct smt_p_neighbor *neighbor)
{
SMTSETPARA(neighbor,SMT_P_NEIGHBORS) ;
neighbor->nb_mib_index = INDEX_MAC ;
neighbor->nb_mac_index = mac_index(smc,1) ;
neighbor->nb_una = smc->mib.m[MAC0].fddiMACUpstreamNbr ;
neighbor->nb_dna = smc->mib.m[MAC0].fddiMACDownstreamNbr ;
}
/*
* fill values in path descriptor
*/
#ifdef CONCENTRATOR
#define ALLPHYS NUMPHYS
#else
#define ALLPHYS ((smc->s.sas == SMT_SAS) ? 1 : 2)
#endif
static int smt_fill_path(struct s_smc *smc, struct smt_p_path *path)
{
SK_LOC_DECL(int,type) ;
SK_LOC_DECL(int,state) ;
SK_LOC_DECL(int,remote) ;
SK_LOC_DECL(int,mac) ;
int len ;
int p ;
int physp ;
struct smt_phy_rec *phy ;
struct smt_mac_rec *pd_mac ;
len = PARA_LEN +
sizeof(struct smt_mac_rec) * NUMMACS +
sizeof(struct smt_phy_rec) * ALLPHYS ;
path->para.p_type = SMT_P_PATH ;
path->para.p_len = len - PARA_LEN ;
/* PHYs */
for (p = 0,phy = path->pd_phy ; p < ALLPHYS ; p++, phy++) {
physp = p ;
#ifndef CONCENTRATOR
if (smc->s.sas == SMT_SAS)
physp = PS ;
#endif
pcm_status_state(smc,physp,&type,&state,&remote,&mac) ;
#ifdef LITTLE_ENDIAN
phy->phy_mib_index = smt_swap_short((u_short)p+INDEX_PORT) ;
#else
phy->phy_mib_index = p+INDEX_PORT ;
#endif
phy->phy_type = type ;
phy->phy_connect_state = state ;
phy->phy_remote_type = remote ;
phy->phy_remote_mac = mac ;
phy->phy_resource_idx = phy_con_resource_index(smc,p) ;
}
/* MAC */
pd_mac = (struct smt_mac_rec *) phy ;
pd_mac->mac_addr = smc->mib.m[MAC0].fddiMACSMTAddress ;
pd_mac->mac_resource_idx = mac_con_resource_index(smc,1) ;
return len;
}
/*
* fill values in mac status
*/
static void smt_fill_mac_status(struct s_smc *smc, struct smt_p_mac_status *st)
{
SMTSETPARA(st,SMT_P_MAC_STATUS) ;
st->st_mib_index = INDEX_MAC ;
st->st_mac_index = mac_index(smc,1) ;
mac_update_counter(smc) ;
/*
* timer values are represented in SMT as 2's complement numbers
* units : internal : 2's complement BCLK
*/
st->st_t_req = smc->mib.m[MAC0].fddiMACT_Req ;
st->st_t_neg = smc->mib.m[MAC0].fddiMACT_Neg ;
st->st_t_max = smc->mib.m[MAC0].fddiMACT_Max ;
st->st_tvx_value = smc->mib.m[MAC0].fddiMACTvxValue ;
st->st_t_min = smc->mib.m[MAC0].fddiMACT_Min ;
st->st_sba = smc->mib.a[PATH0].fddiPATHSbaPayload ;
st->st_frame_ct = smc->mib.m[MAC0].fddiMACFrame_Ct ;
st->st_error_ct = smc->mib.m[MAC0].fddiMACError_Ct ;
st->st_lost_ct = smc->mib.m[MAC0].fddiMACLost_Ct ;
}
/*
* fill values in LEM status
*/
static void smt_fill_lem(struct s_smc *smc, struct smt_p_lem *lem, int phy)
{
struct fddi_mib_p *mib ;
mib = smc->y[phy].mib ;
SMTSETPARA(lem,SMT_P_LEM) ;
lem->lem_mib_index = phy+INDEX_PORT ;
lem->lem_phy_index = phy_index(smc,phy) ;
lem->lem_pad2 = 0 ;
lem->lem_cutoff = mib->fddiPORTLer_Cutoff ;
lem->lem_alarm = mib->fddiPORTLer_Alarm ;
/* long term bit error rate */
lem->lem_estimate = mib->fddiPORTLer_Estimate ;
/* # of rejected connections */
lem->lem_reject_ct = mib->fddiPORTLem_Reject_Ct ;
lem->lem_ct = mib->fddiPORTLem_Ct ; /* total number of errors */
}
/*
* fill version parameter
*/
static void smt_fill_version(struct s_smc *smc, struct smt_p_version *vers)
{
SK_UNUSED(smc) ;
SMTSETPARA(vers,SMT_P_VERSION) ;
vers->v_pad = 0 ;
vers->v_n = 1 ; /* one version is enough .. */
vers->v_index = 1 ;
vers->v_version[0] = SMT_VID_2 ;
vers->v_pad2 = 0 ;
}
#ifdef SMT6_10
/*
* fill frame status capabilities
*/
/*
* note: this para 200B is NOT in swap table, because it's also set in
* PMF add_para
*/
static void smt_fill_fsc(struct s_smc *smc, struct smt_p_fsc *fsc)
{
SK_UNUSED(smc) ;
SMTSETPARA(fsc,SMT_P_FSC) ;
fsc->fsc_pad0 = 0 ;
fsc->fsc_mac_index = INDEX_MAC ; /* this is the MIB index ; the MIB
 * index is NOT mac_index() !
 */
fsc->fsc_pad1 = 0 ;
fsc->fsc_value = FSC_TYPE0 ; /* "normal" node */
#ifdef LITTLE_ENDIAN
fsc->fsc_mac_index = smt_swap_short(INDEX_MAC) ;
fsc->fsc_value = smt_swap_short(FSC_TYPE0) ;
#endif
}
#endif
/*
* fill mac counter field
*/
static void smt_fill_mac_counter(struct s_smc *smc, struct smt_p_mac_counter *mc)
{
SMTSETPARA(mc,SMT_P_MAC_COUNTER) ;
mc->mc_mib_index = INDEX_MAC ;
mc->mc_index = mac_index(smc,1) ;
mc->mc_receive_ct = smc->mib.m[MAC0].fddiMACCopied_Ct ;
mc->mc_transmit_ct = smc->mib.m[MAC0].fddiMACTransmit_Ct ;
}
/*
* fill mac frame not copied counter
*/
static void smt_fill_mac_fnc(struct s_smc *smc, struct smt_p_mac_fnc *fnc)
{
SMTSETPARA(fnc,SMT_P_MAC_FNC) ;
fnc->nc_mib_index = INDEX_MAC ;
fnc->nc_index = mac_index(smc,1) ;
fnc->nc_counter = smc->mib.m[MAC0].fddiMACNotCopied_Ct ;
}
/*
* fill manufacturer field
*/
static void smt_fill_manufacturer(struct s_smc *smc,
struct smp_p_manufacturer *man)
{
SMTSETPARA(man,SMT_P_MANUFACTURER) ;
memcpy((char *) man->mf_data,
(char *) smc->mib.fddiSMTManufacturerData,
sizeof(man->mf_data)) ;
}
/*
* fill user field
*/
static void smt_fill_user(struct s_smc *smc, struct smp_p_user *user)
{
SMTSETPARA(user,SMT_P_USER) ;
memcpy((char *) user->us_data,
(char *) smc->mib.fddiSMTUserData,
sizeof(user->us_data)) ;
}
/*
* fill set count
*/
static void smt_fill_setcount(struct s_smc *smc, struct smt_p_setcount *setcount)
{
SK_UNUSED(smc) ;
SMTSETPARA(setcount,SMT_P_SETCOUNT) ;
setcount->count = smc->mib.fddiSMTSetCount.count ;
memcpy((char *)setcount->timestamp,
(char *)smc->mib.fddiSMTSetCount.timestamp,8) ;
}
/*
* fill echo data
*/
static void smt_fill_echo(struct s_smc *smc, struct smt_p_echo *echo, u_long seed,
int len)
{
u_char *p ;
SK_UNUSED(smc) ;
SMTSETPARA(echo,SMT_P_ECHODATA) ;
echo->para.p_len = len ;
for (p = echo->ec_data ; len ; len--) {
*p++ = (u_char) seed ;
seed += 13 ;
}
}
/*
* clear DNA and UNA
* called from CFM if configuration changes
*/
static void smt_clear_una_dna(struct s_smc *smc)
{
smc->mib.m[MAC0].fddiMACUpstreamNbr = SMT_Unknown ;
smc->mib.m[MAC0].fddiMACDownstreamNbr = SMT_Unknown ;
}
static void smt_clear_old_una_dna(struct s_smc *smc)
{
smc->mib.m[MAC0].fddiMACOldUpstreamNbr = SMT_Unknown ;
smc->mib.m[MAC0].fddiMACOldDownstreamNbr = SMT_Unknown ;
}
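/*
 * Generate a new transaction ID: an incrementing counter XORed with
 * SMT_TID_MAGIC, skipping the value 0 and masked to 30 bits.
 */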
u_long smt_get_tid(struct s_smc *smc)
{
u_long tid ;
while ((tid = ++(smc->sm.smt_tid) ^ SMT_TID_MAGIC) == 0)
;
return tid & 0x3fffffffL;
}
#ifdef LITTLE_ENDIAN
/*
* table of parameter lengths
*/
static const struct smt_pdef {
int ptype ;
int plen ;
const char *pswap ;
} smt_pdef[] = {
{ SMT_P_UNA, sizeof(struct smt_p_una) ,
SWAP_SMT_P_UNA } ,
{ SMT_P_SDE, sizeof(struct smt_p_sde) ,
SWAP_SMT_P_SDE } ,
{ SMT_P_STATE, sizeof(struct smt_p_state) ,
SWAP_SMT_P_STATE } ,
{ SMT_P_TIMESTAMP,sizeof(struct smt_p_timestamp) ,
SWAP_SMT_P_TIMESTAMP } ,
{ SMT_P_POLICY, sizeof(struct smt_p_policy) ,
SWAP_SMT_P_POLICY } ,
{ SMT_P_LATENCY, sizeof(struct smt_p_latency) ,
SWAP_SMT_P_LATENCY } ,
{ SMT_P_NEIGHBORS,sizeof(struct smt_p_neighbor) ,
SWAP_SMT_P_NEIGHBORS } ,
{ SMT_P_PATH, sizeof(struct smt_p_path) ,
SWAP_SMT_P_PATH } ,
{ SMT_P_MAC_STATUS,sizeof(struct smt_p_mac_status) ,
SWAP_SMT_P_MAC_STATUS } ,
{ SMT_P_LEM, sizeof(struct smt_p_lem) ,
SWAP_SMT_P_LEM } ,
{ SMT_P_MAC_COUNTER,sizeof(struct smt_p_mac_counter) ,
SWAP_SMT_P_MAC_COUNTER } ,
{ SMT_P_MAC_FNC,sizeof(struct smt_p_mac_fnc) ,
SWAP_SMT_P_MAC_FNC } ,
{ SMT_P_PRIORITY,sizeof(struct smt_p_priority) ,
SWAP_SMT_P_PRIORITY } ,
{ SMT_P_EB,sizeof(struct smt_p_eb) ,
SWAP_SMT_P_EB } ,
{ SMT_P_MANUFACTURER,sizeof(struct smp_p_manufacturer) ,
SWAP_SMT_P_MANUFACTURER } ,
{ SMT_P_REASON, sizeof(struct smt_p_reason) ,
SWAP_SMT_P_REASON } ,
{ SMT_P_REFUSED, sizeof(struct smt_p_refused) ,
SWAP_SMT_P_REFUSED } ,
{ SMT_P_VERSION, sizeof(struct smt_p_version) ,
SWAP_SMT_P_VERSION } ,
#ifdef ESS
{ SMT_P0015, sizeof(struct smt_p_0015) , SWAP_SMT_P0015 } ,
{ SMT_P0016, sizeof(struct smt_p_0016) , SWAP_SMT_P0016 } ,
{ SMT_P0017, sizeof(struct smt_p_0017) , SWAP_SMT_P0017 } ,
{ SMT_P0018, sizeof(struct smt_p_0018) , SWAP_SMT_P0018 } ,
{ SMT_P0019, sizeof(struct smt_p_0019) , SWAP_SMT_P0019 } ,
{ SMT_P001A, sizeof(struct smt_p_001a) , SWAP_SMT_P001A } ,
{ SMT_P001B, sizeof(struct smt_p_001b) , SWAP_SMT_P001B } ,
{ SMT_P001C, sizeof(struct smt_p_001c) , SWAP_SMT_P001C } ,
{ SMT_P001D, sizeof(struct smt_p_001d) , SWAP_SMT_P001D } ,
#endif
#if 0
{ SMT_P_FSC, sizeof(struct smt_p_fsc) ,
SWAP_SMT_P_FSC } ,
#endif
{ SMT_P_SETCOUNT,0, SWAP_SMT_P_SETCOUNT } ,
{ SMT_P1048, 0, SWAP_SMT_P1048 } ,
{ SMT_P208C, 0, SWAP_SMT_P208C } ,
{ SMT_P208D, 0, SWAP_SMT_P208D } ,
{ SMT_P208E, 0, SWAP_SMT_P208E } ,
{ SMT_P208F, 0, SWAP_SMT_P208F } ,
{ SMT_P2090, 0, SWAP_SMT_P2090 } ,
#ifdef ESS
{ SMT_P320B, sizeof(struct smt_p_320b) , SWAP_SMT_P320B } ,
{ SMT_P320F, sizeof(struct smt_p_320f) , SWAP_SMT_P320F } ,
{ SMT_P3210, sizeof(struct smt_p_3210) , SWAP_SMT_P3210 } ,
#endif
{ SMT_P4050, 0, SWAP_SMT_P4050 } ,
{ SMT_P4051, 0, SWAP_SMT_P4051 } ,
{ SMT_P4052, 0, SWAP_SMT_P4052 } ,
{ SMT_P4053, 0, SWAP_SMT_P4053 } ,
} ;
#define N_SMT_PLEN ARRAY_SIZE(smt_pdef)
#endif
int smt_check_para(struct s_smc *smc, struct smt_header *sm,
const u_short list[])
{
const u_short *p = list ;
while (*p) {
if (!sm_to_para(smc,sm,(int) *p)) {
DB_SMT("SMT: smt_check_para - missing para %hx", *p);
return -1;
}
p++ ;
}
return 0;
}
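/*
 * Walk the parameter list of an SMT frame and return a pointer to the
 * first parameter of type <para> (including its para header), or NULL
 * if it is not present or the parameter list is malformed.
 */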
void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para)
{
char *p ;
int len ;
int plen ;
void *found = NULL;
SK_UNUSED(smc) ;
len = sm->smt_len ;
p = (char *)(sm+1) ; /* pointer to info */
while (len > 0 ) {
if (((struct smt_para *)p)->p_type == para)
found = (void *) p ;
plen = ((struct smt_para *)p)->p_len + PARA_LEN ;
p += plen ;
len -= plen ;
if (len < 0) {
DB_SMT("SMT : sm_to_para - length error %d", plen);
return NULL;
}
if ((plen & 3) && (para != SMT_P_ECHODATA)) {
DB_SMT("SMT : sm_to_para - odd length %d", plen);
return NULL;
}
if (found)
return found;
}
return NULL;
}
#if 0
/*
* send ANTC data test frame
*/
void fddi_send_antc(struct s_smc *smc, struct fddi_addr *dest)
{
SK_UNUSED(smc) ;
SK_UNUSED(dest) ;
#if 0
SMbuf *mb ;
struct smt_header *smt ;
int i ;
char *p ;
mb = smt_get_mbuf() ;
mb->sm_len = 3000+12 ;
p = smtod(mb, char *) + 12 ;
for (i = 0 ; i < 3000 ; i++)
*p++ = 1 << (i&7) ;
smt = smtod(mb, struct smt_header *) ;
smt->smt_dest = *dest ;
smt->smt_source = smc->mib.m[MAC0].fddiMACSMTAddress ;
smt_send_mbuf(smc,mb,FC_ASYNC_LLC) ;
#endif
}
#endif
/*
* return static mac index
*/
static int mac_index(struct s_smc *smc, int mac)
{
SK_UNUSED(mac) ;
#ifdef CONCENTRATOR
SK_UNUSED(smc) ;
return NUMPHYS + 1;
#else
return (smc->s.sas == SMT_SAS) ? 2 : 3;
#endif
}
/*
* return static phy index
*/
static int phy_index(struct s_smc *smc, int phy)
{
SK_UNUSED(smc) ;
return phy + 1;
}
/*
* return dynamic mac connection resource index
*/
static int mac_con_resource_index(struct s_smc *smc, int mac)
{
#ifdef CONCENTRATOR
SK_UNUSED(smc) ;
SK_UNUSED(mac) ;
return entity_to_index(smc, cem_get_downstream(smc, ENTITY_MAC));
#else
SK_UNUSED(mac) ;
switch (smc->mib.fddiSMTCF_State) {
case SC9_C_WRAP_A :
case SC5_THRU_B :
case SC11_C_WRAP_S :
return 1;
case SC10_C_WRAP_B :
case SC4_THRU_A :
return 2;
}
return smc->s.sas == SMT_SAS ? 2 : 3;
#endif
}
/*
* return dynamic phy connection resource index
*/
static int phy_con_resource_index(struct s_smc *smc, int phy)
{
#ifdef CONCENTRATOR
return entity_to_index(smc, cem_get_downstream(smc, ENTITY_PHY(phy))) ;
#else
switch (smc->mib.fddiSMTCF_State) {
case SC9_C_WRAP_A :
return phy == PA ? 3 : 2;
case SC10_C_WRAP_B :
return phy == PA ? 1 : 3;
case SC4_THRU_A :
return phy == PA ? 3 : 1;
case SC5_THRU_B :
return phy == PA ? 2 : 3;
case SC11_C_WRAP_S :
return 2;
}
return phy;
#endif
}
#ifdef CONCENTRATOR
static int entity_to_index(struct s_smc *smc, int e)
{
if (e == ENTITY_MAC)
return mac_index(smc, 1);
else
return phy_index(smc, e - ENTITY_PHY(0));
}
#endif
#ifdef LITTLE_ENDIAN
static int smt_swap_short(u_short s)
{
return ((s>>8)&0xff) | ((s&0xff)<<8);
}
void smt_swap_para(struct smt_header *sm, int len, int direction)
/* int direction; 0 encode 1 decode */
{
struct smt_para *pa ;
const struct smt_pdef *pd ;
char *p ;
int plen ;
int type ;
int i ;
/* printf("smt_swap_para sm %x len %d dir %d\n",
sm,len,direction) ;
*/
smt_string_swap((char *)sm,SWAP_SMTHEADER,len) ;
/* swap args */
len -= sizeof(struct smt_header) ;
p = (char *) (sm + 1) ;
while (len > 0) {
pa = (struct smt_para *) p ;
plen = pa->p_len ;
type = pa->p_type ;
pa->p_type = smt_swap_short(pa->p_type) ;
pa->p_len = smt_swap_short(pa->p_len) ;
if (direction) {
plen = pa->p_len ;
type = pa->p_type ;
}
/*
* note: paras can have 0 length !
*/
if (plen < 0)
break ;
plen += PARA_LEN ;
for (i = N_SMT_PLEN, pd = smt_pdef; i ; i--,pd++) {
if (pd->ptype == type)
break ;
}
if (i && pd->pswap) {
smt_string_swap(p+PARA_LEN,pd->pswap,len) ;
}
len -= plen ;
p += plen ;
}
}
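/*
 * Swap a buffer according to a SWAP_* format string:
 *	'c'		copy one byte unchanged
 *	's'		byte-swap a 16 bit word
 *	'l'		byte-swap a 32 bit word
 *	'1'..'9'	skip that many bytes
 *	'[' ... ']'	repeat the enclosed group until the length is used up
 */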
static void smt_string_swap(char *data, const char *format, int len)
{
const char *open_paren = NULL ;
while (len > 0 && *format) {
switch (*format) {
case '[' :
open_paren = format ;
break ;
case ']' :
format = open_paren ;
break ;
case '1' :
case '2' :
case '3' :
case '4' :
case '5' :
case '6' :
case '7' :
case '8' :
case '9' :
data += *format - '0' ;
len -= *format - '0' ;
break ;
case 'c':
data++ ;
len-- ;
break ;
case 's' :
swap(data[0], data[1]) ;
data += 2 ;
len -= 2 ;
break ;
case 'l' :
swap(data[0], data[3]) ;
swap(data[1], data[2]) ;
data += 4 ;
len -= 4 ;
break ;
}
format++ ;
}
}
#else
void smt_swap_para(struct smt_header *sm, int len, int direction)
/* int direction; 0 encode 1 decode */
{
SK_UNUSED(sm) ;
SK_UNUSED(len) ;
SK_UNUSED(direction) ;
}
#endif
/*
* PMF actions
*/
int smt_action(struct s_smc *smc, int class, int code, int index)
{
int event ;
int port ;
DB_SMT("SMT: action %d code %d", class, code);
switch(class) {
case SMT_STATION_ACTION :
switch(code) {
case SMT_STATION_ACTION_CONNECT :
smc->mib.fddiSMTRemoteDisconnectFlag = FALSE ;
queue_event(smc,EVENT_ECM,EC_CONNECT) ;
break ;
case SMT_STATION_ACTION_DISCONNECT :
queue_event(smc,EVENT_ECM,EC_DISCONNECT) ;
smc->mib.fddiSMTRemoteDisconnectFlag = TRUE ;
RS_SET(smc,RS_DISCONNECT) ;
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_DISCONNECT,
smt_get_event_word(smc));
break ;
case SMT_STATION_ACTION_PATHTEST :
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
FDDI_SMT_EVENT, (u_long) FDDI_PATH_TEST,
smt_get_event_word(smc));
break ;
case SMT_STATION_ACTION_SELFTEST :
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_SELF_TEST,
smt_get_event_word(smc));
break ;
case SMT_STATION_ACTION_DISABLE_A :
if (smc->y[PA].pc_mode == PM_PEER) {
RS_SET(smc,RS_EVENT) ;
queue_event(smc,EVENT_PCM+PA,PC_DISABLE) ;
}
break ;
case SMT_STATION_ACTION_DISABLE_B :
if (smc->y[PB].pc_mode == PM_PEER) {
RS_SET(smc,RS_EVENT) ;
queue_event(smc,EVENT_PCM+PB,PC_DISABLE) ;
}
break ;
case SMT_STATION_ACTION_DISABLE_M :
for (port = 0 ; port < NUMPHYS ; port++) {
if (smc->mib.p[port].fddiPORTMy_Type != TM)
continue ;
RS_SET(smc,RS_EVENT) ;
queue_event(smc,EVENT_PCM+port,PC_DISABLE) ;
}
break ;
default :
return 1;
}
break ;
case SMT_PORT_ACTION :
switch(code) {
case SMT_PORT_ACTION_ENABLE :
event = PC_ENABLE ;
break ;
case SMT_PORT_ACTION_DISABLE :
event = PC_DISABLE ;
break ;
case SMT_PORT_ACTION_MAINT :
event = PC_MAINT ;
break ;
case SMT_PORT_ACTION_START :
event = PC_START ;
break ;
case SMT_PORT_ACTION_STOP :
event = PC_STOP ;
break ;
default :
return 1;
}
queue_event(smc,EVENT_PCM+index,event) ;
break ;
default :
return 1;
}
return 0;
}
/*
 * canonical conversion of <len> bytes beginning from *data
*/
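/*
 * "Canonical" here means that each byte is bit-reversed (bitrev8), i.e.
 * the addresses are converted between MSB-first and LSB-first bit order.
 */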
#ifdef USE_CAN_ADDR
static void hwm_conv_can(struct s_smc *smc, char *data, int len)
{
int i ;
SK_UNUSED(smc) ;
for (i = len; i ; i--, data++)
*data = bitrev8(*data);
}
#endif
#endif /* no SLIM_SMT */
| linux-master | drivers/net/fddi/skfp/smt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
SMT RMT
Ring Management
*/
/*
 * Hardware independent state machine implementation
* The following external SMT functions are referenced :
*
* queue_event()
* smt_timer_start()
* smt_timer_stop()
*
* The following external HW dependent functions are referenced :
* sm_ma_control()
* sm_mac_check_beacon_claim()
*
* The following HW dependent events are required :
* RM_RING_OP
* RM_RING_NON_OP
* RM_MY_BEACON
* RM_OTHER_BEACON
* RM_MY_CLAIM
* RM_TRT_EXP
* RM_VALID_CLAIM
*
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#define KERNEL
#include "h/smtstate.h"
/*
* FSM Macros
*/
#define AFLAG 0x10
#define GO_STATE(x) (smc->mib.m[MAC0].fddiMACRMTState = (x)|AFLAG)
#define ACTIONS_DONE() (smc->mib.m[MAC0].fddiMACRMTState &= ~AFLAG)
#define ACTIONS(x) (x|AFLAG)
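/*
 * A state value with AFLAG set means the entry actions of that state
 * have not yet been executed: GO_STATE() sets the flag and
 * ACTIONS_DONE() clears it once the ACTIONS(state) case has run.
 */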
#define RM0_ISOLATED 0
#define RM1_NON_OP 1 /* not operational */
#define RM2_RING_OP 2 /* ring operational */
#define RM3_DETECT 3 /* detect dupl addresses */
#define RM4_NON_OP_DUP 4 /* dupl. addr detected */
#define RM5_RING_OP_DUP 5 /* ring oper. with dupl. addr */
#define RM6_DIRECTED 6 /* sending directed beacons */
#define RM7_TRACE 7 /* trace initiated */
/*
* symbolic state names
*/
static const char * const rmt_states[] = {
"RM0_ISOLATED","RM1_NON_OP","RM2_RING_OP","RM3_DETECT",
"RM4_NON_OP_DUP","RM5_RING_OP_DUP","RM6_DIRECTED",
"RM7_TRACE"
} ;
/*
* symbolic event names
*/
static const char * const rmt_events[] = {
"NONE","RM_RING_OP","RM_RING_NON_OP","RM_MY_BEACON",
"RM_OTHER_BEACON","RM_MY_CLAIM","RM_TRT_EXP","RM_VALID_CLAIM",
"RM_JOIN","RM_LOOP","RM_DUP_ADDR","RM_ENABLE_FLAG",
"RM_TIMEOUT_NON_OP","RM_TIMEOUT_T_STUCK",
"RM_TIMEOUT_ANNOUNCE","RM_TIMEOUT_T_DIRECT",
"RM_TIMEOUT_D_MAX","RM_TIMEOUT_POLL","RM_TX_STATE_CHANGE"
} ;
/*
* Globals
* in struct s_rmt
*/
/*
* function declarations
*/
static void rmt_fsm(struct s_smc *smc, int cmd);
static void start_rmt_timer0(struct s_smc *smc, u_long value, int event);
static void start_rmt_timer1(struct s_smc *smc, u_long value, int event);
static void start_rmt_timer2(struct s_smc *smc, u_long value, int event);
static void stop_rmt_timer0(struct s_smc *smc);
static void stop_rmt_timer1(struct s_smc *smc);
static void stop_rmt_timer2(struct s_smc *smc);
static void rmt_dup_actions(struct s_smc *smc);
static void rmt_reinsert_actions(struct s_smc *smc);
static void rmt_leave_actions(struct s_smc *smc);
static void rmt_new_dup_actions(struct s_smc *smc);
#ifndef SUPERNET_3
extern void restart_trt_for_dbcn() ;
#endif /*SUPERNET_3*/
/*
init RMT state machine
clear all RMT vars and flags
*/
void rmt_init(struct s_smc *smc)
{
smc->mib.m[MAC0].fddiMACRMTState = ACTIONS(RM0_ISOLATED) ;
smc->r.dup_addr_test = DA_NONE ;
smc->r.da_flag = 0 ;
smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
smc->r.sm_ma_avail = FALSE ;
smc->r.loop_avail = 0 ;
smc->r.bn_flag = 0 ;
smc->r.jm_flag = 0 ;
smc->r.no_flag = TRUE ;
}
/*
RMT state machine
called by dispatcher
do
display state change
process event
until SM is stable
*/
void rmt(struct s_smc *smc, int event)
{
int state ;
do {
DB_RMT("RMT : state %s%s event %s",
smc->mib.m[MAC0].fddiMACRMTState & AFLAG ? "ACTIONS " : "",
rmt_states[smc->mib.m[MAC0].fddiMACRMTState & ~AFLAG],
rmt_events[event]);
state = smc->mib.m[MAC0].fddiMACRMTState ;
rmt_fsm(smc,event) ;
event = 0 ;
} while (state != smc->mib.m[MAC0].fddiMACRMTState) ;
rmt_state_change(smc,(int)smc->mib.m[MAC0].fddiMACRMTState) ;
}
/*
process RMT event
*/
static void rmt_fsm(struct s_smc *smc, int cmd)
{
/*
* RM00-RM70 : from all states
*/
if (!smc->r.rm_join && !smc->r.rm_loop &&
smc->mib.m[MAC0].fddiMACRMTState != ACTIONS(RM0_ISOLATED) &&
smc->mib.m[MAC0].fddiMACRMTState != RM0_ISOLATED) {
RS_SET(smc,RS_NORINGOP) ;
rmt_indication(smc,0) ;
GO_STATE(RM0_ISOLATED) ;
return ;
}
switch(smc->mib.m[MAC0].fddiMACRMTState) {
case ACTIONS(RM0_ISOLATED) :
stop_rmt_timer0(smc) ;
stop_rmt_timer1(smc) ;
stop_rmt_timer2(smc) ;
/*
* Disable MAC.
*/
sm_ma_control(smc,MA_OFFLINE) ;
smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
smc->r.loop_avail = FALSE ;
smc->r.sm_ma_avail = FALSE ;
smc->r.no_flag = TRUE ;
DB_RMTN(1, "RMT : ISOLATED");
ACTIONS_DONE() ;
break ;
case RM0_ISOLATED :
/*RM01*/
if (smc->r.rm_join || smc->r.rm_loop) {
/*
* According to the standard the MAC must be reset
* here. The FORMAC will be initialized and Claim
* and Beacon Frames will be uploaded to the MAC.
* So any change of Treq will take effect NOW.
*/
sm_ma_control(smc,MA_RESET) ;
GO_STATE(RM1_NON_OP) ;
break ;
}
break ;
case ACTIONS(RM1_NON_OP) :
start_rmt_timer0(smc,smc->s.rmt_t_non_op,RM_TIMEOUT_NON_OP) ;
stop_rmt_timer1(smc) ;
stop_rmt_timer2(smc) ;
sm_ma_control(smc,MA_BEACON) ;
DB_RMTN(1, "RMT : RING DOWN");
RS_SET(smc,RS_NORINGOP) ;
smc->r.sm_ma_avail = FALSE ;
rmt_indication(smc,0) ;
ACTIONS_DONE() ;
break ;
case RM1_NON_OP :
/*RM12*/
if (cmd == RM_RING_OP) {
RS_SET(smc,RS_RINGOPCHANGE) ;
GO_STATE(RM2_RING_OP) ;
break ;
}
/*RM13*/
else if (cmd == RM_TIMEOUT_NON_OP) {
smc->r.bn_flag = FALSE ;
smc->r.no_flag = TRUE ;
GO_STATE(RM3_DETECT) ;
break ;
}
break ;
case ACTIONS(RM2_RING_OP) :
stop_rmt_timer0(smc) ;
stop_rmt_timer1(smc) ;
stop_rmt_timer2(smc) ;
smc->r.no_flag = FALSE ;
if (smc->r.rm_loop)
smc->r.loop_avail = TRUE ;
if (smc->r.rm_join) {
smc->r.sm_ma_avail = TRUE ;
if (smc->mib.m[MAC0].fddiMACMA_UnitdataEnable)
smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = TRUE;
else
smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE;
}
DB_RMTN(1, "RMT : RING UP");
RS_CLEAR(smc,RS_NORINGOP) ;
RS_SET(smc,RS_RINGOPCHANGE) ;
rmt_indication(smc,1) ;
smt_stat_counter(smc,0) ;
ACTIONS_DONE() ;
break ;
case RM2_RING_OP :
/*RM21*/
if (cmd == RM_RING_NON_OP) {
smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
smc->r.loop_avail = FALSE ;
RS_SET(smc,RS_RINGOPCHANGE) ;
GO_STATE(RM1_NON_OP) ;
break ;
}
/*RM22a*/
else if (cmd == RM_ENABLE_FLAG) {
if (smc->mib.m[MAC0].fddiMACMA_UnitdataEnable)
smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = TRUE ;
else
smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
}
/*RM25*/
else if (smc->r.dup_addr_test == DA_FAILED) {
smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
smc->r.loop_avail = FALSE ;
smc->r.da_flag = TRUE ;
GO_STATE(RM5_RING_OP_DUP) ;
break ;
}
break ;
case ACTIONS(RM3_DETECT) :
start_rmt_timer0(smc,smc->s.mac_d_max*2,RM_TIMEOUT_D_MAX) ;
start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ;
start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
sm_mac_check_beacon_claim(smc) ;
DB_RMTN(1, "RMT : RM3_DETECT");
ACTIONS_DONE() ;
break ;
case RM3_DETECT :
if (cmd == RM_TIMEOUT_POLL) {
start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL);
sm_mac_check_beacon_claim(smc) ;
break ;
}
if (cmd == RM_TIMEOUT_D_MAX) {
smc->r.timer0_exp = TRUE ;
}
/*
*jd(22-Feb-1999)
 * We need a time ">= 2*mac_d_max" after we have finished the
 * Claim or Beacon state, so we restart timer0 at every
 * TX state change.
*/
if (cmd == RM_TX_STATE_CHANGE) {
start_rmt_timer0(smc,
smc->s.mac_d_max*2,
RM_TIMEOUT_D_MAX) ;
}
/*RM32*/
if (cmd == RM_RING_OP) {
GO_STATE(RM2_RING_OP) ;
break ;
}
/*RM33a*/
else if ((cmd == RM_MY_BEACON || cmd == RM_OTHER_BEACON)
&& smc->r.bn_flag) {
smc->r.bn_flag = FALSE ;
}
/*RM33b*/
else if (cmd == RM_TRT_EXP && !smc->r.bn_flag) {
int tx ;
/*
* set bn_flag only if in state T4 or T5:
* only if we're the beaconer should we start the
* trace !
*/
if ((tx = sm_mac_get_tx_state(smc)) == 4 || tx == 5) {
DB_RMTN(2, "RMT : DETECT && TRT_EXPIRED && T4/T5");
smc->r.bn_flag = TRUE ;
/*
* If one of the upstream stations beaconed
* and the link to the upstream neighbor is
* lost we need to restart the stuck timer to
* check the "stuck beacon" condition.
*/
start_rmt_timer1(smc,smc->s.rmt_t_stuck,
RM_TIMEOUT_T_STUCK) ;
}
/*
 * We do NOT need to clear smc->r.bn_flag when we are not
 * in state T4 or T5, because the flag must already be
 * cleared to reach this condition.
*/
DB_RMTN(2, "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)",
tx, smc->r.bn_flag);
}
/*RM34a*/
else if (cmd == RM_MY_CLAIM && smc->r.timer0_exp) {
rmt_new_dup_actions(smc) ;
GO_STATE(RM4_NON_OP_DUP) ;
break ;
}
/*RM34b*/
else if (cmd == RM_MY_BEACON && smc->r.timer0_exp) {
rmt_new_dup_actions(smc) ;
GO_STATE(RM4_NON_OP_DUP) ;
break ;
}
/*RM34c*/
else if (cmd == RM_VALID_CLAIM) {
rmt_new_dup_actions(smc) ;
GO_STATE(RM4_NON_OP_DUP) ;
break ;
}
/*RM36*/
else if (cmd == RM_TIMEOUT_T_STUCK &&
smc->r.rm_join && smc->r.bn_flag) {
GO_STATE(RM6_DIRECTED) ;
break ;
}
break ;
case ACTIONS(RM4_NON_OP_DUP) :
start_rmt_timer0(smc,smc->s.rmt_t_announce,RM_TIMEOUT_ANNOUNCE);
start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ;
start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
sm_mac_check_beacon_claim(smc) ;
DB_RMTN(1, "RMT : RM4_NON_OP_DUP");
ACTIONS_DONE() ;
break ;
case RM4_NON_OP_DUP :
if (cmd == RM_TIMEOUT_POLL) {
start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL);
sm_mac_check_beacon_claim(smc) ;
break ;
}
/*RM41*/
if (!smc->r.da_flag) {
GO_STATE(RM1_NON_OP) ;
break ;
}
/*RM44a*/
else if ((cmd == RM_MY_BEACON || cmd == RM_OTHER_BEACON) &&
smc->r.bn_flag) {
smc->r.bn_flag = FALSE ;
}
/*RM44b*/
else if (cmd == RM_TRT_EXP && !smc->r.bn_flag) {
int tx ;
/*
* set bn_flag only if in state T4 or T5:
* only if we're the beaconer should we start the
* trace !
*/
if ((tx = sm_mac_get_tx_state(smc)) == 4 || tx == 5) {
DB_RMTN(2, "RMT : NOPDUP && TRT_EXPIRED && T4/T5");
smc->r.bn_flag = TRUE ;
/*
* If one of the upstream stations beaconed
* and the link to the upstream neighbor is
* lost we need to restart the stuck timer to
* check the "stuck beacon" condition.
*/
start_rmt_timer1(smc,smc->s.rmt_t_stuck,
RM_TIMEOUT_T_STUCK) ;
}
/*
 * We do NOT need to clear smc->r.bn_flag when we are not
 * in state T4 or T5, because the flag must already be
 * cleared to reach this condition.
*/
DB_RMTN(2, "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)",
tx, smc->r.bn_flag);
}
/*RM44c*/
else if (cmd == RM_TIMEOUT_ANNOUNCE && !smc->r.bn_flag) {
rmt_dup_actions(smc) ;
}
/*RM45*/
else if (cmd == RM_RING_OP) {
smc->r.no_flag = FALSE ;
GO_STATE(RM5_RING_OP_DUP) ;
break ;
}
/*RM46*/
else if (cmd == RM_TIMEOUT_T_STUCK &&
smc->r.rm_join && smc->r.bn_flag) {
GO_STATE(RM6_DIRECTED) ;
break ;
}
break ;
case ACTIONS(RM5_RING_OP_DUP) :
stop_rmt_timer0(smc) ;
stop_rmt_timer1(smc) ;
stop_rmt_timer2(smc) ;
DB_RMTN(1, "RMT : RM5_RING_OP_DUP");
ACTIONS_DONE() ;
break;
case RM5_RING_OP_DUP :
/*RM52*/
if (smc->r.dup_addr_test == DA_PASSED) {
smc->r.da_flag = FALSE ;
GO_STATE(RM2_RING_OP) ;
break ;
}
/*RM54*/
else if (cmd == RM_RING_NON_OP) {
smc->r.jm_flag = FALSE ;
smc->r.bn_flag = FALSE ;
GO_STATE(RM4_NON_OP_DUP) ;
break ;
}
break ;
case ACTIONS(RM6_DIRECTED) :
start_rmt_timer0(smc,smc->s.rmt_t_direct,RM_TIMEOUT_T_DIRECT) ;
stop_rmt_timer1(smc) ;
start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
sm_ma_control(smc,MA_DIRECTED) ;
RS_SET(smc,RS_BEACON) ;
DB_RMTN(1, "RMT : RM6_DIRECTED");
ACTIONS_DONE() ;
break ;
case RM6_DIRECTED :
/*RM63*/
if (cmd == RM_TIMEOUT_POLL) {
start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL);
sm_mac_check_beacon_claim(smc) ;
#ifndef SUPERNET_3
/* Because of problems with the Supernet II chip set,
 * sending of Directed Beacons would stop after 165 ms;
 * restart_trt_for_dbcn(smc) is therefore called to
 * prevent this.
 */
restart_trt_for_dbcn(smc) ;
#endif /*SUPERNET_3*/
break ;
}
if ((cmd == RM_MY_BEACON || cmd == RM_OTHER_BEACON) &&
!smc->r.da_flag) {
smc->r.bn_flag = FALSE ;
GO_STATE(RM3_DETECT) ;
break ;
}
/*RM64*/
else if ((cmd == RM_MY_BEACON || cmd == RM_OTHER_BEACON) &&
smc->r.da_flag) {
smc->r.bn_flag = FALSE ;
GO_STATE(RM4_NON_OP_DUP) ;
break ;
}
/*RM67*/
else if (cmd == RM_TIMEOUT_T_DIRECT) {
GO_STATE(RM7_TRACE) ;
break ;
}
break ;
case ACTIONS(RM7_TRACE) :
stop_rmt_timer0(smc) ;
stop_rmt_timer1(smc) ;
stop_rmt_timer2(smc) ;
smc->e.trace_prop |= ENTITY_BIT(ENTITY_MAC) ;
queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ;
DB_RMTN(1, "RMT : RM7_TRACE");
ACTIONS_DONE() ;
break ;
case RM7_TRACE :
break ;
default:
SMT_PANIC(smc,SMT_E0122, SMT_E0122_MSG) ;
break;
}
}
/*
* (jd) RMT duplicate address actions
* leave the ring or reinsert just as configured
*/
static void rmt_dup_actions(struct s_smc *smc)
{
if (smc->r.jm_flag) {
}
else {
if (smc->s.rmt_dup_mac_behavior) {
SMT_ERR_LOG(smc,SMT_E0138, SMT_E0138_MSG) ;
rmt_reinsert_actions(smc) ;
}
else {
SMT_ERR_LOG(smc,SMT_E0135, SMT_E0135_MSG) ;
rmt_leave_actions(smc) ;
}
}
}
/*
* Reconnect to the Ring
*/
static void rmt_reinsert_actions(struct s_smc *smc)
{
queue_event(smc,EVENT_ECM,EC_DISCONNECT) ;
queue_event(smc,EVENT_ECM,EC_CONNECT) ;
}
/*
* duplicate address detected
*/
static void rmt_new_dup_actions(struct s_smc *smc)
{
smc->r.da_flag = TRUE ;
smc->r.bn_flag = FALSE ;
smc->r.jm_flag = FALSE ;
/*
 * We have three options: change the address, jam, or leave.
 * Leaving the ring is the default.
 * Optionally it is possible to reinsert after leaving the ring,
 * but this does not conform to the SMT spec.
*/
if (smc->s.rmt_dup_mac_behavior) {
SMT_ERR_LOG(smc,SMT_E0138, SMT_E0138_MSG) ;
rmt_reinsert_actions(smc) ;
}
else {
SMT_ERR_LOG(smc,SMT_E0135, SMT_E0135_MSG) ;
rmt_leave_actions(smc) ;
}
}
/*
* leave the ring
*/
static void rmt_leave_actions(struct s_smc *smc)
{
queue_event(smc,EVENT_ECM,EC_DISCONNECT) ;
/*
 * Note: do NOT try to reconnect again later.
 * The station must leave the ring!
*/
}
/*
* SMT timer interface
* start RMT timer 0
*/
static void start_rmt_timer0(struct s_smc *smc, u_long value, int event)
{
smc->r.timer0_exp = FALSE ; /* clear timer event flag */
smt_timer_start(smc,&smc->r.rmt_timer0,value,EV_TOKEN(EVENT_RMT,event));
}
/*
* SMT timer interface
* start RMT timer 1
*/
static void start_rmt_timer1(struct s_smc *smc, u_long value, int event)
{
smc->r.timer1_exp = FALSE ; /* clear timer event flag */
smt_timer_start(smc,&smc->r.rmt_timer1,value,EV_TOKEN(EVENT_RMT,event));
}
/*
* SMT timer interface
* start RMT timer 2
*/
static void start_rmt_timer2(struct s_smc *smc, u_long value, int event)
{
smc->r.timer2_exp = FALSE ; /* clear timer event flag */
smt_timer_start(smc,&smc->r.rmt_timer2,value,EV_TOKEN(EVENT_RMT,event));
}
/*
* SMT timer interface
* stop RMT timer 0
*/
static void stop_rmt_timer0(struct s_smc *smc)
{
if (smc->r.rmt_timer0.tm_active)
smt_timer_stop(smc,&smc->r.rmt_timer0) ;
}
/*
* SMT timer interface
* stop RMT timer 1
*/
static void stop_rmt_timer1(struct s_smc *smc)
{
if (smc->r.rmt_timer1.tm_active)
smt_timer_stop(smc,&smc->r.rmt_timer1) ;
}
/*
* SMT timer interface
* stop RMT timer 2
*/
static void stop_rmt_timer2(struct s_smc *smc)
{
if (smc->r.rmt_timer2.tm_active)
smt_timer_stop(smc,&smc->r.rmt_timer2) ;
}
| linux-master | drivers/net/fddi/skfp/rmt.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/stddef.h>
#include <linux/soc/qcom/qmi.h>
#include "ipa_qmi_msg.h"
/* QMI message structure definition for struct ipa_indication_register_req */
const struct qmi_elem_info ipa_indication_register_req_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_indication_register_req,
master_driver_init_complete_valid),
.tlv_type = 0x10,
.offset = offsetof(struct ipa_indication_register_req,
master_driver_init_complete_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_indication_register_req,
master_driver_init_complete),
.tlv_type = 0x10,
.offset = offsetof(struct ipa_indication_register_req,
master_driver_init_complete),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_indication_register_req,
data_usage_quota_reached_valid),
.tlv_type = 0x11,
.offset = offsetof(struct ipa_indication_register_req,
data_usage_quota_reached_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_indication_register_req,
data_usage_quota_reached),
.tlv_type = 0x11,
.offset = offsetof(struct ipa_indication_register_req,
data_usage_quota_reached),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_indication_register_req,
ipa_mhi_ready_ind_valid),
.tlv_type = 0x12,
.offset = offsetof(struct ipa_indication_register_req,
ipa_mhi_ready_ind_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_indication_register_req,
ipa_mhi_ready_ind),
.tlv_type = 0x12,
.offset = offsetof(struct ipa_indication_register_req,
ipa_mhi_ready_ind),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_indication_register_req,
endpoint_desc_ind_valid),
.tlv_type = 0x13,
.offset = offsetof(struct ipa_indication_register_req,
endpoint_desc_ind_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_indication_register_req,
endpoint_desc_ind),
.tlv_type = 0x13,
.offset = offsetof(struct ipa_indication_register_req,
endpoint_desc_ind),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_indication_register_req,
bw_change_ind_valid),
.tlv_type = 0x14,
.offset = offsetof(struct ipa_indication_register_req,
bw_change_ind_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_indication_register_req,
bw_change_ind),
.tlv_type = 0x14,
.offset = offsetof(struct ipa_indication_register_req,
bw_change_ind),
},
{
.data_type = QMI_EOTI,
},
};
/* QMI message structure definition for struct ipa_indication_register_rsp */
const struct qmi_elem_info ipa_indication_register_rsp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_indication_register_rsp,
rsp),
.tlv_type = 0x02,
.offset = offsetof(struct ipa_indication_register_rsp,
rsp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
},
};
/* QMI message structure definition for struct ipa_driver_init_complete_req */
const struct qmi_elem_info ipa_driver_init_complete_req_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_driver_init_complete_req,
status),
.tlv_type = 0x01,
.offset = offsetof(struct ipa_driver_init_complete_req,
status),
},
{
.data_type = QMI_EOTI,
},
};
/* QMI message structure definition for struct ipa_driver_init_complete_rsp */
const struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_driver_init_complete_rsp,
rsp),
.tlv_type = 0x02,
.offset = offsetof(struct ipa_driver_init_complete_rsp,
rsp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
},
};
/* QMI message structure definition for struct ipa_init_complete_ind */
const struct qmi_elem_info ipa_init_complete_ind_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_complete_ind,
status),
.tlv_type = 0x02,
.offset = offsetof(struct ipa_init_complete_ind,
status),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
},
};
/* QMI message structure definition for struct ipa_mem_bounds */
const struct qmi_elem_info ipa_mem_bounds_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_mem_bounds, start),
.offset = offsetof(struct ipa_mem_bounds, start),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_mem_bounds, end),
.offset = offsetof(struct ipa_mem_bounds, end),
},
{
.data_type = QMI_EOTI,
},
};
/* QMI message structure definition for struct ipa_mem_array */
const struct qmi_elem_info ipa_mem_array_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_mem_array, start),
.offset = offsetof(struct ipa_mem_array, start),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_mem_array, count),
.offset = offsetof(struct ipa_mem_array, count),
},
{
.data_type = QMI_EOTI,
},
};
/* QMI message structure definition for struct ipa_mem_range */
const struct qmi_elem_info ipa_mem_range_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_mem_range, start),
.offset = offsetof(struct ipa_mem_range, start),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_mem_range, size),
.offset = offsetof(struct ipa_mem_range, size),
},
{
.data_type = QMI_EOTI,
},
};
/* QMI message structure definition for struct ipa_init_modem_driver_req */
const struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
platform_type_valid),
.tlv_type = 0x10,
.offset = offsetof(struct ipa_init_modem_driver_req,
platform_type_valid),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
platform_type),
.tlv_type = 0x10,
.offset = offsetof(struct ipa_init_modem_driver_req,
platform_type),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hdr_tbl_info_valid),
.tlv_type = 0x11,
.offset = offsetof(struct ipa_init_modem_driver_req,
hdr_tbl_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hdr_tbl_info),
.tlv_type = 0x11,
.offset = offsetof(struct ipa_init_modem_driver_req,
hdr_tbl_info),
.ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v4_route_tbl_info_valid),
.tlv_type = 0x12,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_route_tbl_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v4_route_tbl_info),
.tlv_type = 0x12,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_route_tbl_info),
.ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v6_route_tbl_info_valid),
.tlv_type = 0x13,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_route_tbl_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v6_route_tbl_info),
.tlv_type = 0x13,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_route_tbl_info),
.ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v4_filter_tbl_start_valid),
.tlv_type = 0x14,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_filter_tbl_start_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v4_filter_tbl_start),
.tlv_type = 0x14,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_filter_tbl_start),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v6_filter_tbl_start_valid),
.tlv_type = 0x15,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_filter_tbl_start_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v6_filter_tbl_start),
.tlv_type = 0x15,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_filter_tbl_start),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
modem_mem_info_valid),
.tlv_type = 0x16,
.offset = offsetof(struct ipa_init_modem_driver_req,
modem_mem_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
modem_mem_info),
.tlv_type = 0x16,
.offset = offsetof(struct ipa_init_modem_driver_req,
modem_mem_info),
.ei_array = ipa_mem_range_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
ctrl_comm_dest_end_pt_valid),
.tlv_type = 0x17,
.offset = offsetof(struct ipa_init_modem_driver_req,
ctrl_comm_dest_end_pt_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
ctrl_comm_dest_end_pt),
.tlv_type = 0x17,
.offset = offsetof(struct ipa_init_modem_driver_req,
ctrl_comm_dest_end_pt),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
skip_uc_load_valid),
.tlv_type = 0x18,
.offset = offsetof(struct ipa_init_modem_driver_req,
skip_uc_load_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
skip_uc_load),
.tlv_type = 0x18,
.offset = offsetof(struct ipa_init_modem_driver_req,
skip_uc_load),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hdr_proc_ctx_tbl_info_valid),
.tlv_type = 0x19,
.offset = offsetof(struct ipa_init_modem_driver_req,
hdr_proc_ctx_tbl_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hdr_proc_ctx_tbl_info),
.tlv_type = 0x19,
.offset = offsetof(struct ipa_init_modem_driver_req,
hdr_proc_ctx_tbl_info),
.ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
zip_tbl_info_valid),
.tlv_type = 0x1a,
.offset = offsetof(struct ipa_init_modem_driver_req,
zip_tbl_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
zip_tbl_info),
.tlv_type = 0x1a,
.offset = offsetof(struct ipa_init_modem_driver_req,
zip_tbl_info),
.ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v4_hash_route_tbl_info_valid),
.tlv_type = 0x1b,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_hash_route_tbl_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v4_hash_route_tbl_info),
.tlv_type = 0x1b,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_hash_route_tbl_info),
.ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v6_hash_route_tbl_info_valid),
.tlv_type = 0x1c,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_hash_route_tbl_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v6_hash_route_tbl_info),
.tlv_type = 0x1c,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_hash_route_tbl_info),
.ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v4_hash_filter_tbl_start_valid),
.tlv_type = 0x1d,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_hash_filter_tbl_start_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v4_hash_filter_tbl_start),
.tlv_type = 0x1d,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_hash_filter_tbl_start),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v6_hash_filter_tbl_start_valid),
.tlv_type = 0x1e,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_hash_filter_tbl_start_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
v6_hash_filter_tbl_start),
.tlv_type = 0x1e,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_hash_filter_tbl_start),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hw_stats_quota_base_addr_valid),
.tlv_type = 0x1f,
.offset = offsetof(struct ipa_init_modem_driver_req,
hw_stats_quota_base_addr_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hw_stats_quota_base_addr),
.tlv_type = 0x1f,
.offset = offsetof(struct ipa_init_modem_driver_req,
hw_stats_quota_base_addr),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hw_stats_quota_size_valid),
.tlv_type = 0x20,
.offset = offsetof(struct ipa_init_modem_driver_req,
hw_stats_quota_size_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hw_stats_quota_size),
.tlv_type = 0x20,
.offset = offsetof(struct ipa_init_modem_driver_req,
hw_stats_quota_size),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hw_stats_drop_base_addr_valid),
.tlv_type = 0x21,
.offset = offsetof(struct ipa_init_modem_driver_req,
hw_stats_drop_base_addr_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hw_stats_drop_base_addr),
.tlv_type = 0x21,
.offset = offsetof(struct ipa_init_modem_driver_req,
hw_stats_drop_base_addr),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hw_stats_drop_size_valid),
.tlv_type = 0x22,
.offset = offsetof(struct ipa_init_modem_driver_req,
hw_stats_drop_size_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_req,
hw_stats_drop_size),
.tlv_type = 0x22,
.offset = offsetof(struct ipa_init_modem_driver_req,
hw_stats_drop_size),
},
{
.data_type = QMI_EOTI,
},
};
/* QMI message structure definition for struct ipa_init_modem_driver_rsp */
const struct qmi_elem_info ipa_init_modem_driver_rsp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_rsp,
rsp),
.tlv_type = 0x02,
.offset = offsetof(struct ipa_init_modem_driver_rsp,
rsp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_rsp,
ctrl_comm_dest_end_pt_valid),
.tlv_type = 0x10,
.offset = offsetof(struct ipa_init_modem_driver_rsp,
ctrl_comm_dest_end_pt_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_rsp,
ctrl_comm_dest_end_pt),
.tlv_type = 0x10,
.offset = offsetof(struct ipa_init_modem_driver_rsp,
ctrl_comm_dest_end_pt),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_rsp,
default_end_pt_valid),
.tlv_type = 0x11,
.offset = offsetof(struct ipa_init_modem_driver_rsp,
default_end_pt_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_rsp,
default_end_pt),
.tlv_type = 0x11,
.offset = offsetof(struct ipa_init_modem_driver_rsp,
default_end_pt),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_rsp,
modem_driver_init_pending_valid),
.tlv_type = 0x12,
.offset = offsetof(struct ipa_init_modem_driver_rsp,
modem_driver_init_pending_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size =
sizeof_field(struct ipa_init_modem_driver_rsp,
modem_driver_init_pending),
.tlv_type = 0x12,
.offset = offsetof(struct ipa_init_modem_driver_rsp,
modem_driver_init_pending),
},
{
.data_type = QMI_EOTI,
},
};
| linux-master | drivers/net/ipa/ipa_qmi_msg.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>
#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_power.h"
/* Hardware is told about receive buffers once a "batch" has been queued */
#define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
/** enum ipa_status_opcode - IPA status opcode field hardware values */
enum ipa_status_opcode { /* *Not* a bitmask */
IPA_STATUS_OPCODE_PACKET = 1,
IPA_STATUS_OPCODE_NEW_RULE_PACKET = 2,
IPA_STATUS_OPCODE_DROPPED_PACKET = 4,
IPA_STATUS_OPCODE_SUSPENDED_PACKET = 8,
IPA_STATUS_OPCODE_LOG = 16,
IPA_STATUS_OPCODE_DCMP = 32,
IPA_STATUS_OPCODE_PACKET_2ND_PASS = 64,
};
/** enum ipa_status_exception - IPA status exception field hardware values */
enum ipa_status_exception { /* *Not* a bitmask */
/* 0 means no exception */
IPA_STATUS_EXCEPTION_DEAGGR = 1,
IPA_STATUS_EXCEPTION_IPTYPE = 4,
IPA_STATUS_EXCEPTION_PACKET_LENGTH = 8,
IPA_STATUS_EXCEPTION_FRAG_RULE_MISS = 16,
IPA_STATUS_EXCEPTION_SW_FILTER = 32,
IPA_STATUS_EXCEPTION_NAT = 64, /* IPv4 */
IPA_STATUS_EXCEPTION_IPV6_CONN_TRACK = 64, /* IPv6 */
IPA_STATUS_EXCEPTION_UC = 128,
IPA_STATUS_EXCEPTION_INVALID_ENDPOINT = 129,
IPA_STATUS_EXCEPTION_HEADER_INSERT = 136,
IPA_STATUS_EXCEPTION_CHECKSUM = 229,
};
/** enum ipa_status_mask - IPA status mask field bitmask hardware values */
enum ipa_status_mask {
IPA_STATUS_MASK_FRAG_PROCESS = BIT(0),
IPA_STATUS_MASK_FILT_PROCESS = BIT(1),
IPA_STATUS_MASK_NAT_PROCESS = BIT(2),
IPA_STATUS_MASK_ROUTE_PROCESS = BIT(3),
IPA_STATUS_MASK_TAG_VALID = BIT(4),
IPA_STATUS_MASK_FRAGMENT = BIT(5),
IPA_STATUS_MASK_FIRST_FRAGMENT = BIT(6),
IPA_STATUS_MASK_V4 = BIT(7),
IPA_STATUS_MASK_CKSUM_PROCESS = BIT(8),
IPA_STATUS_MASK_AGGR_PROCESS = BIT(9),
IPA_STATUS_MASK_DEST_EOT = BIT(10),
IPA_STATUS_MASK_DEAGGR_PROCESS = BIT(11),
IPA_STATUS_MASK_DEAGG_FIRST = BIT(12),
IPA_STATUS_MASK_SRC_EOT = BIT(13),
IPA_STATUS_MASK_PREV_EOT = BIT(14),
IPA_STATUS_MASK_BYTE_LIMIT = BIT(15),
};
/* Special IPA filter/router rule field value indicating "rule miss" */
#define IPA_STATUS_RULE_MISS 0x3ff /* 10-bit filter/router rule fields */
/* The IPA status nat_type field uses enum ipa_nat_type hardware values */
/* enum ipa_status_field_id - IPA packet status structure field identifiers */
enum ipa_status_field_id {
STATUS_OPCODE, /* enum ipa_status_opcode */
STATUS_EXCEPTION, /* enum ipa_status_exception */
STATUS_MASK, /* enum ipa_status_mask (bitmask) */
STATUS_LENGTH,
STATUS_SRC_ENDPOINT,
STATUS_DST_ENDPOINT,
STATUS_METADATA,
STATUS_FILTER_LOCAL, /* Boolean */
STATUS_FILTER_HASH, /* Boolean */
STATUS_FILTER_GLOBAL, /* Boolean */
STATUS_FILTER_RETAIN, /* Boolean */
STATUS_FILTER_RULE_INDEX,
STATUS_ROUTER_LOCAL, /* Boolean */
STATUS_ROUTER_HASH, /* Boolean */
STATUS_UCP, /* Boolean */
STATUS_ROUTER_TABLE,
STATUS_ROUTER_RULE_INDEX,
STATUS_NAT_HIT, /* Boolean */
STATUS_NAT_INDEX,
STATUS_NAT_TYPE, /* enum ipa_nat_type */
STATUS_TAG_LOW32, /* Low-order 32 bits of 48-bit tag */
STATUS_TAG_HIGH16, /* High-order 16 bits of 48-bit tag */
STATUS_SEQUENCE,
STATUS_TIME_OF_DAY,
STATUS_HEADER_LOCAL, /* Boolean */
STATUS_HEADER_OFFSET,
STATUS_FRAG_HIT, /* Boolean */
STATUS_FRAG_RULE_INDEX,
};
/* Size in bytes of an IPA packet status structure */
#define IPA_STATUS_SIZE sizeof(__le32[8])
/* IPA status structure decoder; looks up field values for a structure */
static u32 ipa_status_extract(struct ipa *ipa, const void *data,
enum ipa_status_field_id field)
{
enum ipa_version version = ipa->version;
const __le32 *word = data;
switch (field) {
case STATUS_OPCODE:
return le32_get_bits(word[0], GENMASK(7, 0));
case STATUS_EXCEPTION:
return le32_get_bits(word[0], GENMASK(15, 8));
case STATUS_MASK:
return le32_get_bits(word[0], GENMASK(31, 16));
case STATUS_LENGTH:
return le32_get_bits(word[1], GENMASK(15, 0));
case STATUS_SRC_ENDPOINT:
if (version < IPA_VERSION_5_0)
return le32_get_bits(word[1], GENMASK(20, 16));
return le32_get_bits(word[1], GENMASK(23, 16));
/* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */
/* Status word 1, bits 24-26 are reserved (IPA v5.0+) */
case STATUS_DST_ENDPOINT:
if (version < IPA_VERSION_5_0)
return le32_get_bits(word[1], GENMASK(28, 24));
return le32_get_bits(word[7], GENMASK(23, 16));
/* Status word 1, bits 29-31 are reserved */
case STATUS_METADATA:
return le32_to_cpu(word[2]);
case STATUS_FILTER_LOCAL:
return le32_get_bits(word[3], GENMASK(0, 0));
case STATUS_FILTER_HASH:
return le32_get_bits(word[3], GENMASK(1, 1));
case STATUS_FILTER_GLOBAL:
return le32_get_bits(word[3], GENMASK(2, 2));
case STATUS_FILTER_RETAIN:
return le32_get_bits(word[3], GENMASK(3, 3));
case STATUS_FILTER_RULE_INDEX:
return le32_get_bits(word[3], GENMASK(13, 4));
/* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */
case STATUS_ROUTER_LOCAL:
if (version < IPA_VERSION_5_0)
return le32_get_bits(word[3], GENMASK(14, 14));
return le32_get_bits(word[1], GENMASK(27, 27));
case STATUS_ROUTER_HASH:
if (version < IPA_VERSION_5_0)
return le32_get_bits(word[3], GENMASK(15, 15));
return le32_get_bits(word[1], GENMASK(28, 28));
case STATUS_UCP:
if (version < IPA_VERSION_5_0)
return le32_get_bits(word[3], GENMASK(16, 16));
return le32_get_bits(word[7], GENMASK(31, 31));
case STATUS_ROUTER_TABLE:
if (version < IPA_VERSION_5_0)
return le32_get_bits(word[3], GENMASK(21, 17));
return le32_get_bits(word[3], GENMASK(21, 14));
case STATUS_ROUTER_RULE_INDEX:
return le32_get_bits(word[3], GENMASK(31, 22));
case STATUS_NAT_HIT:
return le32_get_bits(word[4], GENMASK(0, 0));
case STATUS_NAT_INDEX:
return le32_get_bits(word[4], GENMASK(13, 1));
case STATUS_NAT_TYPE:
return le32_get_bits(word[4], GENMASK(15, 14));
case STATUS_TAG_LOW32:
return le32_get_bits(word[4], GENMASK(31, 16)) |
(le32_get_bits(word[5], GENMASK(15, 0)) << 16);
case STATUS_TAG_HIGH16:
return le32_get_bits(word[5], GENMASK(31, 16));
case STATUS_SEQUENCE:
return le32_get_bits(word[6], GENMASK(7, 0));
case STATUS_TIME_OF_DAY:
return le32_get_bits(word[6], GENMASK(31, 8));
case STATUS_HEADER_LOCAL:
return le32_get_bits(word[7], GENMASK(0, 0));
case STATUS_HEADER_OFFSET:
return le32_get_bits(word[7], GENMASK(10, 1));
case STATUS_FRAG_HIT:
return le32_get_bits(word[7], GENMASK(11, 11));
case STATUS_FRAG_RULE_INDEX:
return le32_get_bits(word[7], GENMASK(15, 12));
/* Status word 7, bits 16-30 are reserved */
/* Status word 7, bit 31 is reserved (not IPA v5.0+) */
default:
WARN(true, "%s: bad field_id %u\n", __func__, field);
return 0;
}
}
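/* Rough sketch of the 8-word (32-byte) status layout decoded above
 * (pre-v5.0 placement; a few fields move in v5.0, as the cases show):
 *   word 0: opcode (7:0), exception (15:8), status mask (31:16)
 *   word 1: packet length (15:0), source and destination endpoints
 *   word 2: metadata
 *   word 3: filter/router flags and rule indexes, router table
 *   word 4: NAT hit/index/type, low-order bits of the 48-bit tag
 *   word 5: remaining tag bits
 *   word 6: sequence number (7:0), time of day (31:8)
 *   word 7: header and fragment info (plus UCP and the destination
 *           endpoint on v5.0+)
 */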
/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
{
/* A hard aggregation limit will not be crossed; aggregation closes
* if saving incoming data would cross the hard byte limit boundary.
*
* With a soft limit, aggregation closes *after* the size boundary
* has been crossed. In that case the limit must leave enough space
* after that limit to receive a full MTU of data plus overhead.
*/
if (!aggr_hard_limit)
rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
/* The byte limit is encoded as a number of kilobytes */
return rx_buffer_size / SZ_1K;
}
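/* Illustration: with a soft limit, IPA_MTU + IPA_RX_BUFFER_OVERHEAD is
 * reserved out of the buffer size first, so that a full MTU of data
 * (plus overhead) still fits after the limit has been crossed; with a
 * hard limit the size passed in is used as-is.  Either way the result
 * is converted to kilobytes by integer division by SZ_1K.
 */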
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *all_data,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *other_data;
struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name other_name;
if (ipa_gsi_endpoint_data_empty(data))
return true;
if (!data->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
const struct reg *reg;
u32 buffer_size;
u32 aggr_size;
u32 limit;
if (data->endpoint.filter_support) {
dev_err(dev, "filtering not supported for "
"RX endpoint %u\n",
data->endpoint_id);
return false;
}
/* Nothing more to check for non-AP RX */
if (data->ee_id != GSI_EE_AP)
return true;
rx_config = &data->endpoint.config.rx;
/* The buffer size must hold an MTU plus overhead */
buffer_size = rx_config->buffer_size;
limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
if (buffer_size < limit) {
dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
data->endpoint_id, buffer_size, limit);
return false;
}
if (!data->endpoint.config.aggregation) {
bool result = true;
/* No aggregation; check for bogus aggregation data */
if (rx_config->aggr_time_limit) {
dev_err(dev,
"time limit with no aggregation for RX endpoint %u\n",
data->endpoint_id);
result = false;
}
if (rx_config->aggr_hard_limit) {
dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
data->endpoint_id);
result = false;
}
if (rx_config->aggr_close_eof) {
dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
data->endpoint_id);
result = false;
}
return result; /* Nothing more to check */
}
/* For an endpoint supporting receive aggregation, the byte
* limit defines the point at which aggregation closes. This
* check ensures the receive buffer size doesn't result in a
* limit that exceeds what's representable in the aggregation
* byte limit field.
*/
aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
rx_config->aggr_hard_limit);
reg = ipa_reg(ipa, ENDP_INIT_AGGR);
limit = reg_field_max(reg, BYTE_LIMIT);
if (aggr_size > limit) {
dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
data->endpoint_id, aggr_size, limit);
return false;
}
return true; /* Nothing more to check for RX */
}
/* Starting with IPA v4.5 sequencer replication is obsolete */
if (ipa->version >= IPA_VERSION_4_5) {
if (data->endpoint.config.tx.seq_rep_type) {
dev_err(dev, "no-zero seq_rep_type TX endpoint %u\n",
data->endpoint_id);
return false;
}
}
if (data->endpoint.config.status_enable) {
other_name = data->endpoint.config.tx.status_endpoint;
if (other_name >= count) {
dev_err(dev, "status endpoint name %u out of range "
"for endpoint %u\n",
other_name, data->endpoint_id);
return false;
}
/* Status endpoint must be defined... */
other_data = &all_data[other_name];
if (ipa_gsi_endpoint_data_empty(other_data)) {
dev_err(dev, "DMA endpoint name %u undefined "
"for endpoint %u\n",
other_name, data->endpoint_id);
return false;
}
/* ...and has to be an RX endpoint... */
if (other_data->toward_ipa) {
dev_err(dev,
"status endpoint for endpoint %u not RX\n",
data->endpoint_id);
return false;
}
/* ...and if it's to be an AP endpoint... */
if (other_data->ee_id == GSI_EE_AP) {
/* ...make sure it has status enabled. */
if (!other_data->endpoint.config.status_enable) {
dev_err(dev,
"status not enabled for endpoint %u\n",
other_data->endpoint_id);
return false;
}
}
}
if (data->endpoint.config.dma_mode) {
other_name = data->endpoint.config.dma_endpoint;
if (other_name >= count) {
dev_err(dev, "DMA endpoint name %u out of range "
"for endpoint %u\n",
other_name, data->endpoint_id);
return false;
}
other_data = &all_data[other_name];
if (ipa_gsi_endpoint_data_empty(other_data)) {
dev_err(dev, "DMA endpoint name %u undefined "
"for endpoint %u\n",
other_name, data->endpoint_id);
return false;
}
}
return true;
}
/* Validate endpoint configuration data. Return max defined endpoint ID */
static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name name;
u32 max;
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
count, IPA_ENDPOINT_COUNT);
return 0;
}
/* Make sure needed endpoints have defined data */
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
dev_err(dev, "command TX endpoint not defined\n");
return 0;
}
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
dev_err(dev, "LAN RX endpoint not defined\n");
return 0;
}
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
dev_err(dev, "AP->modem TX endpoint not defined\n");
return 0;
}
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
dev_err(dev, "AP<-modem RX endpoint not defined\n");
return 0;
}
max = 0;
for (name = 0; name < count; name++, dp++) {
if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
return 0;
max = max_t(u32, max, dp->endpoint_id);
}
return max;
}
/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
u32 tre_count)
{
struct gsi *gsi = &endpoint->ipa->gsi;
u32 channel_id = endpoint->channel_id;
enum dma_data_direction direction;
direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}
/* suspend_delay represents suspend for RX, delay for TX endpoints.
* Note that suspend is not supported starting with IPA v4.0, and
* delay mode should not be used starting with IPA v4.2.
*/
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 field_id;
u32 offset;
bool state;
u32 mask;
u32 val;
if (endpoint->toward_ipa)
WARN_ON(ipa->version >= IPA_VERSION_4_2);
else
WARN_ON(ipa->version >= IPA_VERSION_4_0);
reg = ipa_reg(ipa, ENDP_INIT_CTRL);
offset = reg_n_offset(reg, endpoint->endpoint_id);
val = ioread32(ipa->reg_virt + offset);
field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
mask = reg_bit(reg, field_id);
state = !!(val & mask);
/* Don't bother if it's already in the requested state */
if (suspend_delay != state) {
val ^= mask;
iowrite32(val, ipa->reg_virt + offset);
}
return state;
}
/* We don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
/* Delay mode should not be used for IPA v4.2+ */
WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
WARN_ON(!endpoint->toward_ipa);
(void)ipa_endpoint_init_ctrl(endpoint, enable);
}
static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
u32 unit = endpoint_id / 32;
const struct reg *reg;
u32 val;
WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
return !!(val & BIT(endpoint_id % 32));
}
static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
u32 mask = BIT(endpoint_id % 32);
struct ipa *ipa = endpoint->ipa;
u32 unit = endpoint_id / 32;
const struct reg *reg;
WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit));
}
/**
* ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
* @endpoint: Endpoint on which to emulate a suspend
*
* Emulate suspend IPA interrupt to unsuspend an endpoint suspended
* with an open aggregation frame. This is to work around a hardware
* issue in IPA version 3.5.1 where the suspend interrupt will not be
* generated when it should be.
*/
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
struct ipa *ipa = endpoint->ipa;
if (!endpoint->config.aggregation)
return;
/* Nothing to do if the endpoint doesn't have aggregation open */
if (!ipa_endpoint_aggr_active(endpoint))
return;
/* Force close aggregation */
ipa_endpoint_force_close(endpoint);
ipa_interrupt_simulate_suspend(ipa->interrupt);
}
/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
bool suspended;
if (endpoint->ipa->version >= IPA_VERSION_4_0)
return enable; /* For IPA v4.0+, no change made */
WARN_ON(endpoint->toward_ipa);
suspended = ipa_endpoint_init_ctrl(endpoint, enable);
/* A client suspended with an open aggregation frame will not
* generate a SUSPEND IPA interrupt. If enabling suspend, have
* ipa_endpoint_suspend_aggr() handle this.
*/
if (enable && !suspended)
ipa_endpoint_suspend_aggr(endpoint);
return suspended;
}
/* Put all modem RX endpoints into suspend mode, and stop transmission
* on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
* used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
* control instead.
*/
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
u32 endpoint_id = 0;
while (endpoint_id < ipa->endpoint_count) {
struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
if (endpoint->ee_id != GSI_EE_MODEM)
continue;
if (!endpoint->toward_ipa)
(void)ipa_endpoint_program_suspend(endpoint, enable);
else if (ipa->version < IPA_VERSION_4_2)
ipa_endpoint_program_delay(endpoint, enable);
else
gsi_modem_channel_flow_control(&ipa->gsi,
endpoint->channel_id,
enable);
}
}
/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
struct gsi_trans *trans;
u32 endpoint_id;
u32 count;
/* We need one command per modem TX endpoint, plus the commands
* that clear the pipeline.
*/
count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
dev_err(&ipa->pdev->dev,
"no transaction to reset modem exception endpoints\n");
return -EBUSY;
}
for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
struct ipa_endpoint *endpoint;
const struct reg *reg;
u32 offset;
/* We only reset modem TX endpoints */
endpoint = &ipa->endpoint[endpoint_id];
if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
continue;
reg = ipa_reg(ipa, ENDP_STATUS);
offset = reg_n_offset(reg, endpoint_id);
/* Value written is 0, and all bits are updated. That
* means status is disabled on the endpoint, and as a
* result all other fields in the register are ignored.
*/
ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
}
ipa_cmd_pipeline_clear_add(trans);
gsi_trans_commit_wait(trans);
ipa_cmd_pipeline_clear_wait(ipa);
return 0;
}
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
enum ipa_cs_offload_en enabled;
const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_CFG);
/* FRAG_OFFLOAD_EN is 0 */
if (endpoint->config.checksum) {
enum ipa_version version = ipa->version;
if (endpoint->toward_ipa) {
u32 off;
/* Checksum header offset is in 4-byte units */
off = sizeof(struct rmnet_map_header) / sizeof(u32);
val |= reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
enabled = version < IPA_VERSION_4_5
? IPA_CS_OFFLOAD_UL
: IPA_CS_OFFLOAD_INLINE;
} else {
enabled = version < IPA_VERSION_4_5
? IPA_CS_OFFLOAD_DL
: IPA_CS_OFFLOAD_INLINE;
}
} else {
enabled = IPA_CS_OFFLOAD_NONE;
}
val |= reg_encode(reg, CS_OFFLOAD_EN, enabled);
/* CS_GEN_QMB_MASTER_SEL is 0 */
iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
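/* Note: the CS_METADATA_HDR_OFFSET value programmed above is in 4-byte
 * units; struct rmnet_map_header is 4 bytes in current kernels, so the
 * encoded offset is 1, i.e. the uplink checksum header is expected to
 * start immediately after the QMAP header.
 */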
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 val;
if (!endpoint->toward_ipa)
return;
reg = ipa_reg(ipa, ENDP_INIT_NAT);
val = reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);
iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static u32
ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
{
u32 header_size = sizeof(struct rmnet_map_header);
/* Without checksum offload, we just have the MAP header */
if (!endpoint->config.checksum)
return header_size;
if (version < IPA_VERSION_4_5) {
/* Checksum header inserted for AP TX endpoints only */
if (endpoint->toward_ipa)
header_size += sizeof(struct rmnet_map_ul_csum_header);
} else {
/* Checksum header is used in both directions */
header_size += sizeof(struct rmnet_map_v5_csum_header);
}
return header_size;
}
/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
static u32 ipa_header_size_encode(enum ipa_version version,
const struct reg *reg, u32 header_size)
{
u32 field_max = reg_field_max(reg, HDR_LEN);
u32 val;
/* We know field_max can be used as a mask (2^n - 1) */
val = reg_encode(reg, HDR_LEN, header_size & field_max);
if (version < IPA_VERSION_4_5) {
WARN_ON(header_size > field_max);
return val;
}
/* IPA v4.5 adds a few more most-significant bits */
header_size >>= hweight32(field_max);
WARN_ON(header_size > reg_field_max(reg, HDR_LEN_MSB));
val |= reg_encode(reg, HDR_LEN_MSB, header_size);
return val;
}
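/* Illustration (assumed field width): if HDR_LEN were a 6-bit field,
 * field_max would be 0x3f and a 70-byte header would be encoded as
 * HDR_LEN = 6 (70 & 0x3f) with HDR_LEN_MSB = 1 (70 >> 6) on IPA v4.5+;
 * earlier versions must fit the entire size in HDR_LEN alone.
 */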
/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
static u32 ipa_metadata_offset_encode(enum ipa_version version,
const struct reg *reg, u32 offset)
{
u32 field_max = reg_field_max(reg, HDR_OFST_METADATA);
u32 val;
/* We know field_max can be used as a mask (2^n - 1) */
val = reg_encode(reg, HDR_OFST_METADATA, offset);
if (version < IPA_VERSION_4_5) {
WARN_ON(offset > field_max);
return val;
}
/* IPA v4.5 adds a few more most-significant bits */
offset >>= hweight32(field_max);
WARN_ON(offset > reg_field_max(reg, HDR_OFST_METADATA_MSB));
val |= reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
return val;
}
/**
* ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
* @endpoint: Endpoint pointer
*
* We program QMAP endpoints so each packet received is preceded by a QMAP
* header structure. The QMAP header contains a 1-byte mux_id and 2-byte
* packet size field, and we have the IPA hardware populate both for each
* received packet. The header is configured (in the HDR_EXT register)
* to use big endian format.
*
* The packet size is written into the QMAP header's pkt_len field. That
* location is defined here using the HDR_OFST_PKT_SIZE field.
*
* The mux_id comes from a 4-byte metadata value supplied with each packet
* by the modem. It is *not* a QMAP header, but it does contain the mux_id
* value that we want, in its low-order byte. A bitmask defined in the
* endpoint's METADATA_MASK register defines which byte within the modem
* metadata contains the mux_id. And the OFST_METADATA field programmed
* here indicates where the extracted byte should be placed within the QMAP
* header.
*/
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_HDR);
if (endpoint->config.qmap) {
enum ipa_version version = ipa->version;
size_t header_size;
header_size = ipa_qmap_header_size(version, endpoint);
val = ipa_header_size_encode(version, reg, header_size);
/* Define how to fill fields in a received QMAP header */
if (!endpoint->toward_ipa) {
u32 off; /* Field offset within header */
/* Where IPA will write the metadata value */
off = offsetof(struct rmnet_map_header, mux_id);
val |= ipa_metadata_offset_encode(version, reg, off);
/* Where IPA will write the length */
off = offsetof(struct rmnet_map_header, pkt_len);
/* Upper bits are stored in HDR_EXT with IPA v4.5 */
if (version >= IPA_VERSION_4_5)
off &= reg_field_max(reg, HDR_OFST_PKT_SIZE);
val |= reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
val |= reg_encode(reg, HDR_OFST_PKT_SIZE, off);
}
/* For QMAP TX, metadata offset is 0 (modem assumes this) */
val |= reg_bit(reg, HDR_OFST_METADATA_VALID);
/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
/* HDR_A5_MUX is 0 */
/* HDR_LEN_INC_DEAGG_HDR is 0 */
/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}
iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
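/* With the usual struct rmnet_map_header layout (flags/pad, mux_id,
 * __be16 pkt_len) the offsets programmed above are byte 1 for mux_id
 * and byte 2 for pkt_len within the generated QMAP header.  On IPA
 * v4.5+ only the low-order bits of the pkt_len offset are written
 * here; any upper bits go in HDR_EXT (see ipa_endpoint_init_hdr_ext()).
 */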
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
u32 pad_align = endpoint->config.rx.pad_align;
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
if (endpoint->config.qmap) {
/* We have a header, so we must specify its endianness */
val |= reg_bit(reg, HDR_ENDIANNESS); /* big endian */
/* A QMAP header contains a 6 bit pad field at offset 0.
* The RMNet driver assumes this field is meaningful in
* packets it receives, and assumes the header's payload
* length includes that padding. The RMNet driver does
* *not* pad packets it sends, however, so the pad field
* (although 0) should be ignored.
*/
if (!endpoint->toward_ipa) {
val |= reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
val |= reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
}
}
/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
if (!endpoint->toward_ipa)
val |= reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
/* IPA v4.5 adds some most-significant bits to a few fields,
* two of which are defined in the HDR (not HDR_EXT) register.
*/
if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
if (endpoint->config.qmap && !endpoint->toward_ipa) {
u32 mask = reg_field_max(reg, HDR_OFST_PKT_SIZE);
u32 off; /* Field offset within header */
off = offsetof(struct rmnet_map_header, pkt_len);
/* Low bits are in the ENDP_INIT_HDR register */
off >>= hweight32(mask);
val |= reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
}
}
iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 val = 0;
u32 offset;
if (endpoint->toward_ipa)
return; /* Register not valid for TX endpoints */
reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
offset = reg_n_offset(reg, endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->config.qmap)
val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 offset;
u32 val;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
reg = ipa_reg(ipa, ENDP_INIT_MODE);
if (endpoint->config.dma_mode) {
enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
val = reg_encode(reg, ENDP_MODE, IPA_DMA);
val |= reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
} else {
val = reg_encode(reg, ENDP_MODE, IPA_BASIC);
}
/* All other bits unspecified (and 0) */
offset = reg_n_offset(reg, endpoint->endpoint_id);
iowrite32(val, ipa->reg_virt + offset);
}
/* For IPA v4.5+, times are expressed using Qtime. A time is represented
* at one of several available granularities, which are configured in
* ipa_qtime_config(). Three (or, starting with IPA v5.0, four) pulse
* generators are set up with different "tick" periods. A Qtime value
* encodes a tick count along with an indication of a pulse generator
* (which has a fixed tick period). Two pulse generators are always
* available to the AP; a third is available starting with IPA v5.0.
* This function determines which pulse generator most accurately
* represents the time period provided, and returns the tick count to
* use to represent that time.
*/
static u32
ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
{
u32 which = 0;
u32 ticks;
/* Pulse generator 0 has 100 microsecond granularity */
ticks = DIV_ROUND_CLOSEST(microseconds, 100);
if (ticks <= max)
goto out;
/* Pulse generator 1 has millisecond granularity */
which = 1;
ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
if (ticks <= max)
goto out;
if (ipa->version >= IPA_VERSION_5_0) {
/* Pulse generator 2 has 10 millisecond granularity */
which = 2;
ticks = DIV_ROUND_CLOSEST(microseconds, 10000);
}
WARN_ON(ticks > max);
out:
*select = which;
return ticks;
}
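/* Example: with a 5-bit limit field (max = 31), 2500 microseconds
 * yields 25 ticks on pulse generator 0 (100 microsecond granularity),
 * while 20000 microseconds overflows generator 0 (200 ticks) and is
 * instead encoded as 20 ticks on generator 1 (1 millisecond
 * granularity).
 */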
/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
u32 microseconds)
{
u32 ticks;
u32 max;
if (!microseconds)
return 0; /* Nothing to compute if time limit is 0 */
max = reg_field_max(reg, TIME_LIMIT);
if (ipa->version >= IPA_VERSION_4_5) {
u32 select;
ticks = ipa_qtime_val(ipa, microseconds, max, &select);
return reg_encode(reg, AGGR_GRAN_SEL, select) |
reg_encode(reg, TIME_LIMIT, ticks);
}
/* We program aggregation granularity in ipa_hardware_config() */
ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
microseconds, max * IPA_AGGR_GRANULARITY);
return reg_encode(reg, TIME_LIMIT, ticks);
}
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_AGGR);
if (endpoint->config.aggregation) {
if (!endpoint->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
u32 buffer_size;
u32 limit;
rx_config = &endpoint->config.rx;
val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
val |= reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
buffer_size = rx_config->buffer_size;
limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
rx_config->aggr_hard_limit);
val |= reg_encode(reg, BYTE_LIMIT, limit);
limit = rx_config->aggr_time_limit;
val |= aggr_time_limit_encode(ipa, reg, limit);
/* AGGR_PKT_LIMIT is 0 (unlimited) */
if (rx_config->aggr_close_eof)
val |= reg_bit(reg, SW_EOF_ACTIVE);
} else {
val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
val |= reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
/* other fields ignored */
}
/* AGGR_FORCE_CLOSE is 0 */
/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
} else {
val |= reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
/* other fields ignored */
}
iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
/* The head-of-line blocking timer is defined as a tick count. For
* IPA version 4.5 and later the tick count is based on the Qtimer, which is
* derived from the 19.2 MHz SoC XO clock. For older IPA versions
* each tick represents 128 cycles of the IPA core clock.
*
* Return the encoded value representing the timeout period provided
* that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
*/
static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
u32 microseconds)
{
u32 width;
u32 scale;
u64 ticks;
u64 rate;
u32 high;
u32 val;
if (!microseconds)
return 0; /* Nothing to compute if timer period is 0 */
if (ipa->version >= IPA_VERSION_4_5) {
u32 max = reg_field_max(reg, TIMER_LIMIT);
u32 select;
u32 ticks;
ticks = ipa_qtime_val(ipa, microseconds, max, &select);
return reg_encode(reg, TIMER_GRAN_SEL, 1) |
reg_encode(reg, TIMER_LIMIT, ticks);
}
/* Use 64 bit arithmetic to avoid overflow */
rate = ipa_core_clock_rate(ipa);
ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
/* We still need the result to fit into the field */
WARN_ON(ticks > reg_field_max(reg, TIMER_BASE_VALUE));
/* IPA v3.5.1 through v4.1 just record the tick count */
if (ipa->version < IPA_VERSION_4_2)
return reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
/* For IPA v4.2, the tick count is represented by base and
* scale fields within the 32-bit timer register, where:
* ticks = base << scale;
* The best precision is achieved when the base value is as
* large as possible. Find the highest set bit in the tick
* count, and extract the number of bits in the base field
* such that high bit is included.
*/
high = fls(ticks); /* 1..32 (or warning above) */
width = hweight32(reg_fmask(reg, TIMER_BASE_VALUE));
scale = high > width ? high - width : 0;
if (scale) {
/* If we're scaling, round up to get a closer result */
ticks += 1 << (scale - 1);
/* High bit was set, so rounding might have affected it */
if (fls(ticks) != high)
scale++;
}
val = reg_encode(reg, TIMER_SCALE, scale);
val |= reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
return val;
}
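/* IPA v4.2 example (assuming a 5-bit TIMER_BASE_VALUE field): a tick
 * count of 100 (fls() == 7) does not fit in 5 bits, so scale is
 * 7 - 5 = 2; rounding adds 1 << 1 = 2 (giving 102, whose fls() is
 * still 7), the encoded base is 102 >> 2 = 25, and the hardware
 * reconstructs 25 << 2 = 100 ticks.
 */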
/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
u32 microseconds)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 val;
/* This should only be changed when HOL_BLOCK_EN is disabled */
reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
val = hol_block_timer_encode(ipa, reg, microseconds);
iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 offset;
u32 val;
reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
offset = reg_n_offset(reg, endpoint_id);
val = enable ? reg_bit(reg, HOL_BLOCK_EN) : 0;
iowrite32(val, ipa->reg_virt + offset);
/* When enabling, the register must be written twice for IPA v4.5+ */
if (enable && ipa->version >= IPA_VERSION_4_5)
iowrite32(val, ipa->reg_virt + offset);
}
/* Assumes HOL_BLOCK is in disabled state */
static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
u32 microseconds)
{
ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
ipa_endpoint_init_hol_block_en(endpoint, true);
}
static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
{
ipa_endpoint_init_hol_block_en(endpoint, false);
}
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
u32 endpoint_id = 0;
while (endpoint_id < ipa->endpoint_count) {
struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
continue;
ipa_endpoint_init_hol_block_disable(endpoint);
ipa_endpoint_init_hol_block_enable(endpoint, 0);
}
}
static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 val = 0;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
/* DEAGGR_HDR_LEN is 0 */
/* PACKET_OFFSET_VALID is 0 */
/* PACKET_OFFSET_LOCATION is ignored (not valid) */
/* MAX_PACKET_LEN is 0 (not enforced) */
iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
u32 resource_group = endpoint->config.resource_group;
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
val = reg_encode(reg, ENDP_RSRC_GRP, resource_group);
iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 val;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
reg = ipa_reg(ipa, ENDP_INIT_SEQ);
/* Low-order byte configures primary packet processing */
val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
/* Second byte (if supported) configures replicated packet processing */
if (ipa->version < IPA_VERSION_4_5)
val |= reg_encode(reg, SEQ_REP_TYPE,
endpoint->config.tx.seq_rep_type);
iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
/**
* ipa_endpoint_skb_tx() - Transmit a socket buffer
* @endpoint: Endpoint pointer
* @skb: Socket buffer to send
*
* Return: 0 if successful, or a negative error code
*/
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
struct gsi_trans *trans;
u32 nr_frags;
int ret;
/* Make sure source endpoint's TLV FIFO has enough entries to
* hold the linear portion of the skb and all its fragments.
* If not, see if we can linearize it before giving up.
*/
nr_frags = skb_shinfo(skb)->nr_frags;
if (nr_frags > endpoint->skb_frag_max) {
if (skb_linearize(skb))
return -E2BIG;
nr_frags = 0;
}
trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
if (!trans)
return -EBUSY;
ret = gsi_trans_skb_add(trans, skb);
if (ret)
goto err_trans_free;
trans->data = skb; /* transaction owns skb now */
gsi_trans_commit(trans, !netdev_xmit_more());
return 0;
err_trans_free:
gsi_trans_free(trans);
return -ENOMEM;
}
static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_STATUS);
if (endpoint->config.status_enable) {
val |= reg_bit(reg, STATUS_EN);
if (endpoint->toward_ipa) {
enum ipa_endpoint_name name;
u32 status_endpoint_id;
name = endpoint->config.tx.status_endpoint;
status_endpoint_id = ipa->name_map[name]->endpoint_id;
val |= reg_encode(reg, STATUS_ENDP, status_endpoint_id);
}
/* STATUS_LOCATION is 0, meaning IPA packet status
* precedes the packet (not present for IPA v4.5+)
*/
/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
}
iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
struct gsi_trans *trans)
{
struct page *page;
u32 buffer_size;
u32 offset;
u32 len;
int ret;
buffer_size = endpoint->config.rx.buffer_size;
page = dev_alloc_pages(get_order(buffer_size));
if (!page)
return -ENOMEM;
/* Offset the buffer to make space for skb headroom */
offset = NET_SKB_PAD;
len = buffer_size - offset;
ret = gsi_trans_page_add(trans, page, len, offset);
if (ret)
put_page(page);
else
trans->data = page; /* transaction owns page now */
return ret;
}
/**
* ipa_endpoint_replenish() - Replenish endpoint receive buffers
* @endpoint: Endpoint to be replenished
*
* The IPA hardware can hold a fixed number of receive buffers for an RX
* endpoint, based on the number of entries in the underlying channel ring
* buffer. If an endpoint's "backlog" is non-zero, it indicates how many
* more receive buffers can be supplied to the hardware. Replenishing for
* an endpoint can be disabled, in which case buffers are not queued to
* the hardware.
*/
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
struct gsi_trans *trans;
if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
return;
/* Skip it if it's already active */
if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
return;
while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
bool doorbell;
if (ipa_endpoint_replenish_one(endpoint, trans))
goto try_again_later;
/* Ring the doorbell if we've got a full batch */
doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
gsi_trans_commit(trans, doorbell);
}
clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
return;
try_again_later:
gsi_trans_free(trans);
clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
/* Whenever a receive buffer transaction completes we'll try to
* replenish again. It's unlikely, but if we fail to supply even
* one buffer, nothing will trigger another replenish attempt.
* If the hardware has no receive buffers queued, schedule work to
* try replenishing again.
*/
if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
schedule_delayed_work(&endpoint->replenish_work,
msecs_to_jiffies(1));
}
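/* As noted near the top of the file, the doorbell is rung only once
 * per IPA_REPLENISH_BATCH (16) queued buffers, so the hardware is told
 * about new receive buffers in batches rather than one at a time.
 */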
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
/* Start replenishing if hardware currently has no buffers */
if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
ipa_endpoint_replenish(endpoint);
}
static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
}
static void ipa_endpoint_replenish_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct ipa_endpoint *endpoint;
endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
ipa_endpoint_replenish(endpoint);
}
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
void *data, u32 len, u32 extra)
{
struct sk_buff *skb;
if (!endpoint->netdev)
return;
skb = __dev_alloc_skb(len, GFP_ATOMIC);
if (skb) {
/* Copy the data into the socket buffer and receive it */
skb_put(skb, len);
memcpy(skb->data, data, len);
skb->truesize += extra;
}
ipa_modem_skb_rx(endpoint->netdev, skb);
}
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
struct page *page, u32 len)
{
u32 buffer_size = endpoint->config.rx.buffer_size;
struct sk_buff *skb;
/* Nothing to do if there's no netdev */
if (!endpoint->netdev)
return false;
WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
skb = build_skb(page_address(page), buffer_size);
if (skb) {
/* Reserve the headroom and account for the data */
skb_reserve(skb, NET_SKB_PAD);
skb_put(skb, len);
}
/* Receive the buffer (or record drop if unable to build it) */
ipa_modem_skb_rx(endpoint->netdev, skb);
return skb != NULL;
}
/* The format of an IPA packet status structure is the same for several
* status types (opcodes). Other types aren't currently supported.
*/
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
switch (opcode) {
case IPA_STATUS_OPCODE_PACKET:
case IPA_STATUS_OPCODE_DROPPED_PACKET:
case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
return true;
default:
return false;
}
}
static bool
ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, const void *data)
{
struct ipa *ipa = endpoint->ipa;
enum ipa_status_opcode opcode;
u32 endpoint_id;
opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
if (!ipa_status_format_packet(opcode))
return true;
endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT);
if (endpoint_id != endpoint->endpoint_id)
return true;
return false; /* Don't skip this packet, process it */
}
static bool
ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
{
struct ipa_endpoint *command_endpoint;
enum ipa_status_mask status_mask;
struct ipa *ipa = endpoint->ipa;
u32 endpoint_id;
status_mask = ipa_status_extract(ipa, data, STATUS_MASK);
if (!status_mask)
return false; /* No valid tag */
/* The status contains a valid tag. We know the packet was sent to
* this endpoint (already verified by ipa_endpoint_status_skip()).
* If the packet came from the AP->command TX endpoint we know
* this packet was sent as part of the pipeline clear process.
*/
endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
if (endpoint_id == command_endpoint->endpoint_id) {
complete(&ipa->completion);
} else {
dev_err(&ipa->pdev->dev,
"unexpected tagged packet from endpoint %u\n",
endpoint_id);
}
return true;
}
/* Return whether the status indicates the packet should be dropped */
static bool
ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, const void *data)
{
enum ipa_status_exception exception;
struct ipa *ipa = endpoint->ipa;
u32 rule;
/* If the status indicates a tagged transfer, we'll drop the packet */
if (ipa_endpoint_status_tag_valid(endpoint, data))
return true;
/* Deaggregation exceptions we drop; all other types we consume */
exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION);
if (exception)
return exception == IPA_STATUS_EXCEPTION_DEAGGR;
/* Drop the packet if it fails to match a routing rule; otherwise no */
rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX);
return rule == IPA_STATUS_RULE_MISS;
}
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
struct page *page, u32 total_len)
{
u32 buffer_size = endpoint->config.rx.buffer_size;
void *data = page_address(page) + NET_SKB_PAD;
u32 unused = buffer_size - total_len;
struct ipa *ipa = endpoint->ipa;
u32 resid = total_len;
while (resid) {
u32 length;
u32 align;
u32 len;
if (resid < IPA_STATUS_SIZE) {
dev_err(&endpoint->ipa->pdev->dev,
"short message (%u bytes < %zu byte status)\n",
resid, IPA_STATUS_SIZE);
break;
}
/* Skip over status packets that lack packet data */
length = ipa_status_extract(ipa, data, STATUS_LENGTH);
if (!length || ipa_endpoint_status_skip(endpoint, data)) {
data += IPA_STATUS_SIZE;
resid -= IPA_STATUS_SIZE;
continue;
}
/* Compute the amount of buffer space consumed by the packet,
* including the status. If the hardware is configured to
* pad packet data to an aligned boundary, account for that.
* And if checksum offload is enabled a trailer containing
* computed checksum information will be appended.
*/
align = endpoint->config.rx.pad_align ? : 1;
len = IPA_STATUS_SIZE + ALIGN(length, align);
if (endpoint->config.checksum)
len += sizeof(struct rmnet_map_dl_csum_trailer);
if (!ipa_endpoint_status_drop(endpoint, data)) {
void *data2;
u32 extra;
/* Client receives only packet data (no status) */
data2 = data + IPA_STATUS_SIZE;
/* Have the true size reflect the extra unused space in
* the original receive buffer. Distribute the "cost"
* proportionately across all aggregated packets in the
* buffer.
*/
extra = DIV_ROUND_CLOSEST(unused * len, total_len);
ipa_endpoint_skb_copy(endpoint, data2, length, extra);
}
/* Consume status and the full packet it describes */
data += len;
resid -= len;
}
}
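/* Each unit consumed by the loop above is laid out as:
 *   [32-byte status][packet data, padded to pad_align if configured]
 *   [checksum trailer, if checksum offload is enabled]
 * and parsing continues until the residual length is exhausted or a
 * status too short to be valid is encountered.
 */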
void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
struct gsi_trans *trans)
{
struct page *page;
if (endpoint->toward_ipa)
return;
if (trans->cancelled)
goto done;
/* Parse or build a socket buffer using the actual received length */
page = trans->data;
if (endpoint->config.status_enable)
ipa_endpoint_status_parse(endpoint, page, trans->len);
else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
trans->data = NULL; /* Pages have been consumed */
done:
ipa_endpoint_replenish(endpoint);
}
void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
struct gsi_trans *trans)
{
if (endpoint->toward_ipa) {
struct ipa *ipa = endpoint->ipa;
/* Nothing to do for command transactions */
if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
struct sk_buff *skb = trans->data;
if (skb)
dev_kfree_skb_any(skb);
}
} else {
struct page *page = trans->data;
if (page)
put_page(page);
}
}
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, ROUTE);
/* ROUTE_DIS is 0 */
val = reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
val |= reg_bit(reg, ROUTE_DEF_HDR_TABLE);
/* ROUTE_DEF_HDR_OFST is 0 */
val |= reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
val |= reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
ipa_endpoint_default_route_set(ipa, 0);
}
/**
* ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
* @endpoint: Endpoint to be reset
*
* If aggregation is active on an RX endpoint when a reset is performed
* on its underlying GSI channel, a special sequence of actions must be
* taken to ensure the IPA pipeline is properly cleared.
*
* Return: 0 if successful, or a negative error code
*/
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
struct device *dev = &endpoint->ipa->pdev->dev;
struct ipa *ipa = endpoint->ipa;
struct gsi *gsi = &ipa->gsi;
bool suspended = false;
dma_addr_t addr;
u32 retries;
u32 len = 1;
void *virt;
int ret;
virt = kzalloc(len, GFP_KERNEL);
if (!virt)
return -ENOMEM;
addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, addr)) {
ret = -ENOMEM;
goto out_kfree;
}
/* Force close aggregation before issuing the reset */
ipa_endpoint_force_close(endpoint);
/* Reset and reconfigure the channel with the doorbell engine
* disabled. Then poll until we know aggregation is no longer
* active. We'll re-enable the doorbell (if appropriate) when
* we reset again below.
*/
gsi_channel_reset(gsi, endpoint->channel_id, false);
/* Make sure the channel isn't suspended */
suspended = ipa_endpoint_program_suspend(endpoint, false);
/* Start channel and do a 1 byte read */
ret = gsi_channel_start(gsi, endpoint->channel_id);
if (ret)
goto out_suspend_again;
ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
if (ret)
goto err_endpoint_stop;
/* Wait for aggregation to be closed on the channel */
retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
do {
if (!ipa_endpoint_aggr_active(endpoint))
break;
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
} while (retries--);
/* Check one last time */
if (ipa_endpoint_aggr_active(endpoint))
dev_err(dev, "endpoint %u still active during reset\n",
endpoint->endpoint_id);
gsi_trans_read_byte_done(gsi, endpoint->channel_id);
ret = gsi_channel_stop(gsi, endpoint->channel_id);
if (ret)
goto out_suspend_again;
/* Finally, reset and reconfigure the channel again (re-enabling
* the doorbell engine if appropriate). Sleep for 1 millisecond to
* complete the channel reset sequence. Finish by suspending the
* channel again (if necessary).
*/
gsi_channel_reset(gsi, endpoint->channel_id, true);
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
goto out_suspend_again;
err_endpoint_stop:
(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
if (suspended)
(void)ipa_endpoint_program_suspend(endpoint, true);
dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
kfree(virt);
return ret;
}
static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
u32 channel_id = endpoint->channel_id;
struct ipa *ipa = endpoint->ipa;
bool special;
int ret = 0;
/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
* is active, we need to handle things specially to recover.
* All other cases just need to reset the underlying GSI channel.
*/
special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
endpoint->config.aggregation;
if (special && ipa_endpoint_aggr_active(endpoint))
ret = ipa_endpoint_reset_rx_aggr(endpoint);
else
gsi_channel_reset(&ipa->gsi, channel_id, true);
if (ret)
dev_err(&ipa->pdev->dev,
"error %d resetting channel %u for endpoint %u\n",
ret, endpoint->channel_id, endpoint->endpoint_id);
}
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
if (endpoint->toward_ipa) {
/* Newer versions of IPA use GSI channel flow control
* instead of endpoint DELAY mode to prevent sending data.
* Flow control is disabled for newly-allocated channels,
* and we can assume flow control is not (ever) enabled
* for AP TX channels.
*/
if (endpoint->ipa->version < IPA_VERSION_4_2)
ipa_endpoint_program_delay(endpoint, false);
} else {
/* Ensure suspend mode is off on all AP RX endpoints */
(void)ipa_endpoint_program_suspend(endpoint, false);
}
ipa_endpoint_init_cfg(endpoint);
ipa_endpoint_init_nat(endpoint);
ipa_endpoint_init_hdr(endpoint);
ipa_endpoint_init_hdr_ext(endpoint);
ipa_endpoint_init_hdr_metadata_mask(endpoint);
ipa_endpoint_init_mode(endpoint);
ipa_endpoint_init_aggr(endpoint);
if (!endpoint->toward_ipa) {
if (endpoint->config.rx.holb_drop)
ipa_endpoint_init_hol_block_enable(endpoint, 0);
else
ipa_endpoint_init_hol_block_disable(endpoint);
}
ipa_endpoint_init_deaggr(endpoint);
ipa_endpoint_init_rsrc_grp(endpoint);
ipa_endpoint_init_seq(endpoint);
ipa_endpoint_status(endpoint);
}
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
struct gsi *gsi = &ipa->gsi;
int ret;
ret = gsi_channel_start(gsi, endpoint->channel_id);
if (ret) {
dev_err(&ipa->pdev->dev,
"error %d starting %cX channel %u for endpoint %u\n",
ret, endpoint->toward_ipa ? 'T' : 'R',
endpoint->channel_id, endpoint_id);
return ret;
}
if (!endpoint->toward_ipa) {
ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
ipa_endpoint_replenish_enable(endpoint);
}
__set_bit(endpoint_id, ipa->enabled);
return 0;
}
void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
struct gsi *gsi = &ipa->gsi;
int ret;
if (!test_bit(endpoint_id, ipa->enabled))
return;
__clear_bit(endpoint_id, endpoint->ipa->enabled);
if (!endpoint->toward_ipa) {
ipa_endpoint_replenish_disable(endpoint);
ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
}
/* Note that if stop fails, the channel's state is not well-defined */
ret = gsi_channel_stop(gsi, endpoint->channel_id);
if (ret)
dev_err(&ipa->pdev->dev,
"error %d attempting to stop endpoint %u\n", ret,
endpoint_id);
}
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
struct device *dev = &endpoint->ipa->pdev->dev;
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
return;
if (!endpoint->toward_ipa) {
ipa_endpoint_replenish_disable(endpoint);
(void)ipa_endpoint_program_suspend(endpoint, true);
}
ret = gsi_channel_suspend(gsi, endpoint->channel_id);
if (ret)
dev_err(dev, "error %d suspending channel %u\n", ret,
endpoint->channel_id);
}
void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
struct device *dev = &endpoint->ipa->pdev->dev;
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
return;
if (!endpoint->toward_ipa)
(void)ipa_endpoint_program_suspend(endpoint, false);
ret = gsi_channel_resume(gsi, endpoint->channel_id);
if (ret)
dev_err(dev, "error %d resuming channel %u\n", ret,
endpoint->channel_id);
else if (!endpoint->toward_ipa)
ipa_endpoint_replenish_enable(endpoint);
}
void ipa_endpoint_suspend(struct ipa *ipa)
{
if (!ipa->setup_complete)
return;
if (ipa->modem_netdev)
ipa_modem_suspend(ipa->modem_netdev);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}
void ipa_endpoint_resume(struct ipa *ipa)
{
if (!ipa->setup_complete)
return;
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
if (ipa->modem_netdev)
ipa_modem_resume(ipa->modem_netdev);
}
static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
struct gsi *gsi = &endpoint->ipa->gsi;
u32 channel_id = endpoint->channel_id;
/* Only AP endpoints get set up */
if (endpoint->ee_id != GSI_EE_AP)
return;
endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
if (!endpoint->toward_ipa) {
/* RX transactions require a single TRE, so the maximum
* backlog is the same as the maximum outstanding TREs.
*/
clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
INIT_DELAYED_WORK(&endpoint->replenish_work,
ipa_endpoint_replenish_work);
}
ipa_endpoint_program(endpoint);
__set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
}
static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
__clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
if (!endpoint->toward_ipa)
cancel_delayed_work_sync(&endpoint->replenish_work);
ipa_endpoint_reset(endpoint);
}
void ipa_endpoint_setup(struct ipa *ipa)
{
u32 endpoint_id;
for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
}
void ipa_endpoint_teardown(struct ipa *ipa)
{
u32 endpoint_id;
for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
}
void ipa_endpoint_deconfig(struct ipa *ipa)
{
ipa->available_count = 0;
bitmap_free(ipa->available);
ipa->available = NULL;
}
int ipa_endpoint_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
const struct reg *reg;
u32 endpoint_id;
u32 hw_limit;
u32 tx_count;
u32 rx_count;
u32 rx_base;
u32 limit;
u32 val;
/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
* Furthermore, the endpoints were not grouped such that TX
* endpoint numbers started with 0 and RX endpoints had numbers
* higher than all TX endpoints, so we can't do the simple
* direction check used for newer hardware below.
*
* For hardware that doesn't support the FLAVOR_0 register,
* just set the available mask to support any endpoint, and
* assume the configuration is valid.
*/
if (ipa->version < IPA_VERSION_3_5) {
ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
if (!ipa->available)
return -ENOMEM;
ipa->available_count = IPA_ENDPOINT_MAX;
bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);
return 0;
}
/* Find out about the endpoints supplied by the hardware, and ensure
* the highest one doesn't exceed the number supported by software.
*/
reg = ipa_reg(ipa, FLAVOR_0);
val = ioread32(ipa->reg_virt + reg_offset(reg));
/* Our RX is an IPA producer; our TX is an IPA consumer. */
tx_count = reg_decode(reg, MAX_CONS_PIPES, val);
rx_count = reg_decode(reg, MAX_PROD_PIPES, val);
rx_base = reg_decode(reg, PROD_LOWEST, val);
limit = rx_base + rx_count;
if (limit > IPA_ENDPOINT_MAX) {
dev_err(dev, "too many endpoints, %u > %u\n",
limit, IPA_ENDPOINT_MAX);
return -EINVAL;
}
/* Prior to IPA v5.0, the hardware supported no more than 32 endpoints */
hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
if (limit > hw_limit) {
dev_err(dev, "unexpected endpoint count, %u > %u\n",
limit, hw_limit);
return -EINVAL;
}
/* Allocate and initialize the available endpoint bitmap */
ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
if (!ipa->available)
return -ENOMEM;
ipa->available_count = limit;
/* Mark all supported RX and TX endpoints as available */
bitmap_set(ipa->available, 0, tx_count);
bitmap_set(ipa->available, rx_base, rx_count);
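/* Illustrative example (hypothetical values): with tx_count 10, rx_base 12,
 * and rx_count 8, endpoint IDs 0-9 are available for TX, IDs 12-19 for RX,
 * and IDs 10-11 remain unavailable.
 */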
for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
struct ipa_endpoint *endpoint;
if (endpoint_id >= limit) {
dev_err(dev, "invalid endpoint id, %u > %u\n",
endpoint_id, limit - 1);
goto err_free_bitmap;
}
if (!test_bit(endpoint_id, ipa->available)) {
dev_err(dev, "unavailable endpoint id %u\n",
endpoint_id);
goto err_free_bitmap;
}
/* Make sure it's pointing in the right direction */
endpoint = &ipa->endpoint[endpoint_id];
if (endpoint->toward_ipa) {
if (endpoint_id < tx_count)
continue;
} else if (endpoint_id >= rx_base) {
continue;
}
dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
goto err_free_bitmap;
}
return 0;
err_free_bitmap:
ipa_endpoint_deconfig(ipa);
return -EINVAL;
}
static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
const struct ipa_gsi_endpoint_data *data)
{
struct ipa_endpoint *endpoint;
endpoint = &ipa->endpoint[data->endpoint_id];
if (data->ee_id == GSI_EE_AP)
ipa->channel_map[data->channel_id] = endpoint;
ipa->name_map[name] = endpoint;
endpoint->ipa = ipa;
endpoint->ee_id = data->ee_id;
endpoint->channel_id = data->channel_id;
endpoint->endpoint_id = data->endpoint_id;
endpoint->toward_ipa = data->toward_ipa;
endpoint->config = data->endpoint.config;
__set_bit(endpoint->endpoint_id, ipa->defined);
}
static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
__clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
memset(endpoint, 0, sizeof(*endpoint));
}
void ipa_endpoint_exit(struct ipa *ipa)
{
u32 endpoint_id;
ipa->filtered = 0;
for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
bitmap_free(ipa->enabled);
ipa->enabled = NULL;
bitmap_free(ipa->set_up);
ipa->set_up = NULL;
bitmap_free(ipa->defined);
ipa->defined = NULL;
memset(ipa->name_map, 0, sizeof(ipa->name_map));
memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}
/* Initialize endpoint state; the bitmap of endpoints that support filtering
 * is recorded in ipa->filtered. Returns 0 if successful, or a negative
 * error code.
 */
int ipa_endpoint_init(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
enum ipa_endpoint_name name;
u32 filtered;
BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
/* Number of endpoints is one more than the maximum ID */
ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
if (!ipa->endpoint_count)
return -EINVAL;
/* Initialize endpoint state bitmaps */
ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
if (!ipa->defined)
return -ENOMEM;
ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
if (!ipa->set_up)
goto err_free_defined;
ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
if (!ipa->enabled)
goto err_free_set_up;
filtered = 0;
for (name = 0; name < count; name++, data++) {
if (ipa_gsi_endpoint_data_empty(data))
continue; /* Skip over empty slots */
ipa_endpoint_init_one(ipa, name, data);
if (data->endpoint.filter_support)
filtered |= BIT(data->endpoint_id);
if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
ipa->modem_tx_count++;
}
/* Make sure the set of filtered endpoints is valid */
if (!ipa_filtered_valid(ipa, filtered)) {
ipa_endpoint_exit(ipa);
return -EINVAL;
}
ipa->filtered = filtered;
return 0;
err_free_set_up:
bitmap_free(ipa->set_up);
ipa->set_up = NULL;
err_free_defined:
bitmap_free(ipa->defined);
ipa->defined = NULL;
return -ENOMEM;
}
| linux-master | drivers/net/ipa/ipa_endpoint.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/qrtr.h>
#include <linux/soc/qcom/qmi.h>
#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "ipa_modem.h"
#include "ipa_qmi_msg.h"
/**
* DOC: AP/Modem QMI Handshake
*
* The AP and modem perform a "handshake" at initialization time to ensure
* both sides know when everything is ready to begin operating. The AP
* driver (this code) uses two QMI handles (endpoints) for this; a client
* using a service on the modem, and a server to service modem requests (and
* to supply an indication message from the AP). Once the handshake is
* complete, the AP and modem may begin IPA operation. This occurs
* only when the AP IPA driver, modem IPA driver, and IPA microcontroller
* are ready.
*
* The QMI service on the modem expects to receive an INIT_DRIVER request from
* the AP, which contains parameters used by the modem during initialization.
* The AP sends this request as soon as it knows the modem side service
* is available. The modem responds to this request, and if this response
* contains a success result, the AP knows the modem IPA driver is ready.
*
* The modem is responsible for loading firmware on the IPA microcontroller.
* This occurs only during the initial modem boot. The modem sends a
* separate DRIVER_INIT_COMPLETE request to the AP to report that the
* microcontroller is ready. The AP may assume the microcontroller is
* ready and remain so (even if the modem reboots) once it has received
* and responded to this request.
*
* There is one final exchange involved in the handshake. It is required
* on the initial modem boot, and optional (though in practice it does occur) on
* subsequent boots. The modem expects to receive a final INIT_COMPLETE
* indication message from the AP when it is about to begin its normal
* operation. The AP will only send this message after it has received
* and responded to an INDICATION_REGISTER request from the modem.
*
* So in summary:
* - Whenever the AP learns the modem has booted and its IPA QMI service
* is available, it sends an INIT_DRIVER request to the modem. The
* modem supplies a success response when it is ready to operate.
* - On the initial boot, the modem sets up the IPA microcontroller, and
* sends a DRIVER_INIT_COMPLETE request to the AP when this is done.
* - When the modem is ready to receive an INIT_COMPLETE indication from
* the AP, it sends an INDICATION_REGISTER request to the AP.
* - On the initial modem boot, everything is ready when:
* - AP has received a success response from its INIT_DRIVER request
* - AP has responded to a DRIVER_INIT_COMPLETE request
* - AP has responded to an INDICATION_REGISTER request from the modem
* - AP has sent an INIT_COMPLETE indication to the modem
* - On subsequent modem boots, everything is ready when:
* - AP has received a success response from its INIT_DRIVER request
* - AP has responded to a DRIVER_INIT_COMPLETE request
* - The INDICATION_REGISTER request and INIT_COMPLETE indication are
* optional for non-initial modem boots, and have no bearing on the
* determination of when things are "ready"
*/
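/* Handshake message sequence (a summary of the description above):
 *
 * AP -> modem INIT_DRIVER request
 * modem -> AP INIT_DRIVER response (success means the modem driver is ready)
 * modem -> AP DRIVER_INIT_COMPLETE request (microcontroller is ready)
 * modem -> AP INDICATION_REGISTER request (initial boot; optional afterward)
 * AP -> modem INIT_COMPLETE indication (sent once the above is complete)
 */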
#define IPA_HOST_SERVICE_SVC_ID 0x31
#define IPA_HOST_SVC_VERS 1
#define IPA_HOST_SERVICE_INS_ID 1
#define IPA_MODEM_SERVICE_SVC_ID 0x31
#define IPA_MODEM_SERVICE_INS_ID 2
#define IPA_MODEM_SVC_VERS 1
#define QMI_INIT_DRIVER_TIMEOUT 60000 /* A minute in milliseconds */
/* Send an INIT_COMPLETE indication message to the modem */
static void ipa_server_init_complete(struct ipa_qmi *ipa_qmi)
{
struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
struct qmi_handle *qmi = &ipa_qmi->server_handle;
struct sockaddr_qrtr *sq = &ipa_qmi->modem_sq;
struct ipa_init_complete_ind ind = { };
int ret;
ind.status.result = QMI_RESULT_SUCCESS_V01;
ind.status.error = QMI_ERR_NONE_V01;
ret = qmi_send_indication(qmi, sq, IPA_QMI_INIT_COMPLETE,
IPA_QMI_INIT_COMPLETE_IND_SZ,
ipa_init_complete_ind_ei, &ind);
if (ret)
dev_err(&ipa->pdev->dev,
"error %d sending init complete indication\n", ret);
else
ipa_qmi->indication_sent = true;
}
/* If requested (and not already sent) send the INIT_COMPLETE indication */
static void ipa_qmi_indication(struct ipa_qmi *ipa_qmi)
{
if (!ipa_qmi->indication_requested)
return;
if (ipa_qmi->indication_sent)
return;
ipa_server_init_complete(ipa_qmi);
}
/* Determine whether everything is ready to start normal operation.
* We know everything (else) is ready when we know the IPA driver on
* the modem is ready, and the microcontroller is ready.
*
* When the modem boots (or reboots), the handshake sequence starts
* with the AP sending the modem an INIT_DRIVER request. Within
* that request, the uc_loaded flag will be zero (false) for an
* initial boot, non-zero (true) for a subsequent (SSR) boot.
*/
static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
{
struct ipa *ipa;
int ret;
/* We aren't ready until the modem and microcontroller are */
if (!ipa_qmi->modem_ready || !ipa_qmi->uc_ready)
return;
/* Send the indication message if it was requested */
ipa_qmi_indication(ipa_qmi);
/* The initial boot requires us to send the indication. */
if (ipa_qmi->initial_boot) {
if (!ipa_qmi->indication_sent)
return;
/* The initial modem boot completed successfully */
ipa_qmi->initial_boot = false;
}
/* We're ready. Start up normal operation */
ipa = container_of(ipa_qmi, struct ipa, qmi);
ret = ipa_modem_start(ipa);
if (ret)
dev_err(&ipa->pdev->dev, "error %d starting modem\n", ret);
}
/* All QMI clients from the modem node are gone (modem shut down or crashed). */
static void ipa_server_bye(struct qmi_handle *qmi, unsigned int node)
{
struct ipa_qmi *ipa_qmi;
ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
/* The modem client and server go away at the same time */
memset(&ipa_qmi->modem_sq, 0, sizeof(ipa_qmi->modem_sq));
/* initial_boot doesn't change when modem reboots */
/* uc_ready doesn't change when modem reboots */
ipa_qmi->modem_ready = false;
ipa_qmi->indication_requested = false;
ipa_qmi->indication_sent = false;
}
static const struct qmi_ops ipa_server_ops = {
.bye = ipa_server_bye,
};
/* Callback function to handle an INDICATION_REGISTER request message from the
* modem. This informs the AP that the modem is now ready to receive the
* INIT_COMPLETE indication message.
*/
static void ipa_server_indication_register(struct qmi_handle *qmi,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn,
const void *decoded)
{
struct ipa_indication_register_rsp rsp = { };
struct ipa_qmi *ipa_qmi;
struct ipa *ipa;
int ret;
ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
ipa = container_of(ipa_qmi, struct ipa, qmi);
rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
rsp.rsp.error = QMI_ERR_NONE_V01;
ret = qmi_send_response(qmi, sq, txn, IPA_QMI_INDICATION_REGISTER,
IPA_QMI_INDICATION_REGISTER_RSP_SZ,
ipa_indication_register_rsp_ei, &rsp);
if (!ret) {
ipa_qmi->indication_requested = true;
ipa_qmi_ready(ipa_qmi); /* We might be ready now */
} else {
dev_err(&ipa->pdev->dev,
"error %d sending register indication response\n", ret);
}
}
/* Respond to a DRIVER_INIT_COMPLETE request message from the modem. */
static void ipa_server_driver_init_complete(struct qmi_handle *qmi,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn,
const void *decoded)
{
struct ipa_driver_init_complete_rsp rsp = { };
struct ipa_qmi *ipa_qmi;
struct ipa *ipa;
int ret;
ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
ipa = container_of(ipa_qmi, struct ipa, qmi);
rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
rsp.rsp.error = QMI_ERR_NONE_V01;
ret = qmi_send_response(qmi, sq, txn, IPA_QMI_DRIVER_INIT_COMPLETE,
IPA_QMI_DRIVER_INIT_COMPLETE_RSP_SZ,
ipa_driver_init_complete_rsp_ei, &rsp);
if (!ret) {
ipa_qmi->uc_ready = true;
ipa_qmi_ready(ipa_qmi); /* We might be ready now */
} else {
dev_err(&ipa->pdev->dev,
"error %d sending init complete response\n", ret);
}
}
/* The server handles two request message types sent by the modem. */
static const struct qmi_msg_handler ipa_server_msg_handlers[] = {
{
.type = QMI_REQUEST,
.msg_id = IPA_QMI_INDICATION_REGISTER,
.ei = ipa_indication_register_req_ei,
.decoded_size = IPA_QMI_INDICATION_REGISTER_REQ_SZ,
.fn = ipa_server_indication_register,
},
{
.type = QMI_REQUEST,
.msg_id = IPA_QMI_DRIVER_INIT_COMPLETE,
.ei = ipa_driver_init_complete_req_ei,
.decoded_size = IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ,
.fn = ipa_server_driver_init_complete,
},
{ },
};
/* Handle an INIT_DRIVER response message from the modem. */
static void ipa_client_init_driver(struct qmi_handle *qmi,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn, const void *decoded)
{
txn->result = 0; /* IPA_QMI_INIT_DRIVER request was successful */
complete(&txn->completion);
}
/* The client handles one response message type sent by the modem. */
static const struct qmi_msg_handler ipa_client_msg_handlers[] = {
{
.type = QMI_RESPONSE,
.msg_id = IPA_QMI_INIT_DRIVER,
.ei = ipa_init_modem_driver_rsp_ei,
.decoded_size = IPA_QMI_INIT_DRIVER_RSP_SZ,
.fn = ipa_client_init_driver,
},
{ },
};
/* Return a pointer to an init modem driver request structure, which contains
* configuration parameters for the modem. The modem may be started multiple
* times, but generally these parameters don't change so we can reuse the
* request structure once it's initialized. The only exception is the
* skip_uc_load field, which will be set only after the microcontroller has
* reported it has completed its initialization.
*/
static const struct ipa_init_modem_driver_req *
init_modem_driver_req(struct ipa_qmi *ipa_qmi)
{
struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
u32 modem_route_count = ipa->modem_route_count;
static struct ipa_init_modem_driver_req req;
const struct ipa_mem *mem;
/* The microcontroller is initialized on the first boot */
req.skip_uc_load_valid = 1;
req.skip_uc_load = ipa->uc_loaded ? 1 : 0;
/* We only have to initialize most of it once */
if (req.platform_type_valid)
return &req;
req.platform_type_valid = 1;
req.platform_type = IPA_QMI_PLATFORM_TYPE_MSM_ANDROID;
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
if (mem->size) {
req.hdr_tbl_info_valid = 1;
req.hdr_tbl_info.start = ipa->mem_offset + mem->offset;
req.hdr_tbl_info.end = req.hdr_tbl_info.start + mem->size - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v4_route_tbl_info.end = modem_route_count - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v6_route_tbl_info.end = modem_route_count - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
req.v4_filter_tbl_start = ipa->mem_offset + mem->offset;
mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER);
req.v6_filter_tbl_start_valid = 1;
req.v6_filter_tbl_start = ipa->mem_offset + mem->offset;
mem = ipa_mem_find(ipa, IPA_MEM_MODEM);
if (mem->size) {
req.modem_mem_info_valid = 1;
req.modem_mem_info.start = ipa->mem_offset + mem->offset;
req.modem_mem_info.size = mem->size;
}
req.ctrl_comm_dest_end_pt_valid = 1;
req.ctrl_comm_dest_end_pt =
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->endpoint_id;
/* skip_uc_load_valid and skip_uc_load are set above */
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
if (mem->size) {
req.hdr_proc_ctx_tbl_info_valid = 1;
req.hdr_proc_ctx_tbl_info.start =
ipa->mem_offset + mem->offset;
req.hdr_proc_ctx_tbl_info.end =
req.hdr_proc_ctx_tbl_info.start + mem->size - 1;
}
/* Nothing to report for the compression table (zip_tbl_info) */
mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE_HASHED);
if (mem->size) {
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
req.v4_hash_route_tbl_info.end = modem_route_count - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
if (mem->size) {
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
req.v6_hash_route_tbl_info.end = modem_route_count - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
if (mem->size) {
req.v4_hash_filter_tbl_start_valid = 1;
req.v4_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
}
mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER_HASHED);
if (mem->size) {
req.v6_hash_filter_tbl_start_valid = 1;
req.v6_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
}
/* The stats fields are only valid for IPA v4.0+ */
if (ipa->version >= IPA_VERSION_4_0) {
mem = ipa_mem_find(ipa, IPA_MEM_STATS_QUOTA_MODEM);
if (mem->size) {
req.hw_stats_quota_base_addr_valid = 1;
req.hw_stats_quota_base_addr =
ipa->mem_offset + mem->offset;
req.hw_stats_quota_size_valid = 1;
req.hw_stats_quota_size = ipa->mem_offset + mem->size;
}
/* If the DROP stats region is defined, include it */
mem = ipa_mem_find(ipa, IPA_MEM_STATS_DROP);
if (mem && mem->size) {
req.hw_stats_drop_base_addr_valid = 1;
req.hw_stats_drop_base_addr =
ipa->mem_offset + mem->offset;
req.hw_stats_drop_size_valid = 1;
req.hw_stats_drop_size = ipa->mem_offset + mem->size;
}
}
return &req;
}
/* Send an INIT_DRIVER request to the modem, and wait for it to complete. */
static void ipa_client_init_driver_work(struct work_struct *work)
{
unsigned long timeout = msecs_to_jiffies(QMI_INIT_DRIVER_TIMEOUT);
const struct ipa_init_modem_driver_req *req;
struct ipa_qmi *ipa_qmi;
struct qmi_handle *qmi;
struct qmi_txn txn;
struct device *dev;
struct ipa *ipa;
int ret;
ipa_qmi = container_of(work, struct ipa_qmi, init_driver_work);
qmi = &ipa_qmi->client_handle;
ipa = container_of(ipa_qmi, struct ipa, qmi);
dev = &ipa->pdev->dev;
ret = qmi_txn_init(qmi, &txn, NULL, NULL);
if (ret < 0) {
dev_err(dev, "error %d preparing init driver request\n", ret);
return;
}
/* Send the request, and if successful wait for its response */
req = init_modem_driver_req(ipa_qmi);
ret = qmi_send_request(qmi, &ipa_qmi->modem_sq, &txn,
IPA_QMI_INIT_DRIVER, IPA_QMI_INIT_DRIVER_REQ_SZ,
ipa_init_modem_driver_req_ei, req);
if (ret)
dev_err(dev, "error %d sending init driver request\n", ret);
else if ((ret = qmi_txn_wait(&txn, timeout)))
dev_err(dev, "error %d awaiting init driver response\n", ret);
if (!ret) {
ipa_qmi->modem_ready = true;
ipa_qmi_ready(ipa_qmi); /* We might be ready now */
} else {
/* If any error occurs we need to cancel the transaction */
qmi_txn_cancel(&txn);
}
}
/* The modem server is now available. We will send an INIT_DRIVER request
* to the modem, but can't wait for it to complete in this callback thread.
* Schedule a worker on the global workqueue to do that for us.
*/
static int
ipa_client_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
{
struct ipa_qmi *ipa_qmi;
ipa_qmi = container_of(qmi, struct ipa_qmi, client_handle);
ipa_qmi->modem_sq.sq_family = AF_QIPCRTR;
ipa_qmi->modem_sq.sq_node = svc->node;
ipa_qmi->modem_sq.sq_port = svc->port;
schedule_work(&ipa_qmi->init_driver_work);
return 0;
}
static const struct qmi_ops ipa_client_ops = {
.new_server = ipa_client_new_server,
};
/* Set up for QMI message exchange */
int ipa_qmi_setup(struct ipa *ipa)
{
struct ipa_qmi *ipa_qmi = &ipa->qmi;
int ret;
ipa_qmi->initial_boot = true;
/* The server handle is used to handle the DRIVER_INIT_COMPLETE
* request on the first modem boot. It also receives the
* INDICATION_REGISTER request on the first boot and (optionally)
* subsequent boots. The INIT_COMPLETE indication message is
* sent over the server handle if requested.
*/
ret = qmi_handle_init(&ipa_qmi->server_handle,
IPA_QMI_SERVER_MAX_RCV_SZ, &ipa_server_ops,
ipa_server_msg_handlers);
if (ret)
return ret;
ret = qmi_add_server(&ipa_qmi->server_handle, IPA_HOST_SERVICE_SVC_ID,
IPA_HOST_SVC_VERS, IPA_HOST_SERVICE_INS_ID);
if (ret)
goto err_server_handle_release;
/* The client handle is only used for sending an INIT_DRIVER request
* to the modem, and receiving its response message.
*/
ret = qmi_handle_init(&ipa_qmi->client_handle,
IPA_QMI_CLIENT_MAX_RCV_SZ, &ipa_client_ops,
ipa_client_msg_handlers);
if (ret)
goto err_server_handle_release;
/* We need this ready before the service lookup is added */
INIT_WORK(&ipa_qmi->init_driver_work, ipa_client_init_driver_work);
ret = qmi_add_lookup(&ipa_qmi->client_handle, IPA_MODEM_SERVICE_SVC_ID,
IPA_MODEM_SVC_VERS, IPA_MODEM_SERVICE_INS_ID);
if (ret)
goto err_client_handle_release;
return 0;
err_client_handle_release:
/* Releasing the handle also removes registered lookups */
qmi_handle_release(&ipa_qmi->client_handle);
memset(&ipa_qmi->client_handle, 0, sizeof(ipa_qmi->client_handle));
err_server_handle_release:
/* Releasing the handle also removes registered services */
qmi_handle_release(&ipa_qmi->server_handle);
memset(&ipa_qmi->server_handle, 0, sizeof(ipa_qmi->server_handle));
return ret;
}
/* Tear down IPA QMI handles */
void ipa_qmi_teardown(struct ipa *ipa)
{
cancel_work_sync(&ipa->qmi.init_driver_work);
qmi_handle_release(&ipa->qmi.client_handle);
memset(&ipa->qmi.client_handle, 0, sizeof(ipa->qmi.client_handle));
qmi_handle_release(&ipa->qmi.server_handle);
memset(&ipa->qmi.server_handle, 0, sizeof(ipa->qmi.server_handle));
}
| linux-master | drivers/net/ipa/ipa_qmi.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Linaro Ltd. */
#include <linux/platform_device.h>
#include <linux/io.h>
#include "gsi.h"
#include "reg.h"
#include "gsi_reg.h"
/* Is this register ID valid for the current GSI version? */
static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id)
{
switch (reg_id) {
case INTER_EE_SRC_CH_IRQ_MSK:
case INTER_EE_SRC_EV_CH_IRQ_MSK:
return gsi->version >= IPA_VERSION_3_5;
case HW_PARAM_2:
return gsi->version >= IPA_VERSION_3_5_1;
case HW_PARAM_4:
return gsi->version >= IPA_VERSION_5_0;
case CH_C_CNTXT_0:
case CH_C_CNTXT_1:
case CH_C_CNTXT_2:
case CH_C_CNTXT_3:
case CH_C_QOS:
case CH_C_SCRATCH_0:
case CH_C_SCRATCH_1:
case CH_C_SCRATCH_2:
case CH_C_SCRATCH_3:
case EV_CH_E_CNTXT_0:
case EV_CH_E_CNTXT_1:
case EV_CH_E_CNTXT_2:
case EV_CH_E_CNTXT_3:
case EV_CH_E_CNTXT_4:
case EV_CH_E_CNTXT_8:
case EV_CH_E_CNTXT_9:
case EV_CH_E_CNTXT_10:
case EV_CH_E_CNTXT_11:
case EV_CH_E_CNTXT_12:
case EV_CH_E_CNTXT_13:
case EV_CH_E_SCRATCH_0:
case EV_CH_E_SCRATCH_1:
case CH_C_DOORBELL_0:
case EV_CH_E_DOORBELL_0:
case GSI_STATUS:
case CH_CMD:
case EV_CH_CMD:
case GENERIC_CMD:
case CNTXT_TYPE_IRQ:
case CNTXT_TYPE_IRQ_MSK:
case CNTXT_SRC_CH_IRQ:
case CNTXT_SRC_CH_IRQ_MSK:
case CNTXT_SRC_CH_IRQ_CLR:
case CNTXT_SRC_EV_CH_IRQ:
case CNTXT_SRC_EV_CH_IRQ_MSK:
case CNTXT_SRC_EV_CH_IRQ_CLR:
case CNTXT_SRC_IEOB_IRQ:
case CNTXT_SRC_IEOB_IRQ_MSK:
case CNTXT_SRC_IEOB_IRQ_CLR:
case CNTXT_GLOB_IRQ_STTS:
case CNTXT_GLOB_IRQ_EN:
case CNTXT_GLOB_IRQ_CLR:
case CNTXT_GSI_IRQ_STTS:
case CNTXT_GSI_IRQ_EN:
case CNTXT_GSI_IRQ_CLR:
case CNTXT_INTSET:
case ERROR_LOG:
case ERROR_LOG_CLR:
case CNTXT_SCRATCH_0:
return true;
default:
return false;
}
}
const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id)
{
if (WARN(!gsi_reg_id_valid(gsi, reg_id), "invalid reg %u\n", reg_id))
return NULL;
return reg(gsi->regs, reg_id);
}
static const struct regs *gsi_regs(struct gsi *gsi)
{
switch (gsi->version) {
case IPA_VERSION_3_1:
return &gsi_regs_v3_1;
case IPA_VERSION_3_5_1:
return &gsi_regs_v3_5_1;
case IPA_VERSION_4_2:
return &gsi_regs_v4_0;
case IPA_VERSION_4_5:
case IPA_VERSION_4_7:
return &gsi_regs_v4_5;
case IPA_VERSION_4_9:
return &gsi_regs_v4_9;
case IPA_VERSION_4_11:
return &gsi_regs_v4_11;
case IPA_VERSION_5_0:
return &gsi_regs_v5_0;
default:
return NULL;
}
}
/* Sets gsi->virt and I/O maps the "gsi" memory range for registers */
int gsi_reg_init(struct gsi *gsi, struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
resource_size_t size;
/* Get GSI memory range and map it */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
if (!res) {
dev_err(dev, "DT error getting \"gsi\" memory property\n");
return -ENODEV;
}
size = resource_size(res);
if (res->start > U32_MAX || size > U32_MAX - res->start) {
dev_err(dev, "DT memory resource \"gsi\" out of range\n");
return -EINVAL;
}
gsi->regs = gsi_regs(gsi);
if (!gsi->regs) {
dev_err(dev, "unsupported IPA version %u (?)\n", gsi->version);
return -EINVAL;
}
gsi->virt = ioremap(res->start, size);
if (!gsi->virt) {
dev_err(dev, "unable to remap \"gsi\" memory\n");
return -ENOMEM;
}
return 0;
}
/* Inverse of gsi_reg_init() */
void gsi_reg_exit(struct gsi *gsi)
{
iounmap(gsi->virt);
gsi->virt = NULL;
gsi->regs = NULL;
}
| linux-master | drivers/net/ipa/gsi_reg.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/dma-direction.h>
#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
/**
* DOC: IPA Immediate Commands
*
* The AP command TX endpoint is used to issue immediate commands to the IPA.
* An immediate command is generally used to request the IPA do something
* other than data transfer to another endpoint.
*
* Immediate commands are represented by GSI transactions just like other
* transfer requests, and use a single GSI TRE. Each immediate command
* has a well-defined format, having a payload of a known length. This
* allows the transfer element's length field to be used to hold an
* immediate command's opcode. The payload for a command resides in AP
* memory and is described by a single scatterlist entry in its transaction.
* Commands do not require a transaction completion callback, and are
* always issued using gsi_trans_commit_wait().
*/
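/* Illustrative usage sketch (not an additional API; error handling is
 * omitted, and offset/size/addr are placeholder values). A caller
 * allocates a command transaction, adds one or more immediate commands
 * to it, and commits it synchronously:
 *
 * trans = ipa_cmd_trans_alloc(ipa, 1);
 * ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
 * gsi_trans_commit_wait(trans);
 */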
/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
pipeline_clear_hps = 0x0,
pipeline_clear_src_grp = 0x1,
pipeline_clear_full = 0x2,
};
/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */
struct ipa_cmd_hw_ip_fltrt_init {
__le64 hash_rules_addr;
__le64 flags;
__le64 nhash_rules_addr;
};
/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK GENMASK_ULL(11, 0)
#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK GENMASK_ULL(27, 12)
#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK GENMASK_ULL(39, 28)
#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK GENMASK_ULL(55, 40)
/* IPA_CMD_HDR_INIT_LOCAL */
struct ipa_cmd_hw_hdr_init_local {
__le64 hdr_table_addr;
__le32 flags;
__le32 reserved;
};
/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK GENMASK(11, 0)
#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK GENMASK(27, 12)
/* IPA_CMD_REGISTER_WRITE */
/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK GENMASK(10, 9)
struct ipa_cmd_register_write {
__le16 flags; /* Unused/reserved prior to IPA v4.0 */
__le16 offset;
__le32 value;
__le32 value_mask;
__le32 clear_options; /* Unused/reserved for IPA v4.0+ */
};
/* Field masks for ipa_cmd_register_write structure fields */
/* The next field is present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK GENMASK(14, 11)
/* The next field is not present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK GENMASK(15, 15)
/* The next field and its values are not present for IPA v4.0+ */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK GENMASK(1, 0)
/* IPA_CMD_IP_PACKET_INIT */
struct ipa_cmd_ip_packet_init {
u8 dest_endpoint; /* Full 8 bits used for IPA v5.0+ */
u8 reserved[7];
};
/* Field mask for ipa_cmd_ip_packet_init dest_endpoint field (unused v5.0+) */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK GENMASK(4, 0)
/* IPA_CMD_DMA_SHARED_MEM */
/* For IPA v4.0+, this opcode gets modified with pipeline clear options */
#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK GENMASK(8, 8)
#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK GENMASK(10, 9)
struct ipa_cmd_hw_dma_mem_mem {
__le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
__le16 size;
__le16 local_addr;
__le16 flags;
__le64 system_addr;
};
/* Flag allowing atomic clear of target region after reading data (v4.0+) */
#define DMA_SHARED_MEM_CLEAR_AFTER_READ GENMASK(15, 15)
/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK GENMASK(0, 0)
/* The next two fields are not present for IPA v4.0+ */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK GENMASK(3, 2)
/* IPA_CMD_IP_PACKET_TAG_STATUS */
struct ipa_cmd_ip_packet_tag_status {
__le64 tag;
};
#define IP_PACKET_TAG_STATUS_TAG_FMASK GENMASK_ULL(63, 16)
/* Immediate command payload */
union ipa_cmd_payload {
struct ipa_cmd_hw_ip_fltrt_init table_init;
struct ipa_cmd_hw_hdr_init_local hdr_init_local;
struct ipa_cmd_register_write register_write;
struct ipa_cmd_ip_packet_init ip_packet_init;
struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};
static void ipa_cmd_validate_build(void)
{
/* The size of a filter table needs to fit into fields in the
* ipa_cmd_hw_ip_fltrt_init structure. Although hashed tables
* might not be used, non-hashed and hashed tables have the same
* maximum size. IPv4 and IPv6 filter tables have the same number
* of entries.
*/
/* Hashed and non-hashed fields are assumed to be the same size */
BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));
/* Prior to IPA v5.0, we supported no more than 32 endpoints,
* and this was reflected in some 5-bit fields that held
* endpoint numbers. Starting with IPA v5.0, the widths of
* these fields were extended to 8 bits, meaning up to 256
* endpoints. If the driver claims to support more than
* that it's an error.
*/
BUILD_BUG_ON(IPA_ENDPOINT_MAX - 1 > U8_MAX);
}
/* Validate a memory region holding a table */
bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
bool route)
{
u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
const char *table = route ? "route" : "filter";
struct device *dev = &ipa->pdev->dev;
u32 size;
size = route ? ipa->route_count : ipa->filter_count + 1;
size *= sizeof(__le64);
/* Size must fit in the immediate command field that holds it */
if (size > size_max) {
dev_err(dev, "%s table region size too large\n", table);
dev_err(dev, " (0x%04x > 0x%04x)\n", size, size_max);
return false;
}
/* Offset must fit in the immediate command field that holds it */
if (mem->offset > offset_max ||
ipa->mem_offset > offset_max - mem->offset) {
dev_err(dev, "%s table region offset too large\n", table);
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
ipa->mem_offset, mem->offset, offset_max);
return false;
}
return true;
}
/* Validate the memory region that holds headers */
static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
const struct ipa_mem *mem;
u32 offset_max;
u32 size_max;
u32 offset;
u32 size;
/* In ipa_cmd_hdr_init_local_add() we record the offset and size of
* the header table memory area in an immediate command. Make sure
* the offset and size fit in the fields that need to hold them, and
* that the entire range is within the overall IPA memory range.
*/
offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
/* The header memory area contains both the modem and AP header
* regions. The modem portion defines the address of the region.
*/
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
offset = mem->offset;
size = mem->size;
/* Make sure the offset fits in the IPA command */
if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
dev_err(dev, "header table region offset too large\n");
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
ipa->mem_offset, offset, offset_max);
return false;
}
/* Add the size of the AP portion (if defined) to the combined size */
mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
if (mem)
size += mem->size;
/* Make sure the combined size fits in the IPA command */
if (size > size_max) {
dev_err(dev, "header table region size too large\n");
dev_err(dev, " (0x%04x > 0x%08x)\n", size, size_max);
return false;
}
return true;
}
/* Indicate whether an offset can be used with a register_write command */
static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
const char *name, u32 offset)
{
struct ipa_cmd_register_write *payload;
struct device *dev = &ipa->pdev->dev;
u32 offset_max;
u32 bit_count;
/* The maximum offset in a register_write immediate command depends
* on the version of IPA. A 16 bit offset is always supported,
* but starting with IPA v4.0 some additional high-order bits are
* allowed.
*/
bit_count = BITS_PER_BYTE * sizeof(payload->offset);
if (ipa->version >= IPA_VERSION_4_0)
bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
BUILD_BUG_ON(bit_count > 32);
offset_max = ~0U >> (32 - bit_count);
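/* For example, payload->offset is 16 bits wide; with the 4 additional
 * high-order bits available on IPA v4.0+, bit_count is 20 and
 * offset_max is 0xfffff.
 */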
/* Make sure the offset can be represented by the field(s)
* that holds it. Also make sure the offset is not outside
* the overall IPA memory range.
*/
if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
dev_err(dev, "%s offset too large 0x%04x + 0x%04x > 0x%04x)\n",
name, ipa->mem_offset, offset, offset_max);
return false;
}
return true;
}
/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
const struct reg *reg;
const char *name;
u32 offset;
/* If hashed tables are supported, ensure the hash flush register
* offset will fit in a register write IPA immediate command.
*/
if (ipa_table_hash_support(ipa)) {
if (ipa->version < IPA_VERSION_5_0)
reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
else
reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
offset = reg_offset(reg);
name = "filter/route hash flush";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
}
/* Each endpoint can have a status endpoint associated with it,
* and this is recorded in an endpoint register. If the modem
* crashes, we reset the status endpoint for all modem endpoints
* using a register write IPA immediate command. Make sure the
* worst case (highest endpoint number) offset of that endpoint
* fits in the register write command field(s) that must hold it.
*/
reg = ipa_reg(ipa, ENDP_STATUS);
offset = reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
name = "maximal endpoint status";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
return true;
}
int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
struct device *dev = channel->gsi->dev;
/* Command payloads are allocated one at a time, but a single
* transaction can require up to the maximum supported by the
* channel; treat them as if they were allocated all at once.
*/
return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
sizeof(union ipa_cmd_payload),
tre_max, channel->trans_tre_max);
}
void ipa_cmd_pool_exit(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
struct device *dev = channel->gsi->dev;
gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}
static union ipa_cmd_payload *
ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
{
struct gsi_trans_info *trans_info;
struct ipa_endpoint *endpoint;
endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;
return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
}
/* If hash_size is 0, hash_offset and hash_addr are ignored. */
void ipa_cmd_table_init_add(struct gsi_trans *trans,
enum ipa_cmd_opcode opcode, u16 size, u32 offset,
dma_addr_t addr, u16 hash_size, u32 hash_offset,
dma_addr_t hash_addr)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
struct ipa_cmd_hw_ip_fltrt_init *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
u64 val;
/* Record the non-hash table offset and size */
offset += ipa->mem_offset;
val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
/* The hash table offset and address are zero if its size is 0 */
if (hash_size) {
/* Record the hash table offset and size */
hash_offset += ipa->mem_offset;
val |= u64_encode_bits(hash_offset,
IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
val |= u64_encode_bits(hash_size,
IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
}
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->table_init;
/* Fill in all offsets and sizes and the non-hash table address */
if (hash_size)
payload->hash_rules_addr = cpu_to_le64(hash_addr);
payload->flags = cpu_to_le64(val);
payload->nhash_rules_addr = cpu_to_le64(addr);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
opcode);
}
/* Initialize header space in IPA-local memory */
void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
dma_addr_t addr)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
struct ipa_cmd_hw_hdr_init_local *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
u32 flags;
offset += ipa->mem_offset;
/* With this command we tell the IPA where in its local memory the
* header tables reside. The content of the buffer provided is
* also written via DMA into that space. The IPA hardware owns
* the table, but the AP must initialize it.
*/
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->hdr_init_local;
payload->hdr_table_addr = cpu_to_le64(addr);
flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
payload->flags = cpu_to_le32(flags);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
opcode);
}
void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
u32 mask, bool clear_full)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
struct ipa_cmd_register_write *payload;
union ipa_cmd_payload *cmd_payload;
u32 opcode = IPA_CMD_REGISTER_WRITE;
dma_addr_t payload_addr;
u32 clear_option;
u32 options;
u16 flags;
/* pipeline_clear_src_grp is not used */
clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;
/* IPA v4.0+ represents the pipeline clear options in the opcode. It
* also supports a larger offset by encoding additional high-order
* bits in the payload flags field.
*/
if (ipa->version >= IPA_VERSION_4_0) {
u16 offset_high;
u32 val;
/* Opcode encodes pipeline clear options */
/* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
val = u16_encode_bits(clear_option,
REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
opcode |= val;
/* Extract the high 4 bits from the offset */
offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
offset &= (1 << 16) - 1;
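/* Illustrative example: an offset of 0x12345 yields offset_high 0x1
 * and leaves 0x2345 in the low 16 bits written to payload->offset.
 */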
/* Encode the high-order bits into the flags field */
flags = u16_encode_bits(offset_high,
REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
options = 0; /* reserved */
} else {
flags = 0; /* SKIP_CLEAR flag is always 0 */
options = u16_encode_bits(clear_option,
REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
}
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->register_write;
payload->flags = cpu_to_le16(flags);
payload->offset = cpu_to_le16((u16)offset);
payload->value = cpu_to_le32(value);
payload->value_mask = cpu_to_le32(mask);
payload->clear_options = cpu_to_le32(options);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
opcode);
}
/* Skip IP packet processing on the next data transfer on a TX channel */
static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
struct ipa_cmd_ip_packet_init *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_init;
if (ipa->version < IPA_VERSION_5_0) {
payload->dest_endpoint =
u8_encode_bits(endpoint_id,
IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
} else {
payload->dest_endpoint = endpoint_id;
}
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
opcode);
}
/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
dma_addr_t addr, bool toward_ipa)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
struct ipa_cmd_hw_dma_mem_mem *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
u16 flags;
/* size and offset must fit in 16 bit fields */
WARN_ON(!size);
WARN_ON(size > U16_MAX);
WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);
offset += ipa->mem_offset;
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->dma_shared_mem;
/* payload->clear_after_read was reserved prior to IPA v4.0. It's
* never needed for current code, so it's 0 regardless of version.
*/
payload->size = cpu_to_le16(size);
payload->local_addr = cpu_to_le16(offset);
/* payload->flags:
* direction: 0 = write to IPA, 1 = read from IPA
* Starting at v4.0 these are reserved; either way, all zero:
* pipeline clear: 0 = wait for pipeline clear (don't skip)
* clear_options: 0 = pipeline_clear_hps
* Instead, for v4.0+ these are encoded in the opcode. But again
* since both values are 0 we won't bother OR'ing them in.
*/
flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
payload->flags = cpu_to_le16(flags);
payload->system_addr = cpu_to_le64(addr);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
opcode);
}
static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
struct ipa_cmd_ip_packet_tag_status *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_tag_status;
payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
opcode);
}
/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
union ipa_cmd_payload *payload;
dma_addr_t payload_addr;
/* Just transfer a zero-filled payload structure */
payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
opcode);
}
/* Add immediate commands to a transaction to clear the hardware pipeline */
void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
struct ipa_endpoint *endpoint;
/* This will complete when the transfer is received */
reinit_completion(&ipa->completion);
/* Issue a no-op register write command (mask 0 means no write) */
ipa_cmd_register_write_add(trans, 0, 0, 0, true);
/* Send a data packet through the IPA pipeline. The packet_init
* command says to send the next packet directly to the exception
* endpoint without any other IPA processing. The tag_status
* command requests that status be generated on completion of
* that transfer, and that it will be tagged with a value.
* Finally, the transfer command sends a small packet of data
* (instead of a command) using the command endpoint.
*/
endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
ipa_cmd_ip_tag_status_add(trans);
ipa_cmd_transfer_add(trans);
}
/* Returns the number of commands required to clear the pipeline */
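/* (They are the register write, packet_init, tag_status, and small data
 * transfer commands added in ipa_cmd_pipeline_clear_add() above.)
 */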
u32 ipa_cmd_pipeline_clear_count(void)
{
return 4;
}
void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
{
wait_for_completion(&ipa->completion);
}
/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
struct ipa_endpoint *endpoint;
if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX))
return NULL;
endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
tre_count, DMA_NONE);
}
/* Init function for immediate commands; there is no ipa_cmd_exit() */
int ipa_cmd_init(struct ipa *ipa)
{
ipa_cmd_validate_build();
if (!ipa_cmd_header_init_local_valid(ipa))
return -EINVAL;
if (!ipa_cmd_register_write_valid(ipa))
return -EINVAL;
return 0;
}
| linux-master | drivers/net/ipa/ipa_cmd.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Linaro Ltd. */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include "ipa.h"
#include "ipa_version.h"
#include "ipa_sysfs.h"
static const char *ipa_version_string(struct ipa *ipa)
{
switch (ipa->version) {
case IPA_VERSION_3_0:
return "3.0";
case IPA_VERSION_3_1:
return "3.1";
case IPA_VERSION_3_5:
return "3.5";
case IPA_VERSION_3_5_1:
return "3.5.1";
case IPA_VERSION_4_0:
return "4.0";
case IPA_VERSION_4_1:
return "4.1";
case IPA_VERSION_4_2:
return "4.2";
case IPA_VERSION_4_5:
return "4.5";
case IPA_VERSION_4_7:
return "4.7";
case IPA_VERSION_4_9:
return "4.9";
case IPA_VERSION_4_11:
return "4.11";
case IPA_VERSION_5_0:
return "5.0";
default:
return "0.0"; /* Won't happen (checked at probe time) */
}
}
static ssize_t
version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ipa *ipa = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", ipa_version_string(ipa));
}
static DEVICE_ATTR_RO(version);
static struct attribute *ipa_attrs[] = {
&dev_attr_version.attr,
NULL
};
const struct attribute_group ipa_attribute_group = {
.attrs = ipa_attrs,
};
static const char *ipa_offload_string(struct ipa *ipa)
{
return ipa->version < IPA_VERSION_4_5 ? "MAPv4" : "MAPv5";
}
static ssize_t rx_offload_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ipa *ipa = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", ipa_offload_string(ipa));
}
static DEVICE_ATTR_RO(rx_offload);
static ssize_t tx_offload_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ipa *ipa = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", ipa_offload_string(ipa));
}
static DEVICE_ATTR_RO(tx_offload);
static struct attribute *ipa_feature_attrs[] = {
&dev_attr_rx_offload.attr,
&dev_attr_tx_offload.attr,
NULL
};
const struct attribute_group ipa_feature_attribute_group = {
.name = "feature",
.attrs = ipa_feature_attrs,
};
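/* For example, reading the "rx_offload" attribute in this "feature" sysfs
 * group reports "MAPv4" for IPA versions before v4.5 and "MAPv5" otherwise
 * (see ipa_offload_string() above).
 */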
static umode_t ipa_endpoint_id_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct ipa *ipa = dev_get_drvdata(kobj_to_dev(kobj));
struct device_attribute *dev_attr;
struct dev_ext_attribute *ea;
bool visible;
/* An endpoint id attribute is only visible if it's defined */
dev_attr = container_of(attr, struct device_attribute, attr);
ea = container_of(dev_attr, struct dev_ext_attribute, attr);
visible = !!ipa->name_map[(enum ipa_endpoint_name)(uintptr_t)ea->var];
return visible ? attr->mode : 0;
}
static ssize_t endpoint_id_attr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ipa *ipa = dev_get_drvdata(dev);
struct ipa_endpoint *endpoint;
struct dev_ext_attribute *ea;
ea = container_of(attr, struct dev_ext_attribute, attr);
endpoint = ipa->name_map[(enum ipa_endpoint_name)(uintptr_t)ea->var];
return sysfs_emit(buf, "%u\n", endpoint->endpoint_id);
}
#define ENDPOINT_ID_ATTR(_n, _endpoint_name) \
static struct dev_ext_attribute dev_attr_endpoint_id_ ## _n = { \
.attr = __ATTR(_n, 0444, endpoint_id_attr_show, NULL), \
.var = (void *)(_endpoint_name), \
}
ENDPOINT_ID_ATTR(modem_rx, IPA_ENDPOINT_AP_MODEM_RX);
ENDPOINT_ID_ATTR(modem_tx, IPA_ENDPOINT_AP_MODEM_TX);
static struct attribute *ipa_endpoint_id_attrs[] = {
&dev_attr_endpoint_id_modem_rx.attr.attr,
&dev_attr_endpoint_id_modem_tx.attr.attr,
NULL
};
const struct attribute_group ipa_endpoint_id_attribute_group = {
.name = "endpoint_id",
.is_visible = ipa_endpoint_id_is_visible,
.attrs = ipa_endpoint_id_attrs,
};
/* Reuse endpoint ID attributes for the legacy modem endpoint IDs */
#define MODEM_ATTR(_n, _endpoint_name) \
static struct dev_ext_attribute dev_attr_modem_ ## _n = { \
.attr = __ATTR(_n, 0444, endpoint_id_attr_show, NULL), \
.var = (void *)(_endpoint_name), \
}
MODEM_ATTR(rx_endpoint_id, IPA_ENDPOINT_AP_MODEM_RX);
MODEM_ATTR(tx_endpoint_id, IPA_ENDPOINT_AP_MODEM_TX);
static struct attribute *ipa_modem_attrs[] = {
&dev_attr_modem_rx_endpoint_id.attr.attr,
&dev_attr_modem_tx_endpoint_id.attr.attr,
NULL,
};
const struct attribute_group ipa_modem_attribute_group = {
.name = "modem",
.attrs = ipa_modem_attrs,
};
| linux-master | drivers/net/ipa/ipa_sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2023 Linaro Ltd.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/build_bug.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include "ipa.h"
#include "ipa_version.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_reg.h"
#include "ipa_mem.h"
#include "ipa_cmd.h"
#include "gsi.h"
#include "gsi_trans.h"
/**
* DOC: IPA Filter and Route Tables
*
* The IPA has tables defined in its local (IPA-resident) memory that define
* filter and routing rules. An entry in either of these tables is a little
* endian 64-bit "slot" that holds the address of a rule definition. (The
* size of these slots is 64 bits regardless of the host DMA address size.)
*
* Separate tables (both filter and route) are used for IPv4 and IPv6. There
* is normally another set of "hashed" filter and route tables, which are
* used with a hash of message metadata. Hashed operation is not supported
* by all IPA hardware (IPA v4.2 doesn't support hashed tables).
*
* Rules can be in local memory or in DRAM (system memory). The offset of
* an object (such as a route or filter table) in IPA-resident memory must
* 128-byte aligned. An object in system memory (such as a route or filter
* rule) must be at an 8-byte aligned address. We currently only place
* route or filter rules in system memory.
*
* A rule consists of a contiguous block of 32-bit values terminated with
* 32 zero bits. A special "zero entry" rule consisting of 64 zero bits
* represents "no filtering" or "no routing," and is the reset value for
* filter or route table rules.
*
* Each filter rule is associated with an AP or modem TX endpoint, though
* not all TX endpoints support filtering. The first 64-bit slot in a
* filter table is a bitmap indicating which endpoints have entries in
* the table. Each set bit in this bitmap indicates the presence of the
* address of a filter rule in the memory following the bitmap. Until IPA
* v5.0, the low-order bit (bit 0) in this bitmap represents a special
* global filter, which applies to all traffic. Otherwise the position of
* each set bit represents an endpoint for which a filter rule is defined.
*
* The global rule is not used in current code, and support for it is
* removed starting at IPA v5.0. For IPA v5.0+, the endpoint bitmap
* position defines the endpoint ID--i.e. if bit 1 is set in the endpoint
* bitmap, endpoint 1 has a filter rule. Older versions of IPA represent
* the presence of a filter rule for endpoint X by bit (X + 1) being set.
* I.e., bit 1 set indicates the presence of a filter rule for endpoint 0,
* and bit 3 set means there is a filter rule present for endpoint 2.
*
* Each filter table entry has the address of a set of equations that
* implement a filter rule. So following the endpoint bitmap there
* will be such an address/entry for each endpoint with a set bit in
* the bitmap.
*
* The AP initializes all entries in a filter table to refer to a "zero"
* rule. Once initialized, the modem and AP update the entries for
* endpoints they "own" directly. Currently the AP does not use the IPA
* filtering functionality.
*
* This diagram shows an example of a filter table with an endpoint
* bitmap as defined prior to IPA v5.0.
*
* IPA Filter Table
* ----------------------
* endpoint bitmap | 0x0000000000000048 | Bits 3 and 6 set (endpoints 2 and 5)
* |--------------------|
* 1st endpoint | 0x000123456789abc0 | DMA address for modem endpoint 2 rule
* |--------------------|
* 2nd endpoint | 0x000123456789abf0 | DMA address for AP endpoint 5 rule
* |--------------------|
* (unused) | | (Unused space in filter table)
* |--------------------|
* . . .
* |--------------------|
* (unused) | | (Unused space in filter table)
* ----------------------
*
* The set of available route rules is divided about equally between the AP
* and modem. The AP initializes all entries in a route table to refer to
* a "zero entry". Once initialized, the modem and AP are responsible for
* updating their own entries. All entries in a route table are usable,
* though the AP currently does not use the IPA routing functionality.
*
* IPA Route Table
* ----------------------
* 1st modem route | 0x0001234500001100 | DMA address for first route rule
* |--------------------|
* 2nd modem route | 0x0001234500001140 | DMA address for second route rule
* |--------------------|
* . . .
* |--------------------|
* Last modem route| 0x0001234500002280 | DMA address for Nth route rule
* |--------------------|
* 1st AP route | 0x0001234500001100 | DMA address for route rule (N+1)
* |--------------------|
* 2nd AP route | 0x0001234500001140 | DMA address for next route rule
* |--------------------|
* . . .
* |--------------------|
* Last AP route | 0x0001234500002280 | DMA address for last route rule
* ----------------------
*/
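/* Illustrative sketch only (not part of the driver): convert the "soft"
 * endpoint bitmap used by this code into the hardware filter bitmap
 * described above. Before IPA v5.0 the hardware bitmap is the soft one
 * shifted left by one position (bit 0 is the unused global filter); from
 * v5.0 on the endpoint ID is used directly as the bit position, so no
 * shift is needed. The helper name is hypothetical.
 */
static inline u64 ipa_filter_map_sketch(enum ipa_version version, u64 filtered)
{
return version < IPA_VERSION_5_0 ? filtered << 1 : filtered;
}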
/* Filter or route rules consist of a set of 32-bit values followed by a
* 32-bit all-zero rule list terminator. The "zero rule" is simply an
* all-zero rule followed by the list terminator.
*/
#define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32))
/* Check things that can be validated at build time. */
static void ipa_table_validate_build(void)
{
/* Filter and route tables contain DMA addresses that refer
* to filter or route rules. But the size of a table entry
* is 64 bits regardless of what the size of an AP DMA address
* is. A fixed constant defines the size of an entry, and
* code in ipa_table_init() uses a pointer to __le64 to
* initialize tables.
*/
BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64));
/* A "zero rule" is used to represent no filtering or no routing.
* It is a 64-bit block of zeroed memory. Code in ipa_table_init()
* assumes that it can be written using a pointer to __le64.
*/
BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64));
}
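/* Return the IPA-local memory region descriptor for the filter or route
 * table selected by the filter, hashed and ipv6 flags (or NULL if the
 * region is not defined).
 */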
static const struct ipa_mem *
ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6)
{
enum ipa_mem_id mem_id;
mem_id = filter ? hashed ? ipv6 ? IPA_MEM_V6_FILTER_HASHED
: IPA_MEM_V4_FILTER_HASHED
: ipv6 ? IPA_MEM_V6_FILTER
: IPA_MEM_V4_FILTER
: hashed ? ipv6 ? IPA_MEM_V6_ROUTE_HASHED
: IPA_MEM_V4_ROUTE_HASHED
: ipv6 ? IPA_MEM_V6_ROUTE
: IPA_MEM_V4_ROUTE;
return ipa_mem_find(ipa, mem_id);
}
bool ipa_filtered_valid(struct ipa *ipa, u64 filtered)
{
struct device *dev = &ipa->pdev->dev;
u32 count;
if (!filtered) {
dev_err(dev, "at least one filtering endpoint is required\n");
return false;
}
count = hweight64(filtered);
if (count > ipa->filter_count) {
dev_err(dev, "too many filtering endpoints (%u > %u)\n",
count, ipa->filter_count);
return false;
}
return true;
}
/* Zero entry count means no table, so just return a 0 address */
static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
{
u32 skip;
if (!count)
return 0;
WARN_ON(count > max_t(u32, ipa->filter_count, ipa->route_count));
/* Skip over the zero rule and possibly the filter mask */
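/* The DMA area built by ipa_table_init() is laid out as:
 *   slot 0:  the zero rule itself
 *   slot 1:  the filter table bitmap
 *   slot 2+: addresses of the zero rule
 * Filter table data therefore starts at slot 1 (skip one slot), and
 * route table data starts at slot 2 (skip two slots).
 */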
skip = filter_mask ? 1 : 2;
return ipa->table_addr + skip * sizeof(*ipa->table_virt);
}
static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
bool hashed, bool ipv6, u16 first, u16 count)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
const struct ipa_mem *mem;
dma_addr_t addr;
u32 offset;
u16 size;
/* Nothing to do if the memory region doesn't exist or is empty */
mem = ipa_table_mem(ipa, filter, hashed, ipv6);
if (!mem || !mem->size)
return;
if (filter)
first++; /* skip over bitmap */
offset = mem->offset + first * sizeof(__le64);
size = count * sizeof(__le64);
addr = ipa_table_addr(ipa, false, count);
ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
}
/* Reset entries in a single filter table belonging to either the AP or
* modem to refer to the zero entry. The hashed and ipv6 flags select
* which of the IPv4/IPv6 non-hashed or hashed filter tables is reset.
*/
static int
ipa_filter_reset_table(struct ipa *ipa, bool hashed, bool ipv6, bool modem)
{
u64 ep_mask = ipa->filtered;
struct gsi_trans *trans;
enum gsi_ee_id ee_id;
trans = ipa_cmd_trans_alloc(ipa, hweight64(ep_mask));
if (!trans) {
dev_err(&ipa->pdev->dev,
"no transaction for %s filter reset\n",
modem ? "modem" : "AP");
return -EBUSY;
}
ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
while (ep_mask) {
u32 endpoint_id = __ffs(ep_mask);
struct ipa_endpoint *endpoint;
ep_mask ^= BIT(endpoint_id);
endpoint = &ipa->endpoint[endpoint_id];
if (endpoint->ee_id != ee_id)
continue;
ipa_table_reset_add(trans, true, hashed, ipv6, endpoint_id, 1);
}
gsi_trans_commit_wait(trans);
return 0;
}
/* Theoretically, each filter table could have more filter slots to
* update than the maximum number of commands in a transaction. So
* we do each table separately.
*/
static int ipa_filter_reset(struct ipa *ipa, bool modem)
{
int ret;
ret = ipa_filter_reset_table(ipa, false, false, modem);
if (ret)
return ret;
ret = ipa_filter_reset_table(ipa, false, true, modem);
if (ret || !ipa_table_hash_support(ipa))
return ret;
ret = ipa_filter_reset_table(ipa, true, false, modem);
if (ret)
return ret;
return ipa_filter_reset_table(ipa, true, true, modem);
}
/* The AP routes and modem routes are each contiguous within the
* table. We can update each table with a single command, and we
* won't exceed the per-transaction command limit.
*/
static int ipa_route_reset(struct ipa *ipa, bool modem)
{
bool hash_support = ipa_table_hash_support(ipa);
u32 modem_route_count = ipa->modem_route_count;
struct gsi_trans *trans;
u16 first;
u16 count;
trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2);
if (!trans) {
dev_err(&ipa->pdev->dev,
"no transaction for %s route reset\n",
modem ? "modem" : "AP");
return -EBUSY;
}
if (modem) {
first = 0;
count = modem_route_count;
} else {
first = modem_route_count;
count = ipa->route_count - modem_route_count;
}
ipa_table_reset_add(trans, false, false, false, first, count);
ipa_table_reset_add(trans, false, false, true, first, count);
if (hash_support) {
ipa_table_reset_add(trans, false, true, false, first, count);
ipa_table_reset_add(trans, false, true, true, first, count);
}
gsi_trans_commit_wait(trans);
return 0;
}
void ipa_table_reset(struct ipa *ipa, bool modem)
{
struct device *dev = &ipa->pdev->dev;
const char *ee_name;
int ret;
ee_name = modem ? "modem" : "AP";
/* Report errors, but reset filter and route tables */
ret = ipa_filter_reset(ipa, modem);
if (ret)
dev_err(dev, "error %d resetting filter table for %s\n",
ret, ee_name);
ret = ipa_route_reset(ipa, modem);
if (ret)
dev_err(dev, "error %d resetting route table for %s\n",
ret, ee_name);
}
int ipa_table_hash_flush(struct ipa *ipa)
{
struct gsi_trans *trans;
const struct reg *reg;
u32 val;
if (!ipa_table_hash_support(ipa))
return 0;
trans = ipa_cmd_trans_alloc(ipa, 1);
if (!trans) {
dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
return -EBUSY;
}
if (ipa->version < IPA_VERSION_5_0) {
reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
val = reg_bit(reg, IPV6_ROUTER_HASH);
val |= reg_bit(reg, IPV6_FILTER_HASH);
val |= reg_bit(reg, IPV4_ROUTER_HASH);
val |= reg_bit(reg, IPV4_FILTER_HASH);
} else {
reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
/* IPA v5.0+ uses a unified cache (both IPv4 and IPv6) */
val = reg_bit(reg, ROUTER_CACHE);
val |= reg_bit(reg, FILTER_CACHE);
}
ipa_cmd_register_write_add(trans, reg_offset(reg), val, val, false);
gsi_trans_commit_wait(trans);
return 0;
}
static void ipa_table_init_add(struct gsi_trans *trans, bool filter, bool ipv6)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
const struct ipa_mem *hash_mem;
enum ipa_cmd_opcode opcode;
const struct ipa_mem *mem;
dma_addr_t hash_addr;
dma_addr_t addr;
u32 hash_offset;
u32 zero_offset;
u16 hash_count;
u32 zero_size;
u16 hash_size;
u16 count;
u16 size;
opcode = filter ? ipv6 ? IPA_CMD_IP_V6_FILTER_INIT
: IPA_CMD_IP_V4_FILTER_INIT
: ipv6 ? IPA_CMD_IP_V6_ROUTING_INIT
: IPA_CMD_IP_V4_ROUTING_INIT;
/* The non-hashed region will exist (see ipa_table_mem_valid()) */
mem = ipa_table_mem(ipa, filter, false, ipv6);
hash_mem = ipa_table_mem(ipa, filter, true, ipv6);
hash_offset = hash_mem ? hash_mem->offset : 0;
/* Compute the number of table entries to initialize */
if (filter) {
/* The number of filtering endpoints determines number of
* entries in the filter table; we also add one more "slot"
* to hold the bitmap itself. The size of the hashed filter
* table is either the same as the non-hashed one, or zero.
*/
count = 1 + hweight64(ipa->filtered);
hash_count = hash_mem && hash_mem->size ? count : 0;
} else {
/* The size of a route table region determines the number
* of entries it has.
*/
count = mem->size / sizeof(__le64);
hash_count = hash_mem ? hash_mem->size / sizeof(__le64) : 0;
}
size = count * sizeof(__le64);
hash_size = hash_count * sizeof(__le64);
addr = ipa_table_addr(ipa, filter, count);
hash_addr = ipa_table_addr(ipa, filter, hash_count);
ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
hash_size, hash_offset, hash_addr);
if (!filter)
return;
/* Zero the unused space in the filter table */
zero_offset = mem->offset + size;
zero_size = mem->size - size;
ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
ipa->zero_addr, true);
if (!hash_size)
return;
/* Zero the unused space in the hashed filter table */
zero_offset = hash_offset + hash_size;
zero_size = hash_mem->size - hash_size;
ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
ipa->zero_addr, true);
}
int ipa_table_setup(struct ipa *ipa)
{
struct gsi_trans *trans;
/* We will need at most 8 TREs:
* - IPv4:
* - One for route table initialization (non-hashed and hashed)
* - One for filter table initialization (non-hashed and hashed)
* - One to zero unused entries in the non-hashed filter table
* - One to zero unused entries in the hashed filter table
* - IPv6:
* - One for route table initialization (non-hashed and hashed)
* - One for filter table initialization (non-hashed and hashed)
* - One to zero unused entries in the non-hashed filter table
* - One to zero unused entries in the hashed filter table
* All platforms support at least 8 TREs in a transaction.
*/
trans = ipa_cmd_trans_alloc(ipa, 8);
if (!trans) {
dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
return -EBUSY;
}
ipa_table_init_add(trans, false, false);
ipa_table_init_add(trans, false, true);
ipa_table_init_add(trans, true, false);
ipa_table_init_add(trans, true, true);
gsi_trans_commit_wait(trans);
return 0;
}
/**
* ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
* @endpoint: Endpoint whose filter hash tuple should be zeroed
*
* Endpoint must be for the AP (not modem) and support filtering. Updates
* the filter hash values without changing route ones.
*/
static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
const struct reg *reg;
u32 offset;
u32 val;
if (ipa->version < IPA_VERSION_5_0) {
reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
offset = reg_n_offset(reg, endpoint_id);
val = ioread32(endpoint->ipa->reg_virt + offset);
/* Zero all filter-related fields, preserving the rest */
val &= ~reg_fmask(reg, FILTER_HASH_MSK_ALL);
} else {
/* IPA v5.0 separates filter and router cache configuration */
reg = ipa_reg(ipa, ENDP_FILTER_CACHE_CFG);
offset = reg_n_offset(reg, endpoint_id);
/* Zero all filter-related fields */
val = 0;
}
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
/* Configure a hashed filter table; there is no ipa_filter_deconfig() */
static void ipa_filter_config(struct ipa *ipa, bool modem)
{
enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
u64 ep_mask = ipa->filtered;
if (!ipa_table_hash_support(ipa))
return;
while (ep_mask) {
u32 endpoint_id = __ffs(ep_mask);
struct ipa_endpoint *endpoint;
ep_mask ^= BIT(endpoint_id);
endpoint = &ipa->endpoint[endpoint_id];
if (endpoint->ee_id == ee_id)
ipa_filter_tuple_zero(endpoint);
}
}
static bool ipa_route_id_modem(struct ipa *ipa, u32 route_id)
{
return route_id < ipa->modem_route_count;
}
/**
* ipa_route_tuple_zero() - Zero a hashed route table entry tuple
* @ipa: IPA pointer
* @route_id: Route table entry whose hash tuple should be zeroed
*
* Updates the route hash values without changing filter ones.
*/
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
const struct reg *reg;
u32 offset;
u32 val;
if (ipa->version < IPA_VERSION_5_0) {
reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
offset = reg_n_offset(reg, route_id);
val = ioread32(ipa->reg_virt + offset);
/* Zero all route-related fields, preserving the rest */
val &= ~reg_fmask(reg, ROUTER_HASH_MSK_ALL);
} else {
/* IPA v5.0 separates filter and router cache configuration */
reg = ipa_reg(ipa, ENDP_ROUTER_CACHE_CFG);
offset = reg_n_offset(reg, route_id);
/* Zero all route-related fields */
val = 0;
}
iowrite32(val, ipa->reg_virt + offset);
}
/* Configure a hashed route table; there is no ipa_route_deconfig() */
static void ipa_route_config(struct ipa *ipa, bool modem)
{
u32 route_id;
if (!ipa_table_hash_support(ipa))
return;
for (route_id = 0; route_id < ipa->route_count; route_id++)
if (ipa_route_id_modem(ipa, route_id) == modem)
ipa_route_tuple_zero(ipa, route_id);
}
/* Configure a filter and route tables; there is no ipa_table_deconfig() */
void ipa_table_config(struct ipa *ipa)
{
ipa_filter_config(ipa, false);
ipa_filter_config(ipa, true);
ipa_route_config(ipa, false);
ipa_route_config(ipa, true);
}
/* Verify that the sizes of all IPA filter or routing table memory regions
* are valid. If valid, this records the number of entries in the table.
*/
bool ipa_table_mem_valid(struct ipa *ipa, bool filter)
{
bool hash_support = ipa_table_hash_support(ipa);
const struct ipa_mem *mem_hashed;
const struct ipa_mem *mem_ipv4;
const struct ipa_mem *mem_ipv6;
u32 count;
/* IPv4 and IPv6 non-hashed tables are expected to be defined and
* have the same size. Both must have at least two entries (and
* would normally have more than that).
*/
mem_ipv4 = ipa_table_mem(ipa, filter, false, false);
if (!mem_ipv4)
return false;
mem_ipv6 = ipa_table_mem(ipa, filter, false, true);
if (!mem_ipv6)
return false;
if (mem_ipv4->size != mem_ipv6->size)
return false;
/* Compute and record the number of entries for each table type */
count = mem_ipv4->size / sizeof(__le64);
if (count < 2)
return false;
if (filter)
ipa->filter_count = count - 1; /* Filter map in first entry */
else
ipa->route_count = count;
/* Table offset and size must fit in TABLE_INIT command fields */
if (!ipa_cmd_table_init_valid(ipa, mem_ipv4, !filter))
return false;
/* Make sure the regions are big enough */
if (filter) {
/* Filter tables must be able to hold the endpoint bitmap plus
* an entry for each endpoint that supports filtering
*/
if (count < 1 + hweight64(ipa->filtered))
return false;
} else {
/* Routing tables must be able to hold all modem entries,
* plus at least one entry for the AP.
*/
if (count < ipa->modem_route_count + 1)
return false;
}
/* If hashing is supported, hashed tables are expected to be defined,
* and have the same size as non-hashed tables. If hashing is not
* supported, hashed tables are expected to have zero size (or not
* be defined).
*/
mem_hashed = ipa_table_mem(ipa, filter, true, false);
if (hash_support) {
if (!mem_hashed || mem_hashed->size != mem_ipv4->size)
return false;
} else {
if (mem_hashed && mem_hashed->size)
return false;
}
/* Same check for IPv6 tables */
mem_hashed = ipa_table_mem(ipa, filter, true, true);
if (hash_support) {
if (!mem_hashed || mem_hashed->size != mem_ipv6->size)
return false;
} else {
if (mem_hashed && mem_hashed->size)
return false;
}
return true;
}
/* Initialize a coherent DMA allocation containing initialized filter and
* route table data. This is used when initializing or resetting the IPA
* filter or route table.
*
* The first entry in a filter table contains a bitmap indicating which
* endpoints contain entries in the table. In addition to that first entry,
* there is a fixed maximum number of entries that follow. Filter table
* entries are 64 bits wide, and (other than the bitmap) contain the DMA
* address of a filter rule. A "zero rule" indicates no filtering, and
* consists of 64 bits of zeroes. When a filter table is initialized (or
* reset) its entries are made to refer to the zero rule.
*
* Each entry in a route table is the DMA address of a routing rule. For
* routing there is also a 64-bit "zero rule" that means no routing, and
* when a route table is initialized or reset, its entries are made to refer
* to the zero rule. The zero rule is shared for route and filter tables.
*
* +-------------------+
* --> | zero rule |
* / |-------------------|
* | | filter mask |
* |\ |-------------------|
* | ---- zero rule address | \
* |\ |-------------------| |
* | ---- zero rule address | | Max IPA filter count
* | |-------------------| > or IPA route count,
* | ... | whichever is greater
* \ |-------------------| |
* ---- zero rule address | /
* +-------------------+
*/
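/* Worked example with hypothetical counts: with a filter_count of 14 and
 * a route_count of 15, count below is 15 and the allocation is
 * 8 (zero rule) + (1 + 15) * 8 = 136 bytes, holding the zero rule, the
 * filter bitmap slot, and 15 zero rule addresses.
 */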
int ipa_table_init(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
dma_addr_t addr;
__le64 le_addr;
__le64 *virt;
size_t size;
u32 count;
ipa_table_validate_build();
count = max_t(u32, ipa->filter_count, ipa->route_count);
/* The IPA hardware requires route and filter table rules to be
* aligned on a 128-byte boundary. We put the "zero rule" at the
* base of the table area allocated here. The DMA address returned
* by dma_alloc_coherent() is guaranteed to be a power-of-2 number
* of pages, which satisfies the rule alignment requirement.
*/
size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
if (!virt)
return -ENOMEM;
ipa->table_virt = virt;
ipa->table_addr = addr;
/* First slot is the zero rule */
*virt++ = 0;
/* Next is the filter table bitmap. The "soft" bitmap value might
* need to be converted to the hardware representation by shifting
* it left one position. Prior to IPA v5.0, bit 0 represents global
* filtering, which is possible but not used. IPA v5.0+ eliminated
* that option, so there's no shifting required.
*/
if (ipa->version < IPA_VERSION_5_0)
*virt++ = cpu_to_le64(ipa->filtered << 1);
else
*virt++ = cpu_to_le64(ipa->filtered);
/* All the rest contain the DMA address of the zero rule */
le_addr = cpu_to_le64(addr);
while (count--)
*virt++ = le_addr;
return 0;
}
void ipa_table_exit(struct ipa *ipa)
{
u32 count = max_t(u32, 1 + ipa->filter_count, ipa->route_count);
struct device *dev = &ipa->pdev->dev;
size_t size;
size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr);
ipa->table_addr = 0;
ipa->table_virt = NULL;
}
| linux-master | drivers/net/ipa/ipa_table.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/soc/qcom/smem.h>
#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "gsi_trans.h"
/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL cpu_to_le32(0xdeadbeef)
/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM 1
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
{
u32 i;
for (i = 0; i < ipa->mem_count; i++) {
const struct ipa_mem *mem = &ipa->mem[i];
if (mem->id == mem_id)
return mem;
}
return NULL;
}
/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t addr = ipa->zero_addr;
if (!mem->size)
return;
ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
}
/**
* ipa_mem_setup() - Set up IPA AP and modem shared memory areas
* @ipa: IPA pointer
*
* Set up the shared memory regions in IPA local memory. This involves
* zero-filling memory regions, and in the case of header memory, telling
* the IPA where it's located.
*
* This function performs the initial setup of this memory. If the modem
* crashes, its regions are re-zeroed in ipa_mem_zero_modem().
*
* The AP informs the modem where its portions of memory are located
* in a QMI exchange that occurs at modem startup.
*
* There is no need for a matching ipa_mem_teardown() function.
*
* Return: 0 if successful, or a negative error code
*/
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
const struct reg *reg;
const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
u16 size;
u32 val;
/* Get a transaction to define the header memory region and to zero
* the processing context and modem memory regions.
*/
trans = ipa_cmd_trans_alloc(ipa, 4);
if (!trans) {
dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
return -EBUSY;
}
/* Initialize IPA-local header memory. The AP header region, if
* present, is contiguous with and follows the modem header region,
* and they are initialized together.
*/
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
offset = mem->offset;
size = mem->size;
mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
if (mem)
size += mem->size;
ipa_cmd_hdr_init_local_add(trans, offset, size, addr);
ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
ipa_mem_zero_region_add(trans, IPA_MEM_AP_PROC_CTX);
ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);
gsi_trans_commit_wait(trans);
/* Tell the hardware where the processing context area is located */
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
offset = ipa->mem_offset + mem->offset;
reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
val = reg_encode(reg, IPA_BASE_ADDR, offset);
iowrite32(val, ipa->reg_virt + reg_offset(reg));
return 0;
}
/* Is the given memory region ID valid for the current IPA version? */
static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
enum ipa_version version = ipa->version;
switch (mem_id) {
case IPA_MEM_UC_SHARED:
case IPA_MEM_UC_INFO:
case IPA_MEM_V4_FILTER_HASHED:
case IPA_MEM_V4_FILTER:
case IPA_MEM_V6_FILTER_HASHED:
case IPA_MEM_V6_FILTER:
case IPA_MEM_V4_ROUTE_HASHED:
case IPA_MEM_V4_ROUTE:
case IPA_MEM_V6_ROUTE_HASHED:
case IPA_MEM_V6_ROUTE:
case IPA_MEM_MODEM_HEADER:
case IPA_MEM_AP_HEADER:
case IPA_MEM_MODEM_PROC_CTX:
case IPA_MEM_AP_PROC_CTX:
case IPA_MEM_MODEM:
case IPA_MEM_UC_EVENT_RING:
case IPA_MEM_PDN_CONFIG:
case IPA_MEM_STATS_QUOTA_MODEM:
case IPA_MEM_STATS_QUOTA_AP:
case IPA_MEM_END_MARKER: /* pseudo region */
break;
case IPA_MEM_STATS_TETHERING:
case IPA_MEM_STATS_DROP:
if (version < IPA_VERSION_4_0)
return false;
break;
case IPA_MEM_STATS_V4_FILTER:
case IPA_MEM_STATS_V6_FILTER:
case IPA_MEM_STATS_V4_ROUTE:
case IPA_MEM_STATS_V6_ROUTE:
if (version < IPA_VERSION_4_0 || version > IPA_VERSION_4_2)
return false;
break;
case IPA_MEM_AP_V4_FILTER:
case IPA_MEM_AP_V6_FILTER:
if (version != IPA_VERSION_5_0)
return false;
break;
case IPA_MEM_NAT_TABLE:
case IPA_MEM_STATS_FILTER_ROUTE:
if (version < IPA_VERSION_4_5)
return false;
break;
default:
return false;
}
return true;
}
/* Must the given memory region be present in the configuration? */
static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
{
switch (mem_id) {
case IPA_MEM_UC_SHARED:
case IPA_MEM_UC_INFO:
case IPA_MEM_V4_FILTER_HASHED:
case IPA_MEM_V4_FILTER:
case IPA_MEM_V6_FILTER_HASHED:
case IPA_MEM_V6_FILTER:
case IPA_MEM_V4_ROUTE_HASHED:
case IPA_MEM_V4_ROUTE:
case IPA_MEM_V6_ROUTE_HASHED:
case IPA_MEM_V6_ROUTE:
case IPA_MEM_MODEM_HEADER:
case IPA_MEM_MODEM_PROC_CTX:
case IPA_MEM_AP_PROC_CTX:
case IPA_MEM_MODEM:
return true;
case IPA_MEM_PDN_CONFIG:
case IPA_MEM_STATS_QUOTA_MODEM:
return ipa->version >= IPA_VERSION_4_0;
case IPA_MEM_STATS_TETHERING:
return ipa->version >= IPA_VERSION_4_0 &&
ipa->version != IPA_VERSION_5_0;
default:
return false; /* Anything else is optional */
}
}
static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
struct device *dev = &ipa->pdev->dev;
enum ipa_mem_id mem_id = mem->id;
u16 size_multiple;
/* Make sure the memory region is valid for this version of IPA */
if (!ipa_mem_id_valid(ipa, mem_id)) {
dev_err(dev, "region id %u not valid\n", mem_id);
return false;
}
if (!mem->size && !mem->canary_count) {
dev_err(dev, "empty memory region %u\n", mem_id);
return false;
}
/* Other than modem memory, sizes must be a multiple of 8 */
size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
if (mem->size % size_multiple)
dev_err(dev, "region %u size not a multiple of %u bytes\n",
mem_id, size_multiple);
else if (mem->offset % 8)
dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
else if (mem->offset < mem->canary_count * sizeof(__le32))
dev_err(dev, "region %u offset too small for %hu canaries\n",
mem_id, mem->canary_count);
else if (mem_id == IPA_MEM_END_MARKER && mem->size)
dev_err(dev, "non-zero end marker region size\n");
else
return true;
return false;
}
/* Verify each defined memory region is valid. */
static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
struct device *dev = &ipa->pdev->dev;
enum ipa_mem_id mem_id;
u32 i;
if (mem_data->local_count > IPA_MEM_COUNT) {
dev_err(dev, "too many memory regions (%u > %u)\n",
mem_data->local_count, IPA_MEM_COUNT);
return false;
}
for (i = 0; i < mem_data->local_count; i++) {
const struct ipa_mem *mem = &mem_data->local[i];
if (__test_and_set_bit(mem->id, regions)) {
dev_err(dev, "duplicate memory region %u\n", mem->id);
return false;
}
/* Defined regions have non-zero size and/or canary count */
if (!ipa_mem_valid_one(ipa, mem))
return false;
}
/* Now see if any required regions are not defined */
for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) {
if (ipa_mem_id_required(ipa, mem_id))
dev_err(dev, "required memory region %u missing\n",
mem_id);
}
return true;
}
/* Do all memory regions fit within the IPA local memory? */
static bool ipa_mem_size_valid(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
u32 limit = ipa->mem_size;
u32 i;
for (i = 0; i < ipa->mem_count; i++) {
const struct ipa_mem *mem = &ipa->mem[i];
if (mem->offset + mem->size <= limit)
continue;
dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
mem->id, limit);
return false;
}
return true;
}
/**
* ipa_mem_config() - Configure IPA shared memory
* @ipa: IPA pointer
*
* Return: 0 if successful, or a negative error code
*/
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
const struct ipa_mem *mem;
const struct reg *reg;
dma_addr_t addr;
u32 mem_size;
void *virt;
u32 val;
u32 i;
/* Check the advertised location and size of the shared memory area */
reg = ipa_reg(ipa, SHARED_MEM_SIZE);
val = ioread32(ipa->reg_virt + reg_offset(reg));
/* The fields in the register are in 8 byte units */
ipa->mem_offset = 8 * reg_decode(reg, MEM_BADDR, val);
/* Make sure the end is within the region's mapped space */
mem_size = 8 * reg_decode(reg, MEM_SIZE, val);
/* If the sizes don't match, issue a warning */
if (ipa->mem_offset + mem_size < ipa->mem_size) {
dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
mem_size);
ipa->mem_size = mem_size;
} else if (ipa->mem_offset + mem_size > ipa->mem_size) {
dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n",
mem_size);
}
/* We know our memory size; make sure regions are all in range */
if (!ipa_mem_size_valid(ipa))
return -EINVAL;
/* Prealloc DMA memory for zeroing regions */
virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
if (!virt)
return -ENOMEM;
ipa->zero_addr = addr;
ipa->zero_virt = virt;
ipa->zero_size = IPA_MEM_MAX;
/* For each defined region, write "canary" values in the
* space prior to the region's base address if indicated.
*/
for (i = 0; i < ipa->mem_count; i++) {
u16 canary_count = ipa->mem[i].canary_count;
__le32 *canary;
if (!canary_count)
continue;
/* Write canary values in the space before the region */
canary = ipa->mem_virt + ipa->mem_offset + ipa->mem[i].offset;
do
*--canary = IPA_MEM_CANARY_VAL;
while (--canary_count);
}
/* Verify the microcontroller ring alignment (if defined) */
mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
if (mem && mem->offset % 1024) {
dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
goto err_dma_free;
}
return 0;
err_dma_free:
dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);
return -EINVAL;
}
/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
ipa->zero_size = 0;
ipa->zero_virt = NULL;
ipa->zero_addr = 0;
}
/**
* ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
* @ipa: IPA pointer
*
* Zero regions of IPA-local memory used by the modem. These are configured
* (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
* restarts via SSR we need to re-initialize them. A QMI message tells the
* modem where to find regions of IPA local memory it needs to know about
* (these included).
*/
int ipa_mem_zero_modem(struct ipa *ipa)
{
struct gsi_trans *trans;
/* Get a transaction to zero the modem memory, modem header,
* and modem processing context regions.
*/
trans = ipa_cmd_trans_alloc(ipa, 3);
if (!trans) {
dev_err(&ipa->pdev->dev,
"no transaction to zero modem memory\n");
return -EBUSY;
}
ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_HEADER);
ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);
gsi_trans_commit_wait(trans);
return 0;
}
/**
* ipa_imem_init() - Initialize IMEM memory used by the IPA
* @ipa: IPA pointer
* @addr: Physical address of the IPA region in IMEM
* @size: Size (bytes) of the IPA region in IMEM
*
* IMEM is a block of shared memory separate from system DRAM, and
* a portion of this memory is available for the IPA to use. The
* modem accesses this memory directly, but the IPA accesses it
* via the IOMMU, using the AP's credentials.
*
* If this region exists (size > 0) we map it for read/write access
* through the IOMMU using the IPA device.
*
* Note: @addr and @size are not guaranteed to be page-aligned.
*/
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
struct device *dev = &ipa->pdev->dev;
struct iommu_domain *domain;
unsigned long iova;
phys_addr_t phys;
int ret;
if (!size)
return 0; /* IMEM memory not used */
domain = iommu_get_domain_for_dev(dev);
if (!domain) {
dev_err(dev, "no IOMMU domain found for IMEM\n");
return -EINVAL;
}
/* Align the address down and the size up to page boundaries */
phys = addr & PAGE_MASK;
size = PAGE_ALIGN(size + addr - phys);
iova = phys; /* We just want a direct mapping */
ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
GFP_KERNEL);
if (ret)
return ret;
ipa->imem_iova = iova;
ipa->imem_size = size;
return 0;
}
static void ipa_imem_exit(struct ipa *ipa)
{
struct iommu_domain *domain;
struct device *dev;
if (!ipa->imem_size)
return;
dev = &ipa->pdev->dev;
domain = iommu_get_domain_for_dev(dev);
if (domain) {
size_t size;
size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
if (size != ipa->imem_size)
dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
size, ipa->imem_size);
} else {
dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
}
ipa->imem_size = 0;
ipa->imem_iova = 0;
}
/**
* ipa_smem_init() - Initialize SMEM memory used by the IPA
* @ipa: IPA pointer
* @item: Item ID of SMEM memory
* @size: Size (bytes) of SMEM memory region
*
* SMEM is a managed block of shared DRAM, from which numbered "items"
* can be allocated. One item is designated for use by the IPA.
*
* The modem accesses SMEM memory directly, but the IPA accesses it
* via the IOMMU, using the AP's credentials.
*
* If size provided is non-zero, we allocate it and map it for
* access through the IOMMU.
*
* Note: @size and the item address are not guaranteed to be page-aligned.
*/
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
struct device *dev = &ipa->pdev->dev;
struct iommu_domain *domain;
unsigned long iova;
phys_addr_t phys;
phys_addr_t addr;
size_t actual;
void *virt;
int ret;
if (!size)
return 0; /* SMEM memory not used */
/* SMEM is memory shared between the AP and another system entity
* (in this case, the modem). An allocation from SMEM is persistent
* until the AP reboots; there is no way to free an allocated SMEM
* region. Allocation only reserves the space; to use it you need
* to "get" a pointer it (this does not imply reference counting).
* The item might have already been allocated, in which case we
* use it unless the size isn't what we expect.
*/
ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
if (ret && ret != -EEXIST) {
dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
ret, size, item);
return ret;
}
/* Now get the address of the SMEM memory region */
virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
if (IS_ERR(virt)) {
ret = PTR_ERR(virt);
dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
return ret;
}
/* In case the region was already allocated, verify the size */
if (ret && actual != size) {
dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
item, actual, size);
return -EINVAL;
}
domain = iommu_get_domain_for_dev(dev);
if (!domain) {
dev_err(dev, "no IOMMU domain found for SMEM\n");
return -EINVAL;
}
/* Align the address down and the size up to a page boundary */
addr = qcom_smem_virt_to_phys(virt);
phys = addr & PAGE_MASK;
size = PAGE_ALIGN(size + addr - phys);
iova = phys; /* We just want a direct mapping */
ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
GFP_KERNEL);
if (ret)
return ret;
ipa->smem_iova = iova;
ipa->smem_size = size;
return 0;
}
static void ipa_smem_exit(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
struct iommu_domain *domain;
domain = iommu_get_domain_for_dev(dev);
if (domain) {
size_t size;
size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
if (size != ipa->smem_size)
dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
size, ipa->smem_size);
} else {
dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
}
ipa->smem_size = 0;
ipa->smem_iova = 0;
}
/* Perform memory region-related initialization */
int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
struct device *dev = &ipa->pdev->dev;
struct resource *res;
int ret;
/* Make sure the set of defined memory regions is valid */
if (!ipa_mem_valid(ipa, mem_data))
return -EINVAL;
ipa->mem_count = mem_data->local_count;
ipa->mem = mem_data->local;
/* Check the route and filter table memory regions */
if (!ipa_table_mem_valid(ipa, false))
return -EINVAL;
if (!ipa_table_mem_valid(ipa, true))
return -EINVAL;
ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(dev, "error %d setting DMA mask\n", ret);
return ret;
}
res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
"ipa-shared");
if (!res) {
dev_err(dev,
"DT error getting \"ipa-shared\" memory property\n");
return -ENODEV;
}
ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
if (!ipa->mem_virt) {
dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
return -ENOMEM;
}
ipa->mem_addr = res->start;
ipa->mem_size = resource_size(res);
ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
if (ret)
goto err_unmap;
ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
if (ret)
goto err_imem_exit;
return 0;
err_imem_exit:
ipa_imem_exit(ipa);
err_unmap:
memunmap(ipa->mem_virt);
return ret;
}
/* Inverse of ipa_mem_init() */
void ipa_mem_exit(struct ipa *ipa)
{
ipa_smem_exit(ipa);
ipa_imem_exit(ipa);
memunmap(ipa->mem_virt);
}
| linux-master | drivers/net/ipa/ipa_mem.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_rmnet.h>
#include <linux/etherdevice.h>
#include <net/pkt_sched.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc/qcom_rproc.h>
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_smp2p.h"
#include "ipa_qmi.h"
#include "ipa_uc.h"
#include "ipa_power.h"
#define IPA_NETDEV_NAME "rmnet_ipa%d"
#define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */
#define IPA_NETDEV_TIMEOUT 10 /* seconds */
enum ipa_modem_state {
IPA_MODEM_STATE_STOPPED = 0,
IPA_MODEM_STATE_STARTING,
IPA_MODEM_STATE_RUNNING,
IPA_MODEM_STATE_STOPPING,
};
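/* State transitions, driven by ipa_modem_start() and ipa_modem_stop():
 *   STOPPED -> STARTING -> RUNNING -> STOPPING -> STOPPED
 * The cmpxchg calls in those functions only permit these transitions;
 * observing any other state means a start or stop is in progress.
 */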
/**
* struct ipa_priv - IPA network device private data
* @ipa: IPA pointer
* @work: Work structure used to wake the modem netdev TX queue
*/
struct ipa_priv {
struct ipa *ipa;
struct work_struct work;
};
/** ipa_open() - Opens the modem network interface */
static int ipa_open(struct net_device *netdev)
{
struct ipa_priv *priv = netdev_priv(netdev);
struct ipa *ipa = priv->ipa;
struct device *dev;
int ret;
dev = &ipa->pdev->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_power_put;
ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
if (ret)
goto err_power_put;
ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
if (ret)
goto err_disable_tx;
netif_start_queue(netdev);
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
return 0;
err_disable_tx:
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
err_power_put:
pm_runtime_put_noidle(dev);
return ret;
}
/** ipa_stop() - Stops the modem network interface. */
static int ipa_stop(struct net_device *netdev)
{
struct ipa_priv *priv = netdev_priv(netdev);
struct ipa *ipa = priv->ipa;
struct device *dev;
int ret;
dev = &ipa->pdev->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto out_power_put;
netif_stop_queue(netdev);
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
out_power_put:
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
return 0;
}
/** ipa_start_xmit() - Transmits an skb.
* @skb: skb to be transmitted
* @netdev: network device
*
* Return codes:
* NETDEV_TX_OK: Success
* NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
*/
static netdev_tx_t
ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct net_device_stats *stats = &netdev->stats;
struct ipa_priv *priv = netdev_priv(netdev);
struct ipa_endpoint *endpoint;
struct ipa *ipa = priv->ipa;
u32 skb_len = skb->len;
struct device *dev;
int ret;
if (!skb_len)
goto err_drop_skb;
endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
goto err_drop_skb;
/* The hardware must be powered for us to transmit */
dev = &ipa->pdev->dev;
ret = pm_runtime_get(dev);
if (ret < 1) {
/* If a resume won't happen, just drop the packet */
if (ret < 0 && ret != -EINPROGRESS) {
ipa_power_modem_queue_active(ipa);
pm_runtime_put_noidle(dev);
goto err_drop_skb;
}
/* No power (yet). Stop the network stack from transmitting
* until we're resumed; ipa_modem_resume() arranges for the
* TX queue to be started again.
*/
ipa_power_modem_queue_stop(ipa);
pm_runtime_put_noidle(dev);
return NETDEV_TX_BUSY;
}
ipa_power_modem_queue_active(ipa);
ret = ipa_endpoint_skb_tx(endpoint, skb);
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
if (ret) {
if (ret != -E2BIG)
return NETDEV_TX_BUSY;
goto err_drop_skb;
}
stats->tx_packets++;
stats->tx_bytes += skb_len;
return NETDEV_TX_OK;
err_drop_skb:
dev_kfree_skb_any(skb);
stats->tx_dropped++;
return NETDEV_TX_OK;
}
void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb)
{
struct net_device_stats *stats = &netdev->stats;
if (skb) {
skb->dev = netdev;
skb->protocol = htons(ETH_P_MAP);
stats->rx_packets++;
stats->rx_bytes += skb->len;
(void)netif_receive_skb(skb);
} else {
stats->rx_dropped++;
}
}
static const struct net_device_ops ipa_modem_ops = {
.ndo_open = ipa_open,
.ndo_stop = ipa_stop,
.ndo_start_xmit = ipa_start_xmit,
};
/** ipa_modem_netdev_setup() - netdev setup function for the modem */
static void ipa_modem_netdev_setup(struct net_device *netdev)
{
netdev->netdev_ops = &ipa_modem_ops;
netdev->header_ops = NULL;
netdev->type = ARPHRD_RAWIP;
netdev->hard_header_len = 0;
netdev->min_header_len = ETH_HLEN;
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = IPA_MTU;
netdev->mtu = netdev->max_mtu;
netdev->addr_len = 0;
netdev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
netdev->priv_flags |= IFF_TX_SKB_SHARING;
eth_broadcast_addr(netdev->broadcast);
/* The endpoint is configured for QMAP */
netdev->needed_headroom = sizeof(struct rmnet_map_header);
netdev->needed_tailroom = IPA_NETDEV_TAILROOM;
netdev->watchdog_timeo = IPA_NETDEV_TIMEOUT * HZ;
netdev->hw_features = NETIF_F_SG;
}
/** ipa_modem_suspend() - suspend callback
* @netdev: Network device
*
* Suspend the modem's endpoints.
*/
void ipa_modem_suspend(struct net_device *netdev)
{
struct ipa_priv *priv = netdev_priv(netdev);
struct ipa *ipa = priv->ipa;
if (!(netdev->flags & IFF_UP))
return;
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
}
/**
* ipa_modem_wake_queue_work() - enable modem netdev queue
* @work: Work structure
*
* Re-enable transmit on the modem network device. This is called
* in (power management) work queue context, scheduled when resuming
* the modem. We can't enable the queue directly in ipa_modem_resume()
* because transmits restart the instant the queue is awakened; but the
* device power state won't be ACTIVE until *after* ipa_modem_resume()
* returns.
*/
static void ipa_modem_wake_queue_work(struct work_struct *work)
{
struct ipa_priv *priv = container_of(work, struct ipa_priv, work);
ipa_power_modem_queue_wake(priv->ipa);
}
/** ipa_modem_resume() - resume callback for runtime_pm
* @netdev: pointer to the network device
*
* Resume the modem's endpoints.
*/
void ipa_modem_resume(struct net_device *netdev)
{
struct ipa_priv *priv = netdev_priv(netdev);
struct ipa *ipa = priv->ipa;
if (!(netdev->flags & IFF_UP))
return;
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
/* Arrange for the TX queue to be restarted */
(void)queue_pm_work(&priv->work);
}
int ipa_modem_start(struct ipa *ipa)
{
enum ipa_modem_state state;
struct net_device *netdev;
struct ipa_priv *priv;
int ret;
/* Only attempt to start the modem if it's stopped */
state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_STOPPED,
IPA_MODEM_STATE_STARTING);
/* Silently ignore attempts when running, or when changing state */
if (state != IPA_MODEM_STATE_STOPPED)
return 0;
netdev = alloc_netdev(sizeof(struct ipa_priv), IPA_NETDEV_NAME,
NET_NAME_UNKNOWN, ipa_modem_netdev_setup);
if (!netdev) {
ret = -ENOMEM;
goto out_set_state;
}
SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
INIT_WORK(&priv->work, ipa_modem_wake_queue_work);
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
ipa->modem_netdev = netdev;
ret = register_netdev(netdev);
if (ret) {
ipa->modem_netdev = NULL;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
free_netdev(netdev);
}
out_set_state:
if (ret)
atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
else
atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
smp_mb__after_atomic();
return ret;
}
int ipa_modem_stop(struct ipa *ipa)
{
struct net_device *netdev = ipa->modem_netdev;
enum ipa_modem_state state;
/* Only attempt to stop the modem if it's running */
state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_RUNNING,
IPA_MODEM_STATE_STOPPING);
/* Silently ignore attempts when already stopped */
if (state == IPA_MODEM_STATE_STOPPED)
return 0;
/* If we're somewhere between stopped and starting, we're busy */
if (state != IPA_MODEM_STATE_RUNNING)
return -EBUSY;
/* Clean up the netdev and endpoints if it was started */
if (netdev) {
struct ipa_priv *priv = netdev_priv(netdev);
cancel_work_sync(&priv->work);
/* If it was opened, stop it first */
if (netdev->flags & IFF_UP)
(void)ipa_stop(netdev);
unregister_netdev(netdev);
ipa->modem_netdev = NULL;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
free_netdev(netdev);
}
atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
smp_mb__after_atomic();
return 0;
}
/* Treat a "clean" modem stop the same as a crash */
static void ipa_modem_crashed(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
int ret;
/* Prevent the modem from triggering a call to ipa_setup() */
ipa_smp2p_irq_disable_setup(ipa);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "error %d getting power to handle crash\n", ret);
goto out_power_put;
}
ipa_endpoint_modem_pause_all(ipa, true);
ipa_endpoint_modem_hol_block_clear_all(ipa);
ipa_table_reset(ipa, true);
ret = ipa_table_hash_flush(ipa);
if (ret)
dev_err(dev, "error %d flushing hash caches\n", ret);
ret = ipa_endpoint_modem_exception_reset_all(ipa);
if (ret)
dev_err(dev, "error %d resetting exception endpoint\n", ret);
ipa_endpoint_modem_pause_all(ipa, false);
ret = ipa_modem_stop(ipa);
if (ret)
dev_err(dev, "error %d stopping modem\n", ret);
/* Now prepare for the next modem boot */
ret = ipa_mem_zero_modem(ipa);
if (ret)
dev_err(dev, "error %d zeroing modem memory regions\n", ret);
out_power_put:
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
}
static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
void *data)
{
struct ipa *ipa = container_of(nb, struct ipa, nb);
struct qcom_ssr_notify_data *notify_data = data;
struct device *dev = &ipa->pdev->dev;
switch (action) {
case QCOM_SSR_BEFORE_POWERUP:
dev_info(dev, "received modem starting event\n");
ipa_uc_power(ipa);
ipa_smp2p_notify_reset(ipa);
break;
case QCOM_SSR_AFTER_POWERUP:
dev_info(dev, "received modem running event\n");
break;
case QCOM_SSR_BEFORE_SHUTDOWN:
dev_info(dev, "received modem %s event\n",
notify_data->crashed ? "crashed" : "stopping");
if (ipa->setup_complete)
ipa_modem_crashed(ipa);
break;
case QCOM_SSR_AFTER_SHUTDOWN:
dev_info(dev, "received modem offline event\n");
break;
default:
dev_err(dev, "received unrecognized event %lu\n", action);
break;
}
return NOTIFY_OK;
}
int ipa_modem_config(struct ipa *ipa)
{
void *notifier;
ipa->nb.notifier_call = ipa_modem_notify;
notifier = qcom_register_ssr_notifier("mpss", &ipa->nb);
if (IS_ERR(notifier))
return PTR_ERR(notifier);
ipa->notifier = notifier;
return 0;
}
void ipa_modem_deconfig(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
int ret;
ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);
if (ret)
dev_err(dev, "error %d unregistering notifier", ret);
ipa->notifier = NULL;
memset(&ipa->nb, 0, sizeof(ipa->nb));
}
| linux-master | drivers/net/ipa/ipa_modem.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include "ipa.h"
#include "ipa_uc.h"
#include "ipa_power.h"
/**
* DOC: The IPA embedded microcontroller
*
* The IPA incorporates a microcontroller that is able to do some additional
* handling/offloading of network activity. The current code makes
* essentially no use of the microcontroller, but it still requires some
* initialization. It needs to be notified in the event the AP crashes.
*
* The microcontroller can generate two interrupts to the AP. One interrupt
* is used to indicate that a response to a request from the AP is available.
* The other is used to notify the AP of the occurrence of an event. In
* addition, the AP can interrupt the microcontroller by writing a register.
*
* A 128 byte block of structured memory within the IPA SRAM is used together
* with these interrupts to implement the communication interface between the
* AP and the IPA microcontroller. Each side writes data to the shared area
* before interrupting its peer, which will read the written data in response
* to the interrupt. Some information found in the shared area is currently
* unused. All remaining space in the shared area is reserved, and must not
* be read or written by the AP.
*/
/* Supports hardware interface version 0x2000 */
/* Delay to allow the microcontroller to save state when crashing */
#define IPA_SEND_DELAY 100 /* microseconds */
/**
* struct ipa_uc_mem_area - AP/microcontroller shared memory area
* @command: command code (AP->microcontroller)
* @reserved0: reserved bytes; avoid reading or writing
* @command_param: low 32 bits of command parameter (AP->microcontroller)
* @command_param_hi: high 32 bits of command parameter (AP->microcontroller)
*
* @response: response code (microcontroller->AP)
* @reserved1: reserved bytes; avoid reading or writing
* @response_param: response parameter (microcontroller->AP)
*
* @event: event code (microcontroller->AP)
* @reserved2: reserved bytes; avoid reading or writing
* @event_param: event parameter (microcontroller->AP)
*
* @first_error_address: address of first error-source on SNOC
* @hw_state: state of hardware (including error type information)
* @warning_counter: counter of non-fatal hardware errors
* @reserved3: reserved bytes; avoid reading or writing
* @interface_version: hardware-reported interface version
* @reserved4: reserved bytes; avoid reading or writing
*
* A shared memory area at the base of IPA resident memory is used for
* communication with the microcontroller. The region is 128 bytes in
* size, but only the first 40 bytes (structured this way) are used.
*/
struct ipa_uc_mem_area {
u8 command; /* enum ipa_uc_command */
u8 reserved0[3];
__le32 command_param;
__le32 command_param_hi;
u8 response; /* enum ipa_uc_response */
u8 reserved1[3];
__le32 response_param;
u8 event; /* enum ipa_uc_event */
u8 reserved2[3];
__le32 event_param;
__le32 first_error_address;
u8 hw_state;
u8 warning_counter;
__le16 reserved3;
__le16 interface_version;
__le16 reserved4;
};
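/* Sketch of a build-time check (not in the original source): the layout
 * above is assumed to cover exactly the 40 used bytes of the shared area.
 */
_Static_assert(sizeof(struct ipa_uc_mem_area) == 40,
"ipa_uc_mem_area must be 40 bytes");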
/** enum ipa_uc_command - commands from the AP to the microcontroller */
enum ipa_uc_command {
IPA_UC_COMMAND_NO_OP = 0x0,
IPA_UC_COMMAND_UPDATE_FLAGS = 0x1,
IPA_UC_COMMAND_DEBUG_RUN_TEST = 0x2,
IPA_UC_COMMAND_DEBUG_GET_INFO = 0x3,
IPA_UC_COMMAND_ERR_FATAL = 0x4,
IPA_UC_COMMAND_CLK_GATE = 0x5,
IPA_UC_COMMAND_CLK_UNGATE = 0x6,
IPA_UC_COMMAND_MEMCPY = 0x7,
IPA_UC_COMMAND_RESET_PIPE = 0x8,
IPA_UC_COMMAND_REG_WRITE = 0x9,
IPA_UC_COMMAND_GSI_CH_EMPTY = 0xa,
};
/** enum ipa_uc_response - microcontroller response codes */
enum ipa_uc_response {
IPA_UC_RESPONSE_NO_OP = 0x0,
IPA_UC_RESPONSE_INIT_COMPLETED = 0x1,
IPA_UC_RESPONSE_CMD_COMPLETED = 0x2,
IPA_UC_RESPONSE_DEBUG_GET_INFO = 0x3,
};
/** enum ipa_uc_event - common cpu events reported by the microcontroller */
enum ipa_uc_event {
IPA_UC_EVENT_NO_OP = 0x0,
IPA_UC_EVENT_ERROR = 0x1,
IPA_UC_EVENT_LOG_INFO = 0x2,
};
static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
{
const struct ipa_mem *mem = ipa_mem_find(ipa, IPA_MEM_UC_SHARED);
u32 offset = ipa->mem_offset + mem->offset;
return ipa->mem_virt + offset;
}
/* Microcontroller event IPA interrupt handler */
static void ipa_uc_event_handler(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
struct device *dev = &ipa->pdev->dev;
if (shared->event == IPA_UC_EVENT_ERROR)
dev_err(dev, "microcontroller error event\n");
else if (shared->event != IPA_UC_EVENT_LOG_INFO)
dev_err(dev, "unsupported microcontroller event %u\n",
shared->event);
/* The LOG_INFO event can be safely ignored */
}
/* Microcontroller response IPA interrupt handler */
static void ipa_uc_response_hdlr(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
struct device *dev = &ipa->pdev->dev;
/* An INIT_COMPLETED response message is sent to the AP by the
* microcontroller when it is operational. Other than this, the AP
* should only receive responses from the microcontroller when it has
* sent it a request message.
*
* We can drop the power reference taken in ipa_uc_power() once we
* know the microcontroller has finished its initialization.
*/
switch (shared->response) {
case IPA_UC_RESPONSE_INIT_COMPLETED:
if (ipa->uc_powered) {
ipa->uc_loaded = true;
ipa_power_retention(ipa, true);
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
ipa->uc_powered = false;
} else {
dev_warn(dev, "unexpected init_completed response\n");
}
break;
default:
dev_warn(dev, "unsupported microcontroller response %u\n",
shared->response);
break;
}
}
void ipa_uc_interrupt_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
/* Silently ignore anything unrecognized */
if (irq_id == IPA_IRQ_UC_0)
ipa_uc_event_handler(ipa);
else if (irq_id == IPA_IRQ_UC_1)
ipa_uc_response_hdlr(ipa);
}
/* Configure the IPA microcontroller subsystem */
void ipa_uc_config(struct ipa *ipa)
{
ipa->uc_powered = false;
ipa->uc_loaded = false;
ipa_interrupt_enable(ipa, IPA_IRQ_UC_0);
ipa_interrupt_enable(ipa, IPA_IRQ_UC_1);
}
/* Inverse of ipa_uc_config() */
void ipa_uc_deconfig(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
ipa_interrupt_disable(ipa, IPA_IRQ_UC_1);
ipa_interrupt_disable(ipa, IPA_IRQ_UC_0);
if (ipa->uc_loaded)
ipa_power_retention(ipa, false);
if (!ipa->uc_powered)
return;
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
}
/* Take a proxy power reference for the microcontroller */
void ipa_uc_power(struct ipa *ipa)
{
static bool already;
struct device *dev;
int ret;
if (already)
return;
already = true; /* Only do this on first boot */
	/* This power reference is dropped in ipa_uc_response_hdlr() above */
dev = &ipa->pdev->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
dev_err(dev, "error %d getting proxy power\n", ret);
} else {
ipa->uc_powered = true;
}
}
/* Send a command to the microcontroller */
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
const struct reg *reg;
u32 val;
/* Fill in the command data */
shared->command = command;
shared->command_param = cpu_to_le32(command_param);
shared->command_param_hi = 0;
shared->response = 0;
shared->response_param = 0;
/* Use an interrupt to tell the microcontroller the command is ready */
reg = ipa_reg(ipa, IPA_IRQ_UC);
val = reg_bit(reg, UC_INTR);
iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/* Tell the microcontroller the AP is shutting down */
void ipa_uc_panic_notifier(struct ipa *ipa)
{
if (!ipa->uc_loaded)
return;
send_uc_command(ipa, IPA_UC_COMMAND_ERR_FATAL, 0);
/* give uc enough time to save state */
udelay(IPA_SEND_DELAY);
}
| linux-master | drivers/net/ipa/ipa_uc.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_reg.h"
#include "ipa_resource.h"
/**
* DOC: IPA Resources
*
* The IPA manages a set of resources internally for various purposes.
* A given IPA version has a fixed number of resource types, and a fixed
* total number of resources of each type. "Source" resource types
* are separate from "destination" resource types.
*
* Each version of IPA also has some number of resource groups. Each
* endpoint is assigned to a resource group, and all endpoints in the
* same group share pools of each type of resource. A subset of the
* total resources of each type is assigned for use by each group.
*/
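/* Illustrative sketch (not part of the driver): platform data for a
 * hypothetical IPA with two source resource groups might describe one
 * resource type roughly as below.  The numeric limits are invented; only
 * the shape mirrors the ipa_resource/ipa_resource_limits types used here.
 */
#if 0
static const struct ipa_resource hypothetical_resource_src[] = {
	[0] = {					/* resource type 0 */
		.limits[0] = { .min = 1, .max = 12 },	/* group 0 */
		.limits[1] = { .min = 1, .max = 12 },	/* group 1 */
	},
};
#endif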
static bool ipa_resource_limits_valid(struct ipa *ipa,
const struct ipa_resource_data *data)
{
u32 group_count;
u32 i;
u32 j;
/* We program at most 8 source or destination resource group limits */
BUILD_BUG_ON(IPA_RESOURCE_GROUP_MAX > 8);
group_count = data->rsrc_group_src_count;
if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX)
return false;
/* Return an error if a non-zero resource limit is specified
* for a resource group not supported by hardware.
*/
for (i = 0; i < data->resource_src_count; i++) {
const struct ipa_resource *resource;
resource = &data->resource_src[i];
for (j = group_count; j < IPA_RESOURCE_GROUP_MAX; j++)
if (resource->limits[j].min || resource->limits[j].max)
return false;
}
group_count = data->rsrc_group_dst_count;
if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX)
return false;
for (i = 0; i < data->resource_dst_count; i++) {
const struct ipa_resource *resource;
resource = &data->resource_dst[i];
for (j = group_count; j < IPA_RESOURCE_GROUP_MAX; j++)
if (resource->limits[j].min || resource->limits[j].max)
return false;
}
return true;
}
static void
ipa_resource_config_common(struct ipa *ipa, u32 resource_type,
const struct reg *reg,
const struct ipa_resource_limits *xlimits,
const struct ipa_resource_limits *ylimits)
{
u32 val;
val = reg_encode(reg, X_MIN_LIM, xlimits->min);
val |= reg_encode(reg, X_MAX_LIM, xlimits->max);
if (ylimits) {
val |= reg_encode(reg, Y_MIN_LIM, ylimits->min);
val |= reg_encode(reg, Y_MAX_LIM, ylimits->max);
}
iowrite32(val, ipa->reg_virt + reg_n_offset(reg, resource_type));
}
static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
const struct ipa_resource_data *data)
{
u32 group_count = data->rsrc_group_src_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
const struct reg *reg;
resource = &data->resource_src[resource_type];
reg = ipa_reg(ipa, SRC_RSRC_GRP_01_RSRC_TYPE);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
ipa_resource_config_common(ipa, resource_type, reg,
&resource->limits[0], ylimits);
if (group_count < 3)
return;
reg = ipa_reg(ipa, SRC_RSRC_GRP_23_RSRC_TYPE);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
ipa_resource_config_common(ipa, resource_type, reg,
&resource->limits[2], ylimits);
if (group_count < 5)
return;
reg = ipa_reg(ipa, SRC_RSRC_GRP_45_RSRC_TYPE);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
ipa_resource_config_common(ipa, resource_type, reg,
&resource->limits[4], ylimits);
if (group_count < 7)
return;
reg = ipa_reg(ipa, SRC_RSRC_GRP_67_RSRC_TYPE);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
ipa_resource_config_common(ipa, resource_type, reg,
&resource->limits[6], ylimits);
}
static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
const struct ipa_resource_data *data)
{
u32 group_count = data->rsrc_group_dst_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
const struct reg *reg;
resource = &data->resource_dst[resource_type];
reg = ipa_reg(ipa, DST_RSRC_GRP_01_RSRC_TYPE);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
ipa_resource_config_common(ipa, resource_type, reg,
&resource->limits[0], ylimits);
if (group_count < 3)
return;
reg = ipa_reg(ipa, DST_RSRC_GRP_23_RSRC_TYPE);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
ipa_resource_config_common(ipa, resource_type, reg,
&resource->limits[2], ylimits);
if (group_count < 5)
return;
reg = ipa_reg(ipa, DST_RSRC_GRP_45_RSRC_TYPE);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
ipa_resource_config_common(ipa, resource_type, reg,
&resource->limits[4], ylimits);
if (group_count < 7)
return;
reg = ipa_reg(ipa, DST_RSRC_GRP_67_RSRC_TYPE);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
ipa_resource_config_common(ipa, resource_type, reg,
&resource->limits[6], ylimits);
}
/* Configure resources; there is no ipa_resource_deconfig() */
int ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
{
u32 i;
if (!ipa_resource_limits_valid(ipa, data))
return -EINVAL;
for (i = 0; i < data->resource_src_count; i++)
ipa_resource_config_src(ipa, i, data);
for (i = 0; i < data->resource_dst_count; i++)
ipa_resource_config_dst(ipa, i, data);
return 0;
}
| linux-master | drivers/net/ipa/ipa_resource.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2023 Linaro Ltd.
*/
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "ipa.h"
#include "ipa_power.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_resource.h"
#include "ipa_cmd.h"
#include "ipa_reg.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "ipa_smp2p.h"
#include "ipa_modem.h"
#include "ipa_uc.h"
#include "ipa_interrupt.h"
#include "gsi_trans.h"
#include "ipa_sysfs.h"
/**
* DOC: The IP Accelerator
*
* This driver supports the Qualcomm IP Accelerator (IPA), which is a
* networking component found in many Qualcomm SoCs. The IPA is connected
* to the application processor (AP), but is also connected (and partially
* controlled by) other "execution environments" (EEs), such as a modem.
*
* The IPA is the conduit between the AP and the modem that carries network
* traffic. This driver presents a network interface representing the
* connection of the modem to external (e.g. LTE) networks.
*
* The IPA provides protocol checksum calculation, offloading this work
* from the AP. The IPA offers additional functionality, including routing,
* filtering, and NAT support, but that more advanced functionality is not
* currently supported. Despite that, some resources--including routing
* tables and filter tables--are defined in this driver because they must
* be initialized even when the advanced hardware features are not used.
*
* There are two distinct layers that implement the IPA hardware, and this
* is reflected in the organization of the driver. The generic software
* interface (GSI) is an integral component of the IPA, providing a
* well-defined communication layer between the AP subsystem and the IPA
* core. The GSI implements a set of "channels" used for communication
* between the AP and the IPA.
*
* The IPA layer uses GSI channels to implement its "endpoints". And while
* a GSI channel carries data between the AP and the IPA, a pair of IPA
* endpoints is used to carry traffic between two EEs. Specifically, the main
* modem network interface is implemented by two pairs of endpoints: a TX
* endpoint on the AP coupled with an RX endpoint on the modem; and another
* RX endpoint on the AP receiving data from a TX endpoint on the modem.
*/
/* The name of the GSI firmware file relative to /lib/firmware */
#define IPA_FW_PATH_DEFAULT "ipa_fws.mdt"
#define IPA_PAS_ID 15
/* Shift of 19.2 MHz timestamp to achieve lower resolution timestamps */
#define DPL_TIMESTAMP_SHIFT 14 /* ~1.172 kHz, ~853 usec per tick */
#define TAG_TIMESTAMP_SHIFT 14
#define NAT_TIMESTAMP_SHIFT 24 /* ~1.144 Hz, ~874 msec per tick */
/* Divider for 19.2 MHz crystal oscillator clock to get common timer clock */
#define IPA_XO_CLOCK_DIVIDER 192 /* 1 is subtracted where used */
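/* Worked example (illustrative only): dividing the 19.2 MHz XO clock by
 * IPA_XO_CLOCK_DIVIDER gives 19,200,000 / 192 = 100,000 Hz, i.e. the
 * 100 kHz common timer clock referred to in ipa_qtime_config() below.
 */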
/**
* enum ipa_firmware_loader: How GSI firmware gets loaded
*
* @IPA_LOADER_DEFER: System not ready; try again later
* @IPA_LOADER_SELF: AP loads GSI firmware
* @IPA_LOADER_MODEM: Modem loads GSI firmware, signals when done
* @IPA_LOADER_SKIP: Neither AP nor modem need to load GSI firmware
* @IPA_LOADER_INVALID: GSI firmware loader specification is invalid
*/
enum ipa_firmware_loader {
IPA_LOADER_DEFER,
IPA_LOADER_SELF,
IPA_LOADER_MODEM,
IPA_LOADER_SKIP,
IPA_LOADER_INVALID,
};
/**
* ipa_setup() - Set up IPA hardware
* @ipa: IPA pointer
*
* Perform initialization that requires issuing immediate commands on
* the command TX endpoint. If the modem is doing GSI firmware load
* and initialization, this function will be called when an SMP2P
* interrupt has been signaled by the modem. Otherwise it will be
* called from ipa_probe() after GSI firmware has been successfully
* loaded, authenticated, and started by Trust Zone.
*/
int ipa_setup(struct ipa *ipa)
{
struct ipa_endpoint *exception_endpoint;
struct ipa_endpoint *command_endpoint;
struct device *dev = &ipa->pdev->dev;
int ret;
ret = gsi_setup(&ipa->gsi);
if (ret)
return ret;
ret = ipa_power_setup(ipa);
if (ret)
goto err_gsi_teardown;
ipa_endpoint_setup(ipa);
/* We need to use the AP command TX endpoint to perform other
	 * initialization, so we enable it first.
*/
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
ret = ipa_endpoint_enable_one(command_endpoint);
if (ret)
goto err_endpoint_teardown;
ret = ipa_mem_setup(ipa); /* No matching teardown required */
if (ret)
goto err_command_disable;
ret = ipa_table_setup(ipa); /* No matching teardown required */
if (ret)
goto err_command_disable;
/* Enable the exception handling endpoint, and tell the hardware
* to use it by default.
*/
exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ret = ipa_endpoint_enable_one(exception_endpoint);
if (ret)
goto err_command_disable;
ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id);
/* We're all set. Now prepare for communication with the modem */
ret = ipa_qmi_setup(ipa);
if (ret)
goto err_default_route_clear;
ipa->setup_complete = true;
dev_info(dev, "IPA driver setup completed successfully\n");
return 0;
err_default_route_clear:
ipa_endpoint_default_route_clear(ipa);
ipa_endpoint_disable_one(exception_endpoint);
err_command_disable:
ipa_endpoint_disable_one(command_endpoint);
err_endpoint_teardown:
ipa_endpoint_teardown(ipa);
ipa_power_teardown(ipa);
err_gsi_teardown:
gsi_teardown(&ipa->gsi);
return ret;
}
/**
* ipa_teardown() - Inverse of ipa_setup()
* @ipa: IPA pointer
*/
static void ipa_teardown(struct ipa *ipa)
{
struct ipa_endpoint *exception_endpoint;
struct ipa_endpoint *command_endpoint;
/* We're going to tear everything down, as if setup never completed */
ipa->setup_complete = false;
ipa_qmi_teardown(ipa);
ipa_endpoint_default_route_clear(ipa);
exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ipa_endpoint_disable_one(exception_endpoint);
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
ipa_endpoint_disable_one(command_endpoint);
ipa_endpoint_teardown(ipa);
ipa_power_teardown(ipa);
gsi_teardown(&ipa->gsi);
}
static void
ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
{
const struct reg *reg;
u32 val;
/* IPA v4.5+ has no backward compatibility register */
if (ipa->version >= IPA_VERSION_4_5)
return;
reg = ipa_reg(ipa, IPA_BCR);
val = data->backward_compat;
iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
static void ipa_hardware_config_tx(struct ipa *ipa)
{
enum ipa_version version = ipa->version;
const struct reg *reg;
u32 offset;
u32 val;
if (version <= IPA_VERSION_4_0 || version >= IPA_VERSION_4_5)
return;
/* Disable PA mask to allow HOLB drop */
reg = ipa_reg(ipa, IPA_TX_CFG);
offset = reg_offset(reg);
val = ioread32(ipa->reg_virt + offset);
val &= ~reg_bit(reg, PA_MASK_EN);
iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_hardware_config_clkon(struct ipa *ipa)
{
enum ipa_version version = ipa->version;
const struct reg *reg;
u32 val;
if (version >= IPA_VERSION_4_5)
return;
if (version < IPA_VERSION_4_0 && version != IPA_VERSION_3_1)
return;
/* Implement some hardware workarounds */
reg = ipa_reg(ipa, CLKON_CFG);
if (version == IPA_VERSION_3_1) {
/* Disable MISC clock gating */
val = reg_bit(reg, CLKON_MISC);
} else { /* IPA v4.0+ */
/* Enable open global clocks in the CLKON configuration */
val = reg_bit(reg, CLKON_GLOBAL);
val |= reg_bit(reg, GLOBAL_2X_CLK);
}
iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/* Configure bus access behavior for IPA components */
static void ipa_hardware_config_comp(struct ipa *ipa)
{
const struct reg *reg;
u32 offset;
u32 val;
/* Nothing to configure prior to IPA v4.0 */
if (ipa->version < IPA_VERSION_4_0)
return;
reg = ipa_reg(ipa, COMP_CFG);
offset = reg_offset(reg);
val = ioread32(ipa->reg_virt + offset);
if (ipa->version == IPA_VERSION_4_0) {
val &= ~reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
val &= ~reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
val &= ~reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
} else if (ipa->version < IPA_VERSION_4_5) {
val |= reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
} else {
/* For IPA v4.5+ FULL_FLUSH_WAIT_RS_CLOSURE_EN is 0 */
}
val |= reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
val |= reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
iowrite32(val, ipa->reg_virt + offset);
}
/* Configure DDR and (possibly) PCIe max read/write QSB values */
static void
ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
{
const struct ipa_qsb_data *data0;
const struct ipa_qsb_data *data1;
const struct reg *reg;
u32 val;
/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
data0 = &data->qsb_data[IPA_QSB_MASTER_DDR];
if (data->qsb_count > 1)
data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE];
/* Max outstanding write accesses for QSB masters */
reg = ipa_reg(ipa, QSB_MAX_WRITES);
val = reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
if (data->qsb_count > 1)
val |= reg_encode(reg, GEN_QMB_1_MAX_WRITES, data1->max_writes);
iowrite32(val, ipa->reg_virt + reg_offset(reg));
/* Max outstanding read accesses for QSB masters */
reg = ipa_reg(ipa, QSB_MAX_READS);
val = reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
val |= reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
data0->max_reads_beats);
if (data->qsb_count > 1) {
		val |= reg_encode(reg, GEN_QMB_1_MAX_READS, data1->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
val |= reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
data1->max_reads_beats);
}
iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/* The internal inactivity timer clock is used for the aggregation timer */
#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */
/* Compute the value to use in the COUNTER_CFG register AGGR_GRANULARITY
* field to represent the given number of microseconds. The value is one
* less than the number of timer ticks in the requested period. 0 is not
* a valid granularity value (so for example @usec must be at least 16 for
* a TIMER_FREQUENCY of 32000).
*/
static __always_inline u32 ipa_aggr_granularity_val(u32 usec)
{
return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
}
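/* Worked example (illustrative only): for a hypothetical aggregation
 * period of 500 microseconds the value programmed would be
 *	DIV_ROUND_CLOSEST(500 * 32000, 1000000) - 1 = 16 - 1 = 15
 * (IPA_AGGR_GRANULARITY itself is defined elsewhere in the driver).
 */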
/* IPA uses unified Qtime starting at IPA v4.5, implementing various
* timestamps and timers independent of the IPA core clock rate. The
* Qtimer is based on a 56-bit timestamp incremented at each tick of
* a 19.2 MHz SoC crystal oscillator (XO clock).
*
* For IPA timestamps (tag, NAT, data path logging) a lower resolution
* timestamp is achieved by shifting the Qtimer timestamp value right
* some number of bits to produce the low-order bits of the coarser
* granularity timestamp.
*
* For timers, a common timer clock is derived from the XO clock using
* a divider (we use 192, to produce a 100kHz timer clock). From
* this common clock, three "pulse generators" are used to produce
* timer ticks at a configurable frequency. IPA timers (such as
* those used for aggregation or head-of-line block handling) now
* define their period based on one of these pulse generators.
*/
static void ipa_qtime_config(struct ipa *ipa)
{
const struct reg *reg;
u32 offset;
u32 val;
/* Timer clock divider must be disabled when we change the rate */
reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
iowrite32(0, ipa->reg_virt + reg_offset(reg));
reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
/* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
val = reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
val |= reg_bit(reg, DPL_TIMESTAMP_SEL);
/* Configure tag and NAT Qtime timestamp resolution as well */
	val |= reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
	val |= reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
iowrite32(val, ipa->reg_virt + reg_offset(reg));
/* Set granularity of pulse generators used for other timers */
reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
val = reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
val |= reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
if (ipa->version >= IPA_VERSION_5_0) {
val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_10_MS);
val |= reg_encode(reg, PULSE_GRAN_3, IPA_GRAN_10_MS);
} else {
val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
}
iowrite32(val, ipa->reg_virt + reg_offset(reg));
/* Actual divider is 1 more than value supplied here */
reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
offset = reg_offset(reg);
val = reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
iowrite32(val, ipa->reg_virt + offset);
/* Divider value is set; re-enable the common timer clock divider */
val |= reg_bit(reg, DIV_ENABLE);
iowrite32(val, ipa->reg_virt + offset);
}
/* Before IPA v4.5 timing is controlled by a counter register */
static void ipa_hardware_config_counter(struct ipa *ipa)
{
u32 granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, COUNTER_CFG);
/* If defined, EOT_COAL_GRANULARITY is 0 */
val = reg_encode(reg, AGGR_GRANULARITY, granularity);
iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
static void ipa_hardware_config_timing(struct ipa *ipa)
{
if (ipa->version < IPA_VERSION_4_5)
ipa_hardware_config_counter(ipa);
else
ipa_qtime_config(ipa);
}
static void ipa_hardware_config_hashing(struct ipa *ipa)
{
const struct reg *reg;
/* Other than IPA v4.2, all versions enable "hashing". Starting
* with IPA v5.0, the filter and router tables are implemented
* differently, but the default configuration enables this feature
	 * (now referred to as "caching"), so there's nothing to do here.
*/
if (ipa->version != IPA_VERSION_4_2)
return;
/* IPA v4.2 does not support hashed tables, so disable them */
reg = ipa_reg(ipa, FILT_ROUT_HASH_EN);
/* IPV6_ROUTER_HASH, IPV6_FILTER_HASH, IPV4_ROUTER_HASH,
* IPV4_FILTER_HASH are all zero.
*/
iowrite32(0, ipa->reg_virt + reg_offset(reg));
}
static void ipa_idle_indication_cfg(struct ipa *ipa,
u32 enter_idle_debounce_thresh,
bool const_non_idle_enable)
{
const struct reg *reg;
u32 val;
if (ipa->version < IPA_VERSION_3_5_1)
return;
reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
val = reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
enter_idle_debounce_thresh);
if (const_non_idle_enable)
val |= reg_bit(reg, CONST_NON_IDLE_ENABLE);
iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/**
* ipa_hardware_dcd_config() - Enable dynamic clock division on IPA
* @ipa: IPA pointer
*
* Configures when the IPA signals it is idle to the global clock
* controller, which can respond by scaling down the clock to save
* power.
*/
static void ipa_hardware_dcd_config(struct ipa *ipa)
{
/* Recommended values for IPA 3.5 and later according to IPA HPG */
ipa_idle_indication_cfg(ipa, 256, false);
}
static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
{
/* Power-on reset values */
ipa_idle_indication_cfg(ipa, 0, true);
}
/**
* ipa_hardware_config() - Primitive hardware initialization
* @ipa: IPA pointer
* @data: IPA configuration data
*/
static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data)
{
ipa_hardware_config_bcr(ipa, data);
ipa_hardware_config_tx(ipa);
ipa_hardware_config_clkon(ipa);
ipa_hardware_config_comp(ipa);
ipa_hardware_config_qsb(ipa, data);
ipa_hardware_config_timing(ipa);
ipa_hardware_config_hashing(ipa);
ipa_hardware_dcd_config(ipa);
}
/**
* ipa_hardware_deconfig() - Inverse of ipa_hardware_config()
* @ipa: IPA pointer
*
* This restores the power-on reset values (even if they aren't different)
*/
static void ipa_hardware_deconfig(struct ipa *ipa)
{
/* Mostly we just leave things as we set them. */
ipa_hardware_dcd_deconfig(ipa);
}
/**
* ipa_config() - Configure IPA hardware
* @ipa: IPA pointer
* @data: IPA configuration data
*
* Perform initialization requiring IPA power to be enabled.
*/
static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
{
int ret;
ipa_hardware_config(ipa, data);
ret = ipa_mem_config(ipa);
if (ret)
goto err_hardware_deconfig;
ipa->interrupt = ipa_interrupt_config(ipa);
if (IS_ERR(ipa->interrupt)) {
ret = PTR_ERR(ipa->interrupt);
ipa->interrupt = NULL;
goto err_mem_deconfig;
}
ipa_uc_config(ipa);
ret = ipa_endpoint_config(ipa);
if (ret)
goto err_uc_deconfig;
ipa_table_config(ipa); /* No deconfig required */
/* Assign resource limitation to each group; no deconfig required */
ret = ipa_resource_config(ipa, data->resource_data);
if (ret)
goto err_endpoint_deconfig;
ret = ipa_modem_config(ipa);
if (ret)
goto err_endpoint_deconfig;
return 0;
err_endpoint_deconfig:
ipa_endpoint_deconfig(ipa);
err_uc_deconfig:
ipa_uc_deconfig(ipa);
ipa_interrupt_deconfig(ipa->interrupt);
ipa->interrupt = NULL;
err_mem_deconfig:
ipa_mem_deconfig(ipa);
err_hardware_deconfig:
ipa_hardware_deconfig(ipa);
return ret;
}
/**
* ipa_deconfig() - Inverse of ipa_config()
* @ipa: IPA pointer
*/
static void ipa_deconfig(struct ipa *ipa)
{
ipa_modem_deconfig(ipa);
ipa_endpoint_deconfig(ipa);
ipa_uc_deconfig(ipa);
ipa_interrupt_deconfig(ipa->interrupt);
ipa->interrupt = NULL;
ipa_mem_deconfig(ipa);
ipa_hardware_deconfig(ipa);
}
static int ipa_firmware_load(struct device *dev)
{
const struct firmware *fw;
struct device_node *node;
struct resource res;
phys_addr_t phys;
const char *path;
ssize_t size;
void *virt;
int ret;
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!node) {
dev_err(dev, "DT error getting \"memory-region\" property\n");
return -EINVAL;
}
ret = of_address_to_resource(node, 0, &res);
of_node_put(node);
if (ret) {
dev_err(dev, "error %d getting \"memory-region\" resource\n",
ret);
return ret;
}
/* Use name from DTB if specified; use default for *any* error */
ret = of_property_read_string(dev->of_node, "firmware-name", &path);
if (ret) {
dev_dbg(dev, "error %d getting \"firmware-name\" resource\n",
ret);
path = IPA_FW_PATH_DEFAULT;
}
ret = request_firmware(&fw, path, dev);
if (ret) {
dev_err(dev, "error %d requesting \"%s\"\n", ret, path);
return ret;
}
phys = res.start;
size = (size_t)resource_size(&res);
virt = memremap(phys, size, MEMREMAP_WC);
if (!virt) {
dev_err(dev, "unable to remap firmware memory\n");
ret = -ENOMEM;
goto out_release_firmware;
}
ret = qcom_mdt_load(dev, fw, path, IPA_PAS_ID, virt, phys, size, NULL);
if (ret)
dev_err(dev, "error %d loading \"%s\"\n", ret, path);
else if ((ret = qcom_scm_pas_auth_and_reset(IPA_PAS_ID)))
dev_err(dev, "error %d authenticating \"%s\"\n", ret, path);
memunmap(virt);
out_release_firmware:
release_firmware(fw);
return ret;
}
static const struct of_device_id ipa_match[] = {
{
.compatible = "qcom,msm8998-ipa",
.data = &ipa_data_v3_1,
},
{
.compatible = "qcom,sdm845-ipa",
.data = &ipa_data_v3_5_1,
},
{
.compatible = "qcom,sc7180-ipa",
.data = &ipa_data_v4_2,
},
{
.compatible = "qcom,sdx55-ipa",
.data = &ipa_data_v4_5,
},
{
.compatible = "qcom,sm6350-ipa",
.data = &ipa_data_v4_7,
},
{
.compatible = "qcom,sm8350-ipa",
.data = &ipa_data_v4_9,
},
{
.compatible = "qcom,sc7280-ipa",
.data = &ipa_data_v4_11,
},
{
.compatible = "qcom,sdx65-ipa",
.data = &ipa_data_v5_0,
},
{ },
};
MODULE_DEVICE_TABLE(of, ipa_match);
/* Check things that can be validated at build time. This just
 * groups these things so the BUILD_BUG_ON() calls don't clutter
 * the rest of the code.
 */
static void ipa_validate_build(void)
{
/* At one time we assumed a 64-bit build, allowing some do_div()
* calls to be replaced by simple division or modulo operations.
* We currently only perform divide and modulo operations on u32,
* u16, or size_t objects, and of those only size_t has any chance
* of being a 64-bit value. (It should be guaranteed 32 bits wide
* on a 32-bit build, but there is no harm in verifying that.)
*/
BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT) && sizeof(size_t) != 4);
/* Code assumes the EE ID for the AP is 0 (zeroed structure field) */
BUILD_BUG_ON(GSI_EE_AP != 0);
/* There's no point if we have no channels or event rings */
BUILD_BUG_ON(!GSI_CHANNEL_COUNT_MAX);
BUILD_BUG_ON(!GSI_EVT_RING_COUNT_MAX);
/* GSI hardware design limits */
BUILD_BUG_ON(GSI_CHANNEL_COUNT_MAX > 32);
BUILD_BUG_ON(GSI_EVT_RING_COUNT_MAX > 31);
/* The number of TREs in a transaction is limited by the channel's
* TLV FIFO size. A transaction structure uses 8-bit fields
	 * to represent the number of TREs it has allocated and used.
*/
BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX);
/* This is used as a divisor */
BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);
/* Aggregation granularity value can't be 0, and must fit */
BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
}
static enum ipa_firmware_loader ipa_firmware_loader(struct device *dev)
{
bool modem_init;
const char *str;
int ret;
/* Look up the old and new properties by name */
modem_init = of_property_read_bool(dev->of_node, "modem-init");
ret = of_property_read_string(dev->of_node, "qcom,gsi-loader", &str);
/* If the new property doesn't exist, it's legacy behavior */
if (ret == -EINVAL) {
if (modem_init)
return IPA_LOADER_MODEM;
goto out_self;
}
/* Any other error on the new property means it's poorly defined */
if (ret)
return IPA_LOADER_INVALID;
/* New property value exists; if old one does too, that's invalid */
if (modem_init)
return IPA_LOADER_INVALID;
/* Modem loads GSI firmware for "modem" */
if (!strcmp(str, "modem"))
return IPA_LOADER_MODEM;
/* No GSI firmware load is needed for "skip" */
if (!strcmp(str, "skip"))
return IPA_LOADER_SKIP;
/* Any value other than "self" is an error */
if (strcmp(str, "self"))
return IPA_LOADER_INVALID;
out_self:
/* We need Trust Zone to load firmware; make sure it's available */
if (qcom_scm_is_available())
return IPA_LOADER_SELF;
return IPA_LOADER_DEFER;
}
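/* Illustrative only: in a device tree the loader selection parsed above
 * might look something like the following.  The node name and unit address
 * are made up; "self", "modem" and "skip" are the property values
 * recognized here, and "modem-init" is the legacy boolean alternative.
 *
 *	ipa@1e40000 {
 *		compatible = "qcom,sc7180-ipa";
 *		qcom,gsi-loader = "self";
 *		...
 *	};
 */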
/**
* ipa_probe() - IPA platform driver probe function
* @pdev: Platform device pointer
*
* Return: 0 if successful, or a negative error code (possibly
* EPROBE_DEFER)
*
* This is the main entry point for the IPA driver. Initialization proceeds
* in several stages:
* - The "init" stage involves activities that can be initialized without
* access to the IPA hardware.
* - The "config" stage requires IPA power to be active so IPA registers
* can be accessed, but does not require the use of IPA immediate commands.
* - The "setup" stage uses IPA immediate commands, and so requires the GSI
* layer to be initialized.
*
 * A Device Tree "qcom,gsi-loader" property (or the legacy Boolean
 * "modem-init" property) determines whether GSI firmware loading is
 * performed by the AP (via Trust Zone), by the modem, or skipped.
 * If the AP loads GSI firmware, the setup phase is entered after
 * this has completed successfully. Otherwise the modem initializes
 * the GSI layer and signals it has finished by sending an SMP2P interrupt
 * to the AP; this triggers the start of IPA setup.
*/
static int ipa_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
enum ipa_firmware_loader loader;
const struct ipa_data *data;
struct ipa_power *power;
struct ipa *ipa;
int ret;
ipa_validate_build();
/* Get configuration data early; needed for power initialization */
data = of_device_get_match_data(dev);
if (!data) {
dev_err(dev, "matched hardware not supported\n");
return -ENODEV;
}
if (!ipa_version_supported(data->version)) {
dev_err(dev, "unsupported IPA version %u\n", data->version);
return -EINVAL;
}
if (!data->modem_route_count) {
dev_err(dev, "modem_route_count cannot be zero\n");
return -EINVAL;
}
loader = ipa_firmware_loader(dev);
if (loader == IPA_LOADER_INVALID)
return -EINVAL;
if (loader == IPA_LOADER_DEFER)
return -EPROBE_DEFER;
/* The clock and interconnects might not be ready when we're
	 * probed, so this call might return -EPROBE_DEFER.
*/
power = ipa_power_init(dev, data->power_data);
if (IS_ERR(power))
return PTR_ERR(power);
/* No more EPROBE_DEFER. Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
if (!ipa) {
ret = -ENOMEM;
goto err_power_exit;
}
ipa->pdev = pdev;
dev_set_drvdata(dev, ipa);
ipa->power = power;
ipa->version = data->version;
ipa->modem_route_count = data->modem_route_count;
init_completion(&ipa->completion);
ret = ipa_reg_init(ipa);
if (ret)
goto err_kfree_ipa;
ret = ipa_mem_init(ipa, data->mem_data);
if (ret)
goto err_reg_exit;
ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count,
data->endpoint_data);
if (ret)
goto err_mem_exit;
/* Result is a non-zero mask of endpoints that support filtering */
ret = ipa_endpoint_init(ipa, data->endpoint_count, data->endpoint_data);
if (ret)
goto err_gsi_exit;
ret = ipa_table_init(ipa);
if (ret)
goto err_endpoint_exit;
ret = ipa_smp2p_init(ipa, loader == IPA_LOADER_MODEM);
if (ret)
goto err_table_exit;
/* Power needs to be active for config and setup */
ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0))
goto err_power_put;
ret = ipa_config(ipa, data);
if (ret)
goto err_power_put;
dev_info(dev, "IPA driver initialized");
/* If the modem is loading GSI firmware, it will trigger a call to
* ipa_setup() when it has finished. In that case we're done here.
*/
if (loader == IPA_LOADER_MODEM)
goto done;
if (loader == IPA_LOADER_SELF) {
/* The AP is loading GSI firmware; do so now */
ret = ipa_firmware_load(dev);
if (ret)
goto err_deconfig;
} /* Otherwise loader == IPA_LOADER_SKIP */
/* GSI firmware is loaded; proceed to setup */
ret = ipa_setup(ipa);
if (ret)
goto err_deconfig;
done:
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
return 0;
err_deconfig:
ipa_deconfig(ipa);
err_power_put:
pm_runtime_put_noidle(dev);
ipa_smp2p_exit(ipa);
err_table_exit:
ipa_table_exit(ipa);
err_endpoint_exit:
ipa_endpoint_exit(ipa);
err_gsi_exit:
gsi_exit(&ipa->gsi);
err_mem_exit:
ipa_mem_exit(ipa);
err_reg_exit:
ipa_reg_exit(ipa);
err_kfree_ipa:
kfree(ipa);
err_power_exit:
ipa_power_exit(power);
return ret;
}
static int ipa_remove(struct platform_device *pdev)
{
struct ipa *ipa = dev_get_drvdata(&pdev->dev);
struct ipa_power *power = ipa->power;
struct device *dev = &pdev->dev;
int ret;
/* Prevent the modem from triggering a call to ipa_setup(). This
* also ensures a modem-initiated setup that's underway completes.
*/
ipa_smp2p_irq_disable_setup(ipa);
ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0))
goto out_power_put;
if (ipa->setup_complete) {
ret = ipa_modem_stop(ipa);
/* If starting or stopping is in progress, try once more */
if (ret == -EBUSY) {
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
ret = ipa_modem_stop(ipa);
}
if (ret)
return ret;
ipa_teardown(ipa);
}
ipa_deconfig(ipa);
out_power_put:
pm_runtime_put_noidle(dev);
ipa_smp2p_exit(ipa);
ipa_table_exit(ipa);
ipa_endpoint_exit(ipa);
gsi_exit(&ipa->gsi);
ipa_mem_exit(ipa);
ipa_reg_exit(ipa);
kfree(ipa);
ipa_power_exit(power);
dev_info(dev, "IPA driver removed");
return 0;
}
static void ipa_shutdown(struct platform_device *pdev)
{
int ret;
ret = ipa_remove(pdev);
if (ret)
dev_err(&pdev->dev, "shutdown: remove returned %d\n", ret);
}
static const struct attribute_group *ipa_attribute_groups[] = {
&ipa_attribute_group,
&ipa_feature_attribute_group,
&ipa_endpoint_id_attribute_group,
&ipa_modem_attribute_group,
NULL,
};
static struct platform_driver ipa_driver = {
.probe = ipa_probe,
.remove = ipa_remove,
.shutdown = ipa_shutdown,
.driver = {
.name = "ipa",
.pm = &ipa_pm_ops,
.of_match_table = ipa_match,
.dev_groups = ipa_attribute_groups,
},
};
module_platform_driver(ipa_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm IP Accelerator device driver");
| linux-master | drivers/net/ipa/ipa_main.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/io.h>
#include "ipa.h"
#include "ipa_reg.h"
/* Is this register ID valid for the current IPA version? */
static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
{
enum ipa_version version = ipa->version;
switch (reg_id) {
case FILT_ROUT_HASH_EN:
return version == IPA_VERSION_4_2;
case FILT_ROUT_HASH_FLUSH:
return version < IPA_VERSION_5_0 && version != IPA_VERSION_4_2;
case FILT_ROUT_CACHE_FLUSH:
case ENDP_FILTER_CACHE_CFG:
case ENDP_ROUTER_CACHE_CFG:
return version >= IPA_VERSION_5_0;
case IPA_BCR:
case COUNTER_CFG:
return version < IPA_VERSION_4_5;
case IPA_TX_CFG:
case FLAVOR_0:
case IDLE_INDICATION_CFG:
return version >= IPA_VERSION_3_5;
case QTIME_TIMESTAMP_CFG:
case TIMERS_XO_CLK_DIV_CFG:
case TIMERS_PULSE_GRAN_CFG:
return version >= IPA_VERSION_4_5;
case SRC_RSRC_GRP_45_RSRC_TYPE:
case DST_RSRC_GRP_45_RSRC_TYPE:
return version <= IPA_VERSION_3_1 ||
version == IPA_VERSION_4_5 ||
version == IPA_VERSION_5_0;
case SRC_RSRC_GRP_67_RSRC_TYPE:
case DST_RSRC_GRP_67_RSRC_TYPE:
return version <= IPA_VERSION_3_1 ||
version == IPA_VERSION_5_0;
case ENDP_FILTER_ROUTER_HSH_CFG:
return version < IPA_VERSION_5_0 &&
version != IPA_VERSION_4_2;
case IRQ_SUSPEND_EN:
case IRQ_SUSPEND_CLR:
return version >= IPA_VERSION_3_1;
case COMP_CFG:
case CLKON_CFG:
case ROUTE:
case SHARED_MEM_SIZE:
case QSB_MAX_WRITES:
case QSB_MAX_READS:
case STATE_AGGR_ACTIVE:
case LOCAL_PKT_PROC_CNTXT:
case AGGR_FORCE_CLOSE:
case SRC_RSRC_GRP_01_RSRC_TYPE:
case SRC_RSRC_GRP_23_RSRC_TYPE:
case DST_RSRC_GRP_01_RSRC_TYPE:
case DST_RSRC_GRP_23_RSRC_TYPE:
case ENDP_INIT_CTRL:
case ENDP_INIT_CFG:
case ENDP_INIT_NAT:
case ENDP_INIT_HDR:
case ENDP_INIT_HDR_EXT:
case ENDP_INIT_HDR_METADATA_MASK:
case ENDP_INIT_MODE:
case ENDP_INIT_AGGR:
case ENDP_INIT_HOL_BLOCK_EN:
case ENDP_INIT_HOL_BLOCK_TIMER:
case ENDP_INIT_DEAGGR:
case ENDP_INIT_RSRC_GRP:
case ENDP_INIT_SEQ:
case ENDP_STATUS:
case IPA_IRQ_STTS:
case IPA_IRQ_EN:
case IPA_IRQ_CLR:
case IPA_IRQ_UC:
case IRQ_SUSPEND_INFO:
return true; /* These should be defined for all versions */
default:
return false;
}
}
const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
{
if (WARN(!ipa_reg_id_valid(ipa, reg_id), "invalid reg %u\n", reg_id))
return NULL;
return reg(ipa->regs, reg_id);
}
static const struct regs *ipa_regs(enum ipa_version version)
{
switch (version) {
case IPA_VERSION_3_1:
return &ipa_regs_v3_1;
case IPA_VERSION_3_5_1:
return &ipa_regs_v3_5_1;
case IPA_VERSION_4_2:
return &ipa_regs_v4_2;
case IPA_VERSION_4_5:
return &ipa_regs_v4_5;
case IPA_VERSION_4_7:
return &ipa_regs_v4_7;
case IPA_VERSION_4_9:
return &ipa_regs_v4_9;
case IPA_VERSION_4_11:
return &ipa_regs_v4_11;
case IPA_VERSION_5_0:
return &ipa_regs_v5_0;
default:
return NULL;
}
}
int ipa_reg_init(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
const struct regs *regs;
struct resource *res;
regs = ipa_regs(ipa->version);
if (!regs)
return -EINVAL;
if (WARN_ON(regs->reg_count > IPA_REG_ID_COUNT))
return -EINVAL;
/* Setup IPA register memory */
res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
"ipa-reg");
if (!res) {
dev_err(dev, "DT error getting \"ipa-reg\" memory property\n");
return -ENODEV;
}
ipa->reg_virt = ioremap(res->start, resource_size(res));
if (!ipa->reg_virt) {
dev_err(dev, "unable to remap \"ipa-reg\" memory\n");
return -ENOMEM;
}
ipa->regs = regs;
return 0;
}
void ipa_reg_exit(struct ipa *ipa)
{
iounmap(ipa->reg_virt);
}
| linux-master | drivers/net/ipa/ipa_reg.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
/**
* DOC: GSI Transactions
*
* A GSI transaction abstracts the behavior of a GSI channel by representing
* everything about a related group of IPA operations in a single structure.
* (A "operation" in this sense is either a data transfer or an IPA immediate
* command.) Most details of interaction with the GSI hardware are managed
* by the GSI transaction core, allowing users to simply describe operations
* to be performed. When a transaction has completed a callback function
* (dependent on the type of endpoint associated with the channel) allows
* cleanup of resources associated with the transaction.
*
* To perform an operation (or set of them), a user of the GSI transaction
* interface allocates a transaction, indicating the number of TREs required
* (one per operation). If sufficient TREs are available, they are reserved
* for use in the transaction and the allocation succeeds. This way
* exhaustion of the available TREs in a channel ring is detected as early
* as possible. Any other resources that might be needed to complete a
* transaction are also allocated when the transaction is allocated.
*
* Operations performed as part of a transaction are represented in an array
* of Linux scatterlist structures, allocated with the transaction. These
* scatterlist structures are initialized by "adding" operations to the
* transaction. If a buffer in an operation must be mapped for DMA, this is
* done at the time it is added to the transaction. It is possible for a
* mapping error to occur when an operation is added. In this case the
* transaction should simply be freed; this correctly releases resources
* associated with the transaction.
*
* Once all operations have been successfully added to a transaction, the
* transaction is committed. Committing transfers ownership of the entire
* transaction to the GSI transaction core. The GSI transaction code
* formats the content of the scatterlist array into the channel ring
* buffer and informs the hardware that new TREs are available to process.
*
* The last TRE in each transaction is marked to interrupt the AP when the
* GSI hardware has completed it. Because transfers described by TREs are
* performed strictly in order, signaling the completion of just the last
* TRE in the transaction is sufficient to indicate the full transaction
* is complete.
*
* When a transaction is complete, ipa_gsi_trans_complete() is called by the
* GSI code into the IPA layer, allowing it to perform any final cleanup
* required before the transaction is freed.
*/
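/* Illustrative sketch (not part of the driver): the allocate/add/commit
 * lifecycle described above, using interfaces defined in this file.  The
 * function name is invented, and the channel_id, page and size arguments
 * come from a hypothetical caller.
 */
#if 0
static int gsi_trans_example(struct gsi *gsi, u32 channel_id,
			     struct page *page, u32 size)
{
	struct gsi_trans *trans;
	int ret;

	/* Reserve one TRE; this fails early if the ring is exhausted */
	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_FROM_DEVICE);
	if (!trans)
		return -EBUSY;

	/* Describe the operation; the buffer is DMA-mapped here */
	ret = gsi_trans_page_add(trans, page, size, 0);
	if (ret) {
		gsi_trans_free(trans);	/* Releases the reserved TRE */
		return ret;
	}

	/* Hand the transaction to the GSI core and ring the doorbell */
	gsi_trans_commit(trans, true);

	return 0;
}
#endif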
/* Hardware values representing a transfer element type */
enum gsi_tre_type {
GSI_RE_XFER = 0x2,
GSI_RE_IMMD_CMD = 0x3,
};
/* An entry in a channel ring */
struct gsi_tre {
__le64 addr; /* DMA address */
__le16 len_opcode; /* length in bytes or enum IPA_CMD_* */
__le16 reserved;
__le32 flags; /* TRE_FLAGS_* */
};
/* gsi_tre->flags mask values (in CPU byte order) */
#define TRE_FLAGS_CHAIN_FMASK GENMASK(0, 0)
#define TRE_FLAGS_IEOT_FMASK GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK GENMASK(23, 16)
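/* Worked example (illustrative only): the last TRE of an outbound transfer
 * with the "block event interrupt" flag set would carry
 *	type GSI_RE_XFER (0x2) in bits 23:16	-> 0x00020000
 *	IEOT (bit 9) and BEI (bit 10)		-> 0x00000600
 * for a flags value of 0x00020600 (before conversion to __le32).
 */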
int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
u32 max_alloc)
{
size_t alloc_size;
void *virt;
if (!size)
return -EINVAL;
if (count < max_alloc)
return -EINVAL;
if (!max_alloc)
return -EINVAL;
/* By allocating a few extra entries in our pool (one less
* than the maximum number that will be requested in a
* single allocation), we can always satisfy requests without
* ever worrying about straddling the end of the pool array.
* If there aren't enough entries starting at the free index,
* we just allocate free entries from the beginning of the pool.
*/
alloc_size = size_mul(count + max_alloc - 1, size);
alloc_size = kmalloc_size_roundup(alloc_size);
virt = kzalloc(alloc_size, GFP_KERNEL);
if (!virt)
return -ENOMEM;
pool->base = virt;
/* If the allocator gave us any extra memory, use it */
pool->count = alloc_size / size;
pool->free = 0;
pool->max_alloc = max_alloc;
pool->size = size;
pool->addr = 0; /* Only used for DMA pools */
return 0;
}
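/* Worked example (illustrative only): with invented values size = 32,
 * count = 64 and max_alloc = 8, the pool reserves 64 + 8 - 1 = 71 entries
 * (2272 bytes before kmalloc_size_roundup()), so any allocation of up to
 * 8 contiguous entries can be satisfied without wrapping mid-allocation.
 */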
void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
{
kfree(pool->base);
memset(pool, 0, sizeof(*pool));
}
/* Home-grown DMA pool. This way we can preallocate the pool, and guarantee
* allocations will succeed. The immediate commands in a transaction can
* require up to max_alloc elements from the pool. But we only allow
* allocation of a single element from a DMA pool at a time.
*/
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
size_t size, u32 count, u32 max_alloc)
{
size_t total_size;
dma_addr_t addr;
void *virt;
if (!size)
return -EINVAL;
if (count < max_alloc)
return -EINVAL;
if (!max_alloc)
return -EINVAL;
/* Don't let allocations cross a power-of-two boundary */
size = __roundup_pow_of_two(size);
total_size = (count + max_alloc - 1) * size;
/* The allocator will give us a power-of-2 number of pages
* sufficient to satisfy our request. Round up our requested
* size to avoid any unused space in the allocation. This way
* gsi_trans_pool_exit_dma() can assume the total allocated
* size is exactly (count * size).
*/
total_size = PAGE_SIZE << get_order(total_size);
virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
if (!virt)
return -ENOMEM;
pool->base = virt;
pool->count = total_size / size;
pool->free = 0;
pool->size = size;
pool->max_alloc = max_alloc;
pool->addr = addr;
return 0;
}
void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
size_t total_size = pool->count * pool->size;
dma_free_coherent(dev, total_size, pool->base, pool->addr);
memset(pool, 0, sizeof(*pool));
}
/* Return the byte offset of the next free entry in the pool */
static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
u32 offset;
WARN_ON(!count);
WARN_ON(count > pool->max_alloc);
/* Allocate from beginning if wrap would occur */
if (count > pool->count - pool->free)
pool->free = 0;
offset = pool->free * pool->size;
pool->free += count;
memset(pool->base + offset, 0, count * pool->size);
return offset;
}
/* Allocate a contiguous block of zeroed entries from a pool */
void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
{
return pool->base + gsi_trans_pool_alloc_common(pool, count);
}
/* Allocate a single zeroed entry from a DMA pool */
void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
{
u32 offset = gsi_trans_pool_alloc_common(pool, 1);
*addr = pool->addr + offset;
return pool->base + offset;
}
/* Map a TRE ring entry index to the transaction it is associated with */
static void gsi_trans_map(struct gsi_trans *trans, u32 index)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
/* The completion event will indicate the last TRE used */
index += trans->used_count - 1;
/* Note: index *must* be used modulo the ring count here */
channel->trans_info.map[index % channel->tre_ring.count] = trans;
}
/* Return the transaction mapped to a given ring entry */
struct gsi_trans *
gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
{
/* Note: index *must* be used modulo the ring count here */
return channel->trans_info.map[index % channel->tre_ring.count];
}
/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
u16 trans_id = trans_info->completed_id;
if (trans_id == trans_info->pending_id) {
gsi_channel_update(channel);
if (trans_id == trans_info->pending_id)
return NULL;
}
	return &trans_info->trans[trans_id % channel->tre_count];
}
/* Move a transaction from allocated to committed state */
static void gsi_trans_move_committed(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
/* This allocated transaction is now committed */
trans_info->allocated_id++;
}
/* Move committed transactions to pending state */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
u16 trans_index = trans - &trans_info->trans[0];
u16 delta;
/* These committed transactions are now pending */
delta = trans_index - trans_info->committed_id + 1;
trans_info->committed_id += delta % channel->tre_count;
}
/* Move pending transactions to completed state */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
u16 trans_index = trans - trans_info->trans;
u16 delta;
/* These pending transactions are now completed */
delta = trans_index - trans_info->pending_id + 1;
delta %= channel->tre_count;
trans_info->pending_id += delta;
}
/* Move a transaction from completed to polled state */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
/* This completed transaction is now polled */
trans_info->completed_id++;
}
/* Reserve some number of TREs on a channel. Returns true if successful */
static bool
gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
{
int avail = atomic_read(&trans_info->tre_avail);
int new;
do {
new = avail - (int)tre_count;
if (unlikely(new < 0))
return false;
} while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));
return true;
}
/* Release previously-reserved TRE entries to a channel */
static void
gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
{
atomic_add(tre_count, &trans_info->tre_avail);
}
/* Return true if no transactions are allocated, false otherwise */
bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
{
u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
struct gsi_trans_info *trans_info;
trans_info = &gsi->channel[channel_id].trans_info;
return atomic_read(&trans_info->tre_avail) == tre_max;
}
/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
u32 tre_count,
enum dma_data_direction direction)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
struct gsi_trans_info *trans_info;
struct gsi_trans *trans;
u16 trans_index;
if (WARN_ON(tre_count > channel->trans_tre_max))
return NULL;
trans_info = &channel->trans_info;
/* If we can't reserve the TREs for the transaction, we're done */
if (!gsi_trans_tre_reserve(trans_info, tre_count))
return NULL;
trans_index = trans_info->free_id % channel->tre_count;
trans = &trans_info->trans[trans_index];
memset(trans, 0, sizeof(*trans));
/* Initialize non-zero fields in the transaction */
trans->gsi = gsi;
trans->channel_id = channel_id;
trans->rsvd_count = tre_count;
init_completion(&trans->completion);
/* Allocate the scatterlist */
trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
sg_init_marker(trans->sgl, tre_count);
trans->direction = direction;
refcount_set(&trans->refcount, 1);
/* This free transaction is now allocated */
trans_info->free_id++;
return trans;
}
/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
struct gsi_trans_info *trans_info;
if (!refcount_dec_and_test(&trans->refcount))
return;
/* Unused transactions are allocated but never committed, pending,
* completed, or polled.
*/
trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
if (!trans->used_count) {
trans_info->allocated_id++;
trans_info->committed_id++;
trans_info->pending_id++;
trans_info->completed_id++;
} else {
ipa_gsi_trans_release(trans);
}
/* This transaction is now free */
trans_info->polled_id++;
/* Releasing the reserved TREs implicitly frees the sgl[] and
* (if present) info[] arrays, plus the transaction itself.
*/
gsi_trans_tre_release(trans_info, trans->rsvd_count);
}
/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
dma_addr_t addr, enum ipa_cmd_opcode opcode)
{
u32 which = trans->used_count++;
struct scatterlist *sg;
WARN_ON(which >= trans->rsvd_count);
/* Commands are quite different from data transfer requests.
* Their payloads come from a pool whose memory is allocated
* using dma_alloc_coherent(). We therefore do *not* map them
* for DMA (unlike what we do for pages and skbs).
*
* When a transaction completes, the SGL is normally unmapped.
* A command transaction has direction DMA_NONE, which tells
* gsi_trans_complete() to skip the unmapping step.
*
* The only things we use directly in a command scatter/gather
* entry are the DMA address and length. We still need the SG
* table flags to be maintained though, so assign a NULL page
* pointer for that purpose.
*/
sg = &trans->sgl[which];
sg_assign_page(sg, NULL);
sg_dma_address(sg) = addr;
sg_dma_len(sg) = size;
trans->cmd_opcode[which] = opcode;
}
/* Add a page transfer to a transaction. It will fill the only TRE. */
int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
u32 offset)
{
struct scatterlist *sg = &trans->sgl[0];
int ret;
if (WARN_ON(trans->rsvd_count != 1))
return -EINVAL;
if (WARN_ON(trans->used_count))
return -EINVAL;
sg_set_page(sg, page, size, offset);
ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
if (!ret)
return -ENOMEM;
trans->used_count++; /* Transaction now owns the (DMA mapped) page */
return 0;
}
/* Add an SKB transfer to a transaction. No other TREs will be used. */
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
struct scatterlist *sg = &trans->sgl[0];
u32 used_count;
int ret;
if (WARN_ON(trans->rsvd_count != 1))
return -EINVAL;
if (WARN_ON(trans->used_count))
return -EINVAL;
/* skb->len will not be 0 (checked early) */
ret = skb_to_sgvec(skb, sg, 0, skb->len);
if (ret < 0)
return ret;
used_count = ret;
ret = dma_map_sg(trans->gsi->dev, sg, used_count, trans->direction);
if (!ret)
return -ENOMEM;
/* Transaction now owns the (DMA mapped) skb */
trans->used_count += used_count;
return 0;
}
/* Compute the length/opcode value to use for a TRE */
static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
{
return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
: cpu_to_le16((u16)opcode);
}
/* Compute the flags value to use for a given TRE */
static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
{
enum gsi_tre_type tre_type;
u32 tre_flags;
tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);
/* Last TRE contains interrupt flags */
if (last_tre) {
/* All transactions end in a transfer completion interrupt */
tre_flags |= TRE_FLAGS_IEOT_FMASK;
/* Don't interrupt when outbound commands are acknowledged */
if (bei)
tre_flags |= TRE_FLAGS_BEI_FMASK;
} else { /* All others indicate there's more to come */
tre_flags |= TRE_FLAGS_CHAIN_FMASK;
}
return cpu_to_le32(tre_flags);
}
static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
u32 len, bool last_tre, bool bei,
enum ipa_cmd_opcode opcode)
{
struct gsi_tre tre;
tre.addr = cpu_to_le64(addr);
tre.len_opcode = gsi_tre_len_opcode(opcode, len);
tre.reserved = 0;
tre.flags = gsi_tre_flags(last_tre, bei, opcode);
/* ARM64 can write 16 bytes as a unit with a single instruction.
* Doing the assignment this way is an attempt to make that happen.
*/
*dest_tre = tre;
}
/**
* __gsi_trans_commit() - Common GSI transaction commit code
* @trans: Transaction to commit
* @ring_db: Whether to tell the hardware about these queued transfers
*
* Formats channel ring TRE entries based on the content of the scatterlist.
* Maps a transaction pointer to the last ring entry used for the transaction,
* so it can be recovered when it completes. Moves the transaction to
* pending state. Finally, updates the channel ring pointer and optionally
* rings the doorbell.
*/
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_ring *tre_ring = &channel->tre_ring;
enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
bool bei = channel->toward_ipa;
struct gsi_tre *dest_tre;
struct scatterlist *sg;
u32 byte_count = 0;
u8 *cmd_opcode;
u32 avail;
u32 i;
WARN_ON(!trans->used_count);
/* Consume the entries. If we cross the end of the ring while
* filling them we'll switch to the beginning to finish.
* If there is no info array we're doing a simple data
* transfer request, whose opcode is IPA_CMD_NONE.
*/
cmd_opcode = channel->command ? &trans->cmd_opcode[0] : NULL;
avail = tre_ring->count - tre_ring->index % tre_ring->count;
dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
for_each_sg(trans->sgl, sg, trans->used_count, i) {
bool last_tre = i == trans->used_count - 1;
dma_addr_t addr = sg_dma_address(sg);
u32 len = sg_dma_len(sg);
byte_count += len;
if (!avail--)
dest_tre = gsi_ring_virt(tre_ring, 0);
if (cmd_opcode)
opcode = *cmd_opcode++;
gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
dest_tre++;
}
/* Associate the TRE with the transaction */
gsi_trans_map(trans, tre_ring->index);
tre_ring->index += trans->used_count;
trans->len = byte_count;
if (channel->toward_ipa)
gsi_trans_tx_committed(trans);
gsi_trans_move_committed(trans);
/* Ring doorbell if requested, or if all TREs are allocated */
if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
/* Report what we're handing off to hardware for TX channels */
if (channel->toward_ipa)
gsi_trans_tx_queued(trans);
gsi_trans_move_pending(trans);
gsi_channel_doorbell(channel);
}
}
/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
if (trans->used_count)
__gsi_trans_commit(trans, ring_db);
else
gsi_trans_free(trans);
}
/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
if (!trans->used_count)
goto out_trans_free;
refcount_inc(&trans->refcount);
__gsi_trans_commit(trans, true);
wait_for_completion(&trans->completion);
out_trans_free:
gsi_trans_free(trans);
}
/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
/* If the entire SGL was mapped when added, unmap it now */
if (trans->direction != DMA_NONE)
dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used_count,
trans->direction);
ipa_gsi_trans_complete(trans);
complete(&trans->completion);
gsi_trans_free(trans);
}
/* Cancel a channel's pending transactions */
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
u16 trans_id = trans_info->pending_id;
/* channel->gsi->mutex is held by caller */
/* If there are no pending transactions, we're done */
if (trans_id == trans_info->committed_id)
return;
/* Mark all pending transactions cancelled */
do {
struct gsi_trans *trans;
trans = &trans_info->trans[trans_id % channel->tre_count];
trans->cancelled = true;
} while (++trans_id != trans_info->committed_id);
/* All pending transactions are now completed */
trans_info->pending_id = trans_info->committed_id;
/* Schedule NAPI polling to complete the cancelled transactions */
napi_schedule(&channel->napi);
}
/* Issue a command to read a single byte from a channel */
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
struct gsi_ring *tre_ring = &channel->tre_ring;
struct gsi_trans_info *trans_info;
struct gsi_tre *dest_tre;
trans_info = &channel->trans_info;
/* First reserve the TRE, if possible */
if (!gsi_trans_tre_reserve(trans_info, 1))
return -EBUSY;
/* Now fill the reserved TRE and tell the hardware */
dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);
tre_ring->index++;
gsi_channel_doorbell(channel);
return 0;
}
/* Mark a gsi_trans_read_byte() request done */
void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
gsi_trans_tre_release(&channel->trans_info, 1);
}
/* Initialize a channel's GSI transaction info */
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
u32 tre_count = channel->tre_count;
struct gsi_trans_info *trans_info;
u32 tre_max;
int ret;
/* Ensure the size of a channel element is what's expected */
BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);
trans_info = &channel->trans_info;
/* The tre_avail field is what ultimately limits the number of
* outstanding transactions and their resources. A transaction
* allocation succeeds only if the TREs available are sufficient
* for what the transaction might need.
*/
tre_max = gsi_channel_tre_max(channel->gsi, channel_id);
atomic_set(&trans_info->tre_avail, tre_max);
/* We can't use more TREs than the number available in the ring.
* This limits the number of transactions that can be outstanding.
* Worst case is one TRE per transaction (but we actually limit
* it to something a little less than that). By allocating a
* power-of-two number of transactions we can use an index
* modulo that number to determine the next one that's free.
* Transactions are allocated one at a time.
*/
trans_info->trans = kcalloc(tre_count, sizeof(*trans_info->trans),
GFP_KERNEL);
if (!trans_info->trans)
return -ENOMEM;
trans_info->free_id = 0; /* all modulo channel->tre_count */
trans_info->allocated_id = 0;
trans_info->committed_id = 0;
trans_info->pending_id = 0;
trans_info->completed_id = 0;
trans_info->polled_id = 0;
/* A completion event contains a pointer to the TRE that caused
* the event (which will be the last one used by the transaction).
* Each entry in this map records the transaction associated
* with a corresponding completed TRE.
*/
trans_info->map = kcalloc(tre_count, sizeof(*trans_info->map),
GFP_KERNEL);
if (!trans_info->map) {
ret = -ENOMEM;
goto err_trans_free;
}
/* A transaction uses a scatterlist array to represent the data
* transfers implemented by the transaction. Each scatterlist
* element is used to fill a single TRE when the transaction is
* committed. So we need as many scatterlist elements as the
* maximum number of TREs that can be outstanding.
*/
ret = gsi_trans_pool_init(&trans_info->sg_pool,
sizeof(struct scatterlist),
tre_max, channel->trans_tre_max);
if (ret)
goto err_map_free;
return 0;
err_map_free:
kfree(trans_info->map);
err_trans_free:
kfree(trans_info->trans);
dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
ret, channel_id);
return ret;
}
/* Inverse of gsi_channel_trans_init() */
void gsi_channel_trans_exit(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
gsi_trans_pool_exit(&trans_info->sg_pool);
kfree(trans_info->trans);
kfree(trans_info->map);
}
| linux-master | drivers/net/ipa/gsi_trans.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2022 Linaro Ltd.
*/
/* DOC: IPA Interrupts
*
* The IPA has an interrupt line distinct from the interrupt used by the GSI
* code. Whereas GSI interrupts are generally related to channel events (like
* transfer completions), IPA interrupts signal other events related to the
* IPA itself. Some of the IPA interrupts come from a microcontroller
* embedded in the IPA. Each IPA interrupt type can be both masked and
* acknowledged independent of the others.
*
* Two of the IPA interrupts are initiated by the microcontroller. A third
* can be generated to signal the need for a wakeup/resume when an IPA
* endpoint has been suspended. There are other IPA events, but at this
* time only these three are supported.
*/
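/* A minimal sketch of how another IPA module consumes this layer (the power
 * code does something along these lines for the TX_SUSPEND case; this is
 * illustrative only, not a copy of that code):
 *
 *	ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
 *	...
 *	ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
 */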
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_endpoint.h"
#include "ipa_power.h"
#include "ipa_uc.h"
#include "ipa_interrupt.h"
/**
* struct ipa_interrupt - IPA interrupt information
* @ipa: IPA pointer
* @irq: Linux IRQ number used for IPA interrupts
* @enabled: Mask indicating which interrupts are enabled
*/
struct ipa_interrupt {
struct ipa *ipa;
u32 irq;
u32 enabled;
};
/* Process a particular interrupt type that has been received */
static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
struct ipa *ipa = interrupt->ipa;
const struct reg *reg;
u32 mask = BIT(irq_id);
u32 offset;
reg = ipa_reg(ipa, IPA_IRQ_CLR);
offset = reg_offset(reg);
switch (irq_id) {
case IPA_IRQ_UC_0:
case IPA_IRQ_UC_1:
/* For microcontroller interrupts, clear the interrupt right
* away, "to avoid clearing unhandled interrupts."
*/
iowrite32(mask, ipa->reg_virt + offset);
ipa_uc_interrupt_handler(ipa, irq_id);
break;
case IPA_IRQ_TX_SUSPEND:
/* Clearing the SUSPEND_TX interrupt also clears the
* register that tells us which suspended endpoint(s)
* caused the interrupt, so defer clearing until after
* the handler has been called.
*/
ipa_power_suspend_handler(ipa, irq_id);
fallthrough;
default: /* Silently ignore (and clear) any other condition */
iowrite32(mask, ipa->reg_virt + offset);
break;
}
}
/* IPA IRQ handler is threaded */
static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
{
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
const struct reg *reg;
struct device *dev;
u32 pending;
u32 offset;
u32 mask;
int ret;
dev = &ipa->pdev->dev;
ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0))
goto out_power_put;
/* The status register indicates which conditions are present,
* including conditions whose interrupt is not enabled. Handle
* only the enabled ones.
*/
reg = ipa_reg(ipa, IPA_IRQ_STTS);
offset = reg_offset(reg);
pending = ioread32(ipa->reg_virt + offset);
while ((mask = pending & enabled)) {
do {
u32 irq_id = __ffs(mask);
mask ^= BIT(irq_id);
ipa_interrupt_process(interrupt, irq_id);
} while (mask);
pending = ioread32(ipa->reg_virt + offset);
}
/* If any disabled interrupts are pending, clear them */
if (pending) {
dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
pending);
reg = ipa_reg(ipa, IPA_IRQ_CLR);
iowrite32(pending, ipa->reg_virt + reg_offset(reg));
}
out_power_put:
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
return IRQ_HANDLED;
}
static void ipa_interrupt_enabled_update(struct ipa *ipa)
{
const struct reg *reg = ipa_reg(ipa, IPA_IRQ_EN);
iowrite32(ipa->interrupt->enabled, ipa->reg_virt + reg_offset(reg));
}
/* Enable an IPA interrupt type */
void ipa_interrupt_enable(struct ipa *ipa, enum ipa_irq_id ipa_irq)
{
/* Update the IPA interrupt mask to enable it */
ipa->interrupt->enabled |= BIT(ipa_irq);
ipa_interrupt_enabled_update(ipa);
}
/* Disable an IPA interrupt type */
void ipa_interrupt_disable(struct ipa *ipa, enum ipa_irq_id ipa_irq)
{
/* Update the IPA interrupt mask to disable it */
ipa->interrupt->enabled &= ~BIT(ipa_irq);
ipa_interrupt_enabled_update(ipa);
}
void ipa_interrupt_irq_disable(struct ipa *ipa)
{
disable_irq(ipa->interrupt->irq);
}
void ipa_interrupt_irq_enable(struct ipa *ipa)
{
enable_irq(ipa->interrupt->irq);
}
/* Common function used to enable/disable TX_SUSPEND for an endpoint */
static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
u32 endpoint_id, bool enable)
{
struct ipa *ipa = interrupt->ipa;
u32 mask = BIT(endpoint_id % 32);
u32 unit = endpoint_id / 32;
const struct reg *reg;
u32 offset;
u32 val;
WARN_ON(!test_bit(endpoint_id, ipa->available));
/* IPA version 3.0 does not support TX_SUSPEND interrupt control */
if (ipa->version == IPA_VERSION_3_0)
return;
reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
offset = reg_n_offset(reg, unit);
val = ioread32(ipa->reg_virt + offset);
if (enable)
val |= mask;
else
val &= ~mask;
iowrite32(val, ipa->reg_virt + offset);
}
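/* For example, endpoint 35 maps to unit 1 (35 / 32) and mask BIT(3)
 * (35 % 32) of the corresponding IRQ_SUSPEND_EN register.
 */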
/* Enable TX_SUSPEND for an endpoint */
void
ipa_interrupt_suspend_enable(struct ipa_interrupt *interrupt, u32 endpoint_id)
{
ipa_interrupt_suspend_control(interrupt, endpoint_id, true);
}
/* Disable TX_SUSPEND for an endpoint */
void
ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
{
ipa_interrupt_suspend_control(interrupt, endpoint_id, false);
}
/* Clear the suspend interrupt for all endpoints that signaled it */
void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
{
struct ipa *ipa = interrupt->ipa;
u32 unit_count;
u32 unit;
unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32);
for (unit = 0; unit < unit_count; unit++) {
const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
/* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
if (ipa->version == IPA_VERSION_3_0)
continue;
reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit));
}
}
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
{
ipa_interrupt_process(interrupt, IPA_IRQ_TX_SUSPEND);
}
/* Configure the IPA interrupt framework */
struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
struct ipa_interrupt *interrupt;
const struct reg *reg;
unsigned int irq;
int ret;
ret = platform_get_irq_byname(ipa->pdev, "ipa");
if (ret <= 0) {
dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n",
ret);
return ERR_PTR(ret ? : -EINVAL);
}
irq = ret;
interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
if (!interrupt)
return ERR_PTR(-ENOMEM);
interrupt->ipa = ipa;
interrupt->irq = irq;
/* Start with all IPA interrupts disabled */
reg = ipa_reg(ipa, IPA_IRQ_EN);
iowrite32(0, ipa->reg_virt + reg_offset(reg));
ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt);
if (ret) {
dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret);
goto err_kfree;
}
ret = dev_pm_set_wake_irq(dev, irq);
if (ret) {
dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n", ret);
goto err_free_irq;
}
return interrupt;
err_free_irq:
free_irq(interrupt->irq, interrupt);
err_kfree:
kfree(interrupt);
return ERR_PTR(ret);
}
/* Inverse of ipa_interrupt_config() */
void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt)
{
struct device *dev = &interrupt->ipa->pdev->dev;
dev_pm_clear_wake_irq(dev);
free_irq(interrupt->irq, interrupt);
kfree(interrupt);
}
| linux-master | drivers/net/ipa/ipa_interrupt.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/pm_runtime.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include "ipa_smp2p.h"
#include "ipa.h"
#include "ipa_uc.h"
/**
* DOC: IPA SMP2P communication with the modem
*
* SMP2P is a primitive communication mechanism available between the AP and
* the modem. The IPA driver uses this for two purposes: to enable the modem
* to state that the GSI hardware is ready to use; and to communicate the
* state of IPA power in the event of a crash.
*
* GSI needs to have early initialization completed before it can be used.
* This initialization is done either by Trust Zone or by the modem. In the
* latter case, the modem uses an SMP2P interrupt to tell the AP IPA driver
* when the GSI is ready to use.
*
* The modem is also able to inquire about the current state of IPA
* power by triggering another SMP2P interrupt to the AP. We communicate
* whether power is enabled using two SMP2P state bits--one to indicate
* the power state (on or off), and a second to indicate the power state
* bit is valid. The modem will poll the valid bit until it is set, and
* at that time records whether the AP has IPA power enabled.
*
* Finally, if the AP kernel panics, we update the SMP2P state bits even if
* we never receive an interrupt from the modem requesting this.
*/
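/* Seen from the modem side, the handshake described above amounts to
 * (pseudo-code only, not part of this driver):
 *
 *	while (!(smp2p_state & BIT(valid_bit)))
 *		poll();
 *	ap_power_is_on = !!(smp2p_state & BIT(enabled_bit));
 */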
/**
* struct ipa_smp2p - IPA SMP2P information
* @ipa: IPA pointer
* @valid_state: SMEM state indicating enabled state is valid
* @enabled_state: SMEM state to indicate power is enabled
* @valid_bit: Valid bit in 32-bit SMEM state mask
* @enabled_bit: Enabled bit in 32-bit SMEM state mask
* @clock_query_irq: IPA interrupt triggered by modem for power query
* @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready
* @power_on: Whether IPA power is on
* @notified: Whether modem has been notified of power state
* @setup_disabled: Whether setup ready interrupt handler is disabled
* @mutex: Mutex protecting ready-interrupt/shutdown interlock
* @panic_notifier: Panic notifier structure
*/
struct ipa_smp2p {
struct ipa *ipa;
struct qcom_smem_state *valid_state;
struct qcom_smem_state *enabled_state;
u32 valid_bit;
u32 enabled_bit;
u32 clock_query_irq;
u32 setup_ready_irq;
bool power_on;
bool notified;
bool setup_disabled;
struct mutex mutex;
struct notifier_block panic_notifier;
};
/**
* ipa_smp2p_notify() - use SMP2P to tell modem about IPA power state
* @smp2p: SMP2P information
*
* This is called either when the modem has requested it (by triggering
* the modem power query IPA interrupt) or whenever the AP is shutting down
* (via a panic notifier). It sets the two SMP2P state bits--one saying
* whether the IPA power is on, and the other indicating the first bit
* is valid.
*/
static void ipa_smp2p_notify(struct ipa_smp2p *smp2p)
{
struct device *dev;
u32 value;
u32 mask;
if (smp2p->notified)
return;
dev = &smp2p->ipa->pdev->dev;
smp2p->power_on = pm_runtime_get_if_active(dev, true) > 0;
/* Signal whether the IPA power is enabled */
mask = BIT(smp2p->enabled_bit);
value = smp2p->power_on ? mask : 0;
qcom_smem_state_update_bits(smp2p->enabled_state, mask, value);
/* Now indicate that the enabled flag is valid */
mask = BIT(smp2p->valid_bit);
value = mask;
qcom_smem_state_update_bits(smp2p->valid_state, mask, value);
smp2p->notified = true;
}
/* Threaded IRQ handler for modem "ipa-clock-query" SMP2P interrupt */
static irqreturn_t ipa_smp2p_modem_clk_query_isr(int irq, void *dev_id)
{
struct ipa_smp2p *smp2p = dev_id;
ipa_smp2p_notify(smp2p);
return IRQ_HANDLED;
}
static int ipa_smp2p_panic_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct ipa_smp2p *smp2p;
smp2p = container_of(nb, struct ipa_smp2p, panic_notifier);
ipa_smp2p_notify(smp2p);
if (smp2p->power_on)
ipa_uc_panic_notifier(smp2p->ipa);
return NOTIFY_DONE;
}
static int ipa_smp2p_panic_notifier_register(struct ipa_smp2p *smp2p)
{
/* IPA panic handler needs to run before modem shuts down */
smp2p->panic_notifier.notifier_call = ipa_smp2p_panic_notifier;
smp2p->panic_notifier.priority = INT_MAX; /* Do it early */
return atomic_notifier_chain_register(&panic_notifier_list,
&smp2p->panic_notifier);
}
static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p)
{
atomic_notifier_chain_unregister(&panic_notifier_list,
&smp2p->panic_notifier);
}
/* Threaded IRQ handler for modem "ipa-setup-ready" SMP2P interrupt */
static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
{
struct ipa_smp2p *smp2p = dev_id;
struct device *dev;
int ret;
/* Ignore any (spurious) interrupts received after the first */
if (smp2p->ipa->setup_complete)
return IRQ_HANDLED;
/* Power needs to be active for setup */
dev = &smp2p->ipa->pdev->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "error %d getting power for setup\n", ret);
goto out_power_put;
}
/* An error here won't cause driver shutdown, so warn if one occurs */
ret = ipa_setup(smp2p->ipa);
WARN(ret != 0, "error %d from ipa_setup()\n", ret);
out_power_put:
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
return IRQ_HANDLED;
}
/* Initialize SMP2P interrupts */
static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p, const char *name,
irq_handler_t handler)
{
struct device *dev = &smp2p->ipa->pdev->dev;
unsigned int irq;
int ret;
ret = platform_get_irq_byname(smp2p->ipa->pdev, name);
if (ret <= 0)
return ret ? : -EINVAL;
irq = ret;
ret = request_threaded_irq(irq, NULL, handler, 0, name, smp2p);
if (ret) {
dev_err(dev, "error %d requesting \"%s\" IRQ\n", ret, name);
return ret;
}
return irq;
}
static void ipa_smp2p_irq_exit(struct ipa_smp2p *smp2p, u32 irq)
{
free_irq(irq, smp2p);
}
/* Drop the power reference if it was taken in ipa_smp2p_notify() */
static void ipa_smp2p_power_release(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
if (!ipa->smp2p->power_on)
return;
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
ipa->smp2p->power_on = false;
}
/* Initialize the IPA SMP2P subsystem */
int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
{
struct qcom_smem_state *enabled_state;
struct device *dev = &ipa->pdev->dev;
struct qcom_smem_state *valid_state;
struct ipa_smp2p *smp2p;
u32 enabled_bit;
u32 valid_bit;
int ret;
valid_state = qcom_smem_state_get(dev, "ipa-clock-enabled-valid",
&valid_bit);
if (IS_ERR(valid_state))
return PTR_ERR(valid_state);
if (valid_bit >= 32) /* BITS_PER_U32 */
return -EINVAL;
enabled_state = qcom_smem_state_get(dev, "ipa-clock-enabled",
&enabled_bit);
if (IS_ERR(enabled_state))
return PTR_ERR(enabled_state);
if (enabled_bit >= 32) /* BITS_PER_U32 */
return -EINVAL;
smp2p = kzalloc(sizeof(*smp2p), GFP_KERNEL);
if (!smp2p)
return -ENOMEM;
smp2p->ipa = ipa;
/* These fields are needed by the power query interrupt
* handler, so initialize them now.
*/
mutex_init(&smp2p->mutex);
smp2p->valid_state = valid_state;
smp2p->valid_bit = valid_bit;
smp2p->enabled_state = enabled_state;
smp2p->enabled_bit = enabled_bit;
/* We have enough information saved to handle notifications */
ipa->smp2p = smp2p;
ret = ipa_smp2p_irq_init(smp2p, "ipa-clock-query",
ipa_smp2p_modem_clk_query_isr);
if (ret < 0)
goto err_null_smp2p;
smp2p->clock_query_irq = ret;
ret = ipa_smp2p_panic_notifier_register(smp2p);
if (ret)
goto err_irq_exit;
if (modem_init) {
/* Result will be non-zero (negative for error) */
ret = ipa_smp2p_irq_init(smp2p, "ipa-setup-ready",
ipa_smp2p_modem_setup_ready_isr);
if (ret < 0)
goto err_notifier_unregister;
smp2p->setup_ready_irq = ret;
}
return 0;
err_notifier_unregister:
ipa_smp2p_panic_notifier_unregister(smp2p);
err_irq_exit:
ipa_smp2p_irq_exit(smp2p, smp2p->clock_query_irq);
err_null_smp2p:
ipa->smp2p = NULL;
mutex_destroy(&smp2p->mutex);
kfree(smp2p);
return ret;
}
void ipa_smp2p_exit(struct ipa *ipa)
{
struct ipa_smp2p *smp2p = ipa->smp2p;
if (smp2p->setup_ready_irq)
ipa_smp2p_irq_exit(smp2p, smp2p->setup_ready_irq);
ipa_smp2p_panic_notifier_unregister(smp2p);
ipa_smp2p_irq_exit(smp2p, smp2p->clock_query_irq);
/* We won't get notified any more; drop power reference (if any) */
ipa_smp2p_power_release(ipa);
ipa->smp2p = NULL;
mutex_destroy(&smp2p->mutex);
kfree(smp2p);
}
void ipa_smp2p_irq_disable_setup(struct ipa *ipa)
{
struct ipa_smp2p *smp2p = ipa->smp2p;
if (!smp2p->setup_ready_irq)
return;
mutex_lock(&smp2p->mutex);
if (!smp2p->setup_disabled) {
disable_irq(smp2p->setup_ready_irq);
smp2p->setup_disabled = true;
}
mutex_unlock(&smp2p->mutex);
}
/* Reset state tracking whether we have notified the modem */
void ipa_smp2p_notify_reset(struct ipa *ipa)
{
struct ipa_smp2p *smp2p = ipa->smp2p;
u32 mask;
if (!smp2p->notified)
return;
ipa_smp2p_power_release(ipa);
/* Reset the power enabled valid flag */
mask = BIT(smp2p->valid_bit);
qcom_smem_state_update_bits(smp2p->valid_state, mask, 0);
/* Mark the power disabled for good measure... */
mask = BIT(smp2p->enabled_bit);
qcom_smem_state_update_bits(smp2p->enabled_state, mask, 0);
smp2p->notified = false;
}
| linux-master | drivers/net/ipa/ipa_smp2p.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2023 Linaro Ltd.
*/
#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include "gsi.h"
#include "reg.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"
/**
* DOC: The IPA Generic Software Interface
*
* The generic software interface (GSI) is an integral component of the IPA,
* providing a well-defined communication layer between the AP subsystem
* and the IPA core. The modem uses the GSI layer as well.
*
*   --------             ---------
*   |      |             |       |
*   |  AP  +<---.   .----+ Modem |
*   |      +--. |   | .->+       |
*   |      |  | |   | |  |       |
*   --------  | |   | |  ---------
*             v |   v |
*           --+-+---+-+--
*           |    GSI    |
*           |-----------|
*           |           |
*           |    IPA    |
*           |           |
*           -------------
*
* In the above diagram, the AP and Modem represent "execution environments"
* (EEs), which are independent operating environments that use the IPA for
* data transfer.
*
* Each EE uses a set of unidirectional GSI "channels," which allow transfer
* of data to or from the IPA. A channel is implemented as a ring buffer,
* with a DRAM-resident array of "transfer elements" (TREs) available to
* describe transfers to or from other EEs through the IPA. A transfer
* element can also contain an immediate command, requesting the IPA perform
* actions other than data transfer.
*
* Each TRE refers to a block of data--also located in DRAM. After writing
* one or more TREs to a channel, the writer (either the IPA or an EE) writes
* a doorbell register to inform the receiving side how many elements have
* been written.
*
* Each channel has a GSI "event ring" associated with it. An event ring
* is implemented very much like a channel ring, but is always directed from
* the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
* events by adding an entry to the event ring associated with the channel.
* The GSI then writes its doorbell for the event ring, causing the target
* EE to be interrupted. Each entry in an event ring contains a pointer
* to the channel TRE whose completion the event represents.
*
* Each TRE in a channel ring has a set of flags. One flag indicates whether
* the completion of the transfer operation generates an entry (and possibly
* an interrupt) in the channel's event ring. Other flags allow transfer
* elements to be chained together, forming a single logical transaction.
* TRE flags are used to control whether and when interrupts are generated
* to signal completion of channel transfers.
*
* Elements in channel and event rings are completed (or consumed) strictly
* in order. Completion of one entry implies the completion of all preceding
* entries. A single completion interrupt can therefore communicate the
* completion of many transfers.
*
* Note that all GSI registers are little-endian, which is the assumed
* endianness of I/O space accesses. The accessor functions perform byte
* swapping if needed (i.e., for a big endian CPU).
*/
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
#define GSI_CMD_TIMEOUT 50 /* milliseconds */
#define GSI_CHANNEL_STOP_RETRIES 10
#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
#define GSI_CHANNEL_MODEM_FLOW_RETRIES 5 /* disable flow control only */
#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */
#define GSI_ISR_MAX_ITER 50 /* Detect interrupt storms */
/* An entry in an event ring */
struct gsi_event {
__le64 xfer_ptr;
__le16 len;
u8 reserved1;
u8 code;
__le16 reserved2;
u8 type;
u8 chid;
};
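/* The xfer_ptr field holds the bus address of the last TRE the event refers
 * to. Roughly (a sketch of what the completion path elsewhere in this file
 * does with it):
 *
 *	offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
 *	tre_index = gsi_ring_index(&channel->tre_ring, offset);
 */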
/** gsi_channel_scratch_gpi - GPI protocol scratch register
* @max_outstanding_tre:
* Defines the maximum number of TREs allowed in a single transaction
* on a channel (in bytes). This determines the amount of prefetch
* performed by the hardware. We configure this to equal the size of
* the TLV FIFO for the channel.
* @outstanding_threshold:
* Defines the threshold (in bytes) determining when the sequencer
* should update the channel doorbell. We configure this to equal
* the size of two TREs.
*/
struct gsi_channel_scratch_gpi {
u64 reserved1;
u16 reserved2;
u16 max_outstanding_tre;
u16 reserved3;
u16 outstanding_threshold;
};
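/* As a worked example: with the 16-byte GSI_RING_ELEMENT_SIZE used here, a
 * channel whose trans_tre_max were 8 would be programmed (in
 * gsi_channel_program() below) with max_outstanding_tre = 128 and
 * outstanding_threshold = 32, both in bytes; the value 8 is illustrative.
 */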
/** gsi_channel_scratch - channel scratch configuration area
*
* The exact interpretation of this register is protocol-specific.
* We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
*/
union gsi_channel_scratch {
struct gsi_channel_scratch_gpi gpi;
struct {
u32 word1;
u32 word2;
u32 word3;
u32 word4;
} data;
};
/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
/* This is used as a divisor */
BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);
/* Code assumes the size of channel and event ring element are
* the same (and fixed). Make sure the size of an event ring
* element is what's expected.
*/
BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);
/* Hardware requires a 2^n ring size. We ensure the number of
* elements in an event ring is a power of 2 elsewhere; this
* ensures the elements themselves meet the requirement.
*/
BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));
}
/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
return channel - &channel->gsi->channel[0];
}
/* An initialized channel has a non-null GSI pointer */
static bool gsi_channel_initialized(struct gsi_channel *channel)
{
return !!channel->gsi;
}
/* Encode the channel protocol for the CH_C_CNTXT_0 register */
static u32 ch_c_cntxt_0_type_encode(enum ipa_version version,
const struct reg *reg,
enum gsi_channel_type type)
{
u32 val;
val = reg_encode(reg, CHTYPE_PROTOCOL, type);
if (version < IPA_VERSION_4_5 || version >= IPA_VERSION_5_0)
return val;
type >>= hweight32(reg_fmask(reg, CHTYPE_PROTOCOL));
return val | reg_encode(reg, CHTYPE_PROTOCOL_MSB, type);
}
/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
const struct reg *reg = gsi_reg(gsi, CNTXT_TYPE_IRQ_MSK);
gsi->type_enabled_bitmap = val;
iowrite32(val, gsi->virt + reg_offset(reg));
}
static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | type_id);
}
static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~type_id);
}
/* Event ring commands are performed one at a time. Their completion
* is signaled by the event ring control GSI interrupt type, which is
* only enabled when we issue an event ring command. Only the event
* ring being operated on has this interrupt enabled.
*/
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
u32 val = BIT(evt_ring_id);
const struct reg *reg;
/* There's a small chance that a previous command completed
* after the interrupt was disabled, so make sure we have no
* pending interrupts before we enable them.
*/
reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
iowrite32(~0, gsi->virt + reg_offset(reg));
reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
iowrite32(val, gsi->virt + reg_offset(reg));
gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}
/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
const struct reg *reg;
gsi_irq_type_disable(gsi, GSI_EV_CTRL);
reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
iowrite32(0, gsi->virt + reg_offset(reg));
}
/* Channel commands are performed one at a time. Their completion is
* signaled by the channel control GSI interrupt type, which is only
* enabled when we issue a channel command. Only the channel being
* operated on has this interrupt enabled.
*/
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
u32 val = BIT(channel_id);
const struct reg *reg;
/* There's a small chance that a previous command completed
* after the interrupt was disabled, so make sure we have no
* pending interrupts before we enable them.
*/
reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
iowrite32(~0, gsi->virt + reg_offset(reg));
reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
iowrite32(val, gsi->virt + reg_offset(reg));
gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}
/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
const struct reg *reg;
gsi_irq_type_disable(gsi, GSI_CH_CTRL);
reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
iowrite32(0, gsi->virt + reg_offset(reg));
}
static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
bool enable_ieob = !gsi->ieob_enabled_bitmap;
const struct reg *reg;
u32 val;
gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
val = gsi->ieob_enabled_bitmap;
iowrite32(val, gsi->virt + reg_offset(reg));
/* Enable the interrupt type if this is the first channel enabled */
if (enable_ieob)
gsi_irq_type_enable(gsi, GSI_IEOB);
}
static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
const struct reg *reg;
u32 val;
gsi->ieob_enabled_bitmap &= ~event_mask;
/* Disable the interrupt type if this was the last enabled channel */
if (!gsi->ieob_enabled_bitmap)
gsi_irq_type_disable(gsi, GSI_IEOB);
reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
val = gsi->ieob_enabled_bitmap;
iowrite32(val, gsi->virt + reg_offset(reg));
}
static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
{
gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
}
/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
const struct reg *reg;
u32 val;
/* Global interrupts include hardware error reports. Enable
* that so we can at least report the error should it occur.
*/
reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));
gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GLOB_EE);
/* General GSI interrupts are reported to all EEs; if they occur
* they are unrecoverable (without reset). A breakpoint interrupt
* also exists, but we don't support that. We want to be notified
* of errors so we can report them, even if they can't be handled.
*/
reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
val = BUS_ERROR;
val |= CMD_FIFO_OVRFLOW;
val |= MCS_STACK_OVRFLOW;
iowrite32(val, gsi->virt + reg_offset(reg));
gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GENERAL);
}
/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
const struct reg *reg;
gsi_irq_type_update(gsi, 0);
/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
iowrite32(0, gsi->virt + reg_offset(reg));
reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
iowrite32(0, gsi->virt + reg_offset(reg));
}
/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
/* Note: index *must* be used modulo the ring count here */
return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}
/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
}
/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
/* Issue a GSI command by writing a value to a register, then wait for
* completion to be signaled. Returns true if the command completes
* or false if it times out.
*/
static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
{
unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
struct completion *completion = &gsi->completion;
reinit_completion(completion);
iowrite32(val, gsi->virt + reg);
return !!wait_for_completion_timeout(completion, timeout);
}
/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
const struct reg *reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
u32 val;
val = ioread32(gsi->virt + reg_n_offset(reg, evt_ring_id));
return reg_decode(reg, EV_CHSTATE, val);
}
/* Issue an event ring command and wait for it to complete */
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
enum gsi_evt_cmd_opcode opcode)
{
struct device *dev = gsi->dev;
const struct reg *reg;
bool timeout;
u32 val;
/* Enable the completion interrupt for the command */
gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
reg = gsi_reg(gsi, EV_CH_CMD);
val = reg_encode(reg, EV_CHID, evt_ring_id);
val |= reg_encode(reg, EV_OPCODE, opcode);
timeout = !gsi_command(gsi, reg_offset(reg), val);
gsi_irq_ev_ctrl_disable(gsi);
if (!timeout)
return;
dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}
/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
enum gsi_evt_ring_state state;
/* Get initial event ring state */
state = gsi_evt_ring_state(gsi, evt_ring_id);
if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
evt_ring_id, state);
return -EINVAL;
}
gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
/* If successful the event ring state will have changed */
state = gsi_evt_ring_state(gsi, evt_ring_id);
if (state == GSI_EVT_RING_STATE_ALLOCATED)
return 0;
dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
evt_ring_id, state);
return -EIO;
}
/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
enum gsi_evt_ring_state state;
state = gsi_evt_ring_state(gsi, evt_ring_id);
if (state != GSI_EVT_RING_STATE_ALLOCATED &&
state != GSI_EVT_RING_STATE_ERROR) {
dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
evt_ring_id, state);
return;
}
gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
/* If successful the event ring state will have changed */
state = gsi_evt_ring_state(gsi, evt_ring_id);
if (state == GSI_EVT_RING_STATE_ALLOCATED)
return;
dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
evt_ring_id, state);
}
/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
enum gsi_evt_ring_state state;
state = gsi_evt_ring_state(gsi, evt_ring_id);
if (state != GSI_EVT_RING_STATE_ALLOCATED) {
dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
evt_ring_id, state);
return;
}
gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
/* If successful the event ring state will have changed */
state = gsi_evt_ring_state(gsi, evt_ring_id);
if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
return;
dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
evt_ring_id, state);
}
/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
const struct reg *reg = gsi_reg(channel->gsi, CH_C_CNTXT_0);
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
void __iomem *virt = gsi->virt;
u32 val;
val = ioread32(virt + reg_n_offset(reg, channel_id));
return reg_decode(reg, CHSTATE, val);
}
/* Issue a channel command and wait for it to complete */
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
const struct reg *reg;
bool timeout;
u32 val;
/* Enable the completion interrupt for the command */
gsi_irq_ch_ctrl_enable(gsi, channel_id);
reg = gsi_reg(gsi, CH_CMD);
val = reg_encode(reg, CH_CHID, channel_id);
val |= reg_encode(reg, CH_OPCODE, opcode);
timeout = !gsi_command(gsi, reg_offset(reg), val);
gsi_irq_ch_ctrl_disable(gsi);
if (!timeout)
return;
dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
opcode, channel_id, gsi_channel_state(channel));
}
/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
/* Get initial channel state */
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
dev_err(dev, "channel %u bad state %u before alloc\n",
channel_id, state);
return -EINVAL;
}
gsi_channel_command(channel, GSI_CH_ALLOCATE);
/* If successful the channel state will have changed */
state = gsi_channel_state(channel);
if (state == GSI_CHANNEL_STATE_ALLOCATED)
return 0;
dev_err(dev, "channel %u bad state %u after alloc\n",
channel_id, state);
return -EIO;
}
/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED &&
state != GSI_CHANNEL_STATE_STOPPED) {
dev_err(dev, "channel %u bad state %u before start\n",
gsi_channel_id(channel), state);
return -EINVAL;
}
gsi_channel_command(channel, GSI_CH_START);
/* If successful the channel state will have changed */
state = gsi_channel_state(channel);
if (state == GSI_CHANNEL_STATE_STARTED)
return 0;
dev_err(dev, "channel %u bad state %u after start\n",
gsi_channel_id(channel), state);
return -EIO;
}
/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
state = gsi_channel_state(channel);
/* Channel could have entered STOPPED state since last call
* if it timed out. If so, we're done.
*/
if (state == GSI_CHANNEL_STATE_STOPPED)
return 0;
if (state != GSI_CHANNEL_STATE_STARTED &&
state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
dev_err(dev, "channel %u bad state %u before stop\n",
gsi_channel_id(channel), state);
return -EINVAL;
}
gsi_channel_command(channel, GSI_CH_STOP);
/* If successful the channel state will have changed */
state = gsi_channel_state(channel);
if (state == GSI_CHANNEL_STATE_STOPPED)
return 0;
/* We may have to try again if stop is in progress */
if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
return -EAGAIN;
dev_err(dev, "channel %u bad state %u after stop\n",
gsi_channel_id(channel), state);
return -EIO;
}
/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
/* A short delay is required before a RESET command */
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_STOPPED &&
state != GSI_CHANNEL_STATE_ERROR) {
/* No need to reset a channel already in ALLOCATED state */
if (state != GSI_CHANNEL_STATE_ALLOCATED)
dev_err(dev, "channel %u bad state %u before reset\n",
gsi_channel_id(channel), state);
return;
}
gsi_channel_command(channel, GSI_CH_RESET);
/* If successful the channel state will have changed */
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED)
dev_err(dev, "channel %u bad state %u after reset\n",
gsi_channel_id(channel), state);
}
/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED) {
dev_err(dev, "channel %u bad state %u before dealloc\n",
channel_id, state);
return;
}
gsi_channel_command(channel, GSI_CH_DE_ALLOC);
/* If successful the channel state will have changed */
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
dev_err(dev, "channel %u bad state %u after dealloc\n",
channel_id, state);
}
/* Ring an event ring doorbell, reporting the last entry processed by the AP.
* The index argument (modulo the ring count) is the first unfilled entry, so
* we supply one less than that with the doorbell. Update the event ring
* index field with the value provided.
*/
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
const struct reg *reg = gsi_reg(gsi, EV_CH_E_DOORBELL_0);
struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
u32 val;
ring->index = index; /* Next unused entry */
/* Note: index *must* be used modulo the ring count here */
val = gsi_ring_addr(ring, (index - 1) % ring->count);
iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
}
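/* For example, if the first unfilled entry is index 0 in a 16-element ring,
 * the doorbell is written with the address of element 15, since
 * (0 - 1) % 16 == 15 in unsigned arithmetic.
 */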
/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct gsi_ring *ring = &evt_ring->ring;
const struct reg *reg;
u32 val;
reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
/* We program all event rings as GPI type/protocol */
val = reg_encode(reg, EV_CHTYPE, GSI_CHANNEL_TYPE_GPI);
/* EV_EE field is 0 (GSI_EE_AP) */
val |= reg_bit(reg, EV_INTYPE);
val |= reg_encode(reg, EV_ELEMENT_SIZE, GSI_RING_ELEMENT_SIZE);
iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
reg = gsi_reg(gsi, EV_CH_E_CNTXT_1);
val = reg_encode(reg, R_LENGTH, ring->count * GSI_RING_ELEMENT_SIZE);
iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
/* The context 2 and 3 registers store the low-order and
* high-order 32 bits of the address of the event ring,
* respectively.
*/
reg = gsi_reg(gsi, EV_CH_E_CNTXT_2);
val = lower_32_bits(ring->addr);
iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
reg = gsi_reg(gsi, EV_CH_E_CNTXT_3);
val = upper_32_bits(ring->addr);
iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
/* Enable interrupt moderation by setting the moderation delay */
reg = gsi_reg(gsi, EV_CH_E_CNTXT_8);
val = reg_encode(reg, EV_MODT, GSI_EVT_RING_INT_MODT);
val |= reg_encode(reg, EV_MODC, 1); /* comes from channel */
/* EV_MOD_CNT is 0 (no counter-based interrupt coalescing) */
iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
/* No MSI write data, and MSI high and low address is 0 */
reg = gsi_reg(gsi, EV_CH_E_CNTXT_9);
iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
reg = gsi_reg(gsi, EV_CH_E_CNTXT_10);
iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
reg = gsi_reg(gsi, EV_CH_E_CNTXT_11);
iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
/* We don't need to get event read pointer updates */
reg = gsi_reg(gsi, EV_CH_E_CNTXT_12);
iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
reg = gsi_reg(gsi, EV_CH_E_CNTXT_13);
iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
/* Finally, tell the hardware our "last processed" event (arbitrary) */
gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
}
/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
u32 pending_id = trans_info->pending_id;
struct gsi_trans *trans;
u16 trans_id;
if (channel->toward_ipa && pending_id != trans_info->free_id) {
/* There is a small chance a TX transaction got allocated
* just before we disabled transmits, so check for that.
* The last allocated, committed, or pending transaction
* precedes the first free transaction.
*/
trans_id = trans_info->free_id - 1;
} else if (trans_info->polled_id != pending_id) {
/* Otherwise (TX or RX) we want to wait for anything that
* has completed, or has been polled but not released yet.
*
* The last completed or polled transaction precedes the
* first pending transaction.
*/
trans_id = pending_id - 1;
} else {
return NULL;
}
/* Caller will wait for this, so take a reference */
trans = &trans_info->trans[trans_id % channel->tre_count];
refcount_inc(&trans->refcount);
return trans;
}
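/* For example, on a TX channel with free_id == 7 the transaction waited for
 * is id 6 (the last one allocated); otherwise, with pending_id == 4 and
 * polled_id == 2, it is id 3 (the last one completed or polled).
 */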
/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
struct gsi_trans *trans;
/* Get the last transaction, and wait for it to complete */
trans = gsi_channel_trans_last(channel);
if (trans) {
wait_for_completion(&trans->completion);
gsi_trans_free(trans);
}
}
/* Program a channel for use; there is no gsi_channel_deprogram() */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
u32 channel_id = gsi_channel_id(channel);
union gsi_channel_scratch scr = { };
struct gsi_channel_scratch_gpi *gpi;
struct gsi *gsi = channel->gsi;
const struct reg *reg;
u32 wrr_weight = 0;
u32 offset;
u32 val;
reg = gsi_reg(gsi, CH_C_CNTXT_0);
/* We program all channels as GPI type/protocol */
val = ch_c_cntxt_0_type_encode(gsi->version, reg, GSI_CHANNEL_TYPE_GPI);
if (channel->toward_ipa)
val |= reg_bit(reg, CHTYPE_DIR);
if (gsi->version < IPA_VERSION_5_0)
val |= reg_encode(reg, ERINDEX, channel->evt_ring_id);
val |= reg_encode(reg, ELEMENT_SIZE, GSI_RING_ELEMENT_SIZE);
iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
reg = gsi_reg(gsi, CH_C_CNTXT_1);
val = reg_encode(reg, CH_R_LENGTH, size);
if (gsi->version >= IPA_VERSION_5_0)
val |= reg_encode(reg, CH_ERINDEX, channel->evt_ring_id);
iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
/* The context 2 and 3 registers store the low-order and
* high-order 32 bits of the address of the channel ring,
* respectively.
*/
reg = gsi_reg(gsi, CH_C_CNTXT_2);
val = lower_32_bits(channel->tre_ring.addr);
iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
reg = gsi_reg(gsi, CH_C_CNTXT_3);
val = upper_32_bits(channel->tre_ring.addr);
iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
reg = gsi_reg(gsi, CH_C_QOS);
/* Command channel gets low weighted round-robin priority */
if (channel->command)
wrr_weight = reg_field_max(reg, WRR_WEIGHT);
val = reg_encode(reg, WRR_WEIGHT, wrr_weight);
/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
/* No need to use the doorbell engine starting at IPA v4.0 */
if (gsi->version < IPA_VERSION_4_0 && doorbell)
val |= reg_bit(reg, USE_DB_ENG);
/* v4.0 introduces an escape buffer for prefetch. We use it
* on all but the AP command channel.
*/
if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
/* If not otherwise set, prefetch buffers are used */
if (gsi->version < IPA_VERSION_4_5)
val |= reg_bit(reg, USE_ESCAPE_BUF_ONLY);
else
val |= reg_encode(reg, PREFETCH_MODE, ESCAPE_BUF_ONLY);
}
/* All channels set DB_IN_BYTES */
if (gsi->version >= IPA_VERSION_4_9)
val |= reg_bit(reg, DB_IN_BYTES);
iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
/* Now update the scratch registers for GPI protocol */
gpi = &scr.gpi;
gpi->max_outstanding_tre = channel->trans_tre_max *
GSI_RING_ELEMENT_SIZE;
gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
reg = gsi_reg(gsi, CH_C_SCRATCH_0);
val = scr.data.word1;
iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
reg = gsi_reg(gsi, CH_C_SCRATCH_1);
val = scr.data.word2;
iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
reg = gsi_reg(gsi, CH_C_SCRATCH_2);
val = scr.data.word3;
iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
/* We must preserve the upper 16 bits of the last scratch register.
* The next sequence assumes those bits remain unchanged between the
* read and the write.
*/
reg = gsi_reg(gsi, CH_C_SCRATCH_3);
offset = reg_n_offset(reg, channel_id);
val = ioread32(gsi->virt + offset);
val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
iowrite32(val, gsi->virt + offset);
/* All done! */
}
static int __gsi_channel_start(struct gsi_channel *channel, bool resume)
{
struct gsi *gsi = channel->gsi;
int ret;
/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
if (resume && gsi->version < IPA_VERSION_4_0)
return 0;
mutex_lock(&gsi->mutex);
ret = gsi_channel_start_command(channel);
mutex_unlock(&gsi->mutex);
return ret;
}
/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
int ret;
/* Enable NAPI and the completion interrupt */
napi_enable(&channel->napi);
gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);
ret = __gsi_channel_start(channel, false);
if (ret) {
gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
napi_disable(&channel->napi);
}
return ret;
}
static int gsi_channel_stop_retry(struct gsi_channel *channel)
{
u32 retries = GSI_CHANNEL_STOP_RETRIES;
int ret;
do {
ret = gsi_channel_stop_command(channel);
if (ret != -EAGAIN)
break;
usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
} while (retries--);
return ret;
}
static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend)
{
struct gsi *gsi = channel->gsi;
int ret;
/* Wait for any underway transactions to complete before stopping. */
gsi_channel_trans_quiesce(channel);
/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
if (suspend && gsi->version < IPA_VERSION_4_0)
return 0;
mutex_lock(&gsi->mutex);
ret = gsi_channel_stop_retry(channel);
mutex_unlock(&gsi->mutex);
return ret;
}
/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
int ret;
ret = __gsi_channel_stop(channel, false);
if (ret)
return ret;
/* Disable the completion interrupt and NAPI if successful */
gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
napi_disable(&channel->napi);
return 0;
}
/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
mutex_lock(&gsi->mutex);
gsi_channel_reset_command(channel);
/* Due to a hardware quirk we may need to reset RX channels twice. */
if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
gsi_channel_reset_command(channel);
/* Hardware assumes this is 0 following reset */
channel->tre_ring.index = 0;
gsi_channel_program(channel, doorbell);
gsi_channel_trans_cancel_pending(channel);
mutex_unlock(&gsi->mutex);
}
/* Stop a started channel for suspend */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
int ret;
ret = __gsi_channel_stop(channel, true);
if (ret)
return ret;
/* Ensure NAPI polling has finished. */
napi_synchronize(&channel->napi);
return 0;
}
/* Resume a suspended channel (starting if stopped) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
return __gsi_channel_start(channel, true);
}
/* Prevent all GSI interrupts while suspended */
void gsi_suspend(struct gsi *gsi)
{
disable_irq(gsi->irq);
}
/* Allow all GSI interrupts again when resuming */
void gsi_resume(struct gsi *gsi)
{
enable_irq(gsi->irq);
}
void gsi_trans_tx_committed(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
channel->trans_count++;
channel->byte_count += trans->len;
trans->trans_count = channel->trans_count;
trans->byte_count = channel->byte_count;
}
void gsi_trans_tx_queued(struct gsi_trans *trans)
{
u32 channel_id = trans->channel_id;
struct gsi *gsi = trans->gsi;
struct gsi_channel *channel;
u32 trans_count;
u32 byte_count;
channel = &gsi->channel[channel_id];
byte_count = channel->byte_count - channel->queued_byte_count;
trans_count = channel->trans_count - channel->queued_trans_count;
channel->queued_byte_count = channel->byte_count;
channel->queued_trans_count = channel->trans_count;
ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
}
/**
* gsi_trans_tx_completed() - Report completed TX transactions
* @trans: TX channel transaction that has completed
*
* Report that a transaction on a TX channel has completed. At the time a
* transaction is committed, we record *in the transaction* its channel's
* committed transaction and byte counts. Transactions are completed in
* order, and the difference between the channel's byte/transaction count
* when the transaction was committed and when it completes tells us
* exactly how much data has been transferred while the transaction was
* pending.
*
* We report this information to the network stack, which uses it to manage
* the rate at which data is sent to hardware.
*/
static void gsi_trans_tx_completed(struct gsi_trans *trans)
{
u32 channel_id = trans->channel_id;
struct gsi *gsi = trans->gsi;
struct gsi_channel *channel;
u32 trans_count;
u32 byte_count;
channel = &gsi->channel[channel_id];
trans_count = trans->trans_count - channel->compl_trans_count;
byte_count = trans->byte_count - channel->compl_byte_count;
channel->compl_trans_count += trans_count;
channel->compl_byte_count += byte_count;
ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
}
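/* As a worked example: if this transaction was committed when the channel
 * had committed 12 transactions totalling 7168 bytes, and the channel's
 * completed counts were still 10 transactions and 4096 bytes, then 2
 * transactions and 3072 bytes are reported as newly completed.
 */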
/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
const struct reg *reg;
u32 channel_mask;
reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ);
channel_mask = ioread32(gsi->virt + reg_offset(reg));
reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
iowrite32(channel_mask, gsi->virt + reg_offset(reg));
while (channel_mask) {
u32 channel_id = __ffs(channel_mask);
channel_mask ^= BIT(channel_id);
complete(&gsi->completion);
}
}
/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
const struct reg *reg;
u32 event_mask;
reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ);
event_mask = ioread32(gsi->virt + reg_offset(reg));
reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
iowrite32(event_mask, gsi->virt + reg_offset(reg));
while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
event_mask ^= BIT(evt_ring_id);
complete(&gsi->completion);
}
}
/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
if (code == GSI_OUT_OF_RESOURCES) {
dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
complete(&gsi->completion);
return;
}
/* Report, but otherwise ignore all other error codes */
dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
channel_id, err_ee, code);
}
/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
if (code == GSI_OUT_OF_RESOURCES) {
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
u32 channel_id = gsi_channel_id(evt_ring->channel);
complete(&gsi->completion);
dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
channel_id);
return;
}
/* Report, but otherwise ignore all other error codes */
dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
evt_ring_id, err_ee, code);
}
/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
const struct reg *log_reg;
const struct reg *clr_reg;
enum gsi_err_type type;
enum gsi_err_code code;
u32 offset;
u32 which;
u32 val;
u32 ee;
/* Get the logged error, then reinitialize the log */
log_reg = gsi_reg(gsi, ERROR_LOG);
offset = reg_offset(log_reg);
val = ioread32(gsi->virt + offset);
iowrite32(0, gsi->virt + offset);
clr_reg = gsi_reg(gsi, ERROR_LOG_CLR);
iowrite32(~0, gsi->virt + reg_offset(clr_reg));
/* Parse the error value */
ee = reg_decode(log_reg, ERR_EE, val);
type = reg_decode(log_reg, ERR_TYPE, val);
which = reg_decode(log_reg, ERR_VIRT_IDX, val);
code = reg_decode(log_reg, ERR_CODE, val);
if (type == GSI_ERR_TYPE_CHAN)
gsi_isr_glob_chan_err(gsi, ee, which, code);
else if (type == GSI_ERR_TYPE_EVT)
gsi_isr_glob_evt_err(gsi, ee, which, code);
else /* type GSI_ERR_TYPE_GLOB should be fatal */
dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}
/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
const struct reg *reg;
u32 result;
u32 val;
/* This interrupt is used to handle completions of GENERIC GSI
* commands. We use these to allocate and halt channels on the
* modem's behalf due to a hardware quirk on IPA v4.2. The modem
* "owns" channels even when the AP allocates them, and have no
* way of knowing whether a modem channel's state has been changed.
*
* We also use GENERIC commands to enable/disable channel flow
* control for IPA v4.2+.
*
* It is recommended that we halt the modem channels we allocated
* when shutting down, but it's possible the channel isn't running
* at the time we issue the HALT command. We'll get an error in
* that case, but it's harmless (the channel is already halted).
* Similarly, we could get an error back when updating flow control
* on a channel because it's not in the proper state.
*
 * In either case, we silently ignore an INCORRECT_CHANNEL_STATE
* error if we receive it.
*/
reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
val = ioread32(gsi->virt + reg_offset(reg));
result = reg_decode(reg, GENERIC_EE_RESULT, val);
switch (result) {
case GENERIC_EE_SUCCESS:
case GENERIC_EE_INCORRECT_CHANNEL_STATE:
gsi->result = 0;
break;
case GENERIC_EE_RETRY:
gsi->result = -EAGAIN;
break;
default:
dev_err(gsi->dev, "global INT1 generic result %u\n", result);
gsi->result = -EIO;
break;
}
complete(&gsi->completion);
}
/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
const struct reg *reg;
u32 val;
reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_STTS);
val = ioread32(gsi->virt + reg_offset(reg));
if (val & ERROR_INT)
gsi_isr_glob_err(gsi);
reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_CLR);
iowrite32(val, gsi->virt + reg_offset(reg));
val &= ~ERROR_INT;
if (val & GP_INT1) {
val ^= GP_INT1;
gsi_isr_gp_int1(gsi);
}
if (val)
dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}
/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
const struct reg *reg;
u32 event_mask;
reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ);
event_mask = ioread32(gsi->virt + reg_offset(reg));
gsi_irq_ieob_disable(gsi, event_mask);
reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_CLR);
iowrite32(event_mask, gsi->virt + reg_offset(reg));
while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
event_mask ^= BIT(evt_ring_id);
napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
}
}
/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
struct device *dev = gsi->dev;
const struct reg *reg;
u32 val;
reg = gsi_reg(gsi, CNTXT_GSI_IRQ_STTS);
val = ioread32(gsi->virt + reg_offset(reg));
reg = gsi_reg(gsi, CNTXT_GSI_IRQ_CLR);
iowrite32(val, gsi->virt + reg_offset(reg));
dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}
/**
* gsi_isr() - Top level GSI interrupt service routine
* @irq: Interrupt number (ignored)
* @dev_id: GSI pointer supplied to request_irq()
*
* This is the main handler function registered for the GSI IRQ. Each type
* of interrupt has a separate handler function that is called from here.
*/
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
struct gsi *gsi = dev_id;
const struct reg *reg;
u32 intr_mask;
u32 cnt = 0;
u32 offset;
reg = gsi_reg(gsi, CNTXT_TYPE_IRQ);
offset = reg_offset(reg);
/* enum gsi_irq_type_id defines GSI interrupt types */
while ((intr_mask = ioread32(gsi->virt + offset))) {
/* intr_mask contains bitmask of pending GSI interrupts */
do {
u32 gsi_intr = BIT(__ffs(intr_mask));
intr_mask ^= gsi_intr;
/* Note: the IRQ condition for each type is cleared
* when the type-specific register is updated.
*/
switch (gsi_intr) {
case GSI_CH_CTRL:
gsi_isr_chan_ctrl(gsi);
break;
case GSI_EV_CTRL:
gsi_isr_evt_ctrl(gsi);
break;
case GSI_GLOB_EE:
gsi_isr_glob_ee(gsi);
break;
case GSI_IEOB:
gsi_isr_ieob(gsi);
break;
case GSI_GENERAL:
gsi_isr_general(gsi);
break;
default:
dev_err(gsi->dev,
"unrecognized interrupt type 0x%08x\n",
gsi_intr);
break;
}
} while (intr_mask);
if (++cnt > GSI_ISR_MAX_ITER) {
dev_err(gsi->dev, "interrupt flood\n");
break;
}
}
return IRQ_HANDLED;
}
/* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */
static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
int ret;
ret = platform_get_irq_byname(pdev, "gsi");
if (ret <= 0)
return ret ? : -EINVAL;
gsi->irq = ret;
return 0;
}
/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *
gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
{
u32 channel_id = event->chid;
struct gsi_channel *channel;
struct gsi_trans *trans;
u32 tre_offset;
u32 tre_index;
channel = &gsi->channel[channel_id];
if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
return NULL;
/* Event xfer_ptr records the TRE it's associated with */
tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
trans = gsi_channel_trans_mapped(channel, tre_index);
if (WARN(!trans, "channel %u event with no transaction\n", channel_id))
return NULL;
return trans;
}
/**
* gsi_evt_ring_update() - Update transaction state from hardware
* @gsi: GSI pointer
* @evt_ring_id: Event ring ID
* @index: Event index in ring reported by hardware
*
* Events for RX channels contain the actual number of bytes received into
* the buffer. Every event has a transaction associated with it, and here
* we update transactions to record their actual received lengths.
*
* When an event for a TX channel arrives we use information in the
* transaction to report the number of requests and bytes that have
* been transferred.
*
* This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked. The ring's index field
 * indicates the first entry in need of processing. The @index provided
 * is the first *unfilled* event in the ring (following the last filled
 * one).
*
* Events are sequential within the event ring, and transactions are
* sequential within the transaction array.
*
* Note that @index always refers to an element *within* the event ring.
*/
static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct gsi_ring *ring = &evt_ring->ring;
struct gsi_event *event_done;
struct gsi_event *event;
u32 event_avail;
u32 old_index;
/* Starting with the oldest un-processed event, determine which
* transaction (and which channel) is associated with the event.
* For RX channels, update each completed transaction with the
* number of bytes that were actually received. For TX channels
* associated with a network device, report to the network stack
* the number of transfers and bytes this completion represents.
*/
old_index = ring->index;
event = gsi_ring_virt(ring, old_index);
/* Compute the number of events to process before we wrap,
* and determine when we'll be done processing events.
*/
event_avail = ring->count - old_index % ring->count;
event_done = gsi_ring_virt(ring, index);
do {
struct gsi_trans *trans;
trans = gsi_event_trans(gsi, event);
if (!trans)
return;
if (trans->direction == DMA_FROM_DEVICE)
trans->len = __le16_to_cpu(event->len);
else
gsi_trans_tx_completed(trans);
gsi_trans_move_complete(trans);
/* Move on to the next event and transaction */
if (--event_avail)
event++;
else
event = gsi_ring_virt(ring, 0);
} while (event != event_done);
/* Tell the hardware we've handled these events */
gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
}
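/* Illustrative sketch (not part of the driver): the wrap-around arithmetic
 * used in gsi_evt_ring_update() above.  With a ring of ex_count entries and
 * a starting slot ex_old, this many events can be consumed before the walk
 * must restart at slot 0.  The ex_* names and values are invented.
 */
static inline u32 ex_events_before_wrap(u32 ex_count, u32 ex_old)
{
	/* e.g. ex_count = 16, ex_old = 13: 3 events, then wrap to slot 0 */
	return ex_count - ex_old % ex_count;
}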
/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
u32 size = count * GSI_RING_ELEMENT_SIZE;
struct device *dev = gsi->dev;
dma_addr_t addr;
/* Hardware requires a 2^n ring size, with alignment equal to size.
* The DMA address returned by dma_alloc_coherent() is guaranteed to
* be a power-of-2 number of pages, which satisfies the requirement.
*/
ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
if (!ring->virt)
return -ENOMEM;
ring->addr = addr;
ring->count = count;
ring->index = 0;
return 0;
}
/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
size_t size = ring->count * GSI_RING_ELEMENT_SIZE;
dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}
/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
u32 evt_ring_id;
if (gsi->event_bitmap == ~0U) {
dev_err(gsi->dev, "event rings exhausted\n");
return -ENOSPC;
}
evt_ring_id = ffz(gsi->event_bitmap);
gsi->event_bitmap |= BIT(evt_ring_id);
return (int)evt_ring_id;
}
/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
gsi->event_bitmap &= ~BIT(evt_ring_id);
}
/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
struct gsi_ring *tre_ring = &channel->tre_ring;
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
const struct reg *reg;
u32 val;
reg = gsi_reg(gsi, CH_C_DOORBELL_0);
/* Note: index *must* be used modulo the ring count here */
val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
}
/* Consult hardware, move newly completed transactions to completed state */
void gsi_channel_update(struct gsi_channel *channel)
{
u32 evt_ring_id = channel->evt_ring_id;
struct gsi *gsi = channel->gsi;
struct gsi_evt_ring *evt_ring;
struct gsi_trans *trans;
struct gsi_ring *ring;
const struct reg *reg;
u32 offset;
u32 index;
evt_ring = &gsi->evt_ring[evt_ring_id];
ring = &evt_ring->ring;
/* See if there's anything new to process; if not, we're done. Note
* that index always refers to an entry *within* the event ring.
*/
reg = gsi_reg(gsi, EV_CH_E_CNTXT_4);
offset = reg_n_offset(reg, evt_ring_id);
index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
if (index == ring->index % ring->count)
return;
/* Get the transaction for the latest completed event. */
trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
if (!trans)
return;
/* For RX channels, update each completed transaction with the number
* of bytes that were actually received. For TX channels, report
* the number of transactions and bytes this completion represents
* up the network stack.
*/
gsi_evt_ring_update(gsi, evt_ring_id, index);
}
/**
* gsi_channel_poll_one() - Return a single completed transaction on a channel
* @channel: Channel to be polled
*
* Return: Transaction pointer, or null if none are available
*
* This function returns the first of a channel's completed transactions.
* If no transactions are in completed state, the hardware is consulted to
* determine whether any new transactions have completed. If so, they're
* moved to completed state and the first such transaction is returned.
* If there are no more completed transactions, a null pointer is returned.
*/
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
struct gsi_trans *trans;
/* Get the first completed transaction */
trans = gsi_channel_trans_complete(channel);
if (trans)
gsi_trans_move_polled(trans);
return trans;
}
/**
* gsi_channel_poll() - NAPI poll function for a channel
* @napi: NAPI structure for the channel
* @budget: Budget supplied by NAPI core
*
* Return: Number of items polled (<= budget)
*
* Single transactions completed by hardware are polled until either
* the budget is exhausted, or there are no more. Each transaction
* polled is passed to gsi_trans_complete(), to perform remaining
* completion processing and retire/free the transaction.
*/
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
struct gsi_channel *channel;
int count;
channel = container_of(napi, struct gsi_channel, napi);
for (count = 0; count < budget; count++) {
struct gsi_trans *trans;
trans = gsi_channel_poll_one(channel);
if (!trans)
break;
gsi_trans_complete(trans);
}
if (count < budget && napi_complete(napi))
gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);
return count;
}
/* The event bitmap represents which event ids are available for allocation.
* Set bits are not available, clear bits can be used. This function
* initializes the map so all events supported by the hardware are available,
* then precludes any reserved events from being allocated.
*/
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);
event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);
return event_bitmap;
}
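/* Illustrative sketch (not part of the driver): with a hypothetical
 * evt_ring_max of 20, the helper below yields 0xfff00000, i.e. event ids
 * 20-31 start out unavailable while ids 0-19 remain allocatable; the MHI
 * range reserved above is then marked unavailable as well.  This only
 * restates the first step of gsi_event_bitmap_init() with a fixed width.
 */
static inline u32 ex_event_bitmap_sketch(u32 ex_evt_ring_max)
{
	return (u32)GENMASK(31, ex_evt_ring_max);
}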
/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
u32 evt_ring_id = channel->evt_ring_id;
int ret;
if (!gsi_channel_initialized(channel))
return 0;
ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
if (ret)
return ret;
gsi_evt_ring_program(gsi, evt_ring_id);
ret = gsi_channel_alloc_command(gsi, channel_id);
if (ret)
goto err_evt_ring_de_alloc;
gsi_channel_program(channel, true);
if (channel->toward_ipa)
netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
gsi_channel_poll);
else
netif_napi_add(&gsi->dummy_dev, &channel->napi,
gsi_channel_poll);
return 0;
err_evt_ring_de_alloc:
/* We've done nothing with the event ring yet so don't reset */
gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
return ret;
}
/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
u32 evt_ring_id = channel->evt_ring_id;
if (!gsi_channel_initialized(channel))
return;
netif_napi_del(&channel->napi);
gsi_channel_de_alloc_command(gsi, channel_id);
gsi_evt_ring_reset_command(gsi, evt_ring_id);
gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}
/* We use generic commands only to operate on modem channels. We don't have
* the ability to determine channel state for a modem channel, so we simply
* issue the command and wait for it to complete.
*/
static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
enum gsi_generic_cmd_opcode opcode,
u8 params)
{
const struct reg *reg;
bool timeout;
u32 offset;
u32 val;
/* The error global interrupt type is always enabled (until we tear
* down), so we will keep it enabled.
*
* A generic EE command completes with a GSI global interrupt of
* type GP_INT1. We only perform one generic command at a time
* (to allocate, halt, or enable/disable flow control on a modem
* channel), and only from this function. So we enable the GP_INT1
* IRQ type here, and disable it again after the command completes.
*/
reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
val = ERROR_INT | GP_INT1;
iowrite32(val, gsi->virt + reg_offset(reg));
/* First zero the result code field */
reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
offset = reg_offset(reg);
val = ioread32(gsi->virt + offset);
val &= ~reg_fmask(reg, GENERIC_EE_RESULT);
iowrite32(val, gsi->virt + offset);
/* Now issue the command */
reg = gsi_reg(gsi, GENERIC_CMD);
val = reg_encode(reg, GENERIC_OPCODE, opcode);
val |= reg_encode(reg, GENERIC_CHID, channel_id);
val |= reg_encode(reg, GENERIC_EE, GSI_EE_MODEM);
if (gsi->version >= IPA_VERSION_4_11)
val |= reg_encode(reg, GENERIC_PARAMS, params);
timeout = !gsi_command(gsi, reg_offset(reg), val);
/* Disable the GP_INT1 IRQ type again */
reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));
if (!timeout)
return gsi->result;
dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
opcode, channel_id);
return -ETIMEDOUT;
}
static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
return gsi_generic_command(gsi, channel_id,
GSI_GENERIC_ALLOCATE_CHANNEL, 0);
}
static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
int ret;
do
ret = gsi_generic_command(gsi, channel_id,
GSI_GENERIC_HALT_CHANNEL, 0);
while (ret == -EAGAIN && retries--);
if (ret)
dev_err(gsi->dev, "error %d halting modem channel %u\n",
ret, channel_id);
}
/* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
void
gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
{
u32 retries = 0;
u32 command;
int ret;
command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
: GSI_GENERIC_DISABLE_FLOW_CONTROL;
/* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable
* is underway. In this case we need to retry the command.
*/
if (!enable && gsi->version >= IPA_VERSION_4_11)
retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;
do
ret = gsi_generic_command(gsi, channel_id, command, 0);
while (ret == -EAGAIN && retries--);
if (ret)
dev_err(gsi->dev,
"error %d %sabling mode channel %u flow control\n",
ret, enable ? "en" : "dis", channel_id);
}
/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
u32 channel_id = 0;
u32 mask;
int ret;
gsi_irq_enable(gsi);
mutex_lock(&gsi->mutex);
do {
ret = gsi_channel_setup_one(gsi, channel_id);
if (ret)
goto err_unwind;
} while (++channel_id < gsi->channel_count);
/* Make sure no channels were defined that hardware does not support */
while (channel_id < GSI_CHANNEL_COUNT_MAX) {
struct gsi_channel *channel = &gsi->channel[channel_id++];
if (!gsi_channel_initialized(channel))
continue;
ret = -EINVAL;
dev_err(gsi->dev, "channel %u not supported by hardware\n",
channel_id - 1);
channel_id = gsi->channel_count;
goto err_unwind;
}
/* Allocate modem channels if necessary */
mask = gsi->modem_channel_bitmap;
while (mask) {
u32 modem_channel_id = __ffs(mask);
ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
if (ret)
goto err_unwind_modem;
/* Clear bit from mask only after success (for unwind) */
mask ^= BIT(modem_channel_id);
}
mutex_unlock(&gsi->mutex);
return 0;
err_unwind_modem:
/* Compute which modem channels need to be deallocated */
mask ^= gsi->modem_channel_bitmap;
while (mask) {
channel_id = __fls(mask);
mask ^= BIT(channel_id);
gsi_modem_channel_halt(gsi, channel_id);
}
err_unwind:
while (channel_id--)
gsi_channel_teardown_one(gsi, channel_id);
mutex_unlock(&gsi->mutex);
gsi_irq_disable(gsi);
return ret;
}
/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
u32 mask = gsi->modem_channel_bitmap;
u32 channel_id;
mutex_lock(&gsi->mutex);
while (mask) {
channel_id = __fls(mask);
mask ^= BIT(channel_id);
gsi_modem_channel_halt(gsi, channel_id);
}
channel_id = gsi->channel_count - 1;
do
gsi_channel_teardown_one(gsi, channel_id);
while (channel_id--);
mutex_unlock(&gsi->mutex);
gsi_irq_disable(gsi);
}
/* Turn off all GSI interrupts initially */
static int gsi_irq_setup(struct gsi *gsi)
{
const struct reg *reg;
int ret;
/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
reg = gsi_reg(gsi, CNTXT_INTSET);
iowrite32(reg_bit(reg, INTYPE), gsi->virt + reg_offset(reg));
/* Disable all interrupt types */
gsi_irq_type_update(gsi, 0);
/* Clear all type-specific interrupt masks */
reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
iowrite32(0, gsi->virt + reg_offset(reg));
reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
iowrite32(0, gsi->virt + reg_offset(reg));
reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
iowrite32(0, gsi->virt + reg_offset(reg));
reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
iowrite32(0, gsi->virt + reg_offset(reg));
/* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
if (gsi->version > IPA_VERSION_3_1) {
reg = gsi_reg(gsi, INTER_EE_SRC_CH_IRQ_MSK);
iowrite32(0, gsi->virt + reg_offset(reg));
reg = gsi_reg(gsi, INTER_EE_SRC_EV_CH_IRQ_MSK);
iowrite32(0, gsi->virt + reg_offset(reg));
}
reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
iowrite32(0, gsi->virt + reg_offset(reg));
ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
if (ret)
dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
return ret;
}
static void gsi_irq_teardown(struct gsi *gsi)
{
free_irq(gsi->irq, gsi);
}
/* Get # supported channel and event rings; there is no gsi_ring_teardown() */
static int gsi_ring_setup(struct gsi *gsi)
{
struct device *dev = gsi->dev;
const struct reg *reg;
u32 count;
u32 val;
if (gsi->version < IPA_VERSION_3_5_1) {
/* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
return 0;
}
reg = gsi_reg(gsi, HW_PARAM_2);
val = ioread32(gsi->virt + reg_offset(reg));
count = reg_decode(reg, NUM_CH_PER_EE, val);
if (!count) {
dev_err(dev, "GSI reports zero channels supported\n");
return -EINVAL;
}
if (count > GSI_CHANNEL_COUNT_MAX) {
dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
GSI_CHANNEL_COUNT_MAX, count);
count = GSI_CHANNEL_COUNT_MAX;
}
gsi->channel_count = count;
if (gsi->version < IPA_VERSION_5_0) {
count = reg_decode(reg, NUM_EV_PER_EE, val);
} else {
reg = gsi_reg(gsi, HW_PARAM_4);
count = reg_decode(reg, EV_PER_EE, val);
}
if (!count) {
dev_err(dev, "GSI reports zero event rings supported\n");
return -EINVAL;
}
if (count > GSI_EVT_RING_COUNT_MAX) {
dev_warn(dev,
"limiting to %u event rings; hardware supports %u\n",
GSI_EVT_RING_COUNT_MAX, count);
count = GSI_EVT_RING_COUNT_MAX;
}
gsi->evt_ring_count = count;
return 0;
}
/* Setup function for GSI. GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
const struct reg *reg;
u32 val;
int ret;
/* Here is where we first touch the GSI hardware */
reg = gsi_reg(gsi, GSI_STATUS);
val = ioread32(gsi->virt + reg_offset(reg));
if (!(val & reg_bit(reg, ENABLED))) {
dev_err(gsi->dev, "GSI has not been enabled\n");
return -EIO;
}
ret = gsi_irq_setup(gsi);
if (ret)
return ret;
ret = gsi_ring_setup(gsi); /* No matching teardown required */
if (ret)
goto err_irq_teardown;
/* Initialize the error log */
reg = gsi_reg(gsi, ERROR_LOG);
iowrite32(0, gsi->virt + reg_offset(reg));
ret = gsi_channel_setup(gsi);
if (ret)
goto err_irq_teardown;
return 0;
err_irq_teardown:
gsi_irq_teardown(gsi);
return ret;
}
/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
gsi_channel_teardown(gsi);
gsi_irq_teardown(gsi);
}
/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
struct gsi *gsi = channel->gsi;
struct gsi_evt_ring *evt_ring;
int ret;
ret = gsi_evt_ring_id_alloc(gsi);
if (ret < 0)
return ret;
channel->evt_ring_id = ret;
evt_ring = &gsi->evt_ring[channel->evt_ring_id];
evt_ring->channel = channel;
ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
if (!ret)
return 0; /* Success! */
dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
ret, gsi_channel_id(channel));
gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
return ret;
}
/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
u32 evt_ring_id = channel->evt_ring_id;
struct gsi *gsi = channel->gsi;
struct gsi_evt_ring *evt_ring;
evt_ring = &gsi->evt_ring[evt_ring_id];
gsi_ring_free(gsi, &evt_ring->ring);
gsi_evt_ring_id_free(gsi, evt_ring_id);
}
static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
const struct ipa_gsi_endpoint_data *data)
{
const struct gsi_channel_data *channel_data;
u32 channel_id = data->channel_id;
struct device *dev = gsi->dev;
/* Make sure channel ids are in the range driver supports */
if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
dev_err(dev, "bad channel id %u; must be less than %u\n",
channel_id, GSI_CHANNEL_COUNT_MAX);
return false;
}
if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
return false;
}
if (command && !data->toward_ipa) {
dev_err(dev, "command channel %u is not TX\n", channel_id);
return false;
}
channel_data = &data->channel;
if (!channel_data->tlv_count ||
channel_data->tlv_count > GSI_TLV_MAX) {
dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
channel_id, channel_data->tlv_count, GSI_TLV_MAX);
return false;
}
if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) {
dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n",
channel_id, IPA_COMMAND_TRANS_TRE_MAX,
channel_data->tlv_count);
return false;
}
/* We have to allow at least one maximally-sized transaction to
* be outstanding (which would use tlv_count TREs). Given how
* gsi_channel_tre_max() is computed, tre_count has to be almost
* twice the TLV FIFO size to satisfy this requirement.
*/
if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) {
dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
channel_id, channel_data->tlv_count,
channel_data->tre_count);
return false;
}
if (!is_power_of_2(channel_data->tre_count)) {
dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
channel_id, channel_data->tre_count);
return false;
}
if (!is_power_of_2(channel_data->event_count)) {
dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
channel_id, channel_data->event_count);
return false;
}
return true;
}
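/* Illustrative sketch (not part of the driver): the sizing rule checked
 * above, with invented numbers.  A hypothetical tlv_count of 16 means a
 * maximally-sized transaction needs 16 TREs, so tre_count must be at least
 * 2 * 16 - 1 = 31; since tre_count must also be a power of 2, the smallest
 * acceptable value in that case would be 32.
 */
static inline bool ex_channel_sizing_ok(u32 ex_tre_count, u32 ex_tlv_count)
{
	return is_power_of_2(ex_tre_count) &&
	       ex_tre_count >= 2 * ex_tlv_count - 1;
}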
/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data,
bool command)
{
struct gsi_channel *channel;
u32 tre_count;
int ret;
if (!gsi_channel_data_valid(gsi, command, data))
return -EINVAL;
/* Worst case we need an event for every outstanding TRE */
if (data->channel.tre_count > data->channel.event_count) {
tre_count = data->channel.event_count;
dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
data->channel_id, tre_count);
} else {
tre_count = data->channel.tre_count;
}
channel = &gsi->channel[data->channel_id];
memset(channel, 0, sizeof(*channel));
channel->gsi = gsi;
channel->toward_ipa = data->toward_ipa;
channel->command = command;
channel->trans_tre_max = data->channel.tlv_count;
channel->tre_count = tre_count;
channel->event_count = data->channel.event_count;
ret = gsi_channel_evt_ring_init(channel);
if (ret)
goto err_clear_gsi;
ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
if (ret) {
dev_err(gsi->dev, "error %d allocating channel %u ring\n",
ret, data->channel_id);
goto err_channel_evt_ring_exit;
}
ret = gsi_channel_trans_init(gsi, data->channel_id);
if (ret)
goto err_ring_free;
if (command) {
u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
ret = ipa_cmd_pool_init(channel, tre_max);
}
if (!ret)
return 0; /* Success! */
gsi_channel_trans_exit(channel);
err_ring_free:
gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
channel->gsi = NULL; /* Mark it not (fully) initialized */
return ret;
}
/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
if (!gsi_channel_initialized(channel))
return;
if (channel->command)
ipa_cmd_pool_exit(channel);
gsi_channel_trans_exit(channel);
gsi_ring_free(channel->gsi, &channel->tre_ring);
gsi_channel_evt_ring_exit(channel);
}
/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
bool modem_alloc;
int ret = 0;
u32 i;
/* IPA v4.2 requires the AP to allocate channels for the modem */
modem_alloc = gsi->version == IPA_VERSION_4_2;
gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
gsi->ieob_enabled_bitmap = 0;
/* The endpoint data array is indexed by endpoint name */
for (i = 0; i < count; i++) {
bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
if (ipa_gsi_endpoint_data_empty(&data[i]))
continue; /* Skip over empty slots */
/* Mark modem channels to be allocated (hardware workaround) */
if (data[i].ee_id == GSI_EE_MODEM) {
if (modem_alloc)
gsi->modem_channel_bitmap |=
BIT(data[i].channel_id);
continue;
}
ret = gsi_channel_init_one(gsi, &data[i], command);
if (ret)
goto err_unwind;
}
return ret;
err_unwind:
while (i--) {
if (ipa_gsi_endpoint_data_empty(&data[i]))
continue;
if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
continue;
}
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
}
return ret;
}
/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
do
gsi_channel_exit_one(&gsi->channel[channel_id]);
while (channel_id--);
gsi->modem_channel_bitmap = 0;
}
/* Init function for GSI. GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
enum ipa_version version, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
int ret;
gsi_validate_build();
gsi->dev = &pdev->dev;
gsi->version = version;
/* GSI uses NAPI on all channels. Create a dummy network device
* for the channel NAPI contexts to be associated with.
*/
init_dummy_netdev(&gsi->dummy_dev);
init_completion(&gsi->completion);
ret = gsi_reg_init(gsi, pdev);
if (ret)
return ret;
ret = gsi_irq_init(gsi, pdev); /* No matching exit required */
if (ret)
goto err_reg_exit;
ret = gsi_channel_init(gsi, count, data);
if (ret)
goto err_reg_exit;
mutex_init(&gsi->mutex);
return 0;
err_reg_exit:
gsi_reg_exit(gsi);
return ret;
}
/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
gsi_reg_exit(gsi);
}
/* The maximum number of outstanding TREs on a channel. This limits
* a channel's maximum number of transactions outstanding (worst case
* is one TRE per transaction).
*
* The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them. But in practice,
* doing that led to the hardware reporting exhaustion of event ring
* slots for writing completion information. So the hardware limit
* would be (tre_count - 1).
*
* We reduce it a bit further though. Transaction resource pools are
* sized to be a little larger than this maximum, to allow resource
* allocations to always be contiguous. The number of entries in a
* TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it. Reducing the
* maximum number of outstanding TREs allows the number of entries in
* a pool to avoid crossing that power-of-2 boundary, and this can
* substantially reduce pool memory requirements. The number we
* reduce it by matches the number added in gsi_trans_pool_init().
*/
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
/* Hardware limit is channel->tre_count - 1 */
return channel->tre_count - (channel->trans_tre_max - 1);
}
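/* Illustrative sketch (not part of the driver): with a hypothetical ring of
 * 256 TREs and trans_tre_max of 16, the hardware limit would be 255
 * outstanding TREs, but the computation above yields 256 - (16 - 1) = 241,
 * which keeps the slightly-oversized transaction pool from crossing the
 * next power-of-2 allocation boundary.
 */
static inline u32 ex_tre_max_sketch(u32 ex_tre_count, u32 ex_trans_tre_max)
{
	return ex_tre_count - (ex_trans_tre_max - 1);
}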
| linux-master | drivers/net/ipa/gsi.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/bitops.h>
#include "linux/soc/qcom/qcom_aoss.h"
#include "ipa.h"
#include "ipa_power.h"
#include "ipa_endpoint.h"
#include "ipa_modem.h"
#include "ipa_data.h"
/**
* DOC: IPA Power Management
*
* The IPA hardware is enabled when the IPA core clock and all the
* interconnects (buses) it depends on are enabled. Runtime power
* management is used to determine whether the core clock and
* interconnects are enabled, and if not in use to be suspended
* automatically.
*
* The core clock currently runs at a fixed clock rate when enabled,
 * and all interconnects use a fixed average and peak bandwidth.
*/
#define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */
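/* Illustrative sketch (not part of the driver): the usual runtime-PM
 * pattern a caller would follow so that the core clock and interconnects
 * described above are only powered while needed.  The function below is
 * invented for illustration; the pm_runtime_* calls are the standard
 * kernel API, but this is not how any particular IPA caller is written.
 */
static inline int ex_do_work_powered(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);	/* may invoke ipa_runtime_resume() */
	if (ret < 0)
		return ret;

	/* ... access IPA hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* idle after IPA_AUTOSUSPEND_DELAY */

	return 0;
}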
/**
* enum ipa_power_flag - IPA power flags
* @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
* @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended
* @IPA_POWER_FLAG_STOPPED: Modem TX is disabled by ipa_start_xmit()
* @IPA_POWER_FLAG_STARTED: Modem TX was enabled by ipa_runtime_resume()
* @IPA_POWER_FLAG_COUNT: Number of defined power flags
*/
enum ipa_power_flag {
IPA_POWER_FLAG_RESUMED,
IPA_POWER_FLAG_SYSTEM,
IPA_POWER_FLAG_STOPPED,
IPA_POWER_FLAG_STARTED,
IPA_POWER_FLAG_COUNT, /* Last; not a flag */
};
/**
* struct ipa_power - IPA power management information
* @dev: IPA device pointer
* @core: IPA core clock
* @qmp: QMP handle for AOSS communication
* @spinlock: Protects modem TX queue enable/disable
* @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
* @interconnect: Interconnect array
*/
struct ipa_power {
struct device *dev;
struct clk *core;
struct qmp *qmp;
spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
struct icc_bulk_data interconnect[];
};
/* Initialize interconnects required for IPA operation */
static int ipa_interconnect_init(struct ipa_power *power,
const struct ipa_interconnect_data *data)
{
struct icc_bulk_data *interconnect;
int ret;
u32 i;
/* Initialize our interconnect data array for bulk operations */
interconnect = &power->interconnect[0];
for (i = 0; i < power->interconnect_count; i++) {
/* interconnect->path is filled in by of_icc_bulk_get() */
interconnect->name = data->name;
interconnect->avg_bw = data->average_bandwidth;
interconnect->peak_bw = data->peak_bandwidth;
data++;
interconnect++;
}
ret = of_icc_bulk_get(power->dev, power->interconnect_count,
power->interconnect);
if (ret)
return ret;
/* All interconnects are initially disabled */
icc_bulk_disable(power->interconnect_count, power->interconnect);
/* Set the bandwidth values to be used when enabled */
ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect);
if (ret)
icc_bulk_put(power->interconnect_count, power->interconnect);
return ret;
}
/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit(struct ipa_power *power)
{
icc_bulk_put(power->interconnect_count, power->interconnect);
}
/* Enable IPA power, enabling interconnects and the core clock */
static int ipa_power_enable(struct ipa *ipa)
{
struct ipa_power *power = ipa->power;
int ret;
ret = icc_bulk_enable(power->interconnect_count, power->interconnect);
if (ret)
return ret;
ret = clk_prepare_enable(power->core);
if (ret) {
dev_err(power->dev, "error %d enabling core clock\n", ret);
icc_bulk_disable(power->interconnect_count,
power->interconnect);
}
return ret;
}
/* Inverse of ipa_power_enable() */
static void ipa_power_disable(struct ipa *ipa)
{
struct ipa_power *power = ipa->power;
clk_disable_unprepare(power->core);
icc_bulk_disable(power->interconnect_count, power->interconnect);
}
static int ipa_runtime_suspend(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
/* Endpoints aren't usable until setup is complete */
if (ipa->setup_complete) {
__clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags);
ipa_endpoint_suspend(ipa);
gsi_suspend(&ipa->gsi);
}
ipa_power_disable(ipa);
return 0;
}
static int ipa_runtime_resume(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
int ret;
ret = ipa_power_enable(ipa);
if (WARN_ON(ret < 0))
return ret;
/* Endpoints aren't usable until setup is complete */
if (ipa->setup_complete) {
gsi_resume(&ipa->gsi);
ipa_endpoint_resume(ipa);
}
return 0;
}
static int ipa_suspend(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
__set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
/* Increment the disable depth to ensure that the IRQ won't
* be re-enabled until the matching _enable call in
* ipa_resume(). We do this to ensure that the interrupt
* handler won't run whilst PM runtime is disabled.
*
* Note that disabling the IRQ is NOT the same as disabling
* irq wake. If wakeup is enabled for the IPA then the IRQ
* will still cause the system to wake up, see irq_set_irq_wake().
*/
ipa_interrupt_irq_disable(ipa);
return pm_runtime_force_suspend(dev);
}
static int ipa_resume(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_force_resume(dev);
__clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
/* Now that PM runtime is enabled again it's safe
* to turn the IRQ back on and process any data
* that was received during suspend.
*/
ipa_interrupt_irq_enable(ipa);
return ret;
}
/* Return the current IPA core clock rate */
u32 ipa_core_clock_rate(struct ipa *ipa)
{
return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
}
void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
/* To handle an IPA interrupt we will have resumed the hardware
* just to handle the interrupt, so we're done. If we are in a
* system suspend, trigger a system resume.
*/
if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags))
if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags))
pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
/* Acknowledge/clear the suspend interrupt on all endpoints */
ipa_interrupt_suspend_clear_all(ipa->interrupt);
}
/* The next few functions coordinate stopping and starting the modem
* network device transmit queue.
*
* Transmit can be running concurrent with power resume, and there's a
* chance the resume completes before the transmit path stops the queue,
* leaving the queue in a stopped state. The next two functions are used
* to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit()
 * to conditionally stop the TX queue; and ipa_power_modem_queue_wake()
* is used by ipa_runtime_resume() to conditionally restart it.
*
* Two flags and a spinlock are used. If the queue is stopped, the STOPPED
* power flag is set. And if the queue is started, the STARTED flag is set.
* The queue is only started on resume if the STOPPED flag is set. And the
* queue is only started in ipa_start_xmit() if the STARTED flag is *not*
 * set. As a result, the queue remains operational if the two activities
* happen concurrently regardless of the order they complete. The spinlock
* ensures the flag and TX queue operations are done atomically.
*
* The first function stops the modem netdev transmit queue, but only if
* the STARTED flag is *not* set. That flag is cleared if it was set.
 * If the queue is stopped, the STOPPED flag is set. This is called from
 * the transmit path (ipa_start_xmit()), as described above.
*/
void ipa_power_modem_queue_stop(struct ipa *ipa)
{
struct ipa_power *power = ipa->power;
unsigned long flags;
spin_lock_irqsave(&power->spinlock, flags);
if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) {
netif_stop_queue(ipa->modem_netdev);
__set_bit(IPA_POWER_FLAG_STOPPED, power->flags);
}
spin_unlock_irqrestore(&power->spinlock, flags);
}
/* This function starts the modem netdev transmit queue, but only if the
* STOPPED flag is set. That flag is cleared if it was set. If the queue
* was restarted, the STARTED flag is set; this allows ipa_start_xmit()
* to skip stopping the queue in the event of a race.
*/
void ipa_power_modem_queue_wake(struct ipa *ipa)
{
struct ipa_power *power = ipa->power;
unsigned long flags;
spin_lock_irqsave(&power->spinlock, flags);
if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) {
__set_bit(IPA_POWER_FLAG_STARTED, power->flags);
netif_wake_queue(ipa->modem_netdev);
}
spin_unlock_irqrestore(&power->spinlock, flags);
}
/* This function clears the STARTED flag once the TX queue is operating */
void ipa_power_modem_queue_active(struct ipa *ipa)
{
clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
}
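/* Illustrative sketch (not part of the driver): one way a transmit path
 * might use the helpers above.  This is an invented outline, not the
 * driver's ipa_start_xmit(); it only shows the intended ordering of calls
 * around the STOPPED/STARTED handshake.
 */
static inline void ex_modem_xmit_outline(struct ipa *ipa, bool powered)
{
	if (!powered) {
		/* Power not up yet: stop the queue unless a concurrent
		 * resume already restarted it (STARTED set).  The resume
		 * path calls ipa_power_modem_queue_wake() later.
		 */
		ipa_power_modem_queue_stop(ipa);
		return;
	}

	/* The queue is known to be running; drop the STARTED marker */
	ipa_power_modem_queue_active(ipa);

	/* ... hand the packet to the hardware ... */
}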
static int ipa_power_retention_init(struct ipa_power *power)
{
struct qmp *qmp = qmp_get(power->dev);
if (IS_ERR(qmp)) {
if (PTR_ERR(qmp) == -EPROBE_DEFER)
return -EPROBE_DEFER;
/* We assume any other error means it's not defined/needed */
qmp = NULL;
}
power->qmp = qmp;
return 0;
}
static void ipa_power_retention_exit(struct ipa_power *power)
{
qmp_put(power->qmp);
power->qmp = NULL;
}
/* Control register retention on power collapse */
void ipa_power_retention(struct ipa *ipa, bool enable)
{
static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
struct ipa_power *power = ipa->power;
int ret;
if (!power->qmp)
return; /* Not needed on this platform */
ret = qmp_send(power->qmp, fmt, enable ? '1' : '0');
if (ret)
dev_err(power->dev, "error %d sending QMP %sable request\n",
ret, enable ? "en" : "dis");
}
int ipa_power_setup(struct ipa *ipa)
{
int ret;
ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
ret = device_init_wakeup(&ipa->pdev->dev, true);
if (ret)
ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
return ret;
}
void ipa_power_teardown(struct ipa *ipa)
{
(void)device_init_wakeup(&ipa->pdev->dev, false);
ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
}
/* Initialize IPA power management */
struct ipa_power *
ipa_power_init(struct device *dev, const struct ipa_power_data *data)
{
struct ipa_power *power;
struct clk *clk;
size_t size;
int ret;
clk = clk_get(dev, "core");
if (IS_ERR(clk)) {
dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");
return ERR_CAST(clk);
}
ret = clk_set_rate(clk, data->core_clock_rate);
if (ret) {
dev_err(dev, "error %d setting core clock rate to %u\n",
ret, data->core_clock_rate);
goto err_clk_put;
}
size = struct_size(power, interconnect, data->interconnect_count);
power = kzalloc(size, GFP_KERNEL);
if (!power) {
ret = -ENOMEM;
goto err_clk_put;
}
power->dev = dev;
power->core = clk;
spin_lock_init(&power->spinlock);
power->interconnect_count = data->interconnect_count;
ret = ipa_interconnect_init(power, data->interconnect_data);
if (ret)
goto err_kfree;
ret = ipa_power_retention_init(power);
if (ret)
goto err_interconnect_exit;
pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
return power;
err_interconnect_exit:
ipa_interconnect_exit(power);
err_kfree:
kfree(power);
err_clk_put:
clk_put(clk);
return ERR_PTR(ret);
}
/* Inverse of ipa_power_init() */
void ipa_power_exit(struct ipa_power *power)
{
struct device *dev = power->dev;
struct clk *clk = power->core;
pm_runtime_disable(dev);
pm_runtime_dont_use_autosuspend(dev);
ipa_power_retention_exit(power);
ipa_interconnect_exit(power);
kfree(power);
clk_put(clk);
}
const struct dev_pm_ops ipa_pm_ops = {
.suspend = ipa_suspend,
.resume = ipa_resume,
.runtime_suspend = ipa_runtime_suspend,
.runtime_resume = ipa_runtime_resume,
};
| linux-master | drivers/net/ipa/ipa_power.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2020 Linaro Ltd.
*/
#include <linux/types.h>
#include "ipa_gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_data.h"
void ipa_gsi_trans_complete(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
ipa_endpoint_trans_complete(ipa->channel_map[trans->channel_id], trans);
}
void ipa_gsi_trans_release(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
ipa_endpoint_trans_release(ipa->channel_map[trans->channel_id], trans);
}
void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
u32 byte_count)
{
struct ipa *ipa = container_of(gsi, struct ipa, gsi);
struct ipa_endpoint *endpoint;
endpoint = ipa->channel_map[channel_id];
if (endpoint->netdev)
netdev_sent_queue(endpoint->netdev, byte_count);
}
void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,
u32 byte_count)
{
struct ipa *ipa = container_of(gsi, struct ipa, gsi);
struct ipa_endpoint *endpoint;
endpoint = ipa->channel_map[channel_id];
if (endpoint->netdev)
netdev_completed_queue(endpoint->netdev, count, byte_count);
}
/* Indicate whether an endpoint config data entry is "empty" */
bool ipa_gsi_endpoint_data_empty(const struct ipa_gsi_endpoint_data *data)
{
return data->ee_id == GSI_EE_AP && !data->channel.tlv_count;
}
| linux-master | drivers/net/ipa/ipa_gsi.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Linaro Ltd. */
#include <linux/types.h>
#include "../ipa.h"
#include "../ipa_reg.h"
static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
[GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
/* Bit 4 reserved */
[IPA_QMB_SELECT_CONS_EN] = BIT(5),
[IPA_QMB_SELECT_PROD_EN] = BIT(6),
[GSI_MULTI_INORDER_RD_DIS] = BIT(7),
[GSI_MULTI_INORDER_WR_DIS] = BIT(8),
[GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
[GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
[GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
[GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
[GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
[GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
[GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
[IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
[ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
[FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(21),
/* Bits 22-31 reserved */
};
REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
[CLKON_MISC] = BIT(3),
[RAM_ARB] = BIT(4),
[FTCH_HPS] = BIT(5),
[FTCH_DPS] = BIT(6),
[CLKON_HPS] = BIT(7),
[CLKON_DPS] = BIT(8),
[RX_HPS_CMDQS] = BIT(9),
[HPS_DPS_CMDQS] = BIT(10),
[DPS_TX_CMDQS] = BIT(11),
[RSRC_MNGR] = BIT(12),
[CTX_HANDLER] = BIT(13),
[ACK_MNGR] = BIT(14),
[D_DCPH] = BIT(15),
[H_DCPH] = BIT(16),
[CLKON_DCMP] = BIT(17),
[NTF_TX_CMDQS] = BIT(18),
[CLKON_TX_0] = BIT(19),
[CLKON_TX_1] = BIT(20),
[CLKON_FNR] = BIT(21),
[QSB2AXI_CMDQ_L] = BIT(22),
[AGGR_WRAPPER] = BIT(23),
[RAM_SLAVEWAY] = BIT(24),
[CLKON_QMB] = BIT(25),
[WEIGHT_ARB] = BIT(26),
[GSI_IF] = BIT(27),
[CLKON_GLOBAL] = BIT(28),
[GLOBAL_2X_CLK] = BIT(29),
[DPL_FIFO] = BIT(30),
[DRBIP] = BIT(31),
};
REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
[ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
[ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
/* Bits 22-23 reserved */
[ROUTE_DEF_RETAIN_HDR] = BIT(24),
/* Bits 25-31 reserved */
};
REG_FIELDS(ROUTE, route, 0x00000048);
static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
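/* Illustrative sketch (not part of these tables): how a field array like
 * reg_shared_mem_size_fmask is meant to be read.  Given an invented raw
 * register value of 0x12340800, the low 16 bits hold MEM_SIZE (0x0800) and
 * the high 16 bits hold MEM_BADDR (0x1234).  The driver extracts fields
 * through its reg_decode() helpers; the function below just restates the
 * masking by hand for this one register.
 */
static inline void ex_shared_mem_size_sketch(u32 ex_val, u32 *ex_size,
					     u32 *ex_baddr)
{
	*ex_size = ex_val & GENMASK(15, 0);		/* MEM_SIZE */
	*ex_baddr = (ex_val & GENMASK(31, 16)) >> 16;	/* MEM_BADDR */
}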
static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
[GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
[DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
[DMAW_MAX_BEATS_256_DIS] = BIT(11),
[PA_MASK_EN] = BIT(12),
[PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
[DUAL_TX_ENABLE] = BIT(17),
[SSPND_PA_NO_START_STATE] = BIT(18),
/* Bits 19-31 reserved */
};
REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
/* Bits 13-15 reserved */
[MAX_PROD_PIPES] = GENMASK(20, 16),
/* Bits 21-23 reserved */
[PROD_LOWEST] = GENMASK(27, 24),
/* Bits 28-31 reserved */
};
REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
[TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
/* Bits 13-15 reserved */
[NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
/* Bits 21-31 reserved */
};
REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
};
REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
0x00000400, 0x0020);
static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
0x00000404, 0x0020);
static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
0x00000500, 0x0020);
static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
0x00000504, 0x0020);
static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
/* Bit 7 reserved */
[CS_GEN_QMB_MASTER_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
[HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
[HDR_OFST_PKT_SIZE_VALID] = BIT(19),
[HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
[HDR_A5_MUX] = BIT(26),
[HDR_LEN_INC_DEAGG_HDR] = BIT(27),
[HDR_LEN_MSB] = GENMASK(29, 28),
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
[HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
[HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
[HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
/* Bits 14-15 reserved */
[HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
[HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
[HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
/* Bits 22-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
0x00000818, 0x0070);
static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
/* Bits 9-11 reserved */
[BYTE_THRESHOLD] = GENMASK(27, 12),
[PIPE_REPLICATION_EN] = BIT(28),
[PAD_EN] = BIT(29),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
/* Bit 11 reserved */
[TIME_LIMIT] = GENMASK(16, 12),
[PKT_LIMIT] = GENMASK(22, 17),
[SW_EOF_ACTIVE] = BIT(23),
[FORCE_CLOSE] = BIT(24),
/* Bit 25 reserved */
[HARD_BYTE_LIMIT_EN] = BIT(26),
[AGGR_GRAN_SEL] = BIT(27),
/* Bits 28-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
0x0000082c, 0x0070);
static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
0x00000830, 0x0070);
static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
[PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
[IGNORE_MIN_PKT_ERR] = BIT(14),
/* Bit 15 reserved */
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = BIT(0),
/* Bits 1-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
[STATUS_PKT_SUPPRESS] = BIT(9),
/* Bits 10-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
[FILTER_HASH_MSK_SRC_PORT] = BIT(3),
[FILTER_HASH_MSK_DST_PORT] = BIT(4),
[FILTER_HASH_MSK_PROTOCOL] = BIT(5),
[FILTER_HASH_MSK_METADATA] = BIT(6),
[FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
/* Bits 7-15 reserved */
[ROUTER_HASH_MSK_SRC_ID] = BIT(16),
[ROUTER_HASH_MSK_SRC_IP] = BIT(17),
[ROUTER_HASH_MSK_DST_IP] = BIT(18),
[ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
[ROUTER_HASH_MSK_DST_PORT] = BIT(20),
[ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
[ROUTER_HASH_MSK_METADATA] = BIT(22),
[ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
/* Bits 23-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
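/* Register descriptors indexed by register ID; IDs without an
 * initializer below are left NULL (register absent in this IPA version).
 */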
static const struct reg *reg_array[] = {
[COMP_CFG] = &reg_comp_cfg,
[CLKON_CFG] = &reg_clkon_cfg,
[ROUTE] = &reg_route,
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
[FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
[AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
[IPA_TX_CFG] = &reg_ipa_tx_cfg,
[FLAVOR_0] = &reg_flavor_0,
[IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
[QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
[TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
[TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
[SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
[SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
[DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
[DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
[ENDP_INIT_CFG] = &reg_endp_init_cfg,
[ENDP_INIT_NAT] = &reg_endp_init_nat,
[ENDP_INIT_HDR] = &reg_endp_init_hdr,
[ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
[ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
[ENDP_INIT_MODE] = &reg_endp_init_mode,
[ENDP_INIT_AGGR] = &reg_endp_init_aggr,
[ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
[ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
[ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
[ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
[ENDP_INIT_SEQ] = &reg_endp_init_seq,
[ENDP_STATUS] = &reg_endp_status,
[ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
[IPA_IRQ_STTS] = &reg_ipa_irq_stts,
[IPA_IRQ_EN] = &reg_ipa_irq_en,
[IPA_IRQ_CLR] = &reg_ipa_irq_clr,
[IPA_IRQ_UC] = &reg_ipa_irq_uc,
[IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
[IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
[IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
};
const struct regs ipa_regs_v4_7 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/ipa_reg-v4.7.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Linaro Ltd. */
#include <linux/types.h>
#include "../ipa.h"
#include "../ipa_reg.h"
static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
[GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
/* Bit 4 reserved */
[IPA_QMB_SELECT_CONS_EN] = BIT(5),
[IPA_QMB_SELECT_PROD_EN] = BIT(6),
[GSI_MULTI_INORDER_RD_DIS] = BIT(7),
[GSI_MULTI_INORDER_WR_DIS] = BIT(8),
[GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
[GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
[GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
[GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
[GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
[GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
[GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
[IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
[FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
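/* Bit 18 reserved */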
[QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
[GENQMB_AOOOWR] = BIT(20),
[IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
[ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(24, 22),
/* Bits 25-29 reserved */
[GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
[GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
};
REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
[CLKON_MISC] = BIT(3),
[RAM_ARB] = BIT(4),
[FTCH_HPS] = BIT(5),
[FTCH_DPS] = BIT(6),
[CLKON_HPS] = BIT(7),
[CLKON_DPS] = BIT(8),
[RX_HPS_CMDQS] = BIT(9),
[HPS_DPS_CMDQS] = BIT(10),
[DPS_TX_CMDQS] = BIT(11),
[RSRC_MNGR] = BIT(12),
[CTX_HANDLER] = BIT(13),
[ACK_MNGR] = BIT(14),
[D_DCPH] = BIT(15),
[H_DCPH] = BIT(16),
[CLKON_DCMP] = BIT(17),
[NTF_TX_CMDQS] = BIT(18),
[CLKON_TX_0] = BIT(19),
[CLKON_TX_1] = BIT(20),
[CLKON_FNR] = BIT(21),
[QSB2AXI_CMDQ_L] = BIT(22),
[AGGR_WRAPPER] = BIT(23),
[RAM_SLAVEWAY] = BIT(24),
[CLKON_QMB] = BIT(25),
[WEIGHT_ARB] = BIT(26),
[GSI_IF] = BIT(27),
[CLKON_GLOBAL] = BIT(28),
[GLOBAL_2X_CLK] = BIT(29),
[DPL_FIFO] = BIT(30),
[DRBIP] = BIT(31),
};
REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
[ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
[ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
/* Bits 22-23 reserved */
[ROUTE_DEF_RETAIN_HDR] = BIT(24),
/* Bits 25-31 reserved */
};
REG_FIELDS(ROUTE, route, 0x00000048);
static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
[GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
[DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
[DMAW_MAX_BEATS_256_DIS] = BIT(11),
[PA_MASK_EN] = BIT(12),
[PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
[DUAL_TX_ENABLE] = BIT(17),
[SSPND_PA_NO_START_STATE] = BIT(18),
/* Bits 19-31 reserved */
};
REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
/* Bits 13-15 reserved */
[MAX_PROD_PIPES] = GENMASK(20, 16),
/* Bits 21-23 reserved */
[PROD_LOWEST] = GENMASK(27, 24),
/* Bits 28-31 reserved */
};
REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
[TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
/* Bits 13-15 reserved */
[NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
/* Bits 21-31 reserved */
};
REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
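/* Bits 9-31 reserved */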
};
REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
0x00000400, 0x0020);
static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
0x00000404, 0x0020);
static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
0x00000500, 0x0020);
static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
0x00000504, 0x0020);
static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
/* Bit 7 reserved */
[CS_GEN_QMB_MASTER_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
[HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
[HDR_OFST_PKT_SIZE_VALID] = BIT(19),
[HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
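/* Bit 26 reserved */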
[HDR_LEN_INC_DEAGG_HDR] = BIT(27),
[HDR_LEN_MSB] = GENMASK(29, 28),
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
[HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
[HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
[HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
/* Bits 14-15 reserved */
[HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
[HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
[HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
/* Bits 22-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
0x00000818, 0x0070);
static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
/* Bits 9-11 reserved */
[BYTE_THRESHOLD] = GENMASK(27, 12),
[PIPE_REPLICATION_EN] = BIT(28),
[PAD_EN] = BIT(29),
[DRBIP_ACL_ENABLE] = BIT(30),
/* Bit 31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
/* Bit 11 reserved */
[TIME_LIMIT] = GENMASK(16, 12),
[PKT_LIMIT] = GENMASK(22, 17),
[SW_EOF_ACTIVE] = BIT(23),
[FORCE_CLOSE] = BIT(24),
/* Bit 25 reserved */
[HARD_BYTE_LIMIT_EN] = BIT(26),
[AGGR_GRAN_SEL] = BIT(27),
/* Bits 28-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
0x0000082c, 0x0070);
static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
0x00000830, 0x0070);
static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
[PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
[IGNORE_MIN_PKT_ERR] = BIT(14),
/* Bit 15 reserved */
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
[STATUS_PKT_SUPPRESS] = BIT(9),
/* Bits 10-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
[FILTER_HASH_MSK_SRC_PORT] = BIT(3),
[FILTER_HASH_MSK_DST_PORT] = BIT(4),
[FILTER_HASH_MSK_PROTOCOL] = BIT(5),
[FILTER_HASH_MSK_METADATA] = BIT(6),
[FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
/* Bits 7-15 reserved */
[ROUTER_HASH_MSK_SRC_ID] = BIT(16),
[ROUTER_HASH_MSK_SRC_IP] = BIT(17),
[ROUTER_HASH_MSK_DST_IP] = BIT(18),
[ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
[ROUTER_HASH_MSK_DST_PORT] = BIT(20),
[ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
[ROUTER_HASH_MSK_METADATA] = BIT(22),
[ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
/* Bits 23-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct reg *reg_array[] = {
[COMP_CFG] = &reg_comp_cfg,
[CLKON_CFG] = &reg_clkon_cfg,
[ROUTE] = &reg_route,
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
[FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
[AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
[IPA_TX_CFG] = &reg_ipa_tx_cfg,
[FLAVOR_0] = &reg_flavor_0,
[IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
[QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
[TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
[TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
[SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
[SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
[DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
[DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
[ENDP_INIT_CFG] = &reg_endp_init_cfg,
[ENDP_INIT_NAT] = &reg_endp_init_nat,
[ENDP_INIT_HDR] = &reg_endp_init_hdr,
[ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
[ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
[ENDP_INIT_MODE] = &reg_endp_init_mode,
[ENDP_INIT_AGGR] = &reg_endp_init_aggr,
[ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
[ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
[ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
[ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
[ENDP_INIT_SEQ] = &reg_endp_init_seq,
[ENDP_STATUS] = &reg_endp_status,
[ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
[IPA_IRQ_STTS] = &reg_ipa_irq_stts,
[IPA_IRQ_EN] = &reg_ipa_irq_en,
[IPA_IRQ_CLR] = &reg_ipa_irq_clr,
[IPA_IRQ_UC] = &reg_ipa_irq_uc,
[IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
[IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
[IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
};
const struct regs ipa_regs_v4_9 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/ipa_reg-v4.9.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Linaro Ltd. */
#include <linux/types.h>
#include "../ipa.h"
#include "../ipa_reg.h"
static const u32 reg_comp_cfg_fmask[] = {
[COMP_CFG_ENABLE] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
[GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
[IPA_DCMP_FAST_CLK_EN] = BIT(4),
/* Bits 5-31 reserved */
};
REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
[CLKON_MISC] = BIT(3),
[RAM_ARB] = BIT(4),
[FTCH_HPS] = BIT(5),
[FTCH_DPS] = BIT(6),
[CLKON_HPS] = BIT(7),
[CLKON_DPS] = BIT(8),
[RX_HPS_CMDQS] = BIT(9),
[HPS_DPS_CMDQS] = BIT(10),
[DPS_TX_CMDQS] = BIT(11),
[RSRC_MNGR] = BIT(12),
[CTX_HANDLER] = BIT(13),
[ACK_MNGR] = BIT(14),
[D_DCPH] = BIT(15),
[H_DCPH] = BIT(16),
/* Bit 17 reserved */
[NTF_TX_CMDQS] = BIT(18),
[CLKON_TX_0] = BIT(19),
[CLKON_TX_1] = BIT(20),
[CLKON_FNR] = BIT(21),
/* Bits 22-31 reserved */
};
REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
[ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
[ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
/* Bits 22-23 reserved */
[ROUTE_DEF_RETAIN_HDR] = BIT(24),
/* Bits 25-31 reserved */
};
REG_FIELDS(ROUTE, route, 0x00000048);
static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
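/* Bits 8-31 reserved */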
};
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */
REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
REG(IPA_BCR, ipa_bcr, 0x000001d0);
static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(16, 0),
/* Bits 17-31 reserved */
};
/* Offset must be a multiple of 8 */
REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */
[AGGR_GRANULARITY] = GENMASK(8, 4),
/* Bits 9-31 reserved */
};
REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
static const u32 reg_ipa_tx_cfg_fmask[] = {
[TX0_PREFETCH_DISABLE] = BIT(0),
[TX1_PREFETCH_DISABLE] = BIT(1),
[PREFETCH_ALMOST_EMPTY_SIZE] = GENMASK(4, 2),
/* Bits 5-31 reserved */
};
REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
/* Bits 13-15 reserved */
[MAX_PROD_PIPES] = GENMASK(20, 16),
/* Bits 21-23 reserved */
[PROD_LOWEST] = GENMASK(27, 24),
/* Bits 28-31 reserved */
};
REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000220);
static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
0x00000400, 0x0020);
static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
0x00000404, 0x0020);
static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
0x00000500, 0x0020);
static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
0x00000504, 0x0020);
static const u32 reg_endp_init_ctrl_fmask[] = {
[ENDP_SUSPEND] = BIT(0),
[ENDP_DELAY] = BIT(1),
/* Bits 2-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
/* Bit 7 reserved */
[CS_GEN_QMB_MASTER_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
[HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
[HDR_OFST_PKT_SIZE_VALID] = BIT(19),
[HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
[HDR_A5_MUX] = BIT(26),
[HDR_LEN_INC_DEAGG_HDR] = BIT(27),
[HDR_METADATA_REG_VALID] = BIT(28),
/* Bits 29-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
[HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
[HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
[HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
/* Bits 14-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
0x00000818, 0x0070);
static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
/* Bit 3 reserved */
[DEST_PIPE_INDEX] = GENMASK(8, 4),
/* Bits 9-11 reserved */
[BYTE_THRESHOLD] = GENMASK(27, 12),
[PIPE_REPLICATION_EN] = BIT(28),
[PAD_EN] = BIT(29),
[HDR_FTCH_DISABLE] = BIT(30),
/* Bit 31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(9, 5),
[TIME_LIMIT] = GENMASK(14, 10),
[PKT_LIMIT] = GENMASK(20, 15),
[SW_EOF_ACTIVE] = BIT(21),
[FORCE_CLOSE] = BIT(22),
/* Bit 23 reserved */
[HARD_BYTE_LIMIT_EN] = BIT(24),
/* Bits 25-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
0x0000082c, 0x0070);
/* Entire register is a tick count */
static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_BASE_VALUE] = GENMASK(31, 0),
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
0x00000830, 0x0070);
static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
[PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
[IGNORE_MIN_PKT_ERR] = BIT(14),
/* Bit 15 reserved */
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
[SEQ_REP_TYPE] = GENMASK(15, 8),
/* Bits 16-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-7 reserved */
[STATUS_LOCATION] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
[FILTER_HASH_MSK_SRC_PORT] = BIT(3),
[FILTER_HASH_MSK_DST_PORT] = BIT(4),
[FILTER_HASH_MSK_PROTOCOL] = BIT(5),
[FILTER_HASH_MSK_METADATA] = BIT(6),
[FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
/* Bits 7-15 reserved */
[ROUTER_HASH_MSK_SRC_ID] = BIT(16),
[ROUTER_HASH_MSK_SRC_IP] = BIT(17),
[ROUTER_HASH_MSK_DST_IP] = BIT(18),
[ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
[ROUTER_HASH_MSK_DST_PORT] = BIT(20),
[ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
[ROUTER_HASH_MSK_METADATA] = BIT(22),
[ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
/* Bits 23-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct reg *reg_array[] = {
[COMP_CFG] = &reg_comp_cfg,
[CLKON_CFG] = &reg_clkon_cfg,
[ROUTE] = &reg_route,
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
[FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[IPA_BCR] = &reg_ipa_bcr,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
[AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
[COUNTER_CFG] = &reg_counter_cfg,
[IPA_TX_CFG] = &reg_ipa_tx_cfg,
[FLAVOR_0] = &reg_flavor_0,
[IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
[SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
[SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
[DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
[DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
[ENDP_INIT_CTRL] = &reg_endp_init_ctrl,
[ENDP_INIT_CFG] = &reg_endp_init_cfg,
[ENDP_INIT_NAT] = &reg_endp_init_nat,
[ENDP_INIT_HDR] = &reg_endp_init_hdr,
[ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
[ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
[ENDP_INIT_MODE] = &reg_endp_init_mode,
[ENDP_INIT_AGGR] = &reg_endp_init_aggr,
[ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
[ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
[ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
[ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
[ENDP_INIT_SEQ] = &reg_endp_init_seq,
[ENDP_STATUS] = &reg_endp_status,
[ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
[IPA_IRQ_STTS] = &reg_ipa_irq_stts,
[IPA_IRQ_EN] = &reg_ipa_irq_en,
[IPA_IRQ_CLR] = &reg_ipa_irq_clr,
[IPA_IRQ_UC] = &reg_ipa_irq_uc,
[IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
[IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
[IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
};
const struct regs ipa_regs_v3_5_1 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/ipa_reg-v3.5.1.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Linaro Ltd. */
#include <linux/types.h>
#include "../ipa.h"
#include "../ipa_reg.h"
static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(7, 0),
[MAX_CONS_PIPES] = GENMASK(15, 8),
[MAX_PROD_PIPES] = GENMASK(23, 16),
[PROD_LOWEST] = GENMASK(31, 24),
};
REG_FIELDS(FLAVOR_0, flavor_0, 0x00000000);
static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
[GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
/* Bit 4 reserved */
[IPA_QMB_SELECT_CONS_EN] = BIT(5),
[IPA_QMB_SELECT_PROD_EN] = BIT(6),
[GSI_MULTI_INORDER_RD_DIS] = BIT(7),
[GSI_MULTI_INORDER_WR_DIS] = BIT(8),
[GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
[GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
[GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
[GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
[GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
[GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
[GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
[IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
[FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
/* Bit 18 reserved */
[QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
[GENQMB_AOOOWR] = BIT(20),
[IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
[ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(27, 22),
/* Bits 28-29 reserved */
[GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
[GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
};
REG_FIELDS(COMP_CFG, comp_cfg, 0x0000002c);
static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
[CLKON_MISC] = BIT(3),
[RAM_ARB] = BIT(4),
[FTCH_HPS] = BIT(5),
[FTCH_DPS] = BIT(6),
[CLKON_HPS] = BIT(7),
[CLKON_DPS] = BIT(8),
[RX_HPS_CMDQS] = BIT(9),
[HPS_DPS_CMDQS] = BIT(10),
[DPS_TX_CMDQS] = BIT(11),
[RSRC_MNGR] = BIT(12),
[CTX_HANDLER] = BIT(13),
[ACK_MNGR] = BIT(14),
[D_DCPH] = BIT(15),
[H_DCPH] = BIT(16),
/* Bit 17 reserved */
[NTF_TX_CMDQS] = BIT(18),
[CLKON_TX_0] = BIT(19),
[CLKON_TX_1] = BIT(20),
[CLKON_FNR] = BIT(21),
[QSB2AXI_CMDQ_L] = BIT(22),
[AGGR_WRAPPER] = BIT(23),
[RAM_SLAVEWAY] = BIT(24),
[CLKON_QMB] = BIT(25),
[WEIGHT_ARB] = BIT(26),
[GSI_IF] = BIT(27),
[CLKON_GLOBAL] = BIT(28),
[GLOBAL_2X_CLK] = BIT(29),
[DPL_FIFO] = BIT(30),
[DRBIP] = BIT(31),
};
REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000034);
static const u32 reg_route_fmask[] = {
[ROUTE_DEF_PIPE] = GENMASK(7, 0),
[ROUTE_FRAG_DEF_PIPE] = GENMASK(15, 8),
[ROUTE_DEF_HDR_OFST] = GENMASK(25, 16),
[ROUTE_DEF_HDR_TABLE] = BIT(26),
[ROUTE_DEF_RETAIN_HDR] = BIT(27),
[ROUTE_DIS] = BIT(28),
/* Bits 29-31 reserved */
};
REG_FIELDS(ROUTE, route, 0x00000038);
static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000040);
static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000054);
static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
[GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000058);
/* Valid bits defined by ipa->available */
REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x00000100, 0x0004);
static const u32 reg_filt_rout_cache_flush_fmask[] = {
[ROUTER_CACHE] = BIT(0),
/* Bits 1-3 reserved */
[FILTER_CACHE] = BIT(4),
/* Bits 5-31 reserved */
};
REG_FIELDS(FILT_ROUT_CACHE_FLUSH, filt_rout_cache_flush, 0x0000404);
static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x00000478);
static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
[DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
[DMAW_MAX_BEATS_256_DIS] = BIT(11),
[PA_MASK_EN] = BIT(12),
[PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
[DUAL_TX_ENABLE] = BIT(17),
[SSPND_PA_NO_START_STATE] = BIT(18),
/* Bit 19 reserved */
[HOLB_STICKY_DROP_EN] = BIT(20),
/* Bits 21-31 reserved */
};
REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x00000488);
static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x000004a8);
static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
[TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
/* Bits 13-15 reserved */
[NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
/* Bits 21-31 reserved */
};
REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x000004ac);
static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x000004b0);
static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
[PULSE_GRAN_3] = GENMASK(11, 9),
/* Bits 12-31 reserved */
};
REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x000004b4);
static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
0x00000500, 0x0020);
static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
0x00000504, 0x0020);
static const u32 reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
0x00000508, 0x0020);
static const u32 reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
0x0000050c, 0x0020);
static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
0x00000600, 0x0020);
static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
0x00000604, 0x0020);
static const u32 reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
0x00000608, 0x0020);
static const u32 reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
0x0000060c, 0x0020);
/* Valid bits defined by ipa->available */
REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000006b0, 0x0004);
static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
/* Bit 7 reserved */
[CS_GEN_QMB_MASTER_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00001008, 0x0080);
static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000100c, 0x0080);
static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
[HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
[HDR_OFST_PKT_SIZE_VALID] = BIT(19),
[HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
/* Bit 26 reserved */
[HDR_LEN_INC_DEAGG_HDR] = BIT(27),
[HDR_LEN_MSB] = GENMASK(29, 28),
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00001010, 0x0080);
static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
[HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
[HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
[HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
/* Bits 14-15 reserved */
[HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
[HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
[HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
[HDR_BYTES_TO_REMOVE_VALID] = BIT(22),
/* Bit 23 reserved */
[HDR_BYTES_TO_REMOVE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00001014, 0x0080);
REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
0x00001018, 0x0080);
static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(11, 4),
[BYTE_THRESHOLD] = GENMASK(27, 12),
[PIPE_REPLICATION_EN] = BIT(28),
[PAD_EN] = BIT(29),
[DRBIP_ACL_ENABLE] = BIT(30),
/* Bit 31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00001020, 0x0080);
static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
/* Bit 11 reserved */
[TIME_LIMIT] = GENMASK(16, 12),
[PKT_LIMIT] = GENMASK(22, 17),
[SW_EOF_ACTIVE] = BIT(23),
[FORCE_CLOSE] = BIT(24),
/* Bit 25 reserved */
[HARD_BYTE_LIMIT_EN] = BIT(26),
[AGGR_GRAN_SEL] = BIT(27),
/* Bits 28-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00001024, 0x0080);
static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
0x0000102c, 0x0080);
static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = GENMASK(9, 8),
/* Bits 10-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
0x00001030, 0x0080);
static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
[PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
[IGNORE_MIN_PKT_ERR] = BIT(14),
/* Bit 15 reserved */
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00001034, 0x0080);
static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(2, 0),
/* Bits 3-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00001038, 0x0080);
static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000103c, 0x0080);
static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(8, 1),
[STATUS_PKT_SUPPRESS] = BIT(9),
/* Bits 10-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00001040, 0x0080);
static const u32 reg_endp_filter_cache_cfg_fmask[] = {
[CACHE_MSK_SRC_ID] = BIT(0),
[CACHE_MSK_SRC_IP] = BIT(1),
[CACHE_MSK_DST_IP] = BIT(2),
[CACHE_MSK_SRC_PORT] = BIT(3),
[CACHE_MSK_DST_PORT] = BIT(4),
[CACHE_MSK_PROTOCOL] = BIT(5),
[CACHE_MSK_METADATA] = BIT(6),
/* Bits 7-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_FILTER_CACHE_CFG, endp_filter_cache_cfg,
0x0000105c, 0x0080);
static const u32 reg_endp_router_cache_cfg_fmask[] = {
[CACHE_MSK_SRC_ID] = BIT(0),
[CACHE_MSK_SRC_IP] = BIT(1),
[CACHE_MSK_DST_IP] = BIT(2),
[CACHE_MSK_SRC_PORT] = BIT(3),
[CACHE_MSK_DST_PORT] = BIT(4),
[CACHE_MSK_PROTOCOL] = BIT(5),
[CACHE_MSK_METADATA] = BIT(6),
/* Bits 7-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_ROUTER_CACHE_CFG, endp_router_cache_cfg,
0x00001070, 0x0080);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_STTS, ipa_irq_stts, 0x0000c008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_EN, ipa_irq_en, 0x0000c00c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_CLR, ipa_irq_clr, 0x0000c010 + 0x1000 * GSI_EE_AP);
static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000c01c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x0000c030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x0000c050 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x0000c070 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct reg *reg_array[] = {
[COMP_CFG] = &reg_comp_cfg,
[CLKON_CFG] = &reg_clkon_cfg,
[ROUTE] = &reg_route,
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
[FILT_ROUT_CACHE_FLUSH] = &reg_filt_rout_cache_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
[AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
[IPA_TX_CFG] = &reg_ipa_tx_cfg,
[FLAVOR_0] = &reg_flavor_0,
[IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
[QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
[TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
[TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
[SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
[SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
[SRC_RSRC_GRP_45_RSRC_TYPE] = &reg_src_rsrc_grp_45_rsrc_type,
[SRC_RSRC_GRP_67_RSRC_TYPE] = &reg_src_rsrc_grp_67_rsrc_type,
[DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
[DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
[DST_RSRC_GRP_45_RSRC_TYPE] = &reg_dst_rsrc_grp_45_rsrc_type,
[DST_RSRC_GRP_67_RSRC_TYPE] = &reg_dst_rsrc_grp_67_rsrc_type,
[ENDP_INIT_CFG] = &reg_endp_init_cfg,
[ENDP_INIT_NAT] = &reg_endp_init_nat,
[ENDP_INIT_HDR] = &reg_endp_init_hdr,
[ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
[ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
[ENDP_INIT_MODE] = &reg_endp_init_mode,
[ENDP_INIT_AGGR] = &reg_endp_init_aggr,
[ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
[ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
[ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
[ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
[ENDP_INIT_SEQ] = &reg_endp_init_seq,
[ENDP_STATUS] = &reg_endp_status,
[ENDP_FILTER_CACHE_CFG] = &reg_endp_filter_cache_cfg,
[ENDP_ROUTER_CACHE_CFG] = &reg_endp_router_cache_cfg,
[IPA_IRQ_STTS] = &reg_ipa_irq_stts,
[IPA_IRQ_EN] = &reg_ipa_irq_en,
[IPA_IRQ_CLR] = &reg_ipa_irq_clr,
[IPA_IRQ_UC] = &reg_ipa_irq_uc,
[IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
[IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
[IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
};
const struct regs ipa_regs_v5_0 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/ipa_reg-v5.0.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Linaro Ltd. */
#include <linux/types.h>
#include "../gsi.h"
#include "../reg.h"
#include "../gsi_reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
0x0000c024 + 0x1000 * GSI_EE_AP);
static const u32 reg_ch_c_cntxt_0_fmask[] = {
[CHTYPE_PROTOCOL] = GENMASK(2, 0),
[CHTYPE_DIR] = BIT(3),
[CH_EE] = GENMASK(7, 4),
[CHID] = GENMASK(12, 8),
/* Bit 13 reserved */
[ERINDEX] = GENMASK(18, 14),
/* Bit 19 reserved */
[CHSTATE] = GENMASK(23, 20),
[ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_cntxt_1_fmask[] = {
[CH_R_LENGTH] = GENMASK(15, 0),
/* Bits 16-31 reserved */
};
REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_qos_fmask[] = {
[WRR_WEIGHT] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_PREFETCH] = BIT(8),
[USE_DB_ENG] = BIT(9),
/* Bits 10-31 reserved */
};
REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_error_log_fmask[] = {
[ERR_ARG3] = GENMASK(3, 0),
[ERR_ARG2] = GENMASK(7, 4),
[ERR_ARG1] = GENMASK(11, 8),
[ERR_CODE] = GENMASK(15, 12),
/* Bits 16-18 reserved */
[ERR_VIRT_IDX] = GENMASK(23, 19),
[ERR_TYPE] = GENMASK(27, 24),
[ERR_EE] = GENMASK(31, 28),
};
REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
[EV_CHTYPE] = GENMASK(3, 0),
[EV_EE] = GENMASK(7, 4),
[EV_EVCHID] = GENMASK(15, 8),
[EV_INTYPE] = BIT(16),
/* Bits 17-19 reserved */
[EV_CHSTATE] = GENMASK(23, 20),
[EV_ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
[R_LENGTH] = GENMASK(15, 0),
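/* Bits 16-31 reserved */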
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
[EV_MODT] = GENMASK(15, 0),
[EV_MODC] = GENMASK(23, 16),
[EV_MOD_CNT] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
static const u32 reg_gsi_status_fmask[] = {
[ENABLED] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
static const u32 reg_ch_cmd_fmask[] = {
[CH_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[CH_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
static const u32 reg_ev_ch_cmd_fmask[] = {
[EV_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[EV_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
static const u32 reg_generic_cmd_fmask[] = {
[GENERIC_OPCODE] = GENMASK(4, 0),
[GENERIC_CHID] = GENMASK(9, 5),
[GENERIC_EE] = GENMASK(13, 10),
/* Bits 14-31 reserved */
};
REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
0x0001f098 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
0x0001f09c + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
0x0001f0a0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
0x0001f0a4 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
0x0001f0b8 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
0x0001f0c0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_intset_fmask[] = {
[INTYPE] = BIT(0)
/* Bits 1-31 reserved */
};
REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_scratch_0_fmask[] = {
[INTER_EE_RESULT] = GENMASK(2, 0),
/* Bits 3-4 reserved */
[GENERIC_EE_RESULT] = GENMASK(7, 5),
/* Bits 8-31 reserved */
};
REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
static const struct reg *reg_array[] = {
[INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
[INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
[CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
[CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
[CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
[CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
[CH_C_QOS] = &reg_ch_c_qos,
[CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
[CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
[CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
[CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
[EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
[EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
[EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
[EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
[EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
[EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
[EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
[EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
[EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
[EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
[EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
[EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
[EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
[CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
[EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
[GSI_STATUS] = &reg_gsi_status,
[CH_CMD] = &reg_ch_cmd,
[EV_CH_CMD] = &reg_ev_ch_cmd,
[GENERIC_CMD] = &reg_generic_cmd,
[CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
[CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
[CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
[CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
[CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
[CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
[CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
[CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
[CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
[CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
[CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
[CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
[CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
[CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
[CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
[CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
[CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
[CNTXT_INTSET] = &reg_cntxt_intset,
[ERROR_LOG] = &reg_error_log,
[ERROR_LOG_CLR] = &reg_error_log_clr,
[CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
};
const struct regs gsi_regs_v3_1 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
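/* Editor's sketch (not part of the original kernel file): the per-version
 * "regs" table defined above appears to be a sparse array of register
 * descriptors indexed by register ID, with missing entries for registers
 * this IP version lacks. A minimal lookup helper, assuming only the
 * reg_count and reg members visible here, might look like the following;
 * the driver's real gsi_reg() accessor is assumed to behave similarly.
 */
static inline const struct reg *
regs_lookup_sketch(const struct regs *regs, unsigned int reg_id)
{
	/* Out-of-range IDs and holes in the array both mean "not present" */
	if (reg_id >= regs->reg_count)
		return NULL;

	return regs->reg[reg_id];
}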
| linux-master | drivers/net/ipa/reg/gsi_reg-v3.1.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Linaro Ltd. */
#include <linux/types.h>
#include "../gsi.h"
#include "../reg.h"
#include "../gsi_reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
0x0000c024 + 0x1000 * GSI_EE_AP);
static const u32 reg_ch_c_cntxt_0_fmask[] = {
[CHTYPE_PROTOCOL] = GENMASK(2, 0),
[CHTYPE_DIR] = BIT(3),
[CH_EE] = GENMASK(7, 4),
[CHID] = GENMASK(12, 8),
[CHTYPE_PROTOCOL_MSB] = BIT(13),
[ERINDEX] = GENMASK(18, 14),
/* Bit 19 reserved */
[CHSTATE] = GENMASK(23, 20),
[ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
0x0000f000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_cntxt_1_fmask[] = {
[CH_R_LENGTH] = GENMASK(15, 0),
/* Bits 16-31 reserved */
};
REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
0x0000f004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_qos_fmask[] = {
[WRR_WEIGHT] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_PREFETCH] = BIT(8),
[USE_DB_ENG] = BIT(9),
[PREFETCH_MODE] = GENMASK(13, 10),
/* Bits 14-15 reserved */
[EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
/* Bits 24-31 reserved */
};
REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_error_log_fmask[] = {
[ERR_ARG3] = GENMASK(3, 0),
[ERR_ARG2] = GENMASK(7, 4),
[ERR_ARG1] = GENMASK(11, 8),
[ERR_CODE] = GENMASK(15, 12),
/* Bits 16-18 reserved */
[ERR_VIRT_IDX] = GENMASK(23, 19),
[ERR_TYPE] = GENMASK(27, 24),
[ERR_EE] = GENMASK(31, 28),
};
REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
0x0000f060 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
0x0000f064 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
0x0000f068 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
0x0000f06c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
[EV_CHTYPE] = GENMASK(3, 0),
[EV_EE] = GENMASK(7, 4),
[EV_EVCHID] = GENMASK(15, 8),
[EV_INTYPE] = BIT(16),
/* Bits 17-19 reserved */
[EV_CHSTATE] = GENMASK(23, 20),
[EV_ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
0x00010000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
[R_LENGTH] = GENMASK(15, 0),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
0x00010004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
0x00010008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
0x0001000c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
0x00010010 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
[EV_MODT] = GENMASK(15, 0),
[EV_MODC] = GENMASK(23, 16),
[EV_MOD_CNT] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
0x00010020 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
0x00010024 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
0x00010028 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
0x0001002c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
0x00010030 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
0x00010034 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
0x00010048 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
0x00011100 + 0x4000 * GSI_EE_AP, 0x08);
static const u32 reg_gsi_status_fmask[] = {
[ENABLED] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP);
static const u32 reg_ch_cmd_fmask[] = {
[CH_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[CH_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP);
static const u32 reg_ev_ch_cmd_fmask[] = {
[EV_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[EV_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP);
static const u32 reg_generic_cmd_fmask[] = {
[GENERIC_OPCODE] = GENMASK(4, 0),
[GENERIC_CHID] = GENMASK(9, 5),
[GENERIC_EE] = GENMASK(13, 10),
/* Bits 14-31 reserved */
};
REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP);
static const u32 reg_hw_param_2_fmask[] = {
[IRAM_SIZE] = GENMASK(2, 0),
[NUM_CH_PER_EE] = GENMASK(7, 3),
[NUM_EV_PER_EE] = GENMASK(12, 8),
[GSI_CH_PEND_TRANSLATE] = BIT(13),
[GSI_CH_FULL_LOGIC] = BIT(14),
[GSI_USE_SDMA] = BIT(15),
[GSI_SDMA_N_INT] = GENMASK(18, 16),
[GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
[GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
[GSI_USE_RD_WR_ENG] = BIT(30),
[GSI_USE_INTER_EE] = BIT(31),
};
REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
0x00012098 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
0x0001209c + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
0x000120a0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
0x000120a4 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
0x000120b8 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
0x000120c0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_intset_fmask[] = {
[INTYPE] = BIT(0)
/* Bits 1-31 reserved */
};
REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP);
REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP);
REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_scratch_0_fmask[] = {
[INTER_EE_RESULT] = GENMASK(2, 0),
/* Bits 3-4 reserved */
[GENERIC_EE_RESULT] = GENMASK(7, 5),
/* Bits 8-31 reserved */
};
REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP);
static const struct reg *reg_array[] = {
[INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
[INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
[CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
[CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
[CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
[CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
[CH_C_QOS] = &reg_ch_c_qos,
[CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
[CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
[CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
[CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
[EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
[EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
[EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
[EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
[EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
[EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
[EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
[EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
[EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
[EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
[EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
[EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
[EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
[CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
[EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
[GSI_STATUS] = &reg_gsi_status,
[CH_CMD] = &reg_ch_cmd,
[EV_CH_CMD] = &reg_ev_ch_cmd,
[GENERIC_CMD] = &reg_generic_cmd,
[HW_PARAM_2] = &reg_hw_param_2,
[CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
[CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
[CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
[CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
[CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
[CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
[CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
[CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
[CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
[CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
[CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
[CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
[CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
[CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
[CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
[CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
[CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
[CNTXT_INTSET] = &reg_cntxt_intset,
[ERROR_LOG] = &reg_error_log,
[ERROR_LOG_CLR] = &reg_error_log_clr,
[CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
};
const struct regs gsi_regs_v4_5 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/gsi_reg-v4.5.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Linaro Ltd. */
#include <linux/types.h>
#include "../ipa.h"
#include "../ipa_reg.h"
static const u32 reg_comp_cfg_fmask[] = {
/* Bit 0 reserved */
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
[GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
/* Bit 4 reserved */
[IPA_QMB_SELECT_CONS_EN] = BIT(5),
[IPA_QMB_SELECT_PROD_EN] = BIT(6),
[GSI_MULTI_INORDER_RD_DIS] = BIT(7),
[GSI_MULTI_INORDER_WR_DIS] = BIT(8),
[GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
[GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
[GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
[GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
[GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
[GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
[GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
[IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
[ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
[FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(21),
/* Bits 22-31 reserved */
};
REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
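/* Editor's sketch (illustrative, not from the kernel source): each
 * *_fmask[] table in this file maps a symbolic field ID to the bit mask
 * that field occupies within its 32-bit register. Encoding a field value
 * then reduces to a shift-and-mask against the table entry; the driver's
 * real reg_encode()/reg_bit() helpers (built on u32_encode_bits()) are
 * assumed to implement the equivalent of this hypothetical function.
 */
static inline u32 encode_field_sketch(const u32 *fmask, unsigned int field_id,
				      u32 val)
{
	u32 mask = fmask[field_id];

	/* Multiplying by the mask's lowest set bit shifts val into place */
	return (val * (mask & -mask)) & mask;
}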
static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
[CLKON_MISC] = BIT(3),
[RAM_ARB] = BIT(4),
[FTCH_HPS] = BIT(5),
[FTCH_DPS] = BIT(6),
[CLKON_HPS] = BIT(7),
[CLKON_DPS] = BIT(8),
[RX_HPS_CMDQS] = BIT(9),
[HPS_DPS_CMDQS] = BIT(10),
[DPS_TX_CMDQS] = BIT(11),
[RSRC_MNGR] = BIT(12),
[CTX_HANDLER] = BIT(13),
[ACK_MNGR] = BIT(14),
[D_DCPH] = BIT(15),
[H_DCPH] = BIT(16),
[CLKON_DCMP] = BIT(17),
[NTF_TX_CMDQS] = BIT(18),
[CLKON_TX_0] = BIT(19),
[CLKON_TX_1] = BIT(20),
[CLKON_FNR] = BIT(21),
[QSB2AXI_CMDQ_L] = BIT(22),
[AGGR_WRAPPER] = BIT(23),
[RAM_SLAVEWAY] = BIT(24),
[CLKON_QMB] = BIT(25),
[WEIGHT_ARB] = BIT(26),
[GSI_IF] = BIT(27),
[CLKON_GLOBAL] = BIT(28),
[GLOBAL_2X_CLK] = BIT(29),
[DPL_FIFO] = BIT(30),
/* Bit 31 reserved */
};
REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
[ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
[ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
/* Bits 22-23 reserved */
[ROUTE_DEF_RETAIN_HDR] = BIT(24),
/* Bits 25-31 reserved */
};
REG_FIELDS(ROUTE, route, 0x00000048);
static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
[GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
[DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
[DMAW_MAX_BEATS_256_DIS] = BIT(11),
[PA_MASK_EN] = BIT(12),
[PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
[DUAL_TX_ENABLE] = BIT(17),
/* Bits 18-31 reserved */
};
REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
/* Bits 13-15 reserved */
[MAX_PROD_PIPES] = GENMASK(20, 16),
/* Bits 21-23 reserved */
[PROD_LOWEST] = GENMASK(27, 24),
/* Bits 28-31 reserved */
};
REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
[TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
/* Bits 13-15 reserved */
[NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
/* Bits 21-31 reserved */
};
REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
};
REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
0x00000400, 0x0020);
static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
0x00000404, 0x0020);
static const u32 reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
0x00000408, 0x0020);
static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
0x00000500, 0x0020);
static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
0x00000504, 0x0020);
static const u32 reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
0x00000508, 0x0020);
static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
/* Bit 7 reserved */
[CS_GEN_QMB_MASTER_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
[HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
[HDR_OFST_PKT_SIZE_VALID] = BIT(19),
[HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
[HDR_A5_MUX] = BIT(26),
[HDR_LEN_INC_DEAGG_HDR] = BIT(27),
[HDR_LEN_MSB] = GENMASK(29, 28),
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
[HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
[HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
[HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
/* Bits 14-15 reserved */
[HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
[HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
[HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
/* Bits 22-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
0x00000818, 0x0070);
static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
/* Bits 9-11 reserved */
[BYTE_THRESHOLD] = GENMASK(27, 12),
[PIPE_REPLICATION_EN] = BIT(28),
[PAD_EN] = BIT(29),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
/* Bit 11 reserved */
[TIME_LIMIT] = GENMASK(16, 12),
[PKT_LIMIT] = GENMASK(22, 17),
[SW_EOF_ACTIVE] = BIT(23),
[FORCE_CLOSE] = BIT(24),
/* Bit 25 reserved */
[HARD_BYTE_LIMIT_EN] = BIT(26),
[AGGR_GRAN_SEL] = BIT(27),
/* Bits 28-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
0x0000082c, 0x0070);
static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
0x00000830, 0x0070);
static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
[PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
[IGNORE_MIN_PKT_ERR] = BIT(14),
/* Bit 15 reserved */
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(2, 0),
/* Bits 3-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
[STATUS_PKT_SUPPRESS] = BIT(9),
/* Bits 10-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
[FILTER_HASH_MSK_SRC_PORT] = BIT(3),
[FILTER_HASH_MSK_DST_PORT] = BIT(4),
[FILTER_HASH_MSK_PROTOCOL] = BIT(5),
[FILTER_HASH_MSK_METADATA] = BIT(6),
[FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
/* Bits 7-15 reserved */
[ROUTER_HASH_MSK_SRC_ID] = BIT(16),
[ROUTER_HASH_MSK_SRC_IP] = BIT(17),
[ROUTER_HASH_MSK_DST_IP] = BIT(18),
[ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
[ROUTER_HASH_MSK_DST_PORT] = BIT(20),
[ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
[ROUTER_HASH_MSK_METADATA] = BIT(22),
[ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
/* Bits 23-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
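/* Editor's sketch (illustrative, not from the kernel source): the
 * REG_STRIDE() entries above describe arrays of identical 32-bit
 * registers, one instance per unit (for the IRQ_SUSPEND_* registers, one
 * word per group of 32 endpoints, with valid bits given by
 * ipa->available). Instance N is assumed to live at base + N * stride,
 * which the driver's reg_n_offset() helper presumably computes:
 */
static inline u32 strided_offset_sketch(u32 base, u32 stride, u32 n)
{
	return base + n * stride;	/* e.g. n = 1 for the second unit */
}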
static const struct reg *reg_array[] = {
[COMP_CFG] = &reg_comp_cfg,
[CLKON_CFG] = &reg_clkon_cfg,
[ROUTE] = &reg_route,
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
[FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
[AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
[IPA_TX_CFG] = &reg_ipa_tx_cfg,
[FLAVOR_0] = &reg_flavor_0,
[IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
[QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
[TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
[TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
[SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
[SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
[SRC_RSRC_GRP_45_RSRC_TYPE] = &reg_src_rsrc_grp_45_rsrc_type,
[DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
[DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
[DST_RSRC_GRP_45_RSRC_TYPE] = &reg_dst_rsrc_grp_45_rsrc_type,
[ENDP_INIT_CFG] = &reg_endp_init_cfg,
[ENDP_INIT_NAT] = &reg_endp_init_nat,
[ENDP_INIT_HDR] = &reg_endp_init_hdr,
[ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
[ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
[ENDP_INIT_MODE] = &reg_endp_init_mode,
[ENDP_INIT_AGGR] = &reg_endp_init_aggr,
[ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
[ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
[ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
[ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
[ENDP_INIT_SEQ] = &reg_endp_init_seq,
[ENDP_STATUS] = &reg_endp_status,
[ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
[IPA_IRQ_STTS] = &reg_ipa_irq_stts,
[IPA_IRQ_EN] = &reg_ipa_irq_en,
[IPA_IRQ_CLR] = &reg_ipa_irq_clr,
[IPA_IRQ_UC] = &reg_ipa_irq_uc,
[IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
[IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
[IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
};
const struct regs ipa_regs_v4_5 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/ipa_reg-v4.5.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Linaro Ltd. */
#include <linux/types.h>
#include "../gsi.h"
#include "../reg.h"
#include "../gsi_reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
0x0000c024 + 0x1000 * GSI_EE_AP);
static const u32 reg_ch_c_cntxt_0_fmask[] = {
[CHTYPE_PROTOCOL] = GENMASK(2, 0),
[CHTYPE_DIR] = BIT(3),
[CH_EE] = GENMASK(7, 4),
[CHID] = GENMASK(12, 8),
/* Bit 13 reserved */
[ERINDEX] = GENMASK(18, 14),
/* Bit 19 reserved */
[CHSTATE] = GENMASK(23, 20),
[ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_cntxt_1_fmask[] = {
[CH_R_LENGTH] = GENMASK(15, 0),
/* Bits 16-31 reserved */
};
REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_qos_fmask[] = {
[WRR_WEIGHT] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_PREFETCH] = BIT(8),
[USE_DB_ENG] = BIT(9),
/* Bits 10-31 reserved */
};
REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_error_log_fmask[] = {
[ERR_ARG3] = GENMASK(3, 0),
[ERR_ARG2] = GENMASK(7, 4),
[ERR_ARG1] = GENMASK(11, 8),
[ERR_CODE] = GENMASK(15, 12),
/* Bits 16-18 reserved */
[ERR_VIRT_IDX] = GENMASK(23, 19),
[ERR_TYPE] = GENMASK(27, 24),
[ERR_EE] = GENMASK(31, 28),
};
REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
[EV_CHTYPE] = GENMASK(3, 0),
[EV_EE] = GENMASK(7, 4),
[EV_EVCHID] = GENMASK(15, 8),
[EV_INTYPE] = BIT(16),
/* Bits 17-19 reserved */
[EV_CHSTATE] = GENMASK(23, 20),
[EV_ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
[R_LENGTH] = GENMASK(15, 0),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
[EV_MODT] = GENMASK(15, 0),
[EV_MODC] = GENMASK(23, 16),
[EV_MOD_CNT] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
static const u32 reg_gsi_status_fmask[] = {
[ENABLED] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
static const u32 reg_ch_cmd_fmask[] = {
[CH_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[CH_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
static const u32 reg_ev_ch_cmd_fmask[] = {
[EV_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[EV_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
static const u32 reg_generic_cmd_fmask[] = {
[GENERIC_OPCODE] = GENMASK(4, 0),
[GENERIC_CHID] = GENMASK(9, 5),
[GENERIC_EE] = GENMASK(13, 10),
/* Bits 14-31 reserved */
};
REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
static const u32 reg_hw_param_2_fmask[] = {
[IRAM_SIZE] = GENMASK(2, 0),
[NUM_CH_PER_EE] = GENMASK(7, 3),
[NUM_EV_PER_EE] = GENMASK(12, 8),
[GSI_CH_PEND_TRANSLATE] = BIT(13),
[GSI_CH_FULL_LOGIC] = BIT(14),
/* Bits 15-31 reserved */
};
REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
0x0001f098 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
0x0001f09c + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
0x0001f0a0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
0x0001f0a4 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
0x0001f0b8 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
0x0001f0c0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_intset_fmask[] = {
[INTYPE] = BIT(0)
/* Bits 1-31 reserved */
};
REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_scratch_0_fmask[] = {
[INTER_EE_RESULT] = GENMASK(2, 0),
/* Bits 3-4 reserved */
[GENERIC_EE_RESULT] = GENMASK(7, 5),
/* Bits 8-31 reserved */
};
REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
static const struct reg *reg_array[] = {
[INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
[INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
[CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
[CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
[CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
[CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
[CH_C_QOS] = &reg_ch_c_qos,
[CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
[CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
[CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
[CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
[EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
[EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
[EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
[EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
[EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
[EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
[EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
[EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
[EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
[EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
[EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
[EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
[EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
[CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
[EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
[GSI_STATUS] = &reg_gsi_status,
[CH_CMD] = &reg_ch_cmd,
[EV_CH_CMD] = &reg_ev_ch_cmd,
[GENERIC_CMD] = &reg_generic_cmd,
[HW_PARAM_2] = &reg_hw_param_2,
[CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
[CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
[CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
[CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
[CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
[CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
[CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
[CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
[CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
[CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
[CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
[CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
[CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
[CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
[CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
[CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
[CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
[CNTXT_INTSET] = &reg_cntxt_intset,
[ERROR_LOG] = &reg_error_log,
[ERROR_LOG_CLR] = &reg_error_log_clr,
[CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
};
const struct regs gsi_regs_v3_5_1 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/gsi_reg-v3.5.1.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Linaro Ltd. */
#include <linux/types.h>
#include "../gsi.h"
#include "../reg.h"
#include "../gsi_reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
0x0000c024 + 0x1000 * GSI_EE_AP);
static const u32 reg_ch_c_cntxt_0_fmask[] = {
[CHTYPE_PROTOCOL] = GENMASK(2, 0),
[CHTYPE_DIR] = BIT(3),
[CH_EE] = GENMASK(7, 4),
[CHID] = GENMASK(12, 8),
/* Bit 13 reserved */
[ERINDEX] = GENMASK(18, 14),
/* Bit 19 reserved */
[CHSTATE] = GENMASK(23, 20),
[ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_cntxt_1_fmask[] = {
[CH_R_LENGTH] = GENMASK(15, 0),
/* Bits 16-31 reserved */
};
REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_qos_fmask[] = {
[WRR_WEIGHT] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_PREFETCH] = BIT(8),
[USE_DB_ENG] = BIT(9),
[USE_ESCAPE_BUF_ONLY] = BIT(10),
/* Bits 11-31 reserved */
};
REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_error_log_fmask[] = {
[ERR_ARG3] = GENMASK(3, 0),
[ERR_ARG2] = GENMASK(7, 4),
[ERR_ARG1] = GENMASK(11, 8),
[ERR_CODE] = GENMASK(15, 12),
/* Bits 16-18 reserved */
[ERR_VIRT_IDX] = GENMASK(23, 19),
[ERR_TYPE] = GENMASK(27, 24),
[ERR_EE] = GENMASK(31, 28),
};
REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
[EV_CHTYPE] = GENMASK(3, 0),
[EV_EE] = GENMASK(7, 4),
[EV_EVCHID] = GENMASK(15, 8),
[EV_INTYPE] = BIT(16),
/* Bits 17-19 reserved */
[EV_CHSTATE] = GENMASK(23, 20),
[EV_ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
[R_LENGTH] = GENMASK(15, 0),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
[EV_MODT] = GENMASK(15, 0),
[EV_MODC] = GENMASK(23, 16),
[EV_MOD_CNT] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
static const u32 reg_gsi_status_fmask[] = {
[ENABLED] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
static const u32 reg_ch_cmd_fmask[] = {
[CH_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[CH_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
static const u32 reg_ev_ch_cmd_fmask[] = {
[EV_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[EV_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
static const u32 reg_generic_cmd_fmask[] = {
[GENERIC_OPCODE] = GENMASK(4, 0),
[GENERIC_CHID] = GENMASK(9, 5),
[GENERIC_EE] = GENMASK(13, 10),
/* Bits 14-31 reserved */
};
REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
static const u32 reg_hw_param_2_fmask[] = {
[IRAM_SIZE] = GENMASK(2, 0),
[NUM_CH_PER_EE] = GENMASK(7, 3),
[NUM_EV_PER_EE] = GENMASK(12, 8),
[GSI_CH_PEND_TRANSLATE] = BIT(13),
[GSI_CH_FULL_LOGIC] = BIT(14),
[GSI_USE_SDMA] = BIT(15),
[GSI_SDMA_N_INT] = GENMASK(18, 16),
[GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
[GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
/* Bits 30-31 reserved */
};
REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
0x0001f098 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
0x0001f09c + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
0x0001f0a0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
0x0001f0a4 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
0x0001f0b8 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
0x0001f0c0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_intset_fmask[] = {
[INTYPE] = BIT(0)
/* Bits 1-31 reserved */
};
REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_scratch_0_fmask[] = {
[INTER_EE_RESULT] = GENMASK(2, 0),
/* Bits 3-4 reserved */
[GENERIC_EE_RESULT] = GENMASK(7, 5),
/* Bits 8-31 reserved */
};
REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
static const struct reg *reg_array[] = {
[INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
[INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
[CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
[CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
[CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
[CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
[CH_C_QOS] = &reg_ch_c_qos,
[CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
[CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
[CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
[CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
[EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
[EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
[EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
[EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
[EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
[EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
[EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
[EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
[EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
[EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
[EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
[EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
[EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
[CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
[EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
[GSI_STATUS] = &reg_gsi_status,
[CH_CMD] = &reg_ch_cmd,
[EV_CH_CMD] = &reg_ev_ch_cmd,
[GENERIC_CMD] = &reg_generic_cmd,
[HW_PARAM_2] = &reg_hw_param_2,
[CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
[CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
[CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
[CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
[CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
[CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
[CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
[CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
[CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
[CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
[CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
[CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
[CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
[CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
[CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
[CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
[CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
[CNTXT_INTSET] = &reg_cntxt_intset,
[ERROR_LOG] = &reg_error_log,
[ERROR_LOG_CLR] = &reg_error_log_clr,
[CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
};
const struct regs gsi_regs_v4_0 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/gsi_reg-v4.0.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Linaro Ltd. */
#include <linux/types.h>
#include "../ipa.h"
#include "../ipa_reg.h"
static const u32 reg_comp_cfg_fmask[] = {
[COMP_CFG_ENABLE] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
[GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
[IPA_DCMP_FAST_CLK_EN] = BIT(4),
/* Bits 5-31 reserved */
};
REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
[CLKON_MISC] = BIT(3),
[RAM_ARB] = BIT(4),
[FTCH_HPS] = BIT(5),
[FTCH_DPS] = BIT(6),
[CLKON_HPS] = BIT(7),
[CLKON_DPS] = BIT(8),
[RX_HPS_CMDQS] = BIT(9),
[HPS_DPS_CMDQS] = BIT(10),
[DPS_TX_CMDQS] = BIT(11),
[RSRC_MNGR] = BIT(12),
[CTX_HANDLER] = BIT(13),
[ACK_MNGR] = BIT(14),
[D_DCPH] = BIT(15),
[H_DCPH] = BIT(16),
/* Bits 17-31 reserved */
};
REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
[ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
[ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
/* Bits 22-23 reserved */
[ROUTE_DEF_RETAIN_HDR] = BIT(24),
/* Bits 25-31 reserved */
};
REG_FIELDS(ROUTE, route, 0x00000048);
static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
};
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */
REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
REG(IPA_BCR, ipa_bcr, 0x000001d0);
static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(16, 0),
/* Bits 17-31 reserved */
};
/* Offset must be a multiple of 8 */
REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 reg_counter_cfg_fmask[] = {
[EOT_COAL_GRANULARITY] = GENMASK(3, 0),
[AGGR_GRANULARITY] = GENMASK(8, 4),
/* Bits 9-31 reserved */
};
REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
0x00000400, 0x0020);
static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
0x00000404, 0x0020);
static const u32 reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
0x00000408, 0x0020);
static const u32 reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
0x0000040c, 0x0020);
static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
0x00000500, 0x0020);
static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
0x00000504, 0x0020);
static const u32 reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
0x00000508, 0x0020);
static const u32 reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
0x0000050c, 0x0020);
static const u32 reg_endp_init_ctrl_fmask[] = {
[ENDP_SUSPEND] = BIT(0),
[ENDP_DELAY] = BIT(1),
/* Bits 2-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
/* Bit 7 reserved */
[CS_GEN_QMB_MASTER_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
[HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
[HDR_OFST_PKT_SIZE_VALID] = BIT(19),
[HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
[HDR_A5_MUX] = BIT(26),
[HDR_LEN_INC_DEAGG_HDR] = BIT(27),
[HDR_METADATA_REG_VALID] = BIT(28),
/* Bits 29-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
[HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
[HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
[HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
/* Bits 14-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
0x00000818, 0x0070);
static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
/* Bit 3 reserved */
[DEST_PIPE_INDEX] = GENMASK(8, 4),
/* Bits 9-11 reserved */
[BYTE_THRESHOLD] = GENMASK(27, 12),
[PIPE_REPLICATION_EN] = BIT(28),
[PAD_EN] = BIT(29),
[HDR_FTCH_DISABLE] = BIT(30),
/* Bit 31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(9, 5),
[TIME_LIMIT] = GENMASK(14, 10),
[PKT_LIMIT] = GENMASK(20, 15),
[SW_EOF_ACTIVE] = BIT(21),
[FORCE_CLOSE] = BIT(22),
/* Bit 23 reserved */
[HARD_BYTE_LIMIT_EN] = BIT(24),
/* Bits 25-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
0x0000082c, 0x0070);
/* Entire register is a tick count */
static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_BASE_VALUE] = GENMASK(31, 0),
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
0x00000830, 0x0070);
static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
[PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
[IGNORE_MIN_PKT_ERR] = BIT(14),
/* Bit 15 reserved */
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(2, 0),
/* Bits 3-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
[SEQ_REP_TYPE] = GENMASK(15, 8),
/* Bits 16-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-7 reserved */
[STATUS_LOCATION] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
[FILTER_HASH_MSK_SRC_PORT] = BIT(3),
[FILTER_HASH_MSK_DST_PORT] = BIT(4),
[FILTER_HASH_MSK_PROTOCOL] = BIT(5),
[FILTER_HASH_MSK_METADATA] = BIT(6),
[FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
/* Bits 7-15 reserved */
[ROUTER_HASH_MSK_SRC_ID] = BIT(16),
[ROUTER_HASH_MSK_SRC_IP] = BIT(17),
[ROUTER_HASH_MSK_DST_IP] = BIT(18),
[ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
[ROUTER_HASH_MSK_DST_PORT] = BIT(20),
[ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
[ROUTER_HASH_MSK_METADATA] = BIT(22),
[ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
/* Bits 23-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct reg *reg_array[] = {
[COMP_CFG] = &reg_comp_cfg,
[CLKON_CFG] = &reg_clkon_cfg,
[ROUTE] = &reg_route,
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
[FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[IPA_BCR] = &reg_ipa_bcr,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
[AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
[COUNTER_CFG] = &reg_counter_cfg,
[SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
[SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
[SRC_RSRC_GRP_45_RSRC_TYPE] = &reg_src_rsrc_grp_45_rsrc_type,
[SRC_RSRC_GRP_67_RSRC_TYPE] = &reg_src_rsrc_grp_67_rsrc_type,
[DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
[DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
[DST_RSRC_GRP_45_RSRC_TYPE] = &reg_dst_rsrc_grp_45_rsrc_type,
[DST_RSRC_GRP_67_RSRC_TYPE] = &reg_dst_rsrc_grp_67_rsrc_type,
[ENDP_INIT_CTRL] = &reg_endp_init_ctrl,
[ENDP_INIT_CFG] = &reg_endp_init_cfg,
[ENDP_INIT_NAT] = &reg_endp_init_nat,
[ENDP_INIT_HDR] = &reg_endp_init_hdr,
[ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
[ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
[ENDP_INIT_MODE] = &reg_endp_init_mode,
[ENDP_INIT_AGGR] = &reg_endp_init_aggr,
[ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
[ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
[ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
[ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
[ENDP_INIT_SEQ] = &reg_endp_init_seq,
[ENDP_STATUS] = &reg_endp_status,
[ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
[IPA_IRQ_STTS] = &reg_ipa_irq_stts,
[IPA_IRQ_EN] = &reg_ipa_irq_en,
[IPA_IRQ_CLR] = &reg_ipa_irq_clr,
[IPA_IRQ_UC] = &reg_ipa_irq_uc,
[IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
[IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
[IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
};
const struct regs ipa_regs_v3_1 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/ipa_reg-v3.1.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Linaro Ltd. */
#include <linux/types.h>
#include "../gsi.h"
#include "../reg.h"
#include "../gsi_reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c01c + 0x1000 * GSI_EE_AP);
REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
0x0000c028 + 0x1000 * GSI_EE_AP);
static const u32 reg_ch_c_cntxt_0_fmask[] = {
[CHTYPE_PROTOCOL] = GENMASK(6, 0),
[CHTYPE_DIR] = BIT(7),
[CH_EE] = GENMASK(11, 8),
[CHID] = GENMASK(19, 12),
[CHSTATE] = GENMASK(23, 20),
[ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
0x00014000 + 0x12000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_cntxt_1_fmask[] = {
[CH_R_LENGTH] = GENMASK(23, 0),
[ERINDEX] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
0x00014004 + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x00014008 + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001400c + 0x12000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_qos_fmask[] = {
[WRR_WEIGHT] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_PREFETCH] = BIT(8),
[USE_DB_ENG] = BIT(9),
[PREFETCH_MODE] = GENMASK(13, 10),
/* Bits 14-15 reserved */
[EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
[DB_IN_BYTES] = BIT(24),
[LOW_LATENCY_EN] = BIT(25),
/* Bits 26-31 reserved */
};
REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x00014048 + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
0x0001404c + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
0x00014050 + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
0x00014054 + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
0x00014058 + 0x12000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
[EV_CHTYPE] = GENMASK(6, 0),
[EV_INTYPE] = BIT(7),
[EV_EVCHID] = GENMASK(15, 8),
[EV_EE] = GENMASK(19, 16),
[EV_CHSTATE] = GENMASK(23, 20),
[EV_ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
0x0001c000 + 0x12000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
[R_LENGTH] = GENMASK(19, 0),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
0x0001c004 + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
0x0001c008 + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
0x0001c00c + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
0x0001c010 + 0x12000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
[EV_MODT] = GENMASK(15, 0),
[EV_MODC] = GENMASK(23, 16),
[EV_MOD_CNT] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
0x0001c020 + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
0x0001c024 + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
0x0001c028 + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
0x0001c02c + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
0x0001c030 + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
0x0001c034 + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
0x0001c048 + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
0x0001c04c + 0x12000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
0x00024000 + 0x12000 * GSI_EE_AP, 0x08);
REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
0x00024800 + 0x12000 * GSI_EE_AP, 0x08);
static const u32 reg_gsi_status_fmask[] = {
[ENABLED] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(GSI_STATUS, gsi_status, 0x00025000 + 0x12000 * GSI_EE_AP);
static const u32 reg_ch_cmd_fmask[] = {
[CH_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[CH_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(CH_CMD, ch_cmd, 0x00025008 + 0x12000 * GSI_EE_AP);
static const u32 reg_ev_ch_cmd_fmask[] = {
[EV_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[EV_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00025010 + 0x12000 * GSI_EE_AP);
static const u32 reg_generic_cmd_fmask[] = {
[GENERIC_OPCODE] = GENMASK(4, 0),
[GENERIC_CHID] = GENMASK(9, 5),
[GENERIC_EE] = GENMASK(13, 10),
/* Bits 14-31 reserved */
};
REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00025018 + 0x12000 * GSI_EE_AP);
static const u32 reg_hw_param_2_fmask[] = {
[NUM_CH_PER_EE] = GENMASK(7, 0),
[IRAM_SIZE] = GENMASK(12, 8),
[GSI_CH_PEND_TRANSLATE] = BIT(13),
[GSI_CH_FULL_LOGIC] = BIT(14),
[GSI_USE_SDMA] = BIT(15),
[GSI_SDMA_N_INT] = GENMASK(18, 16),
[GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
[GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
[GSI_USE_RD_WR_ENG] = BIT(30),
[GSI_USE_INTER_EE] = BIT(31),
};
REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00025040 + 0x12000 * GSI_EE_AP);
static const u32 reg_hw_param_4_fmask[] = {
[EV_PER_EE] = GENMASK(7, 0),
[IRAM_PROTOCOL_COUNT] = GENMASK(15, 8),
/* Bits 16-31 reserved */
};
REG_FIELDS(HW_PARAM_4, hw_param_4, 0x00025050 + 0x12000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00025080 + 0x12000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00025088 + 0x12000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00025090 + 0x12000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
0x00025094 + 0x12000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
0x00025098 + 0x12000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0002509c + 0x12000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
0x000250a0 + 0x12000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
0x000250a4 + 0x12000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000250a8 + 0x12000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
0x000250ac + 0x12000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
0x000250b0 + 0x12000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00025200 + 0x12000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00025204 + 0x12000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00025208 + 0x12000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0002520c + 0x12000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00025210 + 0x12000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00025214 + 0x12000 * GSI_EE_AP);
static const u32 reg_cntxt_intset_fmask[] = {
[INTYPE] = BIT(0)
/* Bits 1-31 reserved */
};
REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00025220 + 0x12000 * GSI_EE_AP);
static const u32 reg_error_log_fmask[] = {
[ERR_ARG3] = GENMASK(3, 0),
[ERR_ARG2] = GENMASK(7, 4),
[ERR_ARG1] = GENMASK(11, 8),
[ERR_CODE] = GENMASK(15, 12),
/* Bits 16-18 reserved */
[ERR_VIRT_IDX] = GENMASK(23, 19),
[ERR_TYPE] = GENMASK(27, 24),
[ERR_EE] = GENMASK(31, 28),
};
REG_FIELDS(ERROR_LOG, error_log, 0x00025240 + 0x12000 * GSI_EE_AP);
REG(ERROR_LOG_CLR, error_log_clr, 0x00025244 + 0x12000 * GSI_EE_AP);
static const u32 reg_cntxt_scratch_0_fmask[] = {
[INTER_EE_RESULT] = GENMASK(2, 0),
/* Bits 3-4 reserved */
[GENERIC_EE_RESULT] = GENMASK(7, 5),
/* Bits 8-31 reserved */
};
REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00025400 + 0x12000 * GSI_EE_AP);
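/* GSI v5.0 register descriptors, indexed by GSI register ID */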
static const struct reg *reg_array[] = {
[INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
[INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
[CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
[CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
[CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
[CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
[CH_C_QOS] = &reg_ch_c_qos,
[CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
[CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
[CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
[CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
[EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
[EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
[EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
[EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
[EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
[EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
[EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
[EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
[EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
[EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
[EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
[EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
[EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
[CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
[EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
[GSI_STATUS] = &reg_gsi_status,
[CH_CMD] = &reg_ch_cmd,
[EV_CH_CMD] = &reg_ev_ch_cmd,
[GENERIC_CMD] = &reg_generic_cmd,
[HW_PARAM_2] = &reg_hw_param_2,
[HW_PARAM_4] = &reg_hw_param_4,
[CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
[CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
[CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
[CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
[CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
[CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
[CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
[CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
[CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
[CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
[CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
[CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
[CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
[CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
[CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
[CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
[CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
[CNTXT_INTSET] = &reg_cntxt_intset,
[ERROR_LOG] = &reg_error_log,
[ERROR_LOG_CLR] = &reg_error_log_clr,
[CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
};
const struct regs gsi_regs_v5_0 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/gsi_reg-v5.0.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Linaro Ltd. */
#include <linux/types.h>
#include "../gsi.h"
#include "../reg.h"
#include "../gsi_reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
0x0000c024 + 0x1000 * GSI_EE_AP);
static const u32 reg_ch_c_cntxt_0_fmask[] = {
[CHTYPE_PROTOCOL] = GENMASK(2, 0),
[CHTYPE_DIR] = BIT(3),
[CH_EE] = GENMASK(7, 4),
[CHID] = GENMASK(12, 8),
[CHTYPE_PROTOCOL_MSB] = BIT(13),
[ERINDEX] = GENMASK(18, 14),
/* Bit 19 reserved */
[CHSTATE] = GENMASK(23, 20),
[ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
0x0000f000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_cntxt_1_fmask[] = {
[CH_R_LENGTH] = GENMASK(19, 0),
/* Bits 20-31 reserved */
};
REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
0x0000f004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_qos_fmask[] = {
[WRR_WEIGHT] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_PREFETCH] = BIT(8),
[USE_DB_ENG] = BIT(9),
[PREFETCH_MODE] = GENMASK(13, 10),
/* Bits 14-15 reserved */
[EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
[DB_IN_BYTES] = BIT(24),
/* Bits 25-31 reserved */
};
REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_error_log_fmask[] = {
[ERR_ARG3] = GENMASK(3, 0),
[ERR_ARG2] = GENMASK(7, 4),
[ERR_ARG1] = GENMASK(11, 8),
[ERR_CODE] = GENMASK(15, 12),
/* Bits 16-18 reserved */
[ERR_VIRT_IDX] = GENMASK(23, 19),
[ERR_TYPE] = GENMASK(27, 24),
[ERR_EE] = GENMASK(31, 28),
};
REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
0x0000f060 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
0x0000f064 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
0x0000f068 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
0x0000f06c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
[EV_CHTYPE] = GENMASK(3, 0),
[EV_EE] = GENMASK(7, 4),
[EV_EVCHID] = GENMASK(15, 8),
[EV_INTYPE] = BIT(16),
/* Bits 17-19 reserved */
[EV_CHSTATE] = GENMASK(23, 20),
[EV_ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
0x00010000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
[R_LENGTH] = GENMASK(19, 0),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
0x00010004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
0x00010008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
0x0001000c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
0x00010010 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
[EV_MODT] = GENMASK(15, 0),
[EV_MODC] = GENMASK(23, 16),
[EV_MOD_CNT] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
0x00010020 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
0x00010024 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
0x00010028 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
0x0001002c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
0x00010030 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
0x00010034 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
0x00010048 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
0x00011100 + 0x4000 * GSI_EE_AP, 0x08);
static const u32 reg_gsi_status_fmask[] = {
[ENABLED] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP);
static const u32 reg_ch_cmd_fmask[] = {
[CH_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[CH_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP);
static const u32 reg_ev_ch_cmd_fmask[] = {
[EV_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[EV_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP);
static const u32 reg_generic_cmd_fmask[] = {
[GENERIC_OPCODE] = GENMASK(4, 0),
[GENERIC_CHID] = GENMASK(9, 5),
[GENERIC_EE] = GENMASK(13, 10),
/* Bits 14-23 reserved */
[GENERIC_PARAMS] = GENMASK(31, 24),
};
REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP);
static const u32 reg_hw_param_2_fmask[] = {
[IRAM_SIZE] = GENMASK(2, 0),
[NUM_CH_PER_EE] = GENMASK(7, 3),
[NUM_EV_PER_EE] = GENMASK(12, 8),
[GSI_CH_PEND_TRANSLATE] = BIT(13),
[GSI_CH_FULL_LOGIC] = BIT(14),
[GSI_USE_SDMA] = BIT(15),
[GSI_SDMA_N_INT] = GENMASK(18, 16),
[GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
[GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
[GSI_USE_RD_WR_ENG] = BIT(30),
[GSI_USE_INTER_EE] = BIT(31),
};
REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
0x00012098 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
0x0001209c + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
0x000120a0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
0x000120a4 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
0x000120b8 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
0x000120c0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_intset_fmask[] = {
[INTYPE] = BIT(0)
/* Bits 1-31 reserved */
};
REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP);
REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP);
REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_scratch_0_fmask[] = {
[INTER_EE_RESULT] = GENMASK(2, 0),
/* Bits 3-4 reserved */
[GENERIC_EE_RESULT] = GENMASK(7, 5),
/* Bits 8-31 reserved */
};
REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP);
static const struct reg *reg_array[] = {
[INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
[INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
[CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
[CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
[CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
[CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
[CH_C_QOS] = &reg_ch_c_qos,
[CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
[CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
[CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
[CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
[EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
[EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
[EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
[EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
[EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
[EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
[EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
[EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
[EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
[EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
[EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
[EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
[EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
[CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
[EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
[GSI_STATUS] = &reg_gsi_status,
[CH_CMD] = &reg_ch_cmd,
[EV_CH_CMD] = &reg_ev_ch_cmd,
[GENERIC_CMD] = &reg_generic_cmd,
[HW_PARAM_2] = &reg_hw_param_2,
[CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
[CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
[CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
[CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
[CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
[CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
[CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
[CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
[CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
[CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
[CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
[CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
[CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
[CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
[CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
[CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
[CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
[CNTXT_INTSET] = &reg_cntxt_intset,
[ERROR_LOG] = &reg_error_log,
[ERROR_LOG_CLR] = &reg_error_log_clr,
[CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
};
const struct regs gsi_regs_v4_11 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/gsi_reg-v4.11.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Linaro Ltd. */
#include <linux/types.h>
#include "../ipa.h"
#include "../ipa_reg.h"
static const u32 reg_comp_cfg_fmask[] = {
/* Bit 0 reserved */
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
[GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
[IPA_DCMP_FAST_CLK_EN] = BIT(4),
[IPA_QMB_SELECT_CONS_EN] = BIT(5),
[IPA_QMB_SELECT_PROD_EN] = BIT(6),
[GSI_MULTI_INORDER_RD_DIS] = BIT(7),
[GSI_MULTI_INORDER_WR_DIS] = BIT(8),
[GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
[GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
[GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
[GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
[GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
[GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
[GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
[IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
[ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
/* Bits 21-31 reserved */
};
REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
[CLKON_MISC] = BIT(3),
[RAM_ARB] = BIT(4),
[FTCH_HPS] = BIT(5),
[FTCH_DPS] = BIT(6),
[CLKON_HPS] = BIT(7),
[CLKON_DPS] = BIT(8),
[RX_HPS_CMDQS] = BIT(9),
[HPS_DPS_CMDQS] = BIT(10),
[DPS_TX_CMDQS] = BIT(11),
[RSRC_MNGR] = BIT(12),
[CTX_HANDLER] = BIT(13),
[ACK_MNGR] = BIT(14),
[D_DCPH] = BIT(15),
[H_DCPH] = BIT(16),
/* Bit 17 reserved */
[NTF_TX_CMDQS] = BIT(18),
[CLKON_TX_0] = BIT(19),
[CLKON_TX_1] = BIT(20),
[CLKON_FNR] = BIT(21),
[QSB2AXI_CMDQ_L] = BIT(22),
[AGGR_WRAPPER] = BIT(23),
[RAM_SLAVEWAY] = BIT(24),
[CLKON_QMB] = BIT(25),
[WEIGHT_ARB] = BIT(26),
[GSI_IF] = BIT(27),
[CLKON_GLOBAL] = BIT(28),
[GLOBAL_2X_CLK] = BIT(29),
/* Bits 30-31 reserved */
};
REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
[ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
[ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
/* Bits 22-23 reserved */
[ROUTE_DEF_RETAIN_HDR] = BIT(24),
/* Bits 25-31 reserved */
};
REG_FIELDS(ROUTE, route, 0x00000048);
static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
[GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
REG(IPA_BCR, ipa_bcr, 0x000001d0);
static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(16, 0),
/* Bits 17-31 reserved */
};
/* Offset must be a multiple of 8 */
REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */
[AGGR_GRANULARITY] = GENMASK(8, 4),
/* Bits 9-31 reserved */
};
REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
[DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
[DMAW_MAX_BEATS_256_DIS] = BIT(11),
[PA_MASK_EN] = BIT(12),
[PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
/* Bit 17 reserved */
[SSPND_PA_NO_START_STATE] = BIT(18),
[SSPND_PA_NO_BQ_STATE] = BIT(19),
/* Bits 20-31 reserved */
};
REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
/* Bits 13-15 reserved */
[MAX_PROD_PIPES] = GENMASK(20, 16),
/* Bits 21-23 reserved */
[PROD_LOWEST] = GENMASK(27, 24),
/* Bits 28-31 reserved */
};
REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
0x00000400, 0x0020);
static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
0x00000404, 0x0020);
static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
0x00000500, 0x0020);
static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
0x00000504, 0x0020);
static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
/* Bit 7 reserved */
[CS_GEN_QMB_MASTER_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
[HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
[HDR_OFST_PKT_SIZE_VALID] = BIT(19),
[HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
[HDR_A5_MUX] = BIT(26),
[HDR_LEN_INC_DEAGG_HDR] = BIT(27),
[HDR_METADATA_REG_VALID] = BIT(28),
/* Bits 29-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
[HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
[HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
[HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
/* Bits 14-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
0x00000818, 0x0070);
static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
/* Bit 3 reserved */
[DEST_PIPE_INDEX] = GENMASK(8, 4),
/* Bits 9-11 reserved */
[BYTE_THRESHOLD] = GENMASK(27, 12),
[PIPE_REPLICATION_EN] = BIT(28),
[PAD_EN] = BIT(29),
[HDR_FTCH_DISABLE] = BIT(30),
/* Bit 31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(9, 5),
[TIME_LIMIT] = GENMASK(14, 10),
[PKT_LIMIT] = GENMASK(20, 15),
[SW_EOF_ACTIVE] = BIT(21),
[FORCE_CLOSE] = BIT(22),
/* Bit 23 reserved */
[HARD_BYTE_LIMIT_EN] = BIT(24),
/* Bits 25-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
0x0000082c, 0x0070);
static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_BASE_VALUE] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_SCALE] = GENMASK(12, 8),
/* Bits 13-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
0x00000830, 0x0070);
static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
[PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
[IGNORE_MIN_PKT_ERR] = BIT(14),
/* Bit 15 reserved */
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = BIT(0),
/* Bits 1-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
[SEQ_REP_TYPE] = GENMASK(15, 8),
/* Bits 16-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-7 reserved */
[STATUS_LOCATION] = BIT(8),
[STATUS_PKT_SUPPRESS] = BIT(9),
/* Bits 10-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
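/* IPA v4.2 register descriptors, indexed by IPA register ID */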
static const struct reg *reg_array[] = {
[COMP_CFG] = &reg_comp_cfg,
[CLKON_CFG] = &reg_clkon_cfg,
[ROUTE] = &reg_route,
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
[FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[IPA_BCR] = &reg_ipa_bcr,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
[AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
[COUNTER_CFG] = &reg_counter_cfg,
[IPA_TX_CFG] = &reg_ipa_tx_cfg,
[FLAVOR_0] = &reg_flavor_0,
[IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
[SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
[SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
[DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
[DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
[ENDP_INIT_CFG] = &reg_endp_init_cfg,
[ENDP_INIT_NAT] = &reg_endp_init_nat,
[ENDP_INIT_HDR] = &reg_endp_init_hdr,
[ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
[ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
[ENDP_INIT_MODE] = &reg_endp_init_mode,
[ENDP_INIT_AGGR] = &reg_endp_init_aggr,
[ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
[ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
[ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
[ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
[ENDP_INIT_SEQ] = &reg_endp_init_seq,
[ENDP_STATUS] = &reg_endp_status,
[IPA_IRQ_STTS] = &reg_ipa_irq_stts,
[IPA_IRQ_EN] = &reg_ipa_irq_en,
[IPA_IRQ_CLR] = &reg_ipa_irq_clr,
[IPA_IRQ_UC] = &reg_ipa_irq_uc,
[IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
[IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
[IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
};
const struct regs ipa_regs_v4_2 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/ipa_reg-v4.2.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Linaro Ltd. */
#include <linux/types.h>
#include "../ipa.h"
#include "../ipa_reg.h"
static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
[GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
/* Bit 4 reserved */
[IPA_QMB_SELECT_CONS_EN] = BIT(5),
[IPA_QMB_SELECT_PROD_EN] = BIT(6),
[GSI_MULTI_INORDER_RD_DIS] = BIT(7),
[GSI_MULTI_INORDER_WR_DIS] = BIT(8),
[GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
[GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
[GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
[GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
[GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
[GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
[GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
[IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
[FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
/* Bit 18 reserved */
[QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
[GENQMB_AOOOWR] = BIT(20),
[IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
[ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(23, 22),
/* Bits 24-29 reserved */
[GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
[GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
};
REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
[CLKON_MISC] = BIT(3),
[RAM_ARB] = BIT(4),
[FTCH_HPS] = BIT(5),
[FTCH_DPS] = BIT(6),
[CLKON_HPS] = BIT(7),
[CLKON_DPS] = BIT(8),
[RX_HPS_CMDQS] = BIT(9),
[HPS_DPS_CMDQS] = BIT(10),
[DPS_TX_CMDQS] = BIT(11),
[RSRC_MNGR] = BIT(12),
[CTX_HANDLER] = BIT(13),
[ACK_MNGR] = BIT(14),
[D_DCPH] = BIT(15),
[H_DCPH] = BIT(16),
/* Bit 17 reserved */
[NTF_TX_CMDQS] = BIT(18),
[CLKON_TX_0] = BIT(19),
[CLKON_TX_1] = BIT(20),
[CLKON_FNR] = BIT(21),
[QSB2AXI_CMDQ_L] = BIT(22),
[AGGR_WRAPPER] = BIT(23),
[RAM_SLAVEWAY] = BIT(24),
[CLKON_QMB] = BIT(25),
[WEIGHT_ARB] = BIT(26),
[GSI_IF] = BIT(27),
[CLKON_GLOBAL] = BIT(28),
[GLOBAL_2X_CLK] = BIT(29),
[DPL_FIFO] = BIT(30),
[DRBIP] = BIT(31),
};
REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
[ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
[ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
/* Bits 22-23 reserved */
[ROUTE_DEF_RETAIN_HDR] = BIT(24),
/* Bits 25-31 reserved */
};
REG_FIELDS(ROUTE, route, 0x00000048);
static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
[GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
/* Bits 5-7 reserved */
[IPV4_ROUTER_HASH] = BIT(8),
/* Bits 9-11 reserved */
[IPV4_FILTER_HASH] = BIT(12),
/* Bits 13-31 reserved */
};
REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
[DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
[DMAW_MAX_BEATS_256_DIS] = BIT(11),
[PA_MASK_EN] = BIT(12),
[PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
[DUAL_TX_ENABLE] = BIT(17),
[SSPND_PA_NO_START_STATE] = BIT(18),
/* Bits 19-31 reserved */
};
REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
/* Bits 13-15 reserved */
[MAX_PROD_PIPES] = GENMASK(20, 16),
/* Bits 21-23 reserved */
[PROD_LOWEST] = GENMASK(27, 24),
/* Bits 28-31 reserved */
};
REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
[TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
/* Bits 13-15 reserved */
[NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
/* Bits 21-31 reserved */
};
REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
/* Bits 9-31 reserved */
};
REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
0x00000400, 0x0020);
static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
0x00000404, 0x0020);
static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
0x00000500, 0x0020);
static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
/* Bits 14-15 reserved */
[Y_MIN_LIM] = GENMASK(21, 16),
/* Bits 22-23 reserved */
[Y_MAX_LIM] = GENMASK(29, 24),
/* Bits 30-31 reserved */
};
REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
0x00000504, 0x0020);
static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
/* Bit 7 reserved */
[CS_GEN_QMB_MASTER_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
[HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
[HDR_OFST_PKT_SIZE_VALID] = BIT(19),
[HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
/* Bit 26 reserved */
[HDR_LEN_INC_DEAGG_HDR] = BIT(27),
[HDR_LEN_MSB] = GENMASK(29, 28),
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
[HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
[HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
[HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
/* Bits 14-15 reserved */
[HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
[HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
[HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
/* Bits 22-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
0x00000818, 0x0070);
static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
/* Bits 9-11 reserved */
[BYTE_THRESHOLD] = GENMASK(27, 12),
[PIPE_REPLICATION_EN] = BIT(28),
[PAD_EN] = BIT(29),
[DRBIP_ACL_ENABLE] = BIT(30),
/* Bit 31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
/* Bit 11 reserved */
[TIME_LIMIT] = GENMASK(16, 12),
[PKT_LIMIT] = GENMASK(22, 17),
[SW_EOF_ACTIVE] = BIT(23),
[FORCE_CLOSE] = BIT(24),
/* Bit 25 reserved */
[HARD_BYTE_LIMIT_EN] = BIT(26),
[AGGR_GRAN_SEL] = BIT(27),
/* Bits 28-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
0x0000082c, 0x0070);
static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
0x00000830, 0x0070);
static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
[PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
[IGNORE_MIN_PKT_ERR] = BIT(14),
/* Bit 15 reserved */
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
[STATUS_PKT_SUPPRESS] = BIT(9),
/* Bits 10-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
[FILTER_HASH_MSK_SRC_PORT] = BIT(3),
[FILTER_HASH_MSK_DST_PORT] = BIT(4),
[FILTER_HASH_MSK_PROTOCOL] = BIT(5),
[FILTER_HASH_MSK_METADATA] = BIT(6),
[FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
/* Bits 7-15 reserved */
[ROUTER_HASH_MSK_SRC_ID] = BIT(16),
[ROUTER_HASH_MSK_SRC_IP] = BIT(17),
[ROUTER_HASH_MSK_DST_IP] = BIT(18),
[ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
[ROUTER_HASH_MSK_DST_PORT] = BIT(20),
[ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
[ROUTER_HASH_MSK_METADATA] = BIT(22),
[ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
/* Bits 23-31 reserved */
};
REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct reg *reg_array[] = {
[COMP_CFG] = &reg_comp_cfg,
[CLKON_CFG] = &reg_clkon_cfg,
[ROUTE] = &reg_route,
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
[FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
[AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
[IPA_TX_CFG] = &reg_ipa_tx_cfg,
[FLAVOR_0] = &reg_flavor_0,
[IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
[QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
[TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
[TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
[SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
[SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
[DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
[DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
[ENDP_INIT_CFG] = &reg_endp_init_cfg,
[ENDP_INIT_NAT] = &reg_endp_init_nat,
[ENDP_INIT_HDR] = &reg_endp_init_hdr,
[ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
[ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
[ENDP_INIT_MODE] = &reg_endp_init_mode,
[ENDP_INIT_AGGR] = &reg_endp_init_aggr,
[ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
[ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
[ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
[ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
[ENDP_INIT_SEQ] = &reg_endp_init_seq,
[ENDP_STATUS] = &reg_endp_status,
[ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
[IPA_IRQ_STTS] = &reg_ipa_irq_stts,
[IPA_IRQ_EN] = &reg_ipa_irq_en,
[IPA_IRQ_CLR] = &reg_ipa_irq_clr,
[IPA_IRQ_UC] = &reg_ipa_irq_uc,
[IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
[IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
[IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
};
const struct regs ipa_regs_v4_11 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/ipa_reg-v4.11.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Linaro Ltd. */
#include <linux/types.h>
#include "../gsi.h"
#include "../reg.h"
#include "../gsi_reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
0x0000c024 + 0x1000 * GSI_EE_AP);
static const u32 reg_ch_c_cntxt_0_fmask[] = {
[CHTYPE_PROTOCOL] = GENMASK(2, 0),
[CHTYPE_DIR] = BIT(3),
[CH_EE] = GENMASK(7, 4),
[CHID] = GENMASK(12, 8),
[CHTYPE_PROTOCOL_MSB] = BIT(13),
[ERINDEX] = GENMASK(18, 14),
/* Bit 19 reserved */
[CHSTATE] = GENMASK(23, 20),
[ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
0x0000f000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_cntxt_1_fmask[] = {
[CH_R_LENGTH] = GENMASK(19, 0),
/* Bits 20-31 reserved */
};
REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
0x0000f004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ch_c_qos_fmask[] = {
[WRR_WEIGHT] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_PREFETCH] = BIT(8),
[USE_DB_ENG] = BIT(9),
[PREFETCH_MODE] = GENMASK(13, 10),
/* Bits 14-15 reserved */
[EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
[DB_IN_BYTES] = BIT(24),
/* Bits 25-31 reserved */
};
REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_error_log_fmask[] = {
[ERR_ARG3] = GENMASK(3, 0),
[ERR_ARG2] = GENMASK(7, 4),
[ERR_ARG1] = GENMASK(11, 8),
[ERR_CODE] = GENMASK(15, 12),
/* Bits 16-18 reserved */
[ERR_VIRT_IDX] = GENMASK(23, 19),
[ERR_TYPE] = GENMASK(27, 24),
[ERR_EE] = GENMASK(31, 28),
};
REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
0x0000f060 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
0x0000f064 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
0x0000f068 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
0x0000f06c + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
[EV_CHTYPE] = GENMASK(3, 0),
[EV_EE] = GENMASK(7, 4),
[EV_EVCHID] = GENMASK(15, 8),
[EV_INTYPE] = BIT(16),
/* Bits 17-19 reserved */
[EV_CHSTATE] = GENMASK(23, 20),
[EV_ELEMENT_SIZE] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
0x00010000 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
[R_LENGTH] = GENMASK(15, 0),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
0x00010004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
0x00010008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
0x0001000c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
0x00010010 + 0x4000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
[EV_MODT] = GENMASK(15, 0),
[EV_MODC] = GENMASK(23, 16),
[EV_MOD_CNT] = GENMASK(31, 24),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
0x00010020 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
0x00010024 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
0x00010028 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
0x0001002c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
0x00010030 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
0x00010034 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
0x00010048 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
0x00011100 + 0x4000 * GSI_EE_AP, 0x08);
static const u32 reg_gsi_status_fmask[] = {
[ENABLED] = BIT(0),
/* Bits 1-31 reserved */
};
REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP);
static const u32 reg_ch_cmd_fmask[] = {
[CH_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[CH_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP);
static const u32 reg_ev_ch_cmd_fmask[] = {
[EV_CHID] = GENMASK(7, 0),
/* Bits 8-23 reserved */
[EV_OPCODE] = GENMASK(31, 24),
};
REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP);
static const u32 reg_generic_cmd_fmask[] = {
[GENERIC_OPCODE] = GENMASK(4, 0),
[GENERIC_CHID] = GENMASK(9, 5),
[GENERIC_EE] = GENMASK(13, 10),
/* Bits 14-31 reserved */
};
REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP);
static const u32 reg_hw_param_2_fmask[] = {
[IRAM_SIZE] = GENMASK(2, 0),
[NUM_CH_PER_EE] = GENMASK(7, 3),
[NUM_EV_PER_EE] = GENMASK(12, 8),
[GSI_CH_PEND_TRANSLATE] = BIT(13),
[GSI_CH_FULL_LOGIC] = BIT(14),
[GSI_USE_SDMA] = BIT(15),
[GSI_SDMA_N_INT] = GENMASK(18, 16),
[GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
[GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
[GSI_USE_RD_WR_ENG] = BIT(30),
[GSI_USE_INTER_EE] = BIT(31),
};
REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
0x00012098 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
0x0001209c + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
0x000120a0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
0x000120a4 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
0x000120b8 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
0x000120c0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_intset_fmask[] = {
[INTYPE] = BIT(0)
/* Bits 1-31 reserved */
};
REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP);
REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP);
REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP);
static const u32 reg_cntxt_scratch_0_fmask[] = {
[INTER_EE_RESULT] = GENMASK(2, 0),
/* Bits 3-4 reserved */
[GENERIC_EE_RESULT] = GENMASK(7, 5),
/* Bits 8-31 reserved */
};
REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP);
static const struct reg *reg_array[] = {
[INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
[INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
[CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
[CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
[CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
[CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
[CH_C_QOS] = &reg_ch_c_qos,
[CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
[CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
[CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
[CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
[EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
[EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
[EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
[EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
[EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
[EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
[EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
[EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
[EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
[EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
[EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
[EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
[EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
[CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
[EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
[GSI_STATUS] = &reg_gsi_status,
[CH_CMD] = &reg_ch_cmd,
[EV_CH_CMD] = &reg_ev_ch_cmd,
[GENERIC_CMD] = &reg_generic_cmd,
[HW_PARAM_2] = &reg_hw_param_2,
[CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
[CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
[CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
[CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
[CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
[CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
[CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
[CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
[CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
[CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
[CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
[CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
[CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
[CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
[CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
[CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
[CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
[CNTXT_INTSET] = &reg_cntxt_intset,
[ERROR_LOG] = &reg_error_log,
[ERROR_LOG_CLR] = &reg_error_log_clr,
[CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
};
const struct regs gsi_regs_v4_9 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
| linux-master | drivers/net/ipa/reg/gsi_reg-v4.9.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Linaro Ltd. */
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.5 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
};
/* Resource groups used for an SoC having IPA v4.5 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UNUSED_0 = 0,
IPA_RSRC_GROUP_SRC_UL_DL,
IPA_RSRC_GROUP_SRC_UNUSED_2,
IPA_RSRC_GROUP_SRC_UNUSED_3,
IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_UNUSED_0 = 0,
IPA_RSRC_GROUP_DST_UL_DL_DPL,
IPA_RSRC_GROUP_DST_UNUSED_2,
IPA_RSRC_GROUP_DST_UNUSED_3,
IPA_RSRC_GROUP_DST_UC,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.5 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 8,
.max_reads = 0, /* no limit (hardware max) */
.max_reads_beats = 120,
},
[IPA_QSB_MASTER_PCIE] = {
.max_writes = 8,
.max_reads = 12,
/* no outstanding read byte (beat) limit */
},
};
/* Endpoint configuration data for an SoC having IPA v4.5 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 9,
.endpoint_id = 7,
.toward_ipa = true,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 20,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 10,
.endpoint_id = 16,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
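/* Note: ilog2(sizeof(u32)) evaluates to 2, so received payloads
 * are padded to a 4-byte (2^2) boundary
 */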
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 7,
.endpoint_id = 2,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 16,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 1,
.endpoint_id = 14,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 5,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 7,
.endpoint_id = 21,
.toward_ipa = false,
},
[IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 2,
.endpoint_id = 8,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
};
/* Source resource configuration data for an SoC having IPA v4.5 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 1, .max = 11,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 1, .max = 63,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 14, .max = 14,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 3, .max = 3,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 18, .max = 18,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_UNUSED_0] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UNUSED_2] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UNUSED_3] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 63,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 24, .max = 24,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
};
/* Destination resource configuration data for an SoC having IPA v4.5 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 16, .max = 16,
},
.limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
.min = 2, .max = 2,
},
.limits[IPA_RSRC_GROUP_DST_UNUSED_3] = {
.min = 2, .max = 2,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 2, .max = 63,
},
.limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
.min = 1, .max = 2,
},
.limits[IPA_RSRC_GROUP_DST_UNUSED_3] = {
.min = 1, .max = 2,
},
.limits[IPA_RSRC_GROUP_DST_UC] = {
.min = 0, .max = 2,
},
},
};
/* Resource configuration data for an SoC having IPA v4.5 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v4.5 */
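/* Layout note: the offsets below are consistent with each region's canary
 * words (4 bytes apiece) occupying the space immediately below that region's
 * offset; e.g. IPA_MEM_V4_FILTER starts at 0x0288 + 0x0078 + 2 * 4 = 0x0308.
 */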
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0b20,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x15f0,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_NAT_TABLE,
.offset = 0x1800,
.size = 0x0d00,
.canary_count = 4,
},
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x2510,
.size = 0x0030,
.canary_count = 4,
},
{
.id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x2540,
.size = 0x0048,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_TETHERING,
.offset = 0x2588,
.size = 0x0238,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_FILTER_ROUTE,
.offset = 0x27c0,
.size = 0x0800,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_DROP,
.offset = 0x2fc0,
.size = 0x0020,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x2fe8,
.size = 0x0800,
.canary_count = 2,
},
{
.id = IPA_MEM_UC_EVENT_RING,
.offset = 0x3800,
.size = 0x1000,
.canary_count = 1,
},
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x4800,
.size = 0x0050,
.canary_count = 0,
},
};
/* Memory configuration data for an SoC having IPA v4.5 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x14688000,
.imem_size = 0x00003000,
.smem_id = 497,
.smem_size = 0x00009000,
};
/* Interconnect rates are in 1000 byte/second units */
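/* For example, the "memory" peak_bandwidth value of 600000 below represents
 * 600000 * 1000 = 600,000,000 bytes/second, i.e. the 600 MBps noted there.
 */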
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 600000, /* 600 MBps */
.average_bandwidth = 150000, /* 150 MBps */
},
/* Average rate is unused for the next two interconnects */
{
.name = "imem",
.peak_bandwidth = 450000, /* 450 MBps */
.average_bandwidth = 75000, /* 75 MBps (unused?) */
},
{
.name = "config",
.peak_bandwidth = 171400, /* 171.4 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v4.5 */
static const struct ipa_power_data ipa_power_data = {
.core_clock_rate = 150 * 1000 * 1000, /* Hz (150? 60?) */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v4.5 */
const struct ipa_data ipa_data_v4_5 = {
.version = IPA_VERSION_4_5,
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
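/* Routing table entries set aside for use by the modem */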
.modem_route_count = 8,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};
| linux-master | drivers/net/ipa/data/ipa_data-v4.5.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2021 Linaro Ltd.
*/
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_HDR_SECTORS,
IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
};
/* Resource groups used for an SoC having IPA v3.1 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL = 0,
IPA_RSRC_GROUP_SRC_DL,
IPA_RSRC_GROUP_SRC_DIAG,
IPA_RSRC_GROUP_SRC_DMA,
IPA_RSRC_GROUP_SRC_UNUSED,
IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_UL = 0,
IPA_RSRC_GROUP_DST_DL,
IPA_RSRC_GROUP_DST_DIAG_DPL,
IPA_RSRC_GROUP_DST_DMA,
IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL,
IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v3.1 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 8,
.max_reads = 8,
},
[IPA_QSB_MASTER_PCIE] = {
.max_writes = 2,
.max_reads = 8,
},
};
/* Endpoint data for an SoC having IPA v3.1 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 6,
.endpoint_id = 22,
.toward_ipa = true,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 18,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 7,
.endpoint_id = 15,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 8,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 5,
.endpoint_id = 3,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 16,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 8,
.endpoint_id = 16,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 8,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_DL,
.checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_LAN_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 4,
.endpoint_id = 9,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 5,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 5,
.endpoint_id = 18,
.toward_ipa = false,
},
};
/* Source resource configuration data for an SoC having IPA v3.1 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 3, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 3, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 1, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 1, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 2, .max = 255,
},
},
[IPA_RESOURCE_TYPE_SRC_HDR_SECTORS] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 255,
},
},
[IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 255,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 14, .max = 14,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 16, .max = 16,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 5, .max = 5,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 5, .max = 5,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 19, .max = 19,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 26, .max = 26,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 5, .max = 5, /* 3 downstream */
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 5, .max = 5, /* 7 downstream */
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 255,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 255,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 19, .max = 19,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 26, .max = 26,
},
.limits[IPA_RSRC_GROUP_SRC_DIAG] = {
.min = 5, .max = 5,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 5, .max = 5,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
};
/* Destination resource configuration data for an SoC having IPA v3.1 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_UL] = {
.min = 3, .max = 3, /* 2 downstream */
},
.limits[IPA_RSRC_GROUP_DST_DL] = {
.min = 3, .max = 3,
},
.limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
.min = 1, .max = 1, /* 0 downstream */
},
/* IPA_RSRC_GROUP_DST_DMA uses 2 downstream */
.limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
.min = 3, .max = 3,
},
.limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = {
.min = 3, .max = 3,
},
},
[IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_DST_UL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_DST_DL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_DST_DMA] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
.min = 0, .max = 255,
},
.limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = {
.min = 0, .max = 255,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_UL] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_DST_DL] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_DST_DMA] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
.min = 1, .max = 1,
},
},
};
/* Resource configuration data for an SoC having IPA v3.1 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v3.1 */
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0140,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x07d0,
.size = 0x0200,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x09d0,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x0bd8,
.size = 0x1424,
.canary_count = 0,
},
{
.id = IPA_MEM_END_MARKER,
.offset = 0x2000,
.size = 0,
.canary_count = 1,
},
};
/* Memory configuration data for an SoC having IPA v3.1 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
.smem_id = 497,
.smem_size = 0x00002000,
};
/* Interconnect bandwidths are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 640000, /* 640 MBps */
.average_bandwidth = 80000, /* 80 MBps */
},
{
.name = "imem",
.peak_bandwidth = 640000, /* 640 MBps */
.average_bandwidth = 80000, /* 80 MBps */
},
/* Average bandwidth is unused for the next interconnect */
{
.name = "config",
.peak_bandwidth = 80000, /* 80 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v3.1 */
static const struct ipa_power_data ipa_power_data = {
.core_clock_rate = 16 * 1000 * 1000, /* Hz */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v3.1 */
const struct ipa_data ipa_data_v3_1 = {
.version = IPA_VERSION_3_1,
.backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY),
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.modem_route_count = 8,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};
| linux-master | drivers/net/ipa/data/ipa_data-v3.1.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021 Linaro Ltd. */
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.2 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
};
/* Resource groups used for an SoC having IPA v4.2 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL_DL = 0,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.2 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 8,
.max_reads = 12,
/* no outstanding read byte (beat) limit */
},
};
/* Endpoint configuration data for an SoC having IPA v4.2 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 1,
.endpoint_id = 6,
.toward_ipa = true,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 20,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 2,
.endpoint_id = 8,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 6,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 0,
.endpoint_id = 1,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 8,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_1_PASS_SKIP_LAST_UC,
.seq_rep_type = IPA_SEQ_REP_DMA_PARSER,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 3,
.endpoint_id = 9,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 6,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_COMMAND_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 1,
.endpoint_id = 5,
.toward_ipa = true,
},
[IPA_ENDPOINT_MODEM_LAN_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 3,
.endpoint_id = 11,
.toward_ipa = false,
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 4,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 2,
.endpoint_id = 10,
.toward_ipa = false,
},
};
/* Source resource configuration data for an SoC having IPA v4.2 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 3, .max = 63,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 3, .max = 3,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 10, .max = 10,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 1, .max = 1,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 5, .max = 5,
},
},
};
/* Destination resource configuration data for an SoC having IPA v4.2 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 3, .max = 3,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 1, .max = 63,
},
},
};
/* Resource configuration data for an SoC having IPA v4.2 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v4.2 */
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0290,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0310,
.size = 0,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0318,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0398,
.size = 0,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x03a0,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0420,
.size = 0,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0428,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x04a8,
.size = 0x0140,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x05f0,
.size = 0x0200,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x07f0,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x09f8,
.size = 0x0050,
.canary_count = 2,
},
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x0a50,
.size = 0x0060,
.canary_count = 2,
},
{
.id = IPA_MEM_STATS_TETHERING,
.offset = 0x0ab0,
.size = 0x0140,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x0bf0,
.size = 0x140c,
.canary_count = 0,
},
{
.id = IPA_MEM_END_MARKER,
.offset = 0x2000,
.size = 0,
.canary_count = 1,
},
};
/* Memory configuration data for an SoC having IPA v4.2 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
.smem_id = 497,
.smem_size = 0x00002000,
};
/* Interconnect rates are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 465000, /* 465 MBps */
.average_bandwidth = 80000, /* 80 MBps */
},
/* Average bandwidth is unused for the next two interconnects */
{
.name = "imem",
.peak_bandwidth = 68570, /* 68.570 MBps */
.average_bandwidth = 0, /* unused */
},
{
.name = "config",
.peak_bandwidth = 30000, /* 30 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v4.2 */
static const struct ipa_power_data ipa_power_data = {
.core_clock_rate = 100 * 1000 * 1000, /* Hz */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v4.2 */
const struct ipa_data ipa_data_v4_2 = {
.version = IPA_VERSION_4_2,
/* backward_compat value is 0 */
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.modem_route_count = 8,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};
| linux-master | drivers/net/ipa/data/ipa_data-v4.2.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Linaro Ltd. */
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
};
/* Resource groups used for an SoC having IPA v4.9 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL_DL = 0,
IPA_RSRC_GROUP_SRC_DMA,
IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
IPA_RSRC_GROUP_DST_DMA,
IPA_RSRC_GROUP_DST_UC,
IPA_RSRC_GROUP_DST_DRB_IP,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.9 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 8,
.max_reads = 0, /* no limit (hardware max) */
.max_reads_beats = 120,
},
};
/* Endpoint configuration data for an SoC having IPA v4.9 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 6,
.endpoint_id = 7,
.toward_ipa = true,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 20,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 7,
.endpoint_id = 11,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 2,
.endpoint_id = 2,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 16,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 12,
.endpoint_id = 20,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 5,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 7,
.endpoint_id = 16,
.toward_ipa = false,
},
[IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 2,
.endpoint_id = 8,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
};
/* Source resource configuration data for an SoC having IPA v4.9 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 1, .max = 12,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 1, .max = 12,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 20, .max = 20,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 2, .max = 2,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 3, .max = 3,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 38, .max = 38,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 4, .max = 4,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 0, .max = 4,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 0, .max = 4,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 4,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 30, .max = 30,
},
.limits[IPA_RSRC_GROUP_SRC_DMA] = {
.min = 8, .max = 8,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
};
/* Destination resource configuration data for an SoC having IPA v4.9 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 9, .max = 9,
},
.limits[IPA_RSRC_GROUP_DST_DMA] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_DST_UC] = {
.min = 1, .max = 1,
},
.limits[IPA_RSRC_GROUP_DST_DRB_IP] = {
.min = 39, .max = 39,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 2, .max = 3,
},
.limits[IPA_RSRC_GROUP_DST_DMA] = {
.min = 1, .max = 2,
},
.limits[IPA_RSRC_GROUP_DST_UC] = {
.min = 0, .max = 2,
},
},
};
/* Resource configuration data for an SoC having IPA v4.9 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v4.9 */
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0b20,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x15f0,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_NAT_TABLE,
.offset = 0x1800,
.size = 0x0d00,
.canary_count = 4,
},
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x2510,
.size = 0x0030,
.canary_count = 4,
},
{
.id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x2540,
.size = 0x0048,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_TETHERING,
.offset = 0x2588,
.size = 0x0238,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_FILTER_ROUTE,
.offset = 0x27c0,
.size = 0x0800,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_DROP,
.offset = 0x2fc0,
.size = 0x0020,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x2fe8,
.size = 0x0800,
.canary_count = 2,
},
{
.id = IPA_MEM_UC_EVENT_RING,
.offset = 0x3800,
.size = 0x1000,
.canary_count = 1,
},
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x4800,
.size = 0x0050,
.canary_count = 0,
},
};
/* Memory configuration data for an SoC having IPA v4.9 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
.smem_id = 497,
.smem_size = 0x00009000,
};
/* Interconnect rates are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 600000, /* 600 MBps */
.average_bandwidth = 150000, /* 150 MBps */
},
/* Average rate is unused for the next interconnect */
{
.name = "config",
.peak_bandwidth = 74000, /* 74 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v4.9 */
static const struct ipa_power_data ipa_power_data = {
.core_clock_rate = 60 * 1000 * 1000, /* Hz */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v4.9. */
const struct ipa_data ipa_data_v4_9 = {
.version = IPA_VERSION_4_9,
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.modem_route_count = 8,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};
| linux-master | drivers/net/ipa/data/ipa_data-v4.9.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Linaro Ltd. */
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.7 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
};
/* Resource groups used for an SoC having IPA v4.7 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL_DL = 0,
IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
IPA_RSRC_GROUP_DST_UNUSED_1,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.7 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 8,
.max_reads = 0, /* no limit (hardware max) */
.max_reads_beats = 120,
},
};
/* Endpoint configuration data for an SoC having IPA v4.7 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 5,
.endpoint_id = 7,
.toward_ipa = true,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 20,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 14,
.endpoint_id = 9,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 2,
.endpoint_id = 2,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 16,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 7,
.endpoint_id = 16,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.qmap = true,
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 5,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 7,
.endpoint_id = 14,
.toward_ipa = false,
},
[IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 2,
.endpoint_id = 8,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
};
/* Source resource configuration data for an SoC having IPA v4.7 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 18, .max = 18,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 2, .max = 2,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 15, .max = 15,
},
},
};
/* Destination resource configuration data for an SoC having IPA v4.7 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 7, .max = 7,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 2, .max = 2,
},
},
};
/* Resource configuration data for an SoC having IPA v4.7 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v4.7 */
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0200,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x0cd0,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_NAT_TABLE,
.offset = 0x0ee0,
.size = 0x0d00,
.canary_count = 4,
},
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x1be8,
.size = 0x0050,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x1c40,
.size = 0x0030,
.canary_count = 4,
},
{
.id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x1c70,
.size = 0x0048,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_TETHERING,
.offset = 0x1cb8,
.size = 0x0238,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_DROP,
.offset = 0x1ef0,
.size = 0x0020,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x1f18,
.size = 0x100c,
.canary_count = 2,
},
{
.id = IPA_MEM_END_MARKER,
.offset = 0x3000,
.size = 0x0000,
.canary_count = 1,
},
};
/* Memory configuration data for an SoC having IPA v4.7 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
.smem_id = 497,
.smem_size = 0x00009000,
};
/* Interconnect rates are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 600000, /* 600 MBps */
.average_bandwidth = 150000, /* 150 MBps */
},
/* Average rate is unused for the next two interconnects */
{
.name = "imem",
.peak_bandwidth = 450000, /* 450 MBps */
.average_bandwidth = 75000, /* 75 MBps (unused?) */
},
{
.name = "config",
.peak_bandwidth = 171400, /* 171.4 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v4.7 */
static const struct ipa_power_data ipa_power_data = {
/* XXX Downstream code says 150 MHz (DT SVS2), 60 MHz (code) */
.core_clock_rate = 100 * 1000 * 1000, /* Hz (150? 60?) */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v4.7 */
const struct ipa_data ipa_data_v4_7 = {
.version = IPA_VERSION_4_7,
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.modem_route_count = 8,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};
| linux-master | drivers/net/ipa/data/ipa_data-v4.7.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Linaro Ltd. */
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.11 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
};
/* Resource groups used for an SoC having IPA v4.11 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL_DL = 0,
IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_UNUSED_2,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
IPA_RSRC_GROUP_DST_UNUSED_1,
IPA_RSRC_GROUP_DST_DRB_IP,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.11 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 12,
.max_reads = 13,
.max_reads_beats = 120,
},
};
/* Endpoint configuration data for an SoC having IPA v4.11 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 5,
.endpoint_id = 7,
.toward_ipa = true,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 20,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 14,
.endpoint_id = 9,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 2,
.endpoint_id = 2,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 16,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 7,
.endpoint_id = 16,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
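/* 32 KB aggregation buffer here, vs. the 8192-byte buffers
 * used by the other RX endpoints in this file
 */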
.buffer_size = 32768,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 5,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 7,
.endpoint_id = 14,
.toward_ipa = false,
},
[IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 2,
.endpoint_id = 8,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
};
/* Source resource configuration data for an SoC having IPA v4.11 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 6, .max = 6,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 18, .max = 18,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 2, .max = 2,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 15, .max = 15,
},
},
};
/* Destination resource configuration data for an SoC having IPA v4.11 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 3, .max = 3,
},
.limits[IPA_RSRC_GROUP_DST_DRB_IP] = {
.min = 25, .max = 25,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 2, .max = 2,
},
},
};
/* Resource configuration data for an SoC having IPA v4.11 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v4.11 */
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0200,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x0cd0,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_NAT_TABLE,
.offset = 0x0ee0,
.size = 0x0d00,
.canary_count = 4,
},
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x1be8,
.size = 0x0050,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x1c40,
.size = 0x0030,
.canary_count = 4,
},
{
.id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x1c70,
.size = 0x0048,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_TETHERING,
.offset = 0x1cb8,
.size = 0x0238,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_DROP,
.offset = 0x1ef0,
.size = 0x0020,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x1f18,
.size = 0x100c,
.canary_count = 2,
},
{
.id = IPA_MEM_END_MARKER,
.offset = 0x3000,
.size = 0x0000,
.canary_count = 1,
},
};
/* Memory configuration data for an SoC having IPA v4.11 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
.smem_id = 497,
.smem_size = 0x00009000,
};
/* Interconnect rates are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 600000, /* 600 MBps */
.average_bandwidth = 150000, /* 150 MBps */
},
/* Average rate is unused for the next interconnect */
{
.name = "config",
.peak_bandwidth = 74000, /* 74 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v4.11 */
static const struct ipa_power_data ipa_power_data = {
.core_clock_rate = 60 * 1000 * 1000, /* Hz */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v4.11 */
const struct ipa_data ipa_data_v4_11 = {
.version = IPA_VERSION_4_11,
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.modem_route_count = 8,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};
| linux-master | drivers/net/ipa/data/ipa_data-v4.11.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Linaro Ltd. */
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v5.0 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
IPA_RESOURCE_TYPE_DST_ULSO_SEGMENTS,
};
/* Resource groups used for an SoC having IPA v5.0 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL = 0,
IPA_RSRC_GROUP_SRC_DL,
IPA_RSRC_GROUP_SRC_UNUSED_2,
IPA_RSRC_GROUP_SRC_UNUSED_3,
IPA_RSRC_GROUP_SRC_URLLC,
IPA_RSRC_GROUP_SRC_U_RX_QC,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_UL = 0,
IPA_RSRC_GROUP_DST_DL,
IPA_RSRC_GROUP_DST_DMA,
IPA_RSRC_GROUP_DST_QDSS,
IPA_RSRC_GROUP_DST_CV2X,
IPA_RSRC_GROUP_DST_UC,
IPA_RSRC_GROUP_DST_DRB_IP,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v5.0 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 0,
.max_reads = 0, /* no limit (hardware max) */
.max_reads_beats = 0,
},
[IPA_QSB_MASTER_PCIE] = {
.max_writes = 0,
.max_reads = 0, /* no limit (hardware max) */
.max_reads_beats = 0,
},
};
/* Endpoint configuration data for an SoC having IPA v5.0 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 12,
.endpoint_id = 14,
.toward_ipa = true,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 20,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 13,
.endpoint_id = 16,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 11,
.endpoint_id = 2,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 25,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 1,
.endpoint_id = 23,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 9,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_DL,
.checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 12,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 7,
.endpoint_id = 21,
.toward_ipa = false,
},
[IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 2,
.endpoint_id = 15,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
};
/* Source resource configuration data for an SoC having IPA v5.0 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 3, .max = 9,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 4, .max = 10,
},
.limits[IPA_RSRC_GROUP_SRC_URLLC] = {
.min = 1, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_U_RX_QC] = {
.min = 0, .max = 63,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 9, .max = 9,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 12, .max = 12,
},
.limits[IPA_RSRC_GROUP_SRC_URLLC] = {
.min = 10, .max = 10,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 9, .max = 9,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 24, .max = 24,
},
.limits[IPA_RSRC_GROUP_SRC_URLLC] = {
.min = 20, .max = 20,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_URLLC] = {
.min = 1, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_U_RX_QC] = {
.min = 0, .max = 63,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_UL] = {
.min = 22, .max = 22,
},
.limits[IPA_RSRC_GROUP_SRC_DL] = {
.min = 16, .max = 16,
},
.limits[IPA_RSRC_GROUP_SRC_URLLC] = {
.min = 16, .max = 16,
},
},
};
/* Destination resource configuration data for an SoC having IPA v5.0 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_UL] = {
.min = 6, .max = 6,
},
.limits[IPA_RSRC_GROUP_DST_DL] = {
.min = 5, .max = 5,
},
.limits[IPA_RSRC_GROUP_DST_DRB_IP] = {
.min = 39, .max = 39,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_UL] = {
.min = 0, .max = 3,
},
.limits[IPA_RSRC_GROUP_DST_DL] = {
.min = 0, .max = 3,
},
},
[IPA_RESOURCE_TYPE_DST_ULSO_SEGMENTS] = {
.limits[IPA_RSRC_GROUP_DST_UL] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_DST_DL] = {
.min = 0, .max = 63,
},
},
};
/* Resource configuration data for an SoC having IPA v5.0 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v5.0 */
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_EVENT_RING,
.offset = 0x0000,
.size = 0x1000,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x1000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x1080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x1288,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x1308,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x1388,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x1408,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x1488,
.size = 0x0098,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x1528,
.size = 0x0098,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x15c8,
.size = 0x0098,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x1668,
.size = 0x0098,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x1708,
.size = 0x0240,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_HEADER,
.offset = 0x1948,
.size = 0x01e0,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x1b40,
.size = 0x0b20,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x2660,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x2868,
.size = 0x0060,
.canary_count = 2,
},
{
.id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x28c8,
.size = 0x0048,
.canary_count = 0,
},
{
.id = IPA_MEM_AP_V4_FILTER,
.offset = 0x2918,
.size = 0x0118,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_V6_FILTER,
.offset = 0x2aa0,
.size = 0x0228,
.canary_count = 0,
},
{
.id = IPA_MEM_STATS_FILTER_ROUTE,
.offset = 0x2cd0,
.size = 0x0ba0,
.canary_count = 2,
},
{
.id = IPA_MEM_STATS_DROP,
.offset = 0x3870,
.size = 0x0020,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x3898,
.size = 0x0d48,
.canary_count = 2,
},
{
.id = IPA_MEM_NAT_TABLE,
.offset = 0x45e0,
.size = 0x0900,
.canary_count = 0,
},
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x4ee8,
.size = 0x0100,
.canary_count = 2,
},
};
/* Memory configuration data for an SoC having IPA v5.0 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x14688000,
.imem_size = 0x00003000,
.smem_id = 497,
.smem_size = 0x00009000,
};
/* Interconnect rates are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 1900000, /* 1.9 GBps */
.average_bandwidth = 600000, /* 600 MBps */
},
/* Average rate is unused for the next interconnect */
{
.name = "config",
.peak_bandwidth = 76800, /* 76.8 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v5.0 */
static const struct ipa_power_data ipa_power_data = {
.core_clock_rate = 120 * 1000 * 1000, /* Hz */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v5.0. */
const struct ipa_data ipa_data_v5_0 = {
.version = IPA_VERSION_5_0,
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.modem_route_count = 11,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};
| linux-master | drivers/net/ipa/data/ipa_data-v5.0.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2021 Linaro Ltd.
*/
#include <linux/log2.h>
#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.5.1 */
enum ipa_resource_type {
/* Source resource types; first must have value 0 */
IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
/* Destination resource types; first must have value 0 */
IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
IPA_RESOURCE_TYPE_DST_DPS_DMARS,
};
/* Resource groups used for an SoC having IPA v3.5.1 */
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_LWA_DL = 0,
IPA_RSRC_GROUP_SRC_UL_DL,
IPA_RSRC_GROUP_SRC_MHI_DMA,
IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
IPA_RSRC_GROUP_DST_LWA_DL = 0,
IPA_RSRC_GROUP_DST_UL_DL_DPL,
IPA_RSRC_GROUP_DST_UNUSED_2,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
.max_writes = 8,
.max_reads = 8,
},
[IPA_QSB_MASTER_PCIE] = {
.max_writes = 4,
.max_reads = 12,
},
};
/* Endpoint configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
[IPA_ENDPOINT_AP_COMMAND_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 4,
.endpoint_id = 5,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 256,
.tlv_count = 20,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
.tx = {
.seq_type = IPA_SEQ_DMA,
},
},
},
},
[IPA_ENDPOINT_AP_LAN_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 5,
.endpoint_id = 9,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 8,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.aggregation = true,
.status_enable = true,
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_TX] = {
.ee_id = GSI_EE_AP,
.channel_id = 3,
.endpoint_id = 2,
.toward_ipa = true,
.channel = {
.tre_count = 512,
.event_count = 512,
.tlv_count = 16,
},
.endpoint = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
.checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
.seq_rep_type = IPA_SEQ_REP_DMA_PARSER,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
},
},
},
[IPA_ENDPOINT_AP_MODEM_RX] = {
.ee_id = GSI_EE_AP,
.channel_id = 6,
.endpoint_id = 10,
.toward_ipa = false,
.channel = {
.tre_count = 256,
.event_count = 256,
.tlv_count = 8,
},
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
.checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
},
},
[IPA_ENDPOINT_MODEM_LAN_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 0,
.endpoint_id = 3,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_TX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 4,
.endpoint_id = 6,
.toward_ipa = true,
.endpoint = {
.filter_support = true,
},
},
[IPA_ENDPOINT_MODEM_AP_RX] = {
.ee_id = GSI_EE_MODEM,
.channel_id = 2,
.endpoint_id = 12,
.toward_ipa = false,
},
};
/* Source resource configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
.min = 1, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 1, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 1, .max = 63,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
.limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
.min = 10, .max = 10,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 10, .max = 10,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
.limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
.min = 12, .max = 12,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 14, .max = 14,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 8, .max = 8,
},
},
[IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
.limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_MHI_DMA] = {
.min = 0, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 0, .max = 63,
},
},
[IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
.limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
.min = 14, .max = 14,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
.min = 20, .max = 20,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 14, .max = 14,
},
},
};
/* Destination resource configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
.limits[IPA_RSRC_GROUP_DST_LWA_DL] = {
.min = 4, .max = 4,
},
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 4, .max = 4,
},
.limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
.min = 3, .max = 3,
}
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
.limits[IPA_RSRC_GROUP_DST_LWA_DL] = {
.min = 2, .max = 63,
},
.limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
.min = 1, .max = 63,
},
.limits[IPA_RSRC_GROUP_DST_UNUSED_2] = {
.min = 1, .max = 2,
}
},
};
/* Resource configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_resource_data ipa_resource_data = {
.rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
.rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
.resource_src_count = ARRAY_SIZE(ipa_resource_src),
.resource_src = ipa_resource_src,
.resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
.resource_dst = ipa_resource_dst,
};
/* IPA-resident memory region data for an SoC having IPA v3.5.1 */
static const struct ipa_mem ipa_mem_local_data[] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0140,
.canary_count = 2,
},
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x07d0,
.size = 0x0200,
.canary_count = 2,
},
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x09d0,
.size = 0x0200,
.canary_count = 0,
},
{
.id = IPA_MEM_MODEM,
.offset = 0x0bd8,
.size = 0x1024,
.canary_count = 0,
},
{
.id = IPA_MEM_UC_EVENT_RING,
.offset = 0x1c00,
.size = 0x0400,
.canary_count = 1,
},
};
/* Memory configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_mem_data ipa_mem_data = {
.local_count = ARRAY_SIZE(ipa_mem_local_data),
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
.smem_id = 497,
.smem_size = 0x00002000,
};
/* Interconnect bandwidths are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
.peak_bandwidth = 600000, /* 600 MBps */
.average_bandwidth = 80000, /* 80 MBps */
},
/* Average bandwidth is unused for the next two interconnects */
{
.name = "imem",
.peak_bandwidth = 350000, /* 350 MBps */
.average_bandwidth = 0, /* unused */
},
{
.name = "config",
.peak_bandwidth = 40000, /* 40 MBps */
.average_bandwidth = 0, /* unused */
},
};
/* Clock and interconnect configuration data for an SoC having IPA v3.5.1 */
static const struct ipa_power_data ipa_power_data = {
.core_clock_rate = 75 * 1000 * 1000, /* Hz */
.interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
.interconnect_data = ipa_interconnect_data,
};
/* Configuration data for an SoC having IPA v3.5.1 */
const struct ipa_data ipa_data_v3_5_1 = {
.version = IPA_VERSION_3_5_1,
.backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY) |
BIT(BCR_TX_NOT_USING_BRESP) |
BIT(BCR_SUSPEND_L2_IRQ) |
BIT(BCR_HOLB_DROP_L2_IRQ) |
BIT(BCR_DUAL_TX),
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.modem_route_count = 8,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
.power_data = &ipa_power_data,
};
| linux-master | drivers/net/ipa/data/ipa_data-v3.5.1.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* rrunner.c: Linux driver for the Essential RoadRunner HIPPI board.
*
* Copyright (C) 1998-2002 by Jes Sorensen, <[email protected]>.
*
* Thanks to Essential Communication for providing us with hardware
* and very comprehensive documentation without which I would not have
* been able to write this driver. A special thank you to John Gibbon
* for sorting out the legal issues, with the NDA, allowing the code to
* be released under the GPL.
*
* Thanks to Jayaram Bhat from ODS/Essential for fixing some of the
* stupid bugs in my code.
*
* Softnet support and various other patches from Val Henson of
* ODS/Essential.
*
* PCI DMA mapping code partly based on work by Francois Romieu.
*/
#define DEBUG 1
#define RX_DMA_SKBUFF 1
#define PKT_COPY_THRESHOLD 512
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/hippidevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#define rr_if_busy(dev) netif_queue_stopped(dev)
#define rr_if_running(dev) netif_running(dev)
#include "rrunner.h"
#define RUN_AT(x) (jiffies + (x))
MODULE_AUTHOR("Jes Sorensen <[email protected]>");
MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
MODULE_LICENSE("GPL");
static const char version[] =
"rrunner.c: v0.50 11/11/2002 Jes Sorensen ([email protected])\n";
static const struct net_device_ops rr_netdev_ops = {
.ndo_open = rr_open,
.ndo_stop = rr_close,
.ndo_siocdevprivate = rr_siocdevprivate,
.ndo_start_xmit = rr_start_xmit,
.ndo_set_mac_address = hippi_mac_addr,
};
/*
* Implementation notes:
*
* The DMA engine only allows for DMA within physical 64KB chunks of
* memory. The current approach of the driver (and stack) is to use
* linear blocks of memory for the skbuffs. However, as the data block
* is always the first part of the skb and skbs are 2^n aligned, we
* are guaranteed to get the whole block within one 64KB-aligned
* 64KB chunk.
*
* On the long term, relying on being able to allocate 64KB linear
* chunks of memory is not feasible and the skb handling code and the
* stack will need to know about I/O vectors or something similar.
*/
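/*
* Illustrative helper, not part of the original driver: a buffer of
* @len bytes starting at @addr fits within a single 64KB chunk
* exactly when (addr & 0xffff) + len <= 0x10000, which is the
* property the note above relies on.
*/
static inline bool rr_buf_fits_in_64k(unsigned long addr, unsigned long len)
{
return (addr & 0xffff) + len <= 0x10000;
}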
static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
static int version_disp;
u8 pci_latency;
struct rr_private *rrpriv;
void *tmpptr;
dma_addr_t ring_dma;
int ret = -ENOMEM;
dev = alloc_hippi_dev(sizeof(struct rr_private));
if (!dev)
goto out3;
ret = pci_enable_device(pdev);
if (ret) {
ret = -ENODEV;
goto out2;
}
rrpriv = netdev_priv(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
ret = pci_request_regions(pdev, "rrunner");
if (ret < 0)
goto out;
pci_set_drvdata(pdev, dev);
rrpriv->pci_dev = pdev;
spin_lock_init(&rrpriv->lock);
dev->netdev_ops = &rr_netdev_ops;
/* display version info if adapter is found */
if (!version_disp) {
/* set display flag to TRUE so that */
/* we only display this string ONCE */
version_disp = 1;
printk(version);
}
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
if (pci_latency <= 0x58){
pci_latency = 0x58;
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency);
}
pci_set_master(pdev);
printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
"at 0x%llx, irq %i, PCI latency %i\n", dev->name,
(unsigned long long)pci_resource_start(pdev, 0),
pdev->irq, pci_latency);
/*
* Remap the MMIO regs into kernel space.
*/
rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
if (!rrpriv->regs) {
printk(KERN_ERR "%s: Unable to map I/O register, "
"RoadRunner will be disabled.\n", dev->name);
ret = -EIO;
goto out;
}
tmpptr = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
GFP_KERNEL);
rrpriv->tx_ring = tmpptr;
rrpriv->tx_ring_dma = ring_dma;
if (!tmpptr) {
ret = -ENOMEM;
goto out;
}
tmpptr = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
GFP_KERNEL);
rrpriv->rx_ring = tmpptr;
rrpriv->rx_ring_dma = ring_dma;
if (!tmpptr) {
ret = -ENOMEM;
goto out;
}
tmpptr = dma_alloc_coherent(&pdev->dev, EVT_RING_SIZE, &ring_dma,
GFP_KERNEL);
rrpriv->evt_ring = tmpptr;
rrpriv->evt_ring_dma = ring_dma;
if (!tmpptr) {
ret = -ENOMEM;
goto out;
}
/*
* Don't access any register before this point!
*/
#ifdef __BIG_ENDIAN
writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP,
&rrpriv->regs->HostCtrl);
#endif
/*
* Need to add a case for little-endian 64-bit hosts here.
*/
rr_init(dev);
ret = register_netdev(dev);
if (ret)
goto out;
return 0;
out:
if (rrpriv->evt_ring)
dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rrpriv->evt_ring,
rrpriv->evt_ring_dma);
if (rrpriv->rx_ring)
dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rrpriv->rx_ring,
rrpriv->rx_ring_dma);
if (rrpriv->tx_ring)
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rrpriv->tx_ring,
rrpriv->tx_ring_dma);
if (rrpriv->regs)
pci_iounmap(pdev, rrpriv->regs);
if (pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
out2:
free_netdev(dev);
out3:
return ret;
}
static void rr_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rr_private *rr = netdev_priv(dev);
if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
printk(KERN_ERR "%s: trying to unload running NIC\n",
dev->name);
writel(HALT_NIC, &rr->regs->HostCtrl);
}
unregister_netdev(dev);
dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rr->evt_ring,
rr->evt_ring_dma);
dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rr->rx_ring,
rr->rx_ring_dma);
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rr->tx_ring,
rr->tx_ring_dma);
pci_iounmap(pdev, rr->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
free_netdev(dev);
}
/*
* Commands are considered to be slow, thus there is no reason to
* inline this.
*/
static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd)
{
struct rr_regs __iomem *regs;
u32 idx;
regs = rrpriv->regs;
/*
* This is temporary - it will go away in the final version.
* We probably also want to make this function inline.
*/
if (readl(&regs->HostCtrl) & NIC_HALTED){
printk("issuing command for halted NIC, code 0x%x, "
"HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl));
if (readl(&regs->Mode) & FATAL_ERR)
printk("error codes Fail1 %02x, Fail2 %02x\n",
readl(&regs->Fail1), readl(&regs->Fail2));
}
idx = rrpriv->info->cmd_ctrl.pi;
writel(*(u32*)(cmd), &regs->CmdRing[idx]);
wmb();
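/*
* Note that the command ring producer index counts downwards,
* wrapping modulo CMD_RING_ENTRIES.
*/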
idx = (idx - 1) % CMD_RING_ENTRIES;
rrpriv->info->cmd_ctrl.pi = idx;
wmb();
if (readl(&regs->Mode) & FATAL_ERR)
printk("error code %02x\n", readl(&regs->Fail1));
}
/*
* Reset the board in a sensible manner. The NIC is already halted
* when we get here and a spin-lock is held.
*/
static int rr_reset(struct net_device *dev)
{
struct rr_private *rrpriv;
struct rr_regs __iomem *regs;
u32 start_pc;
int i;
rrpriv = netdev_priv(dev);
regs = rrpriv->regs;
rr_load_firmware(dev);
writel(0x01000000, &regs->TX_state);
writel(0xff800000, &regs->RX_state);
writel(0, &regs->AssistState);
writel(CLEAR_INTA, &regs->LocalCtrl);
writel(0x01, &regs->BrkPt);
writel(0, &regs->Timer);
writel(0, &regs->TimerRef);
writel(RESET_DMA, &regs->DmaReadState);
writel(RESET_DMA, &regs->DmaWriteState);
writel(0, &regs->DmaWriteHostHi);
writel(0, &regs->DmaWriteHostLo);
writel(0, &regs->DmaReadHostHi);
writel(0, &regs->DmaReadHostLo);
writel(0, &regs->DmaReadLen);
writel(0, &regs->DmaWriteLen);
writel(0, &regs->DmaWriteLcl);
writel(0, &regs->DmaWriteIPchecksum);
writel(0, &regs->DmaReadLcl);
writel(0, &regs->DmaReadIPchecksum);
writel(0, &regs->PciState);
#if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN
writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode);
#elif (BITS_PER_LONG == 64)
writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode);
#else
writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode);
#endif
#if 0
/*
* Don't worry, this is just black magic.
*/
writel(0xdf000, &regs->RxBase);
writel(0xdf000, &regs->RxPrd);
writel(0xdf000, &regs->RxCon);
writel(0xce000, &regs->TxBase);
writel(0xce000, &regs->TxPrd);
writel(0xce000, &regs->TxCon);
writel(0, &regs->RxIndPro);
writel(0, &regs->RxIndCon);
writel(0, &regs->RxIndRef);
writel(0, &regs->TxIndPro);
writel(0, &regs->TxIndCon);
writel(0, &regs->TxIndRef);
writel(0xcc000, &regs->pad10[0]);
writel(0, &regs->DrCmndPro);
writel(0, &regs->DrCmndCon);
writel(0, &regs->DwCmndPro);
writel(0, &regs->DwCmndCon);
writel(0, &regs->DwCmndRef);
writel(0, &regs->DrDataPro);
writel(0, &regs->DrDataCon);
writel(0, &regs->DrDataRef);
writel(0, &regs->DwDataPro);
writel(0, &regs->DwDataCon);
writel(0, &regs->DwDataRef);
#endif
writel(0xffffffff, &regs->MbEvent);
writel(0, &regs->Event);
writel(0, &regs->TxPi);
writel(0, &regs->IpRxPi);
writel(0, &regs->EvtCon);
writel(0, &regs->EvtPrd);
rrpriv->info->evt_ctrl.pi = 0;
for (i = 0; i < CMD_RING_ENTRIES; i++)
writel(0, &regs->CmdRing[i]);
/*
* Why 32 ? is this not cache line size dependent?
*/
writel(RBURST_64|WBURST_64, &regs->PciState);
wmb();
start_pc = rr_read_eeprom_word(rrpriv,
offsetof(struct eeprom, rncd_info.FwStart));
#if (DEBUG > 1)
printk("%s: Executing firmware at address 0x%06x\n",
dev->name, start_pc);
#endif
writel(start_pc + 0x800, &regs->Pc);
wmb();
udelay(5);
writel(start_pc, &regs->Pc);
wmb();
return 0;
}
/*
* Read a string from the EEPROM.
*/
static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
unsigned long offset,
unsigned char *buf,
unsigned long length)
{
struct rr_regs __iomem *regs = rrpriv->regs;
u32 misc, io, host, i;
io = readl(&regs->ExtIo);
writel(0, &regs->ExtIo);
misc = readl(&regs->LocalCtrl);
writel(0, &regs->LocalCtrl);
host = readl(&regs->HostCtrl);
writel(host | HALT_NIC, &regs->HostCtrl);
mb();
for (i = 0; i < length; i++){
writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
mb();
buf[i] = (readl(&regs->WinData) >> 24) & 0xff;
mb();
}
writel(host, &regs->HostCtrl);
writel(misc, &regs->LocalCtrl);
writel(io, &regs->ExtIo);
mb();
return i;
}
/*
* Shortcut to read one word (4 bytes) out of the EEPROM and convert
* it to our CPU byte-order.
*/
static u32 rr_read_eeprom_word(struct rr_private *rrpriv,
size_t offset)
{
__be32 word;
if ((rr_read_eeprom(rrpriv, offset,
(unsigned char *)&word, 4) == 4))
return be32_to_cpu(word);
return 0;
}
/*
* Write a string to the EEPROM.
*
* This is only called when the firmware is not running.
*/
static unsigned int write_eeprom(struct rr_private *rrpriv,
unsigned long offset,
unsigned char *buf,
unsigned long length)
{
struct rr_regs __iomem *regs = rrpriv->regs;
u32 misc, io, data, i, j, ready, error = 0;
io = readl(&regs->ExtIo);
writel(0, &regs->ExtIo);
misc = readl(&regs->LocalCtrl);
writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl);
mb();
for (i = 0; i < length; i++){
writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
mb();
data = buf[i] << 24;
/*
* Only try to write the data if it is not the same
* value already.
*/
if ((readl(&regs->WinData) & 0xff000000) != data){
writel(data, &regs->WinData);
ready = 0;
j = 0;
mb();
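/*
* Poll until the byte reads back correctly; give up after
* roughly 5000 * 20us = 100ms.
*/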
while(!ready){
udelay(20);
if ((readl(&regs->WinData) & 0xff000000) ==
data)
ready = 1;
mb();
if (j++ > 5000){
printk("data mismatch: %08x, "
"WinData %08x\n", data,
readl(&regs->WinData));
ready = 1;
error = 1;
}
}
}
}
writel(misc, &regs->LocalCtrl);
writel(io, &regs->ExtIo);
mb();
return error;
}
static int rr_init(struct net_device *dev)
{
u8 addr[HIPPI_ALEN] __aligned(4);
struct rr_private *rrpriv;
struct rr_regs __iomem *regs;
u32 sram_size, rev;
rrpriv = netdev_priv(dev);
regs = rrpriv->regs;
rev = readl(&regs->FwRev);
rrpriv->fw_rev = rev;
if (rev > 0x00020024)
printk(" Firmware revision: %i.%i.%i\n", (rev >> 16),
((rev >> 8) & 0xff), (rev & 0xff));
else if (rev >= 0x00020000) {
printk(" Firmware revision: %i.%i.%i (2.0.37 or "
"later is recommended)\n", (rev >> 16),
((rev >> 8) & 0xff), (rev & 0xff));
}else{
printk(" Firmware revision too old: %i.%i.%i, please "
"upgrade to 2.0.37 or later.\n",
(rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff));
}
#if (DEBUG > 2)
printk(" Maximum receive rings %i\n", readl(&regs->MaxRxRng));
#endif
/*
* Read the hardware address from the eeprom. The HW address
* is not really necessary for HIPPI but awfully convenient.
* The pointer arithmetic to put it in dev_addr is ugly, but
* Donald Becker does it this way for the GigE version of this
* card and it's shorter and more portable than any
* other method I've seen. -VAL
*/
*(__be16 *)(addr) =
htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA)));
*(__be32 *)(addr+2) =
htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4])));
dev_addr_set(dev, addr);
printk(" MAC: %pM\n", dev->dev_addr);
sram_size = rr_read_eeprom_word(rrpriv, 8);
printk(" SRAM size 0x%06x\n", sram_size);
return 0;
}
static int rr_init1(struct net_device *dev)
{
struct rr_private *rrpriv;
struct rr_regs __iomem *regs;
unsigned long myjif, flags;
struct cmd cmd;
u32 hostctrl;
int ecode = 0;
short i;
rrpriv = netdev_priv(dev);
regs = rrpriv->regs;
spin_lock_irqsave(&rrpriv->lock, flags);
hostctrl = readl(&regs->HostCtrl);
writel(hostctrl | HALT_NIC | RR_CLEAR_INT, &regs->HostCtrl);
wmb();
if (hostctrl & PARITY_ERR){
printk("%s: Parity error halting NIC - this is serious!\n",
dev->name);
spin_unlock_irqrestore(&rrpriv->lock, flags);
ecode = -EFAULT;
goto error;
}
set_rxaddr(regs, rrpriv->rx_ctrl_dma);
set_infoaddr(regs, rrpriv->info_dma);
rrpriv->info->evt_ctrl.entry_size = sizeof(struct event);
rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES;
rrpriv->info->evt_ctrl.mode = 0;
rrpriv->info->evt_ctrl.pi = 0;
set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma);
rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd);
rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES;
rrpriv->info->cmd_ctrl.mode = 0;
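/*
* The command producer index counts down (see rr_issue_cmd()),
* so it presumably starts out at the top entry of the ring.
*/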
rrpriv->info->cmd_ctrl.pi = 15;
for (i = 0; i < CMD_RING_ENTRIES; i++) {
writel(0, &regs->CmdRing[i]);
}
for (i = 0; i < TX_RING_ENTRIES; i++) {
rrpriv->tx_ring[i].size = 0;
set_rraddr(&rrpriv->tx_ring[i].addr, 0);
rrpriv->tx_skbuff[i] = NULL;
}
rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc);
rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES;
rrpriv->info->tx_ctrl.mode = 0;
rrpriv->info->tx_ctrl.pi = 0;
set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma);
/*
* Set dirty_tx before we start receiving interrupts, otherwise
* the interrupt handler might think it is supposed to process
* tx ints before we are up and running, which may cause a null
* pointer access in the int handler.
*/
rrpriv->tx_full = 0;
rrpriv->cur_rx = 0;
rrpriv->dirty_rx = rrpriv->dirty_tx = 0;
rr_reset(dev);
/* Tuning values */
writel(0x5000, &regs->ConRetry);
writel(0x100, &regs->ConRetryTmr);
writel(0x500000, &regs->ConTmout);
writel(0x60, &regs->IntrTmr);
writel(0x500000, &regs->TxDataMvTimeout);
writel(0x200000, &regs->RxDataMvTimeout);
writel(0x80, &regs->WriteDmaThresh);
writel(0x80, &regs->ReadDmaThresh);
rrpriv->fw_running = 0;
wmb();
hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR);
writel(hostctrl, &regs->HostCtrl);
wmb();
spin_unlock_irqrestore(&rrpriv->lock, flags);
for (i = 0; i < RX_RING_ENTRIES; i++) {
struct sk_buff *skb;
dma_addr_t addr;
rrpriv->rx_ring[i].mode = 0;
skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
if (!skb) {
printk(KERN_WARNING "%s: Unable to allocate memory "
"for receive ring - halting NIC\n", dev->name);
ecode = -ENOMEM;
goto error;
}
rrpriv->rx_skbuff[i] = skb;
addr = dma_map_single(&rrpriv->pci_dev->dev, skb->data,
dev->mtu + HIPPI_HLEN, DMA_FROM_DEVICE);
/*
* Sanity test to see if we conflict with the DMA
* limitations of the Roadrunner.
*/
if ((((unsigned long)skb->data) & 0xfff) > ~65320)
printk("skb alloc error\n");
set_rraddr(&rrpriv->rx_ring[i].addr, addr);
rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN;
}
rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc);
rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES;
rrpriv->rx_ctrl[4].mode = 8;
rrpriv->rx_ctrl[4].pi = 0;
wmb();
set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma);
udelay(1000);
/*
* Now start the FirmWare.
*/
cmd.code = C_START_FW;
cmd.ring = 0;
cmd.index = 0;
rr_issue_cmd(rrpriv, &cmd);
/*
* Give the FirmWare time to chew on the `get running' command.
*/
myjif = jiffies + 5 * HZ;
while (time_before(jiffies, myjif) && !rrpriv->fw_running)
cpu_relax();
netif_start_queue(dev);
return ecode;
error:
/*
* We might have gotten here because we are out of memory,
* make sure we release everything we allocated before failing
*/
for (i = 0; i < RX_RING_ENTRIES; i++) {
struct sk_buff *skb = rrpriv->rx_skbuff[i];
if (skb) {
dma_unmap_single(&rrpriv->pci_dev->dev,
rrpriv->rx_ring[i].addr.addrlo,
dev->mtu + HIPPI_HLEN,
DMA_FROM_DEVICE);
rrpriv->rx_ring[i].size = 0;
set_rraddr(&rrpriv->rx_ring[i].addr, 0);
dev_kfree_skb(skb);
rrpriv->rx_skbuff[i] = NULL;
}
}
return ecode;
}
/*
* All events are considered to be slow (RX/TX ints do not generate
* events) and are handled here, outside the main interrupt handler,
* to reduce the size of the handler.
*/
static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
{
struct rr_private *rrpriv;
struct rr_regs __iomem *regs;
u32 tmp;
rrpriv = netdev_priv(dev);
regs = rrpriv->regs;
while (prodidx != eidx){
switch (rrpriv->evt_ring[eidx].code){
case E_NIC_UP:
tmp = readl(&regs->FwRev);
printk(KERN_INFO "%s: Firmware revision %i.%i.%i "
"up and running\n", dev->name,
(tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff));
rrpriv->fw_running = 1;
writel(RX_RING_ENTRIES - 1, &regs->IpRxPi);
wmb();
break;
case E_LINK_ON:
printk(KERN_INFO "%s: Optical link ON\n", dev->name);
break;
case E_LINK_OFF:
printk(KERN_INFO "%s: Optical link OFF\n", dev->name);
break;
case E_RX_IDLE:
printk(KERN_WARNING "%s: RX data not moving\n",
dev->name);
goto drop;
case E_WATCHDOG:
printk(KERN_INFO "%s: The watchdog is here to see "
"us\n", dev->name);
break;
case E_INTERN_ERR:
printk(KERN_ERR "%s: HIPPI Internal NIC error\n",
dev->name);
writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
&regs->HostCtrl);
wmb();
break;
case E_HOST_ERR:
printk(KERN_ERR "%s: Host software error\n",
dev->name);
writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
&regs->HostCtrl);
wmb();
break;
/*
* TX events.
*/
case E_CON_REJ:
printk(KERN_WARNING "%s: Connection rejected\n",
dev->name);
dev->stats.tx_aborted_errors++;
break;
case E_CON_TMOUT:
printk(KERN_WARNING "%s: Connection timeout\n",
dev->name);
break;
case E_DISC_ERR:
printk(KERN_WARNING "%s: HIPPI disconnect error\n",
dev->name);
dev->stats.tx_aborted_errors++;
break;
case E_INT_PRTY:
printk(KERN_ERR "%s: HIPPI Internal Parity error\n",
dev->name);
writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
&regs->HostCtrl);
wmb();
break;
case E_TX_IDLE:
printk(KERN_WARNING "%s: Transmitter idle\n",
dev->name);
break;
case E_TX_LINK_DROP:
printk(KERN_WARNING "%s: Link lost during transmit\n",
dev->name);
dev->stats.tx_aborted_errors++;
writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
&regs->HostCtrl);
wmb();
break;
case E_TX_INV_RNG:
printk(KERN_ERR "%s: Invalid send ring block\n",
dev->name);
writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
&regs->HostCtrl);
wmb();
break;
case E_TX_INV_BUF:
printk(KERN_ERR "%s: Invalid send buffer address\n",
dev->name);
writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
&regs->HostCtrl);
wmb();
break;
case E_TX_INV_DSC:
printk(KERN_ERR "%s: Invalid descriptor address\n",
dev->name);
writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
&regs->HostCtrl);
wmb();
break;
/*
* RX events.
*/
case E_RX_RNG_OUT:
printk(KERN_INFO "%s: Receive ring full\n", dev->name);
break;
case E_RX_PAR_ERR:
printk(KERN_WARNING "%s: Receive parity error\n",
dev->name);
goto drop;
case E_RX_LLRC_ERR:
printk(KERN_WARNING "%s: Receive LLRC error\n",
dev->name);
goto drop;
case E_PKT_LN_ERR:
printk(KERN_WARNING "%s: Receive packet length "
"error\n", dev->name);
goto drop;
case E_DTA_CKSM_ERR:
printk(KERN_WARNING "%s: Data checksum error\n",
dev->name);
goto drop;
case E_SHT_BST:
printk(KERN_WARNING "%s: Unexpected short burst "
"error\n", dev->name);
goto drop;
case E_STATE_ERR:
printk(KERN_WARNING "%s: Recv. state transition"
" error\n", dev->name);
goto drop;
case E_UNEXP_DATA:
printk(KERN_WARNING "%s: Unexpected data error\n",
dev->name);
goto drop;
case E_LST_LNK_ERR:
printk(KERN_WARNING "%s: Link lost error\n",
dev->name);
goto drop;
case E_FRM_ERR:
printk(KERN_WARNING "%s: Framing Error\n",
dev->name);
goto drop;
case E_FLG_SYN_ERR:
printk(KERN_WARNING "%s: Flag sync. lost during "
"packet\n", dev->name);
goto drop;
case E_RX_INV_BUF:
printk(KERN_ERR "%s: Invalid receive buffer "
"address\n", dev->name);
writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
&regs->HostCtrl);
wmb();
break;
case E_RX_INV_DSC:
printk(KERN_ERR "%s: Invalid receive descriptor "
"address\n", dev->name);
writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
&regs->HostCtrl);
wmb();
break;
case E_RNG_BLK:
printk(KERN_ERR "%s: Invalid ring block\n",
dev->name);
writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
&regs->HostCtrl);
wmb();
break;
drop:
/* Label packet to be dropped.
* Actual dropping occurs in rx
* handling.
*
* The index of packet we get to drop is
* the index of the packet following
* the bad packet. -kbf
*/
{
u16 index = rrpriv->evt_ring[eidx].index;
index = (index + (RX_RING_ENTRIES - 1)) %
RX_RING_ENTRIES;
rrpriv->rx_ring[index].mode |=
(PACKET_BAD | PACKET_END);
}
break;
default:
printk(KERN_WARNING "%s: Unhandled event 0x%02x\n",
dev->name, rrpriv->evt_ring[eidx].code);
}
eidx = (eidx + 1) % EVT_RING_ENTRIES;
}
rrpriv->info->evt_ctrl.pi = eidx;
wmb();
return eidx;
}
static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
{
struct rr_private *rrpriv = netdev_priv(dev);
struct rr_regs __iomem *regs = rrpriv->regs;
do {
struct rx_desc *desc;
u32 pkt_len;
desc = &(rrpriv->rx_ring[index]);
pkt_len = desc->size;
#if (DEBUG > 2)
printk("index %i, rxlimit %i\n", index, rxlimit);
printk("len %x, mode %x\n", pkt_len, desc->mode);
#endif
if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){
dev->stats.rx_dropped++;
goto defer;
}
if (pkt_len > 0){
struct sk_buff *skb, *rx_skb;
rx_skb = rrpriv->rx_skbuff[index];
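/*
* Small packets are copied into a fresh skb so the already
* mapped receive buffer can be reused; larger packets are
* handed up directly and replaced by a newly allocated and
* mapped buffer.
*/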
if (pkt_len < PKT_COPY_THRESHOLD) {
skb = alloc_skb(pkt_len, GFP_ATOMIC);
if (skb == NULL){
printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len);
dev->stats.rx_dropped++;
goto defer;
} else {
dma_sync_single_for_cpu(&rrpriv->pci_dev->dev,
desc->addr.addrlo,
pkt_len,
DMA_FROM_DEVICE);
skb_put_data(skb, rx_skb->data,
pkt_len);
dma_sync_single_for_device(&rrpriv->pci_dev->dev,
desc->addr.addrlo,
pkt_len,
DMA_FROM_DEVICE);
}
}else{
struct sk_buff *newskb;
newskb = alloc_skb(dev->mtu + HIPPI_HLEN,
GFP_ATOMIC);
if (newskb){
dma_addr_t addr;
dma_unmap_single(&rrpriv->pci_dev->dev,
desc->addr.addrlo,
dev->mtu + HIPPI_HLEN,
DMA_FROM_DEVICE);
skb = rx_skb;
skb_put(skb, pkt_len);
rrpriv->rx_skbuff[index] = newskb;
addr = dma_map_single(&rrpriv->pci_dev->dev,
newskb->data,
dev->mtu + HIPPI_HLEN,
DMA_FROM_DEVICE);
set_rraddr(&desc->addr, addr);
} else {
printk("%s: Out of memory, deferring "
"packet\n", dev->name);
dev->stats.rx_dropped++;
goto defer;
}
}
skb->protocol = hippi_type_trans(skb, dev);
netif_rx(skb); /* send it up */
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
}
defer:
desc->mode = 0;
desc->size = dev->mtu + HIPPI_HLEN;
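/*
* Only advance the NIC's RX producer index every eighth
* buffer, presumably to limit the number of MMIO writes.
*/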
if ((index & 7) == 7)
writel(index, &regs->IpRxPi);
index = (index + 1) % RX_RING_ENTRIES;
} while(index != rxlimit);
rrpriv->cur_rx = index;
wmb();
}
static irqreturn_t rr_interrupt(int irq, void *dev_id)
{
struct rr_private *rrpriv;
struct rr_regs __iomem *regs;
struct net_device *dev = (struct net_device *)dev_id;
u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon;
rrpriv = netdev_priv(dev);
regs = rrpriv->regs;
if (!(readl(&regs->HostCtrl) & RR_INT))
return IRQ_NONE;
spin_lock(&rrpriv->lock);
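/*
* EvtPrd packs three 8-bit indices: the event producer in bits
* 0-7, the TX consumer in bits 8-15 and the RX limit in bits
* 16-23.
*/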
prodidx = readl(&regs->EvtPrd);
txcsmr = (prodidx >> 8) & 0xff;
rxlimit = (prodidx >> 16) & 0xff;
prodidx &= 0xff;
#if (DEBUG > 2)
printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name,
prodidx, rrpriv->info->evt_ctrl.pi);
#endif
/*
* Order here is important. We must handle events
* before doing anything else in order to catch
* such things as LLRC errors, etc -kbf
*/
eidx = rrpriv->info->evt_ctrl.pi;
if (prodidx != eidx)
eidx = rr_handle_event(dev, prodidx, eidx);
rxindex = rrpriv->cur_rx;
if (rxindex != rxlimit)
rx_int(dev, rxlimit, rxindex);
txcon = rrpriv->dirty_tx;
if (txcsmr != txcon) {
do {
/* Due to the firmware occasionally getting its TX producer and
* consumer out of sync, we need to check each entry in the ring -kbf
*/
if(rrpriv->tx_skbuff[txcon]){
struct tx_desc *desc;
struct sk_buff *skb;
desc = &(rrpriv->tx_ring[txcon]);
skb = rrpriv->tx_skbuff[txcon];
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
dma_unmap_single(&rrpriv->pci_dev->dev,
desc->addr.addrlo, skb->len,
DMA_TO_DEVICE);
dev_kfree_skb_irq(skb);
rrpriv->tx_skbuff[txcon] = NULL;
desc->size = 0;
set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
desc->mode = 0;
}
txcon = (txcon + 1) % TX_RING_ENTRIES;
} while (txcsmr != txcon);
wmb();
rrpriv->dirty_tx = txcon;
if (rrpriv->tx_full && rr_if_busy(dev) &&
(((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES)
!= rrpriv->dirty_tx)){
rrpriv->tx_full = 0;
netif_wake_queue(dev);
}
}
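/*
* Repack the event consumer index together with the TX consumer
* and RX limit indices and hand them back to the NIC via EvtCon.
*/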
eidx |= ((txcsmr << 8) | (rxlimit << 16));
writel(eidx, &regs->EvtCon);
wmb();
spin_unlock(&rrpriv->lock);
return IRQ_HANDLED;
}
static inline void rr_raz_tx(struct rr_private *rrpriv,
struct net_device *dev)
{
int i;
for (i = 0; i < TX_RING_ENTRIES; i++) {
struct sk_buff *skb = rrpriv->tx_skbuff[i];
if (skb) {
struct tx_desc *desc = &(rrpriv->tx_ring[i]);
dma_unmap_single(&rrpriv->pci_dev->dev,
desc->addr.addrlo, skb->len,
DMA_TO_DEVICE);
desc->size = 0;
set_rraddr(&desc->addr, 0);
dev_kfree_skb(skb);
rrpriv->tx_skbuff[i] = NULL;
}
}
}
static inline void rr_raz_rx(struct rr_private *rrpriv,
struct net_device *dev)
{
int i;
for (i = 0; i < RX_RING_ENTRIES; i++) {
struct sk_buff *skb = rrpriv->rx_skbuff[i];
if (skb) {
struct rx_desc *desc = &(rrpriv->rx_ring[i]);
dma_unmap_single(&rrpriv->pci_dev->dev,
desc->addr.addrlo,
dev->mtu + HIPPI_HLEN,
DMA_FROM_DEVICE);
desc->size = 0;
set_rraddr(&desc->addr, 0);
dev_kfree_skb(skb);
rrpriv->rx_skbuff[i] = NULL;
}
}
}
static void rr_timer(struct timer_list *t)
{
struct rr_private *rrpriv = from_timer(rrpriv, t, timer);
struct net_device *dev = pci_get_drvdata(rrpriv->pci_dev);
struct rr_regs __iomem *regs = rrpriv->regs;
unsigned long flags;
if (readl(&regs->HostCtrl) & NIC_HALTED){
printk("%s: Restarting nic\n", dev->name);
memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
memset(rrpriv->info, 0, sizeof(struct rr_info));
wmb();
rr_raz_tx(rrpriv, dev);
rr_raz_rx(rrpriv, dev);
if (rr_init1(dev)) {
spin_lock_irqsave(&rrpriv->lock, flags);
writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
&regs->HostCtrl);
spin_unlock_irqrestore(&rrpriv->lock, flags);
}
}
rrpriv->timer.expires = RUN_AT(5*HZ);
add_timer(&rrpriv->timer);
}
static int rr_open(struct net_device *dev)
{
struct rr_private *rrpriv = netdev_priv(dev);
struct pci_dev *pdev = rrpriv->pci_dev;
struct rr_regs __iomem *regs;
int ecode = 0;
unsigned long flags;
dma_addr_t dma_addr;
regs = rrpriv->regs;
if (rrpriv->fw_rev < 0x00020000) {
printk(KERN_WARNING "%s: trying to configure device with "
"obsolete firmware\n", dev->name);
ecode = -EBUSY;
goto error;
}
rrpriv->rx_ctrl = dma_alloc_coherent(&pdev->dev,
256 * sizeof(struct ring_ctrl),
&dma_addr, GFP_KERNEL);
if (!rrpriv->rx_ctrl) {
ecode = -ENOMEM;
goto error;
}
rrpriv->rx_ctrl_dma = dma_addr;
rrpriv->info = dma_alloc_coherent(&pdev->dev, sizeof(struct rr_info),
&dma_addr, GFP_KERNEL);
if (!rrpriv->info) {
ecode = -ENOMEM;
goto error;
}
rrpriv->info_dma = dma_addr;
wmb();
spin_lock_irqsave(&rrpriv->lock, flags);
writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
readl(&regs->HostCtrl);
spin_unlock_irqrestore(&rrpriv->lock, flags);
if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
dev->name, pdev->irq);
ecode = -EAGAIN;
goto error;
}
if ((ecode = rr_init1(dev)))
goto error;
/* Set the timer to check for link beat and perhaps switch to an
alternate media type. */
timer_setup(&rrpriv->timer, rr_timer, 0);
rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */
add_timer(&rrpriv->timer);
netif_start_queue(dev);
return ecode;
error:
spin_lock_irqsave(&rrpriv->lock, flags);
writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
spin_unlock_irqrestore(&rrpriv->lock, flags);
if (rrpriv->info) {
dma_free_coherent(&pdev->dev, sizeof(struct rr_info),
rrpriv->info, rrpriv->info_dma);
rrpriv->info = NULL;
}
if (rrpriv->rx_ctrl) {
dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
rrpriv->rx_ctrl = NULL;
}
netif_stop_queue(dev);
return ecode;
}
static void rr_dump(struct net_device *dev)
{
struct rr_private *rrpriv;
struct rr_regs __iomem *regs;
u32 index, cons;
short i;
int len;
rrpriv = netdev_priv(dev);
regs = rrpriv->regs;
printk("%s: dumping NIC TX rings\n", dev->name);
printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n",
readl(&regs->RxPrd), readl(&regs->TxPrd),
readl(&regs->EvtPrd), readl(&regs->TxPi),
rrpriv->info->tx_ctrl.pi);
printk("Error code 0x%x\n", readl(&regs->Fail1));
index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
cons = rrpriv->dirty_tx;
printk("TX ring index %i, TX consumer %i\n",
index, cons);
if (rrpriv->tx_skbuff[index]){
len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len);
printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size);
for (i = 0; i < len; i++){
if (!(i & 7))
printk("\n");
printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]);
}
printk("\n");
}
if (rrpriv->tx_skbuff[cons]){
len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %p, truesize 0x%x\n",
rrpriv->tx_ring[cons].mode,
rrpriv->tx_ring[cons].size,
(unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
rrpriv->tx_skbuff[cons]->data,
(unsigned int)rrpriv->tx_skbuff[cons]->truesize);
for (i = 0; i < len; i++){
if (!(i & 7))
printk("\n");
printk("%02x ", (unsigned char)rrpriv->tx_ring[cons].size);
}
printk("\n");
}
printk("dumping TX ring info:\n");
for (i = 0; i < TX_RING_ENTRIES; i++)
printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n",
rrpriv->tx_ring[i].mode,
rrpriv->tx_ring[i].size,
(unsigned long long) rrpriv->tx_ring[i].addr.addrlo);
}
static int rr_close(struct net_device *dev)
{
struct rr_private *rrpriv = netdev_priv(dev);
struct rr_regs __iomem *regs = rrpriv->regs;
struct pci_dev *pdev = rrpriv->pci_dev;
unsigned long flags;
u32 tmp;
short i;
netif_stop_queue(dev);
/*
* Lock to make sure we are not cleaning up while another CPU
* is handling interrupts.
*/
spin_lock_irqsave(&rrpriv->lock, flags);
tmp = readl(&regs->HostCtrl);
if (tmp & NIC_HALTED){
printk("%s: NIC already halted\n", dev->name);
rr_dump(dev);
}else{
tmp |= HALT_NIC | RR_CLEAR_INT;
writel(tmp, &regs->HostCtrl);
readl(&regs->HostCtrl);
}
rrpriv->fw_running = 0;
spin_unlock_irqrestore(&rrpriv->lock, flags);
del_timer_sync(&rrpriv->timer);
spin_lock_irqsave(&rrpriv->lock, flags);
writel(0, &regs->TxPi);
writel(0, &regs->IpRxPi);
writel(0, &regs->EvtCon);
writel(0, &regs->EvtPrd);
for (i = 0; i < CMD_RING_ENTRIES; i++)
writel(0, &regs->CmdRing[i]);
rrpriv->info->tx_ctrl.entries = 0;
rrpriv->info->cmd_ctrl.pi = 0;
rrpriv->info->evt_ctrl.pi = 0;
rrpriv->rx_ctrl[4].entries = 0;
rr_raz_tx(rrpriv, dev);
rr_raz_rx(rrpriv, dev);
dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
rrpriv->rx_ctrl = NULL;
dma_free_coherent(&pdev->dev, sizeof(struct rr_info), rrpriv->info,
rrpriv->info_dma);
rrpriv->info = NULL;
spin_unlock_irqrestore(&rrpriv->lock, flags);
free_irq(pdev->irq, dev);
return 0;
}
static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct rr_private *rrpriv = netdev_priv(dev);
struct rr_regs __iomem *regs = rrpriv->regs;
struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
struct ring_ctrl *txctrl;
unsigned long flags;
u32 index, len = skb->len;
u32 *ifield;
struct sk_buff *new_skb;
if (readl(&regs->Mode) & FATAL_ERR)
printk("error codes Fail1 %02x, Fail2 %02x\n",
readl(&regs->Fail1), readl(&regs->Fail2));
/*
* We probably need to deal with tbusy here to prevent overruns.
*/
if (skb_headroom(skb) < 8){
printk("incoming skb too small - reallocating\n");
if (!(new_skb = dev_alloc_skb(len + 8))) {
dev_kfree_skb(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
skb_reserve(new_skb, 8);
skb_put(new_skb, len);
skb_copy_from_linear_data(skb, new_skb->data, len);
dev_kfree_skb(skb);
skb = new_skb;
}
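/*
* Prepend the 8-byte HIPPI I-field: a zero word followed by the
* I-field taken from the skb's HIPPI control block.
*/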
ifield = skb_push(skb, 8);
ifield[0] = 0;
ifield[1] = hcb->ifield;
/*
* We don't need the lock before we are actually going to start
* fiddling with the control blocks.
*/
spin_lock_irqsave(&rrpriv->lock, flags);
txctrl = &rrpriv->info->tx_ctrl;
index = txctrl->pi;
rrpriv->tx_skbuff[index] = skb;
set_rraddr(&rrpriv->tx_ring[index].addr,
dma_map_single(&rrpriv->pci_dev->dev, skb->data, len + 8, DMA_TO_DEVICE));
rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
txctrl->pi = (index + 1) % TX_RING_ENTRIES;
wmb();
writel(txctrl->pi, &regs->TxPi);
if (txctrl->pi == rrpriv->dirty_tx){
rrpriv->tx_full = 1;
netif_stop_queue(dev);
}
spin_unlock_irqrestore(&rrpriv->lock, flags);
return NETDEV_TX_OK;
}
/*
* Read the firmware out of the EEPROM and put it into the SRAM
* (or from user space - later)
*
* This operation requires the NIC to be halted and is performed with
* interrupts disabled and with the spinlock hold.
*/
static int rr_load_firmware(struct net_device *dev)
{
struct rr_private *rrpriv;
struct rr_regs __iomem *regs;
size_t eptr, segptr;
int i, j;
u32 localctrl, sptr, len, tmp;
u32 p2len, p2size, nr_seg, revision, io, sram_size;
rrpriv = netdev_priv(dev);
regs = rrpriv->regs;
if (dev->flags & IFF_UP)
return -EBUSY;
if (!(readl(&regs->HostCtrl) & NIC_HALTED)){
printk("%s: Trying to load firmware to a running NIC.\n",
dev->name);
return -EBUSY;
}
localctrl = readl(&regs->LocalCtrl);
writel(0, &regs->LocalCtrl);
writel(0, &regs->EvtPrd);
writel(0, &regs->RxPrd);
writel(0, &regs->TxPrd);
/*
* First wipe the entire SRAM, otherwise we might run into all
* kinds of trouble ... sigh, this took almost all afternoon
* to track down ;-(
*/
io = readl(&regs->ExtIo);
writel(0, &regs->ExtIo);
sram_size = rr_read_eeprom_word(rrpriv, 8);
for (i = 200; i < sram_size / 4; i++){
writel(i * 4, &regs->WinBase);
mb();
writel(0, &regs->WinData);
mb();
}
writel(io, &regs->ExtIo);
mb();
eptr = rr_read_eeprom_word(rrpriv,
offsetof(struct eeprom, rncd_info.AddrRunCodeSegs));
eptr = ((eptr & 0x1fffff) >> 3);
p2len = rr_read_eeprom_word(rrpriv, 0x83*4);
p2len = (p2len << 2);
p2size = rr_read_eeprom_word(rrpriv, 0x84*4);
p2size = ((p2size & 0x1fffff) >> 3);
if ((eptr < p2size) || (eptr > (p2size + p2len))){
printk("%s: eptr is invalid\n", dev->name);
goto out;
}
revision = rr_read_eeprom_word(rrpriv,
offsetof(struct eeprom, manf.HeaderFmt));
if (revision != 1){
printk("%s: invalid firmware format (%i)\n",
dev->name, revision);
goto out;
}
nr_seg = rr_read_eeprom_word(rrpriv, eptr);
eptr +=4;
#if (DEBUG > 1)
printk("%s: nr_seg %i\n", dev->name, nr_seg);
#endif
for (i = 0; i < nr_seg; i++){
sptr = rr_read_eeprom_word(rrpriv, eptr);
eptr += 4;
len = rr_read_eeprom_word(rrpriv, eptr);
eptr += 4;
segptr = rr_read_eeprom_word(rrpriv, eptr);
segptr = ((segptr & 0x1fffff) >> 3);
eptr += 4;
#if (DEBUG > 1)
printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n",
dev->name, i, sptr, len, segptr);
#endif
for (j = 0; j < len; j++){
tmp = rr_read_eeprom_word(rrpriv, segptr);
writel(sptr, &regs->WinBase);
mb();
writel(tmp, &regs->WinData);
mb();
segptr += 4;
sptr += 4;
}
}
out:
writel(localctrl, &regs->LocalCtrl);
mb();
return 0;
}
static int rr_siocdevprivate(struct net_device *dev, struct ifreq *rq,
void __user *data, int cmd)
{
struct rr_private *rrpriv;
unsigned char *image, *oldimage;
unsigned long flags;
unsigned int i;
int error = -EOPNOTSUPP;
rrpriv = netdev_priv(dev);
switch(cmd){
case SIOCRRGFW:
if (!capable(CAP_SYS_RAWIO)){
return -EPERM;
}
image = kmalloc_array(EEPROM_WORDS, sizeof(u32), GFP_KERNEL);
if (!image)
return -ENOMEM;
if (rrpriv->fw_running){
printk("%s: Firmware already running\n", dev->name);
error = -EPERM;
goto gf_out;
}
spin_lock_irqsave(&rrpriv->lock, flags);
i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES);
spin_unlock_irqrestore(&rrpriv->lock, flags);
if (i != EEPROM_BYTES){
printk(KERN_ERR "%s: Error reading EEPROM\n",
dev->name);
error = -EFAULT;
goto gf_out;
}
error = copy_to_user(data, image, EEPROM_BYTES);
if (error)
error = -EFAULT;
gf_out:
kfree(image);
return error;
case SIOCRRPFW:
if (!capable(CAP_SYS_RAWIO)){
return -EPERM;
}
image = memdup_user(data, EEPROM_BYTES);
if (IS_ERR(image))
return PTR_ERR(image);
oldimage = kmalloc(EEPROM_BYTES, GFP_KERNEL);
if (!oldimage) {
kfree(image);
return -ENOMEM;
}
if (rrpriv->fw_running){
printk("%s: Firmware already running\n", dev->name);
error = -EPERM;
goto wf_out;
}
printk("%s: Updating EEPROM firmware\n", dev->name);
spin_lock_irqsave(&rrpriv->lock, flags);
error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES);
if (error)
printk(KERN_ERR "%s: Error writing EEPROM\n",
dev->name);
i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES);
spin_unlock_irqrestore(&rrpriv->lock, flags);
if (i != EEPROM_BYTES)
printk(KERN_ERR "%s: Error reading back EEPROM "
"image\n", dev->name);
error = memcmp(image, oldimage, EEPROM_BYTES);
if (error){
printk(KERN_ERR "%s: Error verifying EEPROM image\n",
dev->name);
error = -EFAULT;
}
wf_out:
kfree(oldimage);
kfree(image);
return error;
case SIOCRRID:
return put_user(0x52523032, (int __user *)data);
default:
return error;
}
}
static const struct pci_device_id rr_pci_tbl[] = {
{ PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, rr_pci_tbl);
static struct pci_driver rr_driver = {
.name = "rrunner",
.id_table = rr_pci_tbl,
.probe = rr_init_one,
.remove = rr_remove_one,
};
module_pci_driver(rr_driver);
| linux-master | drivers/net/hippi/rrunner.c |
/*
* ipddp.c: IP to Appletalk-IP Encapsulation driver for Linux
* Appletalk-IP to IP Decapsulation driver for Linux
*
* Authors:
* - DDP-IP Encap by: Bradford W. Johnson <[email protected]>
* - DDP-IP Decap by: Jay Schulist <[email protected]>
*
* Derived from:
* - Almost all code already existed in net/appletalk/ddp.c I just
* moved/reorganized it into a driver file. Original IP-over-DDP code
* was done by Bradford W. Johnson <[email protected]>
* - skeleton.c: A network driver outline for linux.
* Written 1993-94 by Donald Becker.
* - dummy.c: A dummy net driver. By Nick Holloway.
* - MacGate: A user space Daemon for Appletalk-IP Decap for
* Linux by Jay Schulist <[email protected]>
*
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency.
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*/
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/atalk.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <net/route.h>
#include <linux/uaccess.h>
#include "ipddp.h" /* Our stuff */
static const char version[] = KERN_INFO "ipddp.c:v0.01 8/28/97 Bradford W. Johnson <[email protected]>\n";
static struct ipddp_route *ipddp_route_list;
static DEFINE_SPINLOCK(ipddp_route_lock);
#ifdef CONFIG_IPDDP_ENCAP
static int ipddp_mode = IPDDP_ENCAP;
#else
static int ipddp_mode = IPDDP_DECAP;
#endif
/* Index to functions, as function prototypes. */
static netdev_tx_t ipddp_xmit(struct sk_buff *skb,
struct net_device *dev);
static int ipddp_create(struct ipddp_route *new_rt);
static int ipddp_delete(struct ipddp_route *rt);
static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt);
static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd);
static const struct net_device_ops ipddp_netdev_ops = {
.ndo_start_xmit = ipddp_xmit,
.ndo_siocdevprivate = ipddp_siocdevprivate,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static struct net_device * __init ipddp_init(void)
{
static unsigned version_printed;
struct net_device *dev;
int err;
dev = alloc_etherdev(0);
if (!dev)
return ERR_PTR(-ENOMEM);
netif_keep_dst(dev);
strcpy(dev->name, "ipddp%d");
if (version_printed++ == 0)
printk(version);
/* Initialize the device structure. */
dev->netdev_ops = &ipddp_netdev_ops;
dev->type = ARPHRD_IPDDP; /* IP over DDP tunnel */
dev->mtu = 585;
dev->flags |= IFF_NOARP;
/*
* The worst case header we will need is currently an
* ethernet header (14 bytes) and a ddp header (sizeof ddpehdr+1)
* We send over SNAP so that takes another 8 bytes.
*/
dev->hard_header_len = 14+8+sizeof(struct ddpehdr)+1;
err = register_netdev(dev);
if (err) {
free_netdev(dev);
return ERR_PTR(err);
}
/* Let the user know what mode we are in */
if(ipddp_mode == IPDDP_ENCAP)
printk("%s: Appletalk-IP Encap. mode by Bradford W. Johnson <[email protected]>\n",
dev->name);
if(ipddp_mode == IPDDP_DECAP)
printk("%s: Appletalk-IP Decap. mode by Jay Schulist <[email protected]>\n",
dev->name);
return dev;
}
/*
* Transmit LLAP/ELAP frame using aarp_send_ddp.
*/
static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct rtable *rtable = skb_rtable(skb);
__be32 paddr = 0;
struct ddpehdr *ddp;
struct ipddp_route *rt;
struct atalk_addr *our_addr;
if (rtable->rt_gw_family == AF_INET)
paddr = rtable->rt_gw4;
spin_lock(&ipddp_route_lock);
/*
* Find appropriate route to use, based only on IP number.
*/
for(rt = ipddp_route_list; rt != NULL; rt = rt->next)
{
if(rt->ip == paddr)
break;
}
if(rt == NULL) {
spin_unlock(&ipddp_route_lock);
return NETDEV_TX_OK;
}
our_addr = atalk_find_dev_addr(rt->dev);
if(ipddp_mode == IPDDP_DECAP)
/*
* Pull off the excess room that should not be there.
* This is due to a hard-header problem. This is the
* quick fix for now though, till it breaks.
*/
skb_pull(skb, 35-(sizeof(struct ddpehdr)+1));
/* Create the Extended DDP header */
ddp = (struct ddpehdr *)skb->data;
ddp->deh_len_hops = htons(skb->len + (1<<10));
ddp->deh_sum = 0;
/*
* For Localtalk we need aarp_send_ddp to strip the
 * long DDP header and place a short DDP header on it.
*/
if(rt->dev->type == ARPHRD_LOCALTLK)
{
ddp->deh_dnet = 0; /* FIXME more hops?? */
ddp->deh_snet = 0;
}
else
{
ddp->deh_dnet = rt->at.s_net; /* FIXME more hops?? */
ddp->deh_snet = our_addr->s_net;
}
ddp->deh_dnode = rt->at.s_node;
ddp->deh_snode = our_addr->s_node;
ddp->deh_dport = 72;
ddp->deh_sport = 72;
*((__u8 *)(ddp+1)) = 22; /* ddp type = IP */
skb->protocol = htons(ETH_P_ATALK); /* Protocol has changed */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
aarp_send_ddp(rt->dev, skb, &rt->at, NULL);
spin_unlock(&ipddp_route_lock);
return NETDEV_TX_OK;
}
/*
* Create a routing entry. We first verify that the
* record does not already exist. If it does we return -EEXIST
*/
static int ipddp_create(struct ipddp_route *new_rt)
{
struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (rt == NULL)
return -ENOMEM;
rt->ip = new_rt->ip;
rt->at = new_rt->at;
rt->next = NULL;
if ((rt->dev = atrtr_get_dev(&rt->at)) == NULL) {
kfree(rt);
return -ENETUNREACH;
}
spin_lock_bh(&ipddp_route_lock);
if (__ipddp_find_route(rt)) {
spin_unlock_bh(&ipddp_route_lock);
kfree(rt);
return -EEXIST;
}
rt->next = ipddp_route_list;
ipddp_route_list = rt;
spin_unlock_bh(&ipddp_route_lock);
return 0;
}
/*
* Delete a route, we only delete a FULL match.
* If route does not exist we return -ENOENT.
*/
static int ipddp_delete(struct ipddp_route *rt)
{
struct ipddp_route **r = &ipddp_route_list;
struct ipddp_route *tmp;
spin_lock_bh(&ipddp_route_lock);
while((tmp = *r) != NULL)
{
if(tmp->ip == rt->ip &&
tmp->at.s_net == rt->at.s_net &&
tmp->at.s_node == rt->at.s_node)
{
*r = tmp->next;
spin_unlock_bh(&ipddp_route_lock);
kfree(tmp);
return 0;
}
r = &tmp->next;
}
spin_unlock_bh(&ipddp_route_lock);
return -ENOENT;
}
/*
* Find a routing entry, we only return a FULL match
*/
static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt)
{
struct ipddp_route *f;
for(f = ipddp_route_list; f != NULL; f = f->next)
{
if(f->ip == rt->ip &&
f->at.s_net == rt->at.s_net &&
f->at.s_node == rt->at.s_node)
return f;
}
return NULL;
}
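/*
 * Private ioctl handler: add, look up or delete an ipddp route.
 * Requires CAP_NET_ADMIN and is not available to compat (32-bit) callers.
 */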
static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd)
{
struct ipddp_route rcp, rcp2, *rp;
if (in_compat_syscall())
return -EOPNOTSUPP;
if(!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&rcp, data, sizeof(rcp)))
return -EFAULT;
switch(cmd)
{
case SIOCADDIPDDPRT:
return ipddp_create(&rcp);
case SIOCFINDIPDDPRT:
spin_lock_bh(&ipddp_route_lock);
rp = __ipddp_find_route(&rcp);
if (rp) {
memset(&rcp2, 0, sizeof(rcp2));
rcp2.ip = rp->ip;
rcp2.at = rp->at;
rcp2.flags = rp->flags;
}
spin_unlock_bh(&ipddp_route_lock);
if (rp) {
if (copy_to_user(data, &rcp2,
sizeof(struct ipddp_route)))
return -EFAULT;
return 0;
} else
return -ENOENT;
case SIOCDELIPDDPRT:
return ipddp_delete(&rcp);
default:
return -EINVAL;
}
}
static struct net_device *dev_ipddp;
MODULE_LICENSE("GPL");
module_param(ipddp_mode, int, 0);
static int __init ipddp_init_module(void)
{
dev_ipddp = ipddp_init();
return PTR_ERR_OR_ZERO(dev_ipddp);
}
static void __exit ipddp_cleanup_module(void)
{
struct ipddp_route *p;
unregister_netdev(dev_ipddp);
free_netdev(dev_ipddp);
while (ipddp_route_list) {
p = ipddp_route_list->next;
kfree(ipddp_route_list);
ipddp_route_list = p;
}
}
module_init(ipddp_init_module);
module_exit(ipddp_cleanup_module);
| linux-master | drivers/net/appletalk/ipddp.c |
/* cops.c: LocalTalk driver for Linux.
*
* Authors:
* - Jay Schulist <[email protected]>
*
* With more than a little help from;
* - Alan Cox <[email protected]>
*
* Derived from:
* - skeleton.c: A network driver outline for linux.
* Written 1993-94 by Donald Becker.
* - ltpc.c: A driver for the LocalTalk PC card.
* Written by Bradford W. Johnson.
*
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency.
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* Changes:
* 19970608 Alan Cox Allowed dual card type support
* Can set board type in insmod
* Hooks for cops_setup routine
* (not yet implemented).
* 19971101 Jay Schulist Fixes for multiple lt* devices.
* 19980607 Steven Hirsch Fixed the badly broken support
* for Tangent type cards. Only
* tested on Daystar LT200. Some
* cleanup of formatting and program
* logic. Added emacs 'local-vars'
* setup for Jay's brace style.
* 20000211 Alan Cox Cleaned up for softnet
*/
static const char *version =
"cops.c:v0.04 6/7/98 Jay Schulist <[email protected]>\n";
/*
* Sources:
* COPS Localtalk SDK. This provides almost all of the information
* needed.
*/
/*
* insmod/modprobe configurable stuff.
* - IO Port, choose one your card supports or 0 if you dare.
* - IRQ, also choose one your card supports or nothing and let
* the driver figure it out.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ltalk.h>
#include <linux/delay.h> /* For udelay() */
#include <linux/atalk.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <net/Space.h>
#include <asm/io.h>
#include <asm/dma.h>
#include "cops.h" /* Our Stuff */
#include "cops_ltdrv.h" /* Firmware code for Tangent type cards. */
#include "cops_ffdrv.h" /* Firmware code for Dayna type cards. */
/*
* The name of the card. Is used for messages and in the requests for
* io regions, irqs and dma channels
*/
static const char *cardname = "cops";
#ifdef CONFIG_COPS_DAYNA
static int board_type = DAYNA; /* Module exported */
#else
static int board_type = TANGENT;
#endif
static int io = 0x240; /* Default IO for Dayna */
static int irq = 5; /* Default IRQ */
/*
* COPS Autoprobe information.
* Right now if port address is right but IRQ is not 5 this will
* return a 5 no matter what since we will still get a status response.
 * Need one additional check to narrow down after we have gotten
 * the ioaddr. But since the only other possible IRQs are 3 and 4,
 * there is no real hurry on this. I *STRONGLY* recommend using IRQ 5
 * for your card with this driver.
*
* This driver has 2 modes and they are: Dayna mode and Tangent mode.
* Each mode corresponds with the type of card. It has been found
* that there are 2 main types of cards and all other cards are
* the same and just have different names or only have minor differences
* such as more IO ports. As this driver is tested it will
* become more clear on exactly what cards are supported. The driver
 * defaults to using Dayna mode. To change the driver's mode, simply
* select Dayna or Tangent mode when configuring the kernel.
*
* This driver should support:
* TANGENT driver mode:
* Tangent ATB-II, Novell NL-1000, Daystar Digital LT-200,
* COPS LT-1
* DAYNA driver mode:
* Dayna DL2000/DaynaTalk PC (Half Length), COPS LT-95,
* Farallon PhoneNET PC III, Farallon PhoneNET PC II
 * Other cards possibly supported, mode unknown though:
* Dayna DL2000 (Full length), COPS LT/M (Micro-Channel)
*
* Cards NOT supported by this driver but supported by the ltpc.c
* driver written by Bradford W. Johnson <[email protected]>
* Farallon PhoneNET PC
* Original Apple LocalTalk PC card
*
* N.B.
*
* The Daystar Digital LT200 boards do not support interrupt-driven
* IO. You must specify 'irq=0xff' as a module parameter to invoke
* polled mode. I also believe that the port probing logic is quite
* dangerous at best and certainly hopeless for a polled card. Best to
* specify both. - Steve H.
*
*/
/*
* Zero terminated list of IO ports to probe.
*/
static unsigned int ports[] = {
0x240, 0x340, 0x200, 0x210, 0x220, 0x230, 0x260,
0x2A0, 0x300, 0x310, 0x320, 0x330, 0x350, 0x360,
0
};
/*
* Zero terminated list of IRQ ports to probe.
*/
static int cops_irqlist[] = {
5, 4, 3, 0
};
static struct timer_list cops_timer;
static struct net_device *cops_timer_dev;
/* use 0 for production, 1 for verification, 2 for debug, 3 for verbose debug */
#ifndef COPS_DEBUG
#define COPS_DEBUG 1
#endif
static unsigned int cops_debug = COPS_DEBUG;
/* The number of low I/O ports used by the card. */
#define COPS_IO_EXTENT 8
/* Information that needs to be kept for each board. */
struct cops_local
{
int board; /* Holds what board type is. */
int nodeid; /* Set to 1 once have nodeid. */
unsigned char node_acquire; /* Node ID when acquired. */
struct atalk_addr node_addr; /* Full node address */
spinlock_t lock; /* RX/TX lock */
};
/* Index to functions, as function prototypes. */
static int cops_probe1 (struct net_device *dev, int ioaddr);
static int cops_irq (int ioaddr, int board);
static int cops_open (struct net_device *dev);
static int cops_jumpstart (struct net_device *dev);
static void cops_reset (struct net_device *dev, int sleep);
static void cops_load (struct net_device *dev);
static int cops_nodeid (struct net_device *dev, int nodeid);
static irqreturn_t cops_interrupt (int irq, void *dev_id);
static void cops_poll(struct timer_list *t);
static void cops_timeout(struct net_device *dev, unsigned int txqueue);
static void cops_rx (struct net_device *dev);
static netdev_tx_t cops_send_packet (struct sk_buff *skb,
struct net_device *dev);
static void set_multicast_list (struct net_device *dev);
static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int cops_close (struct net_device *dev);
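/* Undo the resource claims made by cops_probe1(): the IRQ and the I/O region. */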
static void cleanup_card(struct net_device *dev)
{
if (dev->irq)
free_irq(dev->irq, dev);
release_region(dev->base_addr, COPS_IO_EXTENT);
}
/*
 * Check for a network adaptor of this type, and return the device iff one
 * exists.
 * If dev->base_addr == 0, probe all likely locations.
 * If dev->base_addr is in [1..0x1ff], always return failure.
 * Otherwise go with what we pass in.
*/
struct net_device * __init cops_probe(int unit)
{
struct net_device *dev;
unsigned *port;
int base_addr;
int err = 0;
dev = alloc_ltalkdev(sizeof(struct cops_local));
if (!dev)
return ERR_PTR(-ENOMEM);
if (unit >= 0) {
sprintf(dev->name, "lt%d", unit);
netdev_boot_setup_check(dev);
irq = dev->irq;
base_addr = dev->base_addr;
} else {
base_addr = dev->base_addr = io;
}
if (base_addr > 0x1ff) { /* Check a single specified location. */
err = cops_probe1(dev, base_addr);
} else if (base_addr != 0) { /* Don't probe at all. */
err = -ENXIO;
} else {
/* FIXME Does this really work for cards which generate irq?
* It's definitely N.G. for polled Tangent. sh
* Dayna cards don't autoprobe well at all, but if your card is
* at IRQ 5 & IO 0x240 we find it every time. ;) JS
*/
for (port = ports; *port && cops_probe1(dev, *port) < 0; port++)
;
if (!*port)
err = -ENODEV;
}
if (err)
goto out;
err = register_netdev(dev);
if (err)
goto out1;
return dev;
out1:
cleanup_card(dev);
out:
free_netdev(dev);
return ERR_PTR(err);
}
static const struct net_device_ops cops_netdev_ops = {
.ndo_open = cops_open,
.ndo_stop = cops_close,
.ndo_start_xmit = cops_send_packet,
.ndo_tx_timeout = cops_timeout,
.ndo_do_ioctl = cops_ioctl,
.ndo_set_rx_mode = set_multicast_list,
};
/*
* This is the real probe routine. Linux has a history of friendly device
 * probes on the ISA bus. A good device probe avoids doing writes, and
* verifies that the correct device exists and functions.
*/
static int __init cops_probe1(struct net_device *dev, int ioaddr)
{
struct cops_local *lp;
static unsigned version_printed;
int board = board_type;
int retval;
if(cops_debug && version_printed++ == 0)
printk("%s", version);
/* Grab the region so no one else tries to probe our ioports. */
if (!request_region(ioaddr, COPS_IO_EXTENT, dev->name))
return -EBUSY;
/*
* Since this board has jumpered interrupts, allocate the interrupt
* vector now. There is no point in waiting since no other device
* can use the interrupt, and this marks the irq as busy. Jumpered
* interrupts are typically not reported by the boards, and we must
 * use AutoIRQ to find them.
*/
dev->irq = irq;
switch (dev->irq)
{
case 0:
/* COPS AutoIRQ routine */
dev->irq = cops_irq(ioaddr, board);
if (dev->irq)
break;
fallthrough; /* Once no IRQ found on this port */
case 1:
retval = -EINVAL;
goto err_out;
/* Fixup for users that don't know that IRQ 2 is really
* IRQ 9, or don't know which one to set.
*/
case 2:
dev->irq = 9;
break;
/* Polled operation requested. Although irq of zero passed as
* a parameter tells the init routines to probe, we'll
* overload it to denote polled operation at runtime.
*/
case 0xff:
dev->irq = 0;
break;
default:
break;
}
dev->base_addr = ioaddr;
/* Reserve any actual interrupt. */
if (dev->irq) {
retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev);
if (retval)
goto err_out;
}
lp = netdev_priv(dev);
spin_lock_init(&lp->lock);
/* Copy local board variable to lp struct. */
lp->board = board;
dev->netdev_ops = &cops_netdev_ops;
dev->watchdog_timeo = HZ * 2;
/* Tell the user where the card is and what mode we're in. */
if(board==DAYNA)
printk("%s: %s at %#3x, using IRQ %d, in Dayna mode.\n",
dev->name, cardname, ioaddr, dev->irq);
if(board==TANGENT) {
if(dev->irq)
printk("%s: %s at %#3x, IRQ %d, in Tangent mode\n",
dev->name, cardname, ioaddr, dev->irq);
else
printk("%s: %s at %#3x, using polled IO, in Tangent mode.\n",
dev->name, cardname, ioaddr);
}
return 0;
err_out:
release_region(ioaddr, COPS_IO_EXTENT);
return retval;
}
static int __init cops_irq (int ioaddr, int board)
{ /*
* This does not use the IRQ to determine where the IRQ is. We just
* assume that when we get a correct status response that it's the IRQ.
* This really just verifies the IO port but since we only have access
* to such a small number of IRQs (5, 4, 3) this is not bad.
* This will probably not work for more than one card.
*/
int irqaddr=0;
int i, x, status;
if(board==DAYNA)
{
outb(0, ioaddr+DAYNA_RESET);
inb(ioaddr+DAYNA_RESET);
mdelay(333);
}
if(board==TANGENT)
{
inb(ioaddr);
outb(0, ioaddr);
outb(0, ioaddr+TANG_RESET);
}
for(i=0; cops_irqlist[i] !=0; i++)
{
irqaddr = cops_irqlist[i];
for(x = 0xFFFF; x>0; x --) /* wait for response */
{
if(board==DAYNA)
{
status = (inb(ioaddr+DAYNA_CARD_STATUS)&3);
if(status == 1)
return irqaddr;
}
if(board==TANGENT)
{
if((inb(ioaddr+TANG_CARD_STATUS)& TANG_TX_READY) !=0)
return irqaddr;
}
}
}
return 0; /* no IRQ found */
}
/*
* Open/initialize the board. This is called (in the current kernel)
* sometime after booting when the 'ifconfig' program is run.
*/
static int cops_open(struct net_device *dev)
{
struct cops_local *lp = netdev_priv(dev);
if(dev->irq==0)
{
/*
* I don't know if the Dayna-style boards support polled
* operation. For now, only allow it for Tangent.
*/
if(lp->board==TANGENT) /* Poll 20 times per second */
{
cops_timer_dev = dev;
timer_setup(&cops_timer, cops_poll, 0);
cops_timer.expires = jiffies + HZ/20;
add_timer(&cops_timer);
}
else
{
printk(KERN_WARNING "%s: No irq line set\n", dev->name);
return -EAGAIN;
}
}
cops_jumpstart(dev); /* Start the card up. */
netif_start_queue(dev);
return 0;
}
/*
* This allows for a dynamic start/restart of the entire card.
*/
static int cops_jumpstart(struct net_device *dev)
{
struct cops_local *lp = netdev_priv(dev);
/*
* Once the card has the firmware loaded and has acquired
* the nodeid, if it is reset it will lose it all.
*/
cops_reset(dev,1); /* Need to reset card before load firmware. */
cops_load(dev); /* Load the firmware. */
/*
* If atalkd already gave us a nodeid we will use that
* one again, else we wait for atalkd to give us a nodeid
* in cops_ioctl. This may cause a problem if someone steals
* our nodeid while we are resetting.
*/
if(lp->nodeid == 1)
cops_nodeid(dev,lp->node_acquire);
return 0;
}
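/*
 * Busy-wait (up to about 5ms) for the Tangent card to signal
 * TANG_TX_READY after a reset.
 */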
static void tangent_wait_reset(int ioaddr)
{
int timeout=0;
while(timeout++ < 5 && (inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
mdelay(1); /* Wait 1ms per iteration */
}
/*
* Reset the LocalTalk board.
*/
static void cops_reset(struct net_device *dev, int sleep)
{
struct cops_local *lp = netdev_priv(dev);
int ioaddr=dev->base_addr;
if(lp->board==TANGENT)
{
inb(ioaddr); /* Clear request latch. */
outb(0,ioaddr); /* Clear the TANG_TX_READY flop. */
outb(0, ioaddr+TANG_RESET); /* Reset the adapter. */
tangent_wait_reset(ioaddr);
outb(0, ioaddr+TANG_CLEAR_INT);
}
if(lp->board==DAYNA)
{
outb(0, ioaddr+DAYNA_RESET); /* Assert the reset port */
inb(ioaddr+DAYNA_RESET); /* Clear the reset */
if (sleep)
msleep(333);
else
mdelay(333);
}
netif_wake_queue(dev);
}
static void cops_load (struct net_device *dev)
{
struct ifreq ifr;
struct ltfirmware *ltf= (struct ltfirmware *)&ifr.ifr_ifru;
struct cops_local *lp = netdev_priv(dev);
int ioaddr=dev->base_addr;
int length, i = 0;
strcpy(ifr.ifr_name,"lt0");
/* Get card's firmware code and do some checks on it. */
#ifdef CONFIG_COPS_DAYNA
if(lp->board==DAYNA)
{
ltf->length=sizeof(ffdrv_code);
ltf->data=ffdrv_code;
}
else
#endif
#ifdef CONFIG_COPS_TANGENT
if(lp->board==TANGENT)
{
ltf->length=sizeof(ltdrv_code);
ltf->data=ltdrv_code;
}
else
#endif
{
printk(KERN_INFO "%s; unsupported board type.\n", dev->name);
return;
}
/* Check to make sure firmware is correct length. */
if(lp->board==DAYNA && ltf->length!=5983)
{
printk(KERN_WARNING "%s: Firmware is not length of FFDRV.BIN.\n", dev->name);
return;
}
if(lp->board==TANGENT && ltf->length!=2501)
{
printk(KERN_WARNING "%s: Firmware is not length of DRVCODE.BIN.\n", dev->name);
return;
}
if(lp->board==DAYNA)
{
/*
* We must wait for a status response
* with the DAYNA board.
*/
while(++i<65536)
{
if((inb(ioaddr+DAYNA_CARD_STATUS)&3)==1)
break;
}
if(i==65536)
return;
}
/*
* Upload the firmware and kick. Byte-by-byte works nicely here.
*/
i=0;
length = ltf->length;
while(length--)
{
outb(ltf->data[i], ioaddr);
i++;
}
if(cops_debug > 1)
printk("%s: Uploaded firmware - %d bytes of %d bytes.\n",
dev->name, i, ltf->length);
if(lp->board==DAYNA) /* Tell Dayna to run the firmware code. */
outb(1, ioaddr+DAYNA_INT_CARD);
else /* Tell Tang to run the firmware code. */
inb(ioaddr);
if(lp->board==TANGENT)
{
tangent_wait_reset(ioaddr);
inb(ioaddr); /* Clear initial ready signal. */
}
}
/*
* Get the LocalTalk Nodeid from the card. We can suggest
 * any nodeid 1-254. The card will try to get that exact
 * address; otherwise we can specify 0 as the nodeid and the card
 * will autoprobe for one.
*/
static int cops_nodeid (struct net_device *dev, int nodeid)
{
struct cops_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
if(lp->board == DAYNA)
{
/* Empty any pending adapter responses. */
while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0)
{
outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupts. */
if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST)
cops_rx(dev); /* Kick any packets waiting. */
schedule();
}
outb(2, ioaddr); /* Output command packet length as 2. */
outb(0, ioaddr);
outb(LAP_INIT, ioaddr); /* Send LAP_INIT command byte. */
outb(nodeid, ioaddr); /* Suggest node address. */
}
if(lp->board == TANGENT)
{
/* Empty any pending adapter responses. */
while(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY)
{
outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupt. */
cops_rx(dev); /* Kick out packets waiting. */
schedule();
}
/* Not sure what Tangent does if the nodeid we pick is already in use. */
if(nodeid == 0) /* Seed. */
nodeid = jiffies&0xFF; /* Get a random try */
outb(2, ioaddr); /* Command length LSB */
outb(0, ioaddr); /* Command length MSB */
outb(LAP_INIT, ioaddr); /* Send LAP_INIT byte */
outb(nodeid, ioaddr); /* LAP address hint. */
outb(0xFF, ioaddr); /* Int. level to use */
}
lp->node_acquire=0; /* Set nodeid holder to 0. */
while(lp->node_acquire==0) /* Get *True* nodeid finally. */
{
outb(0, ioaddr+COPS_CLEAR_INT); /* Clear any interrupt. */
if(lp->board == DAYNA)
{
if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST)
cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */
}
if(lp->board == TANGENT)
{
if(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY)
cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */
}
schedule();
}
if(cops_debug > 1)
printk(KERN_DEBUG "%s: Node ID %d has been acquired.\n",
dev->name, lp->node_acquire);
lp->nodeid=1; /* Set got nodeid to 1. */
return 0;
}
/*
* Poll the Tangent type cards to see if we have work.
*/
static void cops_poll(struct timer_list *unused)
{
int ioaddr, status;
int boguscount = 0;
struct net_device *dev = cops_timer_dev;
del_timer(&cops_timer);
if(dev == NULL)
return; /* We've been downed */
ioaddr = dev->base_addr;
do {
status=inb(ioaddr+TANG_CARD_STATUS);
if(status & TANG_RX_READY)
cops_rx(dev);
if(status & TANG_TX_READY)
netif_wake_queue(dev);
status = inb(ioaddr+TANG_CARD_STATUS);
} while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY)));
/* poll 20 times per second */
cops_timer.expires = jiffies + HZ/20;
add_timer(&cops_timer);
}
/*
* The typical workload of the driver:
* Handle the network interface interrupts.
*/
static irqreturn_t cops_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct cops_local *lp;
int ioaddr, status;
int boguscount = 0;
ioaddr = dev->base_addr;
lp = netdev_priv(dev);
if(lp->board==DAYNA)
{
do {
outb(0, ioaddr + COPS_CLEAR_INT);
status=inb(ioaddr+DAYNA_CARD_STATUS);
if((status&0x03)==DAYNA_RX_REQUEST)
cops_rx(dev);
netif_wake_queue(dev);
} while(++boguscount < 20);
}
else
{
do {
status=inb(ioaddr+TANG_CARD_STATUS);
if(status & TANG_RX_READY)
cops_rx(dev);
if(status & TANG_TX_READY)
netif_wake_queue(dev);
status=inb(ioaddr+TANG_CARD_STATUS);
} while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY)));
}
return IRQ_HANDLED;
}
/*
* We have a good packet(s), get it/them out of the buffers.
*/
static void cops_rx(struct net_device *dev)
{
int pkt_len = 0;
int rsp_type = 0;
struct sk_buff *skb = NULL;
struct cops_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
int boguscount = 0;
unsigned long flags;
spin_lock_irqsave(&lp->lock, flags);
if(lp->board==DAYNA)
{
outb(0, ioaddr); /* Send out Zero length. */
outb(0, ioaddr);
outb(DATA_READ, ioaddr); /* Send read command out. */
/* Wait for DMA to turn around. */
while(++boguscount<1000000)
{
barrier();
if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_READY)
break;
}
if(boguscount==1000000)
{
printk(KERN_WARNING "%s: DMA timed out.\n",dev->name);
spin_unlock_irqrestore(&lp->lock, flags);
return;
}
}
/* Get response length. */
pkt_len = inb(ioaddr);
pkt_len |= (inb(ioaddr) << 8);
/* Input IO code. */
rsp_type=inb(ioaddr);
/* Malloc up new buffer. */
skb = dev_alloc_skb(pkt_len);
if(skb == NULL)
{
printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n",
dev->name);
dev->stats.rx_dropped++;
while(pkt_len--) /* Discard packet */
inb(ioaddr);
spin_unlock_irqrestore(&lp->lock, flags);
return;
}
skb->dev = dev;
skb_put(skb, pkt_len);
skb->protocol = htons(ETH_P_LOCALTALK);
insb(ioaddr, skb->data, pkt_len); /* Eat the Data */
if(lp->board==DAYNA)
outb(1, ioaddr+DAYNA_INT_CARD); /* Interrupt the card */
spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */
/* Check for bad response length */
if(pkt_len < 0 || pkt_len > MAX_LLAP_SIZE)
{
printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n",
dev->name, pkt_len);
dev->stats.tx_errors++;
dev_kfree_skb_any(skb);
return;
}
/* Set nodeid and then get out. */
if(rsp_type == LAP_INIT_RSP)
{ /* Nodeid taken from received packet. */
lp->node_acquire = skb->data[0];
dev_kfree_skb_any(skb);
return;
}
/* One last check to make sure we have a good packet. */
if(rsp_type != LAP_RESPONSE)
{
printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type);
dev->stats.tx_errors++;
dev_kfree_skb_any(skb);
return;
}
skb_reset_mac_header(skb); /* Point to entire packet. */
skb_pull(skb,3);
skb_reset_transport_header(skb); /* Point to data (Skip header). */
/* Update the counters. */
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
/* Send packet to a higher place. */
netif_rx(skb);
}
static void cops_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cops_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
dev->stats.tx_errors++;
if(lp->board==TANGENT)
{
if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
printk(KERN_WARNING "%s: No TX complete interrupt.\n", dev->name);
}
printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
cops_jumpstart(dev); /* Restart the card. */
netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
/*
* Make the card transmit a LocalTalk packet.
*/
static netdev_tx_t cops_send_packet(struct sk_buff *skb,
struct net_device *dev)
{
struct cops_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
unsigned long flags;
/*
* Block a timer-based transmit from overlapping.
*/
netif_stop_queue(dev);
spin_lock_irqsave(&lp->lock, flags);
if(lp->board == DAYNA) /* Wait for adapter transmit buffer. */
while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0)
cpu_relax();
if(lp->board == TANGENT) /* Wait for adapter transmit buffer. */
while((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
cpu_relax();
/* Output IO length. */
outb(skb->len, ioaddr);
outb(skb->len >> 8, ioaddr);
/* Output IO code. */
outb(LAP_WRITE, ioaddr);
if(lp->board == DAYNA) /* Check the transmit buffer again. */
while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0);
outsb(ioaddr, skb->data, skb->len); /* Send out the data. */
if(lp->board==DAYNA) /* Dayna requires you kick the card */
outb(1, ioaddr+DAYNA_INT_CARD);
spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */
/* Done sending packet, update counters and cleanup. */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
dev_kfree_skb (skb);
return NETDEV_TX_OK;
}
/*
* Dummy function to keep the Appletalk layer happy.
*/
static void set_multicast_list(struct net_device *dev)
{
if(cops_debug >= 3)
printk("%s: set_multicast_list executed\n", dev->name);
}
/*
* System ioctls for the COPS LocalTalk card.
*/
static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct cops_local *lp = netdev_priv(dev);
struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr;
struct atalk_addr *aa = &lp->node_addr;
switch(cmd)
{
case SIOCSIFADDR:
/* Get and set the nodeid and network # atalkd wants. */
cops_nodeid(dev, sa->sat_addr.s_node);
aa->s_net = sa->sat_addr.s_net;
aa->s_node = lp->node_acquire;
/* Set broadcast address. */
dev->broadcast[0] = 0xFF;
/* Set hardware address. */
dev->addr_len = 1;
dev_addr_set(dev, &aa->s_node);
return 0;
case SIOCGIFADDR:
sa->sat_addr.s_net = aa->s_net;
sa->sat_addr.s_node = aa->s_node;
return 0;
default:
return -EOPNOTSUPP;
}
}
/*
* The inverse routine to cops_open().
*/
static int cops_close(struct net_device *dev)
{
struct cops_local *lp = netdev_priv(dev);
/* If we were running polled, yank the timer.
*/
if(lp->board==TANGENT && dev->irq==0)
del_timer(&cops_timer);
netif_stop_queue(dev);
return 0;
}
#ifdef MODULE
static struct net_device *cops_dev;
MODULE_LICENSE("GPL");
module_param_hw(io, int, ioport, 0);
module_param_hw(irq, int, irq, 0);
module_param_hw(board_type, int, other, 0);
static int __init cops_module_init(void)
{
if (io == 0)
printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
cardname);
cops_dev = cops_probe(-1);
return PTR_ERR_OR_ZERO(cops_dev);
}
static void __exit cops_module_exit(void)
{
unregister_netdev(cops_dev);
cleanup_card(cops_dev);
free_netdev(cops_dev);
}
module_init(cops_module_init);
module_exit(cops_module_exit);
#endif /* MODULE */
| linux-master | drivers/net/appletalk/cops.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <net/bonding.h>
#include <net/bond_alb.h>
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
#include <linux/debugfs.h>
#include <linux/seq_file.h>
static struct dentry *bonding_debug_root;
/* Show RLB hash table */
static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
{
struct bonding *bond = m->private;
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *client_info;
u32 hash_index;
if (BOND_MODE(bond) != BOND_MODE_ALB)
return 0;
seq_printf(m, "SourceIP DestinationIP "
"Destination MAC DEV\n");
spin_lock_bh(&bond->mode_lock);
hash_index = bond_info->rx_hashtbl_used_head;
for (; hash_index != RLB_NULL_INDEX;
hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n",
&client_info->ip_src,
&client_info->ip_dst,
&client_info->mac_dst,
client_info->slave->dev->name);
}
spin_unlock_bh(&bond->mode_lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(bond_debug_rlb_hash);
void bond_debug_register(struct bonding *bond)
{
bond->debug_dir =
debugfs_create_dir(bond->dev->name, bonding_debug_root);
debugfs_create_file("rlb_hash_table", 0400, bond->debug_dir,
bond, &bond_debug_rlb_hash_fops);
}
void bond_debug_unregister(struct bonding *bond)
{
debugfs_remove_recursive(bond->debug_dir);
}
void bond_debug_reregister(struct bonding *bond)
{
struct dentry *d;
d = debugfs_rename(bonding_debug_root, bond->debug_dir,
bonding_debug_root, bond->dev->name);
if (!IS_ERR(d)) {
bond->debug_dir = d;
} else {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
bond_debug_unregister(bond);
}
}
void __init bond_create_debugfs(void)
{
bonding_debug_root = debugfs_create_dir("bonding", NULL);
if (IS_ERR(bonding_debug_root))
pr_warn("Warning: Cannot create bonding directory in debugfs\n");
}
void bond_destroy_debugfs(void)
{
debugfs_remove_recursive(bonding_debug_root);
bonding_debug_root = NULL;
}
#else /* !CONFIG_DEBUG_FS */
void bond_debug_register(struct bonding *bond)
{
}
void bond_debug_unregister(struct bonding *bond)
{
}
void bond_debug_reregister(struct bonding *bond)
{
}
void __init bond_create_debugfs(void)
{
}
void bond_destroy_debugfs(void)
{
}
#endif /* CONFIG_DEBUG_FS */
| linux-master | drivers/net/bonding/bond_debugfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/nsproxy.h>
#include <net/bonding.h>
#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd))))
/* "show" function for the bond_masters attribute.
* The class parameter is ignored.
*/
static ssize_t bonding_show_bonds(const struct class *cls,
const struct class_attribute *attr,
char *buf)
{
const struct bond_net *bn =
container_of_const(attr, struct bond_net, class_attr_bonding_masters);
int res = 0;
struct bonding *bond;
rtnl_lock();
list_for_each_entry(bond, &bn->dev_list, bond_list) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
res += sysfs_emit_at(buf, res, "%s ", bond->dev->name);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
rtnl_unlock();
return res;
}
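/* Look up a bonding master by name in this namespace's bond list; called under RTNL. */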
static struct net_device *bond_get_by_name(const struct bond_net *bn, const char *ifname)
{
struct bonding *bond;
list_for_each_entry(bond, &bn->dev_list, bond_list) {
if (strncmp(bond->dev->name, ifname, IFNAMSIZ) == 0)
return bond->dev;
}
return NULL;
}
/* "store" function for the bond_masters attribute. This is what
* creates and deletes entire bonds.
*
* The class parameter is ignored.
*/
static ssize_t bonding_store_bonds(const struct class *cls,
const struct class_attribute *attr,
const char *buffer, size_t count)
{
const struct bond_net *bn =
container_of_const(attr, struct bond_net, class_attr_bonding_masters);
char command[IFNAMSIZ + 1] = {0, };
char *ifname;
int rv, res = count;
sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
ifname = command + 1;
if ((strlen(command) <= 1) ||
!dev_valid_name(ifname))
goto err_no_cmd;
if (command[0] == '+') {
pr_info("%s is being created...\n", ifname);
rv = bond_create(bn->net, ifname);
if (rv) {
if (rv == -EEXIST)
pr_info("%s already exists\n", ifname);
else
pr_info("%s creation failed\n", ifname);
res = rv;
}
} else if (command[0] == '-') {
struct net_device *bond_dev;
rtnl_lock();
bond_dev = bond_get_by_name(bn, ifname);
if (bond_dev) {
pr_info("%s is being deleted...\n", ifname);
unregister_netdevice(bond_dev);
} else {
pr_err("unable to delete non-existent %s\n", ifname);
res = -ENODEV;
}
rtnl_unlock();
} else
goto err_no_cmd;
/* Always return either count or an error. If you return 0, you'll
* get called forever, which is bad.
*/
return res;
err_no_cmd:
pr_err("no command found in bonding_masters - use +ifname or -ifname\n");
return -EPERM;
}
/* class attribute for bond_masters file. This ends up in /sys/class/net */
static const struct class_attribute class_attr_bonding_masters = {
.attr = {
.name = "bonding_masters",
.mode = 0644,
},
.show = bonding_show_bonds,
.store = bonding_store_bonds,
};
/* Generic "store" method for bonding sysfs option setting */
static ssize_t bonding_sysfs_store_option(struct device *d,
struct device_attribute *attr,
const char *buffer, size_t count)
{
struct bonding *bond = to_bond(d);
const struct bond_option *opt;
char *buffer_clone;
int ret;
opt = bond_opt_get_by_name(attr->attr.name);
if (WARN_ON(!opt))
return -ENOENT;
buffer_clone = kstrndup(buffer, count, GFP_KERNEL);
if (!buffer_clone)
return -ENOMEM;
ret = bond_opt_tryset_rtnl(bond, opt->id, buffer_clone);
if (!ret)
ret = count;
kfree(buffer_clone);
return ret;
}
/* Show the slaves in the current bond. */
static ssize_t bonding_show_slaves(struct device *d,
struct device_attribute *attr, char *buf)
{
struct bonding *bond = to_bond(d);
struct list_head *iter;
struct slave *slave;
int res = 0;
if (!rtnl_trylock())
return restart_syscall();
bond_for_each_slave(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
res += sysfs_emit_at(buf, res, "%s ", slave->dev->name);
}
rtnl_unlock();
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
return res;
}
static DEVICE_ATTR(slaves, 0644, bonding_show_slaves,
bonding_sysfs_store_option);
/* Show the bonding mode. */
static ssize_t bonding_show_mode(struct device *d,
struct device_attribute *attr, char *buf)
{
struct bonding *bond = to_bond(d);
const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond));
return sysfs_emit(buf, "%s %d\n", val->string, BOND_MODE(bond));
}
static DEVICE_ATTR(mode, 0644, bonding_show_mode, bonding_sysfs_store_option);
/* Show the bonding transmit hash method. */
static ssize_t bonding_show_xmit_hash(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
return sysfs_emit(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
static DEVICE_ATTR(xmit_hash_policy, 0644,
bonding_show_xmit_hash, bonding_sysfs_store_option);
/* Show arp_validate. */
static ssize_t bonding_show_arp_validate(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
bond->params.arp_validate);
return sysfs_emit(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
static DEVICE_ATTR(arp_validate, 0644, bonding_show_arp_validate,
bonding_sysfs_store_option);
/* Show arp_all_targets. */
static ssize_t bonding_show_arp_all_targets(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
bond->params.arp_all_targets);
return sysfs_emit(buf, "%s %d\n",
val->string, bond->params.arp_all_targets);
}
static DEVICE_ATTR(arp_all_targets, 0644,
bonding_show_arp_all_targets, bonding_sysfs_store_option);
/* Show fail_over_mac. */
static ssize_t bonding_show_fail_over_mac(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
bond->params.fail_over_mac);
return sysfs_emit(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
static DEVICE_ATTR(fail_over_mac, 0644,
bonding_show_fail_over_mac, bonding_sysfs_store_option);
/* Show the arp timer interval. */
static ssize_t bonding_show_arp_interval(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sysfs_emit(buf, "%d\n", bond->params.arp_interval);
}
static DEVICE_ATTR(arp_interval, 0644,
bonding_show_arp_interval, bonding_sysfs_store_option);
/* Show the arp targets. */
static ssize_t bonding_show_arp_targets(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
int i, res = 0;
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
if (bond->params.arp_targets[i])
res += sysfs_emit_at(buf, res, "%pI4 ",
&bond->params.arp_targets[i]);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
return res;
}
static DEVICE_ATTR(arp_ip_target, 0644,
bonding_show_arp_targets, bonding_sysfs_store_option);
/* Show the arp missed max. */
static ssize_t bonding_show_missed_max(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sysfs_emit(buf, "%u\n", bond->params.missed_max);
}
static DEVICE_ATTR(arp_missed_max, 0644,
bonding_show_missed_max, bonding_sysfs_store_option);
/* Show the up and down delays. */
static ssize_t bonding_show_downdelay(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sysfs_emit(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
}
static DEVICE_ATTR(downdelay, 0644,
bonding_show_downdelay, bonding_sysfs_store_option);
static ssize_t bonding_show_updelay(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sysfs_emit(buf, "%d\n", bond->params.updelay * bond->params.miimon);
}
static DEVICE_ATTR(updelay, 0644,
bonding_show_updelay, bonding_sysfs_store_option);
static ssize_t bonding_show_peer_notif_delay(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sysfs_emit(buf, "%d\n",
bond->params.peer_notif_delay * bond->params.miimon);
}
static DEVICE_ATTR(peer_notif_delay, 0644,
bonding_show_peer_notif_delay, bonding_sysfs_store_option);
/* Show the LACP activity and interval. */
static ssize_t bonding_show_lacp_active(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_LACP_ACTIVE, bond->params.lacp_active);
return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_active);
}
static DEVICE_ATTR(lacp_active, 0644,
bonding_show_lacp_active, bonding_sysfs_store_option);
static ssize_t bonding_show_lacp_rate(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
static DEVICE_ATTR(lacp_rate, 0644,
bonding_show_lacp_rate, bonding_sysfs_store_option);
static ssize_t bonding_show_min_links(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sysfs_emit(buf, "%u\n", bond->params.min_links);
}
static DEVICE_ATTR(min_links, 0644,
bonding_show_min_links, bonding_sysfs_store_option);
static ssize_t bonding_show_ad_select(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
return sysfs_emit(buf, "%s %d\n", val->string, bond->params.ad_select);
}
static DEVICE_ATTR(ad_select, 0644,
bonding_show_ad_select, bonding_sysfs_store_option);
/* Show the number of peer notifications to send after a failover event. */
static ssize_t bonding_show_num_peer_notif(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sysfs_emit(buf, "%d\n", bond->params.num_peer_notif);
}
static DEVICE_ATTR(num_grat_arp, 0644,
bonding_show_num_peer_notif, bonding_sysfs_store_option);
static DEVICE_ATTR(num_unsol_na, 0644,
bonding_show_num_peer_notif, bonding_sysfs_store_option);
/* Show the MII monitor interval. */
static ssize_t bonding_show_miimon(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sysfs_emit(buf, "%d\n", bond->params.miimon);
}
static DEVICE_ATTR(miimon, 0644,
bonding_show_miimon, bonding_sysfs_store_option);
/* Show the primary slave. */
static ssize_t bonding_show_primary(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
struct slave *primary;
int count = 0;
rcu_read_lock();
primary = rcu_dereference(bond->primary_slave);
if (primary)
count = sysfs_emit(buf, "%s\n", primary->dev->name);
rcu_read_unlock();
return count;
}
static DEVICE_ATTR(primary, 0644,
bonding_show_primary, bonding_sysfs_store_option);
/* Show the primary_reselect flag. */
static ssize_t bonding_show_primary_reselect(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
bond->params.primary_reselect);
return sysfs_emit(buf, "%s %d\n",
val->string, bond->params.primary_reselect);
}
static DEVICE_ATTR(primary_reselect, 0644,
bonding_show_primary_reselect, bonding_sysfs_store_option);
/* Show the use_carrier flag. */
static ssize_t bonding_show_carrier(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sysfs_emit(buf, "%d\n", bond->params.use_carrier);
}
static DEVICE_ATTR(use_carrier, 0644,
bonding_show_carrier, bonding_sysfs_store_option);
/* Show currently active_slave. */
static ssize_t bonding_show_active_slave(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
struct net_device *slave_dev;
int count = 0;
rcu_read_lock();
slave_dev = bond_option_active_slave_get_rcu(bond);
if (slave_dev)
count = sysfs_emit(buf, "%s\n", slave_dev->name);
rcu_read_unlock();
return count;
}
static DEVICE_ATTR(active_slave, 0644,
bonding_show_active_slave, bonding_sysfs_store_option);
/* Show link status of the bond interface. */
static ssize_t bonding_show_mii_status(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
bool active = netif_carrier_ok(bond->dev);
return sysfs_emit(buf, "%s\n", active ? "up" : "down");
}
static DEVICE_ATTR(mii_status, 0444, bonding_show_mii_status, NULL);
/* Show current 802.3ad aggregator ID. */
static ssize_t bonding_show_ad_aggregator(struct device *d,
struct device_attribute *attr,
char *buf)
{
int count = 0;
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sysfs_emit(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
? 0 : ad_info.aggregator_id);
}
return count;
}
static DEVICE_ATTR(ad_aggregator, 0444, bonding_show_ad_aggregator, NULL);
/* Show number of active 802.3ad ports. */
static ssize_t bonding_show_ad_num_ports(struct device *d,
struct device_attribute *attr,
char *buf)
{
int count = 0;
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sysfs_emit(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
? 0 : ad_info.ports);
}
return count;
}
static DEVICE_ATTR(ad_num_ports, 0444, bonding_show_ad_num_ports, NULL);
/* Show current 802.3ad actor key. */
static ssize_t bonding_show_ad_actor_key(struct device *d,
struct device_attribute *attr,
char *buf)
{
int count = 0;
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
count = sysfs_emit(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
? 0 : ad_info.actor_key);
}
return count;
}
static DEVICE_ATTR(ad_actor_key, 0444, bonding_show_ad_actor_key, NULL);
/* Show current 802.3ad partner key. */
static ssize_t bonding_show_ad_partner_key(struct device *d,
struct device_attribute *attr,
char *buf)
{
int count = 0;
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
count = sysfs_emit(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
? 0 : ad_info.partner_key);
}
return count;
}
static DEVICE_ATTR(ad_partner_key, 0444, bonding_show_ad_partner_key, NULL);
/* Show current 802.3ad partner mac. */
static ssize_t bonding_show_ad_partner_mac(struct device *d,
struct device_attribute *attr,
char *buf)
{
int count = 0;
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
if (!bond_3ad_get_active_agg_info(bond, &ad_info))
count = sysfs_emit(buf, "%pM\n", ad_info.partner_system);
}
return count;
}
static DEVICE_ATTR(ad_partner_mac, 0444, bonding_show_ad_partner_mac, NULL);
/* Show the queue_ids of the slaves in the current bond. */
static ssize_t bonding_show_queue_id(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
struct list_head *iter;
struct slave *slave;
int res = 0;
if (!rtnl_trylock())
return restart_syscall();
bond_for_each_slave(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
res += sysfs_emit_at(buf, res, "%s:%d ",
slave->dev->name, slave->queue_id);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
rtnl_unlock();
return res;
}
static DEVICE_ATTR(queue_id, 0644, bonding_show_queue_id,
bonding_sysfs_store_option);
/* Show the all_slaves_active flag. */
static ssize_t bonding_show_slaves_active(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sysfs_emit(buf, "%d\n", bond->params.all_slaves_active);
}
static DEVICE_ATTR(all_slaves_active, 0644,
bonding_show_slaves_active, bonding_sysfs_store_option);
/* Show the number of IGMP membership reports to send on link failure */
static ssize_t bonding_show_resend_igmp(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sysfs_emit(buf, "%d\n", bond->params.resend_igmp);
}
static DEVICE_ATTR(resend_igmp, 0644,
bonding_show_resend_igmp, bonding_sysfs_store_option);
static ssize_t bonding_show_lp_interval(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sysfs_emit(buf, "%d\n", bond->params.lp_interval);
}
static DEVICE_ATTR(lp_interval, 0644,
bonding_show_lp_interval, bonding_sysfs_store_option);
static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
return sysfs_emit(buf, "%d\n", bond->params.tlb_dynamic_lb);
}
static DEVICE_ATTR(tlb_dynamic_lb, 0644,
bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
static ssize_t bonding_show_packets_per_slave(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
return sysfs_emit(buf, "%u\n", packets_per_slave);
}
static DEVICE_ATTR(packets_per_slave, 0644,
bonding_show_packets_per_slave, bonding_sysfs_store_option);
static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
return sysfs_emit(buf, "%hu\n", bond->params.ad_actor_sys_prio);
return 0;
}
static DEVICE_ATTR(ad_actor_sys_prio, 0644,
bonding_show_ad_actor_sys_prio, bonding_sysfs_store_option);
static ssize_t bonding_show_ad_actor_system(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
return sysfs_emit(buf, "%pM\n", bond->params.ad_actor_system);
return 0;
}
static DEVICE_ATTR(ad_actor_system, 0644,
bonding_show_ad_actor_system, bonding_sysfs_store_option);
static ssize_t bonding_show_ad_user_port_key(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
return sysfs_emit(buf, "%hu\n", bond->params.ad_user_port_key);
return 0;
}
static DEVICE_ATTR(ad_user_port_key, 0644,
bonding_show_ad_user_port_key, bonding_sysfs_store_option);
static struct attribute *per_bond_attrs[] = {
&dev_attr_slaves.attr,
&dev_attr_mode.attr,
&dev_attr_fail_over_mac.attr,
&dev_attr_arp_validate.attr,
&dev_attr_arp_all_targets.attr,
&dev_attr_arp_interval.attr,
&dev_attr_arp_ip_target.attr,
&dev_attr_downdelay.attr,
&dev_attr_updelay.attr,
&dev_attr_peer_notif_delay.attr,
&dev_attr_lacp_active.attr,
&dev_attr_lacp_rate.attr,
&dev_attr_ad_select.attr,
&dev_attr_xmit_hash_policy.attr,
&dev_attr_num_grat_arp.attr,
&dev_attr_num_unsol_na.attr,
&dev_attr_miimon.attr,
&dev_attr_primary.attr,
&dev_attr_primary_reselect.attr,
&dev_attr_use_carrier.attr,
&dev_attr_active_slave.attr,
&dev_attr_mii_status.attr,
&dev_attr_ad_aggregator.attr,
&dev_attr_ad_num_ports.attr,
&dev_attr_ad_actor_key.attr,
&dev_attr_ad_partner_key.attr,
&dev_attr_ad_partner_mac.attr,
&dev_attr_queue_id.attr,
&dev_attr_all_slaves_active.attr,
&dev_attr_resend_igmp.attr,
&dev_attr_min_links.attr,
&dev_attr_lp_interval.attr,
&dev_attr_packets_per_slave.attr,
&dev_attr_tlb_dynamic_lb.attr,
&dev_attr_ad_actor_sys_prio.attr,
&dev_attr_ad_actor_system.attr,
&dev_attr_ad_user_port_key.attr,
&dev_attr_arp_missed_max.attr,
NULL,
};
static const struct attribute_group bonding_group = {
.name = "bonding",
.attrs = per_bond_attrs,
};
/* Initialize sysfs. This sets up the bonding_masters file in
* /sys/class/net.
*/
int __net_init bond_create_sysfs(struct bond_net *bn)
{
int ret;
bn->class_attr_bonding_masters = class_attr_bonding_masters;
sysfs_attr_init(&bn->class_attr_bonding_masters.attr);
ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters,
bn->net);
/* Permit multiple loads of the module by ignoring failures to
* create the bonding_masters sysfs file. Bonding devices
* created by second or subsequent loads of the module will
* not be listed in, or controllable by, bonding_masters, but
* will have the usual "bonding" sysfs directory.
*
* This is done to preserve backwards compatibility for
* initscripts/sysconfig, which load bonding multiple times to
* configure multiple bonding devices.
*/
if (ret == -EEXIST) {
/* Is someone being kinky and naming a device bonding_master? */
if (netdev_name_in_use(bn->net,
class_attr_bonding_masters.attr.name))
pr_err("network device named %s already exists in sysfs\n",
class_attr_bonding_masters.attr.name);
ret = 0;
}
return ret;
}
/* Remove /sys/class/net/bonding_masters. */
void __net_exit bond_destroy_sysfs(struct bond_net *bn)
{
netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
}
/* Initialize sysfs for each bond. This sets up and registers
 * the 'bonding' directory for each individual bond under /sys/class/net.
*/
void bond_prepare_sysfs_group(struct bonding *bond)
{
bond->dev->sysfs_groups[0] = &bonding_group;
}
| linux-master | drivers/net/bonding/bond_sysfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/pkt_sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_bonding.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <asm/byteorder.h>
#include <net/bonding.h>
#include <net/bond_alb.h>
static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
#pragma pack(1)
struct learning_pkt {
u8 mac_dst[ETH_ALEN];
u8 mac_src[ETH_ALEN];
__be16 type;
u8 padding[ETH_ZLEN - ETH_HLEN];
};
struct arp_pkt {
__be16 hw_addr_space;
__be16 prot_addr_space;
u8 hw_addr_len;
u8 prot_addr_len;
__be16 op_code;
u8 mac_src[ETH_ALEN]; /* sender hardware address */
__be32 ip_src; /* sender IP address */
u8 mac_dst[ETH_ALEN]; /* target hardware address */
__be32 ip_dst; /* target IP address */
};
#pragma pack()
/* Forward declaration */
static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[],
bool strict_match);
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
static void rlb_src_unlink(struct bonding *bond, u32 index);
static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
u32 ip_dst_hash);
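/* Fold @hash_size bytes starting at @hash_start into an 8-bit value by XOR. */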
static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
{
int i;
u8 hash = 0;
for (i = 0; i < hash_size; i++)
hash ^= hash_start[i];
return hash;
}
/*********************** tlb specific functions ***************************/
static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
{
if (save_load) {
entry->load_history = 1 + entry->tx_bytes /
BOND_TLB_REBALANCE_INTERVAL;
entry->tx_bytes = 0;
}
entry->tx_slave = NULL;
entry->next = TLB_NULL_INDEX;
entry->prev = TLB_NULL_INDEX;
}
static inline void tlb_init_slave(struct slave *slave)
{
SLAVE_TLB_INFO(slave).load = 0;
SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
}
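/*
 * Walk the slave's chain of tx_hashtbl entries, re-initialising each one,
 * then reset the slave's own TLB bookkeeping. Caller is expected to hold
 * mode_lock.
 */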
static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
int save_load)
{
struct tlb_client_info *tx_hash_table;
u32 index;
/* clear slave from tx_hashtbl */
tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
/* skip this if we've already freed the tx hash table */
if (tx_hash_table) {
index = SLAVE_TLB_INFO(slave).head;
while (index != TLB_NULL_INDEX) {
u32 next_index = tx_hash_table[index].next;
tlb_init_table_entry(&tx_hash_table[index], save_load);
index = next_index;
}
}
tlb_init_slave(slave);
}
static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
int save_load)
{
spin_lock_bh(&bond->mode_lock);
__tlb_clear_slave(bond, slave, save_load);
spin_unlock_bh(&bond->mode_lock);
}
/* Must be called before starting the monitor timer */
static int tlb_initialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info);
struct tlb_client_info *new_hashtbl;
int i;
new_hashtbl = kzalloc(size, GFP_KERNEL);
if (!new_hashtbl)
return -ENOMEM;
spin_lock_bh(&bond->mode_lock);
bond_info->tx_hashtbl = new_hashtbl;
for (i = 0; i < TLB_HASH_TABLE_SIZE; i++)
tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
spin_unlock_bh(&bond->mode_lock);
return 0;
}
/* Must be called only after all slaves have been released */
static void tlb_deinitialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
spin_lock_bh(&bond->mode_lock);
kfree(bond_info->tx_hashtbl);
bond_info->tx_hashtbl = NULL;
spin_unlock_bh(&bond->mode_lock);
}
static long long compute_gap(struct slave *slave)
{
return (s64) (slave->speed << 20) - /* Convert to Megabit per sec */
(s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
}
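/* Illustrative numbers only: for a 1000 Mbit/s slave, speed << 20 is about
 * 1.05 * 10^9, while a load of 12,500,000 shifted left by 3 is 10^8, giving
 * a gap of roughly 9.5 * 10^8. tlb_get_least_loaded_slave() below simply
 * picks the slave with the largest such gap.
 */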
static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{
struct slave *slave, *least_loaded;
struct list_head *iter;
long long max_gap;
least_loaded = NULL;
max_gap = LLONG_MIN;
/* Find the slave with the largest gap */
bond_for_each_slave_rcu(bond, slave, iter) {
if (bond_slave_can_tx(slave)) {
long long gap = compute_gap(slave);
if (max_gap < gap) {
least_loaded = slave;
max_gap = gap;
}
}
}
return least_loaded;
}
static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
u32 skb_len)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct tlb_client_info *hash_table;
struct slave *assigned_slave;
hash_table = bond_info->tx_hashtbl;
assigned_slave = hash_table[hash_index].tx_slave;
if (!assigned_slave) {
assigned_slave = tlb_get_least_loaded_slave(bond);
if (assigned_slave) {
struct tlb_slave_info *slave_info =
&(SLAVE_TLB_INFO(assigned_slave));
u32 next_index = slave_info->head;
hash_table[hash_index].tx_slave = assigned_slave;
hash_table[hash_index].next = next_index;
hash_table[hash_index].prev = TLB_NULL_INDEX;
if (next_index != TLB_NULL_INDEX)
hash_table[next_index].prev = hash_index;
slave_info->head = hash_index;
slave_info->load +=
hash_table[hash_index].load_history;
}
}
if (assigned_slave)
hash_table[hash_index].tx_bytes += skb_len;
return assigned_slave;
}
static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
u32 skb_len)
{
struct slave *tx_slave;
/* We don't need to disable softirq here, because
* tlb_choose_channel() is only called by bond_alb_xmit()
* which already has softirq disabled.
*/
spin_lock(&bond->mode_lock);
tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
spin_unlock(&bond->mode_lock);
return tx_slave;
}
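/* Note on the tx hash table layout: each assigned entry is linked into a
 * per-slave list headed at SLAVE_TLB_INFO(slave).head via the next/prev
 * indices set up in __tlb_choose_channel(). __tlb_clear_slave() walks that
 * list to reset all of a slave's entries in one pass instead of scanning
 * the whole table.
 */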
/*********************** rlb specific functions ***************************/
/* when an ARP REPLY is received from a client, update its info
 * in the rx_hashtbl
 */
static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *client_info;
u32 hash_index;
spin_lock_bh(&bond->mode_lock);
hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
client_info = &(bond_info->rx_hashtbl[hash_index]);
if ((client_info->assigned) &&
(client_info->ip_src == arp->ip_dst) &&
(client_info->ip_dst == arp->ip_src) &&
(!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
/* update the client's MAC address */
ether_addr_copy(client_info->mac_dst, arp->mac_src);
client_info->ntt = 1;
bond_info->rx_ntt = 1;
}
spin_unlock_bh(&bond->mode_lock);
}
static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct arp_pkt *arp, _arp;
if (skb->protocol != cpu_to_be16(ETH_P_ARP))
goto out;
arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
if (!arp)
goto out;
/* We received an ARP from arp->ip_src.
* We might have used this IP address previously (on the bonding host
* itself or on a system that is bridged together with the bond).
* However, if arp->mac_src is different than what is stored in
* rx_hashtbl, some other host is now using the IP and we must prevent
* sending out client updates with this IP address and the old MAC
* address.
* Clean up all hash table entries that have this address as ip_src but
* have a different mac_src.
*/
rlb_purge_src_ip(bond, arp);
if (arp->op_code == htons(ARPOP_REPLY)) {
/* update rx hash table for this ARP */
rlb_update_entry_from_arp(bond, arp);
slave_dbg(bond->dev, slave->dev, "Server received an ARP Reply from client\n");
}
out:
return RX_HANDLER_ANOTHER;
}
/* Caller must hold rcu_read_lock() */
static struct slave *__rlb_next_rx_slave(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct slave *before = NULL, *rx_slave = NULL, *slave;
struct list_head *iter;
bool found = false;
bond_for_each_slave_rcu(bond, slave, iter) {
if (!bond_slave_can_tx(slave))
continue;
if (!found) {
if (!before || before->speed < slave->speed)
before = slave;
} else {
if (!rx_slave || rx_slave->speed < slave->speed)
rx_slave = slave;
}
if (slave == bond_info->rx_slave)
found = true;
}
/* we didn't find anything after the current or we have something
* better before and up to the current slave
*/
if (!rx_slave || (before && rx_slave->speed < before->speed))
rx_slave = before;
if (rx_slave)
bond_info->rx_slave = rx_slave;
return rx_slave;
}
/* Caller must hold RTNL, rcu_read_lock is obtained only to silence checkers */
static struct slave *rlb_next_rx_slave(struct bonding *bond)
{
struct slave *rx_slave;
ASSERT_RTNL();
rcu_read_lock();
rx_slave = __rlb_next_rx_slave(bond);
rcu_read_unlock();
return rx_slave;
}
/* teach the switch the mac of a disabled slave
* on the primary for fault tolerance
*
* Caller must hold RTNL
*/
static void rlb_teach_disabled_mac_on_primary(struct bonding *bond,
const u8 addr[])
{
struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
if (!curr_active)
return;
if (!bond->alb_info.primary_is_promisc) {
if (!dev_set_promiscuity(curr_active->dev, 1))
bond->alb_info.primary_is_promisc = 1;
else
bond->alb_info.primary_is_promisc = 0;
}
bond->alb_info.rlb_promisc_timeout_counter = 0;
alb_send_learning_packets(curr_active, addr, true);
}
/* slave being removed should not be active at this point
*
* Caller must hold rtnl.
*/
static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *rx_hash_table;
u32 index, next_index;
/* clear slave from rx_hashtbl */
spin_lock_bh(&bond->mode_lock);
rx_hash_table = bond_info->rx_hashtbl;
index = bond_info->rx_hashtbl_used_head;
for (; index != RLB_NULL_INDEX; index = next_index) {
next_index = rx_hash_table[index].used_next;
if (rx_hash_table[index].slave == slave) {
struct slave *assigned_slave = rlb_next_rx_slave(bond);
if (assigned_slave) {
rx_hash_table[index].slave = assigned_slave;
if (is_valid_ether_addr(rx_hash_table[index].mac_dst)) {
bond_info->rx_hashtbl[index].ntt = 1;
bond_info->rx_ntt = 1;
/* A slave has been removed from the
 * table because it is either disabled
 * or being released. We must retry the
 * update so that clients are not left
 * without an update and do not
 * disconnect when there is stress
 */
bond_info->rlb_update_retry_counter =
RLB_UPDATE_RETRY;
}
} else { /* there is no active slave */
rx_hash_table[index].slave = NULL;
}
}
}
spin_unlock_bh(&bond->mode_lock);
if (slave != rtnl_dereference(bond->curr_active_slave))
rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
}
static void rlb_update_client(struct rlb_client_info *client_info)
{
int i;
if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
return;
for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
struct sk_buff *skb;
skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
client_info->ip_dst,
client_info->slave->dev,
client_info->ip_src,
client_info->mac_dst,
client_info->slave->dev->dev_addr,
client_info->mac_dst);
if (!skb) {
slave_err(client_info->slave->bond->dev,
client_info->slave->dev,
"failed to create an ARP packet\n");
continue;
}
skb->dev = client_info->slave->dev;
if (client_info->vlan_id) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
client_info->vlan_id);
}
arp_xmit(skb);
}
}
/* sends ARP REPLIES that update the clients that need updating */
static void rlb_update_rx_clients(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *client_info;
u32 hash_index;
spin_lock_bh(&bond->mode_lock);
hash_index = bond_info->rx_hashtbl_used_head;
for (; hash_index != RLB_NULL_INDEX;
hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (client_info->ntt) {
rlb_update_client(client_info);
if (bond_info->rlb_update_retry_counter == 0)
client_info->ntt = 0;
}
}
/* do not update the entries again until this counter is zero, so as
 * not to confuse the clients.
 */
bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;
spin_unlock_bh(&bond->mode_lock);
}
/* The slave was assigned a new mac address - update the clients */
static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *slave)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *client_info;
int ntt = 0;
u32 hash_index;
spin_lock_bh(&bond->mode_lock);
hash_index = bond_info->rx_hashtbl_used_head;
for (; hash_index != RLB_NULL_INDEX;
hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
if ((client_info->slave == slave) &&
is_valid_ether_addr(client_info->mac_dst)) {
client_info->ntt = 1;
ntt = 1;
}
}
/* update the team's flag only after the whole iteration */
if (ntt) {
bond_info->rx_ntt = 1;
/* fasten the change */
bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
}
spin_unlock_bh(&bond->mode_lock);
}
/* mark all clients using src_ip to be updated */
static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *client_info;
u32 hash_index;
spin_lock(&bond->mode_lock);
hash_index = bond_info->rx_hashtbl_used_head;
for (; hash_index != RLB_NULL_INDEX;
hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (!client_info->slave) {
netdev_err(bond->dev, "found a client with no channel in the client's hash table\n");
continue;
}
/* update all clients using this src_ip that are not assigned
 * to the team's address (curr_active_slave) and have a known
 * unicast mac address.
 */
if ((client_info->ip_src == src_ip) &&
!ether_addr_equal_64bits(client_info->slave->dev->dev_addr,
bond->dev->dev_addr) &&
is_valid_ether_addr(client_info->mac_dst)) {
client_info->ntt = 1;
bond_info->rx_ntt = 1;
}
}
spin_unlock(&bond->mode_lock);
}
static struct slave *rlb_choose_channel(struct sk_buff *skb,
struct bonding *bond,
const struct arp_pkt *arp)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct slave *assigned_slave, *curr_active_slave;
struct rlb_client_info *client_info;
u32 hash_index = 0;
spin_lock(&bond->mode_lock);
curr_active_slave = rcu_dereference(bond->curr_active_slave);
hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (client_info->assigned) {
if ((client_info->ip_src == arp->ip_src) &&
(client_info->ip_dst == arp->ip_dst)) {
/* the entry is already assigned to this client */
if (!is_broadcast_ether_addr(arp->mac_dst)) {
/* update mac address from arp */
ether_addr_copy(client_info->mac_dst, arp->mac_dst);
}
ether_addr_copy(client_info->mac_src, arp->mac_src);
assigned_slave = client_info->slave;
if (assigned_slave) {
spin_unlock(&bond->mode_lock);
return assigned_slave;
}
} else {
/* the entry is already assigned to some other client,
* move the old client to primary (curr_active_slave) so
* that the new client can be assigned to this entry.
*/
if (curr_active_slave &&
client_info->slave != curr_active_slave) {
client_info->slave = curr_active_slave;
rlb_update_client(client_info);
}
}
}
/* assign a new slave */
assigned_slave = __rlb_next_rx_slave(bond);
if (assigned_slave) {
if (!(client_info->assigned &&
client_info->ip_src == arp->ip_src)) {
/* ip_src is going to be updated,
* fix the src hash list
*/
u32 hash_src = _simple_hash((u8 *)&arp->ip_src,
sizeof(arp->ip_src));
rlb_src_unlink(bond, hash_index);
rlb_src_link(bond, hash_src, hash_index);
}
client_info->ip_src = arp->ip_src;
client_info->ip_dst = arp->ip_dst;
/* arp->mac_dst is broadcast for arp requests.
 * It will be updated with the client's actual unicast mac address
 * upon receiving an arp reply.
 */
ether_addr_copy(client_info->mac_dst, arp->mac_dst);
ether_addr_copy(client_info->mac_src, arp->mac_src);
client_info->slave = assigned_slave;
if (is_valid_ether_addr(client_info->mac_dst)) {
client_info->ntt = 1;
bond->alb_info.rx_ntt = 1;
} else {
client_info->ntt = 0;
}
if (vlan_get_tag(skb, &client_info->vlan_id))
client_info->vlan_id = 0;
if (!client_info->assigned) {
u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;
bond_info->rx_hashtbl_used_head = hash_index;
client_info->used_next = prev_tbl_head;
if (prev_tbl_head != RLB_NULL_INDEX) {
bond_info->rx_hashtbl[prev_tbl_head].used_prev =
hash_index;
}
client_info->assigned = 1;
}
}
spin_unlock(&bond->mode_lock);
return assigned_slave;
}
/* chooses (and returns) the transmit channel for an arp reply;
 * does not choose a channel for other arp types since they are
 * sent on the curr_active_slave
 */
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
struct slave *tx_slave = NULL;
struct net_device *dev;
struct arp_pkt *arp;
if (!pskb_network_may_pull(skb, sizeof(*arp)))
return NULL;
arp = (struct arp_pkt *)skb_network_header(skb);
/* Don't modify or load balance ARPs that do not originate
* from the bond itself or a VLAN directly above the bond.
*/
if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
return NULL;
dev = ip_dev_find(dev_net(bond->dev), arp->ip_src);
if (dev) {
if (netif_is_any_bridge_master(dev)) {
dev_put(dev);
return NULL;
}
dev_put(dev);
}
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected rx channel */
tx_slave = rlb_choose_channel(skb, bond, arp);
if (tx_slave)
bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
tx_slave->dev->addr_len);
netdev_dbg(bond->dev, "(slave %s): Server sent ARP Reply packet\n",
tx_slave ? tx_slave->dev->name : "NULL");
} else if (arp->op_code == htons(ARPOP_REQUEST)) {
/* Create an entry in the rx_hashtbl for this client as a
 * placeholder.
 * When the arp reply is received, the entry will be updated
 * with the correct unicast address of the client.
 */
tx_slave = rlb_choose_channel(skb, bond, arp);
/* The ARP reply packets must be delayed so that
* they can cancel out the influence of the ARP request.
*/
bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
/* arp requests are broadcast and are sent on the primary;
 * the arp request will collapse all clients on the subnet to
 * the primary slave. We must register these clients to be
 * updated with their assigned mac.
 */
rlb_req_update_subnet_clients(bond, arp->ip_src);
netdev_dbg(bond->dev, "(slave %s): Server sent ARP Request packet\n",
tx_slave ? tx_slave->dev->name : "NULL");
}
return tx_slave;
}
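/* Receive load balancing thus works by answering each client's ARP with a
 * different slave's hardware address: rlb_choose_channel() picks the slave
 * and the reply's mac_src is overwritten above, so inbound traffic from
 * different clients lands on different slaves while the bond keeps a single
 * IP address.
 */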
static void rlb_rebalance(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct slave *assigned_slave;
struct rlb_client_info *client_info;
int ntt;
u32 hash_index;
spin_lock_bh(&bond->mode_lock);
ntt = 0;
hash_index = bond_info->rx_hashtbl_used_head;
for (; hash_index != RLB_NULL_INDEX;
hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
assigned_slave = __rlb_next_rx_slave(bond);
if (assigned_slave && (client_info->slave != assigned_slave)) {
client_info->slave = assigned_slave;
if (!is_zero_ether_addr(client_info->mac_dst)) {
client_info->ntt = 1;
ntt = 1;
}
}
}
/* update the team's flag only after the whole iteration */
if (ntt)
bond_info->rx_ntt = 1;
spin_unlock_bh(&bond->mode_lock);
}
/* Caller must hold mode_lock */
static void rlb_init_table_entry_dst(struct rlb_client_info *entry)
{
entry->used_next = RLB_NULL_INDEX;
entry->used_prev = RLB_NULL_INDEX;
entry->assigned = 0;
entry->slave = NULL;
entry->vlan_id = 0;
}
static void rlb_init_table_entry_src(struct rlb_client_info *entry)
{
entry->src_first = RLB_NULL_INDEX;
entry->src_prev = RLB_NULL_INDEX;
entry->src_next = RLB_NULL_INDEX;
}
static void rlb_init_table_entry(struct rlb_client_info *entry)
{
memset(entry, 0, sizeof(struct rlb_client_info));
rlb_init_table_entry_dst(entry);
rlb_init_table_entry_src(entry);
}
static void rlb_delete_table_entry_dst(struct bonding *bond, u32 index)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
u32 next_index = bond_info->rx_hashtbl[index].used_next;
u32 prev_index = bond_info->rx_hashtbl[index].used_prev;
if (index == bond_info->rx_hashtbl_used_head)
bond_info->rx_hashtbl_used_head = next_index;
if (prev_index != RLB_NULL_INDEX)
bond_info->rx_hashtbl[prev_index].used_next = next_index;
if (next_index != RLB_NULL_INDEX)
bond_info->rx_hashtbl[next_index].used_prev = prev_index;
}
/* unlink a rlb hash table entry from the src list */
static void rlb_src_unlink(struct bonding *bond, u32 index)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
u32 next_index = bond_info->rx_hashtbl[index].src_next;
u32 prev_index = bond_info->rx_hashtbl[index].src_prev;
bond_info->rx_hashtbl[index].src_next = RLB_NULL_INDEX;
bond_info->rx_hashtbl[index].src_prev = RLB_NULL_INDEX;
if (next_index != RLB_NULL_INDEX)
bond_info->rx_hashtbl[next_index].src_prev = prev_index;
if (prev_index == RLB_NULL_INDEX)
return;
/* is prev_index pointing to the head of this list? */
if (bond_info->rx_hashtbl[prev_index].src_first == index)
bond_info->rx_hashtbl[prev_index].src_first = next_index;
else
bond_info->rx_hashtbl[prev_index].src_next = next_index;
}
static void rlb_delete_table_entry(struct bonding *bond, u32 index)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
rlb_delete_table_entry_dst(bond, index);
rlb_init_table_entry_dst(entry);
rlb_src_unlink(bond, index);
}
/* add the rx_hashtbl[ip_dst_hash] entry to the list
* of entries with identical ip_src_hash
*/
static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
u32 next;
bond_info->rx_hashtbl[ip_dst_hash].src_prev = ip_src_hash;
next = bond_info->rx_hashtbl[ip_src_hash].src_first;
bond_info->rx_hashtbl[ip_dst_hash].src_next = next;
if (next != RLB_NULL_INDEX)
bond_info->rx_hashtbl[next].src_prev = ip_dst_hash;
bond_info->rx_hashtbl[ip_src_hash].src_first = ip_dst_hash;
}
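/* Each rx_hashtbl entry therefore sits on two lists at once: the "used"
 * list (used_next/used_prev, headed by rx_hashtbl_used_head) that the
 * update and rebalance paths iterate, and a per-source-IP list
 * (src_first/src_next/src_prev) keyed by the hash of ip_src, which lets
 * rlb_purge_src_ip() find stale entries for a given sender quickly.
 */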
/* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does
* not match arp->mac_src
*/
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
u32 index;
spin_lock_bh(&bond->mode_lock);
index = bond_info->rx_hashtbl[ip_src_hash].src_first;
while (index != RLB_NULL_INDEX) {
struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
u32 next_index = entry->src_next;
if (entry->ip_src == arp->ip_src &&
!ether_addr_equal_64bits(arp->mac_src, entry->mac_src))
rlb_delete_table_entry(bond, index);
index = next_index;
}
spin_unlock_bh(&bond->mode_lock);
}
static int rlb_initialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct rlb_client_info *new_hashtbl;
int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
int i;
new_hashtbl = kmalloc(size, GFP_KERNEL);
if (!new_hashtbl)
return -1;
spin_lock_bh(&bond->mode_lock);
bond_info->rx_hashtbl = new_hashtbl;
bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
for (i = 0; i < RLB_HASH_TABLE_SIZE; i++)
rlb_init_table_entry(bond_info->rx_hashtbl + i);
spin_unlock_bh(&bond->mode_lock);
/* register to receive ARPs */
bond->recv_probe = rlb_arp_recv;
return 0;
}
static void rlb_deinitialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
spin_lock_bh(&bond->mode_lock);
kfree(bond_info->rx_hashtbl);
bond_info->rx_hashtbl = NULL;
bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
spin_unlock_bh(&bond->mode_lock);
}
static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
u32 curr_index;
spin_lock_bh(&bond->mode_lock);
curr_index = bond_info->rx_hashtbl_used_head;
while (curr_index != RLB_NULL_INDEX) {
struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
u32 next_index = bond_info->rx_hashtbl[curr_index].used_next;
if (curr->vlan_id == vlan_id)
rlb_delete_table_entry(bond, curr_index);
curr_index = next_index;
}
spin_unlock_bh(&bond->mode_lock);
}
/*********************** tlb/rlb shared functions *********************/
static void alb_send_lp_vid(struct slave *slave, const u8 mac_addr[],
__be16 vlan_proto, u16 vid)
{
struct learning_pkt pkt;
struct sk_buff *skb;
int size = sizeof(struct learning_pkt);
memset(&pkt, 0, size);
ether_addr_copy(pkt.mac_dst, mac_addr);
ether_addr_copy(pkt.mac_src, mac_addr);
pkt.type = cpu_to_be16(ETH_P_LOOPBACK);
skb = dev_alloc_skb(size);
if (!skb)
return;
skb_put_data(skb, &pkt, size);
skb_reset_mac_header(skb);
skb->network_header = skb->mac_header + ETH_HLEN;
skb->protocol = pkt.type;
skb->priority = TC_PRIO_CONTROL;
skb->dev = slave->dev;
slave_dbg(slave->bond->dev, slave->dev,
"Send learning packet: mac %pM vlan %d\n", mac_addr, vid);
if (vid)
__vlan_hwaccel_put_tag(skb, vlan_proto, vid);
dev_queue_xmit(skb);
}
struct alb_walk_data {
struct bonding *bond;
struct slave *slave;
const u8 *mac_addr;
bool strict_match;
};
static int alb_upper_dev_walk(struct net_device *upper,
struct netdev_nested_priv *priv)
{
struct alb_walk_data *data = (struct alb_walk_data *)priv->data;
bool strict_match = data->strict_match;
const u8 *mac_addr = data->mac_addr;
struct bonding *bond = data->bond;
struct slave *slave = data->slave;
struct bond_vlan_tag *tags;
if (is_vlan_dev(upper) &&
bond->dev->lower_level == upper->lower_level - 1) {
if (upper->addr_assign_type == NET_ADDR_STOLEN) {
alb_send_lp_vid(slave, mac_addr,
vlan_dev_vlan_proto(upper),
vlan_dev_vlan_id(upper));
} else {
alb_send_lp_vid(slave, upper->dev_addr,
vlan_dev_vlan_proto(upper),
vlan_dev_vlan_id(upper));
}
}
/* If this is a macvlan device, then only send updates
* when strict_match is turned off.
*/
if (netif_is_macvlan(upper) && !strict_match) {
tags = bond_verify_device_path(bond->dev, upper, 0);
if (IS_ERR_OR_NULL(tags))
BUG();
alb_send_lp_vid(slave, upper->dev_addr,
tags[0].vlan_proto, tags[0].vlan_id);
kfree(tags);
}
return 0;
}
static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[],
bool strict_match)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
struct netdev_nested_priv priv;
struct alb_walk_data data = {
.strict_match = strict_match,
.mac_addr = mac_addr,
.slave = slave,
.bond = bond,
};
priv.data = (void *)&data;
/* send untagged */
alb_send_lp_vid(slave, mac_addr, 0, 0);
/* loop through all devices and see if we need to send a packet
* for that device.
*/
rcu_read_lock();
netdev_walk_all_upper_dev_rcu(bond->dev, alb_upper_dev_walk, &priv);
rcu_read_unlock();
}
static int alb_set_slave_mac_addr(struct slave *slave, const u8 addr[],
unsigned int len)
{
struct net_device *dev = slave->dev;
struct sockaddr_storage ss;
if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
__dev_addr_set(dev, addr, len);
return 0;
}
/* for rlb each slave must have a unique hw mac address so that
 * each slave will receive packets destined to a different mac
 */
memcpy(ss.__data, addr, len);
ss.ss_family = dev->type;
if (dev_set_mac_address(dev, (struct sockaddr *)&ss, NULL)) {
slave_err(slave->bond->dev, dev, "dev_set_mac_address on slave failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n");
return -EOPNOTSUPP;
}
return 0;
}
/* Swap MAC addresses between two slaves.
*
* Called with RTNL held, and no other locks.
*/
static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
{
u8 tmp_mac_addr[MAX_ADDR_LEN];
bond_hw_addr_copy(tmp_mac_addr, slave1->dev->dev_addr,
slave1->dev->addr_len);
alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr,
slave2->dev->addr_len);
alb_set_slave_mac_addr(slave2, tmp_mac_addr,
slave1->dev->addr_len);
}
/* Send learning packets after MAC address swap.
*
* Called with RTNL and no other locks
*/
static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
struct slave *slave2)
{
int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2));
struct slave *disabled_slave = NULL;
ASSERT_RTNL();
/* fasten the change in the switch */
if (bond_slave_can_tx(slave1)) {
alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
* has changed
*/
rlb_req_update_slave_clients(bond, slave1);
}
} else {
disabled_slave = slave1;
}
if (bond_slave_can_tx(slave2)) {
alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
* has changed
*/
rlb_req_update_slave_clients(bond, slave2);
}
} else {
disabled_slave = slave2;
}
if (bond->alb_info.rlb_enabled && slaves_state_differ) {
/* A disabled slave was assigned an active mac addr */
rlb_teach_disabled_mac_on_primary(bond,
disabled_slave->dev->dev_addr);
}
}
/**
* alb_change_hw_addr_on_detach
* @bond: bonding we're working on
* @slave: the slave that was just detached
*
* We assume that @slave was already detached from the slave list.
*
* If @slave's permanent hw address is different both from its current
* address and from @bond's address, then somewhere in the bond there's
 * a slave that has @slave's permanent address as its current address.
* We'll make sure that slave no longer uses @slave's permanent address.
*
* Caller must hold RTNL and no other locks
*/
static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *slave)
{
int perm_curr_diff;
int perm_bond_diff;
struct slave *found_slave;
perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
slave->dev->dev_addr);
perm_bond_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
bond->dev->dev_addr);
if (perm_curr_diff && perm_bond_diff) {
found_slave = bond_slave_has_mac(bond, slave->perm_hwaddr);
if (found_slave) {
alb_swap_mac_addr(slave, found_slave);
alb_fasten_mac_swap(bond, slave, found_slave);
}
}
}
/**
* alb_handle_addr_collision_on_attach
* @bond: bonding we're working on
* @slave: the slave that was just attached
*
 * checks uniqueness of the slave's mac address and handles the case where
 * the new slave uses the bond's mac address.
*
* If the permanent hw address of @slave is @bond's hw address, we need to
* find a different hw address to give @slave, that isn't in use by any other
* slave in the bond. This address must be, of course, one of the permanent
* addresses of the other slaves.
*
* We go over the slave list, and for each slave there we compare its
* permanent hw address with the current address of all the other slaves.
* If no match was found, then we've found a slave with a permanent address
* that isn't used by any other slave in the bond, so we can assign it to
* @slave.
*
* assumption: this function is called before @slave is attached to the
* bond slave list.
*/
static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
{
struct slave *has_bond_addr = rcu_access_pointer(bond->curr_active_slave);
struct slave *tmp_slave1, *free_mac_slave = NULL;
struct list_head *iter;
if (!bond_has_slaves(bond)) {
/* this is the first slave */
return 0;
}
/* if slave's mac address differs from bond's mac address
* check uniqueness of slave's mac address against the other
* slaves in the bond.
*/
if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
if (!bond_slave_has_mac(bond, slave->dev->dev_addr))
return 0;
/* Try setting slave mac to bond address and fall-through
* to code handling that situation below...
*/
alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
bond->dev->addr_len);
}
/* The slave's address is equal to the address of the bond.
* Search for a spare address in the bond for this slave.
*/
bond_for_each_slave(bond, tmp_slave1, iter) {
if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
/* no slave has tmp_slave1's perm addr
* as its curr addr
*/
free_mac_slave = tmp_slave1;
break;
}
if (!has_bond_addr) {
if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
bond->dev->dev_addr)) {
has_bond_addr = tmp_slave1;
}
}
}
if (free_mac_slave) {
alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
free_mac_slave->dev->addr_len);
slave_warn(bond->dev, slave->dev, "the slave hw address is in use by the bond; giving it the hw address of %s\n",
free_mac_slave->dev->name);
} else if (has_bond_addr) {
slave_err(bond->dev, slave->dev, "the slave hw address is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n");
return -EFAULT;
}
return 0;
}
/**
* alb_set_mac_address
* @bond: bonding we're working on
* @addr: MAC address to set
*
* In TLB mode all slaves are configured to the bond's hw address, but set
* their dev_addr field to different addresses (based on their permanent hw
* addresses).
*
* For each slave, this function sets the interface to the new address and then
* changes its dev_addr field to its previous value.
*
* Unwinding assumes bond's mac address has not yet changed.
*/
static int alb_set_mac_address(struct bonding *bond, void *addr)
{
struct slave *slave, *rollback_slave;
struct list_head *iter;
struct sockaddr_storage ss;
char tmp_addr[MAX_ADDR_LEN];
int res;
if (bond->alb_info.rlb_enabled)
return 0;
bond_for_each_slave(bond, slave, iter) {
/* save net_device's current hw address */
bond_hw_addr_copy(tmp_addr, slave->dev->dev_addr,
slave->dev->addr_len);
res = dev_set_mac_address(slave->dev, addr, NULL);
/* restore net_device's hw address */
dev_addr_set(slave->dev, tmp_addr);
if (res)
goto unwind;
}
return 0;
unwind:
memcpy(ss.__data, bond->dev->dev_addr, bond->dev->addr_len);
ss.ss_family = bond->dev->type;
/* unwind from head to the slave that failed */
bond_for_each_slave(bond, rollback_slave, iter) {
if (rollback_slave == slave)
break;
bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr,
rollback_slave->dev->addr_len);
dev_set_mac_address(rollback_slave->dev,
(struct sockaddr *)&ss, NULL);
dev_addr_set(rollback_slave->dev, tmp_addr);
}
return res;
}
/* determine if the packet is NA or NS */
static bool alb_determine_nd(struct sk_buff *skb, struct bonding *bond)
{
struct ipv6hdr *ip6hdr;
struct icmp6hdr *hdr;
if (!pskb_network_may_pull(skb, sizeof(*ip6hdr)))
return true;
ip6hdr = ipv6_hdr(skb);
if (ip6hdr->nexthdr != IPPROTO_ICMPV6)
return false;
if (!pskb_network_may_pull(skb, sizeof(*ip6hdr) + sizeof(*hdr)))
return true;
hdr = icmp6_hdr(skb);
return hdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT ||
hdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION;
}
/************************ exported alb functions ************************/
int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
{
int res;
res = tlb_initialize(bond);
if (res)
return res;
if (rlb_enabled) {
res = rlb_initialize(bond);
if (res) {
tlb_deinitialize(bond);
return res;
}
bond->alb_info.rlb_enabled = 1;
} else {
bond->alb_info.rlb_enabled = 0;
}
return 0;
}
void bond_alb_deinitialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
tlb_deinitialize(bond);
if (bond_info->rlb_enabled)
rlb_deinitialize(bond);
}
static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
struct slave *tx_slave)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct ethhdr *eth_data = eth_hdr(skb);
if (!tx_slave) {
/* unbalanced or unassigned, send through primary */
tx_slave = rcu_dereference(bond->curr_active_slave);
if (bond->params.tlb_dynamic_lb)
bond_info->unbalanced_load += skb->len;
}
if (tx_slave && bond_slave_can_tx(tx_slave)) {
if (tx_slave != rcu_access_pointer(bond->curr_active_slave)) {
ether_addr_copy(eth_data->h_source,
tx_slave->dev->dev_addr);
}
return bond_dev_queue_xmit(bond, skb, tx_slave->dev);
}
if (tx_slave && bond->params.tlb_dynamic_lb) {
spin_lock(&bond->mode_lock);
__tlb_clear_slave(bond, tx_slave, 0);
spin_unlock(&bond->mode_lock);
}
/* no suitable interface, frame not sent */
return bond_tx_drop(bond->dev, skb);
}
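/* When the chosen tx slave is not the current active slave, the Ethernet
 * source address is rewritten above to that slave's own MAC so the switch
 * learns the flow on the port actually carrying it; frames that fall back
 * to the active slave keep their original source address (the bond's MAC).
 */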
struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
struct sk_buff *skb)
{
struct slave *tx_slave = NULL;
struct ethhdr *eth_data;
u32 hash_index;
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
/* Do not TX balance any multicast or broadcast */
if (!is_multicast_ether_addr(eth_data->h_dest)) {
switch (skb->protocol) {
case htons(ETH_P_IPV6):
if (alb_determine_nd(skb, bond))
break;
fallthrough;
case htons(ETH_P_IP):
hash_index = bond_xmit_hash(bond, skb);
if (bond->params.tlb_dynamic_lb) {
tx_slave = tlb_choose_channel(bond,
hash_index & 0xFF,
skb->len);
} else {
struct bond_up_slave *slaves;
unsigned int count;
slaves = rcu_dereference(bond->usable_slaves);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[hash_index %
count];
}
break;
}
}
return tx_slave;
}
netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *tx_slave;
tx_slave = bond_xmit_tlb_slave_get(bond, skb);
return bond_do_alb_xmit(skb, bond, tx_slave);
}
struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
struct sk_buff *skb)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
static const __be32 ip_bcast = htonl(0xffffffff);
struct slave *tx_slave = NULL;
const u8 *hash_start = NULL;
bool do_tx_balance = true;
struct ethhdr *eth_data;
u32 hash_index = 0;
int hash_size = 0;
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
switch (ntohs(skb->protocol)) {
case ETH_P_IP: {
const struct iphdr *iph;
if (is_broadcast_ether_addr(eth_data->h_dest) ||
!pskb_network_may_pull(skb, sizeof(*iph))) {
do_tx_balance = false;
break;
}
iph = ip_hdr(skb);
if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) {
do_tx_balance = false;
break;
}
hash_start = (char *)&(iph->daddr);
hash_size = sizeof(iph->daddr);
break;
}
case ETH_P_IPV6: {
const struct ipv6hdr *ip6hdr;
/* IPv6 doesn't really use broadcast mac address, but leave
* that here just in case.
*/
if (is_broadcast_ether_addr(eth_data->h_dest)) {
do_tx_balance = false;
break;
}
/* IPv6 uses all-nodes multicast as an equivalent to
* broadcasts in IPv4.
*/
if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
do_tx_balance = false;
break;
}
if (alb_determine_nd(skb, bond)) {
do_tx_balance = false;
break;
}
/* The IPv6 header is pulled by alb_determine_nd */
/* Additionally, DAD probes should not be tx-balanced as that
* will lead to false positives for duplicate addresses and
* prevent address configuration from working.
*/
ip6hdr = ipv6_hdr(skb);
if (ipv6_addr_any(&ip6hdr->saddr)) {
do_tx_balance = false;
break;
}
hash_start = (char *)&ip6hdr->daddr;
hash_size = sizeof(ip6hdr->daddr);
break;
}
case ETH_P_ARP:
do_tx_balance = false;
if (bond_info->rlb_enabled)
tx_slave = rlb_arp_xmit(skb, bond);
break;
default:
do_tx_balance = false;
break;
}
if (do_tx_balance) {
if (bond->params.tlb_dynamic_lb) {
hash_index = _simple_hash(hash_start, hash_size);
tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
} else {
/*
* do_tx_balance means we are free to select the tx_slave
* So we do exactly what tlb would do for hash selection
*/
struct bond_up_slave *slaves;
unsigned int count;
slaves = rcu_dereference(bond->usable_slaves);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[bond_xmit_hash(bond, skb) %
count];
}
}
return tx_slave;
}
netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *tx_slave = NULL;
tx_slave = bond_xmit_alb_slave_get(bond, skb);
return bond_do_alb_xmit(skb, bond, tx_slave);
}
void bond_alb_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
alb_work.work);
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct list_head *iter;
struct slave *slave;
if (!bond_has_slaves(bond)) {
atomic_set(&bond_info->tx_rebalance_counter, 0);
bond_info->lp_counter = 0;
goto re_arm;
}
rcu_read_lock();
atomic_inc(&bond_info->tx_rebalance_counter);
bond_info->lp_counter++;
/* send learning packets */
if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
bool strict_match;
bond_for_each_slave_rcu(bond, slave, iter) {
/* If updating current_active, use all currently
 * in-use mac addresses (!strict_match). Otherwise, only
 * use the mac of the slave device.
* In RLB mode, we always use strict matches.
*/
strict_match = (slave != rcu_access_pointer(bond->curr_active_slave) ||
bond_info->rlb_enabled);
alb_send_learning_packets(slave, slave->dev->dev_addr,
strict_match);
}
bond_info->lp_counter = 0;
}
/* rebalance tx traffic */
if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
bond_for_each_slave_rcu(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == rcu_access_pointer(bond->curr_active_slave)) {
SLAVE_TLB_INFO(slave).load =
bond_info->unbalanced_load /
BOND_TLB_REBALANCE_INTERVAL;
bond_info->unbalanced_load = 0;
}
}
atomic_set(&bond_info->tx_rebalance_counter, 0);
}
if (bond_info->rlb_enabled) {
if (bond_info->primary_is_promisc &&
(++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
/* dev_set_promiscuity requires rtnl and
* nothing else. Avoid race with bond_close.
*/
rcu_read_unlock();
if (!rtnl_trylock())
goto re_arm;
bond_info->rlb_promisc_timeout_counter = 0;
/* If the primary was set to promiscuous mode
* because a slave was disabled then
* it can now leave promiscuous mode.
*/
dev_set_promiscuity(rtnl_dereference(bond->curr_active_slave)->dev,
-1);
bond_info->primary_is_promisc = 0;
rtnl_unlock();
rcu_read_lock();
}
if (bond_info->rlb_rebalance) {
bond_info->rlb_rebalance = 0;
rlb_rebalance(bond);
}
/* check if clients need updating */
if (bond_info->rx_ntt) {
if (bond_info->rlb_update_delay_counter) {
--bond_info->rlb_update_delay_counter;
} else {
rlb_update_rx_clients(bond);
if (bond_info->rlb_update_retry_counter)
--bond_info->rlb_update_retry_counter;
else
bond_info->rx_ntt = 0;
}
}
}
rcu_read_unlock();
re_arm:
queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
}
/* assumption: called before the slave is attached to the bond
* and not locked by the bond lock
*/
int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
{
int res;
res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
slave->dev->addr_len);
if (res)
return res;
res = alb_handle_addr_collision_on_attach(bond, slave);
if (res)
return res;
tlb_init_slave(slave);
/* order a rebalance ASAP */
atomic_set(&bond->alb_info.tx_rebalance_counter,
BOND_TLB_REBALANCE_TICKS);
if (bond->alb_info.rlb_enabled)
bond->alb_info.rlb_rebalance = 1;
return 0;
}
/* Remove slave from tlb and rlb hash tables, and fix up MAC addresses
* if necessary.
*
* Caller must hold RTNL and no other locks
*/
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
{
if (bond_has_slaves(bond))
alb_change_hw_addr_on_detach(bond, slave);
tlb_clear_slave(bond, slave, 0);
if (bond->alb_info.rlb_enabled) {
bond->alb_info.rx_slave = NULL;
rlb_clear_slave(bond, slave);
}
}
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
if (link == BOND_LINK_DOWN) {
tlb_clear_slave(bond, slave, 0);
if (bond->alb_info.rlb_enabled)
rlb_clear_slave(bond, slave);
} else if (link == BOND_LINK_UP) {
/* order a rebalance ASAP */
atomic_set(&bond_info->tx_rebalance_counter,
BOND_TLB_REBALANCE_TICKS);
if (bond->alb_info.rlb_enabled) {
bond->alb_info.rlb_rebalance = 1;
/* If the updelay module parameter is smaller than the
 * forwarding delay of the switch, the rebalance will
 * not work because the rebalance arp replies will
 * not be forwarded to the clients.
 */
}
}
if (bond_is_nondyn_tlb(bond)) {
if (bond_update_slave_arr(bond, NULL))
pr_err("Failed to build slave-array for TLB mode.\n");
}
}
/**
* bond_alb_handle_active_change - assign new curr_active_slave
* @bond: our bonding struct
* @new_slave: new slave to assign
*
* Set the bond->curr_active_slave to @new_slave and handle
* mac address swapping and promiscuity changes as needed.
*
* Caller must hold RTNL
*/
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
{
struct slave *swap_slave;
struct slave *curr_active;
curr_active = rtnl_dereference(bond->curr_active_slave);
if (curr_active == new_slave)
return;
if (curr_active && bond->alb_info.primary_is_promisc) {
dev_set_promiscuity(curr_active->dev, -1);
bond->alb_info.primary_is_promisc = 0;
bond->alb_info.rlb_promisc_timeout_counter = 0;
}
swap_slave = curr_active;
rcu_assign_pointer(bond->curr_active_slave, new_slave);
if (!new_slave || !bond_has_slaves(bond))
return;
/* set the new curr_active_slave to the bonds mac address
* i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
*/
if (!swap_slave)
swap_slave = bond_slave_has_mac(bond, bond->dev->dev_addr);
/* Arrange for swap_slave and new_slave to temporarily be
* ignored so we can mess with their MAC addresses without
* fear of interference from transmit activity.
*/
if (swap_slave)
tlb_clear_slave(bond, swap_slave, 1);
tlb_clear_slave(bond, new_slave, 1);
/* in TLB mode, the slave might flip down/up with the old dev_addr,
* and thus filter bond->dev_addr's packets, so force bond's mac
*/
if (BOND_MODE(bond) == BOND_MODE_TLB) {
struct sockaddr_storage ss;
u8 tmp_addr[MAX_ADDR_LEN];
bond_hw_addr_copy(tmp_addr, new_slave->dev->dev_addr,
new_slave->dev->addr_len);
bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
bond->dev->addr_len);
ss.ss_family = bond->dev->type;
/* we don't care if it can't change its mac, best effort */
dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
NULL);
dev_addr_set(new_slave->dev, tmp_addr);
}
/* curr_active_slave must be set before calling alb_swap_mac_addr */
if (swap_slave) {
/* swap mac address */
alb_swap_mac_addr(swap_slave, new_slave);
alb_fasten_mac_swap(bond, swap_slave, new_slave);
} else {
/* set the new_slave to the bond mac address */
alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
bond->dev->addr_len);
alb_send_learning_packets(new_slave, bond->dev->dev_addr,
false);
}
}
/* Called with RTNL */
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
{
struct bonding *bond = netdev_priv(bond_dev);
struct sockaddr_storage *ss = addr;
struct slave *curr_active;
struct slave *swap_slave;
int res;
if (!is_valid_ether_addr(ss->__data))
return -EADDRNOTAVAIL;
res = alb_set_mac_address(bond, addr);
if (res)
return res;
dev_addr_set(bond_dev, ss->__data);
/* If there is no curr_active_slave there is nothing else to do.
* Otherwise we'll need to pass the new address to it and handle
* duplications.
*/
curr_active = rtnl_dereference(bond->curr_active_slave);
if (!curr_active)
return 0;
swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);
if (swap_slave) {
alb_swap_mac_addr(swap_slave, curr_active);
alb_fasten_mac_swap(bond, swap_slave, curr_active);
} else {
alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr,
bond_dev->addr_len);
alb_send_learning_packets(curr_active,
bond_dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform clients mac address has changed */
rlb_req_update_slave_clients(bond, curr_active);
}
}
return 0;
}
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
if (bond->alb_info.rlb_enabled)
rlb_clear_vlan(bond, vlan_id);
}
| linux-master | drivers/net/bonding/bond_alb.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* originally based on the dummy device.
*
* Copyright 1999, Thomas Davis, [email protected].
* Based on dummy.c, and eql.c devices.
*
* bonding.c: an Ethernet Bonding driver
*
 * This is useful for talking to Cisco EtherChannel compatible equipment:
* Cisco 5500
* Sun Trunking (Solaris)
* Alteon AceDirector Trunks
* Linux Bonding
* and probably many L2 switches ...
*
* How it works:
* ifconfig bond0 ipaddress netmask up
* will setup a network device, with an ip address. No mac address
* will be assigned at this time. The hw mac address will come from
* the first slave bonded to the channel. All slaves will then use
* this hw mac address.
*
* ifconfig bond0 down
* will release all slaves, marking them as down.
*
* ifenslave bond0 eth0
 * will attach eth0 to bond0 as a slave. eth0's hw mac address will either
 * a: be used as the initial mac address, or
 * b: if the bond already has a hw mac address, eth0's hw mac address
 * will then be set from bond0.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/phy.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/xfrm.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif
#include <net/ip6_route.h>
#include <net/xdp.h>
#include "bonding_priv.h"
/*---------------------------- Module parameters ----------------------------*/
/* monitor all links that often (in milliseconds). <=0 disables monitoring */
static int max_bonds = BOND_DEFAULT_MAX_BONDS;
static int tx_queues = BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier = 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
"failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
"failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
"in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
"0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
"1 for active-backup, 2 for balance-xor, "
"3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
"6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
"once it comes up; "
"0 for always (default), "
"1 for only if speed of primary is "
"better, "
"2 for only on active slave "
"failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
"0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
"0 for stable (default), 1 for bandwidth, "
"2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");
module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
"0 for layer 2 (default), 1 for layer 3+4, "
"2 for layer 2+3, 3 for encap layer 2+3, "
"4 for encap layer 3+4, 5 for vlan+srcmac");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
"0 for none (default), 1 for active, "
"2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
"the same MAC; 0 for none (default), "
"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
"by setting active flag for all slaves; "
"0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
"link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
"mode; 0 for a random slave, 1 packet per "
"slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
"the bonding driver sends learning packets to "
"each slaves peer switch. The default is 1.");
/*----------------------------- Global variables ----------------------------*/
#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif
unsigned int bond_net_id __read_mostly;
static const struct flow_dissector_key flow_keys_bonding_keys[] = {
{
.key_id = FLOW_DISSECTOR_KEY_CONTROL,
.offset = offsetof(struct flow_keys, control),
},
{
.key_id = FLOW_DISSECTOR_KEY_BASIC,
.offset = offsetof(struct flow_keys, basic),
},
{
.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
.offset = offsetof(struct flow_keys, addrs.v4addrs),
},
{
.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
.offset = offsetof(struct flow_keys, addrs.v6addrs),
},
{
.key_id = FLOW_DISSECTOR_KEY_TIPC,
.offset = offsetof(struct flow_keys, addrs.tipckey),
},
{
.key_id = FLOW_DISSECTOR_KEY_PORTS,
.offset = offsetof(struct flow_keys, ports),
},
{
.key_id = FLOW_DISSECTOR_KEY_ICMP,
.offset = offsetof(struct flow_keys, icmp),
},
{
.key_id = FLOW_DISSECTOR_KEY_VLAN,
.offset = offsetof(struct flow_keys, vlan),
},
{
.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
.offset = offsetof(struct flow_keys, tags),
},
{
.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
.offset = offsetof(struct flow_keys, keyid),
},
};
static struct flow_dissector flow_keys_bonding __read_mostly;
/*-------------------------- Forward declarations ---------------------------*/
static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
int mod);
static void bond_netdev_notify_work(struct work_struct *work);
/*---------------------------- General routines -----------------------------*/
const char *bond_mode_name(int mode)
{
static const char *names[] = {
[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
[BOND_MODE_XOR] = "load balancing (xor)",
[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
[BOND_MODE_TLB] = "transmit load balancing",
[BOND_MODE_ALB] = "adaptive load balancing",
};
if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
return "unknown";
return names[mode];
}
/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
* @bond: bond device that got this skb for tx.
* @skb: hw accel VLAN tagged skb to transmit
* @slave_dev: slave that is supposed to xmit this skbuff
*/
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *slave_dev)
{
skb->dev = slave_dev;
BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
if (unlikely(netpoll_tx_running(bond->dev)))
return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
return dev_queue_xmit(skb);
}
static bool bond_sk_check(struct bonding *bond)
{
switch (BOND_MODE(bond)) {
case BOND_MODE_8023AD:
case BOND_MODE_XOR:
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
return true;
fallthrough;
default:
return false;
}
}
static bool bond_xdp_check(struct bonding *bond)
{
switch (BOND_MODE(bond)) {
case BOND_MODE_ROUNDROBIN:
case BOND_MODE_ACTIVEBACKUP:
return true;
case BOND_MODE_8023AD:
case BOND_MODE_XOR:
/* vlan+srcmac is not supported with XDP as in most cases the 802.1q
* payload is not in the packet due to hardware offload.
*/
if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
return true;
fallthrough;
default:
return false;
}
}
/*---------------------------------- VLAN -----------------------------------*/
/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
 * we don't protect the slave list iteration with a lock because:
* a. This operation is performed in IOCTL context,
* b. The operation is protected by the RTNL semaphore in the 8021q code,
* c. Holding a lock with BH disabled while directly calling a base driver
* entry point is generally a BAD idea.
*
* The design of synchronization/protection for this operation in the 8021q
* module is good for one or more VLAN devices over a single physical device
* and cannot be extended for a teaming solution like bonding, so there is a
* potential race condition here where a net device from the vlan group might
* be referenced (either by a base driver or the 8021q code) while it is being
* removed from the system. However, it turns out we're not making matters
* worse, and if it works for regular VLAN usage it will work here too.
*/
/**
* bond_vlan_rx_add_vid - Propagates adding an id to slaves
* @bond_dev: bonding net device that got called
* @proto: network protocol ID
* @vid: vlan id being added
*/
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
__be16 proto, u16 vid)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *rollback_slave;
struct list_head *iter;
int res;
bond_for_each_slave(bond, slave, iter) {
res = vlan_vid_add(slave->dev, proto, vid);
if (res)
goto unwind;
}
return 0;
unwind:
/* unwind to the slave that failed */
bond_for_each_slave(bond, rollback_slave, iter) {
if (rollback_slave == slave)
break;
vlan_vid_del(rollback_slave->dev, proto, vid);
}
return res;
}
/**
* bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
* @bond_dev: bonding net device that got called
* @proto: network protocol ID
* @vid: vlan id being removed
*/
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
__be16 proto, u16 vid)
{
struct bonding *bond = netdev_priv(bond_dev);
struct list_head *iter;
struct slave *slave;
bond_for_each_slave(bond, slave, iter)
vlan_vid_del(slave->dev, proto, vid);
if (bond_is_lb(bond))
bond_alb_clear_vlan(bond, vid);
return 0;
}
/*---------------------------------- XFRM -----------------------------------*/
#ifdef CONFIG_XFRM_OFFLOAD
/**
* bond_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
 * @extack: extack pointer to fill with the failure reason
**/
static int bond_ipsec_add_sa(struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
struct net_device *bond_dev = xs->xso.dev;
struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
int err;
if (!bond_dev)
return -EINVAL;
rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
if (!slave) {
rcu_read_unlock();
return -ENODEV;
}
if (!slave->dev->xfrmdev_ops ||
!slave->dev->xfrmdev_ops->xdo_dev_state_add ||
netif_is_bond_master(slave->dev)) {
NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
rcu_read_unlock();
return -EINVAL;
}
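	/* GFP_ATOMIC: we are still inside the RCU read-side critical section
	 * and therefore must not sleep.
	 */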
ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
if (!ipsec) {
rcu_read_unlock();
return -ENOMEM;
}
xs->xso.real_dev = slave->dev;
err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
if (!err) {
ipsec->xs = xs;
INIT_LIST_HEAD(&ipsec->list);
spin_lock_bh(&bond->ipsec_lock);
list_add(&ipsec->list, &bond->ipsec_list);
spin_unlock_bh(&bond->ipsec_lock);
} else {
kfree(ipsec);
}
rcu_read_unlock();
return err;
}
static void bond_ipsec_add_sa_all(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
struct bond_ipsec *ipsec;
struct slave *slave;
rcu_read_lock();
slave = rcu_dereference(bond->curr_active_slave);
if (!slave)
goto out;
if (!slave->dev->xfrmdev_ops ||
!slave->dev->xfrmdev_ops->xdo_dev_state_add ||
netif_is_bond_master(slave->dev)) {
spin_lock_bh(&bond->ipsec_lock);
if (!list_empty(&bond->ipsec_list))
slave_warn(bond_dev, slave->dev,
"%s: no slave xdo_dev_state_add\n",
__func__);
spin_unlock_bh(&bond->ipsec_lock);
goto out;
}
spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
ipsec->xs->xso.real_dev = slave->dev;
if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
ipsec->xs->xso.real_dev = NULL;
}
}
spin_unlock_bh(&bond->ipsec_lock);
out:
rcu_read_unlock();
}
/**
* bond_ipsec_del_sa - clear out this specific SA
* @xs: pointer to transformer state struct
**/
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
if (!bond_dev)
return;
rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
if (!slave)
goto out;
if (!xs->xso.real_dev)
goto out;
WARN_ON(xs->xso.real_dev != slave->dev);
if (!slave->dev->xfrmdev_ops ||
!slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
netif_is_bond_master(slave->dev)) {
slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
goto out;
}
slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
if (ipsec->xs == xs) {
list_del(&ipsec->list);
kfree(ipsec);
break;
}
}
spin_unlock_bh(&bond->ipsec_lock);
rcu_read_unlock();
}
static void bond_ipsec_del_sa_all(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
struct bond_ipsec *ipsec;
struct slave *slave;
rcu_read_lock();
slave = rcu_dereference(bond->curr_active_slave);
if (!slave) {
rcu_read_unlock();
return;
}
spin_lock_bh(&bond->ipsec_lock);
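	/* Tear down the offloaded SAs on the outgoing active slave but keep
	 * them on the list so bond_ipsec_add_sa_all() can re-offload them to
	 * the next active slave.
	 */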
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
if (!ipsec->xs->xso.real_dev)
continue;
if (!slave->dev->xfrmdev_ops ||
!slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
netif_is_bond_master(slave->dev)) {
slave_warn(bond_dev, slave->dev,
"%s: no slave xdo_dev_state_delete\n",
__func__);
} else {
slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
}
ipsec->xs->xso.real_dev = NULL;
}
spin_unlock_bh(&bond->ipsec_lock);
rcu_read_unlock();
}
/**
* bond_ipsec_offload_ok - can this packet use the xfrm hw offload
* @skb: current data packet
* @xs: pointer to transformer state struct
**/
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
struct slave *curr_active;
struct bonding *bond;
int err;
bond = netdev_priv(bond_dev);
rcu_read_lock();
curr_active = rcu_dereference(bond->curr_active_slave);
real_dev = curr_active->dev;
if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
err = false;
goto out;
}
if (!xs->xso.real_dev) {
err = false;
goto out;
}
if (!real_dev->xfrmdev_ops ||
!real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
netif_is_bond_master(real_dev)) {
err = false;
goto out;
}
err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
out:
rcu_read_unlock();
return err;
}
static const struct xfrmdev_ops bond_xfrmdev_ops = {
.xdo_dev_state_add = bond_ipsec_add_sa,
.xdo_dev_state_delete = bond_ipsec_del_sa,
.xdo_dev_offload_ok = bond_ipsec_offload_ok,
};
#endif /* CONFIG_XFRM_OFFLOAD */
/*------------------------------- Link status -------------------------------*/
/* Set the carrier state for the master according to the state of its
* slaves. If any slaves are up, the master is up. In 802.3ad mode,
* do special 802.3ad magic.
*
* Returns zero if carrier state does not change, nonzero if it does.
*/
int bond_set_carrier(struct bonding *bond)
{
struct list_head *iter;
struct slave *slave;
if (!bond_has_slaves(bond))
goto down;
if (BOND_MODE(bond) == BOND_MODE_8023AD)
return bond_3ad_set_carrier(bond);
bond_for_each_slave(bond, slave, iter) {
if (slave->link == BOND_LINK_UP) {
if (!netif_carrier_ok(bond->dev)) {
netif_carrier_on(bond->dev);
return 1;
}
return 0;
}
}
down:
if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
return 1;
}
return 0;
}
/* Get link speed and duplex from the slave's base driver
* using ethtool. If for some reason the call fails or the
 * values are invalid, leave speed and duplex as
 * SPEED_UNKNOWN/DUPLEX_UNKNOWN and return 1; otherwise fill them
 * in and return 0.
*/
static int bond_update_speed_duplex(struct slave *slave)
{
struct net_device *slave_dev = slave->dev;
struct ethtool_link_ksettings ecmd;
int res;
slave->speed = SPEED_UNKNOWN;
slave->duplex = DUPLEX_UNKNOWN;
res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
if (res < 0)
return 1;
if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
return 1;
switch (ecmd.base.duplex) {
case DUPLEX_FULL:
case DUPLEX_HALF:
break;
default:
return 1;
}
slave->speed = ecmd.base.speed;
slave->duplex = ecmd.base.duplex;
return 0;
}
const char *bond_slave_link_status(s8 link)
{
switch (link) {
case BOND_LINK_UP:
return "up";
case BOND_LINK_FAIL:
return "going down";
case BOND_LINK_DOWN:
return "down";
case BOND_LINK_BACK:
return "going back";
default:
return "unknown";
}
}
/* if <dev> supports MII link status reporting, check its link status.
*
* We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
* depending upon the setting of the use_carrier parameter.
*
* Return either BMSR_LSTATUS, meaning that the link is up (or we
* can't tell and just pretend it is), or 0, meaning that the link is
* down.
*
* If reporting is non-zero, instead of faking link up, return -1 if
* both ETHTOOL and MII ioctls fail (meaning the device does not
* support them). If use_carrier is set, return whatever it says.
 * It'd be nice if there were a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
*/
static int bond_check_dev_link(struct bonding *bond,
struct net_device *slave_dev, int reporting)
{
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
int (*ioctl)(struct net_device *, struct ifreq *, int);
struct ifreq ifr;
struct mii_ioctl_data *mii;
if (!reporting && !netif_running(slave_dev))
return 0;
if (bond->params.use_carrier)
return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
/* Try to get link status using Ethtool first. */
if (slave_dev->ethtool_ops->get_link)
return slave_dev->ethtool_ops->get_link(slave_dev) ?
BMSR_LSTATUS : 0;
/* Ethtool can't be used, fallback to MII ioctls. */
ioctl = slave_ops->ndo_eth_ioctl;
if (ioctl) {
		/* TODO: set the pointer to the correct ioctl on a per team
		 * member basis to make this more efficient. That is, once
		 * we determine the correct ioctl, we will always
		 * call it and not the others for that team
		 * member.
		 */
/* We cannot assume that SIOCGMIIPHY will also read a
* register; not all network drivers (e.g., e100)
* support that.
*/
/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
mii = if_mii(&ifr);
if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
mii->reg_num = MII_BMSR;
if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
return mii->val_out & BMSR_LSTATUS;
}
}
/* If reporting, report that either there's no ndo_eth_ioctl,
* or both SIOCGMIIREG and get_link failed (meaning that we
* cannot report link status). If not reporting, pretend
* we're ok.
*/
return reporting ? -1 : BMSR_LSTATUS;
}
/*----------------------------- Multicast list ------------------------------*/
/* Push the promiscuity flag down to appropriate slaves */
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
struct list_head *iter;
int err = 0;
if (bond_uses_primary(bond)) {
struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
if (curr_active)
err = dev_set_promiscuity(curr_active->dev, inc);
} else {
struct slave *slave;
bond_for_each_slave(bond, slave, iter) {
err = dev_set_promiscuity(slave->dev, inc);
if (err)
return err;
}
}
return err;
}
/* Push the allmulti flag down to all slaves */
static int bond_set_allmulti(struct bonding *bond, int inc)
{
struct list_head *iter;
int err = 0;
if (bond_uses_primary(bond)) {
struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
if (curr_active)
err = dev_set_allmulti(curr_active->dev, inc);
} else {
struct slave *slave;
bond_for_each_slave(bond, slave, iter) {
err = dev_set_allmulti(slave->dev, inc);
if (err)
return err;
}
}
return err;
}
/* Retrieve the list of registered multicast addresses for the bonding
* device and retransmit an IGMP JOIN request to the current active
* slave.
*/
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mcast_work.work);
if (!rtnl_trylock()) {
queue_delayed_work(bond->wq, &bond->mcast_work, 1);
return;
}
call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
if (bond->igmp_retrans > 1) {
bond->igmp_retrans--;
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
}
rtnl_unlock();
}
/* Flush bond's hardware addresses from slave */
static void bond_hw_addr_flush(struct net_device *bond_dev,
struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
dev_uc_unsync(slave_dev, bond_dev);
dev_mc_unsync(slave_dev, bond_dev);
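	/* The LACPDU multicast address is added explicitly in 802.3ad mode,
	 * so it has to be removed explicitly as well.
	 */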
if (BOND_MODE(bond) == BOND_MODE_8023AD)
dev_mc_del(slave_dev, lacpdu_mcast_addr);
}
/*--------------------------- Active slave change ---------------------------*/
/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any). Modes that do not use a primary keep all
 * slaves up to date at all times; only the modes that use primary need to call
* this function to swap these settings during a failover.
*/
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
struct slave *old_active)
{
if (old_active) {
if (bond->dev->flags & IFF_PROMISC)
dev_set_promiscuity(old_active->dev, -1);
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
if (bond->dev->flags & IFF_UP)
bond_hw_addr_flush(bond->dev, old_active->dev);
}
if (new_active) {
/* FIXME: Signal errors upstream. */
if (bond->dev->flags & IFF_PROMISC)
dev_set_promiscuity(new_active->dev, 1);
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
if (bond->dev->flags & IFF_UP) {
netif_addr_lock_bh(bond->dev);
dev_uc_sync(new_active->dev, bond->dev);
dev_mc_sync(new_active->dev, bond->dev);
netif_addr_unlock_bh(bond->dev);
}
}
}
/**
* bond_set_dev_addr - clone slave's address to bond
* @bond_dev: bond net device
* @slave_dev: slave net device
*
* Should be called with RTNL held.
*/
static int bond_set_dev_addr(struct net_device *bond_dev,
struct net_device *slave_dev)
{
int err;
slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
bond_dev, slave_dev, slave_dev->addr_len);
err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
if (err)
return err;
__dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len);
bond_dev->addr_assign_type = NET_ADDR_STOLEN;
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
return 0;
}
static struct slave *bond_get_old_active(struct bonding *bond,
struct slave *new_active)
{
struct slave *slave;
struct list_head *iter;
bond_for_each_slave(bond, slave, iter) {
if (slave == new_active)
continue;
if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
return slave;
}
return NULL;
}
/* bond_do_fail_over_mac
*
* Perform special MAC address swapping for fail_over_mac settings
*
* Called with RTNL
*/
static void bond_do_fail_over_mac(struct bonding *bond,
struct slave *new_active,
struct slave *old_active)
{
u8 tmp_mac[MAX_ADDR_LEN];
struct sockaddr_storage ss;
int rv;
switch (bond->params.fail_over_mac) {
case BOND_FOM_ACTIVE:
if (new_active) {
rv = bond_set_dev_addr(bond->dev, new_active->dev);
if (rv)
slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
-rv);
}
break;
case BOND_FOM_FOLLOW:
/* if new_active && old_active, swap them
* if just old_active, do nothing (going to no active slave)
* if just new_active, set new_active to bond's MAC
*/
if (!new_active)
return;
if (!old_active)
old_active = bond_get_old_active(bond, new_active);
if (old_active) {
bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
new_active->dev->addr_len);
bond_hw_addr_copy(ss.__data,
old_active->dev->dev_addr,
old_active->dev->addr_len);
ss.ss_family = new_active->dev->type;
} else {
bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
bond->dev->addr_len);
ss.ss_family = bond->dev->type;
}
rv = dev_set_mac_address(new_active->dev,
(struct sockaddr *)&ss, NULL);
if (rv) {
slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
-rv);
goto out;
}
if (!old_active)
goto out;
bond_hw_addr_copy(ss.__data, tmp_mac,
new_active->dev->addr_len);
ss.ss_family = old_active->dev->type;
rv = dev_set_mac_address(old_active->dev,
(struct sockaddr *)&ss, NULL);
if (rv)
slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
-rv);
out:
break;
default:
netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
bond->params.fail_over_mac);
break;
}
}
/**
 * bond_choose_primary_or_current - select the primary or highest-priority slave
* @bond: our bonding struct
*
* - Check if there is a primary link. If the primary link was set and is up,
* go on and do link reselection.
*
* - If primary link is not set or down, find the highest priority link.
* If the highest priority link is not current slave, set it as primary
* link and do link reselection.
*/
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
struct slave *prim = rtnl_dereference(bond->primary_slave);
struct slave *curr = rtnl_dereference(bond->curr_active_slave);
struct slave *slave, *hprio = NULL;
struct list_head *iter;
if (!prim || prim->link != BOND_LINK_UP) {
bond_for_each_slave(bond, slave, iter) {
if (slave->link == BOND_LINK_UP) {
hprio = hprio ?: slave;
if (slave->prio > hprio->prio)
hprio = slave;
}
}
if (hprio && hprio != curr) {
prim = hprio;
goto link_reselect;
}
if (!curr || curr->link != BOND_LINK_UP)
return NULL;
return curr;
}
if (bond->force_primary) {
bond->force_primary = false;
return prim;
}
link_reselect:
if (!curr || curr->link != BOND_LINK_UP)
return prim;
/* At this point, prim and curr are both up */
switch (bond->params.primary_reselect) {
case BOND_PRI_RESELECT_ALWAYS:
return prim;
case BOND_PRI_RESELECT_BETTER:
if (prim->speed < curr->speed)
return curr;
if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
return curr;
return prim;
case BOND_PRI_RESELECT_FAILURE:
return curr;
default:
netdev_err(bond->dev, "impossible primary_reselect %d\n",
bond->params.primary_reselect);
return curr;
}
}
/**
* bond_find_best_slave - select the best available slave to be the active one
* @bond: our bonding struct
*/
static struct slave *bond_find_best_slave(struct bonding *bond)
{
struct slave *slave, *bestslave = NULL;
struct list_head *iter;
int mintime = bond->params.updelay;
slave = bond_choose_primary_or_current(bond);
if (slave)
return slave;
bond_for_each_slave(bond, slave, iter) {
if (slave->link == BOND_LINK_UP)
return slave;
if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
slave->delay < mintime) {
mintime = slave->delay;
bestslave = slave;
}
}
return bestslave;
}
static bool bond_should_notify_peers(struct bonding *bond)
{
struct slave *slave;
rcu_read_lock();
slave = rcu_dereference(bond->curr_active_slave);
rcu_read_unlock();
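	/* Only notify when a notification is still pending, we are on a
	 * peer_notif_delay boundary, the bond carrier is up and the active
	 * slave is not in the middle of a linkwatch transition.
	 */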
if (!slave || !bond->send_peer_notif ||
bond->send_peer_notif %
max(1, bond->params.peer_notif_delay) != 0 ||
!netif_carrier_ok(bond->dev) ||
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
return false;
netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
slave ? slave->dev->name : "NULL");
return true;
}
/**
* bond_change_active_slave - change the active slave into the specified one
* @bond: our bonding struct
* @new_active: the new slave to make the active one
*
* Set the new slave to the bond's settings and unset them on the old
* curr_active_slave.
 * Settings include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new_active's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
* because it is apparently the best available slave we have, even though its
* updelay hasn't timed out yet.
*
* Caller must hold RTNL.
*/
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
struct slave *old_active;
ASSERT_RTNL();
old_active = rtnl_dereference(bond->curr_active_slave);
if (old_active == new_active)
return;
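	/* Move any offloaded IPsec state: tear it down on the old active
	 * slave here and re-add it on the new one at the end of the switch.
	 */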
#ifdef CONFIG_XFRM_OFFLOAD
bond_ipsec_del_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */
if (new_active) {
new_active->last_link_up = jiffies;
if (new_active->link == BOND_LINK_BACK) {
if (bond_uses_primary(bond)) {
slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
(bond->params.updelay - new_active->delay) * bond->params.miimon);
}
new_active->delay = 0;
bond_set_slave_link_state(new_active, BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
if (bond_is_lb(bond))
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
if (bond_uses_primary(bond))
slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
}
}
if (bond_uses_primary(bond))
bond_hw_addr_swap(bond, new_active, old_active);
if (bond_is_lb(bond)) {
bond_alb_handle_active_change(bond, new_active);
if (old_active)
bond_set_slave_inactive_flags(old_active,
BOND_SLAVE_NOTIFY_NOW);
if (new_active)
bond_set_slave_active_flags(new_active,
BOND_SLAVE_NOTIFY_NOW);
} else {
rcu_assign_pointer(bond->curr_active_slave, new_active);
}
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
if (old_active)
bond_set_slave_inactive_flags(old_active,
BOND_SLAVE_NOTIFY_NOW);
if (new_active) {
bool should_notify_peers = false;
bond_set_slave_active_flags(new_active,
BOND_SLAVE_NOTIFY_NOW);
if (bond->params.fail_over_mac)
bond_do_fail_over_mac(bond, new_active,
old_active);
if (netif_running(bond->dev)) {
bond->send_peer_notif =
bond->params.num_peer_notif *
max(1, bond->params.peer_notif_delay);
should_notify_peers =
bond_should_notify_peers(bond);
}
call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
if (should_notify_peers) {
bond->send_peer_notif--;
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
}
}
}
#ifdef CONFIG_XFRM_OFFLOAD
bond_ipsec_add_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */
	/* Resend IGMP joins since the active slave has changed or
	 * all joins were sent on curr_active_slave.
	 * Resend only if the bond is up in one of the affected
	 * bonding modes and retransmission is enabled.
	 */
if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
((bond_uses_primary(bond) && new_active) ||
BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
bond->igmp_retrans = bond->params.resend_igmp;
queue_delayed_work(bond->wq, &bond->mcast_work, 1);
}
}
/**
* bond_select_active_slave - select a new active slave, if needed
* @bond: our bonding struct
*
 * This function should be called when one of the following occurs:
* - The old curr_active_slave has been released or lost its link.
* - The primary_slave has got its link back.
* - A slave has got its link back and there's no old curr_active_slave.
*
* Caller must hold RTNL.
*/
void bond_select_active_slave(struct bonding *bond)
{
struct slave *best_slave;
int rv;
ASSERT_RTNL();
best_slave = bond_find_best_slave(bond);
if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
bond_change_active_slave(bond, best_slave);
rv = bond_set_carrier(bond);
if (!rv)
return;
if (netif_carrier_ok(bond->dev))
netdev_info(bond->dev, "active interface up!\n");
else
netdev_info(bond->dev, "now running without any active interface!\n");
}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
struct netpoll *np;
int err = 0;
np = kzalloc(sizeof(*np), GFP_KERNEL);
err = -ENOMEM;
if (!np)
goto out;
err = __netpoll_setup(np, slave->dev);
if (err) {
kfree(np);
goto out;
}
slave->np = np;
out:
return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
struct netpoll *np = slave->np;
if (!np)
return;
slave->np = NULL;
__netpoll_free(np);
}
static void bond_poll_controller(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave = NULL;
struct list_head *iter;
struct ad_info ad_info;
if (BOND_MODE(bond) == BOND_MODE_8023AD)
if (bond_3ad_get_active_agg_info(bond, &ad_info))
return;
bond_for_each_slave_rcu(bond, slave, iter) {
if (!bond_slave_is_up(slave))
continue;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct aggregator *agg =
SLAVE_AD_INFO(slave)->port.aggregator;
if (agg &&
agg->aggregator_identifier != ad_info.aggregator_id)
continue;
}
netpoll_poll_dev(slave->dev);
}
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct list_head *iter;
struct slave *slave;
bond_for_each_slave(bond, slave, iter)
if (bond_slave_is_up(slave))
slave_disable_netpoll(slave);
}
static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
struct bonding *bond = netdev_priv(dev);
struct list_head *iter;
struct slave *slave;
int err = 0;
bond_for_each_slave(bond, slave, iter) {
err = slave_enable_netpoll(slave);
if (err) {
bond_netpoll_cleanup(dev);
break;
}
}
return err;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif
/*---------------------------------- IOCTL ----------------------------------*/
static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct bonding *bond = netdev_priv(dev);
struct list_head *iter;
netdev_features_t mask;
struct slave *slave;
mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
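	/* Start with ONE_FOR_ALL features cleared (they get set if any slave
	 * provides them) and ALL_FOR_ALL features set (they get cleared if
	 * any slave lacks them); netdev_increment_features() below does the
	 * per-slave accumulation within the caller's @mask.
	 */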
bond_for_each_slave(bond, slave, iter) {
features = netdev_increment_features(features,
slave->dev->features,
mask);
}
features = netdev_add_tso_features(features, mask);
return features;
}
#define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
NETIF_F_HIGHDMA | NETIF_F_LRO)
#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_GSO_SOFTWARE)
static void bond_compute_features(struct bonding *bond)
{
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
netdev_features_t enc_features = BOND_ENC_FEATURES;
#ifdef CONFIG_XFRM_OFFLOAD
netdev_features_t xfrm_features = BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
netdev_features_t mpls_features = BOND_MPLS_FEATURES;
struct net_device *bond_dev = bond->dev;
struct list_head *iter;
struct slave *slave;
unsigned short max_hard_header_len = ETH_HLEN;
unsigned int tso_max_size = TSO_MAX_SIZE;
u16 tso_max_segs = TSO_MAX_SEGS;
if (!bond_has_slaves(bond))
goto done;
vlan_features &= NETIF_F_ALL_FOR_ALL;
mpls_features &= NETIF_F_ALL_FOR_ALL;
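	/* Walk the slaves and narrow each feature set down to what the bond
	 * as a whole can safely advertise.
	 */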
bond_for_each_slave(bond, slave, iter) {
vlan_features = netdev_increment_features(vlan_features,
slave->dev->vlan_features, BOND_VLAN_FEATURES);
enc_features = netdev_increment_features(enc_features,
slave->dev->hw_enc_features,
BOND_ENC_FEATURES);
#ifdef CONFIG_XFRM_OFFLOAD
xfrm_features = netdev_increment_features(xfrm_features,
slave->dev->hw_enc_features,
BOND_XFRM_FEATURES);
#endif /* CONFIG_XFRM_OFFLOAD */
mpls_features = netdev_increment_features(mpls_features,
slave->dev->mpls_features,
BOND_MPLS_FEATURES);
dst_release_flag &= slave->dev->priv_flags;
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
tso_max_size = min(tso_max_size, slave->dev->tso_max_size);
tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs);
}
bond_dev->hard_header_len = max_hard_header_len;
done:
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
bond_dev->mpls_features = mpls_features;
netif_set_tso_max_segs(bond_dev, tso_max_segs);
netif_set_tso_max_size(bond_dev, tso_max_size);
bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;
netdev_change_features(bond_dev);
}
static void bond_setup_by_slave(struct net_device *bond_dev,
struct net_device *slave_dev)
{
bond_dev->header_ops = slave_dev->header_ops;
bond_dev->type = slave_dev->type;
bond_dev->hard_header_len = slave_dev->hard_header_len;
bond_dev->needed_headroom = slave_dev->needed_headroom;
bond_dev->addr_len = slave_dev->addr_len;
memcpy(bond_dev->broadcast, slave_dev->broadcast,
slave_dev->addr_len);
if (slave_dev->flags & IFF_POINTOPOINT) {
bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
}
}
/* On bonding slaves other than the currently active slave, suppress
* duplicates except for alb non-mcast/bcast.
*/
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
struct slave *slave,
struct bonding *bond)
{
if (bond_is_slave_inactive(slave)) {
if (BOND_MODE(bond) == BOND_MODE_ALB &&
skb->pkt_type != PACKET_BROADCAST &&
skb->pkt_type != PACKET_MULTICAST)
return false;
return true;
}
return false;
}
static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct slave *slave;
struct bonding *bond;
int (*recv_probe)(const struct sk_buff *, struct bonding *,
struct slave *);
int ret = RX_HANDLER_ANOTHER;
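	/* RX_HANDLER_ANOTHER tells the RX path to re-run protocol processing
	 * with skb->dev switched to the bond device (done below).
	 */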
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return RX_HANDLER_CONSUMED;
*pskb = skb;
slave = bond_slave_get_rcu(skb->dev);
bond = slave->bond;
recv_probe = READ_ONCE(bond->recv_probe);
if (recv_probe) {
ret = recv_probe(skb, bond, slave);
if (ret == RX_HANDLER_CONSUMED) {
consume_skb(skb);
return ret;
}
}
/*
* For packets determined by bond_should_deliver_exact_match() call to
* be suppressed we want to make an exception for link-local packets.
* This is necessary for e.g. LLDP daemons to be able to monitor
* inactive slave links without being forced to bind to them
* explicitly.
*
* At the same time, packets that are passed to the bonding master
* (including link-local ones) can have their originating interface
* determined via PACKET_ORIGDEV socket option.
*/
if (bond_should_deliver_exact_match(skb, slave, bond)) {
if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
return RX_HANDLER_PASS;
return RX_HANDLER_EXACT;
}
skb->dev = bond->dev;
if (BOND_MODE(bond) == BOND_MODE_ALB &&
netif_is_bridge_port(bond->dev) &&
skb->pkt_type == PACKET_HOST) {
if (unlikely(skb_cow_head(skb,
skb->data - skb_mac_header(skb)))) {
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
bond->dev->addr_len);
}
return ret;
}
static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
{
switch (BOND_MODE(bond)) {
case BOND_MODE_ROUNDROBIN:
return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
case BOND_MODE_ACTIVEBACKUP:
return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
case BOND_MODE_BROADCAST:
return NETDEV_LAG_TX_TYPE_BROADCAST;
case BOND_MODE_XOR:
case BOND_MODE_8023AD:
return NETDEV_LAG_TX_TYPE_HASH;
default:
return NETDEV_LAG_TX_TYPE_UNKNOWN;
}
}
static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
enum netdev_lag_tx_type type)
{
if (type != NETDEV_LAG_TX_TYPE_HASH)
return NETDEV_LAG_HASH_NONE;
switch (bond->params.xmit_policy) {
case BOND_XMIT_POLICY_LAYER2:
return NETDEV_LAG_HASH_L2;
case BOND_XMIT_POLICY_LAYER34:
return NETDEV_LAG_HASH_L34;
case BOND_XMIT_POLICY_LAYER23:
return NETDEV_LAG_HASH_L23;
case BOND_XMIT_POLICY_ENCAP23:
return NETDEV_LAG_HASH_E23;
case BOND_XMIT_POLICY_ENCAP34:
return NETDEV_LAG_HASH_E34;
case BOND_XMIT_POLICY_VLAN_SRCMAC:
return NETDEV_LAG_HASH_VLAN_SRCMAC;
default:
return NETDEV_LAG_HASH_UNKNOWN;
}
}
static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
struct netlink_ext_ack *extack)
{
struct netdev_lag_upper_info lag_upper_info;
enum netdev_lag_tx_type type;
int err;
type = bond_lag_tx_type(bond);
lag_upper_info.tx_type = type;
lag_upper_info.hash_type = bond_lag_hash_type(bond, type);
err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
&lag_upper_info, extack);
if (err)
return err;
slave->dev->flags |= IFF_SLAVE;
return 0;
}
static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
netdev_upper_dev_unlink(slave->dev, bond->dev);
slave->dev->flags &= ~IFF_SLAVE;
}
static void slave_kobj_release(struct kobject *kobj)
{
struct slave *slave = to_slave(kobj);
struct bonding *bond = bond_get_bond_by_slave(slave);
cancel_delayed_work_sync(&slave->notify_work);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
kfree(SLAVE_AD_INFO(slave));
kfree(slave);
}
static struct kobj_type slave_ktype = {
.release = slave_kobj_release,
#ifdef CONFIG_SYSFS
.sysfs_ops = &slave_sysfs_ops,
#endif
};
static int bond_kobj_init(struct slave *slave)
{
int err;
err = kobject_init_and_add(&slave->kobj, &slave_ktype,
&(slave->dev->dev.kobj), "bonding_slave");
if (err)
kobject_put(&slave->kobj);
return err;
}
static struct slave *bond_alloc_slave(struct bonding *bond,
struct net_device *slave_dev)
{
struct slave *slave = NULL;
slave = kzalloc(sizeof(*slave), GFP_KERNEL);
if (!slave)
return NULL;
slave->bond = bond;
slave->dev = slave_dev;
INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
if (bond_kobj_init(slave))
return NULL;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
GFP_KERNEL);
if (!SLAVE_AD_INFO(slave)) {
kobject_put(&slave->kobj);
return NULL;
}
}
return slave;
}
static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
info->bond_mode = BOND_MODE(bond);
info->miimon = bond->params.miimon;
info->num_slaves = bond->slave_cnt;
}
static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
strcpy(info->slave_name, slave->dev->name);
info->link = slave->link;
info->state = bond_slave_state(slave);
info->link_failure_count = slave->link_failure_count;
}
static void bond_netdev_notify_work(struct work_struct *_work)
{
struct slave *slave = container_of(_work, struct slave,
notify_work.work);
if (rtnl_trylock()) {
struct netdev_bonding_info binfo;
bond_fill_ifslave(slave, &binfo.slave);
bond_fill_ifbond(slave->bond, &binfo.master);
netdev_bonding_info_change(slave->dev, &binfo);
rtnl_unlock();
} else {
queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
}
}
void bond_queue_slave_event(struct slave *slave)
{
queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}
void bond_lower_state_changed(struct slave *slave)
{
struct netdev_lag_lower_state_info info;
info.link_up = slave->link == BOND_LINK_UP ||
slave->link == BOND_LINK_FAIL;
info.tx_enabled = bond_is_active_slave(slave);
netdev_lower_state_changed(slave->dev, &info);
}
#define BOND_NL_ERR(bond_dev, extack, errmsg) do { \
if (extack) \
NL_SET_ERR_MSG(extack, errmsg); \
else \
netdev_err(bond_dev, "Error: %s\n", errmsg); \
} while (0)
#define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do { \
if (extack) \
NL_SET_ERR_MSG(extack, errmsg); \
else \
slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg); \
} while (0)
/* The bonding driver uses ether_setup() to convert a master bond device
 * to ARPHRD_ETHER, which resets the target netdevice's flags, so we always
 * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE and IFF_UP
 * if they were set.
*/
static void bond_ether_setup(struct net_device *bond_dev)
{
unsigned int flags = bond_dev->flags & (IFF_SLAVE | IFF_UP);
ether_setup(bond_dev);
bond_dev->flags |= IFF_MASTER | flags;
bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
}
void bond_xdp_set_features(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
xdp_features_t val = NETDEV_XDP_ACT_MASK;
struct list_head *iter;
struct slave *slave;
ASSERT_RTNL();
if (!bond_xdp_check(bond)) {
xdp_clear_features_flag(bond_dev);
return;
}
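	/* Advertise only the XDP features supported by every slave. */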
bond_for_each_slave(bond, slave, iter)
val &= slave->dev->xdp_features;
xdp_set_features_flag(bond_dev, val);
}
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
struct netlink_ext_ack *extack)
{
struct bonding *bond = netdev_priv(bond_dev);
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL, *prev_slave;
struct sockaddr_storage ss;
int link_reporting;
int res = 0, i;
if (slave_dev->flags & IFF_MASTER &&
!netif_is_bond_master(slave_dev)) {
BOND_NL_ERR(bond_dev, extack,
"Device type (master device) cannot be enslaved");
return -EPERM;
}
if (!bond->params.use_carrier &&
slave_dev->ethtool_ops->get_link == NULL &&
slave_ops->ndo_eth_ioctl == NULL) {
slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
}
/* already in-use? */
if (netdev_is_rx_handler_busy(slave_dev)) {
SLAVE_NL_ERR(bond_dev, slave_dev, extack,
"Device is in use and cannot be enslaved");
return -EBUSY;
}
if (bond_dev == slave_dev) {
BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself.");
return -EPERM;
}
/* vlan challenged mutual exclusion */
/* no need to lock since we're protected by rtnl_lock */
if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
if (vlan_uses_dev(bond_dev)) {
SLAVE_NL_ERR(bond_dev, slave_dev, extack,
"Can not enslave VLAN challenged device to VLAN enabled bond");
return -EPERM;
} else {
slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
}
} else {
slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
}
if (slave_dev->features & NETIF_F_HW_ESP)
slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");
/* Old ifenslave binaries are no longer supported. These can
* be identified with moderate accuracy by the state of the slave:
* the current ifenslave will set the interface down prior to
* enslaving it; the old ifenslave will not.
*/
if (slave_dev->flags & IFF_UP) {
SLAVE_NL_ERR(bond_dev, slave_dev, extack,
"Device can not be enslaved while up");
return -EPERM;
}
/* set bonding device ether type by slave - bonding netdevices are
* created with ether_setup, so when the slave type is not ARPHRD_ETHER
* there is a need to override some of the type dependent attribs/funcs.
*
* bond ether type mutual exclusion - don't allow slaves of dissimilar
	 * ether type (e.g. ARPHRD_ETHER and ARPHRD_INFINIBAND) to share the same bond
*/
if (!bond_has_slaves(bond)) {
if (bond_dev->type != slave_dev->type) {
slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
bond_dev->type, slave_dev->type);
res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
bond_dev);
res = notifier_to_errno(res);
if (res) {
slave_err(bond_dev, slave_dev, "refused to change device type\n");
return -EBUSY;
}
/* Flush unicast and multicast addresses */
dev_uc_flush(bond_dev);
dev_mc_flush(bond_dev);
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
else
bond_ether_setup(bond_dev);
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
bond_dev);
}
} else if (bond_dev->type != slave_dev->type) {
SLAVE_NL_ERR(bond_dev, slave_dev, extack,
"Device type is different from other slaves");
return -EINVAL;
}
if (slave_dev->type == ARPHRD_INFINIBAND &&
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
SLAVE_NL_ERR(bond_dev, slave_dev, extack,
"Only active-backup mode is supported for infiniband slaves");
res = -EOPNOTSUPP;
goto err_undo_flags;
}
if (!slave_ops->ndo_set_mac_address ||
slave_dev->type == ARPHRD_INFINIBAND) {
slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
if (!bond_has_slaves(bond)) {
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
} else {
SLAVE_NL_ERR(bond_dev, slave_dev, extack,
"Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
res = -EOPNOTSUPP;
goto err_undo_flags;
}
}
}
call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's.
*/
if (!bond_has_slaves(bond) &&
bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
res = bond_set_dev_addr(bond->dev, slave_dev);
if (res)
goto err_undo_flags;
}
new_slave = bond_alloc_slave(bond, slave_dev);
if (!new_slave) {
res = -ENOMEM;
goto err_undo_flags;
}
/* Set the new_slave's queue_id to be zero. Queue ID mapping
* is set via sysfs or module option if desired.
*/
new_slave->queue_id = 0;
/* Save slave's original mtu and then set it to match the bond */
new_slave->original_mtu = slave_dev->mtu;
res = dev_set_mtu(slave_dev, bond->dev->mtu);
if (res) {
slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res);
goto err_free;
}
/* Save slave's original ("permanent") mac address for modes
* that need it, and for restoring it upon release, and then
* set it to the master's address
*/
bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
slave_dev->addr_len);
if (!bond->params.fail_over_mac ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* Set slave to master's mac address. The application already
* set the master's mac address to that of the first slave
*/
memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
ss.ss_family = slave_dev->type;
res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
extack);
if (res) {
slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
goto err_restore_mtu;
}
}
/* set no_addrconf flag before open to prevent IPv6 addrconf */
slave_dev->priv_flags |= IFF_NO_ADDRCONF;
/* open the slave since the application closed it */
res = dev_open(slave_dev, extack);
if (res) {
slave_err(bond_dev, slave_dev, "Opening slave failed\n");
goto err_restore_mac;
}
slave_dev->priv_flags |= IFF_BONDING;
/* initialize slave stats */
dev_get_stats(new_slave->dev, &new_slave->slave_stats);
if (bond_is_lb(bond)) {
/* bond_alb_init_slave() must be called before all other stages since
* it might fail and we do not want to have to undo everything
*/
res = bond_alb_init_slave(bond, new_slave);
if (res)
goto err_close;
}
res = vlan_vids_add_by_dev(slave_dev, bond_dev);
if (res) {
slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n");
goto err_close;
}
prev_slave = bond_last_slave(bond);
new_slave->delay = 0;
new_slave->link_failure_count = 0;
if (bond_update_speed_duplex(new_slave) &&
bond_needs_speed_duplex(bond))
new_slave->link = BOND_LINK_DOWN;
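	/* Seed the RX/TX timestamps one arp_interval in the past so the ARP
	 * monitor does not treat the freshly added slave as recently active.
	 */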
new_slave->last_rx = jiffies -
(msecs_to_jiffies(bond->params.arp_interval) + 1);
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
new_slave->target_last_arp_rx[i] = new_slave->last_rx;
new_slave->last_tx = new_slave->last_rx;
if (bond->params.miimon && !bond->params.use_carrier) {
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
if ((link_reporting == -1) && !bond->params.arp_interval) {
/* miimon is set but a bonded network driver
* does not support ETHTOOL/MII and
* arp_interval is not set. Note: if
			 * use_carrier is enabled, we will never get
			 * here (because netif_carrier is always
* supported); thus, we don't need to change
* the messages for netif_carrier.
*/
slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
} else if (link_reporting == -1) {
			/* unable to get link status using mii/ethtool */
slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
}
}
/* check for initial state */
new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
if (bond->params.updelay) {
bond_set_slave_link_state(new_slave,
BOND_LINK_BACK,
BOND_SLAVE_NOTIFY_NOW);
new_slave->delay = bond->params.updelay;
} else {
bond_set_slave_link_state(new_slave,
BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
}
} else {
bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
BOND_SLAVE_NOTIFY_NOW);
}
} else if (bond->params.arp_interval) {
bond_set_slave_link_state(new_slave,
(netif_carrier_ok(slave_dev) ?
BOND_LINK_UP : BOND_LINK_DOWN),
BOND_SLAVE_NOTIFY_NOW);
} else {
bond_set_slave_link_state(new_slave, BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
}
if (new_slave->link != BOND_LINK_DOWN)
new_slave->last_link_up = jiffies;
slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
new_slave->link == BOND_LINK_DOWN ? "DOWN" :
(new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
if (bond_uses_primary(bond) && bond->params.primary[0]) {
/* if there is a primary slave, remember it */
if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
rcu_assign_pointer(bond->primary_slave, new_slave);
bond->force_primary = true;
}
}
switch (BOND_MODE(bond)) {
case BOND_MODE_ACTIVEBACKUP:
bond_set_slave_inactive_flags(new_slave,
BOND_SLAVE_NOTIFY_NOW);
break;
case BOND_MODE_8023AD:
/* in 802.3ad mode, the internal mechanism
* will activate the slaves in the selected
* aggregator
*/
bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
/* if this is the first slave */
if (!prev_slave) {
SLAVE_AD_INFO(new_slave)->id = 1;
			/* Initialize AD with the number of times the AD timer is called in 1 second;
			 * this can be done only after the MAC address of the bond is set.
			 */
bond_3ad_initialize(bond);
} else {
SLAVE_AD_INFO(new_slave)->id =
SLAVE_AD_INFO(prev_slave)->id + 1;
}
bond_3ad_bind_slave(new_slave);
break;
case BOND_MODE_TLB:
case BOND_MODE_ALB:
bond_set_active_slave(new_slave);
bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
break;
default:
slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");
/* always active in trunk mode */
bond_set_active_slave(new_slave);
/* In trunking mode there is little meaning to curr_active_slave
* anyway (it holds no special properties of the bond device),
* so we can change it without calling change_active_interface()
*/
if (!rcu_access_pointer(bond->curr_active_slave) &&
new_slave->link == BOND_LINK_UP)
rcu_assign_pointer(bond->curr_active_slave, new_slave);
break;
} /* switch(bond_mode) */
#ifdef CONFIG_NET_POLL_CONTROLLER
if (bond->dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
res = -EBUSY;
goto err_detach;
}
}
#endif
if (!(bond_dev->features & NETIF_F_LRO))
dev_disable_lro(slave_dev);
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
if (res) {
slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res);
goto err_detach;
}
res = bond_master_upper_dev_link(bond, new_slave, extack);
if (res) {
slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res);
goto err_unregister;
}
bond_lower_state_changed(new_slave);
res = bond_sysfs_slave_add(new_slave);
if (res) {
slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
goto err_upper_unlink;
}
/* If the mode uses primary, then the following is handled by
* bond_change_active_slave().
*/
if (!bond_uses_primary(bond)) {
/* set promiscuity level to new slave */
if (bond_dev->flags & IFF_PROMISC) {
res = dev_set_promiscuity(slave_dev, 1);
if (res)
goto err_sysfs_del;
}
/* set allmulti level to new slave */
if (bond_dev->flags & IFF_ALLMULTI) {
res = dev_set_allmulti(slave_dev, 1);
if (res) {
if (bond_dev->flags & IFF_PROMISC)
dev_set_promiscuity(slave_dev, -1);
goto err_sysfs_del;
}
}
if (bond_dev->flags & IFF_UP) {
netif_addr_lock_bh(bond_dev);
dev_mc_sync_multiple(slave_dev, bond_dev);
dev_uc_sync_multiple(slave_dev, bond_dev);
netif_addr_unlock_bh(bond_dev);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
dev_mc_add(slave_dev, lacpdu_mcast_addr);
}
}
bond->slave_cnt++;
bond_compute_features(bond);
bond_set_carrier(bond);
if (bond_uses_primary(bond)) {
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
}
if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
if (!slave_dev->netdev_ops->ndo_bpf ||
!slave_dev->netdev_ops->ndo_xdp_xmit) {
if (bond->xdp_prog) {
SLAVE_NL_ERR(bond_dev, slave_dev, extack,
"Slave does not support XDP");
res = -EOPNOTSUPP;
goto err_sysfs_del;
}
} else if (bond->xdp_prog) {
struct netdev_bpf xdp = {
.command = XDP_SETUP_PROG,
.flags = 0,
.prog = bond->xdp_prog,
.extack = extack,
};
if (dev_xdp_prog_count(slave_dev) > 0) {
SLAVE_NL_ERR(bond_dev, slave_dev, extack,
"Slave has XDP program loaded, please unload before enslaving");
res = -EOPNOTSUPP;
goto err_sysfs_del;
}
res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
if (res < 0) {
/* ndo_bpf() sets extack error message */
slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
goto err_sysfs_del;
}
if (bond->xdp_prog)
bpf_prog_inc(bond->xdp_prog);
}
bond_xdp_set_features(bond_dev);
slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
bond_is_active_slave(new_slave) ? "an active" : "a backup",
new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
/* enslave is successful */
bond_queue_slave_event(new_slave);
return 0;
/* Undo stages on error */
err_sysfs_del:
bond_sysfs_slave_del(new_slave);
err_upper_unlink:
bond_upper_dev_unlink(bond, new_slave);
err_unregister:
netdev_rx_handler_unregister(slave_dev);
err_detach:
vlan_vids_del_by_dev(slave_dev, bond_dev);
if (rcu_access_pointer(bond->primary_slave) == new_slave)
RCU_INIT_POINTER(bond->primary_slave, NULL);
if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
block_netpoll_tx();
bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
unblock_netpoll_tx();
}
/* either primary_slave or curr_active_slave might've changed */
synchronize_rcu();
slave_disable_netpoll(new_slave);
err_close:
if (!netif_is_bond_master(slave_dev))
slave_dev->priv_flags &= ~IFF_BONDING;
dev_close(slave_dev);
err_restore_mac:
slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
if (!bond->params.fail_over_mac ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* XXX TODO - fom follow mode needs to change master's
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
*/
bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
new_slave->dev->addr_len);
ss.ss_family = slave_dev->type;
dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
}
err_restore_mtu:
dev_set_mtu(slave_dev, new_slave->original_mtu);
err_free:
kobject_put(&new_slave->kobj);
err_undo_flags:
/* Enslave of first slave has failed and we need to fix master's mac */
if (!bond_has_slaves(bond)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr,
slave_dev->dev_addr))
eth_hw_addr_random(bond_dev);
if (bond_dev->type != ARPHRD_ETHER) {
dev_close(bond_dev);
bond_ether_setup(bond_dev);
}
}
return res;
}
/* Try to release the slave device <slave> from the bond device <master>
 * It is legal to access curr_active_slave without a lock because the entire
 * function is RTNL-locked. If "all" is true it means that the function is being called
* while destroying a bond interface and all slaves are being released.
*
* The rules for slave state should be:
* for Active/Backup:
* Active stays on all backups go down
* for Bonded connections:
* The first up interface should be left on and all others downed.
*/
static int __bond_release_one(struct net_device *bond_dev,
struct net_device *slave_dev,
bool all, bool unregister)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
struct sockaddr_storage ss;
int old_flags = bond_dev->flags;
netdev_features_t old_features = bond_dev->features;
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
!netdev_has_upper_dev(slave_dev, bond_dev)) {
slave_dbg(bond_dev, slave_dev, "cannot release slave\n");
return -EINVAL;
}
block_netpoll_tx();
slave = bond_get_slave_by_dev(bond, slave_dev);
if (!slave) {
/* not a slave of this bond */
slave_info(bond_dev, slave_dev, "interface not enslaved\n");
unblock_netpoll_tx();
return -EINVAL;
}
bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
bond_sysfs_slave_del(slave);
/* recompute stats just before removing the slave */
bond_get_stats(bond->dev, &bond->bond_stats);
if (bond->xdp_prog) {
struct netdev_bpf xdp = {
.command = XDP_SETUP_PROG,
.flags = 0,
.prog = NULL,
.extack = NULL,
};
if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
}
/* unregister rx_handler early so bond_handle_frame wouldn't be called
* for this slave anymore.
*/
netdev_rx_handler_unregister(slave_dev);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_unbind_slave(slave);
bond_upper_dev_unlink(bond, slave);
if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, slave);
slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
bond_is_active_slave(slave) ? "active" : "backup");
oldcurrent = rcu_access_pointer(bond->curr_active_slave);
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
if (!all && (!bond->params.fail_over_mac ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n",
slave->perm_hwaddr);
}
if (rtnl_dereference(bond->primary_slave) == slave)
RCU_INIT_POINTER(bond->primary_slave, NULL);
if (oldcurrent == slave)
bond_change_active_slave(bond, NULL);
if (bond_is_lb(bond)) {
/* Must be called only after the slave has been
* detached from the list and the curr_active_slave
* has been cleared (if our_slave == old_current),
* but before a new active slave is selected.
*/
bond_alb_deinit_slave(bond, slave);
}
if (all) {
RCU_INIT_POINTER(bond->curr_active_slave, NULL);
} else if (oldcurrent == slave) {
/* Note that we hold RTNL over this sequence, so there
* is no concern that another slave add/remove event
* will interfere.
*/
bond_select_active_slave(bond);
}
bond_set_carrier(bond);
if (!bond_has_slaves(bond))
eth_hw_addr_random(bond_dev);
unblock_netpoll_tx();
synchronize_rcu();
bond->slave_cnt--;
if (!bond_has_slaves(bond)) {
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
}
bond_compute_features(bond);
if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
(old_features & NETIF_F_VLAN_CHALLENGED))
slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
vlan_vids_del_by_dev(slave_dev, bond_dev);
/* If the mode uses primary, then this case was handled above by
* bond_change_active_slave(..., NULL)
*/
if (!bond_uses_primary(bond)) {
/* unset promiscuity level from slave
* NOTE: The NETDEV_CHANGEADDR call above may change the value
* of the IFF_PROMISC flag in the bond_dev, but we need the
* value of that flag before that change, as that was the value
		 * when this slave was attached, so we cache it at the start of the
		 * function and use it here. The same goes for ALLMULTI below.
*/
if (old_flags & IFF_PROMISC)
dev_set_promiscuity(slave_dev, -1);
/* unset allmulti level from slave */
if (old_flags & IFF_ALLMULTI)
dev_set_allmulti(slave_dev, -1);
if (old_flags & IFF_UP)
bond_hw_addr_flush(bond_dev, slave_dev);
}
slave_disable_netpoll(slave);
/* close slave before restoring its mac address */
dev_close(slave_dev);
slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
slave->dev->addr_len);
ss.ss_family = slave_dev->type;
dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
}
if (unregister)
__dev_set_mtu(slave_dev, slave->original_mtu);
else
dev_set_mtu(slave_dev, slave->original_mtu);
if (!netif_is_bond_master(slave_dev))
slave_dev->priv_flags &= ~IFF_BONDING;
bond_xdp_set_features(bond_dev);
kobject_put(&slave->kobj);
return 0;
}
/* A wrapper used because of ndo_del_link */
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
{
return __bond_release_one(bond_dev, slave_dev, false, false);
}
/* First release a slave and then destroy the bond if no more slaves are left.
* Must be under rtnl_lock when this function is called.
*/
static int bond_release_and_destroy(struct net_device *bond_dev,
struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
int ret;
ret = __bond_release_one(bond_dev, slave_dev, false, true);
if (ret == 0 && !bond_has_slaves(bond) &&
bond_dev->reg_state != NETREG_UNREGISTERING) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
netdev_info(bond_dev, "Destroying bond\n");
bond_remove_proc_entry(bond);
unregister_netdevice(bond_dev);
}
return ret;
}
static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
struct bonding *bond = netdev_priv(bond_dev);
bond_fill_ifbond(bond, info);
}
static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
{
struct bonding *bond = netdev_priv(bond_dev);
struct list_head *iter;
int i = 0, res = -ENODEV;
struct slave *slave;
bond_for_each_slave(bond, slave, iter) {
if (i++ == (int)info->slave_id) {
res = 0;
bond_fill_ifslave(slave, info);
break;
}
}
return res;
}
/*-------------------------------- Monitoring -------------------------------*/
/* called with rcu_read_lock() */
static int bond_miimon_inspect(struct bonding *bond)
{
bool ignore_updelay = false;
int link_state, commit = 0;
struct list_head *iter;
struct slave *slave;
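	/* When the bond has no usable slave at all, ignore updelay so that a
	 * link that comes back can be put into service immediately.
	 */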
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
ignore_updelay = !rcu_dereference(bond->curr_active_slave);
} else {
struct bond_up_slave *usable_slaves;
usable_slaves = rcu_dereference(bond->usable_slaves);
if (usable_slaves && usable_slaves->count == 0)
ignore_updelay = true;
}
bond_for_each_slave_rcu(bond, slave, iter) {
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
link_state = bond_check_dev_link(bond, slave->dev, 0);
switch (slave->link) {
case BOND_LINK_UP:
if (link_state)
continue;
bond_propose_link_state(slave, BOND_LINK_FAIL);
commit++;
slave->delay = bond->params.downdelay;
if (slave->delay) {
slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
(BOND_MODE(bond) ==
BOND_MODE_ACTIVEBACKUP) ?
(bond_is_active_slave(slave) ?
"active " : "backup ") : "",
bond->params.downdelay * bond->params.miimon);
}
fallthrough;
case BOND_LINK_FAIL:
if (link_state) {
/* recovered before downdelay expired */
bond_propose_link_state(slave, BOND_LINK_UP);
slave->last_link_up = jiffies;
slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
(bond->params.downdelay - slave->delay) *
bond->params.miimon);
commit++;
continue;
}
if (slave->delay <= 0) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
continue;
}
slave->delay--;
break;
case BOND_LINK_DOWN:
if (!link_state)
continue;
bond_propose_link_state(slave, BOND_LINK_BACK);
commit++;
slave->delay = bond->params.updelay;
if (slave->delay) {
slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
ignore_updelay ? 0 :
bond->params.updelay *
bond->params.miimon);
}
fallthrough;
case BOND_LINK_BACK:
if (!link_state) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
(bond->params.updelay - slave->delay) *
bond->params.miimon);
commit++;
continue;
}
if (ignore_updelay)
slave->delay = 0;
if (slave->delay <= 0) {
bond_propose_link_state(slave, BOND_LINK_UP);
commit++;
ignore_updelay = false;
continue;
}
slave->delay--;
break;
}
}
return commit;
}
static void bond_miimon_link_change(struct bonding *bond,
struct slave *slave,
char link)
{
switch (BOND_MODE(bond)) {
case BOND_MODE_8023AD:
bond_3ad_handle_link_change(slave, link);
break;
case BOND_MODE_TLB:
case BOND_MODE_ALB:
bond_alb_handle_link_change(bond, slave, link);
break;
case BOND_MODE_XOR:
bond_update_slave_arr(bond, NULL);
break;
}
}
static void bond_miimon_commit(struct bonding *bond)
{
struct slave *slave, *primary, *active;
bool do_failover = false;
struct list_head *iter;
ASSERT_RTNL();
bond_for_each_slave(bond, slave, iter) {
switch (slave->link_new_state) {
case BOND_LINK_NOCHANGE:
/* For 802.3ad mode, check current slave speed and
* duplex again in case its port was disabled after
* invalid speed/duplex reporting but recovered before
* link monitoring could make a decision on the actual
* link status
*/
if (BOND_MODE(bond) == BOND_MODE_8023AD &&
slave->link == BOND_LINK_UP)
bond_3ad_adapter_speed_duplex_changed(slave);
continue;
case BOND_LINK_UP:
if (bond_update_speed_duplex(slave) &&
bond_needs_speed_duplex(bond)) {
slave->link = BOND_LINK_DOWN;
if (net_ratelimit())
slave_warn(bond->dev, slave->dev,
"failed to get link speed/duplex\n");
continue;
}
bond_set_slave_link_state(slave, BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
slave->last_link_up = jiffies;
primary = rtnl_dereference(bond->primary_slave);
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
/* prevent it from being the active one */
bond_set_backup_slave(slave);
} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* make it immediately active */
bond_set_active_slave(slave);
}
slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
slave->duplex ? "full" : "half");
bond_miimon_link_change(bond, slave, BOND_LINK_UP);
active = rtnl_dereference(bond->curr_active_slave);
if (!active || slave == primary || slave->prio > active->prio)
do_failover = true;
continue;
case BOND_LINK_DOWN:
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
bond_set_slave_link_state(slave, BOND_LINK_DOWN,
BOND_SLAVE_NOTIFY_NOW);
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
BOND_MODE(bond) == BOND_MODE_8023AD)
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
if (slave == rcu_access_pointer(bond->curr_active_slave))
do_failover = true;
continue;
default:
slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
slave->link_new_state);
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
continue;
}
}
if (do_failover) {
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
}
bond_set_carrier(bond);
}
/* bond_mii_monitor
*
* Really a wrapper that splits the mii monitor into two phases: an
* inspection, then (if inspection indicates something needs to be done)
* an acquisition of appropriate locks followed by a commit phase to
* implement whatever link state changes are indicated.
*/
static void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
bool should_notify_peers = false;
bool commit;
unsigned long delay;
struct slave *slave;
struct list_head *iter;
delay = msecs_to_jiffies(bond->params.miimon);
if (!bond_has_slaves(bond))
goto re_arm;
rcu_read_lock();
should_notify_peers = bond_should_notify_peers(bond);
commit = !!bond_miimon_inspect(bond);
if (bond->send_peer_notif) {
rcu_read_unlock();
if (rtnl_trylock()) {
bond->send_peer_notif--;
rtnl_unlock();
}
} else {
rcu_read_unlock();
}
if (commit) {
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
delay = 1;
should_notify_peers = false;
goto re_arm;
}
bond_for_each_slave(bond, slave, iter) {
bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
}
bond_miimon_commit(bond);
rtnl_unlock(); /* might sleep, hold no other locks */
}
re_arm:
if (bond->params.miimon)
queue_delayed_work(bond->wq, &bond->mii_work, delay);
if (should_notify_peers) {
if (!rtnl_trylock())
return;
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
rtnl_unlock();
}
}
static int bond_upper_dev_walk(struct net_device *upper,
struct netdev_nested_priv *priv)
{
__be32 ip = *(__be32 *)priv->data;
return ip == bond_confirm_addr(upper, 0, ip);
}
static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
{
struct netdev_nested_priv priv = {
.data = (void *)&ip,
};
bool ret = false;
if (ip == bond_confirm_addr(bond->dev, 0, ip))
return true;
rcu_read_lock();
if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv))
ret = true;
rcu_read_unlock();
return ret;
}
#define BOND_VLAN_PROTO_NONE cpu_to_be16(0xffff)
static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags,
struct sk_buff *skb)
{
struct net_device *bond_dev = slave->bond->dev;
struct net_device *slave_dev = slave->dev;
struct bond_vlan_tag *outer_tag = tags;
if (!tags || tags->vlan_proto == BOND_VLAN_PROTO_NONE)
return true;
tags++;
/* Go through all the tags backwards and add them to the packet */
while (tags->vlan_proto != BOND_VLAN_PROTO_NONE) {
if (!tags->vlan_id) {
tags++;
continue;
}
slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
ntohs(outer_tag->vlan_proto), tags->vlan_id);
skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
tags->vlan_id);
if (!skb) {
net_err_ratelimited("failed to insert inner VLAN tag\n");
return false;
}
tags++;
}
/* Set the outer tag */
if (outer_tag->vlan_id) {
slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
outer_tag->vlan_id);
}
return true;
}
/* We go to the (large) trouble of VLAN tagging ARP frames because
* switches in VLAN mode (especially if ports are configured as
* "native" to a VLAN) might not pass non-tagged frames.
*/
static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
__be32 src_ip, struct bond_vlan_tag *tags)
{
struct net_device *bond_dev = slave->bond->dev;
struct net_device *slave_dev = slave->dev;
struct sk_buff *skb;
slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
arp_op, &dest_ip, &src_ip);
skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
NULL, slave_dev->dev_addr, NULL);
if (!skb) {
net_err_ratelimited("ARP packet allocation failed\n");
return;
}
if (bond_handle_vlan(slave, tags, skb)) {
slave_update_last_tx(slave);
arp_xmit(skb);
}
return;
}
/* Validate the device path between the @start_dev and the @end_dev.
* The path is valid if the @end_dev is reachable through device
* stacking.
* When the path is validated, collect any vlan information in the
* path.
*/
struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
struct net_device *end_dev,
int level)
{
struct bond_vlan_tag *tags;
struct net_device *upper;
struct list_head *iter;
if (start_dev == end_dev) {
tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
if (!tags)
return ERR_PTR(-ENOMEM);
tags[level].vlan_proto = BOND_VLAN_PROTO_NONE;
return tags;
}
netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
tags = bond_verify_device_path(upper, end_dev, level + 1);
if (IS_ERR_OR_NULL(tags)) {
if (IS_ERR(tags))
return tags;
continue;
}
if (is_vlan_dev(upper)) {
tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
tags[level].vlan_id = vlan_dev_vlan_id(upper);
}
return tags;
}
return NULL;
}
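/* Illustrative example (device names and VIDs made up): for a stack
 * bond0 -> bond0.100 (802.1Q, vid 100) -> bond0.100.200 (802.1Q, vid 200),
 * bond_verify_device_path(bond0, bond0.100.200, 0) returns a three-entry
 * array:
 *	tags[0] = { ETH_P_8021Q, 100 }		outer tag, closest to the bond
 *	tags[1] = { ETH_P_8021Q, 200 }		inner tag
 *	tags[2] = { BOND_VLAN_PROTO_NONE, 0 }	terminator
 * bond_handle_vlan() later inserts tags[1] into the skb data and sets
 * tags[0] as the hw-accelerated outer tag.
 */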
static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
struct rtable *rt;
struct bond_vlan_tag *tags;
__be32 *targets = bond->params.arp_targets, addr;
int i;
for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
__func__, &targets[i]);
tags = NULL;
/* Find out through which dev the packet should go */
rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
RTO_ONLINK, 0);
if (IS_ERR(rt)) {
/* there's no route to target - try to send arp
* probe to generate any traffic (arp_validate=0)
*/
if (bond->params.arp_validate)
pr_warn_once("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
bond->dev->name,
&targets[i]);
bond_arp_send(slave, ARPOP_REQUEST, targets[i],
0, tags);
continue;
}
/* bond device itself */
if (rt->dst.dev == bond->dev)
goto found;
rcu_read_lock();
tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
rcu_read_unlock();
if (!IS_ERR_OR_NULL(tags))
goto found;
/* Not our device - skip */
slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
&targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
ip_rt_put(rt);
continue;
found:
addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
ip_rt_put(rt);
bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
kfree(tags);
}
}
static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
{
int i;
if (!sip || !bond_has_this_ip(bond, tip)) {
slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
__func__, &sip, &tip);
return;
}
i = bond_get_targets_ip(bond->params.arp_targets, sip);
if (i == -1) {
slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
__func__, &sip);
return;
}
slave->last_rx = jiffies;
slave->target_last_arp_rx[i] = jiffies;
}
static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
struct slave *curr_active_slave, *curr_arp_slave;
unsigned char *arp_ptr;
__be32 sip, tip;
unsigned int alen;
alen = arp_hdr_len(bond->dev);
if (alen > skb_headlen(skb)) {
arp = kmalloc(alen, GFP_ATOMIC);
if (!arp)
goto out_unlock;
if (skb_copy_bits(skb, 0, arp, alen) < 0)
goto out_unlock;
}
if (arp->ar_hln != bond->dev->addr_len ||
skb->pkt_type == PACKET_OTHERHOST ||
skb->pkt_type == PACKET_LOOPBACK ||
arp->ar_hrd != htons(ARPHRD_ETHER) ||
arp->ar_pro != htons(ETH_P_IP) ||
arp->ar_pln != 4)
goto out_unlock;
arp_ptr = (unsigned char *)(arp + 1);
arp_ptr += bond->dev->addr_len;
memcpy(&sip, arp_ptr, 4);
arp_ptr += 4 + bond->dev->addr_len;
memcpy(&tip, arp_ptr, 4);
slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
__func__, slave->dev->name, bond_slave_state(slave),
bond->params.arp_validate, slave_do_arp_validate(bond, slave),
&sip, &tip);
curr_active_slave = rcu_dereference(bond->curr_active_slave);
curr_arp_slave = rcu_dereference(bond->current_arp_slave);
/* We 'trust' the received ARP enough to validate it if:
*
* (a) the slave receiving the ARP is active (which includes the
* current ARP slave, if any), or
*
* (b) the receiving slave isn't active, but there is a currently
* active slave and it received valid arp reply(s) after it became
* the currently active slave, or
*
* (c) there is an ARP slave that sent an ARP during the prior ARP
* interval, and we receive an ARP reply on any slave. We accept
* these because switch FDB update delays may deliver the ARP
* reply to a slave other than the sender of the ARP request.
*
* Note: for (b), backup slaves are receiving the broadcast ARP
* request, not a reply. This request passes from the sending
* slave through the L2 switch(es) to the receiving slave. Since
* this is checking the request, sip/tip are swapped for
* validation.
*
* This is done to avoid endless looping when we can't reach the
* arp_ip_target and fool ourselves with our own arp requests.
*/
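/* Example of case (b) above: a backup slave sees the broadcast ARP
 * request that the bond itself sent towards an arp_ip_target, so the
 * frame carries sip = <bond address> and tip = <target address>.
 * Passing (tip, sip) to bond_validate_arp() therefore matches the
 * target address against arp_targets and confirms the bond address,
 * just as a genuine reply arriving on the active slave would.
 */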
if (bond_is_active_slave(slave))
bond_validate_arp(bond, slave, sip, tip);
else if (curr_active_slave &&
time_after(slave_last_rx(bond, curr_active_slave),
curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_arp(bond, slave, sip, tip);
out_unlock:
if (arp != (struct arphdr *)skb->data)
kfree(arp);
return RX_HANDLER_ANOTHER;
}
#if IS_ENABLED(CONFIG_IPV6)
static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
const struct in6_addr *saddr, struct bond_vlan_tag *tags)
{
struct net_device *bond_dev = slave->bond->dev;
struct net_device *slave_dev = slave->dev;
struct in6_addr mcaddr;
struct sk_buff *skb;
slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n",
daddr, saddr);
skb = ndisc_ns_create(slave_dev, daddr, saddr, 0);
if (!skb) {
net_err_ratelimited("NS packet allocation failed\n");
return;
}
addrconf_addr_solict_mult(daddr, &mcaddr);
if (bond_handle_vlan(slave, tags, skb)) {
slave_update_last_tx(slave);
ndisc_send_skb(skb, &mcaddr, saddr);
}
}
static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
{
struct in6_addr *targets = bond->params.ns_targets;
struct bond_vlan_tag *tags;
struct dst_entry *dst;
struct in6_addr saddr;
struct flowi6 fl6;
int i;
for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) {
slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n",
__func__, &targets[i]);
tags = NULL;
/* Find out through which dev the packet should go */
memset(&fl6, 0, sizeof(struct flowi6));
fl6.daddr = targets[i];
fl6.flowi6_oif = bond->dev->ifindex;
dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
if (dst->error) {
dst_release(dst);
/* there's no route to target - try to send arp
* probe to generate any traffic (arp_validate=0)
*/
if (bond->params.arp_validate)
pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n",
bond->dev->name,
&targets[i]);
bond_ns_send(slave, &targets[i], &in6addr_any, tags);
continue;
}
/* bond device itself */
if (dst->dev == bond->dev)
goto found;
rcu_read_lock();
tags = bond_verify_device_path(bond->dev, dst->dev, 0);
rcu_read_unlock();
if (!IS_ERR_OR_NULL(tags))
goto found;
/* Not our device - skip */
slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n",
&targets[i], dst->dev ? dst->dev->name : "NULL");
dst_release(dst);
continue;
found:
if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
bond_ns_send(slave, &targets[i], &saddr, tags);
else
bond_ns_send(slave, &targets[i], &in6addr_any, tags);
dst_release(dst);
kfree(tags);
}
}
static int bond_confirm_addr6(struct net_device *dev,
struct netdev_nested_priv *priv)
{
struct in6_addr *addr = (struct in6_addr *)priv->data;
return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
}
static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
{
struct netdev_nested_priv priv = {
.data = addr,
};
int ret = false;
if (bond_confirm_addr6(bond->dev, &priv))
return true;
rcu_read_lock();
if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
ret = true;
rcu_read_unlock();
return ret;
}
static void bond_validate_na(struct bonding *bond, struct slave *slave,
struct in6_addr *saddr, struct in6_addr *daddr)
{
int i;
/* Ignore NAs that:
* 1. Source address is unspecified address.
* 2. Dest address is neither all-nodes multicast address nor
* exist on bond interface.
*/
if (ipv6_addr_any(saddr) ||
(!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
!bond_has_this_ip6(bond, daddr))) {
slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
__func__, saddr, daddr);
return;
}
i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
if (i == -1) {
slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
__func__, saddr);
return;
}
slave->last_rx = jiffies;
slave->target_last_arp_rx[i] = jiffies;
}
static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct slave *curr_active_slave, *curr_arp_slave;
struct in6_addr *saddr, *daddr;
struct {
struct ipv6hdr ip6;
struct icmp6hdr icmp6;
} *combined, _combined;
if (skb->pkt_type == PACKET_OTHERHOST ||
skb->pkt_type == PACKET_LOOPBACK)
goto out;
combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
(combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
goto out;
saddr = &combined->ip6.saddr;
daddr = &combined->ip6.daddr;
slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
__func__, slave->dev->name, bond_slave_state(slave),
bond->params.arp_validate, slave_do_arp_validate(bond, slave),
saddr, daddr);
curr_active_slave = rcu_dereference(bond->curr_active_slave);
curr_arp_slave = rcu_dereference(bond->current_arp_slave);
/* We 'trust' the received ARP enough to validate it if:
* see bond_arp_rcv().
*/
if (bond_is_active_slave(slave))
bond_validate_na(bond, slave, saddr, daddr);
else if (curr_active_slave &&
time_after(slave_last_rx(bond, curr_active_slave),
curr_active_slave->last_link_up))
bond_validate_na(bond, slave, daddr, saddr);
else if (curr_arp_slave &&
bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_na(bond, slave, saddr, daddr);
out:
return RX_HANDLER_ANOTHER;
}
#endif
int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
#if IS_ENABLED(CONFIG_IPV6)
bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6);
#endif
bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
__func__, skb->dev->name);
/* Use arp validate logic for both ARP and NS */
if (!slave_do_arp_validate(bond, slave)) {
if ((slave_do_arp_validate_only(bond) && is_arp) ||
#if IS_ENABLED(CONFIG_IPV6)
(slave_do_arp_validate_only(bond) && is_ipv6) ||
#endif
!slave_do_arp_validate_only(bond))
slave->last_rx = jiffies;
return RX_HANDLER_ANOTHER;
} else if (is_arp) {
return bond_arp_rcv(skb, bond, slave);
#if IS_ENABLED(CONFIG_IPV6)
} else if (is_ipv6) {
return bond_na_rcv(skb, bond, slave);
#endif
} else {
return RX_HANDLER_ANOTHER;
}
}
static void bond_send_validate(struct bonding *bond, struct slave *slave)
{
bond_arp_send_all(bond, slave);
#if IS_ENABLED(CONFIG_IPV6)
bond_ns_send_all(bond, slave);
#endif
}
/* function to verify if we're in the arp_interval timeslice, returns true if
* (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
 * arp_interval/2). the arp_interval/2 is needed for really fast networks.
*/
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
int mod)
{
int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
return time_in_range(jiffies,
last_act - delta_in_ticks,
last_act + mod * delta_in_ticks + delta_in_ticks/2);
}
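/* Worked example (HZ=250 assumed only for the arithmetic): with
 * arp_interval=1000, delta_in_ticks = msecs_to_jiffies(1000) = 250
 * jiffies, so for mod=1 the accepted window is
 *	[last_act - 250, last_act + 250 + 125]
 * i.e. from one interval before last_act up to one and a half
 * intervals after it.
 */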
/* This function is called regularly to monitor each slave's link
* ensuring that traffic is being sent and received when arp monitoring
* is used in load-balancing mode. if the adapter has been dormant, then an
 * arp is transmitted to generate traffic. see bond_activebackup_arp_mon for
* arp monitoring in active backup mode.
*/
static void bond_loadbalance_arp_mon(struct bonding *bond)
{
struct slave *slave, *oldcurrent;
struct list_head *iter;
int do_failover = 0, slave_state_changed = 0;
if (!bond_has_slaves(bond))
goto re_arm;
rcu_read_lock();
oldcurrent = rcu_dereference(bond->curr_active_slave);
/* see if any of the previous devices are up now (i.e. they have
* xmt and rcv traffic). the curr_active_slave does not come into
* the picture unless it is null. also, slave->last_link_up is not
* needed here because we send an arp on each slave and give a slave
* as long as it needs to get the tx/rx within the delta.
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
bond_for_each_slave_rcu(bond, slave, iter) {
unsigned long last_tx = slave_last_tx(slave);
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, last_tx, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
slave_state_changed = 1;
/* primary_slave has no meaning in round-robin
* mode. the window of a slave being up and
* curr_active_slave being null after enslaving
* is closed.
*/
if (!oldcurrent) {
slave_info(bond->dev, slave->dev, "link status definitely up\n");
do_failover = 1;
} else {
slave_info(bond->dev, slave->dev, "interface is now up\n");
}
}
} else {
/* slave->link == BOND_LINK_UP */
/* not all switches will respond to an arp request
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
!bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
slave_state_changed = 1;
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
slave_info(bond->dev, slave->dev, "interface is now down\n");
if (slave == oldcurrent)
do_failover = 1;
}
}
/* note: if switch is in round-robin mode, all links
* must tx arp to ensure all links rx an arp - otherwise
* links may oscillate or not come up at all; if switch is
* in something like xor mode, there is nothing we can
* do - all replies will be rx'ed on same link causing slaves
* to be unstable during low/no traffic periods
*/
if (bond_slave_is_up(slave))
bond_send_validate(bond, slave);
}
rcu_read_unlock();
if (do_failover || slave_state_changed) {
if (!rtnl_trylock())
goto re_arm;
bond_for_each_slave(bond, slave, iter) {
if (slave->link_new_state != BOND_LINK_NOCHANGE)
slave->link = slave->link_new_state;
}
if (slave_state_changed) {
bond_slave_state_change(bond);
if (BOND_MODE(bond) == BOND_MODE_XOR)
bond_update_slave_arr(bond, NULL);
}
if (do_failover) {
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
}
rtnl_unlock();
}
re_arm:
if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work,
msecs_to_jiffies(bond->params.arp_interval));
}
/* Called to inspect slaves for active-backup mode ARP monitor link state
* changes. Sets proposed link state in slaves to specify what action
* should take place for the slave. Returns 0 if no changes are found, >0
* if changes to link states must be committed.
*
* Called with rcu_read_lock held.
*/
static int bond_ab_arp_inspect(struct bonding *bond)
{
unsigned long last_tx, last_rx;
struct list_head *iter;
struct slave *slave;
int commit = 0;
bond_for_each_slave_rcu(bond, slave, iter) {
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
last_rx = slave_last_rx(bond, slave);
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, last_rx, 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
commit++;
} else if (slave->link == BOND_LINK_BACK) {
bond_propose_link_state(slave, BOND_LINK_FAIL);
commit++;
}
continue;
}
/* Give slaves 2*delta after being enslaved or made
* active. This avoids bouncing, as the last receive
* times need a full ARP monitor cycle to be updated.
*/
if (bond_time_in_interval(bond, slave->last_link_up, 2))
continue;
/* Backup slave is down if:
* - No current_arp_slave AND
* - more than (missed_max+1)*delta since last receive AND
* - the bond has an IP address
*
* Note: a non-null current_arp_slave indicates
* the curr_active_slave went down and we are
* searching for a new one; under this condition
* we only take the curr_active_slave down - this
* gives each slave a chance to tx/rx traffic
* before being taken out
*/
if (!bond_is_active_slave(slave) &&
!rcu_access_pointer(bond->current_arp_slave) &&
!bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
/* Active slave is down if:
* - more than missed_max*delta since transmitting OR
* - (more than missed_max*delta since receive AND
* the bond has an IP address)
*/
last_tx = slave_last_tx(slave);
if (bond_is_active_slave(slave) &&
(!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
!bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
}
return commit;
}
/* Called to commit link state changes noted by inspection step of
* active-backup mode ARP monitor.
*
 * Called with RTNL held.
*/
static void bond_ab_arp_commit(struct bonding *bond)
{
bool do_failover = false;
struct list_head *iter;
unsigned long last_tx;
struct slave *slave;
bond_for_each_slave(bond, slave, iter) {
switch (slave->link_new_state) {
case BOND_LINK_NOCHANGE:
continue;
case BOND_LINK_UP:
last_tx = slave_last_tx(slave);
if (rtnl_dereference(bond->curr_active_slave) != slave ||
(!rtnl_dereference(bond->curr_active_slave) &&
bond_time_in_interval(bond, last_tx, 1))) {
struct slave *current_arp_slave;
current_arp_slave = rtnl_dereference(bond->current_arp_slave);
bond_set_slave_link_state(slave, BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
if (current_arp_slave) {
bond_set_slave_inactive_flags(
current_arp_slave,
BOND_SLAVE_NOTIFY_NOW);
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
}
slave_info(bond->dev, slave->dev, "link status definitely up\n");
if (!rtnl_dereference(bond->curr_active_slave) ||
slave == rtnl_dereference(bond->primary_slave) ||
slave->prio > rtnl_dereference(bond->curr_active_slave)->prio)
do_failover = true;
}
continue;
case BOND_LINK_DOWN:
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
bond_set_slave_link_state(slave, BOND_LINK_DOWN,
BOND_SLAVE_NOTIFY_NOW);
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
if (slave == rtnl_dereference(bond->curr_active_slave)) {
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
do_failover = true;
}
continue;
case BOND_LINK_FAIL:
bond_set_slave_link_state(slave, BOND_LINK_FAIL,
BOND_SLAVE_NOTIFY_NOW);
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
/* A slave has just been enslaved and has become
* the current active slave.
*/
if (rtnl_dereference(bond->curr_active_slave))
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
continue;
default:
slave_err(bond->dev, slave->dev,
"impossible: link_new_state %d on slave\n",
slave->link_new_state);
continue;
}
}
if (do_failover) {
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
}
bond_set_carrier(bond);
}
/* Send ARP probes for active-backup mode ARP monitor.
*
* Called with rcu_read_lock held.
*/
static bool bond_ab_arp_probe(struct bonding *bond)
{
struct slave *slave, *before = NULL, *new_slave = NULL,
*curr_arp_slave = rcu_dereference(bond->current_arp_slave),
*curr_active_slave = rcu_dereference(bond->curr_active_slave);
struct list_head *iter;
bool found = false;
bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
if (curr_arp_slave && curr_active_slave)
netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
curr_arp_slave->dev->name,
curr_active_slave->dev->name);
if (curr_active_slave) {
bond_send_validate(bond, curr_active_slave);
return should_notify_rtnl;
}
/* if we don't have a curr_active_slave, search for the next available
* backup slave from the current_arp_slave and make it the candidate
* for becoming the curr_active_slave
*/
if (!curr_arp_slave) {
curr_arp_slave = bond_first_slave_rcu(bond);
if (!curr_arp_slave)
return should_notify_rtnl;
}
bond_for_each_slave_rcu(bond, slave, iter) {
if (!found && !before && bond_slave_is_up(slave))
before = slave;
if (found && !new_slave && bond_slave_is_up(slave))
new_slave = slave;
/* if the link state is up at this point, we
* mark it down - this can happen if we have
* simultaneous link failures and
* reselect_active_interface doesn't make this
* one the current slave so it is still marked
* up when it is actually down
*/
if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
bond_set_slave_link_state(slave, BOND_LINK_DOWN,
BOND_SLAVE_NOTIFY_LATER);
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_LATER);
slave_info(bond->dev, slave->dev, "backup interface is now down\n");
}
if (slave == curr_arp_slave)
found = true;
}
if (!new_slave && before)
new_slave = before;
if (!new_slave)
goto check_state;
bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
BOND_SLAVE_NOTIFY_LATER);
bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
bond_send_validate(bond, new_slave);
new_slave->last_link_up = jiffies;
rcu_assign_pointer(bond->current_arp_slave, new_slave);
check_state:
bond_for_each_slave_rcu(bond, slave, iter) {
if (slave->should_notify || slave->should_notify_link) {
should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
break;
}
}
return should_notify_rtnl;
}
static void bond_activebackup_arp_mon(struct bonding *bond)
{
bool should_notify_peers = false;
bool should_notify_rtnl = false;
int delta_in_ticks;
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
if (!bond_has_slaves(bond))
goto re_arm;
rcu_read_lock();
should_notify_peers = bond_should_notify_peers(bond);
if (bond_ab_arp_inspect(bond)) {
rcu_read_unlock();
/* Race avoidance with bond_close flush of workqueue */
if (!rtnl_trylock()) {
delta_in_ticks = 1;
should_notify_peers = false;
goto re_arm;
}
bond_ab_arp_commit(bond);
rtnl_unlock();
rcu_read_lock();
}
should_notify_rtnl = bond_ab_arp_probe(bond);
rcu_read_unlock();
re_arm:
if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
if (should_notify_peers || should_notify_rtnl) {
if (!rtnl_trylock())
return;
if (should_notify_peers) {
bond->send_peer_notif--;
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
}
if (should_notify_rtnl) {
bond_slave_state_notify(bond);
bond_slave_link_notify(bond);
}
rtnl_unlock();
}
}
static void bond_arp_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
bond_activebackup_arp_mon(bond);
else
bond_loadbalance_arp_mon(bond);
}
/*-------------------------- netdev event handling --------------------------*/
/* Change device name */
static int bond_event_changename(struct bonding *bond)
{
bond_remove_proc_entry(bond);
bond_create_proc_entry(bond);
bond_debug_reregister(bond);
return NOTIFY_DONE;
}
static int bond_master_netdev_event(unsigned long event,
struct net_device *bond_dev)
{
struct bonding *event_bond = netdev_priv(bond_dev);
netdev_dbg(bond_dev, "%s called\n", __func__);
switch (event) {
case NETDEV_CHANGENAME:
return bond_event_changename(event_bond);
case NETDEV_UNREGISTER:
bond_remove_proc_entry(event_bond);
#ifdef CONFIG_XFRM_OFFLOAD
xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
#endif /* CONFIG_XFRM_OFFLOAD */
break;
case NETDEV_REGISTER:
bond_create_proc_entry(event_bond);
break;
default:
break;
}
return NOTIFY_DONE;
}
static int bond_slave_netdev_event(unsigned long event,
struct net_device *slave_dev)
{
struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
struct bonding *bond;
struct net_device *bond_dev;
/* A netdev event can be generated while enslaving a device
* before netdev_rx_handler_register is called in which case
* slave will be NULL
*/
if (!slave) {
netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__);
return NOTIFY_DONE;
}
bond_dev = slave->bond->dev;
bond = slave->bond;
primary = rtnl_dereference(bond->primary_slave);
slave_dbg(bond_dev, slave_dev, "%s called\n", __func__);
switch (event) {
case NETDEV_UNREGISTER:
if (bond_dev->type != ARPHRD_ETHER)
bond_release_and_destroy(bond_dev, slave_dev);
else
__bond_release_one(bond_dev, slave_dev, false, true);
break;
case NETDEV_UP:
case NETDEV_CHANGE:
/* For 802.3ad mode only:
* Getting invalid Speed/Duplex values here will put slave
* in weird state. Mark it as link-fail if the link was
* previously up or link-down if it hasn't yet come up, and
* let link-monitoring (miimon) set it right when correct
* speeds/duplex are available.
*/
if (bond_update_speed_duplex(slave) &&
BOND_MODE(bond) == BOND_MODE_8023AD) {
if (slave->last_link_up)
slave->link = BOND_LINK_FAIL;
else
slave->link = BOND_LINK_DOWN;
}
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_adapter_speed_duplex_changed(slave);
fallthrough;
case NETDEV_DOWN:
/* Refresh slave-array if applicable!
* If the setup does not use miimon or arpmon (mode-specific!),
* then these events will not cause the slave-array to be
* refreshed. This will cause xmit to use a slave that is not
 * usable. Avoid such a situation by refreshing the array at these
* events. If these (miimon/arpmon) parameters are configured
* then array gets refreshed twice and that should be fine!
*/
if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
break;
case NETDEV_CHANGEMTU:
/* TODO: Should slaves be allowed to
* independently alter their MTU? For
* an active-backup bond, slaves need
* not be the same type of device, so
* MTUs may vary. For other modes,
* slaves arguably should have the
* same MTUs. To do this, we'd need to
* take over the slave's change_mtu
* function for the duration of their
* servitude.
*/
break;
case NETDEV_CHANGENAME:
/* we don't care if we don't have primary set */
if (!bond_uses_primary(bond) ||
!bond->params.primary[0])
break;
if (slave == primary) {
/* slave's name changed - it's no longer the primary */
RCU_INIT_POINTER(bond->primary_slave, NULL);
} else if (!strcmp(slave_dev->name, bond->params.primary)) {
/* we have a new primary slave */
rcu_assign_pointer(bond->primary_slave, slave);
} else { /* we didn't change primary - exit */
break;
}
netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
primary ? slave_dev->name : "none");
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
break;
case NETDEV_FEAT_CHANGE:
if (!bond->notifier_ctx) {
bond->notifier_ctx = true;
bond_compute_features(bond);
bond->notifier_ctx = false;
}
break;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
call_netdevice_notifiers(event, slave->bond->dev);
break;
case NETDEV_XDP_FEAT_CHANGE:
bond_xdp_set_features(bond_dev);
break;
default:
break;
}
return NOTIFY_DONE;
}
/* bond_netdev_event: handle netdev notifier chain events.
*
* This function receives events for the netdev chain. The caller (an
* ioctl handler calling blocking_notifier_call_chain) holds the necessary
* locks for us to safely manipulate the slave devices (RTNL lock,
* dev_probe_lock).
*/
static int bond_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
netdev_dbg(event_dev, "%s received %s\n",
__func__, netdev_cmd_to_name(event));
if (!(event_dev->priv_flags & IFF_BONDING))
return NOTIFY_DONE;
if (event_dev->flags & IFF_MASTER) {
int ret;
ret = bond_master_netdev_event(event, event_dev);
if (ret != NOTIFY_DONE)
return ret;
}
if (event_dev->flags & IFF_SLAVE)
return bond_slave_netdev_event(event, event_dev);
return NOTIFY_DONE;
}
static struct notifier_block bond_netdev_notifier = {
.notifier_call = bond_netdev_event,
};
/*---------------------------- Hashing Policies -----------------------------*/
/* Helper to access data in a packet, with or without a backing skb.
* If skb is given the data is linearized if necessary via pskb_may_pull.
*/
static inline const void *bond_pull_data(struct sk_buff *skb,
const void *data, int hlen, int n)
{
if (likely(n <= hlen))
return data;
else if (skb && likely(pskb_may_pull(skb, n)))
return skb->head;
return NULL;
}
/* L2 hash helper */
static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
{
struct ethhdr *ep;
data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
if (!data)
return 0;
ep = (struct ethhdr *)(data + mhoff);
return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
}
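/* Illustrative values: h_dest ending in 0x11, h_source ending in 0x22
 * and h_proto = ETH_P_IP (0x0800) give 0x11 ^ 0x22 ^ 0x0800 = 0x0833;
 * only the low byte of each MAC and the ethertype feed the L2 hash.
 */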
static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
{
const struct ipv6hdr *iph6;
const struct iphdr *iph;
if (l2_proto == htons(ETH_P_IP)) {
data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph));
if (!data)
return false;
iph = (const struct iphdr *)(data + *nhoff);
iph_to_flow_copy_v4addrs(fk, iph);
*nhoff += iph->ihl << 2;
if (!ip_is_fragment(iph))
*ip_proto = iph->protocol;
} else if (l2_proto == htons(ETH_P_IPV6)) {
data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6));
if (!data)
return false;
iph6 = (const struct ipv6hdr *)(data + *nhoff);
iph_to_flow_copy_v6addrs(fk, iph6);
*nhoff += sizeof(*iph6);
*ip_proto = iph6->nexthdr;
} else {
return false;
}
if (l34 && *ip_proto >= 0)
fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
return true;
}
static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
{
u32 srcmac_vendor = 0, srcmac_dev = 0;
struct ethhdr *mac_hdr;
u16 vlan = 0;
int i;
data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
if (!data)
return 0;
mac_hdr = (struct ethhdr *)(data + mhoff);
for (i = 0; i < 3; i++)
srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
for (i = 3; i < ETH_ALEN; i++)
srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i];
if (skb && skb_vlan_tag_present(skb))
vlan = skb_vlan_tag_get(skb);
return vlan ^ srcmac_vendor ^ srcmac_dev;
}
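/* Illustrative values (MAC and tag made up): source MAC
 * 00:1b:21:aa:bb:cc with a VLAN tag of 10 (no priority bits) yields
 * srcmac_vendor = 0x001b21, srcmac_dev = 0xaabbcc and a hash of
 * 10 ^ 0x001b21 ^ 0xaabbcc = 0xaaa0e7.
 */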
/* Extract the appropriate headers based on bond's xmit policy */
static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data,
__be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk)
{
bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
int ip_proto = -1;
switch (bond->params.xmit_policy) {
case BOND_XMIT_POLICY_ENCAP23:
case BOND_XMIT_POLICY_ENCAP34:
memset(fk, 0, sizeof(*fk));
return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
fk, data, l2_proto, nhoff, hlen, 0);
default:
break;
}
fk->ports.ports = 0;
memset(&fk->icmp, 0, sizeof(fk->icmp));
if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34))
return false;
/* ICMP error packets contain at least 8 bytes of the header
* of the packet which generated the error. Use this information
* to correlate ICMP error packets within the same flow which
* generated the error.
*/
if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) {
skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen);
if (ip_proto == IPPROTO_ICMP) {
if (!icmp_is_err(fk->icmp.type))
return true;
nhoff += sizeof(struct icmphdr);
} else if (ip_proto == IPPROTO_ICMPV6) {
if (!icmpv6_is_err(fk->icmp.type))
return true;
nhoff += sizeof(struct icmp6hdr);
}
return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34);
}
return true;
}
static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
{
hash ^= (__force u32)flow_get_u32_dst(flow) ^
(__force u32)flow_get_u32_src(flow);
hash ^= (hash >> 16);
hash ^= (hash >> 8);
/* discard lowest hash bit to deal with the common even ports pattern */
if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
xmit_policy == BOND_XMIT_POLICY_ENCAP34)
return hash >> 1;
return hash;
}
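/* Illustrative fold (made-up numbers): starting from hash = 0x00001234
 * (L4 ports) with src ^ dst = 0x0a000001 ^ 0x0a000002 = 0x00000003:
 *	hash = 0x00001234 ^ 0x00000003		-> 0x00001237
 *	hash ^= hash >> 16			-> 0x00001237
 *	hash ^= hash >> 8			-> 0x00001225
 *	hash >> 1 (layer3+4 / encap3+4 only)	-> 0x00000912
 */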
/* Generate hash based on xmit policy. If @skb is given it is used to linearize
* the data as required, but this function can be used without it if the data is
* known to be linear (e.g. with xdp_buff).
*/
static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data,
__be16 l2_proto, int mhoff, int nhoff, int hlen)
{
struct flow_keys flow;
u32 hash;
if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
return bond_vlan_srcmac_hash(skb, data, mhoff, hlen);
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
!bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow))
return bond_eth_hash(skb, data, mhoff, hlen);
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
hash = bond_eth_hash(skb, data, mhoff, hlen);
} else {
if (flow.icmp.id)
memcpy(&hash, &flow.icmp, sizeof(hash));
else
memcpy(&hash, &flow.ports.ports, sizeof(hash));
}
return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
}
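/* Summary of what feeds the hash above, derived from the code paths:
 * layer2 uses only the Ethernet header; layer2+3 and encap2+3 mix the
 * Ethernet hash with the src/dst IP addresses; layer3+4 and encap3+4
 * mix the IP addresses with the L4 ports (or the ICMP id for ICMP
 * flows); the encap policies run the full flow dissector via
 * __skb_flow_dissect() so tunneled traffic can hash on inner headers;
 * vlan+srcmac hashes only the source MAC and VLAN tag.
 */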
/**
* bond_xmit_hash - generate a hash value based on the xmit policy
* @bond: bonding device
* @skb: buffer to use for headers
*
* This function will extract the necessary headers from the skb buffer and use
* them to generate a hash based on the xmit_policy set in the bonding device
*/
u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
{
if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
skb->l4_hash)
return skb->hash;
return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
0, skb_network_offset(skb),
skb_headlen(skb));
}
/**
* bond_xmit_hash_xdp - generate a hash value based on the xmit policy
* @bond: bonding device
* @xdp: buffer to use for headers
*
* The XDP variant of bond_xmit_hash.
*/
static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp)
{
struct ethhdr *eth;
if (xdp->data + sizeof(struct ethhdr) > xdp->data_end)
return 0;
eth = (struct ethhdr *)xdp->data;
return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0,
sizeof(struct ethhdr), xdp->data_end - xdp->data);
}
/*-------------------------- Device entry points ----------------------------*/
void bond_work_init_all(struct bonding *bond)
{
INIT_DELAYED_WORK(&bond->mcast_work,
bond_resend_igmp_join_requests_delayed);
INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
}
static void bond_work_cancel_all(struct bonding *bond)
{
cancel_delayed_work_sync(&bond->mii_work);
cancel_delayed_work_sync(&bond->arp_work);
cancel_delayed_work_sync(&bond->alb_work);
cancel_delayed_work_sync(&bond->ad_work);
cancel_delayed_work_sync(&bond->mcast_work);
cancel_delayed_work_sync(&bond->slave_arr_work);
}
static int bond_open(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct list_head *iter;
struct slave *slave;
if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
bond->rr_tx_counter = alloc_percpu(u32);
if (!bond->rr_tx_counter)
return -ENOMEM;
}
/* reset slave->backup and slave->inactive */
if (bond_has_slaves(bond)) {
bond_for_each_slave(bond, slave, iter) {
if (bond_uses_primary(bond) &&
slave != rcu_access_pointer(bond->curr_active_slave)) {
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
} else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
bond_set_slave_active_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
}
}
}
if (bond_is_lb(bond)) {
/* bond_alb_initialize must be called before the timer
* is started.
*/
if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
return -ENOMEM;
if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
queue_delayed_work(bond->wq, &bond->alb_work, 0);
}
if (bond->params.miimon) /* link check interval, in milliseconds. */
queue_delayed_work(bond->wq, &bond->mii_work, 0);
if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
queue_delayed_work(bond->wq, &bond->arp_work, 0);
bond->recv_probe = bond_rcv_validate;
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
queue_delayed_work(bond->wq, &bond->ad_work, 0);
/* register to receive LACPDUs */
bond->recv_probe = bond_3ad_lacpdu_recv;
bond_3ad_initiate_agg_selection(bond, 1);
bond_for_each_slave(bond, slave, iter)
dev_mc_add(slave->dev, lacpdu_mcast_addr);
}
if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
return 0;
}
static int bond_close(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
bond_work_cancel_all(bond);
bond->send_peer_notif = 0;
if (bond_is_lb(bond))
bond_alb_deinitialize(bond);
bond->recv_probe = NULL;
if (bond_uses_primary(bond)) {
rcu_read_lock();
slave = rcu_dereference(bond->curr_active_slave);
if (slave)
bond_hw_addr_flush(bond_dev, slave->dev);
rcu_read_unlock();
} else {
struct list_head *iter;
bond_for_each_slave(bond, slave, iter)
bond_hw_addr_flush(bond_dev, slave->dev);
}
return 0;
}
/* fold stats, assuming all rtnl_link_stats64 fields are u64, while
 * allowing for drivers that can provide 32bit values only.
*/
static void bond_fold_stats(struct rtnl_link_stats64 *_res,
const struct rtnl_link_stats64 *_new,
const struct rtnl_link_stats64 *_old)
{
const u64 *new = (const u64 *)_new;
const u64 *old = (const u64 *)_old;
u64 *res = (u64 *)_res;
int i;
for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
u64 nv = new[i];
u64 ov = old[i];
s64 delta = nv - ov;
/* detects if this particular field is 32bit only */
if (((nv | ov) >> 32) == 0)
delta = (s64)(s32)((u32)nv - (u32)ov);
/* filter anomalies, some drivers reset their stats
* at down/up events.
*/
if (delta > 0)
res[i] += delta;
}
}
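/* Illustrative wrap handling (made-up counters): if a 32bit driver
 * counter wraps from ov = 0xfffffff0 to nv = 0x00000005, then
 * (nv | ov) >> 32 == 0, so the delta is recomputed in 32 bits:
 *	(s64)(s32)((u32)5 - (u32)0xfffffff0) = 21
 * and 21 is accumulated instead of discarding a huge negative delta.
 */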
#ifdef CONFIG_LOCKDEP
static int bond_get_lowest_level_rcu(struct net_device *dev)
{
struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
int cur = 0, max = 0;
now = dev;
iter = &dev->adj_list.lower;
while (1) {
next = NULL;
while (1) {
ldev = netdev_next_lower_dev_rcu(now, &iter);
if (!ldev)
break;
next = ldev;
niter = &ldev->adj_list.lower;
dev_stack[cur] = now;
iter_stack[cur++] = iter;
if (max <= cur)
max = cur;
break;
}
if (!next) {
if (!cur)
return max;
next = dev_stack[--cur];
niter = iter_stack[cur];
}
now = next;
iter = niter;
}
return max;
}
#endif
static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{
struct bonding *bond = netdev_priv(bond_dev);
struct rtnl_link_stats64 temp;
struct list_head *iter;
struct slave *slave;
int nest_level = 0;
rcu_read_lock();
#ifdef CONFIG_LOCKDEP
nest_level = bond_get_lowest_level_rcu(bond_dev);
#endif
spin_lock_nested(&bond->stats_lock, nest_level);
memcpy(stats, &bond->bond_stats, sizeof(*stats));
bond_for_each_slave_rcu(bond, slave, iter) {
const struct rtnl_link_stats64 *new =
dev_get_stats(slave->dev, &temp);
bond_fold_stats(stats, new, &slave->slave_stats);
/* save off the slave stats for the next run */
memcpy(&slave->slave_stats, new, sizeof(*new));
}
memcpy(&bond->bond_stats, stats, sizeof(*stats));
spin_unlock(&bond->stats_lock);
rcu_read_unlock();
}
static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
{
struct bonding *bond = netdev_priv(bond_dev);
struct mii_ioctl_data *mii = NULL;
netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
switch (cmd) {
case SIOCGMIIPHY:
mii = if_mii(ifr);
if (!mii)
return -EINVAL;
mii->phy_id = 0;
fallthrough;
case SIOCGMIIREG:
/* We do this again just in case we were called by SIOCGMIIREG
* instead of SIOCGMIIPHY.
*/
mii = if_mii(ifr);
if (!mii)
return -EINVAL;
if (mii->reg_num == 1) {
mii->val_out = 0;
if (netif_carrier_ok(bond->dev))
mii->val_out = BMSR_LSTATUS;
}
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
{
struct bonding *bond = netdev_priv(bond_dev);
struct net_device *slave_dev = NULL;
struct ifbond k_binfo;
struct ifbond __user *u_binfo = NULL;
struct ifslave k_sinfo;
struct ifslave __user *u_sinfo = NULL;
struct bond_opt_value newval;
struct net *net;
int res = 0;
netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
switch (cmd) {
case SIOCBONDINFOQUERY:
u_binfo = (struct ifbond __user *)ifr->ifr_data;
if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
return -EFAULT;
bond_info_query(bond_dev, &k_binfo);
if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
return -EFAULT;
return 0;
case SIOCBONDSLAVEINFOQUERY:
u_sinfo = (struct ifslave __user *)ifr->ifr_data;
if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
return -EFAULT;
res = bond_slave_info_query(bond_dev, &k_sinfo);
if (res == 0 &&
copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
return -EFAULT;
return res;
default:
break;
}
net = dev_net(bond_dev);
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
if (!slave_dev)
return -ENODEV;
switch (cmd) {
case SIOCBONDENSLAVE:
res = bond_enslave(bond_dev, slave_dev, NULL);
break;
case SIOCBONDRELEASE:
res = bond_release(bond_dev, slave_dev);
break;
case SIOCBONDSETHWADDR:
res = bond_set_dev_addr(bond_dev, slave_dev);
break;
case SIOCBONDCHANGEACTIVE:
bond_opt_initstr(&newval, slave_dev->name);
res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
&newval);
break;
default:
res = -EOPNOTSUPP;
}
return res;
}
static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr,
void __user *data, int cmd)
{
struct ifreq ifrdata = { .ifr_data = data };
switch (cmd) {
case BOND_INFO_QUERY_OLD:
return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY);
case BOND_SLAVE_INFO_QUERY_OLD:
return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY);
case BOND_ENSLAVE_OLD:
return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE);
case BOND_RELEASE_OLD:
return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE);
case BOND_SETHWADDR_OLD:
return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR);
case BOND_CHANGE_ACTIVE_OLD:
return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE);
}
return -EOPNOTSUPP;
}
static void bond_change_rx_flags(struct net_device *bond_dev, int change)
{
struct bonding *bond = netdev_priv(bond_dev);
if (change & IFF_PROMISC)
bond_set_promiscuity(bond,
bond_dev->flags & IFF_PROMISC ? 1 : -1);
if (change & IFF_ALLMULTI)
bond_set_allmulti(bond,
bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
}
static void bond_set_rx_mode(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct list_head *iter;
struct slave *slave;
rcu_read_lock();
if (bond_uses_primary(bond)) {
slave = rcu_dereference(bond->curr_active_slave);
if (slave) {
dev_uc_sync(slave->dev, bond_dev);
dev_mc_sync(slave->dev, bond_dev);
}
} else {
bond_for_each_slave_rcu(bond, slave, iter) {
dev_uc_sync_multiple(slave->dev, bond_dev);
dev_mc_sync_multiple(slave->dev, bond_dev);
}
}
rcu_read_unlock();
}
static int bond_neigh_init(struct neighbour *n)
{
struct bonding *bond = netdev_priv(n->dev);
const struct net_device_ops *slave_ops;
struct neigh_parms parms;
struct slave *slave;
int ret = 0;
rcu_read_lock();
slave = bond_first_slave_rcu(bond);
if (!slave)
goto out;
slave_ops = slave->dev->netdev_ops;
if (!slave_ops->ndo_neigh_setup)
goto out;
/* TODO: find another way [1] to implement this.
* Passing a zeroed structure is fragile,
* but at least we do not pass garbage.
*
* [1] One way would be that ndo_neigh_setup() never touch
* struct neigh_parms, but propagate the new neigh_setup()
* back to ___neigh_create() / neigh_parms_alloc()
*/
memset(&parms, 0, sizeof(parms));
ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
if (ret)
goto out;
if (parms.neigh_setup)
ret = parms.neigh_setup(n);
out:
rcu_read_unlock();
return ret;
}
/* The bonding ndo_neigh_setup is called at init time before any
 * slave exists. So we must declare a proxy setup function which will
* be used at run time to resolve the actual slave neigh param setup.
*
* It's also called by master devices (such as vlans) to setup their
* underlying devices. In that case - do nothing, we're already set up from
* our init.
*/
static int bond_neigh_setup(struct net_device *dev,
struct neigh_parms *parms)
{
/* modify only our neigh_parms */
if (parms->dev == dev)
parms->neigh_setup = bond_neigh_init;
return 0;
}
/* Change the MTU of all of a master's slaves to match the master */
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *rollback_slave;
struct list_head *iter;
int res = 0;
netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
bond_for_each_slave(bond, slave, iter) {
slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
slave, slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
if (res) {
/* If we failed to set the slave's mtu to the new value
* we must abort the operation even in ACTIVE_BACKUP
* mode, because if we allow the backup slaves to have
* different mtu values than the active slave we'll
* need to change their mtu when doing a failover. That
* means changing their mtu from timer context, which
* is probably not a good idea.
*/
slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
res, new_mtu);
goto unwind;
}
}
bond_dev->mtu = new_mtu;
return 0;
unwind:
/* unwind from head to the slave that failed */
bond_for_each_slave(bond, rollback_slave, iter) {
int tmp_res;
if (rollback_slave == slave)
break;
tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
if (tmp_res)
slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
tmp_res);
}
return res;
}
/* Change HW address
*
* Note that many devices must be down to change the HW address, and
* downing the master releases all slaves. We can make bonds full of
* bonding devices to test this, however.
*/
static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *rollback_slave;
struct sockaddr_storage *ss = addr, tmp_ss;
struct list_head *iter;
int res = 0;
if (BOND_MODE(bond) == BOND_MODE_ALB)
return bond_alb_set_mac_address(bond_dev, addr);
netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);
/* If fail_over_mac is enabled, do nothing and return success.
* Returning an error causes ifenslave to fail.
*/
if (bond->params.fail_over_mac &&
BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
return 0;
if (!is_valid_ether_addr(ss->__data))
return -EADDRNOTAVAIL;
bond_for_each_slave(bond, slave, iter) {
slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n",
__func__, slave);
res = dev_set_mac_address(slave->dev, addr, NULL);
if (res) {
/* TODO: consider downing the slave
* and retry ?
* User should expect communications
* breakage anyway until ARP finish
* updating, so...
*/
slave_dbg(bond_dev, slave->dev, "%s: err %d\n",
__func__, res);
goto unwind;
}
}
/* success */
dev_addr_set(bond_dev, ss->__data);
return 0;
unwind:
memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
tmp_ss.ss_family = bond_dev->type;
/* unwind from head to the slave that failed */
bond_for_each_slave(bond, rollback_slave, iter) {
int tmp_res;
if (rollback_slave == slave)
break;
tmp_res = dev_set_mac_address(rollback_slave->dev,
(struct sockaddr *)&tmp_ss, NULL);
if (tmp_res) {
slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
__func__, tmp_res);
}
}
return res;
}
/**
* bond_get_slave_by_id - get xmit slave with slave_id
* @bond: bonding device that is transmitting
* @slave_id: slave id up to slave_cnt-1 through which to transmit
*
* This function tries to get slave with slave_id but in case
* it fails, it tries to find the first available slave for transmission.
*/
static struct slave *bond_get_slave_by_id(struct bonding *bond,
int slave_id)
{
struct list_head *iter;
struct slave *slave;
int i = slave_id;
/* Here we start from the slave with slave_id */
bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0) {
if (bond_slave_can_tx(slave))
return slave;
}
}
/* Here we start from the first slave up to slave_id */
i = slave_id;
bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0)
break;
if (bond_slave_can_tx(slave))
return slave;
}
/* no slave that can tx has been found */
return NULL;
}
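/* Illustrative walk (sizes made up): with five slaves and slave_id = 3,
 * the first loop tests slaves 3 and 4 and the second loop tests slaves
 * 0, 1 and 2, so the search starts at slave_id and wraps around until a
 * tx-capable slave is found.
 */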
/**
* bond_rr_gen_slave_id - generate slave id based on packets_per_slave
* @bond: bonding device to use
*
* Based on the value of the bonding device's packets_per_slave parameter
* this function generates a slave id, which is usually used as the next
* slave to transmit through.
*/
static u32 bond_rr_gen_slave_id(struct bonding *bond)
{
u32 slave_id;
struct reciprocal_value reciprocal_packets_per_slave;
int packets_per_slave = bond->params.packets_per_slave;
switch (packets_per_slave) {
case 0:
slave_id = get_random_u32();
break;
case 1:
slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
break;
default:
reciprocal_packets_per_slave =
bond->params.reciprocal_packets_per_slave;
slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
slave_id = reciprocal_divide(slave_id,
reciprocal_packets_per_slave);
break;
}
return slave_id;
}
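/* Worked example, assuming the per-CPU counter starts at zero: with
 * packets_per_slave == 4 the counter returns 1, 2, 3, ... and
 * reciprocal_divide() maps it to 0, 0, 0, 1, 1, 1, 1, 2, ..., so
 * roughly four consecutive packets share the same generated id.
 * packets_per_slave == 0 picks a random id per packet and == 1
 * advances the id on every packet.
 */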
static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond,
struct sk_buff *skb)
{
struct slave *slave;
int slave_cnt;
u32 slave_id;
/* Start with the curr_active_slave that joined the bond as the
* default for sending IGMP traffic. For failover purposes one
* needs to maintain some consistency for the interface that will
* send the join/membership reports. The curr_active_slave found
* will send all of this type of traffic.
*/
if (skb->protocol == htons(ETH_P_IP)) {
int noff = skb_network_offset(skb);
struct iphdr *iph;
if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
goto non_igmp;
iph = ip_hdr(skb);
if (iph->protocol == IPPROTO_IGMP) {
slave = rcu_dereference(bond->curr_active_slave);
if (slave)
return slave;
return bond_get_slave_by_id(bond, 0);
}
}
non_igmp:
slave_cnt = READ_ONCE(bond->slave_cnt);
if (likely(slave_cnt)) {
slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
return bond_get_slave_by_id(bond, slave_id);
}
return NULL;
}
static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond,
struct xdp_buff *xdp)
{
struct slave *slave;
int slave_cnt;
u32 slave_id;
const struct ethhdr *eth;
void *data = xdp->data;
if (data + sizeof(struct ethhdr) > xdp->data_end)
goto non_igmp;
eth = (struct ethhdr *)data;
data += sizeof(struct ethhdr);
/* See comment on IGMP in bond_xmit_roundrobin_slave_get() */
if (eth->h_proto == htons(ETH_P_IP)) {
const struct iphdr *iph;
if (data + sizeof(struct iphdr) > xdp->data_end)
goto non_igmp;
iph = (struct iphdr *)data;
if (iph->protocol == IPPROTO_IGMP) {
slave = rcu_dereference(bond->curr_active_slave);
if (slave)
return slave;
return bond_get_slave_by_id(bond, 0);
}
}
non_igmp:
slave_cnt = READ_ONCE(bond->slave_cnt);
if (likely(slave_cnt)) {
slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
return bond_get_slave_by_id(bond, slave_id);
}
return NULL;
}
static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
slave = bond_xmit_roundrobin_slave_get(bond, skb);
if (likely(slave))
return bond_dev_queue_xmit(bond, skb, slave->dev);
return bond_tx_drop(bond_dev, skb);
}
static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond)
{
return rcu_dereference(bond->curr_active_slave);
}
/* In active-backup mode, we know that bond->curr_active_slave is always valid if
* the bond has a usable interface.
*/
static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
slave = bond_xmit_activebackup_slave_get(bond);
if (slave)
return bond_dev_queue_xmit(bond, skb, slave->dev);
return bond_tx_drop(bond_dev, skb);
}
/* Use this to update the slave array when (a) it's not appropriate to
 * update the slave array right away (note that bond_update_slave_arr()
 * may sleep) and/or (b) RTNL is not held.
*/
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
{
queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
}
/* Slave array work handler. Holds only RTNL */
static void bond_slave_arr_handler(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
slave_arr_work.work);
int ret;
if (!rtnl_trylock())
goto err;
ret = bond_update_slave_arr(bond, NULL);
rtnl_unlock();
if (ret) {
pr_warn_ratelimited("Failed to update slave array from WT\n");
goto err;
}
return;
err:
bond_slave_arr_work_rearm(bond, 1);
}
static void bond_skip_slave(struct bond_up_slave *slaves,
struct slave *skipslave)
{
int idx;
/* Rare situation where the caller has asked to skip a specific
 * slave but allocation failed (most likely!). Note that this is
 * only possible when the call is initiated from
 * __bond_release_one(). In this situation, overwrite the
 * skipslave entry in the array with the last entry from the
 * array so that the xmit path cannot choose this to-be-skipped
 * slave to send a packet out.
*/
for (idx = 0; slaves && idx < slaves->count; idx++) {
if (skipslave == slaves->arr[idx]) {
slaves->arr[idx] =
slaves->arr[slaves->count - 1];
slaves->count--;
break;
}
}
}
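/* For illustration: if arr is [s0, s1, s2, s3] with count == 4 and
 * skipslave == s1, the entry is overwritten by the last element,
 * leaving [s0, s3, s2] with count == 3. Ordering is not preserved,
 * which is acceptable because slaves are picked by hash modulo count.
 */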
static void bond_set_slave_arr(struct bonding *bond,
struct bond_up_slave *usable_slaves,
struct bond_up_slave *all_slaves)
{
struct bond_up_slave *usable, *all;
usable = rtnl_dereference(bond->usable_slaves);
rcu_assign_pointer(bond->usable_slaves, usable_slaves);
kfree_rcu(usable, rcu);
all = rtnl_dereference(bond->all_slaves);
rcu_assign_pointer(bond->all_slaves, all_slaves);
kfree_rcu(all, rcu);
}
static void bond_reset_slave_arr(struct bonding *bond)
{
bond_set_slave_arr(bond, NULL, NULL);
}
/* Build the usable slaves array in control path for modes that use xmit-hash
* to determine the slave interface -
* (a) BOND_MODE_8023AD
* (b) BOND_MODE_XOR
* (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
*
* The caller is expected to hold RTNL only and NO other lock!
*/
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
{
struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
struct slave *slave;
struct list_head *iter;
int agg_id = 0;
int ret = 0;
might_sleep();
usable_slaves = kzalloc(struct_size(usable_slaves, arr,
bond->slave_cnt), GFP_KERNEL);
all_slaves = kzalloc(struct_size(all_slaves, arr,
bond->slave_cnt), GFP_KERNEL);
if (!usable_slaves || !all_slaves) {
ret = -ENOMEM;
goto out;
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
spin_lock_bh(&bond->mode_lock);
if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
spin_unlock_bh(&bond->mode_lock);
pr_debug("bond_3ad_get_active_agg_info failed\n");
/* No active aggregator means it's not safe to use
* the previous array.
*/
bond_reset_slave_arr(bond);
goto out;
}
spin_unlock_bh(&bond->mode_lock);
agg_id = ad_info.aggregator_id;
}
bond_for_each_slave(bond, slave, iter) {
if (skipslave == slave)
continue;
all_slaves->arr[all_slaves->count++] = slave;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct aggregator *agg;
agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (!agg || agg->aggregator_identifier != agg_id)
continue;
}
if (!bond_slave_can_tx(slave))
continue;
slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
usable_slaves->count);
usable_slaves->arr[usable_slaves->count++] = slave;
}
bond_set_slave_arr(bond, usable_slaves, all_slaves);
return ret;
out:
if (ret != 0 && skipslave) {
bond_skip_slave(rtnl_dereference(bond->all_slaves),
skipslave);
bond_skip_slave(rtnl_dereference(bond->usable_slaves),
skipslave);
}
kfree_rcu(all_slaves, rcu);
kfree_rcu(usable_slaves, rcu);
return ret;
}
static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
struct sk_buff *skb,
struct bond_up_slave *slaves)
{
struct slave *slave;
unsigned int count;
u32 hash;
hash = bond_xmit_hash(bond, skb);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (unlikely(!count))
return NULL;
slave = slaves->arr[hash % count];
return slave;
}
static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
struct xdp_buff *xdp)
{
struct bond_up_slave *slaves;
unsigned int count;
u32 hash;
hash = bond_xmit_hash_xdp(bond, xdp);
slaves = rcu_dereference(bond->usable_slaves);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (unlikely(!count))
return NULL;
return slaves->arr[hash % count];
}
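/* Illustrative example of the selection step shared by the skb and
 * XDP paths: with count == 4 usable slaves and hash == 10, the chosen
 * index is 10 % 4 == 2, i.e. slaves->arr[2]. A given flow always
 * hashes to the same value, so it keeps landing on the same slave
 * while the array is unchanged.
 */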
/* Use this Xmit function for 3AD as well as XOR modes. The current
* usable slave array is formed in the control path. The xmit function
* just calculates hash and sends the packet out.
*/
static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
struct bond_up_slave *slaves;
struct slave *slave;
slaves = rcu_dereference(bond->usable_slaves);
slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
if (likely(slave))
return bond_dev_queue_xmit(bond, skb, slave->dev);
return bond_tx_drop(dev, skb);
}
/* in broadcast mode, we send everything to all usable interfaces. */
static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave = NULL;
struct list_head *iter;
bool xmit_suc = false;
bool skb_used = false;
bond_for_each_slave_rcu(bond, slave, iter) {
struct sk_buff *skb2;
if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
continue;
if (bond_is_last_slave(bond, slave)) {
skb2 = skb;
skb_used = true;
} else {
skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
bond_dev->name, __func__);
continue;
}
}
if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
xmit_suc = true;
}
if (!skb_used)
dev_kfree_skb_any(skb);
if (xmit_suc)
return NETDEV_TX_OK;
dev_core_stats_tx_dropped_inc(bond_dev);
return NET_XMIT_DROP;
}
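/* Ownership note: every slave except the last one in the list
 * transmits a clone; the last slave, if usable, is handed the original
 * skb (skb_used). If no slave took the original it is freed after the
 * loop, and NETDEV_TX_OK is returned when at least one transmit
 * succeeded.
 */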
/*------------------------- Device initialization ---------------------------*/
/* Lookup the slave that corresponds to a qid */
static inline int bond_slave_override(struct bonding *bond,
struct sk_buff *skb)
{
struct slave *slave = NULL;
struct list_head *iter;
if (!skb_rx_queue_recorded(skb))
return 1;
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave_rcu(bond, slave, iter) {
if (slave->queue_id == skb_get_queue_mapping(skb)) {
if (bond_slave_is_up(slave) &&
slave->link == BOND_LINK_UP) {
bond_dev_queue_xmit(bond, skb, slave->dev);
return 0;
}
/* If the slave isn't UP, use default transmit policy. */
break;
}
}
return 1;
}
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
/* This helper function exists to help dev_pick_tx get the correct
* destination queue. Using a helper function skips a call to
* skb_tx_hash and will put the skbs in the queue we expect on their
* way down to the bonding driver.
*/
u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
/* Save the original txq to restore before passing to the driver */
qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
if (unlikely(txq >= dev->real_num_tx_queues)) {
do {
txq -= dev->real_num_tx_queues;
} while (txq >= dev->real_num_tx_queues);
}
return txq;
}
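/* Worked example of the queue folding above: with
 * real_num_tx_queues == 16 and a recorded rx queue of 35, the loop
 * subtracts 16 twice and returns txq == 3, i.e. the same result as
 * 35 % 16 computed by repeated subtraction.
 */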
static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
struct sk_buff *skb,
bool all_slaves)
{
struct bonding *bond = netdev_priv(master_dev);
struct bond_up_slave *slaves;
struct slave *slave = NULL;
switch (BOND_MODE(bond)) {
case BOND_MODE_ROUNDROBIN:
slave = bond_xmit_roundrobin_slave_get(bond, skb);
break;
case BOND_MODE_ACTIVEBACKUP:
slave = bond_xmit_activebackup_slave_get(bond);
break;
case BOND_MODE_8023AD:
case BOND_MODE_XOR:
if (all_slaves)
slaves = rcu_dereference(bond->all_slaves);
else
slaves = rcu_dereference(bond->usable_slaves);
slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
break;
case BOND_MODE_BROADCAST:
break;
case BOND_MODE_ALB:
slave = bond_xmit_alb_slave_get(bond, skb);
break;
case BOND_MODE_TLB:
slave = bond_xmit_tlb_slave_get(bond, skb);
break;
default:
/* Should never happen, mode already checked */
WARN_ONCE(true, "Unknown bonding mode");
break;
}
if (slave)
return slave->dev;
return NULL;
}
static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
{
switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
if (ipv6_only_sock(sk) ||
ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
break;
}
fallthrough;
#endif
default: /* AF_INET */
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
break;
}
flow->ports.src = inet_sk(sk)->inet_sport;
flow->ports.dst = inet_sk(sk)->inet_dport;
}
/**
* bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
* @sk: socket to use for headers
*
* This function will extract the necessary field from the socket and use
* them to generate a hash based on the LAYER34 xmit_policy.
* Assumes that sk is a TCP or UDP socket.
*/
static u32 bond_sk_hash_l34(struct sock *sk)
{
struct flow_keys flow;
u32 hash;
bond_sk_to_flow(sk, &flow);
/* L4 */
memcpy(&hash, &flow.ports.ports, sizeof(hash));
/* L3 */
return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
}
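/* Illustrative example: for a TCP socket 10.0.0.1:46000 -> 10.0.0.2:443
 * the L4 half of the hash comes from the packed source/destination
 * ports and the L3 half from the two IPv4 addresses, mirroring what
 * BOND_XMIT_POLICY_LAYER34 computes for skbs so that socket-based and
 * packet-based selection agree for the same flow.
 */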
static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
struct sock *sk)
{
struct bond_up_slave *slaves;
struct slave *slave;
unsigned int count;
u32 hash;
slaves = rcu_dereference(bond->usable_slaves);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (unlikely(!count))
return NULL;
hash = bond_sk_hash_l34(sk);
slave = slaves->arr[hash % count];
return slave->dev;
}
static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
struct sock *sk)
{
struct bonding *bond = netdev_priv(dev);
struct net_device *lower = NULL;
rcu_read_lock();
if (bond_sk_check(bond))
lower = __bond_sk_get_lower_dev(bond, sk);
rcu_read_unlock();
return lower;
}
#if IS_ENABLED(CONFIG_TLS_DEVICE)
static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *dev)
{
struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
/* tls_netdev might become NULL, even if tls_is_skb_tx_device_offloaded
* was true, if tls_device_down is running in parallel, but it's OK,
* because bond_get_slave_by_dev has a NULL check.
*/
if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
return bond_dev_queue_xmit(bond, skb, tls_netdev);
return bond_tx_drop(dev, skb);
}
#endif
static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
if (bond_should_override_tx_queue(bond) &&
!bond_slave_override(bond, skb))
return NETDEV_TX_OK;
#if IS_ENABLED(CONFIG_TLS_DEVICE)
if (tls_is_skb_tx_device_offloaded(skb))
return bond_tls_device_xmit(bond, skb, dev);
#endif
switch (BOND_MODE(bond)) {
case BOND_MODE_ROUNDROBIN:
return bond_xmit_roundrobin(skb, dev);
case BOND_MODE_ACTIVEBACKUP:
return bond_xmit_activebackup(skb, dev);
case BOND_MODE_8023AD:
case BOND_MODE_XOR:
return bond_3ad_xor_xmit(skb, dev);
case BOND_MODE_BROADCAST:
return bond_xmit_broadcast(skb, dev);
case BOND_MODE_ALB:
return bond_alb_xmit(skb, dev);
case BOND_MODE_TLB:
return bond_tlb_xmit(skb, dev);
default:
/* Should never happen, mode already checked */
netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
WARN_ON_ONCE(1);
return bond_tx_drop(dev, skb);
}
}
static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
netdev_tx_t ret = NETDEV_TX_OK;
/* If we risk deadlock from transmitting this in the
* netpoll path, tell netpoll to queue the frame for later tx
*/
if (unlikely(is_netpoll_tx_blocked(dev)))
return NETDEV_TX_BUSY;
rcu_read_lock();
if (bond_has_slaves(bond))
ret = __bond_start_xmit(skb, dev);
else
ret = bond_tx_drop(dev, skb);
rcu_read_unlock();
return ret;
}
static struct net_device *
bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
/* Caller needs to hold rcu_read_lock() */
switch (BOND_MODE(bond)) {
case BOND_MODE_ROUNDROBIN:
slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp);
break;
case BOND_MODE_ACTIVEBACKUP:
slave = bond_xmit_activebackup_slave_get(bond);
break;
case BOND_MODE_8023AD:
case BOND_MODE_XOR:
slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp);
break;
default:
/* Should never happen. Mode guarded by bond_xdp_check() */
netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
WARN_ON_ONCE(1);
return NULL;
}
if (slave)
return slave->dev;
return NULL;
}
static int bond_xdp_xmit(struct net_device *bond_dev,
int n, struct xdp_frame **frames, u32 flags)
{
int nxmit, err = -ENXIO;
rcu_read_lock();
for (nxmit = 0; nxmit < n; nxmit++) {
struct xdp_frame *frame = frames[nxmit];
struct xdp_frame *frames1[] = {frame};
struct net_device *slave_dev;
struct xdp_buff xdp;
xdp_convert_frame_to_buff(frame, &xdp);
slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp);
if (!slave_dev) {
err = -ENXIO;
break;
}
err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags);
if (err < 1)
break;
}
rcu_read_unlock();
/* If an error happened on the first frame, pass the error up; otherwise
 * report the number of frames that were transmitted.
*/
if (err < 0)
return (nxmit == 0 ? err : nxmit);
return nxmit;
}
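/* Return contract, by example: for n == 8 frames where the sixth
 * transmit fails, nxmit is 5 and 5 is returned; only when the very
 * first frame fails with a negative errno (nxmit == 0) is that errno
 * propagated to the caller.
 */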
static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct bonding *bond = netdev_priv(dev);
struct list_head *iter;
struct slave *slave, *rollback_slave;
struct bpf_prog *old_prog;
struct netdev_bpf xdp = {
.command = XDP_SETUP_PROG,
.flags = 0,
.prog = prog,
.extack = extack,
};
int err;
ASSERT_RTNL();
if (!bond_xdp_check(bond))
return -EOPNOTSUPP;
old_prog = bond->xdp_prog;
bond->xdp_prog = prog;
bond_for_each_slave(bond, slave, iter) {
struct net_device *slave_dev = slave->dev;
if (!slave_dev->netdev_ops->ndo_bpf ||
!slave_dev->netdev_ops->ndo_xdp_xmit) {
SLAVE_NL_ERR(dev, slave_dev, extack,
"Slave device does not support XDP");
err = -EOPNOTSUPP;
goto err;
}
if (dev_xdp_prog_count(slave_dev) > 0) {
SLAVE_NL_ERR(dev, slave_dev, extack,
"Slave has XDP program loaded, please unload before enslaving");
err = -EOPNOTSUPP;
goto err;
}
err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
if (err < 0) {
/* ndo_bpf() sets extack error message */
slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
goto err;
}
if (prog)
bpf_prog_inc(prog);
}
if (prog) {
static_branch_inc(&bpf_master_redirect_enabled_key);
} else if (old_prog) {
bpf_prog_put(old_prog);
static_branch_dec(&bpf_master_redirect_enabled_key);
}
return 0;
err:
/* unwind the program changes */
bond->xdp_prog = old_prog;
xdp.prog = old_prog;
xdp.extack = NULL; /* do not overwrite original error */
bond_for_each_slave(bond, rollback_slave, iter) {
struct net_device *slave_dev = rollback_slave->dev;
int err_unwind;
if (slave == rollback_slave)
break;
err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
if (err_unwind < 0)
slave_err(dev, slave_dev,
"Error %d when unwinding XDP program change\n", err_unwind);
else if (xdp.prog)
bpf_prog_inc(xdp.prog);
}
return err;
}
static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
return bond_xdp_set(dev, xdp->prog, xdp->extack);
default:
return -EINVAL;
}
}
static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
{
if (speed == 0 || speed == SPEED_UNKNOWN)
speed = slave->speed;
else
speed = min(speed, slave->speed);
return speed;
}
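/* For illustration: in broadcast mode a bond with 1000 Mb/s and
 * 100 Mb/s slaves reports 100 Mb/s, since every frame must traverse
 * the slowest link, whereas the other modes sum the slave speeds in
 * bond_ethtool_get_link_ksettings() below.
 */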
/* Set the BOND_PHC_INDEX flag to notify user space */
static int bond_set_phc_index_flag(struct kernel_hwtstamp_config *kernel_cfg)
{
struct ifreq *ifr = kernel_cfg->ifr;
struct hwtstamp_config cfg;
if (kernel_cfg->copied_to_user) {
/* Lower device has a legacy implementation */
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)))
return -EFAULT;
} else {
kernel_cfg->flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
}
return 0;
}
static int bond_hwtstamp_get(struct net_device *dev,
struct kernel_hwtstamp_config *cfg)
{
struct bonding *bond = netdev_priv(dev);
struct net_device *real_dev;
int err;
real_dev = bond_option_active_slave_get_rcu(bond);
if (!real_dev)
return -EOPNOTSUPP;
err = generic_hwtstamp_get_lower(real_dev, cfg);
if (err)
return err;
return bond_set_phc_index_flag(cfg);
}
static int bond_hwtstamp_set(struct net_device *dev,
struct kernel_hwtstamp_config *cfg,
struct netlink_ext_ack *extack)
{
struct bonding *bond = netdev_priv(dev);
struct net_device *real_dev;
int err;
if (!(cfg->flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX))
return -EOPNOTSUPP;
real_dev = bond_option_active_slave_get_rcu(bond);
if (!real_dev)
return -EOPNOTSUPP;
err = generic_hwtstamp_set_lower(real_dev, cfg, extack);
if (err)
return err;
return bond_set_phc_index_flag(cfg);
}
static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
struct ethtool_link_ksettings *cmd)
{
struct bonding *bond = netdev_priv(bond_dev);
struct list_head *iter;
struct slave *slave;
u32 speed = 0;
cmd->base.duplex = DUPLEX_UNKNOWN;
cmd->base.port = PORT_OTHER;
/* Since bond_slave_can_tx returns false for all inactive or down slaves, we
 * do not need to check mode. Though link speed might not represent
 * the true receive or transmit bandwidth (not all modes are symmetric),
 * this is an accurate maximum.
*/
bond_for_each_slave(bond, slave, iter) {
if (bond_slave_can_tx(slave)) {
bond_update_speed_duplex(slave);
if (slave->speed != SPEED_UNKNOWN) {
if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
speed = bond_mode_bcast_speed(slave,
speed);
else
speed += slave->speed;
}
if (cmd->base.duplex == DUPLEX_UNKNOWN &&
slave->duplex != DUPLEX_UNKNOWN)
cmd->base.duplex = slave->duplex;
}
}
cmd->base.speed = speed ? : SPEED_UNKNOWN;
return 0;
}
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
BOND_ABI_VERSION);
}
static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
struct ethtool_ts_info *info)
{
struct bonding *bond = netdev_priv(bond_dev);
struct ethtool_ts_info ts_info;
const struct ethtool_ops *ops;
struct net_device *real_dev;
bool sw_tx_support = false;
struct phy_device *phydev;
struct list_head *iter;
struct slave *slave;
int ret = 0;
rcu_read_lock();
real_dev = bond_option_active_slave_get_rcu(bond);
dev_hold(real_dev);
rcu_read_unlock();
if (real_dev) {
ops = real_dev->ethtool_ops;
phydev = real_dev->phydev;
if (phy_has_tsinfo(phydev)) {
ret = phy_ts_info(phydev, info);
goto out;
} else if (ops->get_ts_info) {
ret = ops->get_ts_info(real_dev, info);
goto out;
}
} else {
/* Check if all slaves support software tx timestamping */
rcu_read_lock();
bond_for_each_slave_rcu(bond, slave, iter) {
ret = -1;
ops = slave->dev->ethtool_ops;
phydev = slave->dev->phydev;
if (phy_has_tsinfo(phydev))
ret = phy_ts_info(phydev, &ts_info);
else if (ops->get_ts_info)
ret = ops->get_ts_info(slave->dev, &ts_info);
if (!ret && (ts_info.so_timestamping & SOF_TIMESTAMPING_TX_SOFTWARE)) {
sw_tx_support = true;
continue;
}
sw_tx_support = false;
break;
}
rcu_read_unlock();
}
ret = 0;
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
if (sw_tx_support)
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
info->phc_index = -1;
out:
dev_put(real_dev);
return ret;
}
static const struct ethtool_ops bond_ethtool_ops = {
.get_drvinfo = bond_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = bond_ethtool_get_link_ksettings,
.get_ts_info = bond_ethtool_get_ts_info,
};
static const struct net_device_ops bond_netdev_ops = {
.ndo_init = bond_init,
.ndo_uninit = bond_uninit,
.ndo_open = bond_open,
.ndo_stop = bond_close,
.ndo_start_xmit = bond_start_xmit,
.ndo_select_queue = bond_select_queue,
.ndo_get_stats64 = bond_get_stats,
.ndo_eth_ioctl = bond_eth_ioctl,
.ndo_siocbond = bond_do_ioctl,
.ndo_siocdevprivate = bond_siocdevprivate,
.ndo_change_rx_flags = bond_change_rx_flags,
.ndo_set_rx_mode = bond_set_rx_mode,
.ndo_change_mtu = bond_change_mtu,
.ndo_set_mac_address = bond_set_mac_address,
.ndo_neigh_setup = bond_neigh_setup,
.ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_netpoll_setup = bond_netpoll_setup,
.ndo_netpoll_cleanup = bond_netpoll_cleanup,
.ndo_poll_controller = bond_poll_controller,
#endif
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
.ndo_fix_features = bond_fix_features,
.ndo_features_check = passthru_features_check,
.ndo_get_xmit_slave = bond_xmit_get_slave,
.ndo_sk_get_lower_dev = bond_sk_get_lower_dev,
.ndo_bpf = bond_xdp,
.ndo_xdp_xmit = bond_xdp_xmit,
.ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave,
.ndo_hwtstamp_get = bond_hwtstamp_get,
.ndo_hwtstamp_set = bond_hwtstamp_set,
};
static const struct device_type bond_type = {
.name = "bond",
};
static void bond_destructor(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
if (bond->wq)
destroy_workqueue(bond->wq);
free_percpu(bond->rr_tx_counter);
}
void bond_setup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
spin_lock_init(&bond->mode_lock);
bond->params = bonding_defaults;
/* Initialize pointers */
bond->dev = bond_dev;
/* Initialize the device entry points */
ether_setup(bond_dev);
bond_dev->max_mtu = ETH_MAX_MTU;
bond_dev->netdev_ops = &bond_netdev_ops;
bond_dev->ethtool_ops = &bond_ethtool_ops;
bond_dev->needs_free_netdev = true;
bond_dev->priv_destructor = bond_destructor;
SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
/* Initialize the device options */
bond_dev->flags |= IFF_MASTER;
bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
#ifdef CONFIG_XFRM_OFFLOAD
/* set up xfrm device ops (only supported in active-backup right now) */
bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
INIT_LIST_HEAD(&bond->ipsec_list);
spin_lock_init(&bond->ipsec_lock);
#endif /* CONFIG_XFRM_OFFLOAD */
/* don't acquire bond device's netif_tx_lock when transmitting */
bond_dev->features |= NETIF_F_LLTX;
/* By default, we declare the bond to be fully
 * VLAN hardware-acceleration capable. Special
 * care is taken in the various xmit functions
 * when there are slaves that are not hw-accel
 * capable.
*/
/* Don't allow bond devices to change network namespaces. */
bond_dev->features |= NETIF_F_NETNS_LOCAL;
bond_dev->hw_features = BOND_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_STAG_RX |
NETIF_F_HW_VLAN_STAG_FILTER;
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
bond_dev->features |= bond_dev->hw_features;
bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_features |= BOND_XFRM_FEATURES;
/* Only enable XFRM features if this is an active-backup config */
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
if (bond_xdp_check(bond))
bond_dev->xdp_features = NETDEV_XDP_ACT_MASK;
}
/* Destroy a bonding device.
* Must be under rtnl_lock when this function is called.
*/
static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct list_head *iter;
struct slave *slave;
bond_netpoll_cleanup(bond_dev);
/* Release the bonded slaves */
bond_for_each_slave(bond, slave, iter)
__bond_release_one(bond_dev, slave->dev, true, true);
netdev_info(bond_dev, "Released all slaves\n");
bond_set_slave_arr(bond, NULL, NULL);
list_del(&bond->bond_list);
bond_debug_unregister(bond);
}
/*------------------------- Module initialization ---------------------------*/
static int __init bond_check_params(struct bond_params *params)
{
int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
struct bond_opt_value newval;
const struct bond_opt_value *valptr;
int arp_all_targets_value = 0;
u16 ad_actor_sys_prio = 0;
u16 ad_user_port_key = 0;
__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
int arp_ip_count;
int bond_mode = BOND_MODE_ROUNDROBIN;
int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
int lacp_fast = 0;
int tlb_dynamic_lb;
/* Convert string parameters. */
if (mode) {
bond_opt_initstr(&newval, mode);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
if (!valptr) {
pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
return -EINVAL;
}
bond_mode = valptr->value;
}
if (xmit_hash_policy) {
if (bond_mode == BOND_MODE_ROUNDROBIN ||
bond_mode == BOND_MODE_ACTIVEBACKUP ||
bond_mode == BOND_MODE_BROADCAST) {
pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
bond_opt_initstr(&newval, xmit_hash_policy);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
&newval);
if (!valptr) {
pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
xmit_hash_policy);
return -EINVAL;
}
xmit_hashtype = valptr->value;
}
}
if (lacp_rate) {
if (bond_mode != BOND_MODE_8023AD) {
pr_info("lacp_rate param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
bond_opt_initstr(&newval, lacp_rate);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
&newval);
if (!valptr) {
pr_err("Error: Invalid lacp rate \"%s\"\n",
lacp_rate);
return -EINVAL;
}
lacp_fast = valptr->value;
}
}
if (ad_select) {
bond_opt_initstr(&newval, ad_select);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
&newval);
if (!valptr) {
pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
return -EINVAL;
}
params->ad_select = valptr->value;
if (bond_mode != BOND_MODE_8023AD)
pr_warn("ad_select param only affects 802.3ad mode\n");
} else {
params->ad_select = BOND_AD_STABLE;
}
if (max_bonds < 0) {
pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
max_bonds = BOND_DEFAULT_MAX_BONDS;
}
if (miimon < 0) {
pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
miimon, INT_MAX);
miimon = 0;
}
if (updelay < 0) {
pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
updelay, INT_MAX);
updelay = 0;
}
if (downdelay < 0) {
pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
downdelay, INT_MAX);
downdelay = 0;
}
if ((use_carrier != 0) && (use_carrier != 1)) {
pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
use_carrier);
use_carrier = 1;
}
if (num_peer_notif < 0 || num_peer_notif > 255) {
pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
num_peer_notif);
num_peer_notif = 1;
}
/* reset values for 802.3ad/TLB/ALB */
if (!bond_mode_uses_arp(bond_mode)) {
if (!miimon) {
pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
pr_warn("Forcing miimon to 100msec\n");
miimon = BOND_DEFAULT_MIIMON;
}
}
if (tx_queues < 1 || tx_queues > 255) {
pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
tx_queues, BOND_DEFAULT_TX_QUEUES);
tx_queues = BOND_DEFAULT_TX_QUEUES;
}
if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
all_slaves_active);
all_slaves_active = 0;
}
if (resend_igmp < 0 || resend_igmp > 255) {
pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
resend_igmp, BOND_DEFAULT_RESEND_IGMP);
resend_igmp = BOND_DEFAULT_RESEND_IGMP;
}
bond_opt_initval(&newval, packets_per_slave);
if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
packets_per_slave, USHRT_MAX);
packets_per_slave = 1;
}
if (bond_mode == BOND_MODE_ALB) {
pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
updelay);
}
if (!miimon) {
if (updelay || downdelay) {
/* just warn the user the up/down delay will have
* no effect since miimon is zero...
*/
pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
updelay, downdelay);
}
} else {
/* don't allow arp monitoring */
if (arp_interval) {
pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
miimon, arp_interval);
arp_interval = 0;
}
if ((updelay % miimon) != 0) {
pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
updelay, miimon, (updelay / miimon) * miimon);
}
updelay /= miimon;
if ((downdelay % miimon) != 0) {
pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
downdelay, miimon,
(downdelay / miimon) * miimon);
}
downdelay /= miimon;
}
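/* Worked example of the rounding above: with miimon == 100 and
 * updelay == 250, the warning reports a rounding to 200 ms and
 * updelay is stored as 2, i.e. in units of miimon intervals rather
 * than milliseconds.
 */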
if (arp_interval < 0) {
pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
arp_interval, INT_MAX);
arp_interval = 0;
}
for (arp_ip_count = 0, i = 0;
(arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
__be32 ip;
/* not a complete check, but good enough to catch mistakes */
if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
!bond_is_ip_target_ok(ip)) {
pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
arp_ip_target[i]);
arp_interval = 0;
} else {
if (bond_get_targets_ip(arp_target, ip) == -1)
arp_target[arp_ip_count++] = ip;
else
pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
&ip);
}
}
if (arp_interval && !arp_ip_count) {
/* don't allow arping if no arp_ip_target given... */
pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
arp_interval);
arp_interval = 0;
}
if (arp_validate) {
if (!arp_interval) {
pr_err("arp_validate requires arp_interval\n");
return -EINVAL;
}
bond_opt_initstr(&newval, arp_validate);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
&newval);
if (!valptr) {
pr_err("Error: invalid arp_validate \"%s\"\n",
arp_validate);
return -EINVAL;
}
arp_validate_value = valptr->value;
} else {
arp_validate_value = 0;
}
if (arp_all_targets) {
bond_opt_initstr(&newval, arp_all_targets);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
&newval);
if (!valptr) {
pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
arp_all_targets);
arp_all_targets_value = 0;
} else {
arp_all_targets_value = valptr->value;
}
}
if (miimon) {
pr_info("MII link monitoring set to %d ms\n", miimon);
} else if (arp_interval) {
valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
arp_validate_value);
pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
arp_interval, valptr->string, arp_ip_count);
for (i = 0; i < arp_ip_count; i++)
pr_cont(" %s", arp_ip_target[i]);
pr_cont("\n");
} else if (max_bonds) {
/* miimon and arp_interval not set, we need one so things
* work as expected, see bonding.txt for details
*/
pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
}
if (primary && !bond_mode_uses_primary(bond_mode)) {
/* currently, using a primary only makes sense
* in active backup, TLB or ALB modes
*/
pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
primary, bond_mode_name(bond_mode));
primary = NULL;
}
if (primary && primary_reselect) {
bond_opt_initstr(&newval, primary_reselect);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
&newval);
if (!valptr) {
pr_err("Error: Invalid primary_reselect \"%s\"\n",
primary_reselect);
return -EINVAL;
}
primary_reselect_value = valptr->value;
} else {
primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
}
if (fail_over_mac) {
bond_opt_initstr(&newval, fail_over_mac);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
&newval);
if (!valptr) {
pr_err("Error: invalid fail_over_mac \"%s\"\n",
fail_over_mac);
return -EINVAL;
}
fail_over_mac_value = valptr->value;
if (bond_mode != BOND_MODE_ACTIVEBACKUP)
pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
} else {
fail_over_mac_value = BOND_FOM_NONE;
}
bond_opt_initstr(&newval, "default");
valptr = bond_opt_parse(
bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
&newval);
if (!valptr) {
pr_err("Error: No ad_actor_sys_prio default value");
return -EINVAL;
}
ad_actor_sys_prio = valptr->value;
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
&newval);
if (!valptr) {
pr_err("Error: No ad_user_port_key default value");
return -EINVAL;
}
ad_user_port_key = valptr->value;
bond_opt_initstr(&newval, "default");
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
if (!valptr) {
pr_err("Error: No tlb_dynamic_lb default value");
return -EINVAL;
}
tlb_dynamic_lb = valptr->value;
if (lp_interval == 0) {
pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
}
/* fill params struct with the proper values */
params->mode = bond_mode;
params->xmit_policy = xmit_hashtype;
params->miimon = miimon;
params->num_peer_notif = num_peer_notif;
params->arp_interval = arp_interval;
params->arp_validate = arp_validate_value;
params->arp_all_targets = arp_all_targets_value;
params->missed_max = 2;
params->updelay = updelay;
params->downdelay = downdelay;
params->peer_notif_delay = 0;
params->use_carrier = use_carrier;
params->lacp_active = 1;
params->lacp_fast = lacp_fast;
params->primary[0] = 0;
params->primary_reselect = primary_reselect_value;
params->fail_over_mac = fail_over_mac_value;
params->tx_queues = tx_queues;
params->all_slaves_active = all_slaves_active;
params->resend_igmp = resend_igmp;
params->min_links = min_links;
params->lp_interval = lp_interval;
params->packets_per_slave = packets_per_slave;
params->tlb_dynamic_lb = tlb_dynamic_lb;
params->ad_actor_sys_prio = ad_actor_sys_prio;
eth_zero_addr(params->ad_actor_system);
params->ad_user_port_key = ad_user_port_key;
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
} else {
/* reciprocal_packets_per_slave is unused if
* packets_per_slave is 0 or 1, just initialize it
*/
params->reciprocal_packets_per_slave =
(struct reciprocal_value) { 0 };
}
if (primary)
strscpy_pad(params->primary, primary, sizeof(params->primary));
memcpy(params->arp_targets, arp_target, sizeof(arp_target));
#if IS_ENABLED(CONFIG_IPV6)
memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
#endif
return 0;
}
/* Called from registration process */
static int bond_init(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
netdev_dbg(bond_dev, "Begin bond_init\n");
bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
if (!bond->wq)
return -ENOMEM;
bond->notifier_ctx = false;
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
list_add_tail(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
bond_debug_register(bond);
/* Ensure valid dev_addr */
if (is_zero_ether_addr(bond_dev->dev_addr) &&
bond_dev->addr_assign_type == NET_ADDR_PERM)
eth_hw_addr_random(bond_dev);
return 0;
}
unsigned int bond_get_num_tx_queues(void)
{
return tx_queues;
}
/* Create a new bond based on the specified name and bonding parameters.
* If name is NULL, obtain a suitable "bond%d" name for us.
* Caller must NOT hold rtnl_lock; we need to release it here before we
* set up our sysfs entries.
*/
int bond_create(struct net *net, const char *name)
{
struct net_device *bond_dev;
struct bonding *bond;
int res = -ENOMEM;
rtnl_lock();
bond_dev = alloc_netdev_mq(sizeof(struct bonding),
name ? name : "bond%d", NET_NAME_UNKNOWN,
bond_setup, tx_queues);
if (!bond_dev)
goto out;
bond = netdev_priv(bond_dev);
dev_net_set(bond_dev, net);
bond_dev->rtnl_link_ops = &bond_link_ops;
res = register_netdevice(bond_dev);
if (res < 0) {
free_netdev(bond_dev);
goto out;
}
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
out:
rtnl_unlock();
return res;
}
static int __net_init bond_net_init(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
bn->net = net;
INIT_LIST_HEAD(&bn->dev_list);
bond_create_proc_dir(bn);
bond_create_sysfs(bn);
return 0;
}
static void __net_exit bond_net_exit_batch(struct list_head *net_list)
{
struct bond_net *bn;
struct net *net;
LIST_HEAD(list);
list_for_each_entry(net, net_list, exit_list) {
bn = net_generic(net, bond_net_id);
bond_destroy_sysfs(bn);
}
/* Kill off any bonds created after unregistering bond rtnl ops */
rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
struct bonding *bond, *tmp_bond;
bn = net_generic(net, bond_net_id);
list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
unregister_netdevice_queue(bond->dev, &list);
}
unregister_netdevice_many(&list);
rtnl_unlock();
list_for_each_entry(net, net_list, exit_list) {
bn = net_generic(net, bond_net_id);
bond_destroy_proc_dir(bn);
}
}
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
.exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
};
static int __init bonding_init(void)
{
int i;
int res;
res = bond_check_params(&bonding_defaults);
if (res)
goto out;
res = register_pernet_subsys(&bond_net_ops);
if (res)
goto out;
res = bond_netlink_init();
if (res)
goto err_link;
bond_create_debugfs();
for (i = 0; i < max_bonds; i++) {
res = bond_create(&init_net, NULL);
if (res)
goto err;
}
skb_flow_dissector_init(&flow_keys_bonding,
flow_keys_bonding_keys,
ARRAY_SIZE(flow_keys_bonding_keys));
register_netdevice_notifier(&bond_netdev_notifier);
out:
return res;
err:
bond_destroy_debugfs();
bond_netlink_fini();
err_link:
unregister_pernet_subsys(&bond_net_ops);
goto out;
}
static void __exit bonding_exit(void)
{
unregister_netdevice_notifier(&bond_netdev_notifier);
bond_destroy_debugfs();
bond_netlink_fini();
unregister_pernet_subsys(&bond_net_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Make sure we don't have an imbalance on our netpoll blocking */
WARN_ON(atomic_read(&netpoll_block_tx));
#endif
}
module_init(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Thomas Davis, [email protected] and many others");
| linux-master | drivers/net/bonding/bond_main.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/if_bonding.h>
#include <linux/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/netlink.h>
/* General definitions */
#define AD_SHORT_TIMEOUT 1
#define AD_LONG_TIMEOUT 0
#define AD_STANDBY 0x2
#define AD_MAX_TX_IN_SECOND 3
#define AD_COLLECTOR_MAX_DELAY 0
/* Timer definitions (43.4.4 in the 802.3ad standard) */
#define AD_FAST_PERIODIC_TIME 1
#define AD_SLOW_PERIODIC_TIME 30
#define AD_SHORT_TIMEOUT_TIME (3*AD_FAST_PERIODIC_TIME)
#define AD_LONG_TIMEOUT_TIME (3*AD_SLOW_PERIODIC_TIME)
#define AD_CHURN_DETECTION_TIME 60
#define AD_AGGREGATE_WAIT_TIME 2
/* Port Variables definitions used by the State Machines (43.4.7 in the
* 802.3ad standard)
*/
#define AD_PORT_BEGIN 0x1
#define AD_PORT_LACP_ENABLED 0x2
#define AD_PORT_ACTOR_CHURN 0x4
#define AD_PORT_PARTNER_CHURN 0x8
#define AD_PORT_READY 0x10
#define AD_PORT_READY_N 0x20
#define AD_PORT_MATCHED 0x40
#define AD_PORT_STANDBY 0x80
#define AD_PORT_SELECTED 0x100
#define AD_PORT_MOVED 0x200
#define AD_PORT_CHURNED (AD_PORT_ACTOR_CHURN | AD_PORT_PARTNER_CHURN)
/* Port Key definitions
 * The key is determined according to the link speed, duplex and
 * user key (which is not yet supported).
 *           --------------------------------------------------------------
 * Port key  | User key (10 bits)           | Speed (5 bits)      | Duplex |
 *           --------------------------------------------------------------
 *           |15                           6|5                   1|0       |
 */
#define AD_DUPLEX_KEY_MASKS 0x1
#define AD_SPEED_KEY_MASKS 0x3E
#define AD_USER_KEY_MASKS 0xFFC0
enum ad_link_speed_type {
AD_LINK_SPEED_1MBPS = 1,
AD_LINK_SPEED_10MBPS,
AD_LINK_SPEED_100MBPS,
AD_LINK_SPEED_1000MBPS,
AD_LINK_SPEED_2500MBPS,
AD_LINK_SPEED_5000MBPS,
AD_LINK_SPEED_10000MBPS,
AD_LINK_SPEED_14000MBPS,
AD_LINK_SPEED_20000MBPS,
AD_LINK_SPEED_25000MBPS,
AD_LINK_SPEED_40000MBPS,
AD_LINK_SPEED_50000MBPS,
AD_LINK_SPEED_56000MBPS,
AD_LINK_SPEED_100000MBPS,
AD_LINK_SPEED_200000MBPS,
AD_LINK_SPEED_400000MBPS,
AD_LINK_SPEED_800000MBPS,
};
/* compare MAC addresses */
#define MAC_ADDRESS_EQUAL(A, B) \
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
0, 0, 0, 0, 0, 0
};
static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
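/* For illustration, assuming the usual AD_TIMER_INTERVAL of 100 ms:
 * ad_ticks_per_sec is 10 and ad_delta_in_ticks is HZ / 10, so the
 * state machines are clocked ten times per second and the second-based
 * timer constants above are converted into these ticks by
 * __ad_timer_to_ticks().
 */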
const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
};
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
static int ad_marker_send(struct port *port, struct bond_marker *marker);
static void ad_mux_machine(struct port *port, bool *update_slave_arr);
static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
static void ad_tx_machine(struct port *port);
static void ad_periodic_machine(struct port *port, struct bond_params *bond_params);
static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
static void ad_agg_selection_logic(struct aggregator *aggregator,
bool *update_slave_arr);
static void ad_clear_agg(struct aggregator *aggregator);
static void ad_initialize_agg(struct aggregator *aggregator);
static void ad_initialize_port(struct port *port, int lacp_fast);
static void ad_enable_collecting_distributing(struct port *port,
bool *update_slave_arr);
static void ad_disable_collecting_distributing(struct port *port,
bool *update_slave_arr);
static void ad_marker_info_received(struct bond_marker *marker_info,
struct port *port);
static void ad_marker_response_received(struct bond_marker *marker,
struct port *port);
static void ad_update_actor_keys(struct port *port, bool reset);
/* ================= api to bonding and kernel code ================== */
/**
* __get_bond_by_port - get the port's bonding struct
* @port: the port we're looking at
*
* Return @port's bonding struct, or %NULL if it can't be found.
*/
static inline struct bonding *__get_bond_by_port(struct port *port)
{
if (port->slave == NULL)
return NULL;
return bond_get_bond_by_slave(port->slave);
}
/**
* __get_first_agg - get the first aggregator in the bond
* @port: the port we're looking at
*
* Return the aggregator of the first slave in @bond, or %NULL if it can't be
* found.
* The caller must hold RCU or RTNL lock.
*/
static inline struct aggregator *__get_first_agg(struct port *port)
{
struct bonding *bond = __get_bond_by_port(port);
struct slave *first_slave;
struct aggregator *agg;
/* If there's no bond for this port, or bond has no slaves */
if (bond == NULL)
return NULL;
rcu_read_lock();
first_slave = bond_first_slave_rcu(bond);
agg = first_slave ? &(SLAVE_AD_INFO(first_slave)->aggregator) : NULL;
rcu_read_unlock();
return agg;
}
/**
* __agg_has_partner - see if we have a partner
* @agg: the aggregator we're looking at
*
* Return nonzero if aggregator has a partner (denoted by a non-zero ether
* address for the partner). Return 0 if not.
*/
static inline int __agg_has_partner(struct aggregator *agg)
{
return !is_zero_ether_addr(agg->partner_system.mac_addr_value);
}
/**
* __disable_port - disable the port's slave
* @port: the port we're looking at
*/
static inline void __disable_port(struct port *port)
{
bond_set_slave_inactive_flags(port->slave, BOND_SLAVE_NOTIFY_LATER);
}
/**
* __enable_port - enable the port's slave, if it's up
* @port: the port we're looking at
*/
static inline void __enable_port(struct port *port)
{
struct slave *slave = port->slave;
if ((slave->link == BOND_LINK_UP) && bond_slave_is_up(slave))
bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER);
}
/**
* __port_is_enabled - check if the port's slave is in active state
* @port: the port we're looking at
*/
static inline int __port_is_enabled(struct port *port)
{
return bond_is_active_slave(port->slave);
}
/**
* __get_agg_selection_mode - get the aggregator selection mode
* @port: the port we're looking at
*
* Get the aggregator selection mode. Can be %STABLE, %BANDWIDTH or %COUNT.
*/
static inline u32 __get_agg_selection_mode(struct port *port)
{
struct bonding *bond = __get_bond_by_port(port);
if (bond == NULL)
return BOND_AD_STABLE;
return bond->params.ad_select;
}
/**
* __check_agg_selection_timer - check if the selection timer has expired
* @port: the port we're looking at
*/
static inline int __check_agg_selection_timer(struct port *port)
{
struct bonding *bond = __get_bond_by_port(port);
if (bond == NULL)
return 0;
return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
}
/**
* __get_link_speed - get a port's speed
* @port: the port we're looking at
*
* Return @port's speed in 802.3ad enum format. i.e. one of:
* 0,
* %AD_LINK_SPEED_10MBPS,
* %AD_LINK_SPEED_100MBPS,
* %AD_LINK_SPEED_1000MBPS,
* %AD_LINK_SPEED_2500MBPS,
* %AD_LINK_SPEED_5000MBPS,
* %AD_LINK_SPEED_10000MBPS
* %AD_LINK_SPEED_14000MBPS,
* %AD_LINK_SPEED_20000MBPS
* %AD_LINK_SPEED_25000MBPS
* %AD_LINK_SPEED_40000MBPS
* %AD_LINK_SPEED_50000MBPS
* %AD_LINK_SPEED_56000MBPS
* %AD_LINK_SPEED_100000MBPS
* %AD_LINK_SPEED_200000MBPS
* %AD_LINK_SPEED_400000MBPS
* %AD_LINK_SPEED_800000MBPS
*/
static u16 __get_link_speed(struct port *port)
{
struct slave *slave = port->slave;
u16 speed;
/* this if covers only a special case: when the configuration starts
* with link down, it sets the speed to 0.
* This is done in spite of the fact that the e100 driver reports 0
* to be compatible with MVT in the future.
*/
if (slave->link != BOND_LINK_UP)
speed = 0;
else {
switch (slave->speed) {
case SPEED_10:
speed = AD_LINK_SPEED_10MBPS;
break;
case SPEED_100:
speed = AD_LINK_SPEED_100MBPS;
break;
case SPEED_1000:
speed = AD_LINK_SPEED_1000MBPS;
break;
case SPEED_2500:
speed = AD_LINK_SPEED_2500MBPS;
break;
case SPEED_5000:
speed = AD_LINK_SPEED_5000MBPS;
break;
case SPEED_10000:
speed = AD_LINK_SPEED_10000MBPS;
break;
case SPEED_14000:
speed = AD_LINK_SPEED_14000MBPS;
break;
case SPEED_20000:
speed = AD_LINK_SPEED_20000MBPS;
break;
case SPEED_25000:
speed = AD_LINK_SPEED_25000MBPS;
break;
case SPEED_40000:
speed = AD_LINK_SPEED_40000MBPS;
break;
case SPEED_50000:
speed = AD_LINK_SPEED_50000MBPS;
break;
case SPEED_56000:
speed = AD_LINK_SPEED_56000MBPS;
break;
case SPEED_100000:
speed = AD_LINK_SPEED_100000MBPS;
break;
case SPEED_200000:
speed = AD_LINK_SPEED_200000MBPS;
break;
case SPEED_400000:
speed = AD_LINK_SPEED_400000MBPS;
break;
case SPEED_800000:
speed = AD_LINK_SPEED_800000MBPS;
break;
default:
/* unknown speed value from ethtool. shouldn't happen */
if (slave->speed != SPEED_UNKNOWN)
pr_err_once("%s: (slave %s): unknown ethtool speed (%d) for port %d (set it to 0)\n",
slave->bond->dev->name,
slave->dev->name, slave->speed,
port->actor_port_number);
speed = 0;
break;
}
}
slave_dbg(slave->bond->dev, slave->dev, "Port %d Received link speed %d update from adapter\n",
port->actor_port_number, speed);
return speed;
}
/**
* __get_duplex - get a port's duplex
* @port: the port we're looking at
*
* Return @port's duplex in 802.3ad bitmask format. i.e.:
* 0x01 if in full duplex
* 0x00 otherwise
*/
static u8 __get_duplex(struct port *port)
{
struct slave *slave = port->slave;
u8 retval = 0x0;
/* handling a special case: when the configuration starts with
* link down, it sets the duplex to 0.
*/
if (slave->link == BOND_LINK_UP) {
switch (slave->duplex) {
case DUPLEX_FULL:
retval = 0x1;
slave_dbg(slave->bond->dev, slave->dev, "Port %d Received status full duplex update from adapter\n",
port->actor_port_number);
break;
case DUPLEX_HALF:
default:
retval = 0x0;
slave_dbg(slave->bond->dev, slave->dev, "Port %d Received status NOT full duplex update from adapter\n",
port->actor_port_number);
break;
}
}
return retval;
}
static void __ad_actor_update_port(struct port *port)
{
const struct bonding *bond = bond_get_bond_by_slave(port->slave);
port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
port->actor_system_priority = BOND_AD_INFO(bond).system.sys_priority;
}
/* Conversions */
/**
* __ad_timer_to_ticks - convert a given timer type to AD module ticks
* @timer_type: which timer to operate
* @par: timer parameter. see below
*
* If @timer_type is %current_while_timer, @par indicates long/short timer.
* If @timer_type is %periodic_timer, @par is one of %FAST_PERIODIC_TIME,
* %SLOW_PERIODIC_TIME.
*/
static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
{
u16 retval = 0; /* to silence the compiler */
switch (timer_type) {
case AD_CURRENT_WHILE_TIMER: /* for rx machine usage */
if (par)
retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec);
else
retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec);
break;
case AD_ACTOR_CHURN_TIMER: /* for local churn machine */
retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec);
break;
case AD_PERIODIC_TIMER: /* for periodic machine */
retval = (par*ad_ticks_per_sec); /* long timeout */
break;
case AD_PARTNER_CHURN_TIMER: /* for remote churn machine */
retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec);
break;
case AD_WAIT_WHILE_TIMER: /* for selection machine */
retval = (AD_AGGREGATE_WAIT_TIME*ad_ticks_per_sec);
break;
}
return retval;
}
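/* Worked example, assuming ad_ticks_per_sec == 10: a short
 * current_while timeout of AD_SHORT_TIMEOUT_TIME (3 s) converts to
 * 30 ticks, while the slow periodic timer of AD_SLOW_PERIODIC_TIME
 * (30 s) converts to 300 ticks.
 */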
/* ================= ad_rx_machine helper functions ================== */
/**
* __choose_matched - update a port's matched variable from a received lacpdu
* @lacpdu: the lacpdu we've received
* @port: the port we're looking at
*
* Update the value of the matched variable, using parameter values from a
* newly received lacpdu. Parameter values for the partner carried in the
* received PDU are compared with the corresponding operational parameter
* values for the actor. Matched is set to TRUE if all of these parameters
* match and the PDU parameter partner_state.aggregation has the same value as
* actor_oper_port_state.aggregation and lacp will actively maintain the link
* in the aggregation. Matched is also set to TRUE if the value of
* actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates
* an individual link and lacp will actively maintain the link. Otherwise,
* matched is set to FALSE. LACP is considered to be actively maintaining the
* link if either the PDU's actor_state.lacp_activity variable is TRUE or both
* the actor's actor_oper_port_state.lacp_activity and the PDU's
* partner_state.lacp_activity variables are TRUE.
*
* Note: the AD_PORT_MATCHED "variable" is not specified by 802.3ad; it is
* used here to implement the language from 802.3ad 43.4.9 that requires
* recordPDU to "match" the LACPDU parameters to the stored values.
*/
static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
{
/* check if all parameters are alike
* or this is individual link(aggregation == FALSE)
* then update the state machine Matched variable.
*/
if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
(ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) &&
(ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
(ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
((lacpdu->partner_state & LACP_STATE_AGGREGATION) == (port->actor_oper_port_state & LACP_STATE_AGGREGATION))) ||
((lacpdu->actor_state & LACP_STATE_AGGREGATION) == 0)
) {
port->sm_vars |= AD_PORT_MATCHED;
} else {
port->sm_vars &= ~AD_PORT_MATCHED;
}
}
/**
* __record_pdu - record parameters from a received lacpdu
* @lacpdu: the lacpdu we've received
* @port: the port we're looking at
*
* Record the parameter values for the Actor carried in a received lacpdu as
* the current partner operational parameter values and sets
* actor_oper_port_state.defaulted to FALSE.
*/
static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
{
if (lacpdu && port) {
struct port_params *partner = &port->partner_oper;
__choose_matched(lacpdu, port);
/* record the new parameter values for the partner
* operational
*/
partner->port_number = ntohs(lacpdu->actor_port);
partner->port_priority = ntohs(lacpdu->actor_port_priority);
partner->system = lacpdu->actor_system;
partner->system_priority = ntohs(lacpdu->actor_system_priority);
partner->key = ntohs(lacpdu->actor_key);
partner->port_state = lacpdu->actor_state;
/* set actor_oper_port_state.defaulted to FALSE */
port->actor_oper_port_state &= ~LACP_STATE_DEFAULTED;
/* set the partner sync. to on if the partner is sync,
* and the port is matched
*/
if ((port->sm_vars & AD_PORT_MATCHED) &&
(lacpdu->actor_state & LACP_STATE_SYNCHRONIZATION)) {
partner->port_state |= LACP_STATE_SYNCHRONIZATION;
slave_dbg(port->slave->bond->dev, port->slave->dev,
"partner sync=1\n");
} else {
partner->port_state &= ~LACP_STATE_SYNCHRONIZATION;
slave_dbg(port->slave->bond->dev, port->slave->dev,
"partner sync=0\n");
}
}
}
/**
* __record_default - record default parameters
* @port: the port we're looking at
*
* This function records the default parameter values for the partner carried
* in the Partner Admin parameters as the current partner operational parameter
* values and sets actor_oper_port_state.defaulted to TRUE.
*/
static void __record_default(struct port *port)
{
if (port) {
/* record the partner admin parameters */
memcpy(&port->partner_oper, &port->partner_admin,
sizeof(struct port_params));
/* set actor_oper_port_state.defaulted to true */
port->actor_oper_port_state |= LACP_STATE_DEFAULTED;
}
}
/**
* __update_selected - update a port's Selected variable from a received lacpdu
* @lacpdu: the lacpdu we've received
* @port: the port we're looking at
*
* Update the value of the selected variable, using parameter values from a
* newly received lacpdu. The parameter values for the Actor carried in the
* received PDU are compared with the corresponding operational parameter
 * values for the port's partner. If one or more of the comparisons shows that
* the value(s) received in the PDU differ from the current operational values,
* then selected is set to FALSE and actor_oper_port_state.synchronization is
* set to out_of_sync. Otherwise, selected remains unchanged.
*/
static void __update_selected(struct lacpdu *lacpdu, struct port *port)
{
if (lacpdu && port) {
const struct port_params *partner = &port->partner_oper;
		/* if any parameter is different, update the state machine's
		 * Selected variable.
		 */
if (ntohs(lacpdu->actor_port) != partner->port_number ||
ntohs(lacpdu->actor_port_priority) != partner->port_priority ||
!MAC_ADDRESS_EQUAL(&lacpdu->actor_system, &partner->system) ||
ntohs(lacpdu->actor_system_priority) != partner->system_priority ||
ntohs(lacpdu->actor_key) != partner->key ||
(lacpdu->actor_state & LACP_STATE_AGGREGATION) != (partner->port_state & LACP_STATE_AGGREGATION)) {
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
}
/**
* __update_default_selected - update a port's Selected variable from Partner
* @port: the port we're looking at
*
* This function updates the value of the selected variable, using the partner
* administrative parameter values. The administrative values are compared with
* the corresponding operational parameter values for the partner. If one or
* more of the comparisons shows that the administrative value(s) differ from
* the current operational values, then Selected is set to FALSE and
* actor_oper_port_state.synchronization is set to OUT_OF_SYNC. Otherwise,
* Selected remains unchanged.
*/
static void __update_default_selected(struct port *port)
{
if (port) {
const struct port_params *admin = &port->partner_admin;
const struct port_params *oper = &port->partner_oper;
		/* if any parameter is different, update the state machine's
		 * Selected variable.
		 */
if (admin->port_number != oper->port_number ||
admin->port_priority != oper->port_priority ||
!MAC_ADDRESS_EQUAL(&admin->system, &oper->system) ||
admin->system_priority != oper->system_priority ||
admin->key != oper->key ||
(admin->port_state & LACP_STATE_AGGREGATION)
!= (oper->port_state & LACP_STATE_AGGREGATION)) {
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
}
/**
* __update_ntt - update a port's ntt variable from a received lacpdu
* @lacpdu: the lacpdu we've received
* @port: the port we're looking at
*
* Updates the value of the ntt variable, using parameter values from a newly
* received lacpdu. The parameter values for the partner carried in the
* received PDU are compared with the corresponding operational parameter
* values for the Actor. If one or more of the comparisons shows that the
* value(s) received in the PDU differ from the current operational values,
* then ntt is set to TRUE. Otherwise, ntt remains unchanged.
*/
static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
{
/* validate lacpdu and port */
if (lacpdu && port) {
		/* if any parameter is different, update port->ntt. */
if ((ntohs(lacpdu->partner_port) != port->actor_port_number) ||
(ntohs(lacpdu->partner_port_priority) != port->actor_port_priority) ||
!MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) ||
(ntohs(lacpdu->partner_system_priority) != port->actor_system_priority) ||
(ntohs(lacpdu->partner_key) != port->actor_oper_port_key) ||
((lacpdu->partner_state & LACP_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY)) ||
((lacpdu->partner_state & LACP_STATE_LACP_TIMEOUT) != (port->actor_oper_port_state & LACP_STATE_LACP_TIMEOUT)) ||
((lacpdu->partner_state & LACP_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) ||
((lacpdu->partner_state & LACP_STATE_AGGREGATION) != (port->actor_oper_port_state & LACP_STATE_AGGREGATION))
) {
port->ntt = true;
}
}
}
/**
* __agg_ports_are_ready - check if all ports in an aggregator are ready
* @aggregator: the aggregator we're looking at
*
*/
static int __agg_ports_are_ready(struct aggregator *aggregator)
{
struct port *port;
int retval = 1;
if (aggregator) {
		/* scan all ports in this aggregator to verify that they are
		 * all ready.
		 */
for (port = aggregator->lag_ports;
port;
port = port->next_port_in_aggregator) {
if (!(port->sm_vars & AD_PORT_READY_N)) {
retval = 0;
break;
}
}
}
return retval;
}
/**
* __set_agg_ports_ready - set value of Ready bit in all ports of an aggregator
* @aggregator: the aggregator we're looking at
* @val: Should the ports' ready bit be set on or off
*
*/
static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
{
struct port *port;
for (port = aggregator->lag_ports; port;
port = port->next_port_in_aggregator) {
if (val)
port->sm_vars |= AD_PORT_READY;
else
port->sm_vars &= ~AD_PORT_READY;
}
}
static int __agg_active_ports(struct aggregator *agg)
{
struct port *port;
int active = 0;
for (port = agg->lag_ports; port;
port = port->next_port_in_aggregator) {
if (port->is_enabled)
active++;
}
return active;
}
/**
* __get_agg_bandwidth - get the total bandwidth of an aggregator
* @aggregator: the aggregator we're looking at
*
*/
static u32 __get_agg_bandwidth(struct aggregator *aggregator)
{
int nports = __agg_active_ports(aggregator);
u32 bandwidth = 0;
if (nports) {
switch (__get_link_speed(aggregator->lag_ports)) {
case AD_LINK_SPEED_1MBPS:
bandwidth = nports;
break;
case AD_LINK_SPEED_10MBPS:
bandwidth = nports * 10;
break;
case AD_LINK_SPEED_100MBPS:
bandwidth = nports * 100;
break;
case AD_LINK_SPEED_1000MBPS:
bandwidth = nports * 1000;
break;
case AD_LINK_SPEED_2500MBPS:
bandwidth = nports * 2500;
break;
case AD_LINK_SPEED_5000MBPS:
bandwidth = nports * 5000;
break;
case AD_LINK_SPEED_10000MBPS:
bandwidth = nports * 10000;
break;
case AD_LINK_SPEED_14000MBPS:
bandwidth = nports * 14000;
break;
case AD_LINK_SPEED_20000MBPS:
bandwidth = nports * 20000;
break;
case AD_LINK_SPEED_25000MBPS:
bandwidth = nports * 25000;
break;
case AD_LINK_SPEED_40000MBPS:
bandwidth = nports * 40000;
break;
case AD_LINK_SPEED_50000MBPS:
bandwidth = nports * 50000;
break;
case AD_LINK_SPEED_56000MBPS:
bandwidth = nports * 56000;
break;
case AD_LINK_SPEED_100000MBPS:
bandwidth = nports * 100000;
break;
case AD_LINK_SPEED_200000MBPS:
bandwidth = nports * 200000;
break;
case AD_LINK_SPEED_400000MBPS:
bandwidth = nports * 400000;
break;
case AD_LINK_SPEED_800000MBPS:
bandwidth = nports * 800000;
break;
default:
bandwidth = 0; /* to silence the compiler */
}
}
return bandwidth;
}
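/* Illustrative example for __get_agg_bandwidth(): an aggregator with four
 * active ports whose first port reports AD_LINK_SPEED_10000MBPS yields
 * bandwidth = 4 * 10000 (Mbps). Note that the speed of the first port in the
 * LAG is taken as representative for the whole aggregator, in line with the
 * assumption that all members of an aggregator run at the same speed.
 */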
/**
* __get_active_agg - get the current active aggregator
* @aggregator: the aggregator we're looking at
*
* Caller must hold RCU lock.
*/
static struct aggregator *__get_active_agg(struct aggregator *aggregator)
{
struct bonding *bond = aggregator->slave->bond;
struct list_head *iter;
struct slave *slave;
bond_for_each_slave_rcu(bond, slave, iter)
if (SLAVE_AD_INFO(slave)->aggregator.is_active)
return &(SLAVE_AD_INFO(slave)->aggregator);
return NULL;
}
/**
* __update_lacpdu_from_port - update a port's lacpdu fields
* @port: the port we're looking at
*/
static inline void __update_lacpdu_from_port(struct port *port)
{
struct lacpdu *lacpdu = &port->lacpdu;
const struct port_params *partner = &port->partner_oper;
/* update current actual Actor parameters
* lacpdu->subtype initialized
* lacpdu->version_number initialized
* lacpdu->tlv_type_actor_info initialized
* lacpdu->actor_information_length initialized
*/
lacpdu->actor_system_priority = htons(port->actor_system_priority);
lacpdu->actor_system = port->actor_system;
lacpdu->actor_key = htons(port->actor_oper_port_key);
lacpdu->actor_port_priority = htons(port->actor_port_priority);
lacpdu->actor_port = htons(port->actor_port_number);
lacpdu->actor_state = port->actor_oper_port_state;
slave_dbg(port->slave->bond->dev, port->slave->dev,
"update lacpdu: actor port state %x\n",
port->actor_oper_port_state);
/* lacpdu->reserved_3_1 initialized
* lacpdu->tlv_type_partner_info initialized
* lacpdu->partner_information_length initialized
*/
lacpdu->partner_system_priority = htons(partner->system_priority);
lacpdu->partner_system = partner->system;
lacpdu->partner_key = htons(partner->key);
lacpdu->partner_port_priority = htons(partner->port_priority);
lacpdu->partner_port = htons(partner->port_number);
lacpdu->partner_state = partner->port_state;
/* lacpdu->reserved_3_2 initialized
* lacpdu->tlv_type_collector_info initialized
* lacpdu->collector_information_length initialized
* collector_max_delay initialized
* reserved_12[12] initialized
* tlv_type_terminator initialized
* terminator_length initialized
* reserved_50[50] initialized
*/
}
/* ================= main 802.3ad protocol code ========================= */
/**
* ad_lacpdu_send - send out a lacpdu packet on a given port
* @port: the port we're looking at
*
* Returns: 0 on success
* < 0 on error
*/
static int ad_lacpdu_send(struct port *port)
{
struct slave *slave = port->slave;
struct sk_buff *skb;
struct lacpdu_header *lacpdu_header;
int length = sizeof(struct lacpdu_header);
skb = dev_alloc_skb(length);
if (!skb)
return -ENOMEM;
atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_tx);
atomic64_inc(&BOND_AD_INFO(slave->bond).stats.lacpdu_tx);
skb->dev = slave->dev;
skb_reset_mac_header(skb);
skb->network_header = skb->mac_header + ETH_HLEN;
skb->protocol = PKT_TYPE_LACPDU;
skb->priority = TC_PRIO_CONTROL;
lacpdu_header = skb_put(skb, length);
ether_addr_copy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr);
/* Note: source address is set to be the member's PERMANENT address,
* because we use it to identify loopback lacpdus in receive.
*/
ether_addr_copy(lacpdu_header->hdr.h_source, slave->perm_hwaddr);
lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
lacpdu_header->lacpdu = port->lacpdu;
dev_queue_xmit(skb);
return 0;
}
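/* The frame built by ad_lacpdu_send() is a standard Slow Protocols frame:
 * the destination is the LACP multicast address (lacpdu_mcast_addr,
 * 01:80:C2:00:00:02 per IEEE 802.3), the EtherType is the Slow Protocols
 * type carried by PKT_TYPE_LACPDU (0x8809), and the payload is the port's
 * pre-built struct lacpdu. The source address is deliberately the slave's
 * permanent MAC so that looped-back PDUs can be recognised on receive.
 */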
/**
* ad_marker_send - send marker information/response on a given port
* @port: the port we're looking at
* @marker: marker data to send
*
* Returns: 0 on success
* < 0 on error
*/
static int ad_marker_send(struct port *port, struct bond_marker *marker)
{
struct slave *slave = port->slave;
struct sk_buff *skb;
struct bond_marker_header *marker_header;
int length = sizeof(struct bond_marker_header);
skb = dev_alloc_skb(length + 16);
if (!skb)
return -ENOMEM;
switch (marker->tlv_type) {
case AD_MARKER_INFORMATION_SUBTYPE:
atomic64_inc(&SLAVE_AD_INFO(slave)->stats.marker_tx);
atomic64_inc(&BOND_AD_INFO(slave->bond).stats.marker_tx);
break;
case AD_MARKER_RESPONSE_SUBTYPE:
atomic64_inc(&SLAVE_AD_INFO(slave)->stats.marker_resp_tx);
atomic64_inc(&BOND_AD_INFO(slave->bond).stats.marker_resp_tx);
break;
}
skb_reserve(skb, 16);
skb->dev = slave->dev;
skb_reset_mac_header(skb);
skb->network_header = skb->mac_header + ETH_HLEN;
skb->protocol = PKT_TYPE_LACPDU;
marker_header = skb_put(skb, length);
ether_addr_copy(marker_header->hdr.h_dest, lacpdu_mcast_addr);
/* Note: source address is set to be the member's PERMANENT address,
* because we use it to identify loopback MARKERs in receive.
*/
ether_addr_copy(marker_header->hdr.h_source, slave->perm_hwaddr);
marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
marker_header->marker = *marker;
dev_queue_xmit(skb);
return 0;
}
/**
* ad_mux_machine - handle a port's mux state machine
* @port: the port we're looking at
* @update_slave_arr: Does slave array need update?
*/
static void ad_mux_machine(struct port *port, bool *update_slave_arr)
{
mux_states_t last_state;
/* keep current State Machine state to compare later if it was
* changed
*/
last_state = port->sm_mux_state;
if (port->sm_vars & AD_PORT_BEGIN) {
port->sm_mux_state = AD_MUX_DETACHED;
} else {
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
if ((port->sm_vars & AD_PORT_SELECTED)
|| (port->sm_vars & AD_PORT_STANDBY))
/* if SELECTED or STANDBY */
port->sm_mux_state = AD_MUX_WAITING;
break;
case AD_MUX_WAITING:
/* if SELECTED == FALSE return to DETACH state */
if (!(port->sm_vars & AD_PORT_SELECTED)) {
port->sm_vars &= ~AD_PORT_READY_N;
				/* To spare the Selection Logic from checking
				 * every port's READY_N value on each callback
				 * cycle just to update the Ready variable, we
				 * check READY_N and update READY here.
				 */
__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
port->sm_mux_state = AD_MUX_DETACHED;
break;
}
/* check if the wait_while_timer expired */
if (port->sm_mux_timer_counter
&& !(--port->sm_mux_timer_counter))
port->sm_vars |= AD_PORT_READY_N;
			/* To spare the selection logic from checking every
			 * port's READY_N value on each callback cycle just to
			 * update the Ready variable, we check READY_N and
			 * update READY here.
			 */
__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
/* if the wait_while_timer expired, and the port is
* in READY state, move to ATTACHED state
*/
if ((port->sm_vars & AD_PORT_READY)
&& !port->sm_mux_timer_counter)
port->sm_mux_state = AD_MUX_ATTACHED;
break;
case AD_MUX_ATTACHED:
			/* also check whether agg_select_timer expired (so
			 * that enabling the port only takes place after this
			 * timer)
			 */
if ((port->sm_vars & AD_PORT_SELECTED) &&
(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
!__check_agg_selection_timer(port)) {
if (port->aggregator->is_active)
port->sm_mux_state =
AD_MUX_COLLECTING_DISTRIBUTING;
} else if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY)) {
/* if UNSELECTED or STANDBY */
port->sm_vars &= ~AD_PORT_READY_N;
				/* To spare the selection logic from checking
				 * every port's READY_N value on each callback
				 * cycle just to update the Ready variable, we
				 * check READY_N and update READY here.
				 */
__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
port->sm_mux_state = AD_MUX_DETACHED;
} else if (port->aggregator->is_active) {
port->actor_oper_port_state |=
LACP_STATE_SYNCHRONIZATION;
}
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY) ||
!(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
!(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) {
port->sm_mux_state = AD_MUX_ATTACHED;
} else {
/* if port state hasn't changed make
* sure that a collecting distributing
* port in an active aggregator is enabled
*/
if (port->aggregator &&
port->aggregator->is_active &&
!__port_is_enabled(port)) {
__enable_port(port);
*update_slave_arr = true;
}
}
break;
default:
break;
}
}
/* check if the state machine was changed */
if (port->sm_mux_state != last_state) {
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Mux Machine: Port=%d, Last State=%d, Curr State=%d\n",
port->actor_port_number,
last_state,
port->sm_mux_state);
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION;
ad_disable_collecting_distributing(port,
update_slave_arr);
port->actor_oper_port_state &= ~LACP_STATE_COLLECTING;
port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
port->ntt = true;
break;
case AD_MUX_WAITING:
port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0);
break;
case AD_MUX_ATTACHED:
if (port->aggregator->is_active)
port->actor_oper_port_state |=
LACP_STATE_SYNCHRONIZATION;
else
port->actor_oper_port_state &=
~LACP_STATE_SYNCHRONIZATION;
port->actor_oper_port_state &= ~LACP_STATE_COLLECTING;
port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
ad_disable_collecting_distributing(port,
update_slave_arr);
port->ntt = true;
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
port->actor_oper_port_state |= LACP_STATE_COLLECTING;
port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
ad_enable_collecting_distributing(port,
update_slave_arr);
port->ntt = true;
break;
default:
break;
}
}
}
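/* Summary of the mux transitions implemented above (derived from the code,
 * not a verbatim copy of the standard's state table):
 *
 *   DETACHED -> WAITING		port became SELECTED or STANDBY
 *   WAITING  -> ATTACHED		wait_while_timer expired and READY set
 *   WAITING  -> DETACHED		port lost SELECTED
 *   ATTACHED -> COLLECTING_DISTRIBUTING
 *					SELECTED, partner in sync, aggregator
 *					active, agg selection timer not running
 *   ATTACHED -> DETACHED		port lost SELECTED or went STANDBY
 *   COLLECTING_DISTRIBUTING -> ATTACHED
 *					SELECTED or synchronization lost
 *   any state -> DETACHED		on AD_PORT_BEGIN
 *
 * Entering COLLECTING_DISTRIBUTING sets COLLECTING, DISTRIBUTING and
 * SYNCHRONIZATION in the actor state and enables the slave; leaving it
 * clears those bits and disables the slave.
 */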
/**
* ad_rx_machine - handle a port's rx State Machine
* @lacpdu: the lacpdu we've received
* @port: the port we're looking at
*
 * If a lacpdu arrived, stop the previous timer (if it exists) and set the
 * next state to CURRENT. If the timer expired, move the state machine to the
 * proper state. In other cases, check whether we need to switch to another
 * state.
*/
static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
{
rx_states_t last_state;
/* keep current State Machine state to compare later if it was
* changed
*/
last_state = port->sm_rx_state;
if (lacpdu) {
atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.lacpdu_rx);
atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.lacpdu_rx);
}
/* check if state machine should change state */
/* first, check if port was reinitialized */
if (port->sm_vars & AD_PORT_BEGIN) {
port->sm_rx_state = AD_RX_INITIALIZE;
port->sm_vars |= AD_PORT_CHURNED;
/* check if port is not enabled */
} else if (!(port->sm_vars & AD_PORT_BEGIN) && !port->is_enabled)
port->sm_rx_state = AD_RX_PORT_DISABLED;
/* check if new lacpdu arrived */
else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) ||
(port->sm_rx_state == AD_RX_DEFAULTED) ||
(port->sm_rx_state == AD_RX_CURRENT))) {
if (port->sm_rx_state != AD_RX_CURRENT)
port->sm_vars |= AD_PORT_CHURNED;
port->sm_rx_timer_counter = 0;
port->sm_rx_state = AD_RX_CURRENT;
} else {
/* if timer is on, and if it is expired */
if (port->sm_rx_timer_counter &&
!(--port->sm_rx_timer_counter)) {
switch (port->sm_rx_state) {
case AD_RX_EXPIRED:
port->sm_rx_state = AD_RX_DEFAULTED;
break;
case AD_RX_CURRENT:
port->sm_rx_state = AD_RX_EXPIRED;
break;
default:
break;
}
} else {
/* if no lacpdu arrived and no timer is on */
switch (port->sm_rx_state) {
case AD_RX_PORT_DISABLED:
if (port->is_enabled &&
(port->sm_vars & AD_PORT_LACP_ENABLED))
port->sm_rx_state = AD_RX_EXPIRED;
else if (port->is_enabled
&& ((port->sm_vars
& AD_PORT_LACP_ENABLED) == 0))
port->sm_rx_state = AD_RX_LACP_DISABLED;
break;
default:
break;
}
}
}
/* check if the State machine was changed or new lacpdu arrived */
if ((port->sm_rx_state != last_state) || (lacpdu)) {
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Rx Machine: Port=%d, Last State=%d, Curr State=%d\n",
port->actor_port_number,
last_state,
port->sm_rx_state);
switch (port->sm_rx_state) {
case AD_RX_INITIALIZE:
if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS))
port->sm_vars &= ~AD_PORT_LACP_ENABLED;
else
port->sm_vars |= AD_PORT_LACP_ENABLED;
port->sm_vars &= ~AD_PORT_SELECTED;
__record_default(port);
port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
port->sm_rx_state = AD_RX_PORT_DISABLED;
fallthrough;
case AD_RX_PORT_DISABLED:
port->sm_vars &= ~AD_PORT_MATCHED;
break;
case AD_RX_LACP_DISABLED:
port->sm_vars &= ~AD_PORT_SELECTED;
__record_default(port);
port->partner_oper.port_state &= ~LACP_STATE_AGGREGATION;
port->sm_vars |= AD_PORT_MATCHED;
port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
break;
case AD_RX_EXPIRED:
/* Reset of the Synchronization flag (Standard 43.4.12)
			 * This reset causes the port to be disabled in the
			 * COLLECTING_DISTRIBUTING state of the mux machine in
			 * case of EXPIRED, even if LINK_DOWN didn't arrive for
			 * the port.
*/
port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->sm_vars &= ~AD_PORT_MATCHED;
port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
port->actor_oper_port_state |= LACP_STATE_EXPIRED;
port->sm_vars |= AD_PORT_CHURNED;
break;
case AD_RX_DEFAULTED:
__update_default_selected(port);
__record_default(port);
port->sm_vars |= AD_PORT_MATCHED;
port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
break;
case AD_RX_CURRENT:
/* detect loopback situation */
if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system),
&(port->actor_system))) {
slave_err(port->slave->bond->dev, port->slave->dev, "An illegal loopback occurred on slave\n"
"Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n");
return;
}
__update_selected(lacpdu, port);
__update_ntt(lacpdu, port);
__record_pdu(lacpdu, port);
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & LACP_STATE_LACP_TIMEOUT));
port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
break;
default:
break;
}
}
}
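/* Summary of the rx transitions implemented above: AD_PORT_BEGIN forces
 * INITIALIZE (which falls through to PORT_DISABLED), and a disabled port
 * stays in PORT_DISABLED. From there an enabled port moves to EXPIRED if
 * LACP is enabled on it, or to LACP_DISABLED otherwise. A received LACPDU
 * (re)enters CURRENT from EXPIRED, DEFAULTED or CURRENT; if current_while
 * expires the port drops from CURRENT to EXPIRED and then to DEFAULTED,
 * where the statically configured partner_admin values are used instead of
 * live partner data.
 */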
/**
* ad_churn_machine - handle port churn's state machine
* @port: the port we're looking at
*
*/
static void ad_churn_machine(struct port *port)
{
if (port->sm_vars & AD_PORT_CHURNED) {
port->sm_vars &= ~AD_PORT_CHURNED;
port->sm_churn_actor_state = AD_CHURN_MONITOR;
port->sm_churn_partner_state = AD_CHURN_MONITOR;
port->sm_churn_actor_timer_counter =
__ad_timer_to_ticks(AD_ACTOR_CHURN_TIMER, 0);
port->sm_churn_partner_timer_counter =
__ad_timer_to_ticks(AD_PARTNER_CHURN_TIMER, 0);
return;
}
if (port->sm_churn_actor_timer_counter &&
!(--port->sm_churn_actor_timer_counter) &&
port->sm_churn_actor_state == AD_CHURN_MONITOR) {
if (port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION) {
port->sm_churn_actor_state = AD_NO_CHURN;
} else {
port->churn_actor_count++;
port->sm_churn_actor_state = AD_CHURN;
}
}
if (port->sm_churn_partner_timer_counter &&
!(--port->sm_churn_partner_timer_counter) &&
port->sm_churn_partner_state == AD_CHURN_MONITOR) {
if (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) {
port->sm_churn_partner_state = AD_NO_CHURN;
} else {
port->churn_partner_count++;
port->sm_churn_partner_state = AD_CHURN;
}
}
}
/**
* ad_tx_machine - handle a port's tx state machine
* @port: the port we're looking at
*/
static void ad_tx_machine(struct port *port)
{
/* check if tx timer expired, to verify that we do not send more than
* 3 packets per second
*/
if (port->sm_tx_timer_counter && !(--port->sm_tx_timer_counter)) {
/* check if there is something to send */
if (port->ntt && (port->sm_vars & AD_PORT_LACP_ENABLED)) {
__update_lacpdu_from_port(port);
if (ad_lacpdu_send(port) >= 0) {
slave_dbg(port->slave->bond->dev,
port->slave->dev,
"Sent LACPDU on port %d\n",
port->actor_port_number);
/* mark ntt as false, so it will not be sent
* again until demanded
*/
port->ntt = false;
}
}
		/* restart tx timer (to ensure that we will not exceed
		 * AD_MAX_TX_IN_SECOND)
		 */
port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
}
}
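/* Illustrative rate-limit example for ad_tx_machine() (values assumed here;
 * the authoritative definitions are in bond_3ad.h): with
 * ad_ticks_per_sec == 10 and AD_MAX_TX_IN_SECOND == 3, the counter is
 * reloaded with 10 / 3 == 3 ticks, i.e. a LACPDU can leave this port at most
 * roughly every 300 ms even if ntt is re-asserted on every tick.
 */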
/**
* ad_periodic_machine - handle a port's periodic state machine
* @port: the port we're looking at
* @bond_params: bond parameters we will use
*
 * Turn the ntt flag on periodically to perform periodic transmission of
 * lacpdu's.
*/
static void ad_periodic_machine(struct port *port, struct bond_params *bond_params)
{
periodic_states_t last_state;
/* keep current state machine state to compare later if it was changed */
last_state = port->sm_periodic_state;
/* check if port was reinitialized */
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
(!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
!bond_params->lacp_active) {
port->sm_periodic_state = AD_NO_PERIODIC;
}
/* check if state machine should change state */
else if (port->sm_periodic_timer_counter) {
/* check if periodic state machine expired */
if (!(--port->sm_periodic_timer_counter)) {
/* if expired then do tx */
port->sm_periodic_state = AD_PERIODIC_TX;
} else {
/* If not expired, check if there is some new timeout
* parameter from the partner state
*/
switch (port->sm_periodic_state) {
case AD_FAST_PERIODIC:
if (!(port->partner_oper.port_state
& LACP_STATE_LACP_TIMEOUT))
port->sm_periodic_state = AD_SLOW_PERIODIC;
break;
case AD_SLOW_PERIODIC:
if ((port->partner_oper.port_state & LACP_STATE_LACP_TIMEOUT)) {
port->sm_periodic_timer_counter = 0;
port->sm_periodic_state = AD_PERIODIC_TX;
}
break;
default:
break;
}
}
} else {
switch (port->sm_periodic_state) {
case AD_NO_PERIODIC:
port->sm_periodic_state = AD_FAST_PERIODIC;
break;
case AD_PERIODIC_TX:
if (!(port->partner_oper.port_state &
LACP_STATE_LACP_TIMEOUT))
port->sm_periodic_state = AD_SLOW_PERIODIC;
else
port->sm_periodic_state = AD_FAST_PERIODIC;
break;
default:
break;
}
}
/* check if the state machine was changed */
if (port->sm_periodic_state != last_state) {
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n",
port->actor_port_number, last_state,
port->sm_periodic_state);
switch (port->sm_periodic_state) {
case AD_NO_PERIODIC:
port->sm_periodic_timer_counter = 0;
break;
case AD_FAST_PERIODIC:
/* decrement 1 tick we lost in the PERIODIC_TX cycle */
port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_FAST_PERIODIC_TIME))-1;
break;
case AD_SLOW_PERIODIC:
/* decrement 1 tick we lost in the PERIODIC_TX cycle */
port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_SLOW_PERIODIC_TIME))-1;
break;
case AD_PERIODIC_TX:
port->ntt = true;
break;
default:
break;
}
}
}
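/* Illustrative timing for ad_periodic_machine() (values assumed here; see
 * bond_3ad.h and IEEE 802.3ad for the authoritative numbers): with a fast
 * periodic time of 1 s and a slow periodic time of 30 s, a partner that
 * advertises LACP_STATE_LACP_TIMEOUT (short timeout) is refreshed with a
 * LACPDU every second, while a partner using the long timeout only gets one
 * every 30 seconds. lacp_active=off suppresses periodic transmission
 * entirely, as handled at the top of the function.
 */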
/**
* ad_port_selection_logic - select aggregation groups
* @port: the port we're looking at
* @update_slave_arr: Does slave array need update?
*
 * Select aggregation groups, and assign each port to its aggregator. The
 * selection logic is called during initialization (after all the handshakes),
 * and after every lacpdu receive (if Selected is off).
*/
static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
{
struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator;
struct port *last_port = NULL, *curr_port;
struct list_head *iter;
struct bonding *bond;
struct slave *slave;
int found = 0;
/* if the port is already Selected, do nothing */
if (port->sm_vars & AD_PORT_SELECTED)
return;
bond = __get_bond_by_port(port);
/* if the port is connected to other aggregator, detach it */
if (port->aggregator) {
/* detach the port from its former aggregator */
temp_aggregator = port->aggregator;
for (curr_port = temp_aggregator->lag_ports; curr_port;
last_port = curr_port,
curr_port = curr_port->next_port_in_aggregator) {
if (curr_port == port) {
temp_aggregator->num_of_ports--;
/* if it is the first port attached to the
* aggregator
*/
if (!last_port) {
temp_aggregator->lag_ports =
port->next_port_in_aggregator;
} else {
/* not the first port attached to the
* aggregator
*/
last_port->next_port_in_aggregator =
port->next_port_in_aggregator;
}
/* clear the port's relations to this
* aggregator
*/
port->aggregator = NULL;
port->next_port_in_aggregator = NULL;
port->actor_port_aggregator_identifier = 0;
slave_dbg(bond->dev, port->slave->dev, "Port %d left LAG %d\n",
port->actor_port_number,
temp_aggregator->aggregator_identifier);
/* if the aggregator is empty, clear its
* parameters, and set it ready to be attached
*/
if (!temp_aggregator->lag_ports)
ad_clear_agg(temp_aggregator);
break;
}
}
if (!curr_port) {
/* meaning: the port was related to an aggregator
* but was not on the aggregator port list
*/
net_warn_ratelimited("%s: (slave %s): Warning: Port %d was related to aggregator %d but was not on its port list\n",
port->slave->bond->dev->name,
port->slave->dev->name,
port->actor_port_number,
port->aggregator->aggregator_identifier);
}
}
/* search on all aggregators for a suitable aggregator for this port */
bond_for_each_slave(bond, slave, iter) {
aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
/* keep a free aggregator for later use(if needed) */
if (!aggregator->lag_ports) {
if (!free_aggregator)
free_aggregator = aggregator;
continue;
}
/* check if current aggregator suits us */
if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && /* if all parameters match AND */
MAC_ADDRESS_EQUAL(&(aggregator->partner_system), &(port->partner_oper.system)) &&
(aggregator->partner_system_priority == port->partner_oper.system_priority) &&
(aggregator->partner_oper_aggregator_key == port->partner_oper.key)
) &&
((!MAC_ADDRESS_EQUAL(&(port->partner_oper.system), &(null_mac_addr)) && /* partner answers */
!aggregator->is_individual) /* but is not individual OR */
)
) {
			/* attach to the aggregator we found */
port->aggregator = aggregator;
port->actor_port_aggregator_identifier =
port->aggregator->aggregator_identifier;
port->next_port_in_aggregator = aggregator->lag_ports;
port->aggregator->num_of_ports++;
aggregator->lag_ports = port;
slave_dbg(bond->dev, slave->dev, "Port %d joined LAG %d (existing LAG)\n",
port->actor_port_number,
port->aggregator->aggregator_identifier);
/* mark this port as selected */
port->sm_vars |= AD_PORT_SELECTED;
found = 1;
break;
}
}
/* the port couldn't find an aggregator - attach it to a new
* aggregator
*/
if (!found) {
if (free_aggregator) {
/* assign port a new aggregator */
port->aggregator = free_aggregator;
port->actor_port_aggregator_identifier =
port->aggregator->aggregator_identifier;
			/* update the new aggregator's parameters; the
			 * aggregator is individual unless the port runs
			 * full duplex
			 */
if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)
/* if port is full duplex */
port->aggregator->is_individual = false;
else
port->aggregator->is_individual = true;
port->aggregator->actor_admin_aggregator_key =
port->actor_admin_port_key;
port->aggregator->actor_oper_aggregator_key =
port->actor_oper_port_key;
port->aggregator->partner_system =
port->partner_oper.system;
port->aggregator->partner_system_priority =
port->partner_oper.system_priority;
port->aggregator->partner_oper_aggregator_key = port->partner_oper.key;
port->aggregator->receive_state = 1;
port->aggregator->transmit_state = 1;
port->aggregator->lag_ports = port;
port->aggregator->num_of_ports++;
/* mark this port as selected */
port->sm_vars |= AD_PORT_SELECTED;
slave_dbg(bond->dev, port->slave->dev, "Port %d joined LAG %d (new LAG)\n",
port->actor_port_number,
port->aggregator->aggregator_identifier);
} else {
slave_err(bond->dev, port->slave->dev,
"Port %d did not find a suitable aggregator\n",
port->actor_port_number);
return;
}
}
/* if all aggregator's ports are READY_N == TRUE, set ready=TRUE
* in all aggregator's ports, else set ready=FALSE in all
* aggregator's ports
*/
__set_agg_ports_ready(port->aggregator,
__agg_ports_are_ready(port->aggregator));
aggregator = __get_first_agg(port);
ad_agg_selection_logic(aggregator, update_slave_arr);
if (!port->aggregator->is_active)
port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION;
}
/* Decide if "agg" is a better choice for the new active aggregator that
* the current best, according to the ad_select policy.
*/
static struct aggregator *ad_agg_selection_test(struct aggregator *best,
struct aggregator *curr)
{
/* 0. If no best, select current.
*
* 1. If the current agg is not individual, and the best is
* individual, select current.
*
* 2. If current agg is individual and the best is not, keep best.
*
* 3. Therefore, current and best are both individual or both not
* individual, so:
*
* 3a. If current agg partner replied, and best agg partner did not,
* select current.
*
* 3b. If current agg partner did not reply and best agg partner
* did reply, keep best.
*
* 4. Therefore, current and best both have partner replies or
* both do not, so perform selection policy:
*
* BOND_AD_COUNT: Select by count of ports. If count is equal,
* select by bandwidth.
*
* BOND_AD_STABLE, BOND_AD_BANDWIDTH: Select by bandwidth.
*/
if (!best)
return curr;
if (!curr->is_individual && best->is_individual)
return curr;
if (curr->is_individual && !best->is_individual)
return best;
if (__agg_has_partner(curr) && !__agg_has_partner(best))
return curr;
if (!__agg_has_partner(curr) && __agg_has_partner(best))
return best;
switch (__get_agg_selection_mode(curr->lag_ports)) {
case BOND_AD_COUNT:
if (__agg_active_ports(curr) > __agg_active_ports(best))
return curr;
if (__agg_active_ports(curr) < __agg_active_ports(best))
return best;
fallthrough;
case BOND_AD_STABLE:
case BOND_AD_BANDWIDTH:
if (__get_agg_bandwidth(curr) > __get_agg_bandwidth(best))
return curr;
break;
default:
net_warn_ratelimited("%s: (slave %s): Impossible agg select mode %d\n",
curr->slave->bond->dev->name,
curr->slave->dev->name,
__get_agg_selection_mode(curr->lag_ports));
break;
}
return best;
}
static int agg_device_up(const struct aggregator *agg)
{
struct port *port = agg->lag_ports;
if (!port)
return 0;
for (port = agg->lag_ports; port;
port = port->next_port_in_aggregator) {
if (netif_running(port->slave->dev) &&
netif_carrier_ok(port->slave->dev))
return 1;
}
return 0;
}
/**
* ad_agg_selection_logic - select an aggregation group for a team
* @agg: the aggregator we're looking at
* @update_slave_arr: Does slave array need update?
*
* It is assumed that only one aggregator may be selected for a team.
*
* The logic of this function is to select the aggregator according to
* the ad_select policy:
*
* BOND_AD_STABLE: select the aggregator with the most ports attached to
* it, and to reselect the active aggregator only if the previous
* aggregator has no more ports related to it.
*
* BOND_AD_BANDWIDTH: select the aggregator with the highest total
* bandwidth, and reselect whenever a link state change takes place or the
* set of slaves in the bond changes.
*
* BOND_AD_COUNT: select the aggregator with largest number of ports
* (slaves), and reselect whenever a link state change takes place or the
* set of slaves in the bond changes.
*
* FIXME: this function MUST be called with the first agg in the bond, or
* __get_active_agg() won't work correctly. This function should be better
* called with the bond itself, and retrieve the first agg from it.
*/
static void ad_agg_selection_logic(struct aggregator *agg,
bool *update_slave_arr)
{
struct aggregator *best, *active, *origin;
struct bonding *bond = agg->slave->bond;
struct list_head *iter;
struct slave *slave;
struct port *port;
rcu_read_lock();
origin = agg;
active = __get_active_agg(agg);
best = (active && agg_device_up(active)) ? active : NULL;
bond_for_each_slave_rcu(bond, slave, iter) {
agg = &(SLAVE_AD_INFO(slave)->aggregator);
agg->is_active = 0;
if (__agg_active_ports(agg) && agg_device_up(agg))
best = ad_agg_selection_test(best, agg);
}
if (best &&
__get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
/* For the STABLE policy, don't replace the old active
* aggregator if it's still active (it has an answering
* partner) or if both the best and active don't have an
* answering partner.
*/
if (active && active->lag_ports &&
__agg_active_ports(active) &&
(__agg_has_partner(active) ||
(!__agg_has_partner(active) &&
!__agg_has_partner(best)))) {
if (!(!active->actor_oper_aggregator_key &&
best->actor_oper_aggregator_key)) {
best = NULL;
active->is_active = 1;
}
}
}
if (best && (best == active)) {
best = NULL;
active->is_active = 1;
}
/* if there is new best aggregator, activate it */
if (best) {
netdev_dbg(bond->dev, "(slave %s): best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
best->slave ? best->slave->dev->name : "NULL",
best->aggregator_identifier, best->num_of_ports,
best->actor_oper_aggregator_key,
best->partner_oper_aggregator_key,
best->is_individual, best->is_active);
netdev_dbg(bond->dev, "(slave %s): best ports %p slave %p\n",
best->slave ? best->slave->dev->name : "NULL",
best->lag_ports, best->slave);
bond_for_each_slave_rcu(bond, slave, iter) {
agg = &(SLAVE_AD_INFO(slave)->aggregator);
slave_dbg(bond->dev, slave->dev, "Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
agg->aggregator_identifier, agg->num_of_ports,
agg->actor_oper_aggregator_key,
agg->partner_oper_aggregator_key,
agg->is_individual, agg->is_active);
}
/* check if any partner replies */
if (best->is_individual)
net_warn_ratelimited("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
bond->dev->name);
best->is_active = 1;
netdev_dbg(bond->dev, "(slave %s): LAG %d chosen as the active LAG\n",
best->slave ? best->slave->dev->name : "NULL",
best->aggregator_identifier);
netdev_dbg(bond->dev, "(slave %s): Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
best->slave ? best->slave->dev->name : "NULL",
best->aggregator_identifier, best->num_of_ports,
best->actor_oper_aggregator_key,
best->partner_oper_aggregator_key,
best->is_individual, best->is_active);
/* disable the ports that were related to the former
* active_aggregator
*/
if (active) {
for (port = active->lag_ports; port;
port = port->next_port_in_aggregator) {
__disable_port(port);
}
}
/* Slave array needs update. */
*update_slave_arr = true;
}
	/* if the selected aggregator consists of individual links only
	 * (i.e. no partner replied, partner_system is NULL), enable their
	 * ports
	 */
active = __get_active_agg(origin);
if (active) {
if (!__agg_has_partner(active)) {
for (port = active->lag_ports; port;
port = port->next_port_in_aggregator) {
__enable_port(port);
}
*update_slave_arr = true;
}
}
rcu_read_unlock();
bond_3ad_set_carrier(bond);
}
/**
* ad_clear_agg - clear a given aggregator's parameters
* @aggregator: the aggregator we're looking at
*/
static void ad_clear_agg(struct aggregator *aggregator)
{
if (aggregator) {
aggregator->is_individual = false;
aggregator->actor_admin_aggregator_key = 0;
aggregator->actor_oper_aggregator_key = 0;
eth_zero_addr(aggregator->partner_system.mac_addr_value);
aggregator->partner_system_priority = 0;
aggregator->partner_oper_aggregator_key = 0;
aggregator->receive_state = 0;
aggregator->transmit_state = 0;
aggregator->lag_ports = NULL;
aggregator->is_active = 0;
aggregator->num_of_ports = 0;
pr_debug("%s: LAG %d was cleared\n",
aggregator->slave ?
aggregator->slave->dev->name : "NULL",
aggregator->aggregator_identifier);
}
}
/**
* ad_initialize_agg - initialize a given aggregator's parameters
* @aggregator: the aggregator we're looking at
*/
static void ad_initialize_agg(struct aggregator *aggregator)
{
if (aggregator) {
ad_clear_agg(aggregator);
eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
aggregator->aggregator_identifier = 0;
aggregator->slave = NULL;
}
}
/**
* ad_initialize_port - initialize a given port's parameters
* @port: the port we're looking at
* @lacp_fast: boolean. whether fast periodic should be used
*/
static void ad_initialize_port(struct port *port, int lacp_fast)
{
static const struct port_params tmpl = {
.system_priority = 0xffff,
.key = 1,
.port_number = 1,
.port_priority = 0xff,
.port_state = 1,
};
static const struct lacpdu lacpdu = {
.subtype = 0x01,
.version_number = 0x01,
.tlv_type_actor_info = 0x01,
.actor_information_length = 0x14,
.tlv_type_partner_info = 0x02,
.partner_information_length = 0x14,
.tlv_type_collector_info = 0x03,
.collector_information_length = 0x10,
.collector_max_delay = htons(AD_COLLECTOR_MAX_DELAY),
};
if (port) {
port->actor_port_priority = 0xff;
port->actor_port_aggregator_identifier = 0;
port->ntt = false;
port->actor_admin_port_state = LACP_STATE_AGGREGATION |
LACP_STATE_LACP_ACTIVITY;
port->actor_oper_port_state = LACP_STATE_AGGREGATION |
LACP_STATE_LACP_ACTIVITY;
if (lacp_fast)
port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
memcpy(&port->partner_oper, &tmpl, sizeof(tmpl));
port->is_enabled = true;
/* private parameters */
port->sm_vars = AD_PORT_BEGIN | AD_PORT_LACP_ENABLED;
port->sm_rx_state = 0;
port->sm_rx_timer_counter = 0;
port->sm_periodic_state = 0;
port->sm_periodic_timer_counter = 0;
port->sm_mux_state = 0;
port->sm_mux_timer_counter = 0;
port->sm_tx_state = 0;
port->aggregator = NULL;
port->next_port_in_aggregator = NULL;
port->transaction_id = 0;
port->sm_churn_actor_timer_counter = 0;
port->sm_churn_actor_state = 0;
port->churn_actor_count = 0;
port->sm_churn_partner_timer_counter = 0;
port->sm_churn_partner_state = 0;
port->churn_partner_count = 0;
memcpy(&port->lacpdu, &lacpdu, sizeof(lacpdu));
}
}
/**
* ad_enable_collecting_distributing - enable a port's transmit/receive
* @port: the port we're looking at
* @update_slave_arr: Does slave array need update?
*
* Enable @port if it's in an active aggregator
*/
static void ad_enable_collecting_distributing(struct port *port,
bool *update_slave_arr)
{
if (port->aggregator->is_active) {
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Enabling port %d (LAG %d)\n",
port->actor_port_number,
port->aggregator->aggregator_identifier);
__enable_port(port);
/* Slave array needs update */
*update_slave_arr = true;
}
}
/**
* ad_disable_collecting_distributing - disable a port's transmit/receive
* @port: the port we're looking at
* @update_slave_arr: Does slave array need update?
*/
static void ad_disable_collecting_distributing(struct port *port,
bool *update_slave_arr)
{
if (port->aggregator &&
!MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system),
&(null_mac_addr))) {
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Disabling port %d (LAG %d)\n",
port->actor_port_number,
port->aggregator->aggregator_identifier);
__disable_port(port);
/* Slave array needs an update */
*update_slave_arr = true;
}
}
/**
* ad_marker_info_received - handle receive of a Marker information frame
* @marker_info: Marker info received
* @port: the port we're looking at
*/
static void ad_marker_info_received(struct bond_marker *marker_info,
struct port *port)
{
struct bond_marker marker;
atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.marker_rx);
atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.marker_rx);
/* copy the received marker data to the response marker */
memcpy(&marker, marker_info, sizeof(struct bond_marker));
/* change the marker subtype to marker response */
marker.tlv_type = AD_MARKER_RESPONSE_SUBTYPE;
/* send the marker response */
if (ad_marker_send(port, &marker) >= 0)
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Sent Marker Response on port %d\n",
port->actor_port_number);
}
/**
* ad_marker_response_received - handle receive of a marker response frame
* @marker: marker PDU received
* @port: the port we're looking at
*
 * This function does nothing, since at this stage we decided only to respond
 * to marker information frames and not to originate marker PDUs or handle
 * marker responses.
*/
static void ad_marker_response_received(struct bond_marker *marker,
struct port *port)
{
atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.marker_resp_rx);
atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.marker_resp_rx);
/* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */
}
/* ========= AD exported functions to the main bonding code ========= */
/* Check aggregators status in team every T seconds */
#define AD_AGGREGATOR_SELECTION_TIMER 8
/**
 * bond_3ad_initiate_agg_selection - initiate aggregator selection
* @bond: bonding struct
* @timeout: timeout value to set
*
* Set the aggregation selection timer, to initiate an agg selection in
* the very near future. Called during first initialization, and during
* any down to up transitions of the bond.
*/
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
{
atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
}
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
*
* Can be called only after the mac address of the bond is set.
*/
void bond_3ad_initialize(struct bonding *bond)
{
BOND_AD_INFO(bond).aggregator_identifier = 0;
BOND_AD_INFO(bond).system.sys_priority =
bond->params.ad_actor_sys_prio;
if (is_zero_ether_addr(bond->params.ad_actor_system))
BOND_AD_INFO(bond).system.sys_mac_addr =
*((struct mac_addr *)bond->dev->dev_addr);
else
BOND_AD_INFO(bond).system.sys_mac_addr =
*((struct mac_addr *)bond->params.ad_actor_system);
bond_3ad_initiate_agg_selection(bond,
AD_AGGREGATOR_SELECTION_TIMER *
ad_ticks_per_sec);
}
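/* For reference only, a bond that reaches this initialization path is
 * typically created from user space roughly as follows (iproute2 syntax,
 * shown purely as an illustration):
 *
 *   ip link add bond0 type bond mode 802.3ad lacp_rate fast ad_select count
 *   ip link set eth0 master bond0
 *   ip link set eth1 master bond0
 *
 * mode 802.3ad selects this LACP implementation, lacp_rate maps to
 * bond->params.lacp_fast and ad_select to the aggregator selection policy
 * used by ad_agg_selection_logic().
 */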
/**
* bond_3ad_bind_slave - initialize a slave's port
* @slave: slave struct to work on
*/
void bond_3ad_bind_slave(struct slave *slave)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
struct port *port;
struct aggregator *aggregator;
/* check that the slave has not been initialized yet. */
if (SLAVE_AD_INFO(slave)->port.slave != slave) {
/* port initialization */
port = &(SLAVE_AD_INFO(slave)->port);
ad_initialize_port(port, bond->params.lacp_fast);
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave)->id;
/* key is determined according to the link speed, duplex and
* user key
*/
port->actor_admin_port_key = bond->params.ad_user_port_key << 6;
ad_update_actor_keys(port, false);
/* actor system is the bond's system */
__ad_actor_update_port(port);
		/* tx timer (to ensure that no more than AD_MAX_TX_IN_SECOND
		 * lacpdu's are sent in one second)
		 */
port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
__disable_port(port);
/* aggregator initialization */
aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
ad_initialize_agg(aggregator);
aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
aggregator->slave = slave;
aggregator->is_active = 0;
aggregator->num_of_ports = 0;
}
}
/**
* bond_3ad_unbind_slave - deinitialize a slave's port
* @slave: slave struct to work on
*
 * Search for the aggregator that is related to this port, remove the
 * aggregator and assign another aggregator to the other ports related to it
 * (if any), and remove the port.
*/
void bond_3ad_unbind_slave(struct slave *slave)
{
struct port *port, *prev_port, *temp_port;
struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
int select_new_active_agg = 0;
struct bonding *bond = slave->bond;
struct slave *slave_iter;
struct list_head *iter;
bool dummy_slave_update; /* Ignore this value as caller updates array */
/* Sync against bond_3ad_state_machine_handler() */
spin_lock_bh(&bond->mode_lock);
aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
port = &(SLAVE_AD_INFO(slave)->port);
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
slave_warn(bond->dev, slave->dev, "Trying to unbind an uninitialized port\n");
goto out;
}
slave_dbg(bond->dev, slave->dev, "Unbinding Link Aggregation Group %d\n",
aggregator->aggregator_identifier);
/* Tell the partner that this port is not suitable for aggregation */
port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->actor_oper_port_state &= ~LACP_STATE_COLLECTING;
port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
port->actor_oper_port_state &= ~LACP_STATE_AGGREGATION;
__update_lacpdu_from_port(port);
ad_lacpdu_send(port);
/* check if this aggregator is occupied */
if (aggregator->lag_ports) {
		/* check if there are other ports related to this aggregator
		 * besides the port related to this slave (that ensures there
		 * is a reason to search for a new aggregator, and that we
		 * will find one)
		 */
if ((aggregator->lag_ports != port) ||
(aggregator->lag_ports->next_port_in_aggregator)) {
/* find new aggregator for the related port(s) */
bond_for_each_slave(bond, slave_iter, iter) {
new_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
/* if the new aggregator is empty, or it is
* connected to our port only
*/
if (!new_aggregator->lag_ports ||
((new_aggregator->lag_ports == port) &&
!new_aggregator->lag_ports->next_port_in_aggregator))
break;
}
if (!slave_iter)
new_aggregator = NULL;
/* if new aggregator found, copy the aggregator's
* parameters and connect the related lag_ports to the
* new aggregator
*/
if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
slave_dbg(bond->dev, slave->dev, "Some port(s) related to LAG %d - replacing with LAG %d\n",
aggregator->aggregator_identifier,
new_aggregator->aggregator_identifier);
if ((new_aggregator->lag_ports == port) &&
new_aggregator->is_active) {
slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
select_new_active_agg = 1;
}
new_aggregator->is_individual = aggregator->is_individual;
new_aggregator->actor_admin_aggregator_key = aggregator->actor_admin_aggregator_key;
new_aggregator->actor_oper_aggregator_key = aggregator->actor_oper_aggregator_key;
new_aggregator->partner_system = aggregator->partner_system;
new_aggregator->partner_system_priority = aggregator->partner_system_priority;
new_aggregator->partner_oper_aggregator_key = aggregator->partner_oper_aggregator_key;
new_aggregator->receive_state = aggregator->receive_state;
new_aggregator->transmit_state = aggregator->transmit_state;
new_aggregator->lag_ports = aggregator->lag_ports;
new_aggregator->is_active = aggregator->is_active;
new_aggregator->num_of_ports = aggregator->num_of_ports;
/* update the information that is written on
* the ports about the aggregator
*/
for (temp_port = aggregator->lag_ports; temp_port;
temp_port = temp_port->next_port_in_aggregator) {
temp_port->aggregator = new_aggregator;
temp_port->actor_port_aggregator_identifier = new_aggregator->aggregator_identifier;
}
ad_clear_agg(aggregator);
if (select_new_active_agg)
ad_agg_selection_logic(__get_first_agg(port),
&dummy_slave_update);
} else {
slave_warn(bond->dev, slave->dev, "unbinding aggregator, and could not find a new aggregator for its ports\n");
}
} else {
/* in case that the only port related to this
* aggregator is the one we want to remove
*/
select_new_active_agg = aggregator->is_active;
ad_clear_agg(aggregator);
if (select_new_active_agg) {
slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
/* select new active aggregator */
temp_aggregator = __get_first_agg(port);
if (temp_aggregator)
ad_agg_selection_logic(temp_aggregator,
&dummy_slave_update);
}
}
}
slave_dbg(bond->dev, slave->dev, "Unbinding port %d\n", port->actor_port_number);
/* find the aggregator that this port is connected to */
bond_for_each_slave(bond, slave_iter, iter) {
temp_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
prev_port = NULL;
/* search the port in the aggregator's related ports */
for (temp_port = temp_aggregator->lag_ports; temp_port;
prev_port = temp_port,
temp_port = temp_port->next_port_in_aggregator) {
if (temp_port == port) {
/* the aggregator found - detach the port from
* this aggregator
*/
if (prev_port)
prev_port->next_port_in_aggregator = temp_port->next_port_in_aggregator;
else
temp_aggregator->lag_ports = temp_port->next_port_in_aggregator;
temp_aggregator->num_of_ports--;
if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
if (temp_aggregator->num_of_ports == 0)
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
/* select new active aggregator */
ad_agg_selection_logic(__get_first_agg(port),
&dummy_slave_update);
}
}
break;
}
}
}
port->slave = NULL;
out:
spin_unlock_bh(&bond->mode_lock);
}
/**
* bond_3ad_update_ad_actor_settings - reflect change of actor settings to ports
* @bond: bonding struct to work on
*
* If an ad_actor setting gets changed we need to update the individual port
* settings so the bond device will use the new values when it gets upped.
*/
void bond_3ad_update_ad_actor_settings(struct bonding *bond)
{
struct list_head *iter;
struct slave *slave;
ASSERT_RTNL();
BOND_AD_INFO(bond).system.sys_priority = bond->params.ad_actor_sys_prio;
if (is_zero_ether_addr(bond->params.ad_actor_system))
BOND_AD_INFO(bond).system.sys_mac_addr =
*((struct mac_addr *)bond->dev->dev_addr);
else
BOND_AD_INFO(bond).system.sys_mac_addr =
*((struct mac_addr *)bond->params.ad_actor_system);
spin_lock_bh(&bond->mode_lock);
bond_for_each_slave(bond, slave, iter) {
struct port *port = &(SLAVE_AD_INFO(slave))->port;
__ad_actor_update_port(port);
port->ntt = true;
}
spin_unlock_bh(&bond->mode_lock);
}
/**
* bond_agg_timer_advance - advance agg_select_timer
* @bond: bonding structure
*
* Return true when agg_select_timer reaches 0.
*/
static bool bond_agg_timer_advance(struct bonding *bond)
{
int val, nval;
while (1) {
val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
if (!val)
return false;
nval = val - 1;
if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
val, nval) == val)
break;
}
return nval == 0;
}
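/* bond_agg_timer_advance() is a lock-free decrement: the cmpxchg loop
 * retries until it succeeds in moving the counter from val to val - 1, and
 * bails out early when the counter is already zero, so the timer can never
 * go negative even if bond_3ad_initiate_agg_selection() re-arms it
 * concurrently with a plain atomic_set(). Only the caller that observes the
 * transition to zero returns true and triggers aggregator selection.
 */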
/**
* bond_3ad_state_machine_handler - handle state machines timeout
* @work: work context to fetch bonding struct to work on from
*
 * The state machine handling concept in this module is to check, on every
 * tick, which state machine should run and to execute the relevant function.
 * The execution order is round robin, so when two state machines interact,
 * the reply of one to the other might be delayed until the next tick.
 *
 * This function also completes the initialization when the agg_select_timer
 * times out: it selects an aggregator for the ports that are not yet related
 * to any aggregator, and selects the active aggregator for the bond.
*/
void bond_3ad_state_machine_handler(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
ad_work.work);
struct aggregator *aggregator;
struct list_head *iter;
struct slave *slave;
struct port *port;
bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
bool update_slave_arr = false;
/* Lock to protect data accessed by all (e.g., port->sm_vars) and
* against running with bond_3ad_unbind_slave. ad_rx_machine may run
* concurrently due to incoming LACPDU as well.
*/
spin_lock_bh(&bond->mode_lock);
rcu_read_lock();
/* check if there are any slaves */
if (!bond_has_slaves(bond))
goto re_arm;
if (bond_agg_timer_advance(bond)) {
slave = bond_first_slave_rcu(bond);
port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
/* select the active aggregator for the bond */
if (port) {
if (!port->slave) {
net_warn_ratelimited("%s: Warning: bond's first port is uninitialized\n",
bond->dev->name);
goto re_arm;
}
aggregator = __get_first_agg(port);
ad_agg_selection_logic(aggregator, &update_slave_arr);
}
bond_3ad_set_carrier(bond);
}
/* for each port run the state machines */
bond_for_each_slave_rcu(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave)->port);
if (!port->slave) {
net_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
bond->dev->name);
goto re_arm;
}
ad_rx_machine(NULL, port);
ad_periodic_machine(port, &bond->params);
ad_port_selection_logic(port, &update_slave_arr);
ad_mux_machine(port, &update_slave_arr);
ad_tx_machine(port);
ad_churn_machine(port);
/* turn off the BEGIN bit, since we already handled it */
if (port->sm_vars & AD_PORT_BEGIN)
port->sm_vars &= ~AD_PORT_BEGIN;
}
re_arm:
bond_for_each_slave_rcu(bond, slave, iter) {
if (slave->should_notify) {
should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
break;
}
}
rcu_read_unlock();
spin_unlock_bh(&bond->mode_lock);
if (update_slave_arr)
bond_slave_arr_work_rearm(bond, 0);
if (should_notify_rtnl && rtnl_trylock()) {
bond_slave_state_notify(bond);
rtnl_unlock();
}
queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
}
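/* On every tick, each port runs its machines in a fixed order: rx (timer
 * handling only; actual LACPDUs are fed in from bond_3ad_rx_indication()),
 * periodic, port selection logic, mux, tx and finally churn detection. The
 * BEGIN flag is cleared afterwards so re-initialization is only acted upon
 * once. This ordering is why an interaction between two machines may only
 * take effect on the following tick, as noted in the kernel-doc above.
 */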
/**
* bond_3ad_rx_indication - handle a received frame
* @lacpdu: received lacpdu
* @slave: slave struct to work on
*
 * It is assumed that frames that were sent on this NIC are not returned as
 * new received frames (loopback). Since only the payload is given to this
 * function, it checks for loopback.
*/
static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave)
{
struct bonding *bond = slave->bond;
int ret = RX_HANDLER_ANOTHER;
struct bond_marker *marker;
struct port *port;
atomic64_t *stat;
port = &(SLAVE_AD_INFO(slave)->port);
if (!port->slave) {
net_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
slave->dev->name, slave->bond->dev->name);
return ret;
}
switch (lacpdu->subtype) {
case AD_TYPE_LACPDU:
ret = RX_HANDLER_CONSUMED;
slave_dbg(slave->bond->dev, slave->dev,
"Received LACPDU on port %d\n",
port->actor_port_number);
/* Protect against concurrent state machines */
spin_lock(&slave->bond->mode_lock);
ad_rx_machine(lacpdu, port);
spin_unlock(&slave->bond->mode_lock);
break;
case AD_TYPE_MARKER:
ret = RX_HANDLER_CONSUMED;
/* No need to convert fields to Little Endian since we
* don't use the marker's fields.
*/
marker = (struct bond_marker *)lacpdu;
switch (marker->tlv_type) {
case AD_MARKER_INFORMATION_SUBTYPE:
slave_dbg(slave->bond->dev, slave->dev, "Received Marker Information on port %d\n",
port->actor_port_number);
ad_marker_info_received(marker, port);
break;
case AD_MARKER_RESPONSE_SUBTYPE:
slave_dbg(slave->bond->dev, slave->dev, "Received Marker Response on port %d\n",
port->actor_port_number);
ad_marker_response_received(marker, port);
break;
default:
slave_dbg(slave->bond->dev, slave->dev, "Received an unknown Marker subtype on port %d\n",
port->actor_port_number);
stat = &SLAVE_AD_INFO(slave)->stats.marker_unknown_rx;
atomic64_inc(stat);
stat = &BOND_AD_INFO(bond).stats.marker_unknown_rx;
atomic64_inc(stat);
}
break;
default:
atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_unknown_rx);
atomic64_inc(&BOND_AD_INFO(bond).stats.lacpdu_unknown_rx);
}
return ret;
}
/**
* ad_update_actor_keys - Update the oper / admin keys for a port based on
* its current speed and duplex settings.
*
 * @port: the port we're looking at
* @reset: Boolean to just reset the speed and the duplex part of the key
*
* The logic to change the oper / admin keys is:
 * (a) A full duplex port can participate in LACP with a partner.
 * (b) When the speed is changed, LACP needs to be reinitiated.
*/
static void ad_update_actor_keys(struct port *port, bool reset)
{
u8 duplex = 0;
u16 ospeed = 0, speed = 0;
u16 old_oper_key = port->actor_oper_port_key;
port->actor_admin_port_key &= ~(AD_SPEED_KEY_MASKS|AD_DUPLEX_KEY_MASKS);
if (!reset) {
speed = __get_link_speed(port);
ospeed = (old_oper_key & AD_SPEED_KEY_MASKS) >> 1;
duplex = __get_duplex(port);
port->actor_admin_port_key |= (speed << 1) | duplex;
}
port->actor_oper_port_key = port->actor_admin_port_key;
if (old_oper_key != port->actor_oper_port_key) {
/* Only 'duplex' port participates in LACP */
if (duplex)
port->sm_vars |= AD_PORT_LACP_ENABLED;
else
port->sm_vars &= ~AD_PORT_LACP_ENABLED;
if (!reset) {
if (!speed) {
slave_err(port->slave->bond->dev,
port->slave->dev,
"speed changed to 0 on port %d\n",
port->actor_port_number);
} else if (duplex && ospeed != speed) {
/* Speed change restarts LACP state-machine */
port->sm_vars |= AD_PORT_BEGIN;
}
}
}
}
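/* Editorial illustration, not part of the original driver: a minimal sketch of
 * the port key layout manipulated by ad_update_actor_keys() above.  Bit 0 of
 * the key carries the duplex flag and the following bits carry the speed
 * index returned by __get_link_speed(); the real mask names live in
 * bond_3ad.h.  The helper name is hypothetical and unused by the driver.
 */
static inline u16 bond_3ad_example_port_key(u16 speed_index, u8 duplex)
{
	/* mirrors "(speed << 1) | duplex" in ad_update_actor_keys() */
	return (speed_index << 1) | (duplex & 0x1);
}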
/**
* bond_3ad_adapter_speed_duplex_changed - handle a slave's speed / duplex
* change indication
*
* @slave: slave struct to work on
*
* Handle reselection of aggregator (if needed) for this port.
*/
void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
{
struct port *port;
port = &(SLAVE_AD_INFO(slave)->port);
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
slave_warn(slave->bond->dev, slave->dev,
"speed/duplex changed for uninitialized port\n");
return;
}
spin_lock_bh(&slave->bond->mode_lock);
ad_update_actor_keys(port, false);
spin_unlock_bh(&slave->bond->mode_lock);
slave_dbg(slave->bond->dev, slave->dev, "Port %d changed speed/duplex\n",
port->actor_port_number);
}
/**
* bond_3ad_handle_link_change - handle a slave's link status change indication
* @slave: slave struct to work on
* @link: whether the link is now up or down
*
* Handle reselection of aggregator (if needed) for this port.
*/
void bond_3ad_handle_link_change(struct slave *slave, char link)
{
struct aggregator *agg;
struct port *port;
bool dummy;
port = &(SLAVE_AD_INFO(slave)->port);
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
slave_warn(slave->bond->dev, slave->dev, "link status changed for uninitialized port\n");
return;
}
spin_lock_bh(&slave->bond->mode_lock);
	/* On link down we zero the duplex and speed since some of the
	 * adaptors (e.g. ce1000.lan) report full duplex/speed instead of
	 * N/A (duplex) / 0 (speed).
	 *
	 * On link up we force a recheck of the duplex and speed since
	 * some of the adaptors (e.g. ce1000.lan) do not report them
	 * correctly on their own.
	 */
if (link == BOND_LINK_UP) {
port->is_enabled = true;
ad_update_actor_keys(port, false);
} else {
/* link has failed */
port->is_enabled = false;
ad_update_actor_keys(port, true);
}
agg = __get_first_agg(port);
ad_agg_selection_logic(agg, &dummy);
spin_unlock_bh(&slave->bond->mode_lock);
slave_dbg(slave->bond->dev, slave->dev, "Port %d changed link status to %s\n",
port->actor_port_number,
link == BOND_LINK_UP ? "UP" : "DOWN");
/* RTNL is held and mode_lock is released so it's safe
* to update slave_array here.
*/
bond_update_slave_arr(slave->bond, NULL);
}
/**
* bond_3ad_set_carrier - set link state for bonding master
* @bond: bonding structure
*
 * If we have an active aggregator, we're up; if not, we're down.
* Presumes that we cannot have an active aggregator if there are
* no slaves with link up.
*
* This behavior complies with IEEE 802.3 section 43.3.9.
*
* Called by bond_set_carrier(). Return zero if carrier state does not
* change, nonzero if it does.
*/
int bond_3ad_set_carrier(struct bonding *bond)
{
struct aggregator *active;
struct slave *first_slave;
int ret = 1;
rcu_read_lock();
first_slave = bond_first_slave_rcu(bond);
if (!first_slave) {
ret = 0;
goto out;
}
active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
if (active) {
/* are enough slaves available to consider link up? */
if (__agg_active_ports(active) < bond->params.min_links) {
if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
goto out;
}
} else if (!netif_carrier_ok(bond->dev)) {
netif_carrier_on(bond->dev);
goto out;
}
} else if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
}
out:
rcu_read_unlock();
return ret;
}
/**
* __bond_3ad_get_active_agg_info - get information of the active aggregator
* @bond: bonding struct to work on
* @ad_info: ad_info struct to fill with the bond's info
*
* Returns: 0 on success
* < 0 on error
*/
int __bond_3ad_get_active_agg_info(struct bonding *bond,
struct ad_info *ad_info)
{
struct aggregator *aggregator = NULL;
struct list_head *iter;
struct slave *slave;
struct port *port;
bond_for_each_slave_rcu(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave)->port);
if (port->aggregator && port->aggregator->is_active) {
aggregator = port->aggregator;
break;
}
}
if (!aggregator)
return -1;
ad_info->aggregator_id = aggregator->aggregator_identifier;
ad_info->ports = __agg_active_ports(aggregator);
ad_info->actor_key = aggregator->actor_oper_aggregator_key;
ad_info->partner_key = aggregator->partner_oper_aggregator_key;
ether_addr_copy(ad_info->partner_system,
aggregator->partner_system.mac_addr_value);
return 0;
}
int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
{
int ret;
rcu_read_lock();
ret = __bond_3ad_get_active_agg_info(bond, ad_info);
rcu_read_unlock();
return ret;
}
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct lacpdu *lacpdu, _lacpdu;
if (skb->protocol != PKT_TYPE_LACPDU)
return RX_HANDLER_ANOTHER;
if (!MAC_ADDRESS_EQUAL(eth_hdr(skb)->h_dest, lacpdu_mcast_addr))
return RX_HANDLER_ANOTHER;
lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
if (!lacpdu) {
atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_illegal_rx);
atomic64_inc(&BOND_AD_INFO(bond).stats.lacpdu_illegal_rx);
return RX_HANDLER_ANOTHER;
}
return bond_3ad_rx_indication(lacpdu, slave);
}
/**
* bond_3ad_update_lacp_rate - change the lacp rate
* @bond: bonding struct
*
 * When the lacp_rate parameter is modified via sysfs,
 * update the actor_oper_port_state of each port.
 *
 * Hold bond->mode_lock so we can modify
 * port->actor_oper_port_state no matter whether
 * the bond is up or down.
*/
void bond_3ad_update_lacp_rate(struct bonding *bond)
{
struct port *port = NULL;
struct list_head *iter;
struct slave *slave;
int lacp_fast;
lacp_fast = bond->params.lacp_fast;
spin_lock_bh(&bond->mode_lock);
bond_for_each_slave(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave)->port);
if (lacp_fast)
port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
else
port->actor_oper_port_state &= ~LACP_STATE_LACP_TIMEOUT;
}
spin_unlock_bh(&bond->mode_lock);
}
size_t bond_3ad_stats_size(void)
{
return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_TX */
nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_UNKNOWN_RX */
nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_ILLEGAL_RX */
nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RX */
nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_TX */
nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RESP_RX */
nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RESP_TX */
nla_total_size_64bit(sizeof(u64)); /* BOND_3AD_STAT_MARKER_UNKNOWN_RX */
}
int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats)
{
u64 val;
val = atomic64_read(&stats->lacpdu_rx);
if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_RX, val,
BOND_3AD_STAT_PAD))
return -EMSGSIZE;
val = atomic64_read(&stats->lacpdu_tx);
if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_TX, val,
BOND_3AD_STAT_PAD))
return -EMSGSIZE;
val = atomic64_read(&stats->lacpdu_unknown_rx);
if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_UNKNOWN_RX, val,
BOND_3AD_STAT_PAD))
return -EMSGSIZE;
val = atomic64_read(&stats->lacpdu_illegal_rx);
if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_ILLEGAL_RX, val,
BOND_3AD_STAT_PAD))
return -EMSGSIZE;
val = atomic64_read(&stats->marker_rx);
if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RX, val,
BOND_3AD_STAT_PAD))
return -EMSGSIZE;
val = atomic64_read(&stats->marker_tx);
if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_TX, val,
BOND_3AD_STAT_PAD))
return -EMSGSIZE;
val = atomic64_read(&stats->marker_resp_rx);
if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RESP_RX, val,
BOND_3AD_STAT_PAD))
return -EMSGSIZE;
val = atomic64_read(&stats->marker_resp_tx);
if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RESP_TX, val,
BOND_3AD_STAT_PAD))
return -EMSGSIZE;
val = atomic64_read(&stats->marker_unknown_rx);
if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_UNKNOWN_RX, val,
BOND_3AD_STAT_PAD))
return -EMSGSIZE;
return 0;
}
| linux-master | drivers/net/bonding/bond_3ad.c |
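/* Editorial example, not part of the kernel sources above: a small userspace
 * sketch that reads the 802.3ad state the driver exports through sysfs -- the
 * same fields __bond_3ad_get_active_agg_info() fills in, plus the min_links /
 * mii_status values that drive bond_3ad_set_carrier().  The bond name "bond0"
 * and the attribute names are assumptions; adjust them for your system.
 */
#include <stdio.h>

static void print_bond_attr(const char *bond, const char *attr)
{
	char path[256], buf[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/net/%s/bonding/%s", bond, attr);
	f = fopen(path, "r");
	if (!f) {
		printf("%-16s <unavailable>\n", attr);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-16s %s", attr, buf);	/* sysfs values end in '\n' */
	fclose(f);
}

int main(void)
{
	const char *attrs[] = {
		"mii_status", "min_links", "ad_aggregator", "ad_num_ports",
		"ad_actor_key", "ad_partner_key", "ad_partner_mac",
	};
	size_t i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		print_bond_attr("bond0", attrs[i]);
	return 0;
}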
// SPDX-License-Identifier: GPL-2.0
#include <linux/proc_fs.h>
#include <linux/ethtool.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/bonding.h>
#include "bonding_priv.h"
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
loff_t off = 0;
rcu_read_lock();
if (*pos == 0)
return SEQ_START_TOKEN;
bond_for_each_slave_rcu(bond, slave, iter)
if (++off == *pos)
return slave;
return NULL;
}
static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
bool found = false;
++*pos;
if (v == SEQ_START_TOKEN)
return bond_first_slave_rcu(bond);
bond_for_each_slave_rcu(bond, slave, iter) {
if (found)
return slave;
if (slave == v)
found = true;
}
return NULL;
}
static void bond_info_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static void bond_info_show_master(struct seq_file *seq)
{
struct bonding *bond = pde_data(file_inode(seq->file));
const struct bond_opt_value *optval;
struct slave *curr, *primary;
int i;
curr = rcu_dereference(bond->curr_active_slave);
seq_printf(seq, "Bonding Mode: %s",
bond_mode_name(BOND_MODE(bond)));
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
bond->params.fail_over_mac) {
optval = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
bond->params.fail_over_mac);
seq_printf(seq, " (fail_over_mac %s)", optval->string);
}
seq_printf(seq, "\n");
if (bond_mode_uses_xmit_hash(bond)) {
optval = bond_opt_get_val(BOND_OPT_XMIT_HASH,
bond->params.xmit_policy);
seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
optval->string, bond->params.xmit_policy);
}
if (bond_uses_primary(bond)) {
primary = rcu_dereference(bond->primary_slave);
seq_printf(seq, "Primary Slave: %s",
primary ? primary->dev->name : "None");
if (primary) {
optval = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
bond->params.primary_reselect);
seq_printf(seq, " (primary_reselect %s)",
optval->string);
}
seq_printf(seq, "\nCurrently Active Slave: %s\n",
(curr) ? curr->dev->name : "None");
}
seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ?
"up" : "down");
seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
seq_printf(seq, "Up Delay (ms): %d\n",
bond->params.updelay * bond->params.miimon);
seq_printf(seq, "Down Delay (ms): %d\n",
bond->params.downdelay * bond->params.miimon);
seq_printf(seq, "Peer Notification Delay (ms): %d\n",
bond->params.peer_notif_delay * bond->params.miimon);
/* ARP information */
if (bond->params.arp_interval > 0) {
int printed = 0;
seq_printf(seq, "ARP Polling Interval (ms): %d\n",
bond->params.arp_interval);
seq_printf(seq, "ARP Missed Max: %u\n",
bond->params.missed_max);
seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
if (!bond->params.arp_targets[i])
break;
if (printed)
seq_printf(seq, ",");
seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
printed = 1;
}
seq_printf(seq, "\n");
#if IS_ENABLED(CONFIG_IPV6)
printed = 0;
seq_printf(seq, "NS IPv6 target/s (xx::xx form):");
for (i = 0; (i < BOND_MAX_NS_TARGETS); i++) {
if (ipv6_addr_any(&bond->params.ns_targets[i]))
break;
if (printed)
seq_printf(seq, ",");
seq_printf(seq, " %pI6c", &bond->params.ns_targets[i]);
printed = 1;
}
seq_printf(seq, "\n");
#endif
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
seq_puts(seq, "\n802.3ad info\n");
seq_printf(seq, "LACP active: %s\n",
(bond->params.lacp_active) ? "on" : "off");
seq_printf(seq, "LACP rate: %s\n",
(bond->params.lacp_fast) ? "fast" : "slow");
seq_printf(seq, "Min links: %d\n", bond->params.min_links);
optval = bond_opt_get_val(BOND_OPT_AD_SELECT,
bond->params.ad_select);
seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
optval->string);
if (capable(CAP_NET_ADMIN)) {
seq_printf(seq, "System priority: %d\n",
BOND_AD_INFO(bond).system.sys_priority);
seq_printf(seq, "System MAC address: %pM\n",
&BOND_AD_INFO(bond).system.sys_mac_addr);
if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
seq_printf(seq,
"bond %s has no active aggregator\n",
bond->dev->name);
} else {
seq_printf(seq, "Active Aggregator Info:\n");
seq_printf(seq, "\tAggregator ID: %d\n",
ad_info.aggregator_id);
seq_printf(seq, "\tNumber of ports: %d\n",
ad_info.ports);
seq_printf(seq, "\tActor Key: %d\n",
ad_info.actor_key);
seq_printf(seq, "\tPartner Key: %d\n",
ad_info.partner_key);
seq_printf(seq, "\tPartner Mac Address: %pM\n",
ad_info.partner_system);
}
}
}
}
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
struct bonding *bond = pde_data(file_inode(seq->file));
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
if (slave->speed == SPEED_UNKNOWN)
seq_printf(seq, "Speed: %s\n", "Unknown");
else
seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
if (slave->duplex == DUPLEX_UNKNOWN)
seq_printf(seq, "Duplex: %s\n", "Unknown");
else
seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
seq_printf(seq, "Link Failure Count: %u\n",
slave->link_failure_count);
seq_printf(seq, "Permanent HW addr: %*phC\n",
slave->dev->addr_len, slave->perm_hwaddr);
seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
const struct port *port = &SLAVE_AD_INFO(slave)->port;
const struct aggregator *agg = port->aggregator;
if (agg) {
seq_printf(seq, "Aggregator ID: %d\n",
agg->aggregator_identifier);
seq_printf(seq, "Actor Churn State: %s\n",
bond_3ad_churn_desc(port->sm_churn_actor_state));
seq_printf(seq, "Partner Churn State: %s\n",
bond_3ad_churn_desc(port->sm_churn_partner_state));
seq_printf(seq, "Actor Churned Count: %d\n",
port->churn_actor_count);
seq_printf(seq, "Partner Churned Count: %d\n",
port->churn_partner_count);
if (capable(CAP_NET_ADMIN)) {
seq_puts(seq, "details actor lacp pdu:\n");
seq_printf(seq, " system priority: %d\n",
port->actor_system_priority);
seq_printf(seq, " system mac address: %pM\n",
&port->actor_system);
seq_printf(seq, " port key: %d\n",
port->actor_oper_port_key);
seq_printf(seq, " port priority: %d\n",
port->actor_port_priority);
seq_printf(seq, " port number: %d\n",
port->actor_port_number);
seq_printf(seq, " port state: %d\n",
port->actor_oper_port_state);
seq_puts(seq, "details partner lacp pdu:\n");
seq_printf(seq, " system priority: %d\n",
port->partner_oper.system_priority);
seq_printf(seq, " system mac address: %pM\n",
&port->partner_oper.system);
seq_printf(seq, " oper key: %d\n",
port->partner_oper.key);
seq_printf(seq, " port priority: %d\n",
port->partner_oper.port_priority);
seq_printf(seq, " port number: %d\n",
port->partner_oper.port_number);
seq_printf(seq, " port state: %d\n",
port->partner_oper.port_state);
}
} else {
seq_puts(seq, "Aggregator ID: N/A\n");
}
}
}
static int bond_info_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "%s\n", bond_version);
bond_info_show_master(seq);
} else
bond_info_show_slave(seq, v);
return 0;
}
static const struct seq_operations bond_info_seq_ops = {
.start = bond_info_seq_start,
.next = bond_info_seq_next,
.stop = bond_info_seq_stop,
.show = bond_info_seq_show,
};
void bond_create_proc_entry(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
if (bn->proc_dir) {
bond->proc_entry = proc_create_seq_data(bond_dev->name, 0444,
bn->proc_dir, &bond_info_seq_ops, bond);
if (bond->proc_entry == NULL)
netdev_warn(bond_dev, "Cannot create /proc/net/%s/%s\n",
DRV_NAME, bond_dev->name);
else
memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
}
}
void bond_remove_proc_entry(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
if (bn->proc_dir && bond->proc_entry) {
remove_proc_entry(bond->proc_file_name, bn->proc_dir);
memset(bond->proc_file_name, 0, IFNAMSIZ);
bond->proc_entry = NULL;
}
}
/* Create the bonding directory under /proc/net if it doesn't exist yet.
* Caller must hold rtnl_lock.
*/
void __net_init bond_create_proc_dir(struct bond_net *bn)
{
if (!bn->proc_dir) {
bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
if (!bn->proc_dir)
pr_warn("Warning: Cannot create /proc/net/%s\n",
DRV_NAME);
}
}
/* Destroy the bonding directory under /proc/net, if empty.
*/
void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
{
if (bn->proc_dir) {
remove_proc_entry(DRV_NAME, bn->net->proc_net);
bn->proc_dir = NULL;
}
}
| linux-master | drivers/net/bonding/bond_procfs.c |
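/* Editorial example, not part of the kernel sources above: a tiny userspace
 * reader for the seq_file registered by bond_create_proc_entry().  Each line
 * it prints is produced by bond_info_show_master() / bond_info_show_slave().
 * The default bond name "bond0" is an assumption.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *bond = argc > 1 ? argv[1] : "bond0";
	char path[128], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/net/bonding/%s", bond);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}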
// SPDX-License-Identifier: GPL-2.0-or-later
/* Sysfs attributes of bond slaves
*
* Copyright (c) 2014 Scott Feldman <[email protected]>
*/
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/bonding.h>
struct slave_attribute {
struct attribute attr;
ssize_t (*show)(struct slave *, char *);
};
#define SLAVE_ATTR_RO(_name) \
const struct slave_attribute slave_attr_##_name = __ATTR_RO(_name)
static ssize_t state_show(struct slave *slave, char *buf)
{
switch (bond_slave_state(slave)) {
case BOND_STATE_ACTIVE:
return sysfs_emit(buf, "active\n");
case BOND_STATE_BACKUP:
return sysfs_emit(buf, "backup\n");
default:
return sysfs_emit(buf, "UNKNOWN\n");
}
}
static SLAVE_ATTR_RO(state);
static ssize_t mii_status_show(struct slave *slave, char *buf)
{
return sysfs_emit(buf, "%s\n", bond_slave_link_status(slave->link));
}
static SLAVE_ATTR_RO(mii_status);
static ssize_t link_failure_count_show(struct slave *slave, char *buf)
{
return sysfs_emit(buf, "%d\n", slave->link_failure_count);
}
static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
return sysfs_emit(buf, "%*phC\n",
slave->dev->addr_len,
slave->perm_hwaddr);
}
static SLAVE_ATTR_RO(perm_hwaddr);
static ssize_t queue_id_show(struct slave *slave, char *buf)
{
return sysfs_emit(buf, "%d\n", slave->queue_id);
}
static SLAVE_ATTR_RO(queue_id);
static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
{
const struct aggregator *agg;
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg)
return sysfs_emit(buf, "%d\n",
agg->aggregator_identifier);
}
return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_aggregator_id);
static ssize_t ad_actor_oper_port_state_show(struct slave *slave, char *buf)
{
const struct port *ad_port;
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
return sysfs_emit(buf, "%u\n",
ad_port->actor_oper_port_state);
}
return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_actor_oper_port_state);
static ssize_t ad_partner_oper_port_state_show(struct slave *slave, char *buf)
{
const struct port *ad_port;
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
return sysfs_emit(buf, "%u\n",
ad_port->partner_oper.port_state);
}
return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_partner_oper_port_state);
static const struct attribute *slave_attrs[] = {
&slave_attr_state.attr,
&slave_attr_mii_status.attr,
&slave_attr_link_failure_count.attr,
&slave_attr_perm_hwaddr.attr,
&slave_attr_queue_id.attr,
&slave_attr_ad_aggregator_id.attr,
&slave_attr_ad_actor_oper_port_state.attr,
&slave_attr_ad_partner_oper_port_state.attr,
NULL
};
#define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr)
static ssize_t slave_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct slave_attribute *slave_attr = to_slave_attr(attr);
struct slave *slave = to_slave(kobj);
return slave_attr->show(slave, buf);
}
const struct sysfs_ops slave_sysfs_ops = {
.show = slave_show,
};
int bond_sysfs_slave_add(struct slave *slave)
{
return sysfs_create_files(&slave->kobj, slave_attrs);
}
void bond_sysfs_slave_del(struct slave *slave)
{
sysfs_remove_files(&slave->kobj, slave_attrs);
}
| linux-master | drivers/net/bonding/bond_sysfs_slave.c |
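/* Editorial example, not part of the kernel sources above: a userspace sketch
 * that reads the per-slave attributes registered by bond_sysfs_slave_add().
 * It assumes they are exposed under /sys/class/net/<slave>/bonding_slave/ and
 * that "eth0" is an enslaved interface; adjust both for your setup.
 */
#include <stdio.h>

static void print_slave_attr(const char *slave, const char *attr)
{
	char path[256], buf[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/net/%s/bonding_slave/%s",
		 slave, attr);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fgets(buf, sizeof(buf), f))
		printf("%-28s %s", attr, buf);
	fclose(f);
}

int main(void)
{
	/* same names as the slave_attrs[] table above */
	const char *attrs[] = {
		"state", "mii_status", "link_failure_count", "perm_hwaddr",
		"queue_id", "ad_aggregator_id", "ad_actor_oper_port_state",
		"ad_partner_oper_port_state",
	};
	size_t i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		print_slave_attr("eth0", attrs[i]);
	return 0;
}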
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers/net/bond/bond_netlink.c - Netlink interface for bonding
* Copyright (c) 2013 Jiri Pirko <[email protected]>
* Copyright (c) 2013 Scott Feldman <[email protected]>
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <net/bonding.h>
#include <net/ipv6.h>
static size_t bond_get_slave_size(const struct net_device *bond_dev,
const struct net_device *slave_dev)
{
return nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_STATE */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_MII_STATUS */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_SLAVE_LINK_FAILURE_COUNT */
nla_total_size(MAX_ADDR_LEN) + /* IFLA_BOND_SLAVE_PERM_HWADDR */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_QUEUE_ID */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE */
nla_total_size(sizeof(s32)) + /* IFLA_BOND_SLAVE_PRIO */
0;
}
static int bond_fill_slave_info(struct sk_buff *skb,
const struct net_device *bond_dev,
const struct net_device *slave_dev)
{
struct slave *slave = bond_slave_get_rtnl(slave_dev);
if (nla_put_u8(skb, IFLA_BOND_SLAVE_STATE, bond_slave_state(slave)))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_SLAVE_MII_STATUS, slave->link))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
slave->link_failure_count))
goto nla_put_failure;
if (nla_put(skb, IFLA_BOND_SLAVE_PERM_HWADDR,
slave_dev->addr_len, slave->perm_hwaddr))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
goto nla_put_failure;
if (nla_put_s32(skb, IFLA_BOND_SLAVE_PRIO, slave->prio))
goto nla_put_failure;
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
const struct aggregator *agg;
const struct port *ad_port;
ad_port = &SLAVE_AD_INFO(slave)->port;
agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg) {
if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
agg->aggregator_identifier))
goto nla_put_failure;
if (nla_put_u8(skb,
IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
ad_port->actor_oper_port_state))
goto nla_put_failure;
if (nla_put_u16(skb,
IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
ad_port->partner_oper.port_state))
goto nla_put_failure;
}
}
return 0;
nla_put_failure:
return -EMSGSIZE;
}
/* Limit the max delay range to 300s */
static struct netlink_range_validation delay_range = {
.max = 300000,
};
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MODE] = { .type = NLA_U8 },
[IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
[IFLA_BOND_MIIMON] = { .type = NLA_U32 },
[IFLA_BOND_UPDELAY] = { .type = NLA_U32 },
[IFLA_BOND_DOWNDELAY] = { .type = NLA_U32 },
[IFLA_BOND_USE_CARRIER] = { .type = NLA_U8 },
[IFLA_BOND_ARP_INTERVAL] = { .type = NLA_U32 },
[IFLA_BOND_ARP_IP_TARGET] = { .type = NLA_NESTED },
[IFLA_BOND_ARP_VALIDATE] = { .type = NLA_U32 },
[IFLA_BOND_ARP_ALL_TARGETS] = { .type = NLA_U32 },
[IFLA_BOND_PRIMARY] = { .type = NLA_U32 },
[IFLA_BOND_PRIMARY_RESELECT] = { .type = NLA_U8 },
[IFLA_BOND_FAIL_OVER_MAC] = { .type = NLA_U8 },
[IFLA_BOND_XMIT_HASH_POLICY] = { .type = NLA_U8 },
[IFLA_BOND_RESEND_IGMP] = { .type = NLA_U32 },
[IFLA_BOND_NUM_PEER_NOTIF] = { .type = NLA_U8 },
[IFLA_BOND_ALL_SLAVES_ACTIVE] = { .type = NLA_U8 },
[IFLA_BOND_MIN_LINKS] = { .type = NLA_U32 },
[IFLA_BOND_LP_INTERVAL] = { .type = NLA_U32 },
[IFLA_BOND_PACKETS_PER_SLAVE] = { .type = NLA_U32 },
[IFLA_BOND_AD_LACP_ACTIVE] = { .type = NLA_U8 },
[IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
[IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
[IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
[IFLA_BOND_AD_ACTOR_SYS_PRIO] = { .type = NLA_U16 },
[IFLA_BOND_AD_USER_PORT_KEY] = { .type = NLA_U16 },
[IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
[IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
[IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
[IFLA_BOND_SLAVE_QUEUE_ID] = { .type = NLA_U16 },
[IFLA_BOND_SLAVE_PRIO] = { .type = NLA_S32 },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
return -EINVAL;
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
return -EADDRNOTAVAIL;
}
return 0;
}
static int bond_slave_changelink(struct net_device *bond_dev,
struct net_device *slave_dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct bonding *bond = netdev_priv(bond_dev);
struct bond_opt_value newval;
int err;
if (!data)
return 0;
if (data[IFLA_BOND_SLAVE_QUEUE_ID]) {
u16 queue_id = nla_get_u16(data[IFLA_BOND_SLAVE_QUEUE_ID]);
char queue_id_str[IFNAMSIZ + 7];
/* queue_id option setting expects slave_name:queue_id */
snprintf(queue_id_str, sizeof(queue_id_str), "%s:%u\n",
slave_dev->name, queue_id);
bond_opt_initstr(&newval, queue_id_str);
err = __bond_opt_set(bond, BOND_OPT_QUEUE_ID, &newval,
data[IFLA_BOND_SLAVE_QUEUE_ID], extack);
if (err)
return err;
}
if (data[IFLA_BOND_SLAVE_PRIO]) {
int prio = nla_get_s32(data[IFLA_BOND_SLAVE_PRIO]);
bond_opt_slave_initval(&newval, &slave_dev, prio);
err = __bond_opt_set(bond, BOND_OPT_PRIO, &newval,
data[IFLA_BOND_SLAVE_PRIO], extack);
if (err)
return err;
}
return 0;
}
static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct bonding *bond = netdev_priv(bond_dev);
struct bond_opt_value newval;
int miimon = 0;
int err;
if (!data)
return 0;
if (data[IFLA_BOND_MODE]) {
int mode = nla_get_u8(data[IFLA_BOND_MODE]);
bond_opt_initval(&newval, mode);
err = __bond_opt_set(bond, BOND_OPT_MODE, &newval,
data[IFLA_BOND_MODE], extack);
if (err)
return err;
}
if (data[IFLA_BOND_ACTIVE_SLAVE]) {
int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
struct net_device *slave_dev;
char *active_slave = "";
if (ifindex != 0) {
slave_dev = __dev_get_by_index(dev_net(bond_dev),
ifindex);
if (!slave_dev)
return -ENODEV;
active_slave = slave_dev->name;
}
bond_opt_initstr(&newval, active_slave);
err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval,
data[IFLA_BOND_ACTIVE_SLAVE], extack);
if (err)
return err;
}
if (data[IFLA_BOND_MIIMON]) {
miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
bond_opt_initval(&newval, miimon);
err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval,
data[IFLA_BOND_MIIMON], extack);
if (err)
return err;
}
if (data[IFLA_BOND_UPDELAY]) {
int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
bond_opt_initval(&newval, updelay);
err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval,
data[IFLA_BOND_UPDELAY], extack);
if (err)
return err;
}
if (data[IFLA_BOND_DOWNDELAY]) {
int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
bond_opt_initval(&newval, downdelay);
err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval,
data[IFLA_BOND_DOWNDELAY], extack);
if (err)
return err;
}
if (data[IFLA_BOND_PEER_NOTIF_DELAY]) {
int delay = nla_get_u32(data[IFLA_BOND_PEER_NOTIF_DELAY]);
bond_opt_initval(&newval, delay);
err = __bond_opt_set(bond, BOND_OPT_PEER_NOTIF_DELAY, &newval,
data[IFLA_BOND_PEER_NOTIF_DELAY], extack);
if (err)
return err;
}
if (data[IFLA_BOND_USE_CARRIER]) {
int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
bond_opt_initval(&newval, use_carrier);
err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval,
data[IFLA_BOND_USE_CARRIER], extack);
if (err)
return err;
}
if (data[IFLA_BOND_ARP_INTERVAL]) {
int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
if (arp_interval && miimon) {
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_ARP_INTERVAL],
"ARP monitoring cannot be used with MII monitoring");
return -EINVAL;
}
bond_opt_initval(&newval, arp_interval);
err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval,
data[IFLA_BOND_ARP_INTERVAL], extack);
if (err)
return err;
}
if (data[IFLA_BOND_ARP_IP_TARGET]) {
struct nlattr *attr;
int i = 0, rem;
bond_option_arp_ip_targets_clear(bond);
nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
__be32 target;
if (nla_len(attr) < sizeof(target))
return -EINVAL;
target = nla_get_be32(attr);
bond_opt_initval(&newval, (__force u64)target);
err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
&newval,
data[IFLA_BOND_ARP_IP_TARGET],
extack);
if (err)
break;
i++;
}
if (i == 0 && bond->params.arp_interval)
netdev_warn(bond->dev, "Removing last arp target with arp_interval on\n");
if (err)
return err;
}
#if IS_ENABLED(CONFIG_IPV6)
if (data[IFLA_BOND_NS_IP6_TARGET]) {
struct nlattr *attr;
int i = 0, rem;
bond_option_ns_ip6_targets_clear(bond);
nla_for_each_nested(attr, data[IFLA_BOND_NS_IP6_TARGET], rem) {
struct in6_addr addr6;
if (nla_len(attr) < sizeof(addr6)) {
NL_SET_ERR_MSG(extack, "Invalid IPv6 address");
return -EINVAL;
}
addr6 = nla_get_in6_addr(attr);
bond_opt_initextra(&newval, &addr6, sizeof(addr6));
err = __bond_opt_set(bond, BOND_OPT_NS_TARGETS,
&newval,
data[IFLA_BOND_NS_IP6_TARGET],
extack);
if (err)
break;
i++;
}
if (i == 0 && bond->params.arp_interval)
netdev_warn(bond->dev, "Removing last ns target with arp_interval on\n");
if (err)
return err;
}
#endif
if (data[IFLA_BOND_ARP_VALIDATE]) {
int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
if (arp_validate && miimon) {
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_ARP_INTERVAL],
"ARP validating cannot be used with MII monitoring");
return -EINVAL;
}
bond_opt_initval(&newval, arp_validate);
err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval,
data[IFLA_BOND_ARP_VALIDATE], extack);
if (err)
return err;
}
if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
int arp_all_targets =
nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
bond_opt_initval(&newval, arp_all_targets);
err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval,
data[IFLA_BOND_ARP_ALL_TARGETS], extack);
if (err)
return err;
}
if (data[IFLA_BOND_PRIMARY]) {
int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
struct net_device *dev;
char *primary = "";
dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
if (dev)
primary = dev->name;
bond_opt_initstr(&newval, primary);
err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval,
data[IFLA_BOND_PRIMARY], extack);
if (err)
return err;
}
if (data[IFLA_BOND_PRIMARY_RESELECT]) {
int primary_reselect =
nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
bond_opt_initval(&newval, primary_reselect);
err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval,
data[IFLA_BOND_PRIMARY_RESELECT], extack);
if (err)
return err;
}
if (data[IFLA_BOND_FAIL_OVER_MAC]) {
int fail_over_mac =
nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
bond_opt_initval(&newval, fail_over_mac);
err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval,
data[IFLA_BOND_FAIL_OVER_MAC], extack);
if (err)
return err;
}
if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
int xmit_hash_policy =
nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
bond_opt_initval(&newval, xmit_hash_policy);
err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval,
data[IFLA_BOND_XMIT_HASH_POLICY], extack);
if (err)
return err;
}
if (data[IFLA_BOND_RESEND_IGMP]) {
int resend_igmp =
nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
bond_opt_initval(&newval, resend_igmp);
err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval,
data[IFLA_BOND_RESEND_IGMP], extack);
if (err)
return err;
}
if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
int num_peer_notif =
nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
bond_opt_initval(&newval, num_peer_notif);
err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval,
data[IFLA_BOND_NUM_PEER_NOTIF], extack);
if (err)
return err;
}
if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
int all_slaves_active =
nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
bond_opt_initval(&newval, all_slaves_active);
err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval,
data[IFLA_BOND_ALL_SLAVES_ACTIVE], extack);
if (err)
return err;
}
if (data[IFLA_BOND_MIN_LINKS]) {
int min_links =
nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
bond_opt_initval(&newval, min_links);
err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval,
data[IFLA_BOND_MIN_LINKS], extack);
if (err)
return err;
}
if (data[IFLA_BOND_LP_INTERVAL]) {
int lp_interval =
nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
bond_opt_initval(&newval, lp_interval);
err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval,
data[IFLA_BOND_LP_INTERVAL], extack);
if (err)
return err;
}
if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
int packets_per_slave =
nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
bond_opt_initval(&newval, packets_per_slave);
err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval,
data[IFLA_BOND_PACKETS_PER_SLAVE], extack);
if (err)
return err;
}
if (data[IFLA_BOND_AD_LACP_ACTIVE]) {
int lacp_active = nla_get_u8(data[IFLA_BOND_AD_LACP_ACTIVE]);
bond_opt_initval(&newval, lacp_active);
err = __bond_opt_set(bond, BOND_OPT_LACP_ACTIVE, &newval,
data[IFLA_BOND_AD_LACP_ACTIVE], extack);
if (err)
return err;
}
if (data[IFLA_BOND_AD_LACP_RATE]) {
int lacp_rate =
nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
bond_opt_initval(&newval, lacp_rate);
err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval,
data[IFLA_BOND_AD_LACP_RATE], extack);
if (err)
return err;
}
if (data[IFLA_BOND_AD_SELECT]) {
int ad_select =
nla_get_u8(data[IFLA_BOND_AD_SELECT]);
bond_opt_initval(&newval, ad_select);
err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval,
data[IFLA_BOND_AD_SELECT], extack);
if (err)
return err;
}
if (data[IFLA_BOND_AD_ACTOR_SYS_PRIO]) {
int actor_sys_prio =
nla_get_u16(data[IFLA_BOND_AD_ACTOR_SYS_PRIO]);
bond_opt_initval(&newval, actor_sys_prio);
err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYS_PRIO, &newval,
data[IFLA_BOND_AD_ACTOR_SYS_PRIO], extack);
if (err)
return err;
}
if (data[IFLA_BOND_AD_USER_PORT_KEY]) {
int port_key =
nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
bond_opt_initval(&newval, port_key);
err = __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval,
data[IFLA_BOND_AD_USER_PORT_KEY], extack);
if (err)
return err;
}
if (data[IFLA_BOND_AD_ACTOR_SYSTEM]) {
if (nla_len(data[IFLA_BOND_AD_ACTOR_SYSTEM]) != ETH_ALEN)
return -EINVAL;
bond_opt_initval(&newval,
nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval,
data[IFLA_BOND_AD_ACTOR_SYSTEM], extack);
if (err)
return err;
}
if (data[IFLA_BOND_TLB_DYNAMIC_LB]) {
int dynamic_lb = nla_get_u8(data[IFLA_BOND_TLB_DYNAMIC_LB]);
bond_opt_initval(&newval, dynamic_lb);
err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval,
data[IFLA_BOND_TLB_DYNAMIC_LB], extack);
if (err)
return err;
}
if (data[IFLA_BOND_MISSED_MAX]) {
int missed_max = nla_get_u8(data[IFLA_BOND_MISSED_MAX]);
bond_opt_initval(&newval, missed_max);
err = __bond_opt_set(bond, BOND_OPT_MISSED_MAX, &newval,
data[IFLA_BOND_MISSED_MAX], extack);
if (err)
return err;
}
return 0;
}
static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
int err;
err = bond_changelink(bond_dev, tb, data, extack);
if (err < 0)
return err;
err = register_netdevice(bond_dev);
if (!err) {
struct bonding *bond = netdev_priv(bond_dev);
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
}
return err;
}
static size_t bond_get_size(const struct net_device *bond_dev)
{
return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_ACTIVE_SLAVE */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIIMON */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_UPDELAY */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_DOWNDELAY */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_USE_CARRIER */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_INTERVAL */
/* IFLA_BOND_ARP_IP_TARGET */
nla_total_size(sizeof(struct nlattr)) +
nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_VALIDATE */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_ALL_TARGETS */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_PRIMARY */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_PRIMARY_RESELECT */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_FAIL_OVER_MAC */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_XMIT_HASH_POLICY */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_RESEND_IGMP */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_NUM_PEER_NOTIF */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_ALL_SLAVES_ACTIVE */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_ACTIVE */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */
nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_ACTOR_SYS_PRIO */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */
nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_MISSED_MAX */
/* IFLA_BOND_NS_IP6_TARGET */
nla_total_size(sizeof(struct nlattr)) +
nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
0;
}
static int bond_option_active_slave_get_ifindex(struct bonding *bond)
{
const struct net_device *slave;
int ifindex;
rcu_read_lock();
slave = bond_option_active_slave_get_rcu(bond);
ifindex = slave ? slave->ifindex : 0;
rcu_read_unlock();
return ifindex;
}
static int bond_fill_info(struct sk_buff *skb,
const struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
unsigned int packets_per_slave;
int ifindex, i, targets_added;
struct nlattr *targets;
struct slave *primary;
if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
goto nla_put_failure;
ifindex = bond_option_active_slave_get_ifindex(bond);
if (ifindex && nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, ifindex))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
bond->params.updelay * bond->params.miimon))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
bond->params.downdelay * bond->params.miimon))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_PEER_NOTIF_DELAY,
bond->params.peer_notif_delay * bond->params.miimon))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
goto nla_put_failure;
targets = nla_nest_start_noflag(skb, IFLA_BOND_ARP_IP_TARGET);
if (!targets)
goto nla_put_failure;
targets_added = 0;
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
if (bond->params.arp_targets[i]) {
if (nla_put_be32(skb, i, bond->params.arp_targets[i]))
goto nla_put_failure;
targets_added = 1;
}
}
if (targets_added)
nla_nest_end(skb, targets);
else
nla_nest_cancel(skb, targets);
if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
bond->params.arp_all_targets))
goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
targets = nla_nest_start(skb, IFLA_BOND_NS_IP6_TARGET);
if (!targets)
goto nla_put_failure;
targets_added = 0;
for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
if (!ipv6_addr_any(&bond->params.ns_targets[i])) {
if (nla_put_in6_addr(skb, i, &bond->params.ns_targets[i]))
goto nla_put_failure;
targets_added = 1;
}
}
if (targets_added)
nla_nest_end(skb, targets);
else
nla_nest_cancel(skb, targets);
#endif
primary = rtnl_dereference(bond->primary_slave);
if (primary &&
nla_put_u32(skb, IFLA_BOND_PRIMARY, primary->dev->ifindex))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
bond->params.primary_reselect))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
bond->params.fail_over_mac))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
bond->params.xmit_policy))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
bond->params.resend_igmp))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
bond->params.num_peer_notif))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
bond->params.all_slaves_active))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
bond->params.min_links))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
bond->params.lp_interval))
goto nla_put_failure;
packets_per_slave = bond->params.packets_per_slave;
if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
packets_per_slave))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_AD_LACP_ACTIVE,
bond->params.lacp_active))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
bond->params.lacp_fast))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
bond->params.ad_select))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_TLB_DYNAMIC_LB,
bond->params.tlb_dynamic_lb))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_BOND_MISSED_MAX,
bond->params.missed_max))
goto nla_put_failure;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
if (capable(CAP_NET_ADMIN)) {
if (nla_put_u16(skb, IFLA_BOND_AD_ACTOR_SYS_PRIO,
bond->params.ad_actor_sys_prio))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_BOND_AD_USER_PORT_KEY,
bond->params.ad_user_port_key))
goto nla_put_failure;
if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
ETH_ALEN, &bond->params.ad_actor_system))
goto nla_put_failure;
}
if (!bond_3ad_get_active_agg_info(bond, &info)) {
struct nlattr *nest;
nest = nla_nest_start_noflag(skb, IFLA_BOND_AD_INFO);
if (!nest)
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
info.aggregator_id))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
info.ports))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
info.actor_key))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
info.partner_key))
goto nla_put_failure;
if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
sizeof(info.partner_system),
&info.partner_system))
goto nla_put_failure;
nla_nest_end(skb, nest);
}
}
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static size_t bond_get_linkxstats_size(const struct net_device *dev, int attr)
{
switch (attr) {
case IFLA_STATS_LINK_XSTATS:
case IFLA_STATS_LINK_XSTATS_SLAVE:
break;
default:
return 0;
}
return bond_3ad_stats_size() + nla_total_size(0);
}
static int bond_fill_linkxstats(struct sk_buff *skb,
const struct net_device *dev,
int *prividx, int attr)
{
struct nlattr *nla __maybe_unused;
struct slave *slave = NULL;
struct nlattr *nest, *nest2;
struct bonding *bond;
switch (attr) {
case IFLA_STATS_LINK_XSTATS:
bond = netdev_priv(dev);
break;
case IFLA_STATS_LINK_XSTATS_SLAVE:
slave = bond_slave_get_rtnl(dev);
if (!slave)
return 0;
bond = slave->bond;
break;
default:
return -EINVAL;
}
nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BOND);
if (!nest)
return -EMSGSIZE;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct bond_3ad_stats *stats;
if (slave)
stats = &SLAVE_AD_INFO(slave)->stats;
else
stats = &BOND_AD_INFO(bond).stats;
nest2 = nla_nest_start_noflag(skb, BOND_XSTATS_3AD);
if (!nest2) {
nla_nest_end(skb, nest);
return -EMSGSIZE;
}
if (bond_3ad_stats_fill(skb, stats)) {
nla_nest_cancel(skb, nest2);
nla_nest_end(skb, nest);
return -EMSGSIZE;
}
nla_nest_end(skb, nest2);
}
nla_nest_end(skb, nest);
return 0;
}
struct rtnl_link_ops bond_link_ops __read_mostly = {
.kind = "bond",
.priv_size = sizeof(struct bonding),
.setup = bond_setup,
.maxtype = IFLA_BOND_MAX,
.policy = bond_policy,
.validate = bond_validate,
.newlink = bond_newlink,
.changelink = bond_changelink,
.get_size = bond_get_size,
.fill_info = bond_fill_info,
.get_num_tx_queues = bond_get_num_tx_queues,
.get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
as for TX queues */
.fill_linkxstats = bond_fill_linkxstats,
.get_linkxstats_size = bond_get_linkxstats_size,
.slave_maxtype = IFLA_BOND_SLAVE_MAX,
.slave_policy = bond_slave_policy,
.slave_changelink = bond_slave_changelink,
.get_slave_size = bond_get_slave_size,
.fill_slave_info = bond_fill_slave_info,
};
int __init bond_netlink_init(void)
{
return rtnl_link_register(&bond_link_ops);
}
void bond_netlink_fini(void)
{
rtnl_link_unregister(&bond_link_ops);
}
MODULE_ALIAS_RTNL_LINK("bond");
| linux-master | drivers/net/bonding/bond_netlink.c |
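/* Editorial example, not part of the kernel sources above: the IFLA_BOND_*
 * attributes parsed by bond_changelink() and bond_slave_changelink() are
 * normally built by iproute2.  A minimal sketch driving it from C; the
 * interface names and option values are assumptions for illustration.
 */
#include <stdlib.h>

int main(void)
{
	/* RTM_NEWLINK carrying IFLA_BOND_MODE, IFLA_BOND_MIIMON and
	 * IFLA_BOND_AD_LACP_RATE, handled by bond_newlink()/bond_changelink()
	 */
	if (system("ip link add bond0 type bond mode 802.3ad miimon 100 lacp_rate fast"))
		return 1;
	/* enslaving; per-slave options such as queue_id or prio would be
	 * handled by bond_slave_changelink()
	 */
	if (system("ip link set eth0 down") || system("ip link set eth0 master bond0"))
		return 1;
	/* dumps the state serialized by bond_fill_info()/bond_fill_slave_info() */
	return system("ip -d link show bond0");
}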
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers/net/bond/bond_options.c - bonding options
* Copyright (c) 2013 Jiri Pirko <[email protected]>
* Copyright (c) 2013 Scott Feldman <[email protected]>
*/
#include <linux/errno.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/sched/signal.h>
#include <net/bonding.h>
static int bond_option_active_slave_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_miimon_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_updelay_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_downdelay_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_peer_notif_delay_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_use_carrier_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_arp_interval_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ns_ip6_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_arp_all_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_prio_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_primary_reselect_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_fail_over_mac_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_xmit_hash_policy_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_resend_igmp_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_num_peer_notif_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_all_slaves_active_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_min_links_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_lp_interval_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_pps_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_lacp_active_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_lacp_rate_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_select_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_queue_id_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_slaves_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_user_port_key_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_missed_max_set(struct bonding *bond,
const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
{ "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
{ "active-backup", BOND_MODE_ACTIVEBACKUP, 0},
{ "balance-xor", BOND_MODE_XOR, 0},
{ "broadcast", BOND_MODE_BROADCAST, 0},
{ "802.3ad", BOND_MODE_8023AD, 0},
{ "balance-tlb", BOND_MODE_TLB, 0},
{ "balance-alb", BOND_MODE_ALB, 0},
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_pps_tbl[] = {
{ "default", 1, BOND_VALFLAG_DEFAULT},
{ "maxval", USHRT_MAX, BOND_VALFLAG_MAX},
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_xmit_hashtype_tbl[] = {
{ "layer2", BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
{ "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
{ "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
{ "encap2+3", BOND_XMIT_POLICY_ENCAP23, 0},
{ "encap3+4", BOND_XMIT_POLICY_ENCAP34, 0},
{ "vlan+srcmac", BOND_XMIT_POLICY_VLAN_SRCMAC, 0},
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_arp_validate_tbl[] = {
{ "none", BOND_ARP_VALIDATE_NONE, BOND_VALFLAG_DEFAULT},
{ "active", BOND_ARP_VALIDATE_ACTIVE, 0},
{ "backup", BOND_ARP_VALIDATE_BACKUP, 0},
{ "all", BOND_ARP_VALIDATE_ALL, 0},
{ "filter", BOND_ARP_FILTER, 0},
{ "filter_active", BOND_ARP_FILTER_ACTIVE, 0},
{ "filter_backup", BOND_ARP_FILTER_BACKUP, 0},
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_arp_all_targets_tbl[] = {
{ "any", BOND_ARP_TARGETS_ANY, BOND_VALFLAG_DEFAULT},
{ "all", BOND_ARP_TARGETS_ALL, 0},
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
{ "none", BOND_FOM_NONE, BOND_VALFLAG_DEFAULT},
{ "active", BOND_FOM_ACTIVE, 0},
{ "follow", BOND_FOM_FOLLOW, 0},
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_intmax_tbl[] = {
{ "off", 0, BOND_VALFLAG_DEFAULT},
{ "maxval", INT_MAX, BOND_VALFLAG_MAX},
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_lacp_active[] = {
{ "off", 0, 0},
{ "on", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_lacp_rate_tbl[] = {
{ "slow", AD_LACP_SLOW, 0},
{ "fast", AD_LACP_FAST, 0},
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_ad_select_tbl[] = {
{ "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
{ "bandwidth", BOND_AD_BANDWIDTH, 0},
{ "count", BOND_AD_COUNT, 0},
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
{ "off", 0, 0},
{ "maxval", 255, BOND_VALFLAG_MAX},
{ "default", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_peer_notif_delay_tbl[] = {
{ "off", 0, 0},
{ "maxval", 300000, BOND_VALFLAG_MAX},
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_primary_reselect_tbl[] = {
{ "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
{ "better", BOND_PRI_RESELECT_BETTER, 0},
{ "failure", BOND_PRI_RESELECT_FAILURE, 0},
{ NULL, -1},
};
static const struct bond_opt_value bond_use_carrier_tbl[] = {
{ "off", 0, 0},
{ "on", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_all_slaves_active_tbl[] = {
{ "off", 0, BOND_VALFLAG_DEFAULT},
{ "on", 1, 0},
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_resend_igmp_tbl[] = {
{ "off", 0, 0},
{ "maxval", 255, BOND_VALFLAG_MAX},
{ "default", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_lp_interval_tbl[] = {
{ "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
{ "maxval", INT_MAX, BOND_VALFLAG_MAX},
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
{ "off", 0, 0},
{ "on", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_ad_actor_sys_prio_tbl[] = {
{ "minval", 1, BOND_VALFLAG_MIN},
{ "maxval", 65535, BOND_VALFLAG_MAX | BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
{ "minval", 0, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
{ "maxval", 1023, BOND_VALFLAG_MAX},
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_missed_max_tbl[] = {
{ "minval", 1, BOND_VALFLAG_MIN},
{ "maxval", 255, BOND_VALFLAG_MAX},
{ "default", 2, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0},
};
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
.name = "mode",
.desc = "bond device mode",
.flags = BOND_OPTFLAG_NOSLAVES | BOND_OPTFLAG_IFDOWN,
.values = bond_mode_tbl,
.set = bond_option_mode_set
},
[BOND_OPT_PACKETS_PER_SLAVE] = {
.id = BOND_OPT_PACKETS_PER_SLAVE,
.name = "packets_per_slave",
.desc = "Packets to send per slave in RR mode",
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ROUNDROBIN)),
.values = bond_pps_tbl,
.set = bond_option_pps_set
},
[BOND_OPT_XMIT_HASH] = {
.id = BOND_OPT_XMIT_HASH,
.name = "xmit_hash_policy",
.desc = "balance-xor, 802.3ad, and tlb hashing method",
.values = bond_xmit_hashtype_tbl,
.set = bond_option_xmit_hash_policy_set
},
[BOND_OPT_ARP_VALIDATE] = {
.id = BOND_OPT_ARP_VALIDATE,
.name = "arp_validate",
.desc = "validate src/dst of ARP probes",
.unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
BIT(BOND_MODE_ALB),
.values = bond_arp_validate_tbl,
.set = bond_option_arp_validate_set
},
[BOND_OPT_ARP_ALL_TARGETS] = {
.id = BOND_OPT_ARP_ALL_TARGETS,
.name = "arp_all_targets",
.desc = "fail on any/all arp targets timeout",
.values = bond_arp_all_targets_tbl,
.set = bond_option_arp_all_targets_set
},
[BOND_OPT_FAIL_OVER_MAC] = {
.id = BOND_OPT_FAIL_OVER_MAC,
.name = "fail_over_mac",
.desc = "For active-backup, do not set all slaves to the same MAC",
.flags = BOND_OPTFLAG_NOSLAVES,
.values = bond_fail_over_mac_tbl,
.set = bond_option_fail_over_mac_set
},
[BOND_OPT_ARP_INTERVAL] = {
.id = BOND_OPT_ARP_INTERVAL,
.name = "arp_interval",
.desc = "arp interval in milliseconds",
.unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
BIT(BOND_MODE_ALB),
.values = bond_intmax_tbl,
.set = bond_option_arp_interval_set
},
[BOND_OPT_MISSED_MAX] = {
.id = BOND_OPT_MISSED_MAX,
.name = "arp_missed_max",
.desc = "Maximum number of missed ARP interval",
.unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
BIT(BOND_MODE_ALB),
.values = bond_missed_max_tbl,
.set = bond_option_missed_max_set
},
[BOND_OPT_ARP_TARGETS] = {
.id = BOND_OPT_ARP_TARGETS,
.name = "arp_ip_target",
.desc = "arp targets in n.n.n.n form",
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_arp_ip_targets_set
},
[BOND_OPT_NS_TARGETS] = {
.id = BOND_OPT_NS_TARGETS,
.name = "ns_ip6_target",
.desc = "NS targets in ffff:ffff::ffff:ffff form",
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_ns_ip6_targets_set
},
[BOND_OPT_DOWNDELAY] = {
.id = BOND_OPT_DOWNDELAY,
.name = "downdelay",
.desc = "Delay before considering link down, in milliseconds",
.values = bond_intmax_tbl,
.set = bond_option_downdelay_set
},
[BOND_OPT_UPDELAY] = {
.id = BOND_OPT_UPDELAY,
.name = "updelay",
.desc = "Delay before considering link up, in milliseconds",
.values = bond_intmax_tbl,
.set = bond_option_updelay_set
},
[BOND_OPT_LACP_ACTIVE] = {
.id = BOND_OPT_LACP_ACTIVE,
.name = "lacp_active",
.desc = "Send LACPDU frames with configured lacp rate or acts as speak when spoken to",
.flags = BOND_OPTFLAG_IFDOWN,
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
.values = bond_lacp_active,
.set = bond_option_lacp_active_set
},
[BOND_OPT_LACP_RATE] = {
.id = BOND_OPT_LACP_RATE,
.name = "lacp_rate",
.desc = "LACPDU tx rate to request from 802.3ad partner",
.flags = BOND_OPTFLAG_IFDOWN,
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
.values = bond_lacp_rate_tbl,
.set = bond_option_lacp_rate_set
},
[BOND_OPT_MINLINKS] = {
.id = BOND_OPT_MINLINKS,
.name = "min_links",
.desc = "Minimum number of available links before turning on carrier",
.values = bond_intmax_tbl,
.set = bond_option_min_links_set
},
[BOND_OPT_AD_SELECT] = {
.id = BOND_OPT_AD_SELECT,
.name = "ad_select",
.desc = "803.ad aggregation selection logic",
.flags = BOND_OPTFLAG_IFDOWN,
.values = bond_ad_select_tbl,
.set = bond_option_ad_select_set
},
[BOND_OPT_NUM_PEER_NOTIF] = {
.id = BOND_OPT_NUM_PEER_NOTIF,
.name = "num_unsol_na",
.desc = "Number of peer notifications to send on failover event",
.values = bond_num_peer_notif_tbl,
.set = bond_option_num_peer_notif_set
},
[BOND_OPT_MIIMON] = {
.id = BOND_OPT_MIIMON,
.name = "miimon",
.desc = "Link check interval in milliseconds",
.values = bond_intmax_tbl,
.set = bond_option_miimon_set
},
[BOND_OPT_PRIO] = {
.id = BOND_OPT_PRIO,
.name = "prio",
.desc = "Link priority for failover re-selection",
.flags = BOND_OPTFLAG_RAWVAL,
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
BIT(BOND_MODE_TLB) |
BIT(BOND_MODE_ALB)),
.set = bond_option_prio_set
},
[BOND_OPT_PRIMARY] = {
.id = BOND_OPT_PRIMARY,
.name = "primary",
.desc = "Primary network device to use",
.flags = BOND_OPTFLAG_RAWVAL,
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
BIT(BOND_MODE_TLB) |
BIT(BOND_MODE_ALB)),
.set = bond_option_primary_set
},
[BOND_OPT_PRIMARY_RESELECT] = {
.id = BOND_OPT_PRIMARY_RESELECT,
.name = "primary_reselect",
.desc = "Reselect primary slave once it comes up",
.values = bond_primary_reselect_tbl,
.set = bond_option_primary_reselect_set
},
[BOND_OPT_USE_CARRIER] = {
.id = BOND_OPT_USE_CARRIER,
.name = "use_carrier",
.desc = "Use netif_carrier_ok (vs MII ioctls) in miimon",
.values = bond_use_carrier_tbl,
.set = bond_option_use_carrier_set
},
[BOND_OPT_ACTIVE_SLAVE] = {
.id = BOND_OPT_ACTIVE_SLAVE,
.name = "active_slave",
.desc = "Currently active slave",
.flags = BOND_OPTFLAG_RAWVAL,
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
BIT(BOND_MODE_TLB) |
BIT(BOND_MODE_ALB)),
.set = bond_option_active_slave_set
},
[BOND_OPT_QUEUE_ID] = {
.id = BOND_OPT_QUEUE_ID,
.name = "queue_id",
.desc = "Set queue id of a slave",
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_queue_id_set
},
[BOND_OPT_ALL_SLAVES_ACTIVE] = {
.id = BOND_OPT_ALL_SLAVES_ACTIVE,
.name = "all_slaves_active",
.desc = "Keep all frames received on an interface by setting active flag for all slaves",
.values = bond_all_slaves_active_tbl,
.set = bond_option_all_slaves_active_set
},
[BOND_OPT_RESEND_IGMP] = {
.id = BOND_OPT_RESEND_IGMP,
.name = "resend_igmp",
.desc = "Number of IGMP membership reports to send on link failure",
.values = bond_resend_igmp_tbl,
.set = bond_option_resend_igmp_set
},
[BOND_OPT_LP_INTERVAL] = {
.id = BOND_OPT_LP_INTERVAL,
.name = "lp_interval",
.desc = "The number of seconds between instances where the bonding driver sends learning packets to each slave's peer switch",
.values = bond_lp_interval_tbl,
.set = bond_option_lp_interval_set
},
[BOND_OPT_SLAVES] = {
.id = BOND_OPT_SLAVES,
.name = "slaves",
.desc = "Slave membership management",
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_slaves_set
},
[BOND_OPT_TLB_DYNAMIC_LB] = {
.id = BOND_OPT_TLB_DYNAMIC_LB,
.name = "tlb_dynamic_lb",
.desc = "Enable dynamic flow shuffling",
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB) | BIT(BOND_MODE_ALB)),
.values = bond_tlb_dynamic_lb_tbl,
.flags = BOND_OPTFLAG_IFDOWN,
.set = bond_option_tlb_dynamic_lb_set,
},
[BOND_OPT_AD_ACTOR_SYS_PRIO] = {
.id = BOND_OPT_AD_ACTOR_SYS_PRIO,
.name = "ad_actor_sys_prio",
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
.values = bond_ad_actor_sys_prio_tbl,
.set = bond_option_ad_actor_sys_prio_set,
},
[BOND_OPT_AD_ACTOR_SYSTEM] = {
.id = BOND_OPT_AD_ACTOR_SYSTEM,
.name = "ad_actor_system",
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_ad_actor_system_set,
},
[BOND_OPT_AD_USER_PORT_KEY] = {
.id = BOND_OPT_AD_USER_PORT_KEY,
.name = "ad_user_port_key",
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
.flags = BOND_OPTFLAG_IFDOWN,
.values = bond_ad_user_port_key_tbl,
.set = bond_option_ad_user_port_key_set,
},
[BOND_OPT_NUM_PEER_NOTIF_ALIAS] = {
.id = BOND_OPT_NUM_PEER_NOTIF_ALIAS,
.name = "num_grat_arp",
.desc = "Number of peer notifications to send on failover event",
.values = bond_num_peer_notif_tbl,
.set = bond_option_num_peer_notif_set
},
[BOND_OPT_PEER_NOTIF_DELAY] = {
.id = BOND_OPT_PEER_NOTIF_DELAY,
.name = "peer_notif_delay",
.desc = "Delay between each peer notification on failover event, in milliseconds",
.values = bond_peer_notif_delay_tbl,
.set = bond_option_peer_notif_delay_set
}
};
/* Searches for an option by name */
const struct bond_option *bond_opt_get_by_name(const char *name)
{
const struct bond_option *opt;
int option;
for (option = 0; option < BOND_OPT_LAST; option++) {
opt = bond_opt_get(option);
if (opt && !strcmp(opt->name, name))
return opt;
}
return NULL;
}
/* Searches for a value in opt's values[] table */
const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
{
const struct bond_option *opt;
int i;
opt = bond_opt_get(option);
if (WARN_ON(!opt))
return NULL;
for (i = 0; opt->values && opt->values[i].string; i++)
if (opt->values[i].value == val)
return &opt->values[i];
return NULL;
}
/* Searches for a value in opt's values[] table which matches the flagmask */
static const struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
u32 flagmask)
{
int i;
for (i = 0; opt->values && opt->values[i].string; i++)
if (opt->values[i].flags & flagmask)
return &opt->values[i];
return NULL;
}
/* If maxval is missing then there's no range to check. In case minval is
* missing then it's considered to be 0.
*/
static bool bond_opt_check_range(const struct bond_option *opt, u64 val)
{
const struct bond_opt_value *minval, *maxval;
minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
if (!maxval || (minval && val < minval->value) || val > maxval->value)
return false;
return true;
}
/**
* bond_opt_parse - parse option value
* @opt: the option to parse against
* @val: value to parse
*
* This function tries to extract the value from @val and check if it's
* a possible match for the option and returns NULL if a match isn't found,
* or the struct bond_opt_value that matched. It also strips the new line from
* @val->string if it's present.
*/
const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
struct bond_opt_value *val)
{
char *p, valstr[BOND_OPT_MAX_NAMELEN + 1] = { 0, };
const struct bond_opt_value *tbl;
const struct bond_opt_value *ret = NULL;
bool checkval;
int i, rv;
/* No parsing if the option wants a raw val */
if (opt->flags & BOND_OPTFLAG_RAWVAL)
return val;
tbl = opt->values;
if (!tbl)
goto out;
/* ULLONG_MAX is used to bypass string processing */
checkval = val->value != ULLONG_MAX;
if (!checkval) {
if (!val->string)
goto out;
p = strchr(val->string, '\n');
if (p)
*p = '\0';
for (p = val->string; *p; p++)
if (!(isdigit(*p) || isspace(*p)))
break;
/* The following code extracts the string to match or the value
* and sets checkval appropriately
*/
if (*p) {
rv = sscanf(val->string, "%32s", valstr);
} else {
rv = sscanf(val->string, "%llu", &val->value);
checkval = true;
}
if (!rv)
goto out;
}
for (i = 0; tbl[i].string; i++) {
/* Check for exact match */
if (checkval) {
if (val->value == tbl[i].value)
ret = &tbl[i];
} else {
if (!strcmp(valstr, "default") &&
(tbl[i].flags & BOND_VALFLAG_DEFAULT))
ret = &tbl[i];
if (!strcmp(valstr, tbl[i].string))
ret = &tbl[i];
}
/* Found an exact match */
if (ret)
goto out;
}
/* Possible range match */
if (checkval && bond_opt_check_range(opt, val->value))
ret = val;
out:
return ret;
}
/* Check opt's dependencies against bond mode and currently set options */
static int bond_opt_check_deps(struct bonding *bond,
const struct bond_option *opt)
{
struct bond_params *params = &bond->params;
if (test_bit(params->mode, &opt->unsuppmodes))
return -EACCES;
if ((opt->flags & BOND_OPTFLAG_NOSLAVES) && bond_has_slaves(bond))
return -ENOTEMPTY;
if ((opt->flags & BOND_OPTFLAG_IFDOWN) && (bond->dev->flags & IFF_UP))
return -EBUSY;
return 0;
}
static void bond_opt_dep_print(struct bonding *bond,
const struct bond_option *opt,
struct nlattr *bad_attr,
struct netlink_ext_ack *extack)
{
const struct bond_opt_value *modeval;
struct bond_params *params;
params = &bond->params;
modeval = bond_opt_get_val(BOND_OPT_MODE, params->mode);
if (test_bit(params->mode, &opt->unsuppmodes)) {
netdev_err(bond->dev, "option %s: mode dependency failed, not supported in mode %s(%llu)\n",
opt->name, modeval->string, modeval->value);
NL_SET_ERR_MSG_ATTR(extack, bad_attr,
"option not supported in mode");
}
}
static void bond_opt_error_interpret(struct bonding *bond,
const struct bond_option *opt,
int error, const struct bond_opt_value *val,
struct nlattr *bad_attr,
struct netlink_ext_ack *extack)
{
const struct bond_opt_value *minval, *maxval;
char *p;
switch (error) {
case -EINVAL:
NL_SET_ERR_MSG_ATTR(extack, bad_attr, "invalid option value");
if (val) {
if (val->string) {
/* sometimes RAWVAL opts may have new lines */
p = strchr(val->string, '\n');
if (p)
*p = '\0';
netdev_err(bond->dev, "option %s: invalid value (%s)\n",
opt->name, val->string);
} else {
netdev_err(bond->dev, "option %s: invalid value (%llu)\n",
opt->name, val->value);
}
}
minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
if (!maxval)
break;
netdev_err(bond->dev, "option %s: allowed values %llu - %llu\n",
opt->name, minval ? minval->value : 0, maxval->value);
break;
case -EACCES:
bond_opt_dep_print(bond, opt, bad_attr, extack);
break;
case -ENOTEMPTY:
NL_SET_ERR_MSG_ATTR(extack, bad_attr,
"unable to set option because the bond device has slaves");
netdev_err(bond->dev, "option %s: unable to set because the bond device has slaves\n",
opt->name);
break;
case -EBUSY:
NL_SET_ERR_MSG_ATTR(extack, bad_attr,
"unable to set option because the bond is up");
netdev_err(bond->dev, "option %s: unable to set because the bond device is up\n",
opt->name);
break;
case -ENODEV:
if (val && val->string) {
p = strchr(val->string, '\n');
if (p)
*p = '\0';
netdev_err(bond->dev, "option %s: interface %s does not exist!\n",
opt->name, val->string);
NL_SET_ERR_MSG_ATTR(extack, bad_attr,
"interface does not exist");
}
break;
default:
break;
}
}
/**
* __bond_opt_set - set a bonding option
* @bond: target bond device
* @option: option to set
* @val: value to set it to
* @bad_attr: netlink attribute that caused the error
* @extack: extended netlink error structure, used when an error message
* needs to be returned to the caller via netlink
*
* This function is used to change the bond's option value, it can be
* used for both enabling/changing an option and for disabling it. RTNL lock
* must be obtained before calling this function.
*/
int __bond_opt_set(struct bonding *bond,
unsigned int option, struct bond_opt_value *val,
struct nlattr *bad_attr, struct netlink_ext_ack *extack)
{
const struct bond_opt_value *retval = NULL;
const struct bond_option *opt;
int ret = -ENOENT;
ASSERT_RTNL();
opt = bond_opt_get(option);
if (WARN_ON(!val) || WARN_ON(!opt))
goto out;
ret = bond_opt_check_deps(bond, opt);
if (ret)
goto out;
retval = bond_opt_parse(opt, val);
if (!retval) {
ret = -EINVAL;
goto out;
}
ret = opt->set(bond, retval);
out:
if (ret)
bond_opt_error_interpret(bond, opt, ret, val, bad_attr, extack);
return ret;
}
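/* Illustrative sketch, not part of the original file: a typical caller of
 * __bond_opt_set() holds RTNL and hands in a bond_opt_value initialized via
 * the bond_opt_initval()/bond_opt_initstr() helpers from the bonding headers.
 * The wrapper name and the chosen option below are examples only, kept out of
 * the build with #if 0.
 */
#if 0
static int example_set_miimon(struct bonding *bond, u64 interval_ms)
{
	struct bond_opt_value newval;

	ASSERT_RTNL();
	bond_opt_initval(&newval, interval_ms);
	return __bond_opt_set(bond, BOND_OPT_MIIMON, &newval, NULL, NULL);
}
#endif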
/**
* __bond_opt_set_notify - set a bonding option
* @bond: target bond device
* @option: option to set
* @val: value to set it to
*
* This function is used to change the bond's option value and trigger
* a notification to user space. It can be used for both enabling/changing
* an option and for disabling it. RTNL lock must be obtained before calling
* this function.
*/
int __bond_opt_set_notify(struct bonding *bond,
unsigned int option, struct bond_opt_value *val)
{
int ret;
ASSERT_RTNL();
ret = __bond_opt_set(bond, option, val, NULL, NULL);
if (!ret && (bond->dev->reg_state == NETREG_REGISTERED))
call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
return ret;
}
/**
* bond_opt_tryset_rtnl - try to acquire rtnl and call __bond_opt_set
* @bond: target bond device
* @option: option to set
* @buf: value to set it to
*
* This function tries to acquire RTNL without blocking and if successful
* calls __bond_opt_set. It is mainly used for sysfs option manipulation.
*/
int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf)
{
struct bond_opt_value optval;
int ret;
if (!rtnl_trylock())
return restart_syscall();
bond_opt_initstr(&optval, buf);
ret = __bond_opt_set_notify(bond, option, &optval);
rtnl_unlock();
return ret;
}
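/* Illustrative sketch, not part of the original file: a sysfs ->store()
 * handler would typically pass the raw attribute buffer through
 * bond_opt_tryset_rtnl(), which retries via restart_syscall() when RTNL
 * cannot be taken without blocking. The handler name and buffer size below
 * are examples only, kept out of the build with #if 0.
 */
#if 0
static ssize_t example_option_store(struct bonding *bond, unsigned int option,
				    const char *buf, size_t count)
{
	char tmp[IFNAMSIZ + 32];
	int ret;

	strscpy(tmp, buf, sizeof(tmp));
	ret = bond_opt_tryset_rtnl(bond, option, tmp);
	return ret ? ret : count;
}
#endif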
/**
* bond_opt_get - get a pointer to an option
* @option: option for which to return a pointer
*
* This function checks if option is valid and if so returns a pointer
* to its entry in the bond_opts[] option array.
*/
const struct bond_option *bond_opt_get(unsigned int option)
{
if (!BOND_OPT_VALID(option))
return NULL;
return &bond_opts[option];
}
static bool bond_set_xfrm_features(struct bonding *bond)
{
if (!IS_ENABLED(CONFIG_XFRM_OFFLOAD))
return false;
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
bond->dev->wanted_features |= BOND_XFRM_FEATURES;
else
bond->dev->wanted_features &= ~BOND_XFRM_FEATURES;
return true;
}
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
if (!bond_mode_uses_arp(newval->value)) {
if (bond->params.arp_interval) {
netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
newval->string);
/* disable arp monitoring */
bond->params.arp_interval = 0;
}
if (!bond->params.miimon) {
/* set miimon to default value */
bond->params.miimon = BOND_DEFAULT_MIIMON;
netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
bond->params.miimon);
}
}
if (newval->value == BOND_MODE_ALB)
bond->params.tlb_dynamic_lb = 1;
/* don't cache arp_validate between modes */
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
bond->params.mode = newval->value;
if (bond->dev->reg_state == NETREG_REGISTERED) {
bool update = false;
update |= bond_set_xfrm_features(bond);
if (update)
netdev_update_features(bond->dev);
}
bond_xdp_set_features(bond->dev);
return 0;
}
static int bond_option_active_slave_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
char ifname[IFNAMSIZ] = { 0, };
struct net_device *slave_dev;
int ret = 0;
sscanf(newval->string, "%15s", ifname); /* IFNAMSIZ */
if (!strlen(ifname) || newval->string[0] == '\n') {
slave_dev = NULL;
} else {
slave_dev = __dev_get_by_name(dev_net(bond->dev), ifname);
if (!slave_dev)
return -ENODEV;
}
if (slave_dev) {
if (!netif_is_bond_slave(slave_dev)) {
slave_err(bond->dev, slave_dev, "Device is not bonding slave\n");
return -EINVAL;
}
if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
slave_err(bond->dev, slave_dev, "Device is not our slave\n");
return -EINVAL;
}
}
block_netpoll_tx();
/* check to see if we are clearing active */
if (!slave_dev) {
netdev_dbg(bond->dev, "Clearing current active slave\n");
RCU_INIT_POINTER(bond->curr_active_slave, NULL);
bond_select_active_slave(bond);
} else {
struct slave *old_active = rtnl_dereference(bond->curr_active_slave);
struct slave *new_active = bond_slave_get_rtnl(slave_dev);
BUG_ON(!new_active);
if (new_active == old_active) {
/* do nothing */
slave_dbg(bond->dev, new_active->dev, "is already the current active slave\n");
} else {
if (old_active && (new_active->link == BOND_LINK_UP) &&
bond_slave_is_up(new_active)) {
slave_dbg(bond->dev, new_active->dev, "Setting as active slave\n");
bond_change_active_slave(bond, new_active);
} else {
slave_err(bond->dev, new_active->dev, "Could not set as active slave; either %s is down or the link is down\n",
new_active->dev->name);
ret = -EINVAL;
}
}
}
unblock_netpoll_tx();
return ret;
}
/* There are two tricky bits here. First, if MII monitoring is activated, then
* we must disable ARP monitoring. Second, if the timer isn't running, we must
* start it.
*/
static int bond_option_miimon_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting MII monitoring interval to %llu\n",
newval->value);
bond->params.miimon = newval->value;
if (bond->params.updelay)
netdev_dbg(bond->dev, "Note: Updating updelay (to %d) since it is a multiple of the miimon value\n",
bond->params.updelay * bond->params.miimon);
if (bond->params.downdelay)
netdev_dbg(bond->dev, "Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
bond->params.downdelay * bond->params.miimon);
if (bond->params.peer_notif_delay)
netdev_dbg(bond->dev, "Note: Updating peer_notif_delay (to %d) since it is a multiple of the miimon value\n",
bond->params.peer_notif_delay * bond->params.miimon);
if (newval->value && bond->params.arp_interval) {
netdev_dbg(bond->dev, "MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n");
bond->params.arp_interval = 0;
if (bond->params.arp_validate)
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
}
if (bond->dev->flags & IFF_UP) {
/* If the interface is up, we may need to fire off
* the MII timer. If the interface is down, the
* timer will get fired off when the open function
* is called.
*/
if (!newval->value) {
cancel_delayed_work_sync(&bond->mii_work);
} else {
cancel_delayed_work_sync(&bond->arp_work);
queue_delayed_work(bond->wq, &bond->mii_work, 0);
}
}
return 0;
}
/* Set up, down and peer notification delays. These must be multiples
* of the MII monitoring value, and are stored internally as the
* multiplier. Thus, we must translate to MS for the real world.
*/
static int _bond_option_delay_set(struct bonding *bond,
const struct bond_opt_value *newval,
const char *name,
int *target)
{
int value = newval->value;
if (!bond->params.miimon) {
netdev_err(bond->dev, "Unable to set %s as MII monitoring is disabled\n",
name);
return -EPERM;
}
if ((value % bond->params.miimon) != 0) {
netdev_warn(bond->dev,
"%s (%d) is not a multiple of miimon (%d), value rounded to %d ms\n",
name,
value, bond->params.miimon,
(value / bond->params.miimon) *
bond->params.miimon);
}
*target = value / bond->params.miimon;
netdev_dbg(bond->dev, "Setting %s to %d\n",
name,
*target * bond->params.miimon);
return 0;
}
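/* Worked example (illustrative figures only): with miimon = 100 and a
 * requested updelay of 250 ms, 250 is not a multiple of 100, so the warning
 * above fires; the stored multiplier is 250 / 100 = 2 and the effective
 * delay reported back is 2 * 100 = 200 ms.
 */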
static int bond_option_updelay_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
return _bond_option_delay_set(bond, newval, "up delay",
&bond->params.updelay);
}
static int bond_option_downdelay_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
return _bond_option_delay_set(bond, newval, "down delay",
&bond->params.downdelay);
}
static int bond_option_peer_notif_delay_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
int ret = _bond_option_delay_set(bond, newval,
"peer notification delay",
&bond->params.peer_notif_delay);
return ret;
}
static int bond_option_use_carrier_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting use_carrier to %llu\n",
newval->value);
bond->params.use_carrier = newval->value;
return 0;
}
/* There are two tricky bits here. First, if ARP monitoring is activated, then
* we must disable MII monitoring. Second, if the ARP timer isn't running,
* we must start it.
*/
static int bond_option_arp_interval_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting ARP monitoring interval to %llu\n",
newval->value);
bond->params.arp_interval = newval->value;
if (newval->value) {
if (bond->params.miimon) {
netdev_dbg(bond->dev, "ARP monitoring cannot be used with MII monitoring. Disabling MII monitoring\n");
bond->params.miimon = 0;
}
if (!bond->params.arp_targets[0])
netdev_dbg(bond->dev, "ARP monitoring has been set up, but no ARP targets have been specified\n");
}
if (bond->dev->flags & IFF_UP) {
/* If the interface is up, we may need to fire off
* the ARP timer. If the interface is down, the
* timer will get fired off when the open function
* is called.
*/
if (!newval->value) {
if (bond->params.arp_validate)
bond->recv_probe = NULL;
cancel_delayed_work_sync(&bond->arp_work);
} else {
/* arp_validate can be set only in active-backup mode */
bond->recv_probe = bond_rcv_validate;
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
}
return 0;
}
static void _bond_options_arp_ip_target_set(struct bonding *bond, int slot,
__be32 target,
unsigned long last_rx)
{
__be32 *targets = bond->params.arp_targets;
struct list_head *iter;
struct slave *slave;
if (slot >= 0 && slot < BOND_MAX_ARP_TARGETS) {
bond_for_each_slave(bond, slave, iter)
slave->target_last_arp_rx[slot] = last_rx;
targets[slot] = target;
}
}
static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
{
__be32 *targets = bond->params.arp_targets;
int ind;
if (!bond_is_ip_target_ok(target)) {
netdev_err(bond->dev, "invalid ARP target %pI4 specified for addition\n",
&target);
return -EINVAL;
}
if (bond_get_targets_ip(targets, target) != -1) { /* dup */
netdev_err(bond->dev, "ARP target %pI4 is already present\n",
&target);
return -EINVAL;
}
ind = bond_get_targets_ip(targets, 0); /* first free slot */
if (ind == -1) {
netdev_err(bond->dev, "ARP target table is full!\n");
return -EINVAL;
}
netdev_dbg(bond->dev, "Adding ARP target %pI4\n", &target);
_bond_options_arp_ip_target_set(bond, ind, target, jiffies);
return 0;
}
static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
{
return _bond_option_arp_ip_target_add(bond, target);
}
static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
{
__be32 *targets = bond->params.arp_targets;
struct list_head *iter;
struct slave *slave;
unsigned long *targets_rx;
int ind, i;
if (!bond_is_ip_target_ok(target)) {
netdev_err(bond->dev, "invalid ARP target %pI4 specified for removal\n",
&target);
return -EINVAL;
}
ind = bond_get_targets_ip(targets, target);
if (ind == -1) {
netdev_err(bond->dev, "unable to remove nonexistent ARP target %pI4\n",
&target);
return -EINVAL;
}
if (ind == 0 && !targets[1] && bond->params.arp_interval)
netdev_warn(bond->dev, "Removing last arp target with arp_interval on\n");
netdev_dbg(bond->dev, "Removing ARP target %pI4\n", &target);
bond_for_each_slave(bond, slave, iter) {
targets_rx = slave->target_last_arp_rx;
for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
targets_rx[i] = targets_rx[i+1];
targets_rx[i] = 0;
}
for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
targets[i] = targets[i+1];
targets[i] = 0;
return 0;
}
void bond_option_arp_ip_targets_clear(struct bonding *bond)
{
int i;
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
_bond_options_arp_ip_target_set(bond, i, 0, 0);
}
static int bond_option_arp_ip_targets_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
int ret = -EPERM;
__be32 target;
if (newval->string) {
if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
netdev_err(bond->dev, "invalid ARP target %pI4 specified\n",
&target);
return ret;
}
if (newval->string[0] == '+')
ret = bond_option_arp_ip_target_add(bond, target);
else if (newval->string[0] == '-')
ret = bond_option_arp_ip_target_rem(bond, target);
else
netdev_err(bond->dev, "no command found in arp_ip_targets file - use +<addr> or -<addr>\n");
} else {
target = newval->value;
ret = bond_option_arp_ip_target_add(bond, target);
}
return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
struct in6_addr *target,
unsigned long last_rx)
{
struct in6_addr *targets = bond->params.ns_targets;
struct list_head *iter;
struct slave *slave;
if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) {
bond_for_each_slave(bond, slave, iter)
slave->target_last_arp_rx[slot] = last_rx;
targets[slot] = *target;
}
}
void bond_option_ns_ip6_targets_clear(struct bonding *bond)
{
struct in6_addr addr_any = in6addr_any;
int i;
for (i = 0; i < BOND_MAX_NS_TARGETS; i++)
_bond_options_ns_ip6_target_set(bond, i, &addr_any, 0);
}
static int bond_option_ns_ip6_targets_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
struct in6_addr *target = (struct in6_addr *)newval->extra;
struct in6_addr *targets = bond->params.ns_targets;
struct in6_addr addr_any = in6addr_any;
int index;
if (!bond_is_ip6_target_ok(target)) {
netdev_err(bond->dev, "invalid NS target %pI6c specified for addition\n",
target);
return -EINVAL;
}
if (bond_get_targets_ip6(targets, target) != -1) { /* dup */
netdev_err(bond->dev, "NS target %pI6c is already present\n",
target);
return -EINVAL;
}
index = bond_get_targets_ip6(targets, &addr_any); /* first free slot */
if (index == -1) {
netdev_err(bond->dev, "NS target table is full!\n");
return -EINVAL;
}
netdev_dbg(bond->dev, "Adding NS target %pI6c\n", target);
_bond_options_ns_ip6_target_set(bond, index, target, jiffies);
return 0;
}
#else
static int bond_option_ns_ip6_targets_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
return -EPERM;
}
#endif
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n",
newval->string, newval->value);
bond->params.arp_validate = newval->value;
return 0;
}
static int bond_option_arp_all_targets_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting arp_all_targets to %s (%llu)\n",
newval->string, newval->value);
bond->params.arp_all_targets = newval->value;
return 0;
}
static int bond_option_missed_max_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting missed max to %s (%llu)\n",
newval->string, newval->value);
bond->params.missed_max = newval->value;
return 0;
}
static int bond_option_prio_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
struct slave *slave;
slave = bond_slave_get_rtnl(newval->slave_dev);
if (!slave) {
netdev_dbg(newval->slave_dev, "%s called on NULL slave\n", __func__);
return -ENODEV;
}
slave->prio = newval->value;
if (rtnl_dereference(bond->primary_slave))
slave_warn(bond->dev, slave->dev,
"prio updated, but will not affect failover re-selection as primary slave have been set\n");
else
bond_select_active_slave(bond);
return 0;
}
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
char *p, *primary = newval->string;
struct list_head *iter;
struct slave *slave;
block_netpoll_tx();
p = strchr(primary, '\n');
if (p)
*p = '\0';
/* check to see if we are clearing primary */
if (!strlen(primary)) {
netdev_dbg(bond->dev, "Setting primary slave to None\n");
RCU_INIT_POINTER(bond->primary_slave, NULL);
memset(bond->params.primary, 0, sizeof(bond->params.primary));
bond_select_active_slave(bond);
goto out;
}
bond_for_each_slave(bond, slave, iter) {
if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
slave_dbg(bond->dev, slave->dev, "Setting as primary slave\n");
rcu_assign_pointer(bond->primary_slave, slave);
strcpy(bond->params.primary, slave->dev->name);
bond->force_primary = true;
bond_select_active_slave(bond);
goto out;
}
}
if (rtnl_dereference(bond->primary_slave)) {
netdev_dbg(bond->dev, "Setting primary slave to None\n");
RCU_INIT_POINTER(bond->primary_slave, NULL);
bond_select_active_slave(bond);
}
strscpy_pad(bond->params.primary, primary, IFNAMSIZ);
netdev_dbg(bond->dev, "Recording %s as primary, but it has not been enslaved yet\n",
primary);
out:
unblock_netpoll_tx();
return 0;
}
static int bond_option_primary_reselect_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting primary_reselect to %s (%llu)\n",
newval->string, newval->value);
bond->params.primary_reselect = newval->value;
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
return 0;
}
static int bond_option_fail_over_mac_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting fail_over_mac to %s (%llu)\n",
newval->string, newval->value);
bond->params.fail_over_mac = newval->value;
return 0;
}
static int bond_option_xmit_hash_policy_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting xmit hash policy to %s (%llu)\n",
newval->string, newval->value);
bond->params.xmit_policy = newval->value;
return 0;
}
static int bond_option_resend_igmp_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting resend_igmp to %llu\n",
newval->value);
bond->params.resend_igmp = newval->value;
return 0;
}
static int bond_option_num_peer_notif_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
bond->params.num_peer_notif = newval->value;
return 0;
}
static int bond_option_all_slaves_active_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
struct list_head *iter;
struct slave *slave;
if (newval->value == bond->params.all_slaves_active)
return 0;
bond->params.all_slaves_active = newval->value;
bond_for_each_slave(bond, slave, iter) {
if (!bond_is_active_slave(slave)) {
if (newval->value)
slave->inactive = 0;
else
slave->inactive = 1;
}
}
return 0;
}
static int bond_option_min_links_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting min links value to %llu\n",
newval->value);
bond->params.min_links = newval->value;
bond_set_carrier(bond);
return 0;
}
static int bond_option_lp_interval_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
bond->params.lp_interval = newval->value;
return 0;
}
static int bond_option_pps_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting packets per slave to %llu\n",
newval->value);
bond->params.packets_per_slave = newval->value;
if (newval->value > 0) {
bond->params.reciprocal_packets_per_slave =
reciprocal_value(newval->value);
} else {
/* reciprocal_packets_per_slave is unused if
* packets_per_slave is 0 or 1, just initialize it
*/
bond->params.reciprocal_packets_per_slave =
(struct reciprocal_value) { 0 };
}
return 0;
}
static int bond_option_lacp_active_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
newval->string, newval->value);
bond->params.lacp_active = newval->value;
return 0;
}
static int bond_option_lacp_rate_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting LACP rate to %s (%llu)\n",
newval->string, newval->value);
bond->params.lacp_fast = newval->value;
bond_3ad_update_lacp_rate(bond);
return 0;
}
static int bond_option_ad_select_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting ad_select to %s (%llu)\n",
newval->string, newval->value);
bond->params.ad_select = newval->value;
return 0;
}
static int bond_option_queue_id_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
struct slave *slave, *update_slave;
struct net_device *sdev;
struct list_head *iter;
char *delim;
int ret = 0;
u16 qid;
/* delim will point to queue id if successful */
delim = strchr(newval->string, ':');
if (!delim)
goto err_no_cmd;
/* Terminate string that points to device name and bump it
* up one, so we can read the queue id there.
*/
*delim = '\0';
if (sscanf(++delim, "%hd\n", &qid) != 1)
goto err_no_cmd;
/* Check buffer length, valid ifname and queue id */
if (!dev_valid_name(newval->string) ||
qid > bond->dev->real_num_tx_queues)
goto err_no_cmd;
/* Get the pointer to that interface if it exists */
sdev = __dev_get_by_name(dev_net(bond->dev), newval->string);
if (!sdev)
goto err_no_cmd;
/* Search for the slave and check for duplicate qids */
update_slave = NULL;
bond_for_each_slave(bond, slave, iter) {
if (sdev == slave->dev)
/* We don't need to check the matching
* slave for dups, since we're overwriting it
*/
update_slave = slave;
else if (qid && qid == slave->queue_id) {
goto err_no_cmd;
}
}
if (!update_slave)
goto err_no_cmd;
/* Actually set the qids for the slave */
update_slave->queue_id = qid;
out:
return ret;
err_no_cmd:
netdev_dbg(bond->dev, "invalid input for queue_id set\n");
ret = -EPERM;
goto out;
}
static int bond_option_slaves_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
char command[IFNAMSIZ + 1] = { 0, };
struct net_device *dev;
char *ifname;
int ret;
sscanf(newval->string, "%16s", command); /* IFNAMSIZ*/
ifname = command + 1;
if ((strlen(command) <= 1) ||
(command[0] != '+' && command[0] != '-') ||
!dev_valid_name(ifname))
goto err_no_cmd;
dev = __dev_get_by_name(dev_net(bond->dev), ifname);
if (!dev) {
netdev_dbg(bond->dev, "interface %s does not exist!\n",
ifname);
ret = -ENODEV;
goto out;
}
switch (command[0]) {
case '+':
slave_dbg(bond->dev, dev, "Enslaving interface\n");
ret = bond_enslave(bond->dev, dev, NULL);
break;
case '-':
slave_dbg(bond->dev, dev, "Releasing interface\n");
ret = bond_release(bond->dev, dev);
break;
default:
/* should not run here. */
goto err_no_cmd;
}
out:
return ret;
err_no_cmd:
netdev_err(bond->dev, "no command found in slaves file - use +ifname or -ifname\n");
ret = -EPERM;
goto out;
}
static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting dynamic-lb to %s (%llu)\n",
newval->string, newval->value);
bond->params.tlb_dynamic_lb = newval->value;
return 0;
}
static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting ad_actor_sys_prio to %llu\n",
newval->value);
bond->params.ad_actor_sys_prio = newval->value;
bond_3ad_update_ad_actor_settings(bond);
return 0;
}
static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
u8 macaddr[ETH_ALEN];
u8 *mac;
if (newval->string) {
if (!mac_pton(newval->string, macaddr))
goto err;
mac = macaddr;
} else {
mac = (u8 *)&newval->value;
}
if (is_multicast_ether_addr(mac))
goto err;
netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac);
ether_addr_copy(bond->params.ad_actor_system, mac);
bond_3ad_update_ad_actor_settings(bond);
return 0;
err:
netdev_err(bond->dev, "Invalid ad_actor_system MAC address.\n");
return -EINVAL;
}
static int bond_option_ad_user_port_key_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
netdev_dbg(bond->dev, "Setting ad_user_port_key to %llu\n",
newval->value);
bond->params.ad_user_port_key = newval->value;
return 0;
}
| linux-master | drivers/net/bonding/bond_options.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
/* PLIP: A parallel port "network" driver for Linux. */
/* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
/*
* Authors: Donald Becker <[email protected]>
* Tommy Thorn <[email protected]>
* Tanabe Hiroyasu <[email protected]>
* Alan Cox <[email protected]>
* Peter Bauer <[email protected]>
* Niibe Yutaka <[email protected]>
* Nimrod Zimerman <[email protected]>
*
* Enhancements:
* Modularization and ifreq/ifmap support by Alan Cox.
* Rewritten by Niibe Yutaka.
* parport-sharing awareness code by Philip Blundell.
* SMP locking by Niibe Yutaka.
* Support for parallel ports with no IRQ (poll mode),
* Modifications to use the parallel port API
* by Nimrod Zimerman.
*
* Fixes:
* Niibe Yutaka
* - Module initialization.
* - MTU fix.
* - Make sure other end is OK, before sending a packet.
* - Fix immediate timer problem.
*
* Al Viro
* - Changed {enable,disable}_irq handling to make it work
* with new ("stack") semantics.
*/
/*
* Original version and the name 'PLIP' from Donald Becker <[email protected]>
* inspired by Russ Nelson's parallel port packet driver.
*
* NOTE:
* Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
* Because of the necessity to communicate with DOS machines running the
* Crynwr packet driver, Peter Bauer changed the protocol again
* back to the original protocol.
*
* This version follows original PLIP protocol.
* So, this PLIP can't communicate with the PLIP of Linux v1.0.
*/
/*
* To use with DOS box, please do (Turn on ARP switch):
* # ifconfig plip[0-2] arp
*/
static const char version[] = "NET3 PLIP version 2.4-parport [email protected]\n";
/*
Sources:
Ideas and protocols came from Russ Nelson's <[email protected]>
"parallel.asm" parallel port packet driver.
The "Crynwr" parallel port standard specifies the following protocol:
Trigger by sending nibble '0x8' (this causes interrupt on other end)
count-low octet
count-high octet
... data octets
checksum octet
Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
<wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
The packet is encapsulated as if it were ethernet.
The cable used is a de facto standard parallel null cable -- sold as
a "LapLink" cable by various places. You'll need a 12-conductor cable to
make one yourself. The wiring is:
SLCTIN 17 - 17
GROUND 25 - 25
D0->ERROR 2 - 15 15 - 2
D1->SLCT 3 - 13 13 - 3
D2->PAPOUT 4 - 12 12 - 4
D3->ACK 5 - 10 10 - 5
D4->BUSY 6 - 11 11 - 6
Do not connect the other pins. They are
D5,D6,D7 are 7,8,9
STROBE is 1, FEED is 14, INIT is 16
extra grounds are 18,19,20,21,22,23,24
*/
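/* Illustrative sketch, not part of the original driver: per the Crynwr
 * description above, each octet travels as two transfers on D0-D4 -- first
 * the low nibble with bit 4 set (the '0x1?' pattern), then the high nibble
 * with bit 4 clear (the '0x0?' pattern). The helpers below only show that
 * data-line encoding; the real handshake lives in plip_send()/plip_receive()
 * further down, so this block is kept out of the build with #if 0.
 */
#if 0
static unsigned char plip_example_low_xfer(unsigned char octet)
{
	return 0x10 | (octet & 0x0f);	/* '0x1?': low nibble, strobe set */
}

static unsigned char plip_example_high_xfer(unsigned char octet)
{
	return (octet >> 4) & 0x0f;	/* '0x0?': high nibble, strobe clear */
}
#endif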
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/if_plip.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/parport.h>
#include <linux/bitops.h>
#include <net/neighbour.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
/* Maximum number of devices to support. */
#define PLIP_MAX 8
/* Use 0 for production, 1 for verification, >2 for debug */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static const unsigned int net_debug = NET_DEBUG;
#define ENABLE(irq) if (irq != -1) enable_irq(irq)
#define DISABLE(irq) if (irq != -1) disable_irq(irq)
/* In micro second */
#define PLIP_DELAY_UNIT 1
/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_TRIGGER_WAIT 500
/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_NIBBLE_WAIT 3000
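/* Worked example: with PLIP_DELAY_UNIT of 1 usec, the connection timeout is
 * 500 * 1 = 500 usec and the per-nibble timeout is 3000 * 1 = 3000 usec.
 */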
/* Bottom halves */
static void plip_kick_bh(struct work_struct *work);
static void plip_bh(struct work_struct *work);
static void plip_timer_bh(struct work_struct *work);
/* Interrupt handler */
static void plip_interrupt(void *dev_id);
/* Functions for DEV methods */
static netdev_tx_t plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned len);
static int plip_hard_header_cache(const struct neighbour *neigh,
struct hh_cache *hh, __be16 type);
static int plip_open(struct net_device *dev);
static int plip_close(struct net_device *dev);
static int plip_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd);
static int plip_preempt(void *handle);
static void plip_wakeup(void *handle);
enum plip_connection_state {
PLIP_CN_NONE=0,
PLIP_CN_RECEIVE,
PLIP_CN_SEND,
PLIP_CN_CLOSING,
PLIP_CN_ERROR
};
enum plip_packet_state {
PLIP_PK_DONE=0,
PLIP_PK_TRIGGER,
PLIP_PK_LENGTH_LSB,
PLIP_PK_LENGTH_MSB,
PLIP_PK_DATA,
PLIP_PK_CHECKSUM
};
enum plip_nibble_state {
PLIP_NB_BEGIN,
PLIP_NB_1,
PLIP_NB_2,
};
struct plip_local {
enum plip_packet_state state;
enum plip_nibble_state nibble;
union {
struct {
#if defined(__LITTLE_ENDIAN)
unsigned char lsb;
unsigned char msb;
#elif defined(__BIG_ENDIAN)
unsigned char msb;
unsigned char lsb;
#else
#error "Please fix the endianness defines in <asm/byteorder.h>"
#endif
} b;
unsigned short h;
} length;
unsigned short byte;
unsigned char checksum;
unsigned char data;
struct sk_buff *skb;
};
struct net_local {
struct net_device *dev;
struct work_struct immediate;
struct delayed_work deferred;
struct delayed_work timer;
struct plip_local snd_data;
struct plip_local rcv_data;
struct pardevice *pardev;
unsigned long trigger;
unsigned long nibble;
enum plip_connection_state connection;
unsigned short timeout_count;
int is_deferred;
int port_owner;
int should_relinquish;
spinlock_t lock;
atomic_t kill_timer;
struct completion killed_timer_cmp;
};
static inline void enable_parport_interrupts (struct net_device *dev)
{
if (dev->irq != -1)
{
struct parport *port =
((struct net_local *)netdev_priv(dev))->pardev->port;
port->ops->enable_irq (port);
}
}
static inline void disable_parport_interrupts (struct net_device *dev)
{
if (dev->irq != -1)
{
struct parport *port =
((struct net_local *)netdev_priv(dev))->pardev->port;
port->ops->disable_irq (port);
}
}
static inline void write_data (struct net_device *dev, unsigned char data)
{
struct parport *port =
((struct net_local *)netdev_priv(dev))->pardev->port;
port->ops->write_data (port, data);
}
static inline unsigned char read_status (struct net_device *dev)
{
struct parport *port =
((struct net_local *)netdev_priv(dev))->pardev->port;
return port->ops->read_status (port);
}
static const struct header_ops plip_header_ops = {
.create = plip_hard_header,
.cache = plip_hard_header_cache,
};
static const struct net_device_ops plip_netdev_ops = {
.ndo_open = plip_open,
.ndo_stop = plip_close,
.ndo_start_xmit = plip_tx_packet,
.ndo_siocdevprivate = plip_siocdevprivate,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
/* Entry point of PLIP driver.
Probe the hardware, and register/initialize the driver.
PLIP is rather weird, because of the way it interacts with the parport
system. It is _not_ initialised from Space.c. Instead, plip_init()
is called, and that function makes up a "struct net_device" for each port, and
then calls us here.
*/
static void
plip_init_netdev(struct net_device *dev)
{
static const u8 addr_init[ETH_ALEN] = {
0xfc, 0xfc, 0xfc,
0xfc, 0xfc, 0xfc,
};
struct net_local *nl = netdev_priv(dev);
/* Then, override parts of it */
dev->tx_queue_len = 10;
dev->flags = IFF_POINTOPOINT|IFF_NOARP;
eth_hw_addr_set(dev, addr_init);
dev->netdev_ops = &plip_netdev_ops;
dev->header_ops = &plip_header_ops;
nl->port_owner = 0;
/* Initialize constants */
nl->trigger = PLIP_TRIGGER_WAIT;
nl->nibble = PLIP_NIBBLE_WAIT;
/* Initialize task queue structures */
INIT_WORK(&nl->immediate, plip_bh);
INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
if (dev->irq == -1)
INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
spin_lock_init(&nl->lock);
}
/* Bottom half handler for the delayed request.
This routine is kicked by do_timer().
Request `plip_bh' to be invoked. */
static void
plip_kick_bh(struct work_struct *work)
{
struct net_local *nl =
container_of(work, struct net_local, deferred.work);
if (nl->is_deferred)
schedule_work(&nl->immediate);
}
/* Forward declarations of internal routines */
static int plip_none(struct net_device *, struct net_local *,
struct plip_local *, struct plip_local *);
static int plip_receive_packet(struct net_device *, struct net_local *,
struct plip_local *, struct plip_local *);
static int plip_send_packet(struct net_device *, struct net_local *,
struct plip_local *, struct plip_local *);
static int plip_connection_close(struct net_device *, struct net_local *,
struct plip_local *, struct plip_local *);
static int plip_error(struct net_device *, struct net_local *,
struct plip_local *, struct plip_local *);
static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
struct plip_local *snd,
struct plip_local *rcv,
int error);
#define OK 0
#define TIMEOUT 1
#define ERROR 2
#define HS_TIMEOUT 3
typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
struct plip_local *snd, struct plip_local *rcv);
static const plip_func connection_state_table[] =
{
plip_none,
plip_receive_packet,
plip_send_packet,
plip_connection_close,
plip_error
};
/* Bottom half handler of PLIP. */
static void
plip_bh(struct work_struct *work)
{
struct net_local *nl = container_of(work, struct net_local, immediate);
struct plip_local *snd = &nl->snd_data;
struct plip_local *rcv = &nl->rcv_data;
plip_func f;
int r;
nl->is_deferred = 0;
f = connection_state_table[nl->connection];
if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
(r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
nl->is_deferred = 1;
schedule_delayed_work(&nl->deferred, 1);
}
}
static void
plip_timer_bh(struct work_struct *work)
{
struct net_local *nl =
container_of(work, struct net_local, timer.work);
if (!(atomic_read (&nl->kill_timer))) {
plip_interrupt (nl->dev);
schedule_delayed_work(&nl->timer, 1);
}
else {
complete(&nl->killed_timer_cmp);
}
}
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
struct plip_local *snd, struct plip_local *rcv,
int error)
{
unsigned char c0;
/*
* This is tricky. If we got here from the beginning of send (either
* with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
* already disabled. With the old variant of {enable,disable}_irq()
* extra disable_irq() was a no-op. Now it became mortal - it's
* unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
* that is). So we have to treat HS_TIMEOUT and ERROR from send
* in a special way.
*/
spin_lock_irq(&nl->lock);
if (nl->connection == PLIP_CN_SEND) {
if (error != ERROR) { /* Timeout */
nl->timeout_count++;
if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
nl->timeout_count <= 3) {
spin_unlock_irq(&nl->lock);
/* Try again later */
return TIMEOUT;
}
c0 = read_status(dev);
printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
dev->name, snd->state, c0);
} else
error = HS_TIMEOUT;
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
} else if (nl->connection == PLIP_CN_RECEIVE) {
if (rcv->state == PLIP_PK_TRIGGER) {
/* Transmission was interrupted. */
spin_unlock_irq(&nl->lock);
return OK;
}
if (error != ERROR) { /* Timeout */
if (++nl->timeout_count <= 3) {
spin_unlock_irq(&nl->lock);
/* Try again later */
return TIMEOUT;
}
c0 = read_status(dev);
printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
dev->name, rcv->state, c0);
}
dev->stats.rx_dropped++;
}
rcv->state = PLIP_PK_DONE;
if (rcv->skb) {
dev_kfree_skb_irq(rcv->skb);
rcv->skb = NULL;
}
snd->state = PLIP_PK_DONE;
if (snd->skb) {
dev_consume_skb_irq(snd->skb);
snd->skb = NULL;
}
spin_unlock_irq(&nl->lock);
if (error == HS_TIMEOUT) {
DISABLE(dev->irq);
synchronize_irq(dev->irq);
}
disable_parport_interrupts (dev);
netif_stop_queue (dev);
nl->connection = PLIP_CN_ERROR;
write_data (dev, 0x00);
return TIMEOUT;
}
static int
plip_none(struct net_device *dev, struct net_local *nl,
struct plip_local *snd, struct plip_local *rcv)
{
return OK;
}
/* PLIP_RECEIVE --- receive a byte(two nibbles)
Returns OK on success, TIMEOUT on timeout */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
enum plip_nibble_state *ns_p, unsigned char *data_p)
{
unsigned char c0, c1;
unsigned int cx;
switch (*ns_p) {
case PLIP_NB_BEGIN:
cx = nibble_timeout;
while (1) {
c0 = read_status(dev);
udelay(PLIP_DELAY_UNIT);
if ((c0 & 0x80) == 0) {
c1 = read_status(dev);
if (c0 == c1)
break;
}
if (--cx == 0)
return TIMEOUT;
}
*data_p = (c0 >> 3) & 0x0f;
write_data (dev, 0x10); /* send ACK */
*ns_p = PLIP_NB_1;
fallthrough;
case PLIP_NB_1:
cx = nibble_timeout;
while (1) {
c0 = read_status(dev);
udelay(PLIP_DELAY_UNIT);
if (c0 & 0x80) {
c1 = read_status(dev);
if (c0 == c1)
break;
}
if (--cx == 0)
return TIMEOUT;
}
*data_p |= (c0 << 1) & 0xf0;
write_data (dev, 0x00); /* send ACK */
*ns_p = PLIP_NB_BEGIN;
break;
case PLIP_NB_2:
break;
}
return OK;
}
/*
* Determine the packet's protocol ID. The rule here is that we
* assume 802.3 if the type field is short enough to be a length.
* This is normal practice and works for any 'now in use' protocol.
*
* PLIP is ethernet-ish but the daddr might not be valid if unicast.
* PLIP fortunately has no bus architecture (it's point-to-point).
*
* We can't fix the daddr thing as that quirk (more bug) is embedded
* in far too many old systems not all even running Linux.
*/
static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct ethhdr *eth;
unsigned char *rawp;
skb_reset_mac_header(skb);
skb_pull(skb,dev->hard_header_len);
eth = eth_hdr(skb);
if(is_multicast_ether_addr(eth->h_dest))
{
if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
skb->pkt_type=PACKET_BROADCAST;
else
skb->pkt_type=PACKET_MULTICAST;
}
/*
* This ALLMULTI check should be redundant by 1.4
* so don't forget to remove it.
*/
if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
return eth->h_proto;
rawp = skb->data;
/*
* This is a magic hack to spot IPX packets. Older Novell breaks
* the protocol design and runs IPX over 802.3 without an 802.2 LLC
* layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
* won't work for fault tolerant netware but does for the rest.
*/
if (*(unsigned short *)rawp == 0xFFFF)
return htons(ETH_P_802_3);
/*
* Real 802.2 LLC
*/
return htons(ETH_P_802_2);
}
/* PLIP_RECEIVE_PACKET --- receive a packet */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
struct plip_local *snd, struct plip_local *rcv)
{
unsigned short nibble_timeout = nl->nibble;
unsigned char *lbuf;
switch (rcv->state) {
case PLIP_PK_TRIGGER:
DISABLE(dev->irq);
/* Don't need to synchronize irq, as we can safely ignore it */
disable_parport_interrupts (dev);
write_data (dev, 0x01); /* send ACK */
if (net_debug > 2)
printk(KERN_DEBUG "%s: receive start\n", dev->name);
rcv->state = PLIP_PK_LENGTH_LSB;
rcv->nibble = PLIP_NB_BEGIN;
fallthrough;
case PLIP_PK_LENGTH_LSB:
if (snd->state != PLIP_PK_DONE) {
if (plip_receive(nl->trigger, dev,
&rcv->nibble, &rcv->length.b.lsb)) {
/* collision, here dev->tbusy == 1 */
rcv->state = PLIP_PK_DONE;
nl->is_deferred = 1;
nl->connection = PLIP_CN_SEND;
schedule_delayed_work(&nl->deferred, 1);
enable_parport_interrupts (dev);
ENABLE(dev->irq);
return OK;
}
} else {
if (plip_receive(nibble_timeout, dev,
&rcv->nibble, &rcv->length.b.lsb))
return TIMEOUT;
}
rcv->state = PLIP_PK_LENGTH_MSB;
fallthrough;
case PLIP_PK_LENGTH_MSB:
if (plip_receive(nibble_timeout, dev,
&rcv->nibble, &rcv->length.b.msb))
return TIMEOUT;
if (rcv->length.h > dev->mtu + dev->hard_header_len ||
rcv->length.h < 8) {
printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
return ERROR;
}
/* Malloc up new buffer. */
rcv->skb = dev_alloc_skb(rcv->length.h + 2);
if (rcv->skb == NULL) {
printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
return ERROR;
}
skb_reserve(rcv->skb, 2); /* Align IP on 16 byte boundaries */
skb_put(rcv->skb,rcv->length.h);
rcv->skb->dev = dev;
rcv->state = PLIP_PK_DATA;
rcv->byte = 0;
rcv->checksum = 0;
fallthrough;
case PLIP_PK_DATA:
lbuf = rcv->skb->data;
do {
if (plip_receive(nibble_timeout, dev,
&rcv->nibble, &lbuf[rcv->byte]))
return TIMEOUT;
} while (++rcv->byte < rcv->length.h);
do {
rcv->checksum += lbuf[--rcv->byte];
} while (rcv->byte);
rcv->state = PLIP_PK_CHECKSUM;
fallthrough;
case PLIP_PK_CHECKSUM:
if (plip_receive(nibble_timeout, dev,
&rcv->nibble, &rcv->data))
return TIMEOUT;
if (rcv->data != rcv->checksum) {
dev->stats.rx_crc_errors++;
if (net_debug)
printk(KERN_DEBUG "%s: checksum error\n", dev->name);
return ERROR;
}
rcv->state = PLIP_PK_DONE;
fallthrough;
case PLIP_PK_DONE:
/* Inform the upper layer for the arrival of a packet. */
rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
netif_rx(rcv->skb);
dev->stats.rx_bytes += rcv->length.h;
dev->stats.rx_packets++;
rcv->skb = NULL;
if (net_debug > 2)
printk(KERN_DEBUG "%s: receive end\n", dev->name);
/* Close the connection. */
write_data (dev, 0x00);
spin_lock_irq(&nl->lock);
if (snd->state != PLIP_PK_DONE) {
nl->connection = PLIP_CN_SEND;
spin_unlock_irq(&nl->lock);
schedule_work(&nl->immediate);
enable_parport_interrupts (dev);
ENABLE(dev->irq);
return OK;
} else {
nl->connection = PLIP_CN_NONE;
spin_unlock_irq(&nl->lock);
enable_parport_interrupts (dev);
ENABLE(dev->irq);
return OK;
}
}
return OK;
}
/* PLIP_SEND --- send a byte (two nibbles)
Returns OK on success, TIMEOUT when timeout */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
enum plip_nibble_state *ns_p, unsigned char data)
{
unsigned char c0;
unsigned int cx;
switch (*ns_p) {
case PLIP_NB_BEGIN:
write_data (dev, data & 0x0f);
*ns_p = PLIP_NB_1;
fallthrough;
case PLIP_NB_1:
write_data (dev, 0x10 | (data & 0x0f));
cx = nibble_timeout;
while (1) {
c0 = read_status(dev);
if ((c0 & 0x80) == 0)
break;
if (--cx == 0)
return TIMEOUT;
udelay(PLIP_DELAY_UNIT);
}
write_data (dev, 0x10 | (data >> 4));
*ns_p = PLIP_NB_2;
fallthrough;
case PLIP_NB_2:
write_data (dev, (data >> 4));
cx = nibble_timeout;
while (1) {
c0 = read_status(dev);
if (c0 & 0x80)
break;
if (--cx == 0)
return TIMEOUT;
udelay(PLIP_DELAY_UNIT);
}
*ns_p = PLIP_NB_BEGIN;
return OK;
}
return OK;
}
/* PLIP_SEND_PACKET --- send a packet */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
struct plip_local *snd, struct plip_local *rcv)
{
unsigned short nibble_timeout = nl->nibble;
unsigned char *lbuf;
unsigned char c0;
unsigned int cx;
if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
snd->state = PLIP_PK_DONE;
snd->skb = NULL;
return ERROR;
}
switch (snd->state) {
case PLIP_PK_TRIGGER:
if ((read_status(dev) & 0xf8) != 0x80)
return HS_TIMEOUT;
/* Trigger remote rx interrupt. */
write_data (dev, 0x08);
cx = nl->trigger;
while (1) {
udelay(PLIP_DELAY_UNIT);
spin_lock_irq(&nl->lock);
if (nl->connection == PLIP_CN_RECEIVE) {
spin_unlock_irq(&nl->lock);
/* Interrupted. */
dev->stats.collisions++;
return OK;
}
c0 = read_status(dev);
if (c0 & 0x08) {
spin_unlock_irq(&nl->lock);
DISABLE(dev->irq);
synchronize_irq(dev->irq);
if (nl->connection == PLIP_CN_RECEIVE) {
/* Interrupted.
We don't need to enable irq,
as it is soon disabled. */
/* Yes, we do. New variant of
{enable,disable}_irq *counts*
them. -- AV */
ENABLE(dev->irq);
dev->stats.collisions++;
return OK;
}
disable_parport_interrupts (dev);
if (net_debug > 2)
printk(KERN_DEBUG "%s: send start\n", dev->name);
snd->state = PLIP_PK_LENGTH_LSB;
snd->nibble = PLIP_NB_BEGIN;
nl->timeout_count = 0;
break;
}
spin_unlock_irq(&nl->lock);
if (--cx == 0) {
write_data (dev, 0x00);
return HS_TIMEOUT;
}
}
break;
case PLIP_PK_LENGTH_LSB:
if (plip_send(nibble_timeout, dev,
&snd->nibble, snd->length.b.lsb))
return TIMEOUT;
snd->state = PLIP_PK_LENGTH_MSB;
fallthrough;
case PLIP_PK_LENGTH_MSB:
if (plip_send(nibble_timeout, dev,
&snd->nibble, snd->length.b.msb))
return TIMEOUT;
snd->state = PLIP_PK_DATA;
snd->byte = 0;
snd->checksum = 0;
fallthrough;
case PLIP_PK_DATA:
do {
if (plip_send(nibble_timeout, dev,
&snd->nibble, lbuf[snd->byte]))
return TIMEOUT;
} while (++snd->byte < snd->length.h);
do {
snd->checksum += lbuf[--snd->byte];
} while (snd->byte);
snd->state = PLIP_PK_CHECKSUM;
fallthrough;
case PLIP_PK_CHECKSUM:
if (plip_send(nibble_timeout, dev,
&snd->nibble, snd->checksum))
return TIMEOUT;
dev->stats.tx_bytes += snd->skb->len;
dev_kfree_skb(snd->skb);
dev->stats.tx_packets++;
snd->state = PLIP_PK_DONE;
fallthrough;
case PLIP_PK_DONE:
/* Close the connection */
write_data (dev, 0x00);
snd->skb = NULL;
if (net_debug > 2)
printk(KERN_DEBUG "%s: send end\n", dev->name);
nl->connection = PLIP_CN_CLOSING;
nl->is_deferred = 1;
schedule_delayed_work(&nl->deferred, 1);
enable_parport_interrupts (dev);
ENABLE(dev->irq);
return OK;
}
return OK;
}
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
struct plip_local *snd, struct plip_local *rcv)
{
spin_lock_irq(&nl->lock);
if (nl->connection == PLIP_CN_CLOSING) {
nl->connection = PLIP_CN_NONE;
netif_wake_queue (dev);
}
spin_unlock_irq(&nl->lock);
if (nl->should_relinquish) {
nl->should_relinquish = nl->port_owner = 0;
parport_release(nl->pardev);
}
return OK;
}
/* PLIP_ERROR --- wait till other end settled */
static int
plip_error(struct net_device *dev, struct net_local *nl,
struct plip_local *snd, struct plip_local *rcv)
{
unsigned char status;
status = read_status(dev);
if ((status & 0xf8) == 0x80) {
if (net_debug > 2)
printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
nl->connection = PLIP_CN_NONE;
nl->should_relinquish = 0;
netif_start_queue (dev);
enable_parport_interrupts (dev);
ENABLE(dev->irq);
netif_wake_queue (dev);
} else {
nl->is_deferred = 1;
schedule_delayed_work(&nl->deferred, 1);
}
return OK;
}
/* Handle the parallel port interrupts. */
static void
plip_interrupt(void *dev_id)
{
struct net_device *dev = dev_id;
struct net_local *nl;
struct plip_local *rcv;
unsigned char c0;
unsigned long flags;
nl = netdev_priv(dev);
rcv = &nl->rcv_data;
spin_lock_irqsave (&nl->lock, flags);
c0 = read_status(dev);
if ((c0 & 0xf8) != 0xc0) {
if ((dev->irq != -1) && (net_debug > 1))
printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
spin_unlock_irqrestore (&nl->lock, flags);
return;
}
if (net_debug > 3)
printk(KERN_DEBUG "%s: interrupt.\n", dev->name);
switch (nl->connection) {
case PLIP_CN_CLOSING:
netif_wake_queue (dev);
fallthrough;
case PLIP_CN_NONE:
case PLIP_CN_SEND:
rcv->state = PLIP_PK_TRIGGER;
nl->connection = PLIP_CN_RECEIVE;
nl->timeout_count = 0;
schedule_work(&nl->immediate);
break;
case PLIP_CN_RECEIVE:
/* May occur because there is a race condition
around the test and set of dev->interrupt.
Ignore this interrupt. */
break;
case PLIP_CN_ERROR:
printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
break;
}
spin_unlock_irqrestore(&nl->lock, flags);
}
static netdev_tx_t
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
struct net_local *nl = netdev_priv(dev);
struct plip_local *snd = &nl->snd_data;
if (netif_queue_stopped(dev))
return NETDEV_TX_BUSY;
/* We may need to grab the bus */
if (!nl->port_owner) {
if (parport_claim(nl->pardev))
return NETDEV_TX_BUSY;
nl->port_owner = 1;
}
netif_stop_queue (dev);
if (skb->len > dev->mtu + dev->hard_header_len) {
printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
netif_start_queue (dev);
return NETDEV_TX_BUSY;
}
if (net_debug > 2)
printk(KERN_DEBUG "%s: send request\n", dev->name);
spin_lock_irq(&nl->lock);
snd->skb = skb;
snd->length.h = skb->len;
snd->state = PLIP_PK_TRIGGER;
if (nl->connection == PLIP_CN_NONE) {
nl->connection = PLIP_CN_SEND;
nl->timeout_count = 0;
}
schedule_work(&nl->immediate);
spin_unlock_irq(&nl->lock);
return NETDEV_TX_OK;
}
static void
plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
{
const struct in_device *in_dev;
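/* Rewrite the Ethernet header of an outgoing frame: the source
becomes the device's own address and the destination is built as
0xfc 0xfc followed by the four bytes of ifa_address. */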
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
/* Any address will do - we take the first */
const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
if (ifa) {
memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
memset(eth->h_dest, 0xfc, 2);
memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
}
}
rcu_read_unlock();
}
static int
plip_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned len)
{
int ret;
ret = eth_header(skb, dev, type, daddr, saddr, len);
if (ret >= 0)
plip_rewrite_address (dev, (struct ethhdr *)skb->data);
return ret;
}
static int plip_hard_header_cache(const struct neighbour *neigh,
struct hh_cache *hh, __be16 type)
{
int ret;
ret = eth_header_cache(neigh, hh, type);
if (ret == 0) {
struct ethhdr *eth;
eth = (struct ethhdr*)(((u8*)hh->hh_data) +
HH_DATA_OFF(sizeof(*eth)));
plip_rewrite_address (neigh->dev, eth);
}
return ret;
}
/* Open/initialize the board. This is called (in the current kernel)
sometime after booting when the 'ifconfig' program is run.
This routine gets exclusive access to the parallel port by allocating
its IRQ line.
*/
static int
plip_open(struct net_device *dev)
{
struct net_local *nl = netdev_priv(dev);
struct in_device *in_dev;
/* Grab the port */
if (!nl->port_owner) {
if (parport_claim(nl->pardev)) return -EAGAIN;
nl->port_owner = 1;
}
nl->should_relinquish = 0;
/* Clear the data port. */
write_data (dev, 0x00);
/* Enable rx interrupt. */
enable_parport_interrupts (dev);
if (dev->irq == -1)
{
atomic_set (&nl->kill_timer, 0);
schedule_delayed_work(&nl->timer, 1);
}
/* Initialize the state machine. */
nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
nl->rcv_data.skb = nl->snd_data.skb = NULL;
nl->connection = PLIP_CN_NONE;
nl->is_deferred = 0;
/* Fill in the MAC-level header.
We used to abuse dev->broadcast to store the point-to-point
MAC address, but we no longer do it. Instead, we fetch the
interface address whenever it is needed, which is cheap enough
because we use the hh_cache. Actually, abusing dev->broadcast
didn't work, because when using plip_open the point-to-point
address isn't yet known.
PLIP doesn't have a real MAC address, but we need it to be
DOS compatible, and to properly support taps (otherwise,
when the device address isn't identical to the address of a
received frame, the kernel incorrectly drops it). */
in_dev=__in_dev_get_rtnl(dev);
if (in_dev) {
/* Any address will do - we take the first. We already
have the first two bytes filled with 0xfc, from
plip_init_dev(). */
const struct in_ifaddr *ifa = rtnl_dereference(in_dev->ifa_list);
if (ifa != NULL) {
dev_addr_mod(dev, 2, &ifa->ifa_local, 4);
}
}
netif_start_queue (dev);
return 0;
}
/* The inverse routine to plip_open (). */
static int
plip_close(struct net_device *dev)
{
struct net_local *nl = netdev_priv(dev);
struct plip_local *snd = &nl->snd_data;
struct plip_local *rcv = &nl->rcv_data;
netif_stop_queue (dev);
DISABLE(dev->irq);
synchronize_irq(dev->irq);
if (dev->irq == -1)
{
init_completion(&nl->killed_timer_cmp);
atomic_set (&nl->kill_timer, 1);
wait_for_completion(&nl->killed_timer_cmp);
}
#ifdef NOTDEF
outb(0x00, PAR_DATA(dev));
#endif
nl->is_deferred = 0;
nl->connection = PLIP_CN_NONE;
if (nl->port_owner) {
parport_release(nl->pardev);
nl->port_owner = 0;
}
snd->state = PLIP_PK_DONE;
if (snd->skb) {
dev_kfree_skb(snd->skb);
snd->skb = NULL;
}
rcv->state = PLIP_PK_DONE;
if (rcv->skb) {
kfree_skb(rcv->skb);
rcv->skb = NULL;
}
#ifdef NOTDEF
/* Reset. */
outb(0x00, PAR_CONTROL(dev));
#endif
return 0;
}
static int
plip_preempt(void *handle)
{
struct net_device *dev = (struct net_device *)handle;
struct net_local *nl = netdev_priv(dev);
/* Stand our ground if a datagram is on the wire */
if (nl->connection != PLIP_CN_NONE) {
nl->should_relinquish = 1;
return 1;
}
nl->port_owner = 0; /* Remember that we released the bus */
return 0;
}
static void
plip_wakeup(void *handle)
{
struct net_device *dev = (struct net_device *)handle;
struct net_local *nl = netdev_priv(dev);
if (nl->port_owner) {
/* Why are we being woken up? */
printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
if (!parport_claim(nl->pardev))
/* bus_owner is already set (but why?) */
printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
else
return;
}
if (!(dev->flags & IFF_UP))
/* Don't need the port when the interface is down */
return;
if (!parport_claim(nl->pardev)) {
nl->port_owner = 1;
/* Clear the data port. */
write_data (dev, 0x00);
}
}
static int
plip_siocdevprivate(struct net_device *dev, struct ifreq *rq,
void __user *data, int cmd)
{
struct net_local *nl = netdev_priv(dev);
struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
if (cmd != SIOCDEVPLIP)
return -EOPNOTSUPP;
if (in_compat_syscall())
return -EOPNOTSUPP;
switch(pc->pcmd) {
case PLIP_GET_TIMEOUT:
pc->trigger = nl->trigger;
pc->nibble = nl->nibble;
break;
case PLIP_SET_TIMEOUT:
if(!capable(CAP_NET_ADMIN))
return -EPERM;
nl->trigger = pc->trigger;
nl->nibble = pc->nibble;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
static int timid;
module_param_array(parport, int, NULL, 0);
module_param(timid, int, 0);
MODULE_PARM_DESC(parport, "List of parport device numbers to be used by plip");
static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
static inline int
plip_searchfor(int list[], int a)
{
int i;
for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
if (list[i] == a) return 1;
}
return 0;
}
/* plip_attach() is called (by the parport code) when a port is
* available to use. */
static void plip_attach (struct parport *port)
{
static int unit;
struct net_device *dev;
struct net_local *nl;
char name[IFNAMSIZ];
struct pardev_cb plip_cb;
if ((parport[0] == -1 && (!timid || !port->devices)) ||
plip_searchfor(parport, port->number)) {
if (unit == PLIP_MAX) {
printk(KERN_ERR "plip: too many devices\n");
return;
}
sprintf(name, "plip%d", unit);
dev = alloc_etherdev(sizeof(struct net_local));
if (!dev)
return;
strcpy(dev->name, name);
dev->irq = port->irq;
dev->base_addr = port->base;
if (port->irq == -1) {
printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
"which is fairly inefficient!\n", port->name);
}
nl = netdev_priv(dev);
nl->dev = dev;
memset(&plip_cb, 0, sizeof(plip_cb));
plip_cb.private = dev;
plip_cb.preempt = plip_preempt;
plip_cb.wakeup = plip_wakeup;
plip_cb.irq_func = plip_interrupt;
nl->pardev = parport_register_dev_model(port, dev->name,
&plip_cb, unit);
if (!nl->pardev) {
printk(KERN_ERR "%s: parport_register failed\n", name);
goto err_free_dev;
}
plip_init_netdev(dev);
if (register_netdev(dev)) {
printk(KERN_ERR "%s: network register failed\n", name);
goto err_parport_unregister;
}
printk(KERN_INFO "%s", version);
if (dev->irq != -1)
printk(KERN_INFO "%s: Parallel port at %#3lx, "
"using IRQ %d.\n",
dev->name, dev->base_addr, dev->irq);
else
printk(KERN_INFO "%s: Parallel port at %#3lx, "
"not using IRQ.\n",
dev->name, dev->base_addr);
dev_plip[unit++] = dev;
}
return;
err_parport_unregister:
parport_unregister_device(nl->pardev);
err_free_dev:
free_netdev(dev);
}
/* plip_detach() is called (by the parport code) when a port is
* no longer available to use. */
static void plip_detach (struct parport *port)
{
/* Nothing to do */
}
static int plip_probe(struct pardevice *par_dev)
{
struct device_driver *drv = par_dev->dev.driver;
int len = strlen(drv->name);
if (strncmp(par_dev->name, drv->name, len))
return -ENODEV;
return 0;
}
static struct parport_driver plip_driver = {
.name = "plip",
.probe = plip_probe,
.match_port = plip_attach,
.detach = plip_detach,
.devmodel = true,
};
static void __exit plip_cleanup_module (void)
{
struct net_device *dev;
int i;
for (i=0; i < PLIP_MAX; i++) {
if ((dev = dev_plip[i])) {
struct net_local *nl = netdev_priv(dev);
unregister_netdev(dev);
if (nl->port_owner)
parport_release(nl->pardev);
parport_unregister_device(nl->pardev);
free_netdev(dev);
dev_plip[i] = NULL;
}
}
parport_unregister_driver(&plip_driver);
}
#ifndef MODULE
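/* Boot-time option parsing. Judging from the code below, typical
forms are "plip=parport1" to bind a specific parport, "plip=timid"
to skip ports that already have devices, and "plip=0" or "plip="
to disable the driver (values here are illustrative). */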
static int parport_ptr;
static int __init plip_setup(char *str)
{
int ints[4];
str = get_options(str, ARRAY_SIZE(ints), ints);
/* Ugh. */
if (!strncmp(str, "parport", 7)) {
int n = simple_strtoul(str+7, NULL, 10);
if (parport_ptr < PLIP_MAX)
parport[parport_ptr++] = n;
else
printk(KERN_INFO "plip: too many ports, %s ignored.\n",
str);
} else if (!strcmp(str, "timid")) {
timid = 1;
} else {
if (ints[0] == 0 || ints[1] == 0) {
/* disable driver on "plip=" or "plip=0" */
parport[0] = -2;
} else {
printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
ints[1]);
}
}
return 1;
}
__setup("plip=", plip_setup);
#endif /* !MODULE */
static int __init plip_init (void)
{
if (parport[0] == -2)
return 0;
if (parport[0] != -1 && timid) {
printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
timid = 0;
}
if (parport_register_driver (&plip_driver)) {
printk (KERN_WARNING "plip: couldn't register driver\n");
return 1;
}
return 0;
}
module_init(plip_init);
module_exit(plip_cleanup_module);
MODULE_LICENSE("GPL");
| linux-master | drivers/net/plip/plip.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*****************************************************************************/
/*
* baycom_par.c -- baycom par96 and picpar radio modem driver.
*
* Copyright (C) 1996-2000 Thomas Sailer ([email protected])
*
* Please note that the GPL allows you to use the driver, NOT the radio.
* In order to use the radio, you need a license from the communications
* authority of your country.
*
* Supported modems
*
* par96: This is a modem for 9600 baud FSK compatible with the G3RUH standard.
* The modem does all the filtering and regenerates the receiver clock.
* Data is transferred from and to the PC via a shift register.
* The shift register is filled with 16 bits and an interrupt is
* signalled. The PC then empties the shift register in a burst. This
* modem connects to the parallel port, hence the name. The modem
* leaves the implementation of the HDLC protocol and the scrambler
* polynomial to the PC. This modem is no longer available (at least
* from Baycom) and has been replaced by the PICPAR modem (see below).
* You may however still build one from the schematics published in
* cq-DL :-).
*
* picpar: This is a redesign of the par96 modem by Henning Rech, DF9IC. The
* modem is protocol compatible with par96, but uses only three low
* power ICs and can therefore be fed from the parallel port and
* does not require an additional power supply. It features
* built-in DCD circuitry. The driver should therefore be configured
* for hardware DCD.
*
* Command line options (insmod command line)
*
* mode driver mode string. Valid choices are par96 and picpar.
* iobase base address of the port; common values are 0x378, 0x278, 0x3bc
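*
* Example (illustrative invocation only, using the default values):
*   insmod baycom_par mode=picpar iobase=0x378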
*
* History:
* 0.1 26.06.1996 Adapted from baycom.c and made network driver interface
* 18.10.1996 Changed to new user space access routines (copy_{to,from}_user)
* 0.3 26.04.1997 init code/data tagged
* 0.4 08.07.1997 alternative ser12 decoding algorithm (uses delta CTS ints)
* 0.5 11.11.1997 split into separate files for ser12/par96
* 0.6 03.08.1999 adapt to Linus' new __setup/__initcall
* removed some pre-2.2 kernel compatibility cruft
* 0.7 10.08.1999 Check if parport can do SPP and is safe to access during interrupt contexts
* 0.8 12.02.2000 adapted to softnet driver interface
* removed direct parport access, uses parport driver methods
* 0.9 03.07.2000 fix interface name handling
*/
/*****************************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/hdlcdrv.h>
#include <linux/baycom.h>
#include <linux/parport.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
/* --------------------------------------------------------------------- */
#define BAYCOM_DEBUG
/*
* modem options; bit mask
*/
#define BAYCOM_OPTIONS_SOFTDCD 1
/* --------------------------------------------------------------------- */
static const char bc_drvname[] = "baycom_par";
static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
"baycom_par: version 0.9\n";
/* --------------------------------------------------------------------- */
#define NR_PORTS 4
static struct net_device *baycom_device[NR_PORTS];
/* --------------------------------------------------------------------- */
#define PAR96_BURSTBITS 16
#define PAR96_BURST 4
#define PAR96_PTT 2
#define PAR96_TXBIT 1
#define PAR96_ACK 0x40
#define PAR96_RXBIT 0x20
#define PAR96_DCD 0x10
#define PAR97_POWER 0xf8
/* ---------------------------------------------------------------------- */
/*
* Information that needs to be kept for each board.
*/
struct baycom_state {
struct hdlcdrv_state hdrv;
struct pardevice *pdev;
unsigned int options;
struct modem_state {
short arb_divider;
unsigned char flags;
unsigned int shreg;
struct modem_state_par96 {
int dcd_count;
unsigned int dcd_shreg;
unsigned long descram;
unsigned long scram;
} par96;
} modem;
#ifdef BAYCOM_DEBUG
struct debug_vals {
unsigned long last_jiffies;
unsigned cur_intcnt;
unsigned last_intcnt;
int cur_pllcorr;
int last_pllcorr;
} debug_vals;
#endif /* BAYCOM_DEBUG */
};
/* --------------------------------------------------------------------- */
static inline void baycom_int_freq(struct baycom_state *bc)
{
#ifdef BAYCOM_DEBUG
unsigned long cur_jiffies = jiffies;
/*
* measure the interrupt frequency
*/
bc->debug_vals.cur_intcnt++;
if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
bc->debug_vals.last_jiffies = cur_jiffies;
bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
bc->debug_vals.cur_intcnt = 0;
bc->debug_vals.last_pllcorr = bc->debug_vals.cur_pllcorr;
bc->debug_vals.cur_pllcorr = 0;
}
#endif /* BAYCOM_DEBUG */
}
/* --------------------------------------------------------------------- */
/*
* ===================== PAR96 specific routines =========================
*/
#define PAR96_DESCRAM_TAP1 0x20000
#define PAR96_DESCRAM_TAP2 0x01000
#define PAR96_DESCRAM_TAP3 0x00001
#define PAR96_DESCRAM_TAPSH1 17
#define PAR96_DESCRAM_TAPSH2 12
#define PAR96_DESCRAM_TAPSH3 0
#define PAR96_SCRAM_TAP1 0x20000 /* X^17 */
#define PAR96_SCRAM_TAPN 0x00021 /* X^0+X^5 */
/* --------------------------------------------------------------------- */
static inline void par96_tx(struct net_device *dev, struct baycom_state *bc)
{
int i;
unsigned int data = hdlcdrv_getbits(&bc->hdrv);
struct parport *pp = bc->pdev->port;
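/*
 * For each transmit bit: differentially encode and scramble the bit
 * through the shift register defined by PAR96_SCRAM_TAP1/TAPN, put
 * the result on PAR96_TXBIT, and clock it into the modem by writing
 * the value twice, the second time with PAR96_BURST set.
 */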
for(i = 0; i < PAR96_BURSTBITS; i++, data >>= 1) {
unsigned char val = PAR97_POWER;
bc->modem.par96.scram = ((bc->modem.par96.scram << 1) |
(bc->modem.par96.scram & 1));
if (!(data & 1))
bc->modem.par96.scram ^= 1;
if (bc->modem.par96.scram & (PAR96_SCRAM_TAP1 << 1))
bc->modem.par96.scram ^=
(PAR96_SCRAM_TAPN << 1);
if (bc->modem.par96.scram & (PAR96_SCRAM_TAP1 << 2))
val |= PAR96_TXBIT;
pp->ops->write_data(pp, val);
pp->ops->write_data(pp, val | PAR96_BURST);
}
}
/* --------------------------------------------------------------------- */
static inline void par96_rx(struct net_device *dev, struct baycom_state *bc)
{
int i;
unsigned int data, mask, mask2, descx;
struct parport *pp = bc->pdev->port;
/*
* do receiver; differential decode and descramble on the fly
*/
for(data = i = 0; i < PAR96_BURSTBITS; i++) {
bc->modem.par96.descram = (bc->modem.par96.descram << 1);
if (pp->ops->read_status(pp) & PAR96_RXBIT)
bc->modem.par96.descram |= 1;
descx = bc->modem.par96.descram ^
(bc->modem.par96.descram >> 1);
/* now the diff decoded data is inverted in descram */
pp->ops->write_data(pp, PAR97_POWER | PAR96_PTT);
descx ^= ((descx >> PAR96_DESCRAM_TAPSH1) ^
(descx >> PAR96_DESCRAM_TAPSH2));
data >>= 1;
if (!(descx & 1))
data |= 0x8000;
pp->ops->write_data(pp, PAR97_POWER | PAR96_PTT | PAR96_BURST);
}
hdlcdrv_putbits(&bc->hdrv, data);
/*
* do DCD algorithm
*/
if (bc->options & BAYCOM_OPTIONS_SOFTDCD) {
bc->modem.par96.dcd_shreg = (bc->modem.par96.dcd_shreg >> 16)
| (data << 16);
/* search for flags and set the dcd counter appropriately */
for(mask = 0x1fe00, mask2 = 0xfc00, i = 0;
i < PAR96_BURSTBITS; i++, mask <<= 1, mask2 <<= 1)
if ((bc->modem.par96.dcd_shreg & mask) == mask2)
bc->modem.par96.dcd_count = HDLCDRV_MAXFLEN+4;
/* check for abort/noise sequences */
for(mask = 0x1fe00, mask2 = 0x1fe00, i = 0;
i < PAR96_BURSTBITS; i++, mask <<= 1, mask2 <<= 1)
if (((bc->modem.par96.dcd_shreg & mask) == mask2) &&
(bc->modem.par96.dcd_count >= 0))
bc->modem.par96.dcd_count -= HDLCDRV_MAXFLEN-10;
/* decrement and set the dcd variable */
if (bc->modem.par96.dcd_count >= 0)
bc->modem.par96.dcd_count -= 2;
hdlcdrv_setdcd(&bc->hdrv, bc->modem.par96.dcd_count > 0);
} else {
hdlcdrv_setdcd(&bc->hdrv, !!(pp->ops->read_status(pp) & PAR96_DCD));
}
}
/* --------------------------------------------------------------------- */
static void par96_interrupt(void *dev_id)
{
struct net_device *dev = dev_id;
struct baycom_state *bc = netdev_priv(dev);
baycom_int_freq(bc);
/*
* check if transmitter active
*/
if (hdlcdrv_ptt(&bc->hdrv))
par96_tx(dev, bc);
else {
par96_rx(dev, bc);
if (--bc->modem.arb_divider <= 0) {
bc->modem.arb_divider = 6;
local_irq_enable();
hdlcdrv_arbitrate(dev, &bc->hdrv);
}
}
local_irq_enable();
hdlcdrv_transmitter(dev, &bc->hdrv);
hdlcdrv_receiver(dev, &bc->hdrv);
local_irq_disable();
}
/* --------------------------------------------------------------------- */
static void par96_wakeup(void *handle)
{
struct net_device *dev = (struct net_device *)handle;
struct baycom_state *bc = netdev_priv(dev);
printk(KERN_DEBUG "baycom_par: %s: why am I being woken up?\n", dev->name);
if (!parport_claim(bc->pdev))
printk(KERN_DEBUG "baycom_par: %s: I'm broken.\n", dev->name);
}
/* --------------------------------------------------------------------- */
static int par96_open(struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
struct pardev_cb par_cb;
struct parport *pp;
int i;
if (!dev || !bc)
return -ENXIO;
pp = parport_find_base(dev->base_addr);
if (!pp) {
printk(KERN_ERR "baycom_par: parport at 0x%lx unknown\n", dev->base_addr);
return -ENXIO;
}
if (pp->irq < 0) {
printk(KERN_ERR "baycom_par: parport at 0x%lx has no irq\n", pp->base);
parport_put_port(pp);
return -ENXIO;
}
if ((~pp->modes) & (PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT)) {
printk(KERN_ERR "baycom_par: parport at 0x%lx cannot be used\n", pp->base);
parport_put_port(pp);
return -ENXIO;
}
memset(&bc->modem, 0, sizeof(bc->modem));
bc->hdrv.par.bitrate = 9600;
memset(&par_cb, 0, sizeof(par_cb));
par_cb.wakeup = par96_wakeup;
par_cb.irq_func = par96_interrupt;
par_cb.private = (void *)dev;
par_cb.flags = PARPORT_DEV_EXCL;
for (i = 0; i < NR_PORTS; i++)
if (baycom_device[i] == dev)
break;
if (i == NR_PORTS) {
pr_err("%s: no device found\n", bc_drvname);
parport_put_port(pp);
return -ENODEV;
}
bc->pdev = parport_register_dev_model(pp, dev->name, &par_cb, i);
parport_put_port(pp);
if (!bc->pdev) {
printk(KERN_ERR "baycom_par: cannot register parport at 0x%lx\n", dev->base_addr);
return -ENXIO;
}
if (parport_claim(bc->pdev)) {
printk(KERN_ERR "baycom_par: parport at 0x%lx busy\n", pp->base);
parport_unregister_device(bc->pdev);
return -EBUSY;
}
pp = bc->pdev->port;
dev->irq = pp->irq;
pp->ops->data_forward(pp);
bc->hdrv.par.bitrate = 9600;
pp->ops->write_data(pp, PAR96_PTT | PAR97_POWER); /* switch off PTT */
pp->ops->enable_irq(pp);
printk(KERN_INFO "%s: par96 at iobase 0x%lx irq %u options 0x%x\n",
bc_drvname, dev->base_addr, dev->irq, bc->options);
return 0;
}
/* --------------------------------------------------------------------- */
static int par96_close(struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
struct parport *pp;
if (!dev || !bc)
return -EINVAL;
pp = bc->pdev->port;
/* disable interrupt */
pp->ops->disable_irq(pp);
/* switch off PTT */
pp->ops->write_data(pp, PAR96_PTT | PAR97_POWER);
parport_release(bc->pdev);
parport_unregister_device(bc->pdev);
printk(KERN_INFO "%s: close par96 at iobase 0x%lx irq %u\n",
bc_drvname, dev->base_addr, dev->irq);
return 0;
}
/* --------------------------------------------------------------------- */
/*
* ===================== hdlcdrv driver interface =========================
*/
static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd);
/* --------------------------------------------------------------------- */
static const struct hdlcdrv_ops par96_ops = {
.drvname = bc_drvname,
.drvinfo = bc_drvinfo,
.open = par96_open,
.close = par96_close,
.ioctl = baycom_ioctl
};
/* --------------------------------------------------------------------- */
static int baycom_setmode(struct baycom_state *bc, const char *modestr)
{
if (!strncmp(modestr, "picpar", 6))
bc->options = 0;
else if (!strncmp(modestr, "par96", 5))
bc->options = BAYCOM_OPTIONS_SOFTDCD;
else
bc->options = !!strchr(modestr, '*');
return 0;
}
/* --------------------------------------------------------------------- */
static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd)
{
struct baycom_state *bc;
struct baycom_ioctl bi;
if (!dev)
return -EINVAL;
bc = netdev_priv(dev);
BUG_ON(bc->hdrv.magic != HDLCDRV_MAGIC);
if (cmd != SIOCDEVPRIVATE)
return -ENOIOCTLCMD;
switch (hi->cmd) {
default:
break;
case HDLCDRVCTL_GETMODE:
strcpy(hi->data.modename, bc->options ? "par96" : "picpar");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
case HDLCDRVCTL_SETMODE:
if (netif_running(dev) || !capable(CAP_NET_ADMIN))
return -EACCES;
hi->data.modename[sizeof(hi->data.modename)-1] = '\0';
return baycom_setmode(bc, hi->data.modename);
case HDLCDRVCTL_MODELIST:
strcpy(hi->data.modename, "par96,picpar");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
case HDLCDRVCTL_MODEMPARMASK:
return HDLCDRV_PARMASK_IOBASE;
}
if (copy_from_user(&bi, data, sizeof(bi)))
return -EFAULT;
switch (bi.cmd) {
default:
return -ENOIOCTLCMD;
#ifdef BAYCOM_DEBUG
case BAYCOMCTL_GETDEBUG:
bi.data.dbg.debug1 = bc->hdrv.ptt_keyed;
bi.data.dbg.debug2 = bc->debug_vals.last_intcnt;
bi.data.dbg.debug3 = bc->debug_vals.last_pllcorr;
break;
#endif /* BAYCOM_DEBUG */
}
if (copy_to_user(data, &bi, sizeof(bi)))
return -EFAULT;
return 0;
}
/* --------------------------------------------------------------------- */
/*
* command line settable parameters
*/
static char *mode[NR_PORTS] = { "picpar", };
static int iobase[NR_PORTS] = { 0x378, };
module_param_array(mode, charp, NULL, 0);
MODULE_PARM_DESC(mode, "baycom operating mode; e.g. par96 or picpar");
module_param_hw_array(iobase, int, ioport, NULL, 0);
MODULE_PARM_DESC(iobase, "baycom io base address");
MODULE_AUTHOR("Thomas M. Sailer, [email protected], [email protected]");
MODULE_DESCRIPTION("Baycom par96 and picpar amateur radio modem driver");
MODULE_LICENSE("GPL");
/* --------------------------------------------------------------------- */
static int baycom_par_probe(struct pardevice *par_dev)
{
struct device_driver *drv = par_dev->dev.driver;
int len = strlen(drv->name);
if (strncmp(par_dev->name, drv->name, len))
return -ENODEV;
return 0;
}
static struct parport_driver baycom_par_driver = {
.name = "bcp",
.probe = baycom_par_probe,
.devmodel = true,
};
static int __init init_baycompar(void)
{
int i, found = 0, ret;
char set_hw = 1;
printk(bc_drvinfo);
ret = parport_register_driver(&baycom_par_driver);
if (ret)
return ret;
/*
* register net devices
*/
for (i = 0; i < NR_PORTS; i++) {
struct net_device *dev;
struct baycom_state *bc;
char ifname[IFNAMSIZ];
sprintf(ifname, "bcp%d", i);
if (!mode[i])
set_hw = 0;
if (!set_hw)
iobase[i] = 0;
dev = hdlcdrv_register(&par96_ops,
sizeof(struct baycom_state),
ifname, iobase[i], 0, 0);
if (IS_ERR(dev))
break;
bc = netdev_priv(dev);
if (set_hw && baycom_setmode(bc, mode[i]))
set_hw = 0;
found++;
baycom_device[i] = dev;
}
if (!found) {
parport_unregister_driver(&baycom_par_driver);
return -ENXIO;
}
return 0;
}
static void __exit cleanup_baycompar(void)
{
int i;
for(i = 0; i < NR_PORTS; i++) {
struct net_device *dev = baycom_device[i];
if (dev)
hdlcdrv_unregister(dev);
}
parport_unregister_driver(&baycom_par_driver);
}
module_init(init_baycompar);
module_exit(cleanup_baycompar);
/* --------------------------------------------------------------------- */
#ifndef MODULE
/*
* format: baycom_par=io,mode
* mode: par96,picpar
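* example (illustrative values): baycom_par=0x378,picpar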
*/
static int __init baycom_par_setup(char *str)
{
static unsigned nr_dev;
int ints[2];
if (nr_dev >= NR_PORTS)
return 0;
str = get_options(str, 2, ints);
if (ints[0] < 1)
return 0;
mode[nr_dev] = str;
iobase[nr_dev] = ints[1];
nr_dev++;
return 1;
}
__setup("baycom_par=", baycom_par_setup);
#endif /* MODULE */
/* --------------------------------------------------------------------- */
| linux-master | drivers/net/hamradio/baycom_par.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*****************************************************************************/
/*
* baycom_ser_hdx.c -- baycom ser12 halfduplex radio modem driver.
*
* Copyright (C) 1996-2000 Thomas Sailer ([email protected])
*
* Please note that the GPL allows you to use the driver, NOT the radio.
* In order to use the radio, you need a license from the communications
* authority of your country.
*
* Supported modems
*
* ser12: This is a very simple 1200 baud AFSK modem. The modem consists only
* of a modulator/demodulator chip, usually a TI TCM3105. The computer
* is responsible for regenerating the receiver bit clock, as well as
* for handling the HDLC protocol. The modem connects to a serial port,
* hence the name. Since the serial port is not used as an async serial
* port, the kernel driver for serial ports cannot be used, and this
* driver only supports standard serial hardware (8250, 16450, 16550A)
*
* Command line options (insmod command line)
*
* mode ser12 hardware DCD
* ser12* software DCD
* ser12@ hardware/software DCD, i.e. no explicit DCD signal but hardware
* mutes audio input to the modem
* ser12+ hardware DCD, inverted signal at DCD pin
* iobase base address of the port; common values are 0x3f8, 0x2f8, 0x3e8, 0x2e8
* irq interrupt line of the port; common values are 4,3
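*
* Example (illustrative invocation only, using the default values):
*   insmod baycom_ser_hdx mode=ser12* iobase=0x3f8 irq=4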
*
* History:
* 0.1 26.06.1996 Adapted from baycom.c and made network driver interface
* 18.10.1996 Changed to new user space access routines (copy_{to,from}_user)
* 0.3 26.04.1997 init code/data tagged
* 0.4 08.07.1997 alternative ser12 decoding algorithm (uses delta CTS ints)
* 0.5 11.11.1997 ser12/par96 split into separate files
* 0.6 14.04.1998 cleanups
* 0.7 03.08.1999 adapt to Linus' new __setup/__initcall
* 0.8 10.08.1999 use module_init/module_exit
* 0.9 12.02.2000 adapted to softnet driver interface
* 0.10 03.07.2000 fix interface name handling
*/
/*****************************************************************************/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/hdlcdrv.h>
#include <linux/baycom.h>
#include <linux/jiffies.h>
/* --------------------------------------------------------------------- */
#define BAYCOM_DEBUG
/* --------------------------------------------------------------------- */
static const char bc_drvname[] = "baycom_ser_hdx";
static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
"baycom_ser_hdx: version 0.10\n";
/* --------------------------------------------------------------------- */
#define NR_PORTS 4
static struct net_device *baycom_device[NR_PORTS];
/* --------------------------------------------------------------------- */
#define RBR(iobase) (iobase+0)
#define THR(iobase) (iobase+0)
#define IER(iobase) (iobase+1)
#define IIR(iobase) (iobase+2)
#define FCR(iobase) (iobase+2)
#define LCR(iobase) (iobase+3)
#define MCR(iobase) (iobase+4)
#define LSR(iobase) (iobase+5)
#define MSR(iobase) (iobase+6)
#define SCR(iobase) (iobase+7)
#define DLL(iobase) (iobase+0)
#define DLM(iobase) (iobase+1)
#define SER12_EXTENT 8
/* ---------------------------------------------------------------------- */
/*
* Information that needs to be kept for each board.
*/
struct baycom_state {
struct hdlcdrv_state hdrv;
int opt_dcd;
struct modem_state {
short arb_divider;
unsigned char flags;
unsigned int shreg;
struct modem_state_ser12 {
unsigned char tx_bit;
int dcd_sum0, dcd_sum1, dcd_sum2;
unsigned char last_sample;
unsigned char last_rxbit;
unsigned int dcd_shreg;
unsigned int dcd_time;
unsigned int bit_pll;
unsigned char interm_sample;
} ser12;
} modem;
#ifdef BAYCOM_DEBUG
struct debug_vals {
unsigned long last_jiffies;
unsigned cur_intcnt;
unsigned last_intcnt;
int cur_pllcorr;
int last_pllcorr;
} debug_vals;
#endif /* BAYCOM_DEBUG */
};
/* --------------------------------------------------------------------- */
static inline void baycom_int_freq(struct baycom_state *bc)
{
#ifdef BAYCOM_DEBUG
unsigned long cur_jiffies = jiffies;
/*
* measure the interrupt frequency
*/
bc->debug_vals.cur_intcnt++;
if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
bc->debug_vals.last_jiffies = cur_jiffies;
bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
bc->debug_vals.cur_intcnt = 0;
bc->debug_vals.last_pllcorr = bc->debug_vals.cur_pllcorr;
bc->debug_vals.cur_pllcorr = 0;
}
#endif /* BAYCOM_DEBUG */
}
/* --------------------------------------------------------------------- */
/*
* ===================== SER12 specific routines =========================
*/
static inline void ser12_set_divisor(struct net_device *dev,
unsigned char divisor)
{
outb(0x81, LCR(dev->base_addr)); /* DLAB = 1 */
outb(divisor, DLL(dev->base_addr));
outb(0, DLM(dev->base_addr));
outb(0x01, LCR(dev->base_addr)); /* word length = 6 */
/*
* make sure the next interrupt is generated;
* 0 must be used to power the modem; the modem draws its
* power from the TxD line
*/
outb(0x00, THR(dev->base_addr));
/*
* it is important not to set the divider while transmitting;
* this reportedly makes some UARTs generate interrupts
* at rates in the hundreds of thousands per second
* Reported by: [email protected] (Ignacio Arenaza Nuno)
*/
}
/* --------------------------------------------------------------------- */
/*
* must call the TX arbitrator every 10ms
*/
#define SER12_ARB_DIVIDER(bc) (bc->opt_dcd ? 24 : 36)
#define SER12_DCD_INTERVAL(bc) (bc->opt_dcd ? 12 : 240)
static inline void ser12_tx(struct net_device *dev, struct baycom_state *bc)
{
/* one interrupt per channel bit */
ser12_set_divisor(dev, 12);
/*
* first output the last bit (!) then call HDLC transmitter,
* since this may take quite long
*/
outb(0x0e | (!!bc->modem.ser12.tx_bit), MCR(dev->base_addr));
if (bc->modem.shreg <= 1)
bc->modem.shreg = 0x10000 | hdlcdrv_getbits(&bc->hdrv);
bc->modem.ser12.tx_bit = !(bc->modem.ser12.tx_bit ^
(bc->modem.shreg & 1));
bc->modem.shreg >>= 1;
}
/* --------------------------------------------------------------------- */
static inline void ser12_rx(struct net_device *dev, struct baycom_state *bc)
{
unsigned char cur_s;
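/*
 * The demodulator samples the CTS line; transitions are shifted into
 * dcd_shreg, which feeds both the software DCD counters (dcd_sum0..2)
 * and the bit-clock PLL that nudges the sampling rate via
 * ser12_set_divisor() when transitions arrive too early or too late.
 */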
/*
* do demodulator
*/
cur_s = inb(MSR(dev->base_addr)) & 0x10; /* the CTS line */
hdlcdrv_channelbit(&bc->hdrv, cur_s);
bc->modem.ser12.dcd_shreg = (bc->modem.ser12.dcd_shreg << 1) |
(cur_s != bc->modem.ser12.last_sample);
bc->modem.ser12.last_sample = cur_s;
if(bc->modem.ser12.dcd_shreg & 1) {
if (!bc->opt_dcd) {
unsigned int dcdspos, dcdsneg;
dcdspos = dcdsneg = 0;
dcdspos += ((bc->modem.ser12.dcd_shreg >> 1) & 1);
if (!(bc->modem.ser12.dcd_shreg & 0x7ffffffe))
dcdspos += 2;
dcdsneg += ((bc->modem.ser12.dcd_shreg >> 2) & 1);
dcdsneg += ((bc->modem.ser12.dcd_shreg >> 3) & 1);
dcdsneg += ((bc->modem.ser12.dcd_shreg >> 4) & 1);
bc->modem.ser12.dcd_sum0 += 16*dcdspos - dcdsneg;
} else
bc->modem.ser12.dcd_sum0--;
}
if(!bc->modem.ser12.dcd_time) {
hdlcdrv_setdcd(&bc->hdrv, (bc->modem.ser12.dcd_sum0 +
bc->modem.ser12.dcd_sum1 +
bc->modem.ser12.dcd_sum2) < 0);
bc->modem.ser12.dcd_sum2 = bc->modem.ser12.dcd_sum1;
bc->modem.ser12.dcd_sum1 = bc->modem.ser12.dcd_sum0;
/* offset to ensure DCD off on silent input */
bc->modem.ser12.dcd_sum0 = 2;
bc->modem.ser12.dcd_time = SER12_DCD_INTERVAL(bc);
}
bc->modem.ser12.dcd_time--;
if (!bc->opt_dcd) {
/*
* PLL code for the improved software DCD algorithm
*/
if (bc->modem.ser12.interm_sample) {
/*
* intermediate sample; set timing correction to normal
*/
ser12_set_divisor(dev, 4);
} else {
/*
* do PLL correction and call HDLC receiver
*/
switch (bc->modem.ser12.dcd_shreg & 7) {
case 1: /* transition too late */
ser12_set_divisor(dev, 5);
#ifdef BAYCOM_DEBUG
bc->debug_vals.cur_pllcorr++;
#endif /* BAYCOM_DEBUG */
break;
case 4: /* transition too early */
ser12_set_divisor(dev, 3);
#ifdef BAYCOM_DEBUG
bc->debug_vals.cur_pllcorr--;
#endif /* BAYCOM_DEBUG */
break;
default:
ser12_set_divisor(dev, 4);
break;
}
bc->modem.shreg >>= 1;
if (bc->modem.ser12.last_sample ==
bc->modem.ser12.last_rxbit)
bc->modem.shreg |= 0x10000;
bc->modem.ser12.last_rxbit =
bc->modem.ser12.last_sample;
}
if (++bc->modem.ser12.interm_sample >= 3)
bc->modem.ser12.interm_sample = 0;
/*
* DCD stuff
*/
if (bc->modem.ser12.dcd_shreg & 1) {
unsigned int dcdspos, dcdsneg;
dcdspos = dcdsneg = 0;
dcdspos += ((bc->modem.ser12.dcd_shreg >> 1) & 1);
dcdspos += (!(bc->modem.ser12.dcd_shreg & 0x7ffffffe))
<< 1;
dcdsneg += ((bc->modem.ser12.dcd_shreg >> 2) & 1);
dcdsneg += ((bc->modem.ser12.dcd_shreg >> 3) & 1);
dcdsneg += ((bc->modem.ser12.dcd_shreg >> 4) & 1);
bc->modem.ser12.dcd_sum0 += 16*dcdspos - dcdsneg;
}
} else {
/*
* PLL algorithm for the hardware squelch DCD algorithm
*/
if (bc->modem.ser12.interm_sample) {
/*
* intermediate sample; set timing correction to normal
*/
ser12_set_divisor(dev, 6);
} else {
/*
* do PLL correction and call HDLC receiver
*/
switch (bc->modem.ser12.dcd_shreg & 3) {
case 1: /* transition too late */
ser12_set_divisor(dev, 7);
#ifdef BAYCOM_DEBUG
bc->debug_vals.cur_pllcorr++;
#endif /* BAYCOM_DEBUG */
break;
case 2: /* transition too early */
ser12_set_divisor(dev, 5);
#ifdef BAYCOM_DEBUG
bc->debug_vals.cur_pllcorr--;
#endif /* BAYCOM_DEBUG */
break;
default:
ser12_set_divisor(dev, 6);
break;
}
bc->modem.shreg >>= 1;
if (bc->modem.ser12.last_sample ==
bc->modem.ser12.last_rxbit)
bc->modem.shreg |= 0x10000;
bc->modem.ser12.last_rxbit =
bc->modem.ser12.last_sample;
}
bc->modem.ser12.interm_sample = !bc->modem.ser12.interm_sample;
/*
* DCD stuff
*/
bc->modem.ser12.dcd_sum0 -= (bc->modem.ser12.dcd_shreg & 1);
}
outb(0x0d, MCR(dev->base_addr)); /* transmitter off */
if (bc->modem.shreg & 1) {
hdlcdrv_putbits(&bc->hdrv, bc->modem.shreg >> 1);
bc->modem.shreg = 0x10000;
}
if(!bc->modem.ser12.dcd_time) {
if (bc->opt_dcd & 1)
hdlcdrv_setdcd(&bc->hdrv, !((inb(MSR(dev->base_addr)) ^ bc->opt_dcd) & 0x80));
else
hdlcdrv_setdcd(&bc->hdrv, (bc->modem.ser12.dcd_sum0 +
bc->modem.ser12.dcd_sum1 +
bc->modem.ser12.dcd_sum2) < 0);
bc->modem.ser12.dcd_sum2 = bc->modem.ser12.dcd_sum1;
bc->modem.ser12.dcd_sum1 = bc->modem.ser12.dcd_sum0;
/* offset to ensure DCD off on silent input */
bc->modem.ser12.dcd_sum0 = 2;
bc->modem.ser12.dcd_time = SER12_DCD_INTERVAL(bc);
}
bc->modem.ser12.dcd_time--;
}
/* --------------------------------------------------------------------- */
static irqreturn_t ser12_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct baycom_state *bc = netdev_priv(dev);
unsigned char iir;
if (!dev || !bc || bc->hdrv.magic != HDLCDRV_MAGIC)
return IRQ_NONE;
/* fast way out */
if ((iir = inb(IIR(dev->base_addr))) & 1)
return IRQ_NONE;
baycom_int_freq(bc);
do {
switch (iir & 6) {
case 6:
inb(LSR(dev->base_addr));
break;
case 4:
inb(RBR(dev->base_addr));
break;
case 2:
/*
* check if transmitter active
*/
if (hdlcdrv_ptt(&bc->hdrv))
ser12_tx(dev, bc);
else {
ser12_rx(dev, bc);
bc->modem.arb_divider--;
}
outb(0x00, THR(dev->base_addr));
break;
default:
inb(MSR(dev->base_addr));
break;
}
iir = inb(IIR(dev->base_addr));
} while (!(iir & 1));
if (bc->modem.arb_divider <= 0) {
bc->modem.arb_divider = SER12_ARB_DIVIDER(bc);
local_irq_enable();
hdlcdrv_arbitrate(dev, &bc->hdrv);
}
local_irq_enable();
hdlcdrv_transmitter(dev, &bc->hdrv);
hdlcdrv_receiver(dev, &bc->hdrv);
local_irq_disable();
return IRQ_HANDLED;
}
/* --------------------------------------------------------------------- */
enum uart { c_uart_unknown, c_uart_8250,
c_uart_16450, c_uart_16550, c_uart_16550A};
static const char *uart_str[] = {
"unknown", "8250", "16450", "16550", "16550A"
};
static enum uart ser12_check_uart(unsigned int iobase)
{
unsigned char b1,b2,b3;
enum uart u;
enum uart uart_tab[] =
{ c_uart_16450, c_uart_unknown, c_uart_16550, c_uart_16550A };
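/*
 * Probe sequence: briefly put the UART into loopback via MCR and
 * check that MSR mirrors the driven outputs, classify the chip from
 * the FIFO status bits in IIR after enabling the FIFO, and use the
 * scratch register to tell a 16450 from an 8250.
 */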
b1 = inb(MCR(iobase));
outb(b1 | 0x10, MCR(iobase)); /* loopback mode */
b2 = inb(MSR(iobase));
outb(0x1a, MCR(iobase));
b3 = inb(MSR(iobase)) & 0xf0;
outb(b1, MCR(iobase)); /* restore old values */
outb(b2, MSR(iobase));
if (b3 != 0x90)
return c_uart_unknown;
inb(RBR(iobase));
inb(RBR(iobase));
outb(0x01, FCR(iobase)); /* enable FIFOs */
u = uart_tab[(inb(IIR(iobase)) >> 6) & 3];
if (u == c_uart_16450) {
outb(0x5a, SCR(iobase));
b1 = inb(SCR(iobase));
outb(0xa5, SCR(iobase));
b2 = inb(SCR(iobase));
if ((b1 != 0x5a) || (b2 != 0xa5))
u = c_uart_8250;
}
return u;
}
/* --------------------------------------------------------------------- */
static int ser12_open(struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
enum uart u;
if (!dev || !bc)
return -ENXIO;
if (!dev->base_addr || dev->base_addr > 0x1000-SER12_EXTENT ||
dev->irq < 2 || dev->irq > 15)
return -ENXIO;
if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser12"))
return -EACCES;
memset(&bc->modem, 0, sizeof(bc->modem));
bc->hdrv.par.bitrate = 1200;
if ((u = ser12_check_uart(dev->base_addr)) == c_uart_unknown) {
release_region(dev->base_addr, SER12_EXTENT);
return -EIO;
}
outb(0, FCR(dev->base_addr)); /* disable FIFOs */
outb(0x0d, MCR(dev->base_addr));
outb(0, IER(dev->base_addr));
if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
"baycom_ser12", dev)) {
release_region(dev->base_addr, SER12_EXTENT);
return -EBUSY;
}
/*
* enable transmitter empty interrupt
*/
outb(2, IER(dev->base_addr));
/*
* set the SIO to 6 Bits/character and 19200 or 28800 baud, so that
* we get exactly (hopefully) 2 or 3 interrupts per radio symbol,
* depending on the usage of the software DCD routine
*/
ser12_set_divisor(dev, bc->opt_dcd ? 6 : 4);
printk(KERN_INFO "%s: ser12 at iobase 0x%lx irq %u uart %s\n",
bc_drvname, dev->base_addr, dev->irq, uart_str[u]);
return 0;
}
/* --------------------------------------------------------------------- */
static int ser12_close(struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
if (!dev || !bc)
return -EINVAL;
/*
* disable interrupts
*/
outb(0, IER(dev->base_addr));
outb(1, MCR(dev->base_addr));
free_irq(dev->irq, dev);
release_region(dev->base_addr, SER12_EXTENT);
printk(KERN_INFO "%s: close ser12 at iobase 0x%lx irq %u\n",
bc_drvname, dev->base_addr, dev->irq);
return 0;
}
/* --------------------------------------------------------------------- */
/*
* ===================== hdlcdrv driver interface =========================
*/
/* --------------------------------------------------------------------- */
static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd);
/* --------------------------------------------------------------------- */
static const struct hdlcdrv_ops ser12_ops = {
.drvname = bc_drvname,
.drvinfo = bc_drvinfo,
.open = ser12_open,
.close = ser12_close,
.ioctl = baycom_ioctl,
};
/* --------------------------------------------------------------------- */
static int baycom_setmode(struct baycom_state *bc, const char *modestr)
{
if (strchr(modestr, '*'))
bc->opt_dcd = 0;
else if (strchr(modestr, '+'))
bc->opt_dcd = -1;
else if (strchr(modestr, '@'))
bc->opt_dcd = -2;
else
bc->opt_dcd = 1;
return 0;
}
/* --------------------------------------------------------------------- */
static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd)
{
struct baycom_state *bc;
struct baycom_ioctl bi;
if (!dev)
return -EINVAL;
bc = netdev_priv(dev);
BUG_ON(bc->hdrv.magic != HDLCDRV_MAGIC);
if (cmd != SIOCDEVPRIVATE)
return -ENOIOCTLCMD;
switch (hi->cmd) {
default:
break;
case HDLCDRVCTL_GETMODE:
strcpy(hi->data.modename, "ser12");
if (bc->opt_dcd <= 0)
strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : (bc->opt_dcd == -2) ? "@" : "+");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
case HDLCDRVCTL_SETMODE:
if (netif_running(dev) || !capable(CAP_NET_ADMIN))
return -EACCES;
hi->data.modename[sizeof(hi->data.modename)-1] = '\0';
return baycom_setmode(bc, hi->data.modename);
case HDLCDRVCTL_MODELIST:
strcpy(hi->data.modename, "ser12");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
case HDLCDRVCTL_MODEMPARMASK:
return HDLCDRV_PARMASK_IOBASE | HDLCDRV_PARMASK_IRQ;
}
if (copy_from_user(&bi, data, sizeof(bi)))
return -EFAULT;
switch (bi.cmd) {
default:
return -ENOIOCTLCMD;
#ifdef BAYCOM_DEBUG
case BAYCOMCTL_GETDEBUG:
bi.data.dbg.debug1 = bc->hdrv.ptt_keyed;
bi.data.dbg.debug2 = bc->debug_vals.last_intcnt;
bi.data.dbg.debug3 = bc->debug_vals.last_pllcorr;
break;
#endif /* BAYCOM_DEBUG */
}
if (copy_to_user(data, &bi, sizeof(bi)))
return -EFAULT;
return 0;
}
/* --------------------------------------------------------------------- */
/*
* command line settable parameters
*/
static char *mode[NR_PORTS] = { "ser12*", };
static int iobase[NR_PORTS] = { 0x3f8, };
static int irq[NR_PORTS] = { 4, };
module_param_array(mode, charp, NULL, 0);
MODULE_PARM_DESC(mode, "baycom operating mode; * for software DCD");
module_param_hw_array(iobase, int, ioport, NULL, 0);
MODULE_PARM_DESC(iobase, "baycom io base address");
module_param_hw_array(irq, int, irq, NULL, 0);
MODULE_PARM_DESC(irq, "baycom irq number");
MODULE_AUTHOR("Thomas M. Sailer, [email protected], [email protected]");
MODULE_DESCRIPTION("Baycom ser12 half duplex amateur radio modem driver");
MODULE_LICENSE("GPL");
/* --------------------------------------------------------------------- */
static int __init init_baycomserhdx(void)
{
int i, found = 0;
char set_hw = 1;
printk(bc_drvinfo);
/*
* register net devices
*/
for (i = 0; i < NR_PORTS; i++) {
struct net_device *dev;
struct baycom_state *bc;
char ifname[IFNAMSIZ];
sprintf(ifname, "bcsh%d", i);
if (!mode[i])
set_hw = 0;
if (!set_hw)
iobase[i] = irq[i] = 0;
dev = hdlcdrv_register(&ser12_ops,
sizeof(struct baycom_state),
ifname, iobase[i], irq[i], 0);
if (IS_ERR(dev))
break;
bc = netdev_priv(dev);
if (set_hw && baycom_setmode(bc, mode[i]))
set_hw = 0;
found++;
baycom_device[i] = dev;
}
if (!found)
return -ENXIO;
return 0;
}
static void __exit cleanup_baycomserhdx(void)
{
int i;
for(i = 0; i < NR_PORTS; i++) {
struct net_device *dev = baycom_device[i];
if (dev)
hdlcdrv_unregister(dev);
}
}
module_init(init_baycomserhdx);
module_exit(cleanup_baycomserhdx);
/* --------------------------------------------------------------------- */
#ifndef MODULE
/*
* format: baycom_ser_hdx=io,irq,mode
* mode: ser12 hardware DCD
* ser12* software DCD
* ser12@ hardware/software DCD, i.e. no explicit DCD signal but hardware
* mutes audio input to the modem
* ser12+ hardware DCD, inverted signal at DCD pin
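* example (illustrative values): baycom_ser_hdx=0x3f8,4,ser12*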
*/
static int __init baycom_ser_hdx_setup(char *str)
{
static unsigned nr_dev;
int ints[3];
if (nr_dev >= NR_PORTS)
return 0;
str = get_options(str, 3, ints);
if (ints[0] < 2)
return 0;
mode[nr_dev] = str;
iobase[nr_dev] = ints[1];
irq[nr_dev] = ints[2];
nr_dev++;
return 1;
}
__setup("baycom_ser_hdx=", baycom_ser_hdx_setup);
#endif /* MODULE */
/* --------------------------------------------------------------------- */
| linux-master | drivers/net/hamradio/baycom_ser_hdx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*****************************************************************************/
/*
* baycom_epp.c -- baycom epp radio modem driver.
*
* Copyright (C) 1998-2000
* Thomas Sailer ([email protected])
*
* Please note that the GPL allows you to use the driver, NOT the radio.
* In order to use the radio, you need a license from the communications
* authority of your country.
*
* History:
* 0.1 xx.xx.1998 Initial version by Matthias Welwarsky (dg2fef)
* 0.2 21.04.1998 Massive rework by Thomas Sailer
* Integrated FPGA EPP modem configuration routines
* 0.3 11.05.1998 Took FPGA config out and moved it into a separate program
* 0.4 26.07.1999 Adapted to new lowlevel parport driver interface
* 0.5 03.08.1999 adapt to Linus' new __setup/__initcall
* removed some pre-2.2 kernel compatibility cruft
* 0.6 10.08.1999 Check if parport can do SPP and is safe to access during interrupt contexts
* 0.7 12.02.2000 adapted to softnet driver interface
*/
/*****************************************************************************/
#include <linux/crc-ccitt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/parport.h>
#include <linux/if_arp.h>
#include <linux/hdlcdrv.h>
#include <linux/baycom.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <net/ax25.h>
#include <linux/uaccess.h>
/* --------------------------------------------------------------------- */
#define BAYCOM_DEBUG
#define BAYCOM_MAGIC 19730510
/* --------------------------------------------------------------------- */
static const char paranoia_str[] = KERN_ERR
"baycom_epp: bad magic number for hdlcdrv_state struct in routine %s\n";
static const char bc_drvname[] = "baycom_epp";
static const char bc_drvinfo[] = KERN_INFO "baycom_epp: (C) 1998-2000 Thomas Sailer, HB9JNX/AE4WA\n"
"baycom_epp: version 0.7\n";
/* --------------------------------------------------------------------- */
#define NR_PORTS 4
static struct net_device *baycom_device[NR_PORTS];
/* --------------------------------------------------------------------- */
/* EPP status register */
#define EPP_DCDBIT 0x80
#define EPP_PTTBIT 0x08
#define EPP_NREF 0x01
#define EPP_NRAEF 0x02
#define EPP_NRHF 0x04
#define EPP_NTHF 0x20
#define EPP_NTAEF 0x10
#define EPP_NTEF EPP_PTTBIT
/* EPP control register */
#define EPP_TX_FIFO_ENABLE 0x10
#define EPP_RX_FIFO_ENABLE 0x08
#define EPP_MODEM_ENABLE 0x20
#define EPP_LEDS 0xC0
#define EPP_IRQ_ENABLE 0x10
/* LPT registers */
#define LPTREG_ECONTROL 0x402
#define LPTREG_CONFIGB 0x401
#define LPTREG_CONFIGA 0x400
#define LPTREG_EPPDATA 0x004
#define LPTREG_EPPADDR 0x003
#define LPTREG_CONTROL 0x002
#define LPTREG_STATUS 0x001
#define LPTREG_DATA 0x000
/* LPT control register */
#define LPTCTRL_PROGRAM 0x04 /* 0 to reprogram */
#define LPTCTRL_WRITE 0x01
#define LPTCTRL_ADDRSTB 0x08
#define LPTCTRL_DATASTB 0x02
#define LPTCTRL_INTEN 0x10
/* LPT status register */
#define LPTSTAT_SHIFT_NINTR 6
#define LPTSTAT_WAIT 0x80
#define LPTSTAT_NINTR (1<<LPTSTAT_SHIFT_NINTR)
#define LPTSTAT_PE 0x20
#define LPTSTAT_DONE 0x10
#define LPTSTAT_NERROR 0x08
#define LPTSTAT_EPPTIMEOUT 0x01
/* LPT data register */
#define LPTDATA_SHIFT_TDI 0
#define LPTDATA_SHIFT_TMS 2
#define LPTDATA_TDI (1<<LPTDATA_SHIFT_TDI)
#define LPTDATA_TCK 0x02
#define LPTDATA_TMS (1<<LPTDATA_SHIFT_TMS)
#define LPTDATA_INITBIAS 0x80
/* EPP modem config/status bits */
#define EPP_DCDBIT 0x80
#define EPP_PTTBIT 0x08
#define EPP_RXEBIT 0x01
#define EPP_RXAEBIT 0x02
#define EPP_RXHFULL 0x04
#define EPP_NTHF 0x20
#define EPP_NTAEF 0x10
#define EPP_NTEF EPP_PTTBIT
#define EPP_TX_FIFO_ENABLE 0x10
#define EPP_RX_FIFO_ENABLE 0x08
#define EPP_MODEM_ENABLE 0x20
#define EPP_LEDS 0xC0
#define EPP_IRQ_ENABLE 0x10
/* Xilinx 4k JTAG instructions */
#define XC4K_IRLENGTH 3
#define XC4K_EXTEST 0
#define XC4K_PRELOAD 1
#define XC4K_CONFIGURE 5
#define XC4K_BYPASS 7
#define EPP_CONVENTIONAL 0
#define EPP_FPGA 1
#define EPP_FPGAEXTSTATUS 2
#define TXBUFFER_SIZE ((HDLCDRV_MAXFLEN*6/5)+8)
/* ---------------------------------------------------------------------- */
/*
* Information that needs to be kept for each board.
*/
struct baycom_state {
int magic;
struct pardevice *pdev;
struct net_device *dev;
unsigned int work_running;
struct delayed_work run_work;
unsigned int modem;
unsigned int bitrate;
unsigned char stat;
struct {
unsigned int intclk;
unsigned int fclk;
unsigned int bps;
unsigned int extmodem;
unsigned int loopback;
} cfg;
struct hdlcdrv_channel_params ch_params;
struct {
unsigned int bitbuf, bitstream, numbits, state;
unsigned char *bufptr;
int bufcnt;
unsigned char buf[TXBUFFER_SIZE];
} hdlcrx;
struct {
int calibrate;
int slotcnt;
int flags;
enum { tx_idle = 0, tx_keyup, tx_data, tx_tail } state;
unsigned char *bufptr;
int bufcnt;
unsigned char buf[TXBUFFER_SIZE];
} hdlctx;
unsigned int ptt_keyed;
struct sk_buff *skb; /* next transmit packet */
#ifdef BAYCOM_DEBUG
struct debug_vals {
unsigned long last_jiffies;
unsigned cur_intcnt;
unsigned last_intcnt;
int cur_pllcorr;
int last_pllcorr;
unsigned int mod_cycles;
unsigned int demod_cycles;
} debug_vals;
#endif /* BAYCOM_DEBUG */
};
/* --------------------------------------------------------------------- */
#define KISS_VERBOSE
/* --------------------------------------------------------------------- */
#define PARAM_TXDELAY 1
#define PARAM_PERSIST 2
#define PARAM_SLOTTIME 3
#define PARAM_TXTAIL 4
#define PARAM_FULLDUP 5
#define PARAM_HARDWARE 6
#define PARAM_RETURN 255
/* --------------------------------------------------------------------- */
/*
* the CRC routines are stolen from WAMPES
* by Dieter Deyke
*/
/*---------------------------------------------------------------------------*/
#if 0
static inline void append_crc_ccitt(unsigned char *buffer, int len)
{
unsigned int crc = 0xffff;
for (;len>0;len--)
crc = (crc >> 8) ^ crc_ccitt_table[(crc ^ *buffer++) & 0xff];
crc ^= 0xffff;
*buffer++ = crc;
*buffer++ = crc >> 8;
}
#endif
/*---------------------------------------------------------------------------*/
static inline int check_crc_ccitt(const unsigned char *buf, int cnt)
{
return (crc_ccitt(0xffff, buf, cnt) & 0xffff) == 0xf0b8;
}
/*---------------------------------------------------------------------------*/
static inline int calc_crc_ccitt(const unsigned char *buf, int cnt)
{
return (crc_ccitt(0xffff, buf, cnt) ^ 0xffff) & 0xffff;
}
/* ---------------------------------------------------------------------- */
#define tenms_to_flags(bc,tenms) ((tenms * bc->bitrate) / 800)
/* --------------------------------------------------------------------- */
static inline void baycom_int_freq(struct baycom_state *bc)
{
#ifdef BAYCOM_DEBUG
unsigned long cur_jiffies = jiffies;
/*
* measure the interrupt frequency
*/
bc->debug_vals.cur_intcnt++;
if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
bc->debug_vals.last_jiffies = cur_jiffies;
bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
bc->debug_vals.cur_intcnt = 0;
bc->debug_vals.last_pllcorr = bc->debug_vals.cur_pllcorr;
bc->debug_vals.cur_pllcorr = 0;
}
#endif /* BAYCOM_DEBUG */
}
/* ---------------------------------------------------------------------- */
/*
* eppconfig_path should be settable via /proc/sys.
*/
static char const eppconfig_path[] = "/usr/sbin/eppfpga";
static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/usr/bin:/bin", NULL };
/* eppconfig: called during ifconfig up to configure the modem */
static int eppconfig(struct baycom_state *bc)
{
char modearg[256];
char portarg[16];
char *argv[] = {
(char *)eppconfig_path,
"-s",
"-p", portarg,
"-m", modearg,
NULL };
/* set up arguments */
sprintf(modearg, "%sclk,%smodem,fclk=%d,bps=%d,divider=%d%s,extstat",
bc->cfg.intclk ? "int" : "ext",
bc->cfg.extmodem ? "ext" : "int", bc->cfg.fclk, bc->cfg.bps,
(bc->cfg.fclk + 8 * bc->cfg.bps) / (16 * bc->cfg.bps),
bc->cfg.loopback ? ",loopback" : "");
sprintf(portarg, "%ld", bc->pdev->port->base);
printk(KERN_DEBUG "%s: %s -s -p %s -m %s\n", bc_drvname, eppconfig_path, portarg, modearg);
return call_usermodehelper(eppconfig_path, argv, envp, UMH_WAIT_PROC);
}
/* ---------------------------------------------------------------------- */
static inline void do_kiss_params(struct baycom_state *bc,
unsigned char *data, unsigned long len)
{
#ifdef KISS_VERBOSE
#define PKP(a,b) printk(KERN_INFO "baycom_epp: channel params: " a "\n", b)
#else /* KISS_VERBOSE */
#define PKP(a,b)
#endif /* KISS_VERBOSE */
if (len < 2)
return;
switch(data[0]) {
case PARAM_TXDELAY:
bc->ch_params.tx_delay = data[1];
PKP("TX delay = %ums", 10 * bc->ch_params.tx_delay);
break;
case PARAM_PERSIST:
bc->ch_params.ppersist = data[1];
PKP("p persistence = %u", bc->ch_params.ppersist);
break;
case PARAM_SLOTTIME:
bc->ch_params.slottime = data[1];
PKP("slot time = %ums", bc->ch_params.slottime);
break;
case PARAM_TXTAIL:
bc->ch_params.tx_tail = data[1];
PKP("TX tail = %ums", bc->ch_params.tx_tail);
break;
case PARAM_FULLDUP:
bc->ch_params.fulldup = !!data[1];
PKP("%s duplex", bc->ch_params.fulldup ? "full" : "half");
break;
default:
break;
}
#undef PKP
}
/* --------------------------------------------------------------------- */
static void encode_hdlc(struct baycom_state *bc)
{
struct sk_buff *skb;
unsigned char *wp, *bp;
int pkt_len;
unsigned bitstream, notbitstream, bitbuf, numbit, crc;
unsigned char crcarr[2];
int j;
if (bc->hdlctx.bufcnt > 0)
return;
skb = bc->skb;
if (!skb)
return;
bc->skb = NULL;
pkt_len = skb->len-1; /* strip KISS byte */
wp = bc->hdlctx.buf;
bp = skb->data+1;
crc = calc_crc_ccitt(bp, pkt_len);
crcarr[0] = crc;
crcarr[1] = crc >> 8;
*wp++ = 0x7e;
bitstream = bitbuf = numbit = 0;
while (pkt_len > -2) {
bitstream >>= 8;
bitstream |= ((unsigned int)*bp) << 8;
bitbuf |= ((unsigned int)*bp) << numbit;
notbitstream = ~bitstream;
bp++;
pkt_len--;
if (!pkt_len)
bp = crcarr;
for (j = 0; j < 8; j++)
if (unlikely(!(notbitstream & (0x1f0 << j)))) {
bitstream &= ~(0x100 << j);
bitbuf = (bitbuf & (((2 << j) << numbit) - 1)) |
((bitbuf & ~(((2 << j) << numbit) - 1)) << 1);
numbit++;
notbitstream = ~bitstream;
}
numbit += 8;
while (numbit >= 8) {
*wp++ = bitbuf;
bitbuf >>= 8;
numbit -= 8;
}
}
bitbuf |= 0x7e7e << numbit;
numbit += 16;
while (numbit >= 8) {
*wp++ = bitbuf;
bitbuf >>= 8;
numbit -= 8;
}
bc->hdlctx.bufptr = bc->hdlctx.buf;
bc->hdlctx.bufcnt = wp - bc->hdlctx.buf;
dev_kfree_skb(skb);
bc->dev->stats.tx_packets++;
}
/* ---------------------------------------------------------------------- */
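/*
 * Transmitter state machine.  tx_idle waits for data and, in half duplex,
 * does p-persistent CSMA: defer while the channel is busy (DCD), wait out
 * the slot time, then key up with a probability of roughly ppersist/256.
 * tx_keyup sends TX-delay flags, tx_data streams the encoded HDLC buffer,
 * tx_tail sends the TX-tail flags, and the calibrate path sends zeroes.
 * cnt is the free space currently available in the modem's TX FIFO.
 */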
static int transmit(struct baycom_state *bc, int cnt, unsigned char stat)
{
struct parport *pp = bc->pdev->port;
unsigned char tmp[128];
int i, j;
if (bc->hdlctx.state == tx_tail && !(stat & EPP_PTTBIT))
bc->hdlctx.state = tx_idle;
if (bc->hdlctx.state == tx_idle && bc->hdlctx.calibrate <= 0) {
if (bc->hdlctx.bufcnt <= 0)
encode_hdlc(bc);
if (bc->hdlctx.bufcnt <= 0)
return 0;
if (!bc->ch_params.fulldup) {
if (!(stat & EPP_DCDBIT)) {
bc->hdlctx.slotcnt = bc->ch_params.slottime;
return 0;
}
if ((--bc->hdlctx.slotcnt) > 0)
return 0;
bc->hdlctx.slotcnt = bc->ch_params.slottime;
if (get_random_u8() > bc->ch_params.ppersist)
return 0;
}
}
if (bc->hdlctx.state == tx_idle && bc->hdlctx.bufcnt > 0) {
bc->hdlctx.state = tx_keyup;
bc->hdlctx.flags = tenms_to_flags(bc, bc->ch_params.tx_delay);
bc->ptt_keyed++;
}
while (cnt > 0) {
switch (bc->hdlctx.state) {
case tx_keyup:
i = min_t(int, cnt, bc->hdlctx.flags);
cnt -= i;
bc->hdlctx.flags -= i;
if (bc->hdlctx.flags <= 0)
bc->hdlctx.state = tx_data;
memset(tmp, 0x7e, sizeof(tmp));
while (i > 0) {
j = (i > sizeof(tmp)) ? sizeof(tmp) : i;
if (j != pp->ops->epp_write_data(pp, tmp, j, 0))
return -1;
i -= j;
}
break;
case tx_data:
if (bc->hdlctx.bufcnt <= 0) {
encode_hdlc(bc);
if (bc->hdlctx.bufcnt <= 0) {
bc->hdlctx.state = tx_tail;
bc->hdlctx.flags = tenms_to_flags(bc, bc->ch_params.tx_tail);
break;
}
}
i = min_t(int, cnt, bc->hdlctx.bufcnt);
bc->hdlctx.bufcnt -= i;
cnt -= i;
if (i != pp->ops->epp_write_data(pp, bc->hdlctx.bufptr, i, 0))
return -1;
bc->hdlctx.bufptr += i;
break;
case tx_tail:
encode_hdlc(bc);
if (bc->hdlctx.bufcnt > 0) {
bc->hdlctx.state = tx_data;
break;
}
i = min_t(int, cnt, bc->hdlctx.flags);
if (i) {
cnt -= i;
bc->hdlctx.flags -= i;
memset(tmp, 0x7e, sizeof(tmp));
while (i > 0) {
j = (i > sizeof(tmp)) ? sizeof(tmp) : i;
if (j != pp->ops->epp_write_data(pp, tmp, j, 0))
return -1;
i -= j;
}
break;
}
fallthrough;
default:
if (bc->hdlctx.calibrate <= 0)
return 0;
i = min_t(int, cnt, bc->hdlctx.calibrate);
cnt -= i;
bc->hdlctx.calibrate -= i;
memset(tmp, 0, sizeof(tmp));
while (i > 0) {
j = (i > sizeof(tmp)) ? sizeof(tmp) : i;
if (j != pp->ops->epp_write_data(pp, tmp, j, 0))
return -1;
i -= j;
}
break;
}
}
return 0;
}
/* ---------------------------------------------------------------------- */
static void do_rxpacket(struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
struct sk_buff *skb;
unsigned char *cp;
unsigned pktlen;
if (bc->hdlcrx.bufcnt < 4)
return;
if (!check_crc_ccitt(bc->hdlcrx.buf, bc->hdlcrx.bufcnt))
return;
pktlen = bc->hdlcrx.bufcnt-2+1; /* KISS kludge */
if (!(skb = dev_alloc_skb(pktlen))) {
printk("%s: memory squeeze, dropping packet\n", dev->name);
dev->stats.rx_dropped++;
return;
}
cp = skb_put(skb, pktlen);
*cp++ = 0; /* KISS kludge */
memcpy(cp, bc->hdlcrx.buf, pktlen - 1);
skb->protocol = ax25_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
}
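/*
 * Receiver: bytes read from the modem FIFO are shifted through a bit
 * stream matcher that recognizes aborts, flags and stuffed bits.  When a
 * closing flag ends a frame, the collected buffer is handed to
 * do_rxpacket(), which verifies the CRC and passes the frame (prefixed
 * with a KISS byte) up the stack.
 */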
static int receive(struct net_device *dev, int cnt)
{
struct baycom_state *bc = netdev_priv(dev);
struct parport *pp = bc->pdev->port;
unsigned int bitbuf, notbitstream, bitstream, numbits, state;
unsigned char tmp[128];
unsigned char *cp;
int cnt2, ret = 0;
int j;
numbits = bc->hdlcrx.numbits;
state = bc->hdlcrx.state;
bitstream = bc->hdlcrx.bitstream;
bitbuf = bc->hdlcrx.bitbuf;
while (cnt > 0) {
cnt2 = (cnt > sizeof(tmp)) ? sizeof(tmp) : cnt;
cnt -= cnt2;
if (cnt2 != pp->ops->epp_read_data(pp, tmp, cnt2, 0)) {
ret = -1;
break;
}
cp = tmp;
for (; cnt2 > 0; cnt2--, cp++) {
bitstream >>= 8;
bitstream |= (*cp) << 8;
bitbuf >>= 8;
bitbuf |= (*cp) << 8;
numbits += 8;
notbitstream = ~bitstream;
for (j = 0; j < 8; j++) {
/* flag or abort */
if (unlikely(!(notbitstream & (0x0fc << j)))) {
/* abort received */
if (!(notbitstream & (0x1fc << j)))
state = 0;
/* flag received */
else if ((bitstream & (0x1fe << j)) == (0x0fc << j)) {
if (state)
do_rxpacket(dev);
bc->hdlcrx.bufcnt = 0;
bc->hdlcrx.bufptr = bc->hdlcrx.buf;
state = 1;
numbits = 7-j;
}
}
/* stuffed bit */
else if (unlikely((bitstream & (0x1f8 << j)) == (0xf8 << j))) {
numbits--;
bitbuf = (bitbuf & ((~0xff) << j)) | ((bitbuf & ~((~0xff) << j)) << 1);
}
}
while (state && numbits >= 8) {
if (bc->hdlcrx.bufcnt >= TXBUFFER_SIZE) {
state = 0;
} else {
*(bc->hdlcrx.bufptr)++ = bitbuf >> (16-numbits);
bc->hdlcrx.bufcnt++;
numbits -= 8;
}
}
}
}
bc->hdlcrx.numbits = numbits;
bc->hdlcrx.state = state;
bc->hdlcrx.bitstream = bitstream;
bc->hdlcrx.bitbuf = bitbuf;
return ret;
}
/* --------------------------------------------------------------------- */
#define GETTICK(x) \
({ \
x = (unsigned int)get_cycles(); \
})
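/*
 * Bottom half, rescheduled every jiffy: read the modem status byte, work
 * out how much FIFO space and data are available (from the extended FPGA
 * status registers, or from the half/almost-full flag bits on a
 * conventional modem), then run transmit() and receive() accordingly.
 */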
static void epp_bh(struct work_struct *work)
{
struct net_device *dev;
struct baycom_state *bc;
struct parport *pp;
unsigned char stat;
unsigned char tmp[2];
unsigned int time1 = 0, time2 = 0, time3 = 0;
int cnt, cnt2;
bc = container_of(work, struct baycom_state, run_work.work);
dev = bc->dev;
if (!bc->work_running)
return;
baycom_int_freq(bc);
pp = bc->pdev->port;
/* update status */
if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
goto epptimeout;
bc->stat = stat;
bc->debug_vals.last_pllcorr = stat;
GETTICK(time1);
if (bc->modem == EPP_FPGAEXTSTATUS) {
/* get input count */
tmp[0] = EPP_TX_FIFO_ENABLE|EPP_RX_FIFO_ENABLE|EPP_MODEM_ENABLE|1;
if (pp->ops->epp_write_addr(pp, tmp, 1, 0) != 1)
goto epptimeout;
if (pp->ops->epp_read_addr(pp, tmp, 2, 0) != 2)
goto epptimeout;
cnt = tmp[0] | (tmp[1] << 8);
cnt &= 0x7fff;
/* get output count */
tmp[0] = EPP_TX_FIFO_ENABLE|EPP_RX_FIFO_ENABLE|EPP_MODEM_ENABLE|2;
if (pp->ops->epp_write_addr(pp, tmp, 1, 0) != 1)
goto epptimeout;
if (pp->ops->epp_read_addr(pp, tmp, 2, 0) != 2)
goto epptimeout;
cnt2 = tmp[0] | (tmp[1] << 8);
cnt2 = 16384 - (cnt2 & 0x7fff);
/* return to normal */
tmp[0] = EPP_TX_FIFO_ENABLE|EPP_RX_FIFO_ENABLE|EPP_MODEM_ENABLE;
if (pp->ops->epp_write_addr(pp, tmp, 1, 0) != 1)
goto epptimeout;
if (transmit(bc, cnt2, stat))
goto epptimeout;
GETTICK(time2);
if (receive(dev, cnt))
goto epptimeout;
if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
goto epptimeout;
bc->stat = stat;
} else {
/* try to tx */
switch (stat & (EPP_NTAEF|EPP_NTHF)) {
case EPP_NTHF:
cnt = 2048 - 256;
break;
case EPP_NTAEF:
cnt = 2048 - 1793;
break;
case 0:
cnt = 0;
break;
default:
cnt = 2048 - 1025;
break;
}
if (transmit(bc, cnt, stat))
goto epptimeout;
GETTICK(time2);
/* do receiver */
while ((stat & (EPP_NRAEF|EPP_NRHF)) != EPP_NRHF) {
switch (stat & (EPP_NRAEF|EPP_NRHF)) {
case EPP_NRAEF:
cnt = 1025;
break;
case 0:
cnt = 1793;
break;
default:
cnt = 256;
break;
}
if (receive(dev, cnt))
goto epptimeout;
if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
goto epptimeout;
}
cnt = 0;
if (bc->bitrate < 50000)
cnt = 256;
else if (bc->bitrate < 100000)
cnt = 128;
while (cnt > 0 && stat & EPP_NREF) {
if (receive(dev, 1))
goto epptimeout;
cnt--;
if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
goto epptimeout;
}
}
GETTICK(time3);
#ifdef BAYCOM_DEBUG
bc->debug_vals.mod_cycles = time2 - time1;
bc->debug_vals.demod_cycles = time3 - time2;
#endif /* BAYCOM_DEBUG */
schedule_delayed_work(&bc->run_work, 1);
if (!bc->skb)
netif_wake_queue(dev);
return;
epptimeout:
printk(KERN_ERR "%s: EPP timeout!\n", bc_drvname);
}
/* ---------------------------------------------------------------------- */
/*
* ===================== network driver interface =========================
*/
static netdev_tx_t baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
if (skb->protocol == htons(ETH_P_IP))
return ax25_ip_xmit(skb);
if (skb->data[0] != 0) {
do_kiss_params(bc, skb->data, skb->len);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
if (bc->skb) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
/* strip KISS byte */
if (skb->len >= HDLCDRV_MAXFLEN+1 || skb->len < 3) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
netif_stop_queue(dev);
bc->skb = skb;
return NETDEV_TX_OK;
}
/* --------------------------------------------------------------------- */
static int baycom_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = (struct sockaddr *)addr;
/* addr is an AX.25 shifted ASCII mac address */
dev_addr_set(dev, sa->sa_data);
return 0;
}
/* --------------------------------------------------------------------- */
static void epp_wakeup(void *handle)
{
struct net_device *dev = (struct net_device *)handle;
struct baycom_state *bc = netdev_priv(dev);
printk(KERN_DEBUG "baycom_epp: %s: why am I being woken up?\n", dev->name);
if (!parport_claim(bc->pdev))
printk(KERN_DEBUG "baycom_epp: %s: I'm broken.\n", dev->name);
}
/* --------------------------------------------------------------------- */
/*
* Open/initialize the board. This is called (in the current kernel)
* sometime after booting when the 'ifconfig' program is run.
*
* This routine should set everything up anew at each open, even
* registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
*/
static int epp_open(struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
struct parport *pp = parport_find_base(dev->base_addr);
unsigned int i, j;
unsigned char tmp[128];
unsigned char stat;
unsigned long tstart;
struct pardev_cb par_cb;
if (!pp) {
printk(KERN_ERR "%s: parport at 0x%lx unknown\n", bc_drvname, dev->base_addr);
return -ENXIO;
}
#if 0
if (pp->irq < 0) {
printk(KERN_ERR "%s: parport at 0x%lx has no irq\n", bc_drvname, pp->base);
parport_put_port(pp);
return -ENXIO;
}
#endif
if ((~pp->modes) & (PARPORT_MODE_TRISTATE | PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT)) {
printk(KERN_ERR "%s: parport at 0x%lx cannot be used\n",
bc_drvname, pp->base);
parport_put_port(pp);
return -EIO;
}
memset(&bc->modem, 0, sizeof(bc->modem));
memset(&par_cb, 0, sizeof(par_cb));
par_cb.wakeup = epp_wakeup;
par_cb.private = (void *)dev;
par_cb.flags = PARPORT_DEV_EXCL;
for (i = 0; i < NR_PORTS; i++)
if (baycom_device[i] == dev)
break;
if (i == NR_PORTS) {
pr_err("%s: no device found\n", bc_drvname);
parport_put_port(pp);
return -ENODEV;
}
bc->pdev = parport_register_dev_model(pp, dev->name, &par_cb, i);
parport_put_port(pp);
if (!bc->pdev) {
printk(KERN_ERR "%s: cannot register parport at 0x%lx\n", bc_drvname, pp->base);
return -ENXIO;
}
if (parport_claim(bc->pdev)) {
printk(KERN_ERR "%s: parport at 0x%lx busy\n", bc_drvname, pp->base);
parport_unregister_device(bc->pdev);
return -EBUSY;
}
dev->irq = /*pp->irq*/ 0;
INIT_DELAYED_WORK(&bc->run_work, epp_bh);
bc->work_running = 1;
bc->modem = EPP_CONVENTIONAL;
if (eppconfig(bc))
printk(KERN_INFO "%s: no FPGA detected, assuming conventional EPP modem\n", bc_drvname);
else
bc->modem = /*EPP_FPGA*/ EPP_FPGAEXTSTATUS;
parport_write_control(pp, LPTCTRL_PROGRAM); /* prepare EPP mode; we aren't using interrupts */
/* reset the modem */
tmp[0] = 0;
tmp[1] = EPP_TX_FIFO_ENABLE|EPP_RX_FIFO_ENABLE|EPP_MODEM_ENABLE;
if (pp->ops->epp_write_addr(pp, tmp, 2, 0) != 2)
goto epptimeout;
/* autoprobe baud rate */
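/*
 * Let the modem run free for about a third of a second, count the bytes
 * that arrive, and derive the bit rate from bytes * 8 / elapsed time; the
 * loop further down then picks a matching interrupt divider.
 */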
tstart = jiffies;
i = 0;
while (time_before(jiffies, tstart + HZ/3)) {
if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
goto epptimeout;
if ((stat & (EPP_NRAEF|EPP_NRHF)) == EPP_NRHF) {
schedule();
continue;
}
if (pp->ops->epp_read_data(pp, tmp, 128, 0) != 128)
goto epptimeout;
if (pp->ops->epp_read_data(pp, tmp, 128, 0) != 128)
goto epptimeout;
i += 256;
}
for (j = 0; j < 256; j++) {
if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
goto epptimeout;
if (!(stat & EPP_NREF))
break;
if (pp->ops->epp_read_data(pp, tmp, 1, 0) != 1)
goto epptimeout;
i++;
}
tstart = jiffies - tstart;
bc->bitrate = i * (8 * HZ) / tstart;
j = 1;
i = bc->bitrate >> 3;
while (j < 7 && i > 150) {
j++;
i >>= 1;
}
printk(KERN_INFO "%s: autoprobed bitrate: %d int divider: %d int rate: %d\n",
bc_drvname, bc->bitrate, j, bc->bitrate >> (j+2));
tmp[0] = EPP_TX_FIFO_ENABLE|EPP_RX_FIFO_ENABLE|EPP_MODEM_ENABLE/*|j*/;
if (pp->ops->epp_write_addr(pp, tmp, 1, 0) != 1)
goto epptimeout;
/*
* initialise hdlc variables
*/
bc->hdlcrx.state = 0;
bc->hdlcrx.numbits = 0;
bc->hdlctx.state = tx_idle;
bc->hdlctx.bufcnt = 0;
bc->hdlctx.slotcnt = bc->ch_params.slottime;
bc->hdlctx.calibrate = 0;
/* start the bottom half stuff */
schedule_delayed_work(&bc->run_work, 1);
netif_start_queue(dev);
return 0;
epptimeout:
printk(KERN_ERR "%s: epp timeout during bitrate probe\n", bc_drvname);
parport_write_control(pp, 0); /* reset the adapter */
parport_release(bc->pdev);
parport_unregister_device(bc->pdev);
return -EIO;
}
/* --------------------------------------------------------------------- */
static int epp_close(struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
struct parport *pp = bc->pdev->port;
unsigned char tmp[1];
bc->work_running = 0;
cancel_delayed_work_sync(&bc->run_work);
bc->stat = EPP_DCDBIT;
tmp[0] = 0;
pp->ops->epp_write_addr(pp, tmp, 1, 0);
parport_write_control(pp, 0); /* reset the adapter */
parport_release(bc->pdev);
parport_unregister_device(bc->pdev);
dev_kfree_skb(bc->skb);
bc->skb = NULL;
printk(KERN_INFO "%s: close epp at iobase 0x%lx irq %u\n",
bc_drvname, dev->base_addr, dev->irq);
return 0;
}
/* --------------------------------------------------------------------- */
static int baycom_setmode(struct baycom_state *bc, const char *modestr)
{
const char *cp;
if (strstr(modestr,"intclk"))
bc->cfg.intclk = 1;
if (strstr(modestr,"extclk"))
bc->cfg.intclk = 0;
if (strstr(modestr,"intmodem"))
bc->cfg.extmodem = 0;
if (strstr(modestr,"extmodem"))
bc->cfg.extmodem = 1;
if (strstr(modestr,"loopback"))
bc->cfg.loopback = 1;
if (strstr(modestr, "noloopback"))
bc->cfg.loopback = 0;
if ((cp = strstr(modestr,"fclk="))) {
bc->cfg.fclk = simple_strtoul(cp+5, NULL, 0);
if (bc->cfg.fclk < 1000000)
bc->cfg.fclk = 1000000;
if (bc->cfg.fclk > 25000000)
bc->cfg.fclk = 25000000;
}
if ((cp = strstr(modestr,"bps="))) {
bc->cfg.bps = simple_strtoul(cp+4, NULL, 0);
if (bc->cfg.bps < 1000)
bc->cfg.bps = 1000;
if (bc->cfg.bps > 1500000)
bc->cfg.bps = 1500000;
}
return 0;
}
/* --------------------------------------------------------------------- */
static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd)
{
struct baycom_state *bc = netdev_priv(dev);
struct hdlcdrv_ioctl hi;
if (cmd != SIOCDEVPRIVATE)
return -ENOIOCTLCMD;
if (copy_from_user(&hi, data, sizeof(hi)))
return -EFAULT;
switch (hi.cmd) {
default:
return -ENOIOCTLCMD;
case HDLCDRVCTL_GETCHANNELPAR:
hi.data.cp.tx_delay = bc->ch_params.tx_delay;
hi.data.cp.tx_tail = bc->ch_params.tx_tail;
hi.data.cp.slottime = bc->ch_params.slottime;
hi.data.cp.ppersist = bc->ch_params.ppersist;
hi.data.cp.fulldup = bc->ch_params.fulldup;
break;
case HDLCDRVCTL_SETCHANNELPAR:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
bc->ch_params.tx_delay = hi.data.cp.tx_delay;
bc->ch_params.tx_tail = hi.data.cp.tx_tail;
bc->ch_params.slottime = hi.data.cp.slottime;
bc->ch_params.ppersist = hi.data.cp.ppersist;
bc->ch_params.fulldup = hi.data.cp.fulldup;
bc->hdlctx.slotcnt = 1;
return 0;
case HDLCDRVCTL_GETMODEMPAR:
hi.data.mp.iobase = dev->base_addr;
hi.data.mp.irq = dev->irq;
hi.data.mp.dma = dev->dma;
hi.data.mp.dma2 = 0;
hi.data.mp.seriobase = 0;
hi.data.mp.pariobase = 0;
hi.data.mp.midiiobase = 0;
break;
case HDLCDRVCTL_SETMODEMPAR:
if ((!capable(CAP_SYS_RAWIO)) || netif_running(dev))
return -EACCES;
dev->base_addr = hi.data.mp.iobase;
dev->irq = /*hi.data.mp.irq*/0;
dev->dma = /*hi.data.mp.dma*/0;
return 0;
case HDLCDRVCTL_GETSTAT:
hi.data.cs.ptt = !!(bc->stat & EPP_PTTBIT);
hi.data.cs.dcd = !(bc->stat & EPP_DCDBIT);
hi.data.cs.ptt_keyed = bc->ptt_keyed;
hi.data.cs.tx_packets = dev->stats.tx_packets;
hi.data.cs.tx_errors = dev->stats.tx_errors;
hi.data.cs.rx_packets = dev->stats.rx_packets;
hi.data.cs.rx_errors = dev->stats.rx_errors;
break;
case HDLCDRVCTL_OLDGETSTAT:
hi.data.ocs.ptt = !!(bc->stat & EPP_PTTBIT);
hi.data.ocs.dcd = !(bc->stat & EPP_DCDBIT);
hi.data.ocs.ptt_keyed = bc->ptt_keyed;
break;
case HDLCDRVCTL_CALIBRATE:
if (!capable(CAP_SYS_RAWIO))
return -EACCES;
bc->hdlctx.calibrate = hi.data.calibrate * bc->bitrate / 8;
return 0;
case HDLCDRVCTL_DRIVERNAME:
strncpy(hi.data.drivername, "baycom_epp", sizeof(hi.data.drivername));
break;
case HDLCDRVCTL_GETMODE:
sprintf(hi.data.modename, "%sclk,%smodem,fclk=%d,bps=%d%s",
bc->cfg.intclk ? "int" : "ext",
bc->cfg.extmodem ? "ext" : "int", bc->cfg.fclk, bc->cfg.bps,
bc->cfg.loopback ? ",loopback" : "");
break;
case HDLCDRVCTL_SETMODE:
if (!capable(CAP_NET_ADMIN) || netif_running(dev))
return -EACCES;
hi.data.modename[sizeof(hi.data.modename)-1] = '\0';
return baycom_setmode(bc, hi.data.modename);
case HDLCDRVCTL_MODELIST:
strncpy(hi.data.modename, "intclk,extclk,intmodem,extmodem,divider=x",
sizeof(hi.data.modename));
break;
case HDLCDRVCTL_MODEMPARMASK:
return HDLCDRV_PARMASK_IOBASE;
}
if (copy_to_user(data, &hi, sizeof(hi)))
return -EFAULT;
return 0;
}
/* --------------------------------------------------------------------- */
static const struct net_device_ops baycom_netdev_ops = {
.ndo_open = epp_open,
.ndo_stop = epp_close,
.ndo_siocdevprivate = baycom_siocdevprivate,
.ndo_start_xmit = baycom_send_packet,
.ndo_set_mac_address = baycom_set_mac_address,
};
/*
* Check for a network adaptor of this type, and return '0' if one exists.
* If dev->base_addr == 0, probe all likely locations.
* If dev->base_addr == 1, always return failure.
* If dev->base_addr == 2, allocate space for the device and return success
* (detachable devices only).
*/
static void baycom_probe(struct net_device *dev)
{
const struct hdlcdrv_channel_params dflt_ch_params = {
20, 2, 10, 40, 0
};
struct baycom_state *bc;
/*
* not a real probe! only initialize data structures
*/
bc = netdev_priv(dev);
/*
* initialize the baycom_state struct
*/
bc->ch_params = dflt_ch_params;
bc->ptt_keyed = 0;
/*
* initialize the device struct
*/
/* Fill in the fields of the device structure */
bc->skb = NULL;
dev->netdev_ops = &baycom_netdev_ops;
dev->header_ops = &ax25_header_ops;
dev->type = ARPHRD_AX25; /* AF_AX25 device */
dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&null_ax25_address);
dev->tx_queue_len = 16;
/* New style flags */
dev->flags = 0;
}
/* --------------------------------------------------------------------- */
/*
* command line settable parameters
*/
static char *mode[NR_PORTS] = { "", };
static int iobase[NR_PORTS] = { 0x378, };
module_param_array(mode, charp, NULL, 0);
MODULE_PARM_DESC(mode, "baycom operating mode");
module_param_hw_array(iobase, int, ioport, NULL, 0);
MODULE_PARM_DESC(iobase, "baycom io base address");
MODULE_AUTHOR("Thomas M. Sailer, [email protected], [email protected]");
MODULE_DESCRIPTION("Baycom epp amateur radio modem driver");
MODULE_LICENSE("GPL");
/* --------------------------------------------------------------------- */
static int baycom_epp_par_probe(struct pardevice *par_dev)
{
struct device_driver *drv = par_dev->dev.driver;
int len = strlen(drv->name);
if (strncmp(par_dev->name, drv->name, len))
return -ENODEV;
return 0;
}
static struct parport_driver baycom_epp_par_driver = {
.name = "bce",
.probe = baycom_epp_par_probe,
.devmodel = true,
};
static void __init baycom_epp_dev_setup(struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
/*
* initialize part of the baycom_state struct
*/
bc->dev = dev;
bc->magic = BAYCOM_MAGIC;
bc->cfg.fclk = 19666600;
bc->cfg.bps = 9600;
/*
* initialize part of the device struct
*/
baycom_probe(dev);
}
static int __init init_baycomepp(void)
{
int i, found = 0, ret;
char set_hw = 1;
printk(bc_drvinfo);
ret = parport_register_driver(&baycom_epp_par_driver);
if (ret)
return ret;
/*
* register net devices
*/
for (i = 0; i < NR_PORTS; i++) {
struct net_device *dev;
dev = alloc_netdev(sizeof(struct baycom_state), "bce%d",
NET_NAME_UNKNOWN, baycom_epp_dev_setup);
if (!dev) {
printk(KERN_WARNING "bce%d : out of memory\n", i);
return found ? 0 : -ENOMEM;
}
sprintf(dev->name, "bce%d", i);
dev->base_addr = iobase[i];
if (!mode[i])
set_hw = 0;
if (!set_hw)
iobase[i] = 0;
if (register_netdev(dev)) {
printk(KERN_WARNING "%s: cannot register net device %s\n", bc_drvname, dev->name);
free_netdev(dev);
break;
}
if (set_hw && baycom_setmode(netdev_priv(dev), mode[i]))
set_hw = 0;
baycom_device[i] = dev;
found++;
}
if (found == 0) {
parport_unregister_driver(&baycom_epp_par_driver);
return -ENXIO;
}
return 0;
}
static void __exit cleanup_baycomepp(void)
{
int i;
for(i = 0; i < NR_PORTS; i++) {
struct net_device *dev = baycom_device[i];
if (dev) {
struct baycom_state *bc = netdev_priv(dev);
if (bc->magic == BAYCOM_MAGIC) {
unregister_netdev(dev);
free_netdev(dev);
} else
printk(paranoia_str, "cleanup_module");
}
}
parport_unregister_driver(&baycom_epp_par_driver);
}
module_init(init_baycomepp);
module_exit(cleanup_baycomepp);
/* --------------------------------------------------------------------- */
#ifndef MODULE
/*
* format: baycom_epp=io,mode
* mode: fpga config options
*/
static int __init baycom_epp_setup(char *str)
{
static unsigned __initdata nr_dev = 0;
int ints[2];
if (nr_dev >= NR_PORTS)
return 0;
str = get_options(str, 2, ints);
if (ints[0] < 1)
return 0;
mode[nr_dev] = str;
iobase[nr_dev] = ints[1];
nr_dev++;
return 1;
}
__setup("baycom_epp=", baycom_epp_setup);
#endif /* MODULE */
/* --------------------------------------------------------------------- */
| linux-master | drivers/net/hamradio/baycom_epp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*****************************************************************************/
/*
* yam.c -- YAM radio modem driver.
*
* Copyright (C) 1998 Frederic Rible F1OAT ([email protected])
* Adapted from baycom.c driver written by Thomas Sailer ([email protected])
*
* Please note that the GPL allows you to use the driver, NOT the radio.
* In order to use the radio, you need a license from the communications
* authority of your country.
*
* History:
* 0.0 F1OAT 06.06.98 Begin of work with baycom.c source code V 0.3
* 0.1 F1OAT 07.06.98 Add timer polling routine for channel arbitration
* 0.2 F6FBB 08.06.98 Added delay after FPGA programming
* 0.3 F6FBB 29.07.98 Delayed PTT implementation for dupmode=2
* 0.4 F6FBB 30.07.98 Added TxTail, Slottime and Persistence
* 0.5 F6FBB 01.08.98 Shared IRQs, /proc/net and network statistics
* 0.6 F6FBB 25.08.98 Added 1200Bds format
* 0.7 F6FBB 12.09.98 Added to the kernel configuration
* 0.8 F6FBB 14.10.98 Fixed slottime/persistence timing bug
* OK1ZIA 2.09.01 Fixed "kfree_skb on hard IRQ"
* using dev_kfree_skb_any(). (important in 2.4 kernel)
*/
/*****************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/random.h>
#include <asm/io.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/ax25.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/yam.h>
/* --------------------------------------------------------------------- */
static const char yam_drvname[] = "yam";
static const char yam_drvinfo[] __initconst = KERN_INFO \
"YAM driver version 0.8 by F1OAT/F6FBB\n";
/* --------------------------------------------------------------------- */
#define FIRMWARE_9600 "yam/9600.bin"
#define FIRMWARE_1200 "yam/1200.bin"
#define YAM_9600 1
#define YAM_1200 2
#define NR_PORTS 4
#define YAM_MAGIC 0xF10A7654
/* Transmitter states */
#define TX_OFF 0
#define TX_HEAD 1
#define TX_DATA 2
#define TX_CRC1 3
#define TX_CRC2 4
#define TX_TAIL 5
#define YAM_MAX_FRAME 1024
#define DEFAULT_BITRATE 9600 /* bps */
#define DEFAULT_HOLDD 10 /* sec */
#define DEFAULT_TXD 300 /* ms */
#define DEFAULT_TXTAIL 10 /* ms */
#define DEFAULT_SLOT 100 /* ms */
#define DEFAULT_PERS 64 /* 0->255 */
struct yam_port {
int magic;
int bitrate;
int baudrate;
int iobase;
int irq;
int dupmode;
struct net_device *dev;
int nb_rxint;
int nb_mdint;
/* Parameters section */
int txd; /* tx delay */
int holdd; /* duplex ptt delay */
int txtail; /* txtail delay */
int slot; /* slottime */
int pers; /* persistence */
/* Tx section */
int tx_state;
int tx_count;
int slotcnt;
unsigned char tx_buf[YAM_MAX_FRAME];
int tx_len;
int tx_crcl, tx_crch;
struct sk_buff_head send_queue; /* Packets awaiting transmission */
/* Rx section */
int dcd;
unsigned char rx_buf[YAM_MAX_FRAME];
int rx_len;
int rx_crcl, rx_crch;
};
struct yam_mcs {
unsigned char bits[YAM_FPGA_SIZE];
int bitrate;
struct yam_mcs *next;
};
static struct net_device *yam_devs[NR_PORTS];
static struct yam_mcs *yam_data;
static DEFINE_TIMER(yam_timer, NULL);
/* --------------------------------------------------------------------- */
#define RBR(iobase) (iobase+0)
#define THR(iobase) (iobase+0)
#define IER(iobase) (iobase+1)
#define IIR(iobase) (iobase+2)
#define FCR(iobase) (iobase+2)
#define LCR(iobase) (iobase+3)
#define MCR(iobase) (iobase+4)
#define LSR(iobase) (iobase+5)
#define MSR(iobase) (iobase+6)
#define SCR(iobase) (iobase+7)
#define DLL(iobase) (iobase+0)
#define DLM(iobase) (iobase+1)
#define YAM_EXTENT 8
/* Interrupt Identification Register Bit Masks */
#define IIR_NOPEND 1
#define IIR_MSR 0
#define IIR_TX 2
#define IIR_RX 4
#define IIR_LSR 6
#define IIR_TIMEOUT 12 /* Fifo mode only */
#define IIR_MASK 0x0F
/* Interrupt Enable Register Bit Masks */
#define IER_RX 1 /* enable rx interrupt */
#define IER_TX 2 /* enable tx interrupt */
#define IER_LSR 4 /* enable line status interrupts */
#define IER_MSR 8 /* enable modem status interrupts */
/* Modem Control Register Bit Masks */
#define MCR_DTR 0x01 /* DTR output */
#define MCR_RTS 0x02 /* RTS output */
#define MCR_OUT1 0x04 /* OUT1 output (not accessible in RS232) */
#define MCR_OUT2 0x08 /* Master Interrupt enable (must be set on PCs) */
#define MCR_LOOP 0x10 /* Loopback enable */
/* Modem Status Register Bit Masks */
#define MSR_DCTS 0x01 /* Delta CTS input */
#define MSR_DDSR 0x02 /* Delta DSR */
#define MSR_DRIN 0x04 /* Delta RI */
#define MSR_DDCD 0x08 /* Delta DCD */
#define MSR_CTS 0x10 /* CTS input */
#define MSR_DSR 0x20 /* DSR input */
#define MSR_RING 0x40 /* RI input */
#define MSR_DCD 0x80 /* DCD input */
/* line status register bit mask */
#define LSR_RXC 0x01
#define LSR_OE 0x02
#define LSR_PE 0x04
#define LSR_FE 0x08
#define LSR_BREAK 0x10
#define LSR_THRE 0x20
#define LSR_TSRE 0x40
/* Line Control Register Bit Masks */
#define LCR_DLAB 0x80
#define LCR_BREAK 0x40
#define LCR_PZERO 0x28
#define LCR_PEVEN 0x18
#define LCR_PODD 0x08
#define LCR_STOP1 0x00
#define LCR_STOP2 0x04
#define LCR_BIT5 0x00
#define LCR_BIT6 0x02
#define LCR_BIT7 0x01
#define LCR_BIT8 0x03
/* YAM Modem <-> UART Port mapping */
#define TX_RDY MSR_DCTS /* transmitter ready to send */
#define RX_DCD MSR_DCD /* carrier detect */
#define RX_FLAG MSR_RING /* hdlc flag received */
#define FPGA_DONE MSR_DSR /* FPGA is configured */
#define PTT_ON (MCR_RTS|MCR_OUT2) /* activate PTT */
#define PTT_OFF (MCR_DTR|MCR_OUT2) /* release PTT */
#define ENABLE_RXINT IER_RX /* enable uart rx interrupt during rx */
#define ENABLE_TXINT IER_MSR /* enable uart ms interrupt during tx */
#define ENABLE_RTXINT (IER_RX|IER_MSR) /* full duplex operations */
/*************************************************************************
* CRC Tables
************************************************************************/
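/*
 * chktabl/chktabh hold the low and high halves of a byte-wise CRC-CCITT
 * update table, indexed by the previous low CRC byte; see yam_rx_byte()
 * and yam_tx_byte() for how the running CRC is maintained with them.
 */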
static const unsigned char chktabl[256] =
{0x00, 0x89, 0x12, 0x9b, 0x24, 0xad, 0x36, 0xbf, 0x48, 0xc1, 0x5a, 0xd3, 0x6c, 0xe5, 0x7e,
0xf7, 0x81, 0x08, 0x93, 0x1a, 0xa5, 0x2c, 0xb7, 0x3e, 0xc9, 0x40, 0xdb, 0x52, 0xed, 0x64,
0xff, 0x76, 0x02, 0x8b, 0x10, 0x99, 0x26, 0xaf, 0x34, 0xbd, 0x4a, 0xc3, 0x58, 0xd1, 0x6e,
0xe7, 0x7c, 0xf5, 0x83, 0x0a, 0x91, 0x18, 0xa7, 0x2e, 0xb5, 0x3c, 0xcb, 0x42, 0xd9, 0x50,
0xef, 0x66, 0xfd, 0x74, 0x04, 0x8d, 0x16, 0x9f, 0x20, 0xa9, 0x32, 0xbb, 0x4c, 0xc5, 0x5e,
0xd7, 0x68, 0xe1, 0x7a, 0xf3, 0x85, 0x0c, 0x97, 0x1e, 0xa1, 0x28, 0xb3, 0x3a, 0xcd, 0x44,
0xdf, 0x56, 0xe9, 0x60, 0xfb, 0x72, 0x06, 0x8f, 0x14, 0x9d, 0x22, 0xab, 0x30, 0xb9, 0x4e,
0xc7, 0x5c, 0xd5, 0x6a, 0xe3, 0x78, 0xf1, 0x87, 0x0e, 0x95, 0x1c, 0xa3, 0x2a, 0xb1, 0x38,
0xcf, 0x46, 0xdd, 0x54, 0xeb, 0x62, 0xf9, 0x70, 0x08, 0x81, 0x1a, 0x93, 0x2c, 0xa5, 0x3e,
0xb7, 0x40, 0xc9, 0x52, 0xdb, 0x64, 0xed, 0x76, 0xff, 0x89, 0x00, 0x9b, 0x12, 0xad, 0x24,
0xbf, 0x36, 0xc1, 0x48, 0xd3, 0x5a, 0xe5, 0x6c, 0xf7, 0x7e, 0x0a, 0x83, 0x18, 0x91, 0x2e,
0xa7, 0x3c, 0xb5, 0x42, 0xcb, 0x50, 0xd9, 0x66, 0xef, 0x74, 0xfd, 0x8b, 0x02, 0x99, 0x10,
0xaf, 0x26, 0xbd, 0x34, 0xc3, 0x4a, 0xd1, 0x58, 0xe7, 0x6e, 0xf5, 0x7c, 0x0c, 0x85, 0x1e,
0x97, 0x28, 0xa1, 0x3a, 0xb3, 0x44, 0xcd, 0x56, 0xdf, 0x60, 0xe9, 0x72, 0xfb, 0x8d, 0x04,
0x9f, 0x16, 0xa9, 0x20, 0xbb, 0x32, 0xc5, 0x4c, 0xd7, 0x5e, 0xe1, 0x68, 0xf3, 0x7a, 0x0e,
0x87, 0x1c, 0x95, 0x2a, 0xa3, 0x38, 0xb1, 0x46, 0xcf, 0x54, 0xdd, 0x62, 0xeb, 0x70, 0xf9,
0x8f, 0x06, 0x9d, 0x14, 0xab, 0x22, 0xb9, 0x30, 0xc7, 0x4e, 0xd5, 0x5c, 0xe3, 0x6a, 0xf1,
0x78};
static const unsigned char chktabh[256] =
{0x00, 0x11, 0x23, 0x32, 0x46, 0x57, 0x65, 0x74, 0x8c, 0x9d, 0xaf, 0xbe, 0xca, 0xdb, 0xe9,
0xf8, 0x10, 0x01, 0x33, 0x22, 0x56, 0x47, 0x75, 0x64, 0x9c, 0x8d, 0xbf, 0xae, 0xda, 0xcb,
0xf9, 0xe8, 0x21, 0x30, 0x02, 0x13, 0x67, 0x76, 0x44, 0x55, 0xad, 0xbc, 0x8e, 0x9f, 0xeb,
0xfa, 0xc8, 0xd9, 0x31, 0x20, 0x12, 0x03, 0x77, 0x66, 0x54, 0x45, 0xbd, 0xac, 0x9e, 0x8f,
0xfb, 0xea, 0xd8, 0xc9, 0x42, 0x53, 0x61, 0x70, 0x04, 0x15, 0x27, 0x36, 0xce, 0xdf, 0xed,
0xfc, 0x88, 0x99, 0xab, 0xba, 0x52, 0x43, 0x71, 0x60, 0x14, 0x05, 0x37, 0x26, 0xde, 0xcf,
0xfd, 0xec, 0x98, 0x89, 0xbb, 0xaa, 0x63, 0x72, 0x40, 0x51, 0x25, 0x34, 0x06, 0x17, 0xef,
0xfe, 0xcc, 0xdd, 0xa9, 0xb8, 0x8a, 0x9b, 0x73, 0x62, 0x50, 0x41, 0x35, 0x24, 0x16, 0x07,
0xff, 0xee, 0xdc, 0xcd, 0xb9, 0xa8, 0x9a, 0x8b, 0x84, 0x95, 0xa7, 0xb6, 0xc2, 0xd3, 0xe1,
0xf0, 0x08, 0x19, 0x2b, 0x3a, 0x4e, 0x5f, 0x6d, 0x7c, 0x94, 0x85, 0xb7, 0xa6, 0xd2, 0xc3,
0xf1, 0xe0, 0x18, 0x09, 0x3b, 0x2a, 0x5e, 0x4f, 0x7d, 0x6c, 0xa5, 0xb4, 0x86, 0x97, 0xe3,
0xf2, 0xc0, 0xd1, 0x29, 0x38, 0x0a, 0x1b, 0x6f, 0x7e, 0x4c, 0x5d, 0xb5, 0xa4, 0x96, 0x87,
0xf3, 0xe2, 0xd0, 0xc1, 0x39, 0x28, 0x1a, 0x0b, 0x7f, 0x6e, 0x5c, 0x4d, 0xc6, 0xd7, 0xe5,
0xf4, 0x80, 0x91, 0xa3, 0xb2, 0x4a, 0x5b, 0x69, 0x78, 0x0c, 0x1d, 0x2f, 0x3e, 0xd6, 0xc7,
0xf5, 0xe4, 0x90, 0x81, 0xb3, 0xa2, 0x5a, 0x4b, 0x79, 0x68, 0x1c, 0x0d, 0x3f, 0x2e, 0xe7,
0xf6, 0xc4, 0xd5, 0xa1, 0xb0, 0x82, 0x93, 0x6b, 0x7a, 0x48, 0x59, 0x2d, 0x3c, 0x0e, 0x1f,
0xf7, 0xe6, 0xd4, 0xc5, 0xb1, 0xa0, 0x92, 0x83, 0x7b, 0x6a, 0x58, 0x49, 0x3d, 0x2c, 0x1e,
0x0f};
/*************************************************************************
* FPGA functions
************************************************************************/
static void delay(int ms)
{
unsigned long timeout = jiffies + ((ms * HZ) / 1000);
while (time_before(jiffies, timeout))
cpu_relax();
}
/*
* reset FPGA
*/
static void fpga_reset(int iobase)
{
outb(0, IER(iobase));
outb(LCR_DLAB | LCR_BIT5, LCR(iobase));
outb(1, DLL(iobase));
outb(0, DLM(iobase));
outb(LCR_BIT5, LCR(iobase));
inb(LSR(iobase));
inb(MSR(iobase));
/* turn off FPGA supply voltage */
outb(MCR_OUT1 | MCR_OUT2, MCR(iobase));
delay(100);
/* turn on FPGA supply voltage again */
outb(MCR_DTR | MCR_RTS | MCR_OUT1 | MCR_OUT2, MCR(iobase));
delay(100);
}
/*
* send one byte to FPGA
*/
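/* Each data bit is presented MSB first on RTS (DTR stays asserted) and is
 * latched by sending a dummy byte through the UART, waiting for the
 * transmitter shift register to empty before moving to the next bit.
 */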
static int fpga_write(int iobase, unsigned char wrd)
{
unsigned char bit;
int k;
unsigned long timeout = jiffies + HZ / 10;
for (k = 0; k < 8; k++) {
bit = (wrd & 0x80) ? (MCR_RTS | MCR_DTR) : MCR_DTR;
outb(bit | MCR_OUT1 | MCR_OUT2, MCR(iobase));
wrd <<= 1;
outb(0xfc, THR(iobase));
while ((inb(LSR(iobase)) & LSR_TSRE) == 0)
if (time_after(jiffies, timeout))
return -1;
}
return 0;
}
/*
* predef should be 0 for loading user defined mcs
* predef should be YAM_1200 for loading predef 1200 mcs
* predef should be YAM_9600 for loading predef 9600 mcs
*/
static unsigned char *add_mcs(unsigned char *bits, int bitrate,
unsigned int predef)
{
const char *fw_name[2] = {FIRMWARE_9600, FIRMWARE_1200};
const struct firmware *fw;
struct platform_device *pdev;
struct yam_mcs *p;
int err;
switch (predef) {
case 0:
fw = NULL;
break;
case YAM_1200:
case YAM_9600:
predef--;
pdev = platform_device_register_simple("yam", 0, NULL, 0);
if (IS_ERR(pdev)) {
printk(KERN_ERR "yam: Failed to register firmware\n");
return NULL;
}
err = request_firmware(&fw, fw_name[predef], &pdev->dev);
platform_device_unregister(pdev);
if (err) {
printk(KERN_ERR "Failed to load firmware \"%s\"\n",
fw_name[predef]);
return NULL;
}
if (fw->size != YAM_FPGA_SIZE) {
printk(KERN_ERR "Bogus length %zu in firmware \"%s\"\n",
fw->size, fw_name[predef]);
release_firmware(fw);
return NULL;
}
bits = (unsigned char *)fw->data;
break;
default:
printk(KERN_ERR "yam: Invalid predef number %u\n", predef);
return NULL;
}
/* If it already exists, replace the bit data */
p = yam_data;
while (p) {
if (p->bitrate == bitrate) {
memcpy(p->bits, bits, YAM_FPGA_SIZE);
goto out;
}
p = p->next;
}
/* Allocate a new mcs */
if ((p = kmalloc(sizeof(struct yam_mcs), GFP_KERNEL)) == NULL) {
release_firmware(fw);
return NULL;
}
memcpy(p->bits, bits, YAM_FPGA_SIZE);
p->bitrate = bitrate;
p->next = yam_data;
yam_data = p;
out:
release_firmware(fw);
return p->bits;
}
static unsigned char *get_mcs(int bitrate)
{
struct yam_mcs *p;
p = yam_data;
while (p) {
if (p->bitrate == bitrate)
return p->bits;
p = p->next;
}
/* Load predefined mcs data */
switch (bitrate) {
case 1200:
/* setting predef as YAM_1200 for loading predef 1200 mcs */
return add_mcs(NULL, bitrate, YAM_1200);
default:
/* setting predef as YAM_9600 for loading predef 9600 mcs */
return add_mcs(NULL, bitrate, YAM_9600);
}
}
/*
* download bitstream to FPGA
 * the bitstream is obtained via get_mcs(), which falls back to the
 * yam/1200.bin or yam/9600.bin firmware images if no user-supplied
 * microcode has been loaded
*/
static int fpga_download(int iobase, int bitrate)
{
int i, rc;
unsigned char *pbits;
pbits = get_mcs(bitrate);
if (pbits == NULL)
return -1;
fpga_reset(iobase);
for (i = 0; i < YAM_FPGA_SIZE; i++) {
if (fpga_write(iobase, pbits[i])) {
printk(KERN_ERR "yam: error in write cycle\n");
return -1; /* write... */
}
}
fpga_write(iobase, 0xFF);
rc = inb(MSR(iobase)); /* check DONE signal */
/* Needed for some hardwares */
delay(50);
return (rc & MSR_DSR) ? 0 : -1;
}
/************************************************************************
* Serial port init
************************************************************************/
static void yam_set_uart(struct net_device *dev)
{
struct yam_port *yp = netdev_priv(dev);
int divisor = 115200 / yp->baudrate;
outb(0, IER(dev->base_addr));
outb(LCR_DLAB | LCR_BIT8, LCR(dev->base_addr));
outb(divisor, DLL(dev->base_addr));
outb(0, DLM(dev->base_addr));
outb(LCR_BIT8, LCR(dev->base_addr));
outb(PTT_OFF, MCR(dev->base_addr));
outb(0x00, FCR(dev->base_addr));
/* Flush pending irq */
inb(RBR(dev->base_addr));
inb(MSR(dev->base_addr));
/* Enable rx irq */
outb(ENABLE_RTXINT, IER(dev->base_addr));
}
/* --------------------------------------------------------------------- */
enum uart {
c_uart_unknown, c_uart_8250,
c_uart_16450, c_uart_16550, c_uart_16550A
};
static const char *uart_str[] =
{"unknown", "8250", "16450", "16550", "16550A"};
static enum uart yam_check_uart(unsigned int iobase)
{
unsigned char b1, b2, b3;
enum uart u;
enum uart uart_tab[] =
{c_uart_16450, c_uart_unknown, c_uart_16550, c_uart_16550A};
b1 = inb(MCR(iobase));
outb(b1 | 0x10, MCR(iobase)); /* loopback mode */
b2 = inb(MSR(iobase));
outb(0x1a, MCR(iobase));
b3 = inb(MSR(iobase)) & 0xf0;
outb(b1, MCR(iobase)); /* restore old values */
outb(b2, MSR(iobase));
if (b3 != 0x90)
return c_uart_unknown;
inb(RBR(iobase));
inb(RBR(iobase));
outb(0x01, FCR(iobase)); /* enable FIFOs */
u = uart_tab[(inb(IIR(iobase)) >> 6) & 3];
if (u == c_uart_16450) {
outb(0x5a, SCR(iobase));
b1 = inb(SCR(iobase));
outb(0xa5, SCR(iobase));
b2 = inb(SCR(iobase));
if ((b1 != 0x5a) || (b2 != 0xa5))
u = c_uart_8250;
}
return u;
}
/******************************************************************************
* Rx Section
******************************************************************************/
static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp)
{
if (yp->dcd && yp->rx_len >= 3 && yp->rx_len < YAM_MAX_FRAME) {
int pkt_len = yp->rx_len - 2 + 1; /* -CRC + kiss */
struct sk_buff *skb;
if ((yp->rx_crch & yp->rx_crcl) != 0xFF) {
/* Bad crc */
} else {
if (!(skb = dev_alloc_skb(pkt_len))) {
printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name);
++dev->stats.rx_dropped;
} else {
unsigned char *cp;
cp = skb_put(skb, pkt_len);
*cp++ = 0; /* KISS kludge */
memcpy(cp, yp->rx_buf, pkt_len - 1);
skb->protocol = ax25_type_trans(skb, dev);
netif_rx(skb);
++dev->stats.rx_packets;
}
}
}
yp->rx_len = 0;
yp->rx_crcl = 0x21;
yp->rx_crch = 0xf3;
}
static inline void yam_rx_byte(struct net_device *dev, struct yam_port *yp, unsigned char rxb)
{
if (yp->rx_len < YAM_MAX_FRAME) {
unsigned char c = yp->rx_crcl;
yp->rx_crcl = (chktabl[c] ^ yp->rx_crch);
yp->rx_crch = (chktabh[c] ^ rxb);
yp->rx_buf[yp->rx_len++] = rxb;
}
}
/********************************************************************************
* TX Section
********************************************************************************/
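/*
 * Transmission is driven from the TX_RDY modem status interrupt:
 * yam_start_tx() keys the PTT and enters TX_HEAD, which waits out the TX
 * delay; TX_DATA pushes the frame one byte at a time while updating the
 * CRC; TX_CRC1/TX_CRC2 append the two (complemented) CRC bytes; TX_TAIL
 * holds the PTT for the tail (and hold) time before keying down.
 */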
static void ptt_on(struct net_device *dev)
{
outb(PTT_ON, MCR(dev->base_addr));
}
static void ptt_off(struct net_device *dev)
{
outb(PTT_OFF, MCR(dev->base_addr));
}
static netdev_tx_t yam_send_packet(struct sk_buff *skb,
struct net_device *dev)
{
struct yam_port *yp = netdev_priv(dev);
if (skb->protocol == htons(ETH_P_IP))
return ax25_ip_xmit(skb);
skb_queue_tail(&yp->send_queue, skb);
netif_trans_update(dev);
return NETDEV_TX_OK;
}
static void yam_start_tx(struct net_device *dev, struct yam_port *yp)
{
if ((yp->tx_state == TX_TAIL) || (yp->txd == 0))
yp->tx_count = 1;
else
yp->tx_count = (yp->bitrate * yp->txd) / 8000;
yp->tx_state = TX_HEAD;
ptt_on(dev);
}
static void yam_arbitrate(struct net_device *dev)
{
struct yam_port *yp = netdev_priv(dev);
if (yp->magic != YAM_MAGIC || yp->tx_state != TX_OFF ||
skb_queue_empty(&yp->send_queue))
return;
/* tx_state is TX_OFF and there is data to send */
if (yp->dupmode) {
/* Full duplex mode, don't wait */
yam_start_tx(dev, yp);
return;
}
if (yp->dcd) {
/* DCD on, wait slotime ... */
yp->slotcnt = yp->slot / 10;
return;
}
/* Is slottime passed ? */
if ((--yp->slotcnt) > 0)
return;
yp->slotcnt = yp->slot / 10;
/* is random > persist ? */
if (get_random_u8() > yp->pers)
return;
yam_start_tx(dev, yp);
}
static void yam_dotimer(struct timer_list *unused)
{
int i;
for (i = 0; i < NR_PORTS; i++) {
struct net_device *dev = yam_devs[i];
if (dev && netif_running(dev))
yam_arbitrate(dev);
}
yam_timer.expires = jiffies + HZ / 100;
add_timer(&yam_timer);
}
static void yam_tx_byte(struct net_device *dev, struct yam_port *yp)
{
struct sk_buff *skb;
unsigned char b, temp;
switch (yp->tx_state) {
case TX_OFF:
break;
case TX_HEAD:
if (--yp->tx_count <= 0) {
if (!(skb = skb_dequeue(&yp->send_queue))) {
ptt_off(dev);
yp->tx_state = TX_OFF;
break;
}
yp->tx_state = TX_DATA;
if (skb->data[0] != 0) {
/* do_kiss_params(s, skb->data, skb->len); */
dev_kfree_skb_any(skb);
break;
}
yp->tx_len = skb->len - 1; /* strip KISS byte */
if (yp->tx_len >= YAM_MAX_FRAME || yp->tx_len < 2) {
dev_kfree_skb_any(skb);
break;
}
skb_copy_from_linear_data_offset(skb, 1,
yp->tx_buf,
yp->tx_len);
dev_kfree_skb_any(skb);
yp->tx_count = 0;
yp->tx_crcl = 0x21;
yp->tx_crch = 0xf3;
yp->tx_state = TX_DATA;
}
break;
case TX_DATA:
b = yp->tx_buf[yp->tx_count++];
outb(b, THR(dev->base_addr));
temp = yp->tx_crcl;
yp->tx_crcl = chktabl[temp] ^ yp->tx_crch;
yp->tx_crch = chktabh[temp] ^ b;
if (yp->tx_count >= yp->tx_len) {
yp->tx_state = TX_CRC1;
}
break;
case TX_CRC1:
yp->tx_crch = chktabl[yp->tx_crcl] ^ yp->tx_crch;
yp->tx_crcl = chktabh[yp->tx_crcl] ^ chktabl[yp->tx_crch] ^ 0xff;
outb(yp->tx_crcl, THR(dev->base_addr));
yp->tx_state = TX_CRC2;
break;
case TX_CRC2:
outb(chktabh[yp->tx_crch] ^ 0xFF, THR(dev->base_addr));
if (skb_queue_empty(&yp->send_queue)) {
yp->tx_count = (yp->bitrate * yp->txtail) / 8000;
if (yp->dupmode == 2)
yp->tx_count += (yp->bitrate * yp->holdd) / 8;
if (yp->tx_count == 0)
yp->tx_count = 1;
yp->tx_state = TX_TAIL;
} else {
yp->tx_count = 1;
yp->tx_state = TX_HEAD;
}
++dev->stats.tx_packets;
break;
case TX_TAIL:
if (--yp->tx_count <= 0) {
yp->tx_state = TX_OFF;
ptt_off(dev);
}
break;
}
}
/***********************************************************************************
* ISR routine
************************************************************************************/
static irqreturn_t yam_interrupt(int irq, void *dev_id)
{
struct net_device *dev;
struct yam_port *yp;
unsigned char iir;
int counter = 100;
int i;
int handled = 0;
for (i = 0; i < NR_PORTS; i++) {
dev = yam_devs[i];
yp = netdev_priv(dev);
if (!netif_running(dev))
continue;
while ((iir = IIR_MASK & inb(IIR(dev->base_addr))) != IIR_NOPEND) {
unsigned char msr = inb(MSR(dev->base_addr));
unsigned char lsr = inb(LSR(dev->base_addr));
unsigned char rxb;
handled = 1;
if (lsr & LSR_OE)
++dev->stats.rx_fifo_errors;
yp->dcd = (msr & RX_DCD) ? 1 : 0;
if (--counter <= 0) {
printk(KERN_ERR "%s: too many irq iir=%d\n",
dev->name, iir);
goto out;
}
if (msr & TX_RDY) {
++yp->nb_mdint;
yam_tx_byte(dev, yp);
}
if (lsr & LSR_RXC) {
++yp->nb_rxint;
rxb = inb(RBR(dev->base_addr));
if (msr & RX_FLAG)
yam_rx_flag(dev, yp);
else
yam_rx_byte(dev, yp, rxb);
}
}
}
out:
return IRQ_RETVAL(handled);
}
#ifdef CONFIG_PROC_FS
static void *yam_seq_start(struct seq_file *seq, loff_t *pos)
{
return (*pos < NR_PORTS) ? yam_devs[*pos] : NULL;
}
static void *yam_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (*pos < NR_PORTS) ? yam_devs[*pos] : NULL;
}
static void yam_seq_stop(struct seq_file *seq, void *v)
{
}
static int yam_seq_show(struct seq_file *seq, void *v)
{
struct net_device *dev = v;
const struct yam_port *yp = netdev_priv(dev);
seq_printf(seq, "Device %s\n", dev->name);
seq_printf(seq, " Up %d\n", netif_running(dev));
seq_printf(seq, " Speed %u\n", yp->bitrate);
seq_printf(seq, " IoBase 0x%x\n", yp->iobase);
seq_printf(seq, " BaudRate %u\n", yp->baudrate);
seq_printf(seq, " IRQ %u\n", yp->irq);
seq_printf(seq, " TxState %u\n", yp->tx_state);
seq_printf(seq, " Duplex %u\n", yp->dupmode);
seq_printf(seq, " HoldDly %u\n", yp->holdd);
seq_printf(seq, " TxDelay %u\n", yp->txd);
seq_printf(seq, " TxTail %u\n", yp->txtail);
seq_printf(seq, " SlotTime %u\n", yp->slot);
seq_printf(seq, " Persist %u\n", yp->pers);
seq_printf(seq, " TxFrames %lu\n", dev->stats.tx_packets);
seq_printf(seq, " RxFrames %lu\n", dev->stats.rx_packets);
seq_printf(seq, " TxInt %u\n", yp->nb_mdint);
seq_printf(seq, " RxInt %u\n", yp->nb_rxint);
seq_printf(seq, " RxOver %lu\n", dev->stats.rx_fifo_errors);
seq_printf(seq, "\n");
return 0;
}
static const struct seq_operations yam_seqops = {
.start = yam_seq_start,
.next = yam_seq_next,
.stop = yam_seq_stop,
.show = yam_seq_show,
};
#endif
/* --------------------------------------------------------------------- */
static int yam_open(struct net_device *dev)
{
struct yam_port *yp = netdev_priv(dev);
enum uart u;
int i;
int ret=0;
printk(KERN_INFO "Trying %s at iobase 0x%lx irq %u\n", dev->name, dev->base_addr, dev->irq);
if (!yp->bitrate)
return -ENXIO;
if (!dev->base_addr || dev->base_addr > 0x1000 - YAM_EXTENT ||
dev->irq < 2 || dev->irq > 15) {
return -ENXIO;
}
if (!request_region(dev->base_addr, YAM_EXTENT, dev->name))
{
printk(KERN_ERR "%s: cannot 0x%lx busy\n", dev->name, dev->base_addr);
return -EACCES;
}
if ((u = yam_check_uart(dev->base_addr)) == c_uart_unknown) {
printk(KERN_ERR "%s: cannot find uart type\n", dev->name);
ret = -EIO;
goto out_release_base;
}
if (fpga_download(dev->base_addr, yp->bitrate)) {
printk(KERN_ERR "%s: cannot init FPGA\n", dev->name);
ret = -EIO;
goto out_release_base;
}
outb(0, IER(dev->base_addr));
if (request_irq(dev->irq, yam_interrupt, IRQF_SHARED, dev->name, dev)) {
printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq);
ret = -EBUSY;
goto out_release_base;
}
yam_set_uart(dev);
netif_start_queue(dev);
yp->slotcnt = yp->slot / 10;
/* Reset overruns for all ports - FPGA programming causes overruns */
for (i = 0; i < NR_PORTS; i++) {
struct net_device *yam_dev = yam_devs[i];
inb(LSR(yam_dev->base_addr));
yam_dev->stats.rx_fifo_errors = 0;
}
printk(KERN_INFO "%s at iobase 0x%lx irq %u uart %s\n", dev->name, dev->base_addr, dev->irq,
uart_str[u]);
return 0;
out_release_base:
release_region(dev->base_addr, YAM_EXTENT);
return ret;
}
/* --------------------------------------------------------------------- */
static int yam_close(struct net_device *dev)
{
struct sk_buff *skb;
struct yam_port *yp = netdev_priv(dev);
if (!dev)
return -EINVAL;
/*
* disable interrupts
*/
outb(0, IER(dev->base_addr));
outb(1, MCR(dev->base_addr));
/* Remove IRQ handler if last */
free_irq(dev->irq,dev);
release_region(dev->base_addr, YAM_EXTENT);
netif_stop_queue(dev);
while ((skb = skb_dequeue(&yp->send_queue)))
dev_kfree_skb(skb);
printk(KERN_INFO "%s: close yam at iobase 0x%lx irq %u\n",
yam_drvname, dev->base_addr, dev->irq);
return 0;
}
/* --------------------------------------------------------------------- */
static int yam_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
{
struct yam_port *yp = netdev_priv(dev);
struct yamdrv_ioctl_cfg yi;
struct yamdrv_ioctl_mcs *ym;
int ioctl_cmd;
if (copy_from_user(&ioctl_cmd, data, sizeof(int)))
return -EFAULT;
if (yp->magic != YAM_MAGIC)
return -EINVAL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (cmd != SIOCDEVPRIVATE)
return -EINVAL;
switch (ioctl_cmd) {
case SIOCYAMRESERVED:
return -EINVAL; /* unused */
case SIOCYAMSMCS:
if (netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
ym = memdup_user(data, sizeof(struct yamdrv_ioctl_mcs));
if (IS_ERR(ym))
return PTR_ERR(ym);
if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
}
/* setting predef as 0 for loading userdefined mcs data */
add_mcs(ym->bits, ym->bitrate, 0);
kfree(ym);
break;
case SIOCYAMSCFG:
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (copy_from_user(&yi, data, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
if (yi.cmd != SIOCYAMSCFG)
return -EINVAL;
if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_BITRATE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_BAUDRATE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if (yi.cfg.mask & YAM_IOBASE) {
yp->iobase = yi.cfg.iobase;
dev->base_addr = yi.cfg.iobase;
}
if (yi.cfg.mask & YAM_IRQ) {
if (yi.cfg.irq > 15)
return -EINVAL;
yp->irq = yi.cfg.irq;
dev->irq = yi.cfg.irq;
}
if (yi.cfg.mask & YAM_BITRATE) {
if (yi.cfg.bitrate > YAM_MAXBITRATE)
return -EINVAL;
yp->bitrate = yi.cfg.bitrate;
}
if (yi.cfg.mask & YAM_BAUDRATE) {
if (yi.cfg.baudrate > YAM_MAXBAUDRATE)
return -EINVAL;
yp->baudrate = yi.cfg.baudrate;
}
if (yi.cfg.mask & YAM_MODE) {
if (yi.cfg.mode > YAM_MAXMODE)
return -EINVAL;
yp->dupmode = yi.cfg.mode;
}
if (yi.cfg.mask & YAM_HOLDDLY) {
if (yi.cfg.holddly > YAM_MAXHOLDDLY)
return -EINVAL;
yp->holdd = yi.cfg.holddly;
}
if (yi.cfg.mask & YAM_TXDELAY) {
if (yi.cfg.txdelay > YAM_MAXTXDELAY)
return -EINVAL;
yp->txd = yi.cfg.txdelay;
}
if (yi.cfg.mask & YAM_TXTAIL) {
if (yi.cfg.txtail > YAM_MAXTXTAIL)
return -EINVAL;
yp->txtail = yi.cfg.txtail;
}
if (yi.cfg.mask & YAM_PERSIST) {
if (yi.cfg.persist > YAM_MAXPERSIST)
return -EINVAL;
yp->pers = yi.cfg.persist;
}
if (yi.cfg.mask & YAM_SLOTTIME) {
if (yi.cfg.slottime > YAM_MAXSLOTTIME)
return -EINVAL;
yp->slot = yi.cfg.slottime;
yp->slotcnt = yp->slot / 10;
}
break;
case SIOCYAMGCFG:
memset(&yi, 0, sizeof(yi));
yi.cfg.mask = 0xffffffff;
yi.cfg.iobase = yp->iobase;
yi.cfg.irq = yp->irq;
yi.cfg.bitrate = yp->bitrate;
yi.cfg.baudrate = yp->baudrate;
yi.cfg.mode = yp->dupmode;
yi.cfg.txdelay = yp->txd;
yi.cfg.holddly = yp->holdd;
yi.cfg.txtail = yp->txtail;
yi.cfg.persist = yp->pers;
yi.cfg.slottime = yp->slot;
if (copy_to_user(data, &yi, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
break;
default:
return -EINVAL;
}
return 0;
}
/* --------------------------------------------------------------------- */
static int yam_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = (struct sockaddr *) addr;
/* addr is an AX.25 shifted ASCII mac address */
dev_addr_set(dev, sa->sa_data);
return 0;
}
/* --------------------------------------------------------------------- */
static const struct net_device_ops yam_netdev_ops = {
.ndo_open = yam_open,
.ndo_stop = yam_close,
.ndo_start_xmit = yam_send_packet,
.ndo_siocdevprivate = yam_siocdevprivate,
.ndo_set_mac_address = yam_set_mac_address,
};
static void yam_setup(struct net_device *dev)
{
struct yam_port *yp = netdev_priv(dev);
yp->magic = YAM_MAGIC;
yp->bitrate = DEFAULT_BITRATE;
yp->baudrate = DEFAULT_BITRATE * 2;
yp->iobase = 0;
yp->irq = 0;
yp->dupmode = 0;
yp->holdd = DEFAULT_HOLDD;
yp->txd = DEFAULT_TXD;
yp->txtail = DEFAULT_TXTAIL;
yp->slot = DEFAULT_SLOT;
yp->pers = DEFAULT_PERS;
yp->dev = dev;
dev->base_addr = yp->iobase;
dev->irq = yp->irq;
skb_queue_head_init(&yp->send_queue);
dev->netdev_ops = &yam_netdev_ops;
dev->header_ops = &ax25_header_ops;
dev->type = ARPHRD_AX25;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->mtu = AX25_MTU;
dev->addr_len = AX25_ADDR_LEN;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
static int __init yam_init_driver(void)
{
struct net_device *dev;
int i, err;
char name[IFNAMSIZ];
printk(yam_drvinfo);
for (i = 0; i < NR_PORTS; i++) {
sprintf(name, "yam%d", i);
dev = alloc_netdev(sizeof(struct yam_port), name,
NET_NAME_UNKNOWN, yam_setup);
if (!dev) {
pr_err("yam: cannot allocate net device\n");
err = -ENOMEM;
goto error;
}
err = register_netdev(dev);
if (err) {
printk(KERN_WARNING "yam: cannot register net device %s\n", dev->name);
free_netdev(dev);
goto error;
}
yam_devs[i] = dev;
}
timer_setup(&yam_timer, yam_dotimer, 0);
yam_timer.expires = jiffies + HZ / 100;
add_timer(&yam_timer);
proc_create_seq("yam", 0444, init_net.proc_net, &yam_seqops);
return 0;
error:
while (--i >= 0) {
unregister_netdev(yam_devs[i]);
free_netdev(yam_devs[i]);
}
return err;
}
/* --------------------------------------------------------------------- */
static void __exit yam_cleanup_driver(void)
{
struct yam_mcs *p;
int i;
del_timer_sync(&yam_timer);
for (i = 0; i < NR_PORTS; i++) {
struct net_device *dev = yam_devs[i];
if (dev) {
unregister_netdev(dev);
free_netdev(dev);
}
}
while (yam_data) {
p = yam_data;
yam_data = yam_data->next;
kfree(p);
}
remove_proc_entry("yam", init_net.proc_net);
}
/* --------------------------------------------------------------------- */
MODULE_AUTHOR("Frederic Rible F1OAT [email protected]");
MODULE_DESCRIPTION("Yam amateur radio modem driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_1200);
MODULE_FIRMWARE(FIRMWARE_9600);
module_init(yam_init_driver);
module_exit(yam_cleanup_driver);
/* --------------------------------------------------------------------- */
| linux-master | drivers/net/hamradio/yam.c |
#define RCS_ID "$Id: scc.c,v 1.75 1998/11/04 15:15:01 jreuter Exp jreuter $"
#define VERSION "3.0"
/*
* Please use z8530drv-utils-3.0 with this version.
* ------------------
*
* You can find a subset of the documentation in
* Documentation/networking/device_drivers/hamradio/z8530drv.rst.
*/
/*
********************************************************************
* SCC.C - Linux driver for Z8530 based HDLC cards for AX.25 *
********************************************************************
********************************************************************
Copyright (c) 1993, 2000 Joerg Reuter DL1BKE
portions (c) 1993 Guido ten Dolle PE1NNZ
********************************************************************
The driver and the programs in the archive are UNDER CONSTRUCTION.
The code is likely to fail, and so could your kernel --- even
a whole network.
This driver is intended for Amateur Radio use. If you are running it
for commercial purposes, please drop me a note. I am nosy...
...BUT:
! You m u s t recognize the appropriate legislations of your country !
! before you connect a radio to the SCC board and start to transmit or !
! receive. The GPL allows you to use the d r i v e r, NOT the RADIO! !
For non-Amateur-Radio use please note that you might need a special
allowance/licence from the designer of the SCC Board and/or the
MODEM.
This program is free software; you can redistribute it and/or modify
it under the terms of the (modified) GNU General Public License
delivered with the Linux kernel source.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should find a copy of the GNU General Public License in
/usr/src/linux/COPYING;
********************************************************************
Incomplete history of z8530drv:
-------------------------------
1994-09-13 started to write the driver, rescued most of my own
code (and Hans Alblas' memory buffer pool concept) from
an earlier project "sccdrv" which was initiated by
Guido ten Dolle. Not much of the old driver survived,
though. The first version I put my hands on was sccdrv1.3
from August 1993. The memory buffer pool concept
appeared in an unauthorized sccdrv version (1.5) from
August 1994.
1995-01-31 changed copyright notice to GPL without limitations.
.
. <SNIP>
.
1996-10-05 New semester, new driver...
* KISS TNC emulator removed (TTY driver)
* Source moved to drivers/net/
* Includes Z8530 defines from drivers/net/z8530.h
* Uses sk_buffer memory management
* Reduced overhead of /proc/net/z8530drv output
* Streamlined quite a lot things
* Invents brand new bugs... ;-)
The move to version number 3.0 reflects these changes.
You can use 'kissbridge' if you need a KISS TNC emulator.
1996-12-13 Fixed for Linux networking changes. (G4KLX)
1997-01-08 Fixed the remaining problems.
1997-04-02 Hopefully fixed the problems with the new *_timer()
routines, added calibration code.
1997-10-12 Made SCC_DELAY a CONFIG option, added CONFIG_SCC_TRXECHO
1998-01-29 Small fix to avoid lock-up on initialization
1998-09-29 Fixed the "grouping" bugs, tx_inhibit works again,
using dev->tx_queue_len now instead of MAXQUEUE now.
1998-10-21 Postponed the spinlock changes, would need a lot of
testing I currently don't have the time for. Softdcd doesn't
work.
1998-11-04 Softdcd does not work correctly in DPLL mode, in fact it
never did. The DPLL locks on noise, the SYNC unit sees
flags that aren't... Restarting the DPLL does not help
either, it resynchronizes too slow and the first received
frame gets lost.
2000-02-13 Fixed for new network driver interface changes, still
does TX timeouts itself since it uses its own queue
scheme.
Thanks to all who contributed to this driver with ideas and bug
reports!
NB -- if you find errors, change something, please let me know
first before you distribute it... And please don't touch
the version number. Just replace my callsign in
"v3.0.dl1bke" with your own. Just to avoid confusion...
If you want to add your modification to the linux distribution
please (!) contact me first.
New versions of the driver will be announced on the linux-hams
mailing list on vger.kernel.org. To subscribe send an e-mail
to [email protected] with the following line in
the body of the mail:
subscribe linux-hams
The content of the "Subject" field will be ignored.
vy 73,
Joerg Reuter ampr-net: [email protected]
AX-25 : DL1BKE @ DB0ABH.#BAY.DEU.EU
Internet: [email protected]
www : http://yaina.de/jreuter
*/
/* ----------------------------------------------------------------------- */
#undef SCC_LDELAY /* slow it down even a bit more */
#undef SCC_DONT_CHECK /* don't probe whether the SCCs you specified are actually present */
#define SCC_MAXCHIPS 4 /* maximum number of supported chips */
#define SCC_BUFSIZE 384 /* must not exceed 4096 */
#undef SCC_DEBUG
#define SCC_DEFAULT_CLOCK 4915200
/* default pclock if nothing is specified */
/* ----------------------------------------------------------------------- */
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/init.h>
#include <linux/scc.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <net/net_namespace.h>
#include <net/ax25.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include "z8530.h"
static const char banner[] __initconst = KERN_INFO \
"AX.25: Z8530 SCC driver version "VERSION".dl1bke\n";
static void t_dwait(struct timer_list *t);
static void t_txdelay(struct timer_list *t);
static void t_tail(struct timer_list *t);
static void t_busy(struct timer_list *);
static void t_maxkeyup(struct timer_list *);
static void t_idle(struct timer_list *t);
static void scc_tx_done(struct scc_channel *);
static void scc_start_tx_timer(struct scc_channel *,
void (*)(struct timer_list *), unsigned long);
static void scc_start_maxkeyup(struct scc_channel *);
static void scc_start_defer(struct scc_channel *);
static void z8530_init(void);
static void init_channel(struct scc_channel *scc);
static void scc_key_trx (struct scc_channel *scc, char tx);
static void scc_init_timer(struct scc_channel *scc);
static int scc_net_alloc(const char *name, struct scc_channel *scc);
static void scc_net_setup(struct net_device *dev);
static int scc_net_open(struct net_device *dev);
static int scc_net_close(struct net_device *dev);
static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb);
static netdev_tx_t scc_net_tx(struct sk_buff *skb,
struct net_device *dev);
static int scc_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd);
static int scc_net_set_mac_address(struct net_device *dev, void *addr);
static struct net_device_stats * scc_net_get_stats(struct net_device *dev);
static unsigned char SCC_DriverName[] = "scc";
static struct irqflags { unsigned char used : 1; } Ivec[NR_IRQS];
static struct scc_channel SCC_Info[2 * SCC_MAXCHIPS]; /* information per channel */
static struct scc_ctrl {
io_port chan_A;
io_port chan_B;
int irq;
} SCC_ctrl[SCC_MAXCHIPS+1];
static unsigned char Driver_Initialized;
static int Nchips;
static io_port Vector_Latch;
/* ******************************************************************** */
/* * Port Access Functions * */
/* ******************************************************************** */
/* These provide interrupt-safe two-step access to the Z8530 registers */
static DEFINE_SPINLOCK(iolock); /* Guards paired accesses */
static inline unsigned char InReg(io_port port, unsigned char reg)
{
unsigned long flags;
unsigned char r;
spin_lock_irqsave(&iolock, flags);
#ifdef SCC_LDELAY
Outb(port, reg);
udelay(SCC_LDELAY);
r=Inb(port);
udelay(SCC_LDELAY);
#else
Outb(port, reg);
r=Inb(port);
#endif
spin_unlock_irqrestore(&iolock, flags);
return r;
}
static inline void OutReg(io_port port, unsigned char reg, unsigned char val)
{
unsigned long flags;
spin_lock_irqsave(&iolock, flags);
#ifdef SCC_LDELAY
Outb(port, reg); udelay(SCC_LDELAY);
Outb(port, val); udelay(SCC_LDELAY);
#else
Outb(port, reg);
Outb(port, val);
#endif
spin_unlock_irqrestore(&iolock, flags);
}
static inline void wr(struct scc_channel *scc, unsigned char reg,
unsigned char val)
{
OutReg(scc->ctrl, reg, (scc->wreg[reg] = val));
}
static inline void or(struct scc_channel *scc, unsigned char reg, unsigned char val)
{
OutReg(scc->ctrl, reg, (scc->wreg[reg] |= val));
}
static inline void cl(struct scc_channel *scc, unsigned char reg, unsigned char val)
{
OutReg(scc->ctrl, reg, (scc->wreg[reg] &= ~val));
}
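/* Illustration only (not part of the driver): most Z8530 write registers
 * cannot be read back, so wr()/or()/cl() keep a shadow copy in scc->wreg[]
 * and always rewrite the complete register, e.g.
 *
 *	or(scc, R3, RxENABLE);	 sets RxENABLE but preserves the other R3 bits
 *	cl(scc, R3, RxENABLE);	 clears it again, still based on the shadow copy
 */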
/* ******************************************************************** */
/* * Some useful macros * */
/* ******************************************************************** */
static inline void scc_discard_buffers(struct scc_channel *scc)
{
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
if (scc->tx_buff != NULL)
{
dev_kfree_skb_irq(scc->tx_buff);
scc->tx_buff = NULL;
}
while (!skb_queue_empty(&scc->tx_queue))
dev_kfree_skb_irq(skb_dequeue(&scc->tx_queue));
spin_unlock_irqrestore(&scc->lock, flags);
}
/* ******************************************************************** */
/* * Interrupt Service Routines * */
/* ******************************************************************** */
/* ----> subroutines for the interrupt handlers <---- */
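/* scc_notify(): in KISS_DUPLEX_OPTIMA mode hardware events such as DCD
 * changes are passed to user space as a two byte pseudo KISS frame:
 * <PARAM_HWEVENT> <event>.
 */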
static inline void scc_notify(struct scc_channel *scc, int event)
{
struct sk_buff *skb;
char *bp;
if (scc->kiss.fulldup != KISS_DUPLEX_OPTIMA)
return;
skb = dev_alloc_skb(2);
if (skb != NULL)
{
bp = skb_put(skb, 2);
*bp++ = PARAM_HWEVENT;
*bp++ = event;
scc_net_rx(scc, skb);
} else
scc->stat.nospace++;
}
static inline void flush_rx_FIFO(struct scc_channel *scc)
{
int k;
for (k=0; k<3; k++)
Inb(scc->data);
if(scc->rx_buff != NULL) /* did we receive something? */
{
scc->stat.rxerrs++; /* then count it as an error */
dev_kfree_skb_irq(scc->rx_buff);
scc->rx_buff = NULL;
}
}
static void start_hunt(struct scc_channel *scc)
{
if ((scc->modem.clocksrc != CLK_EXTERNAL))
OutReg(scc->ctrl,R14,SEARCH|scc->wreg[R14]); /* DPLL: enter search mode */
or(scc,R3,ENT_HM|RxENABLE); /* enable the receiver, hunt mode */
}
/* ----> four different interrupt handlers for Tx, Rx, changing of */
/* DCD/CTS and Rx/Tx errors */
/* Transmitter interrupt handler */
static inline void scc_txint(struct scc_channel *scc)
{
struct sk_buff *skb;
scc->stat.txints++;
skb = scc->tx_buff;
/* send first octet */
if (skb == NULL)
{
skb = skb_dequeue(&scc->tx_queue);
scc->tx_buff = skb;
netif_wake_queue(scc->dev);
if (skb == NULL)
{
scc_tx_done(scc);
Outb(scc->ctrl, RES_Tx_P);
return;
}
if (skb->len == 0) /* Paranoia... */
{
dev_kfree_skb_irq(skb);
scc->tx_buff = NULL;
scc_tx_done(scc);
Outb(scc->ctrl, RES_Tx_P);
return;
}
scc->stat.tx_state = TXS_ACTIVE;
OutReg(scc->ctrl, R0, RES_Tx_CRC);
/* reset CRC generator */
or(scc,R10,ABUNDER); /* re-install underrun protection */
Outb(scc->data,*skb->data); /* send byte */
skb_pull(skb, 1);
if (!scc->enhanced) /* reset EOM latch */
Outb(scc->ctrl,RES_EOM_L);
return;
}
/* End Of Frame... */
if (skb->len == 0)
{
Outb(scc->ctrl, RES_Tx_P); /* reset pending int */
cl(scc, R10, ABUNDER); /* send CRC */
dev_kfree_skb_irq(skb);
scc->tx_buff = NULL;
scc->stat.tx_state = TXS_NEWFRAME; /* next frame... */
return;
}
/* send octet */
Outb(scc->data,*skb->data);
skb_pull(skb, 1);
}
/* External/Status interrupt handler */
static inline void scc_exint(struct scc_channel *scc)
{
unsigned char status,changes,chg_and_stat;
scc->stat.exints++;
status = InReg(scc->ctrl,R0);
changes = status ^ scc->status;
chg_and_stat = changes & status;
/* ABORT: generated whenever DCD drops while receiving */
if (chg_and_stat & BRK_ABRT) /* Received an ABORT */
flush_rx_FIFO(scc);
/* HUNT: software DCD; on = waiting for SYNC, off = receiving frame */
if ((changes & SYNC_HUNT) && scc->kiss.softdcd)
{
if (status & SYNC_HUNT)
{
scc->dcd = 0;
flush_rx_FIFO(scc);
if ((scc->modem.clocksrc != CLK_EXTERNAL))
OutReg(scc->ctrl,R14,SEARCH|scc->wreg[R14]); /* DPLL: enter search mode */
} else {
scc->dcd = 1;
}
scc_notify(scc, scc->dcd? HWEV_DCD_OFF:HWEV_DCD_ON);
}
/* DCD: on = start to receive packet, off = ABORT condition */
/* (a successfully received packet generates a special condition int) */
if((changes & DCD) && !scc->kiss.softdcd) /* DCD input changed state */
{
if(status & DCD) /* DCD is now ON */
{
start_hunt(scc);
scc->dcd = 1;
} else { /* DCD is now OFF */
cl(scc,R3,ENT_HM|RxENABLE); /* disable the receiver */
flush_rx_FIFO(scc);
scc->dcd = 0;
}
scc_notify(scc, scc->dcd? HWEV_DCD_ON:HWEV_DCD_OFF);
}
#ifdef notdef
/* CTS: use external TxDelay (what's that good for?!)
* Anyway: If we _could_ use it (BayCom USCC uses CTS for
* own purposes) we _should_ use the "autoenable" feature
* of the Z8530 and not this interrupt...
*/
if (chg_and_stat & CTS) /* CTS is now ON */
{
if (scc->kiss.txdelay == 0) /* zero TXDELAY = wait for CTS */
scc_start_tx_timer(scc, t_txdelay, 0);
}
#endif
if (scc->stat.tx_state == TXS_ACTIVE && (status & TxEOM))
{
scc->stat.tx_under++; /* oops, an underrun! count 'em */
Outb(scc->ctrl, RES_EXT_INT); /* reset ext/status interrupts */
if (scc->tx_buff != NULL)
{
dev_kfree_skb_irq(scc->tx_buff);
scc->tx_buff = NULL;
}
or(scc,R10,ABUNDER);
scc_start_tx_timer(scc, t_txdelay, 0); /* restart transmission */
}
scc->status = status;
Outb(scc->ctrl,RES_EXT_INT);
}
/* Receiver interrupt handler */
static inline void scc_rxint(struct scc_channel *scc)
{
struct sk_buff *skb;
scc->stat.rxints++;
if((scc->wreg[5] & RTS) && scc->kiss.fulldup == KISS_DUPLEX_HALF)
{
Inb(scc->data); /* discard char */
or(scc,R3,ENT_HM); /* enter hunt mode for next flag */
return;
}
skb = scc->rx_buff;
if (skb == NULL)
{
skb = dev_alloc_skb(scc->stat.bufsize);
if (skb == NULL)
{
scc->dev_stat.rx_dropped++;
scc->stat.nospace++;
Inb(scc->data);
or(scc, R3, ENT_HM);
return;
}
scc->rx_buff = skb;
skb_put_u8(skb, 0); /* KISS data */
}
if (skb->len >= scc->stat.bufsize)
{
#ifdef notdef
printk(KERN_DEBUG "z8530drv: oops, scc_rxint() received huge frame...\n");
#endif
dev_kfree_skb_irq(skb);
scc->rx_buff = NULL;
Inb(scc->data);
or(scc, R3, ENT_HM);
return;
}
skb_put_u8(skb, Inb(scc->data));
}
/* Receive Special Condition interrupt handler */
static inline void scc_spint(struct scc_channel *scc)
{
unsigned char status;
struct sk_buff *skb;
scc->stat.spints++;
status = InReg(scc->ctrl,R1); /* read receiver status */
Inb(scc->data); /* throw away Rx byte */
skb = scc->rx_buff;
if(status & Rx_OVR) /* receiver overrun */
{
scc->stat.rx_over++; /* count them */
or(scc,R3,ENT_HM); /* enter hunt mode for next flag */
if (skb != NULL)
dev_kfree_skb_irq(skb);
scc->rx_buff = skb = NULL;
}
if(status & END_FR && skb != NULL) /* end of frame */
{
/* CRC okay, frame ends on 8 bit boundary and received something ? */
if (!(status & CRC_ERR) && (status & 0xe) == RES8 && skb->len > 0)
{
/* ignore last received byte (first of the CRC bytes) */
skb_trim(skb, skb->len-1);
scc_net_rx(scc, skb);
scc->rx_buff = NULL;
scc->stat.rxframes++;
} else { /* a bad frame */
dev_kfree_skb_irq(skb);
scc->rx_buff = NULL;
scc->stat.rxerrs++;
}
}
Outb(scc->ctrl,ERR_RES);
}
/* ----> interrupt service routine for the Z8530 <---- */
static void scc_isr_dispatch(struct scc_channel *scc, int vector)
{
spin_lock(&scc->lock);
switch (vector & VECTOR_MASK)
{
case TXINT: scc_txint(scc); break;
case EXINT: scc_exint(scc); break;
case RXINT: scc_rxint(scc); break;
case SPINT: scc_spint(scc); break;
}
spin_unlock(&scc->lock);
}
/* If the card has a latch for the interrupt vector (like the PA0HZP card)
use it to get the number of the chip that generated the int.
If not: poll all defined chips.
*/
#define SCC_IRQTIMEOUT 30000
static irqreturn_t scc_isr(int irq, void *dev_id)
{
int chip_irq = (long) dev_id;
unsigned char vector;
struct scc_channel *scc;
struct scc_ctrl *ctrl;
int k;
if (Vector_Latch)
{
for(k=0; k < SCC_IRQTIMEOUT; k++)
{
Outb(Vector_Latch, 0); /* Generate INTACK */
/* Read the vector */
if((vector=Inb(Vector_Latch)) >= 16 * Nchips) break;
if (vector & 0x01) break;
scc=&SCC_Info[vector >> 3 ^ 0x01];
if (!scc->dev) break;
scc_isr_dispatch(scc, vector);
OutReg(scc->ctrl,R0,RES_H_IUS); /* Reset Highest IUS */
}
if (k == SCC_IRQTIMEOUT)
printk(KERN_WARNING "z8530drv: endless loop in scc_isr()?\n");
return IRQ_HANDLED;
}
/* Find the SCC generating the interrupt by polling all attached SCCs
* reading RR3A (the interrupt pending register)
*/
ctrl = SCC_ctrl;
while (ctrl->chan_A)
{
if (ctrl->irq != chip_irq)
{
ctrl++;
continue;
}
scc = NULL;
for (k = 0; InReg(ctrl->chan_A,R3) && k < SCC_IRQTIMEOUT; k++)
{
vector=InReg(ctrl->chan_B,R2); /* Read the vector */
if (vector & 0x01) break;
scc = &SCC_Info[vector >> 3 ^ 0x01];
if (!scc->dev) break;
scc_isr_dispatch(scc, vector);
}
if (k == SCC_IRQTIMEOUT)
{
printk(KERN_WARNING "z8530drv: endless loop in scc_isr()?!\n");
break;
}
/* This looks weird and it is. At least the BayCom USCC doesn't
* use the Interrupt Daisy Chain, thus we'll have to start
* all over again to be sure not to miss an interrupt from
* (any of) the other chip(s)...
* Honestly, the situation *is* braindamaged...
*/
if (scc != NULL)
{
OutReg(scc->ctrl,R0,RES_H_IUS);
ctrl = SCC_ctrl;
} else
ctrl++;
}
return IRQ_HANDLED;
}
/* ******************************************************************** */
/* * Init Channel * */
/* ******************************************************************** */
/* ----> set SCC channel speed <---- */
static inline void set_brg(struct scc_channel *scc, unsigned int tc)
{
cl(scc,R14,BRENABL); /* disable baudrate generator */
wr(scc,R12,tc & 255); /* brg rate LOW */
wr(scc,R13,tc >> 8); /* brg rate HIGH */
or(scc,R14,BRENABL); /* enable baudrate generator */
}
static inline void set_speed(struct scc_channel *scc)
{
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
if (scc->modem.speed > 0) /* paranoia... */
set_brg(scc, (unsigned) (scc->clock / (scc->modem.speed * 64)) - 2);
spin_unlock_irqrestore(&scc->lock, flags);
}
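/* Worked example (illustration only): with the default 4915200 Hz PCLK and
 * a 1200 baud channel the call above programs
 *
 *	tc = 4915200 / (1200 * 64) - 2 = 64 - 2 = 62
 */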
/* ----> initialize a SCC channel <---- */
static inline void init_brg(struct scc_channel *scc)
{
wr(scc, R14, BRSRC); /* BRG source = PCLK */
OutReg(scc->ctrl, R14, SSBR|scc->wreg[R14]); /* DPLL source = BRG */
OutReg(scc->ctrl, R14, SNRZI|scc->wreg[R14]); /* DPLL NRZI mode */
}
/*
* Initialization according to the Z8530 manual (SGS-Thomson's version):
*
* 1. Modes and constants
*
* WR9 11000000 chip reset
* WR4 XXXXXXXX Tx/Rx control, async or sync mode
* WR1 0XX00X00 select W/REQ (optional)
* WR2 XXXXXXXX program interrupt vector
* WR3 XXXXXXX0 select Rx control
* WR5 XXXX0XXX select Tx control
* WR6 XXXXXXXX sync character
* WR7 XXXXXXXX sync character
* WR9 000X0XXX select interrupt control
* WR10 XXXXXXXX miscellaneous control (optional)
* WR11 XXXXXXXX clock control
* WR12 XXXXXXXX time constant lower byte (optional)
* WR13 XXXXXXXX time constant upper byte (optional)
* WR14 XXXXXXX0 miscellaneous control
* WR14 XXXSSSSS commands (optional)
*
* 2. Enables
*
* WR14 000SSSS1 baud rate enable
* WR3 SSSSSSS1 Rx enable
* WR5 SSSS1SSS Tx enable
* WR0 10000000 reset Tx CRC generator (optional)
* WR1 XSS00S00 DMA enable (optional)
*
* 3. Interrupt status
*
* WR15 XXXXXXXX enable external/status
* WR0 00010000 reset external status
* WR0 00010000 reset external status twice
* WR1 SSSXXSXX enable Rx, Tx and Ext/status
* WR9 000SXSSS enable master interrupt enable
*
* 1 = set to one, 0 = reset to zero
* X = user defined, S = same as previous init
*
*
* Note that the implementation differs in some points from above scheme.
*
*/
static void init_channel(struct scc_channel *scc)
{
del_timer(&scc->tx_t);
del_timer(&scc->tx_wdog);
disable_irq(scc->irq);
wr(scc,R4,X1CLK|SDLC); /* *1 clock, SDLC mode */
wr(scc,R1,0); /* no W/REQ operation */
wr(scc,R3,Rx8|RxCRC_ENAB); /* RX 8 bits/char, CRC, disabled */
wr(scc,R5,Tx8|DTR|TxCRC_ENAB); /* TX 8 bits/char, disabled, DTR */
wr(scc,R6,0); /* SDLC address zero (not used) */
wr(scc,R7,FLAG); /* SDLC flag value */
wr(scc,R9,VIS); /* vector includes status */
wr(scc,R10,(scc->modem.nrz? NRZ : NRZI)|CRCPS|ABUNDER); /* abort on underrun, preset CRC generator, NRZ(I) */
wr(scc,R14, 0);
/* set clock sources:
   CLK_DPLL:     normal halfduplex operation
                 RxClk: use DPLL
                 TxClk: use DPLL
                 TRxC mode DPLL output
   CLK_EXTERNAL: external clocking (G3RUH or DF9IC modem)
                 BayCom:                 others:
                 TxClk = pin RTxC        TxClk = pin TRxC
                 RxClk = pin TRxC        RxClk = pin RTxC
   CLK_DIVIDER:  RxClk = use DPLL
                 TxClk = pin RTxC
                 BayCom:                 others:
                 pin TRxC = DPLL         pin TRxC = BRG
                 (RxClk * 1)             (RxClk * 32)
*/
switch(scc->modem.clocksrc)
{
case CLK_DPLL:
wr(scc, R11, RCDPLL|TCDPLL|TRxCOI|TRxCDP);
init_brg(scc);
break;
case CLK_DIVIDER:
wr(scc, R11, ((scc->brand & BAYCOM)? TRxCDP : TRxCBR) | RCDPLL|TCRTxCP|TRxCOI);
init_brg(scc);
break;
case CLK_EXTERNAL:
wr(scc, R11, (scc->brand & BAYCOM)? RCTRxCP|TCRTxCP : RCRTxCP|TCTRxCP);
OutReg(scc->ctrl, R14, DISDPLL);
break;
}
set_speed(scc); /* set baudrate */
if(scc->enhanced)
{
or(scc,R15,SHDLCE|FIFOE); /* enable FIFO, SDLC/HDLC Enhancements (From now R7 is R7') */
wr(scc,R7,AUTOEOM);
}
if(scc->kiss.softdcd || (InReg(scc->ctrl,R0) & DCD))
/* DCD is now ON */
{
start_hunt(scc);
}
/* enable ABORT, DCD & SYNC/HUNT interrupts */
wr(scc,R15, BRKIE|TxUIE|(scc->kiss.softdcd? SYNCIE:DCDIE));
Outb(scc->ctrl,RES_EXT_INT); /* reset ext/status interrupts */
Outb(scc->ctrl,RES_EXT_INT); /* must be done twice */
or(scc,R1,INT_ALL_Rx|TxINT_ENAB|EXT_INT_ENAB); /* enable interrupts */
scc->status = InReg(scc->ctrl,R0); /* read initial status */
or(scc,R9,MIE); /* master interrupt enable */
scc_init_timer(scc);
enable_irq(scc->irq);
}
/* ******************************************************************** */
/* * SCC timer functions * */
/* ******************************************************************** */
/* ----> scc_key_trx sets the time constant for the baudrate
generator and keys the transmitter <---- */
static void scc_key_trx(struct scc_channel *scc, char tx)
{
unsigned int time_const;
if (scc->brand & PRIMUS)
Outb(scc->ctrl + 4, scc->option | (tx? 0x80 : 0));
if (scc->modem.speed < 300)
scc->modem.speed = 1200;
time_const = (unsigned) (scc->clock / (scc->modem.speed * (tx? 2:64))) - 2;
disable_irq(scc->irq);
if (tx)
{
or(scc, R1, TxINT_ENAB); /* t_maxkeyup may have reset these */
or(scc, R15, TxUIE);
}
if (scc->modem.clocksrc == CLK_DPLL)
{ /* force simplex operation */
if (tx)
{
#ifdef CONFIG_SCC_TRXECHO
cl(scc, R3, RxENABLE|ENT_HM); /* switch off receiver */
cl(scc, R15, DCDIE|SYNCIE); /* No DCD changes, please */
#endif
set_brg(scc, time_const); /* reprogram baudrate generator */
/* DPLL -> Rx clk, BRG -> Tx CLK, TRxC mode output, TRxC = BRG */
wr(scc, R11, RCDPLL|TCBR|TRxCOI|TRxCBR);
/* By popular demand: tx_inhibit */
if (scc->kiss.tx_inhibit)
{
or(scc,R5, TxENAB);
scc->wreg[R5] |= RTS;
} else {
or(scc,R5,RTS|TxENAB); /* set the RTS line and enable TX */
}
} else {
cl(scc,R5,RTS|TxENAB);
set_brg(scc, time_const); /* reprogram baudrate generator */
/* DPLL -> Rx clk, DPLL -> Tx CLK, TRxC mode output, TRxC = DPLL */
wr(scc, R11, RCDPLL|TCDPLL|TRxCOI|TRxCDP);
#ifndef CONFIG_SCC_TRXECHO
if (scc->kiss.softdcd)
#endif
{
or(scc,R15, scc->kiss.softdcd? SYNCIE:DCDIE);
start_hunt(scc);
}
}
} else {
if (tx)
{
#ifdef CONFIG_SCC_TRXECHO
if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
{
cl(scc, R3, RxENABLE);
cl(scc, R15, DCDIE|SYNCIE);
}
#endif
if (scc->kiss.tx_inhibit)
{
or(scc,R5, TxENAB);
scc->wreg[R5] |= RTS;
} else {
or(scc,R5,RTS|TxENAB); /* enable tx */
}
} else {
cl(scc,R5,RTS|TxENAB); /* disable tx */
if ((scc->kiss.fulldup == KISS_DUPLEX_HALF) &&
#ifndef CONFIG_SCC_TRXECHO
scc->kiss.softdcd)
#else
1)
#endif
{
or(scc, R15, scc->kiss.softdcd? SYNCIE:DCDIE);
start_hunt(scc);
}
}
}
enable_irq(scc->irq);
}
/* ----> SCC timer interrupt handler and friends. <---- */
static void __scc_start_tx_timer(struct scc_channel *scc,
void (*handler)(struct timer_list *t),
unsigned long when)
{
del_timer(&scc->tx_t);
if (when == 0)
{
handler(&scc->tx_t);
} else
if (when != TIMER_OFF)
{
scc->tx_t.function = handler;
scc->tx_t.expires = jiffies + (when*HZ)/100;
add_timer(&scc->tx_t);
}
}
static void scc_start_tx_timer(struct scc_channel *scc,
void (*handler)(struct timer_list *t),
unsigned long when)
{
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
__scc_start_tx_timer(scc, handler, when);
spin_unlock_irqrestore(&scc->lock, flags);
}
static void scc_start_defer(struct scc_channel *scc)
{
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
del_timer(&scc->tx_wdog);
if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF)
{
scc->tx_wdog.function = t_busy;
scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxdefer;
add_timer(&scc->tx_wdog);
}
spin_unlock_irqrestore(&scc->lock, flags);
}
static void scc_start_maxkeyup(struct scc_channel *scc)
{
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
del_timer(&scc->tx_wdog);
if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF)
{
scc->tx_wdog.function = t_maxkeyup;
scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxkeyup;
add_timer(&scc->tx_wdog);
}
spin_unlock_irqrestore(&scc->lock, flags);
}
/*
* This is called from scc_txint() when there are no more frames to send.
* Not exactly a timer function, but it is a close friend of the family...
*/
static void scc_tx_done(struct scc_channel *scc)
{
/*
* trx remains keyed in fulldup mode 2 until t_idle expires.
*/
switch (scc->kiss.fulldup)
{
case KISS_DUPLEX_LINK:
scc->stat.tx_state = TXS_IDLE2;
if (scc->kiss.idletime != TIMER_OFF)
scc_start_tx_timer(scc, t_idle,
scc->kiss.idletime*100);
break;
case KISS_DUPLEX_OPTIMA:
scc_notify(scc, HWEV_ALL_SENT);
break;
default:
scc->stat.tx_state = TXS_BUSY;
scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
}
netif_wake_queue(scc->dev);
}
static unsigned char Rand = 17;
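/* is_grouped(): channel grouping. The lower six bits of kiss.group select
 * the group; the TXGROUP/RXGROUP flags decide whether a keyed transmitter
 * (RTS set) or an active DCD on another channel of the same group defers
 * our own transmission.
 */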
static inline int is_grouped(struct scc_channel *scc)
{
int k;
struct scc_channel *scc2;
unsigned char grp1, grp2;
grp1 = scc->kiss.group;
for (k = 0; k < (Nchips * 2); k++)
{
scc2 = &SCC_Info[k];
grp2 = scc2->kiss.group;
if (scc2 == scc || !(scc2->dev && grp2))
continue;
if ((grp1 & 0x3f) == (grp2 & 0x3f))
{
if ( (grp1 & TXGROUP) && (scc2->wreg[R5] & RTS) )
return 1;
if ( (grp1 & RXGROUP) && scc2->dcd )
return 1;
}
}
return 0;
}
/* DWAIT and SLOTTIME expired
*
* fulldup == 0: DCD is active or Rand > P-persistence: start t_busy timer
* else key trx and start txdelay
* fulldup == 1: key trx and start txdelay
* fulldup == 2: mintime expired, reset status or key trx and start txdelay
*/
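/* Rough numbers for the p-persistence check below (illustration only):
 * Rand is an 8 bit pseudo random value, so with kiss.persist == 64 the
 * channel is seized in roughly 64/256 = 25% of the free slots, otherwise
 * we defer for another 'slottime'.
 */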
static void t_dwait(struct timer_list *t)
{
struct scc_channel *scc = from_timer(scc, t, tx_t);
if (scc->stat.tx_state == TXS_WAIT) /* maxkeyup or idle timeout */
{
if (skb_queue_empty(&scc->tx_queue)) { /* nothing to send */
scc->stat.tx_state = TXS_IDLE;
netif_wake_queue(scc->dev); /* t_maxkeyup locked it. */
return;
}
scc->stat.tx_state = TXS_BUSY;
}
if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
{
Rand = Rand * 17 + 31;
if (scc->dcd || (scc->kiss.persist) < Rand || (scc->kiss.group && is_grouped(scc)) )
{
scc_start_defer(scc);
scc_start_tx_timer(scc, t_dwait, scc->kiss.slottime);
return ;
}
}
if ( !(scc->wreg[R5] & RTS) )
{
scc_key_trx(scc, TX_ON);
scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
} else {
scc_start_tx_timer(scc, t_txdelay, 0);
}
}
/* TXDELAY expired
*
* kick transmission by a fake scc_txint(scc), start 'maxkeyup' watchdog.
*/
static void t_txdelay(struct timer_list *t)
{
struct scc_channel *scc = from_timer(scc, t, tx_t);
scc_start_maxkeyup(scc);
if (scc->tx_buff == NULL)
{
disable_irq(scc->irq);
scc_txint(scc);
enable_irq(scc->irq);
}
}
/* TAILTIME expired
*
* switch off transmitter. If we were stopped by Maxkeyup restart
* transmission after 'mintime' seconds
*/
static void t_tail(struct timer_list *t)
{
struct scc_channel *scc = from_timer(scc, t, tx_t);
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
del_timer(&scc->tx_wdog);
scc_key_trx(scc, TX_OFF);
spin_unlock_irqrestore(&scc->lock, flags);
if (scc->stat.tx_state == TXS_TIMEOUT) /* we had a timeout? */
{
scc->stat.tx_state = TXS_WAIT;
scc_start_tx_timer(scc, t_dwait, scc->kiss.mintime*100);
return;
}
scc->stat.tx_state = TXS_IDLE;
netif_wake_queue(scc->dev);
}
/* BUSY timeout
*
* throw away send buffers if DCD remains active too long.
*/
static void t_busy(struct timer_list *t)
{
struct scc_channel *scc = from_timer(scc, t, tx_wdog);
del_timer(&scc->tx_t);
netif_stop_queue(scc->dev); /* don't pile on the wabbit! */
scc_discard_buffers(scc);
scc->stat.txerrs++;
scc->stat.tx_state = TXS_IDLE;
netif_wake_queue(scc->dev);
}
/* MAXKEYUP timeout
*
* this is our watchdog.
*/
static void t_maxkeyup(struct timer_list *t)
{
struct scc_channel *scc = from_timer(scc, t, tx_wdog);
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
/*
* let things settle down before we start to
* accept new data.
*/
netif_stop_queue(scc->dev);
scc_discard_buffers(scc);
del_timer(&scc->tx_t);
cl(scc, R1, TxINT_ENAB); /* force an ABORT, but don't */
cl(scc, R15, TxUIE); /* count it. */
OutReg(scc->ctrl, R0, RES_Tx_P);
spin_unlock_irqrestore(&scc->lock, flags);
scc->stat.txerrs++;
scc->stat.tx_state = TXS_TIMEOUT;
scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
}
/* IDLE timeout
*
* in fulldup mode 2 it keys down the transmitter after 'idle' seconds
* of inactivity. We will not restart transmission before 'mintime'
* expires.
*/
static void t_idle(struct timer_list *t)
{
struct scc_channel *scc = from_timer(scc, t, tx_t);
del_timer(&scc->tx_wdog);
scc_key_trx(scc, TX_OFF);
if(scc->kiss.mintime)
scc_start_tx_timer(scc, t_dwait, scc->kiss.mintime*100);
scc->stat.tx_state = TXS_WAIT;
}
static void scc_init_timer(struct scc_channel *scc)
{
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
scc->stat.tx_state = TXS_IDLE;
spin_unlock_irqrestore(&scc->lock, flags);
}
/* ******************************************************************** */
/* * Set/get L1 parameters * */
/* ******************************************************************** */
/*
* this will set the "hardware" parameters through KISS commands or ioctl()
*/
#define CAST(x) (unsigned long)(x)
static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, unsigned int arg)
{
switch (cmd)
{
case PARAM_TXDELAY: scc->kiss.txdelay=arg; break;
case PARAM_PERSIST: scc->kiss.persist=arg; break;
case PARAM_SLOTTIME: scc->kiss.slottime=arg; break;
case PARAM_TXTAIL: scc->kiss.tailtime=arg; break;
case PARAM_FULLDUP: scc->kiss.fulldup=arg; break;
case PARAM_DTR: break; /* does someone need this? */
case PARAM_GROUP: scc->kiss.group=arg; break;
case PARAM_IDLE: scc->kiss.idletime=arg; break;
case PARAM_MIN: scc->kiss.mintime=arg; break;
case PARAM_MAXKEY: scc->kiss.maxkeyup=arg; break;
case PARAM_WAIT: scc->kiss.waittime=arg; break;
case PARAM_MAXDEFER: scc->kiss.maxdefer=arg; break;
case PARAM_TX: scc->kiss.tx_inhibit=arg; break;
case PARAM_SOFTDCD:
scc->kiss.softdcd=arg;
if (arg)
{
or(scc, R15, SYNCIE);
cl(scc, R15, DCDIE);
start_hunt(scc);
} else {
or(scc, R15, DCDIE);
cl(scc, R15, SYNCIE);
}
break;
case PARAM_SPEED:
if (arg < 256)
scc->modem.speed=arg*100;
else
scc->modem.speed=arg;
if (scc->stat.tx_state == 0) /* only switch baudrate on rx... ;-) */
set_speed(scc);
break;
case PARAM_RTS:
if ( !(scc->wreg[R5] & RTS) )
{
if (arg != TX_OFF) {
scc_key_trx(scc, TX_ON);
scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
}
} else {
if (arg == TX_OFF)
{
scc->stat.tx_state = TXS_BUSY;
scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
}
}
break;
case PARAM_HWEVENT:
scc_notify(scc, scc->dcd? HWEV_DCD_ON:HWEV_DCD_OFF);
break;
default: return -EINVAL;
}
return 0;
}
static unsigned long scc_get_param(struct scc_channel *scc, unsigned int cmd)
{
switch (cmd)
{
case PARAM_TXDELAY: return CAST(scc->kiss.txdelay);
case PARAM_PERSIST: return CAST(scc->kiss.persist);
case PARAM_SLOTTIME: return CAST(scc->kiss.slottime);
case PARAM_TXTAIL: return CAST(scc->kiss.tailtime);
case PARAM_FULLDUP: return CAST(scc->kiss.fulldup);
case PARAM_SOFTDCD: return CAST(scc->kiss.softdcd);
case PARAM_DTR: return CAST((scc->wreg[R5] & DTR)? 1:0);
case PARAM_RTS: return CAST((scc->wreg[R5] & RTS)? 1:0);
case PARAM_SPEED: return CAST(scc->modem.speed);
case PARAM_GROUP: return CAST(scc->kiss.group);
case PARAM_IDLE: return CAST(scc->kiss.idletime);
case PARAM_MIN: return CAST(scc->kiss.mintime);
case PARAM_MAXKEY: return CAST(scc->kiss.maxkeyup);
case PARAM_WAIT: return CAST(scc->kiss.waittime);
case PARAM_MAXDEFER: return CAST(scc->kiss.maxdefer);
case PARAM_TX: return CAST(scc->kiss.tx_inhibit);
default: return NO_SUCH_PARAM;
}
}
#undef CAST
/* ******************************************************************* */
/* * Send calibration pattern * */
/* ******************************************************************* */
static void scc_stop_calibrate(struct timer_list *t)
{
struct scc_channel *scc = from_timer(scc, t, tx_wdog);
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
del_timer(&scc->tx_wdog);
scc_key_trx(scc, TX_OFF);
wr(scc, R6, 0);
wr(scc, R7, FLAG);
Outb(scc->ctrl,RES_EXT_INT); /* reset ext/status interrupts */
Outb(scc->ctrl,RES_EXT_INT);
netif_wake_queue(scc->dev);
spin_unlock_irqrestore(&scc->lock, flags);
}
static void
scc_start_calibrate(struct scc_channel *scc, int duration, unsigned char pattern)
{
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
netif_stop_queue(scc->dev);
scc_discard_buffers(scc);
del_timer(&scc->tx_wdog);
scc->tx_wdog.function = scc_stop_calibrate;
scc->tx_wdog.expires = jiffies + HZ*duration;
add_timer(&scc->tx_wdog);
/* This doesn't seem to work. Why not? */
wr(scc, R6, 0);
wr(scc, R7, pattern);
/*
* Don't know if this works.
* Damn, where is my Z8530 programming manual...?
*/
Outb(scc->ctrl,RES_EXT_INT); /* reset ext/status interrupts */
Outb(scc->ctrl,RES_EXT_INT);
scc_key_trx(scc, TX_ON);
spin_unlock_irqrestore(&scc->lock, flags);
}
/* ******************************************************************* */
/* * Init channel structures, special HW, etc... * */
/* ******************************************************************* */
/*
* Reset the Z8530s and setup special hardware
*/
static void z8530_init(void)
{
struct scc_channel *scc;
int chip, k;
unsigned long flags;
char *flag;
printk(KERN_INFO "Init Z8530 driver: %u channels, IRQ", Nchips*2);
flag=" ";
for (k = 0; k < nr_irqs; k++)
if (Ivec[k].used)
{
printk("%s%d", flag, k);
flag=",";
}
printk("\n");
/* reset and pre-init all chips in the system */
for (chip = 0; chip < Nchips; chip++)
{
scc=&SCC_Info[2*chip];
if (!scc->ctrl) continue;
/* Special SCC cards */
if(scc->brand & EAGLE) /* this is an EAGLE card */
Outb(scc->special,0x08); /* enable interrupt on the board */
if(scc->brand & (PC100 | PRIMUS)) /* this is a PC100/PRIMUS card */
Outb(scc->special,scc->option); /* set the MODEM mode (0x22) */
/* Reset and pre-init Z8530 */
spin_lock_irqsave(&scc->lock, flags);
Outb(scc->ctrl, 0);
OutReg(scc->ctrl,R9,FHWRES); /* force hardware reset */
udelay(100); /* give it 'a bit' more time than required */
wr(scc, R2, chip*16); /* interrupt vector */
wr(scc, R9, VIS); /* vector includes status */
spin_unlock_irqrestore(&scc->lock, flags);
}
Driver_Initialized = 1;
}
/*
* Allocate device structure, err, instance, and register driver
*/
static int scc_net_alloc(const char *name, struct scc_channel *scc)
{
int err;
struct net_device *dev;
dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, scc_net_setup);
if (!dev)
return -ENOMEM;
dev->ml_priv = scc;
scc->dev = dev;
spin_lock_init(&scc->lock);
timer_setup(&scc->tx_t, NULL, 0);
timer_setup(&scc->tx_wdog, NULL, 0);
err = register_netdevice(dev);
if (err) {
printk(KERN_ERR "%s: can't register network device (%d)\n",
name, err);
free_netdev(dev);
scc->dev = NULL;
return err;
}
return 0;
}
/* ******************************************************************** */
/* * Network driver methods * */
/* ******************************************************************** */
static const struct net_device_ops scc_netdev_ops = {
.ndo_open = scc_net_open,
.ndo_stop = scc_net_close,
.ndo_start_xmit = scc_net_tx,
.ndo_set_mac_address = scc_net_set_mac_address,
.ndo_get_stats = scc_net_get_stats,
.ndo_siocdevprivate = scc_net_siocdevprivate,
};
/* ----> Initialize device <----- */
static void scc_net_setup(struct net_device *dev)
{
dev->tx_queue_len = 16; /* should be enough... */
dev->netdev_ops = &scc_netdev_ops;
dev->header_ops = &ax25_header_ops;
dev->flags = 0;
dev->type = ARPHRD_AX25;
dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
dev->mtu = AX25_DEF_PACLEN;
dev->addr_len = AX25_ADDR_LEN;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
/* ----> open network device <---- */
static int scc_net_open(struct net_device *dev)
{
struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
if (!scc->init)
return -EINVAL;
scc->tx_buff = NULL;
skb_queue_head_init(&scc->tx_queue);
init_channel(scc);
netif_start_queue(dev);
return 0;
}
/* ----> close network device <---- */
static int scc_net_close(struct net_device *dev)
{
struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
unsigned long flags;
netif_stop_queue(dev);
spin_lock_irqsave(&scc->lock, flags);
Outb(scc->ctrl,0); /* Make sure pointer is written */
wr(scc,R1,0); /* disable interrupts */
wr(scc,R3,0);
spin_unlock_irqrestore(&scc->lock, flags);
del_timer_sync(&scc->tx_t);
del_timer_sync(&scc->tx_wdog);
scc_discard_buffers(scc);
return 0;
}
/* ----> receive frame, called from scc_rxint() <---- */
static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb)
{
if (skb->len == 0) {
dev_kfree_skb_irq(skb);
return;
}
scc->dev_stat.rx_packets++;
scc->dev_stat.rx_bytes += skb->len;
skb->protocol = ax25_type_trans(skb, scc->dev);
netif_rx(skb);
}
/* ----> transmit frame <---- */
static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
{
struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
unsigned long flags;
char kisscmd;
if (skb->protocol == htons(ETH_P_IP))
return ax25_ip_xmit(skb);
if (skb->len > scc->stat.bufsize || skb->len < 2) {
scc->dev_stat.tx_dropped++; /* bogus frame */
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
scc->dev_stat.tx_packets++;
scc->dev_stat.tx_bytes += skb->len;
scc->stat.txframes++;
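/* The first byte of a KISS frame is the command byte: 0 means a data
 * frame, anything else selects a link parameter (TXDELAY, PERSIST, ...)
 * which is handled by scc_set_param().
 */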
kisscmd = *skb->data & 0x1f;
skb_pull(skb, 1);
if (kisscmd) {
scc_set_param(scc, kisscmd, *skb->data);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
spin_lock_irqsave(&scc->lock, flags);
if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) {
struct sk_buff *skb_del;
skb_del = skb_dequeue(&scc->tx_queue);
dev_kfree_skb_irq(skb_del);
}
skb_queue_tail(&scc->tx_queue, skb);
netif_trans_update(dev);
/*
* Start transmission if the trx state is idle or
* t_idle hasn't expired yet. Use dwait/persistence/slottime
* algorithm for normal halfduplex operation.
*/
if(scc->stat.tx_state == TXS_IDLE || scc->stat.tx_state == TXS_IDLE2) {
scc->stat.tx_state = TXS_BUSY;
if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
__scc_start_tx_timer(scc, t_dwait, scc->kiss.waittime);
else
__scc_start_tx_timer(scc, t_dwait, 0);
}
spin_unlock_irqrestore(&scc->lock, flags);
return NETDEV_TX_OK;
}
/* ----> ioctl functions <---- */
/*
* SIOCSCCCFG - configure driver arg: (struct scc_hw_config *) arg
* SIOCSCCINI - initialize driver arg: ---
* SIOCSCCCHANINI - initialize channel arg: (struct scc_modem *) arg
* SIOCSCCSMEM - set memory arg: (struct scc_mem_config *) arg
* SIOCSCCGKISS - get level 1 parameter arg: (struct scc_kiss_cmd *) arg
* SIOCSCCSKISS - set level 1 parameter arg: (struct scc_kiss_cmd *) arg
* SIOCSCCGSTAT - get driver status arg: (struct scc_stat *) arg
* SIOCSCCCAL - send calib. pattern arg: (struct scc_calibrate *) arg
*/
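/* Rough user space sketch (illustration only, local names made up; this is
 * roughly how tools like sccparam from ax25-tools drive these requests,
 * using <sys/ioctl.h>, <net/if.h> and <linux/scc.h>):
 *
 *	struct scc_kiss_cmd kiss_cmd = { .command = PARAM_TXDELAY };
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "scc0", IFNAMSIZ);
 *	ifr.ifr_data = (void *) &kiss_cmd;
 *	if (ioctl(sockfd, SIOCSCCGKISS, &ifr) == 0)
 *		printf("txdelay: %u\n", (unsigned) kiss_cmd.param);
 */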
static int scc_net_siocdevprivate(struct net_device *dev,
struct ifreq *ifr, void __user *arg, int cmd)
{
struct scc_kiss_cmd kiss_cmd;
struct scc_mem_config memcfg;
struct scc_hw_config hwcfg;
struct scc_calibrate cal;
struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
int chan;
unsigned char device_name[IFNAMSIZ];
if (!Driver_Initialized)
{
if (cmd == SIOCSCCCFG)
{
int found = 1;
if (!capable(CAP_SYS_RAWIO)) return -EPERM;
if (in_compat_syscall())
return -EOPNOTSUPP;
if (!arg) return -EFAULT;
if (Nchips >= SCC_MAXCHIPS)
return -EINVAL;
if (copy_from_user(&hwcfg, arg, sizeof(hwcfg)))
return -EFAULT;
if (hwcfg.irq == 2) hwcfg.irq = 9;
if (hwcfg.irq < 0 || hwcfg.irq >= nr_irqs)
return -EINVAL;
if (!Ivec[hwcfg.irq].used && hwcfg.irq)
{
if (request_irq(hwcfg.irq, scc_isr,
0, "AX.25 SCC",
(void *)(long) hwcfg.irq))
printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
else
Ivec[hwcfg.irq].used = 1;
}
if (hwcfg.vector_latch && !Vector_Latch) {
if (!request_region(hwcfg.vector_latch, 1, "scc vector latch"))
printk(KERN_WARNING "z8530drv: warning, cannot reserve vector latch port 0x%lx\n, disabled.", hwcfg.vector_latch);
else
Vector_Latch = hwcfg.vector_latch;
}
if (hwcfg.clock == 0)
hwcfg.clock = SCC_DEFAULT_CLOCK;
#ifndef SCC_DONT_CHECK
if(request_region(hwcfg.ctrl_a, 1, "scc-probe"))
{
disable_irq(hwcfg.irq);
Outb(hwcfg.ctrl_a, 0);
OutReg(hwcfg.ctrl_a, R9, FHWRES);
udelay(100);
OutReg(hwcfg.ctrl_a,R13,0x55); /* is this chip really there? */
udelay(5);
if (InReg(hwcfg.ctrl_a,R13) != 0x55)
found = 0;
enable_irq(hwcfg.irq);
release_region(hwcfg.ctrl_a, 1);
}
else
found = 0;
#endif
if (found)
{
SCC_Info[2*Nchips ].ctrl = hwcfg.ctrl_a;
SCC_Info[2*Nchips ].data = hwcfg.data_a;
SCC_Info[2*Nchips ].irq = hwcfg.irq;
SCC_Info[2*Nchips+1].ctrl = hwcfg.ctrl_b;
SCC_Info[2*Nchips+1].data = hwcfg.data_b;
SCC_Info[2*Nchips+1].irq = hwcfg.irq;
SCC_ctrl[Nchips].chan_A = hwcfg.ctrl_a;
SCC_ctrl[Nchips].chan_B = hwcfg.ctrl_b;
SCC_ctrl[Nchips].irq = hwcfg.irq;
}
for (chan = 0; chan < 2; chan++)
{
sprintf(device_name, "%s%i", SCC_DriverName, 2*Nchips+chan);
SCC_Info[2*Nchips+chan].special = hwcfg.special;
SCC_Info[2*Nchips+chan].clock = hwcfg.clock;
SCC_Info[2*Nchips+chan].brand = hwcfg.brand;
SCC_Info[2*Nchips+chan].option = hwcfg.option;
SCC_Info[2*Nchips+chan].enhanced = hwcfg.escc;
#ifdef SCC_DONT_CHECK
printk(KERN_INFO "%s: data port = 0x%3.3x control port = 0x%3.3x\n",
device_name,
SCC_Info[2*Nchips+chan].data,
SCC_Info[2*Nchips+chan].ctrl);
#else
printk(KERN_INFO "%s: data port = 0x%3.3lx control port = 0x%3.3lx -- %s\n",
device_name,
chan? hwcfg.data_b : hwcfg.data_a,
chan? hwcfg.ctrl_b : hwcfg.ctrl_a,
found? "found" : "missing");
#endif
if (found)
{
request_region(SCC_Info[2*Nchips+chan].ctrl, 1, "scc ctrl");
request_region(SCC_Info[2*Nchips+chan].data, 1, "scc data");
if (Nchips+chan != 0 &&
scc_net_alloc(device_name,
&SCC_Info[2*Nchips+chan]))
return -EINVAL;
}
}
if (found) Nchips++;
return 0;
}
if (cmd == SIOCSCCINI)
{
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (Nchips == 0)
return -EINVAL;
z8530_init();
return 0;
}
return -EINVAL; /* confuse the user */
}
if (!scc->init)
{
if (cmd == SIOCSCCCHANINI)
{
if (!capable(CAP_NET_ADMIN)) return -EPERM;
if (!arg) return -EINVAL;
scc->stat.bufsize = SCC_BUFSIZE;
if (copy_from_user(&scc->modem, arg, sizeof(struct scc_modem)))
return -EINVAL;
/* default KISS Params */
if (scc->modem.speed < 4800)
{
scc->kiss.txdelay = 36; /* 360 ms */
scc->kiss.persist = 42; /* ~16% persistence */ /* was 25 */
scc->kiss.slottime = 16; /* 160 ms */
scc->kiss.tailtime = 4; /* minimal reasonable value */
scc->kiss.fulldup = 0; /* CSMA */
scc->kiss.waittime = 50; /* 500 ms */
scc->kiss.maxkeyup = 10; /* 10 s */
scc->kiss.mintime = 3; /* 3 s */
scc->kiss.idletime = 30; /* 30 s */
scc->kiss.maxdefer = 120; /* 2 min */
scc->kiss.softdcd = 0; /* hardware dcd */
} else {
scc->kiss.txdelay = 10; /* 100 ms */
scc->kiss.persist = 64; /* 25% persistence */ /* was 25 */
scc->kiss.slottime = 8; /* 80 ms */
scc->kiss.tailtime = 1; /* minimal reasonable value */
scc->kiss.fulldup = 0; /* CSMA */
scc->kiss.waittime = 50; /* 500 ms */
scc->kiss.maxkeyup = 7; /* 7 s */
scc->kiss.mintime = 3; /* 3 s */
scc->kiss.idletime = 30; /* 30 s */
scc->kiss.maxdefer = 120; /* 2 min */
scc->kiss.softdcd = 0; /* hardware dcd */
}
scc->tx_buff = NULL;
skb_queue_head_init(&scc->tx_queue);
scc->init = 1;
return 0;
}
return -EINVAL;
}
switch(cmd)
{
case SIOCSCCRESERVED:
return -ENOIOCTLCMD;
case SIOCSCCSMEM:
if (!capable(CAP_SYS_RAWIO)) return -EPERM;
if (!arg || copy_from_user(&memcfg, arg, sizeof(memcfg)))
return -EINVAL;
scc->stat.bufsize = memcfg.bufsize;
return 0;
case SIOCSCCGSTAT:
if (!arg || copy_to_user(arg, &scc->stat, sizeof(scc->stat)))
return -EINVAL;
return 0;
case SIOCSCCGKISS:
if (!arg || copy_from_user(&kiss_cmd, arg, sizeof(kiss_cmd)))
return -EINVAL;
kiss_cmd.param = scc_get_param(scc, kiss_cmd.command);
if (copy_to_user(arg, &kiss_cmd, sizeof(kiss_cmd)))
return -EINVAL;
return 0;
case SIOCSCCSKISS:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
if (!arg || copy_from_user(&kiss_cmd, arg, sizeof(kiss_cmd)))
return -EINVAL;
return scc_set_param(scc, kiss_cmd.command, kiss_cmd.param);
case SIOCSCCCAL:
if (!capable(CAP_SYS_RAWIO)) return -EPERM;
if (!arg || copy_from_user(&cal, arg, sizeof(cal)) || cal.time == 0)
return -EINVAL;
scc_start_calibrate(scc, cal.time, cal.pattern);
return 0;
default:
return -ENOIOCTLCMD;
}
return -EINVAL;
}
/* ----> set interface callsign <---- */
static int scc_net_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = (struct sockaddr *) addr;
dev_addr_set(dev, sa->sa_data);
return 0;
}
/* ----> get statistics <---- */
static struct net_device_stats *scc_net_get_stats(struct net_device *dev)
{
struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
scc->dev_stat.rx_errors = scc->stat.rxerrs + scc->stat.rx_over;
scc->dev_stat.tx_errors = scc->stat.txerrs + scc->stat.tx_under;
scc->dev_stat.rx_fifo_errors = scc->stat.rx_over;
scc->dev_stat.tx_fifo_errors = scc->stat.tx_under;
return &scc->dev_stat;
}
/* ******************************************************************** */
/* * dump statistics to /proc/net/z8530drv * */
/* ******************************************************************** */
#ifdef CONFIG_PROC_FS
static inline struct scc_channel *scc_net_seq_idx(loff_t pos)
{
int k;
for (k = 0; k < Nchips*2; ++k) {
if (!SCC_Info[k].init)
continue;
if (pos-- == 0)
return &SCC_Info[k];
}
return NULL;
}
static void *scc_net_seq_start(struct seq_file *seq, loff_t *pos)
{
return *pos ? scc_net_seq_idx(*pos - 1) : SEQ_START_TOKEN;
}
static void *scc_net_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
unsigned k;
struct scc_channel *scc = v;
++*pos;
for (k = (v == SEQ_START_TOKEN) ? 0 : (scc - SCC_Info)+1;
k < Nchips*2; ++k) {
if (SCC_Info[k].init)
return &SCC_Info[k];
}
return NULL;
}
static void scc_net_seq_stop(struct seq_file *seq, void *v)
{
}
static int scc_net_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq, "z8530drv-"VERSION"\n");
} else if (!Driver_Initialized) {
seq_puts(seq, "not initialized\n");
} else if (!Nchips) {
seq_puts(seq, "chips missing\n");
} else {
const struct scc_channel *scc = v;
const struct scc_stat *stat = &scc->stat;
const struct scc_kiss *kiss = &scc->kiss;
/* dev data ctrl irq clock brand enh vector special option
* baud nrz clocksrc softdcd bufsize
* rxints txints exints spints
* rcvd rxerrs over / xmit txerrs under / nospace bufsize
* txd pers slot tail ful wait min maxk idl defr txof grp
* W ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
* R ## ## XX ## ## ## ## ## XX ## ## ## ## ## ## ##
*/
seq_printf(seq, "%s\t%3.3lx %3.3lx %d %lu %2.2x %d %3.3lx %3.3lx %d\n",
scc->dev->name,
scc->data, scc->ctrl, scc->irq, scc->clock, scc->brand,
scc->enhanced, Vector_Latch, scc->special,
scc->option);
seq_printf(seq, "\t%lu %d %d %d %d\n",
scc->modem.speed, scc->modem.nrz,
scc->modem.clocksrc, kiss->softdcd,
stat->bufsize);
seq_printf(seq, "\t%lu %lu %lu %lu\n",
stat->rxints, stat->txints, stat->exints, stat->spints);
seq_printf(seq, "\t%lu %lu %d / %lu %lu %d / %d %d\n",
stat->rxframes, stat->rxerrs, stat->rx_over,
stat->txframes, stat->txerrs, stat->tx_under,
stat->nospace, stat->tx_state);
#define K(x) kiss->x
seq_printf(seq, "\t%d %d %d %d %d %d %d %d %d %d %d %d\n",
K(txdelay), K(persist), K(slottime), K(tailtime),
K(fulldup), K(waittime), K(mintime), K(maxkeyup),
K(idletime), K(maxdefer), K(tx_inhibit), K(group));
#undef K
#ifdef SCC_DEBUG
{
int reg;
seq_printf(seq, "\tW ");
for (reg = 0; reg < 16; reg++)
seq_printf(seq, "%2.2x ", scc->wreg[reg]);
seq_printf(seq, "\n");
seq_printf(seq, "\tR %2.2x %2.2x XX ", InReg(scc->ctrl,R0), InReg(scc->ctrl,R1));
for (reg = 3; reg < 8; reg++)
seq_printf(seq, "%2.2x ", InReg(scc->ctrl, reg));
seq_printf(seq, "XX ");
for (reg = 9; reg < 16; reg++)
seq_printf(seq, "%2.2x ", InReg(scc->ctrl, reg));
seq_printf(seq, "\n");
}
#endif
seq_putc(seq, '\n');
}
return 0;
}
static const struct seq_operations scc_net_seq_ops = {
.start = scc_net_seq_start,
.next = scc_net_seq_next,
.stop = scc_net_seq_stop,
.show = scc_net_seq_show,
};
#endif /* CONFIG_PROC_FS */
/* ******************************************************************** */
/* * Init SCC driver * */
/* ******************************************************************** */
static int __init scc_init_driver (void)
{
char devname[IFNAMSIZ];
printk(banner);
sprintf(devname,"%s0", SCC_DriverName);
rtnl_lock();
if (scc_net_alloc(devname, SCC_Info)) {
rtnl_unlock();
printk(KERN_ERR "z8530drv: cannot initialize module\n");
return -EIO;
}
rtnl_unlock();
proc_create_seq("z8530drv", 0, init_net.proc_net, &scc_net_seq_ops);
return 0;
}
static void __exit scc_cleanup_driver(void)
{
io_port ctrl;
int k;
struct scc_channel *scc;
struct net_device *dev;
if (Nchips == 0 && (dev = SCC_Info[0].dev))
{
unregister_netdev(dev);
free_netdev(dev);
}
/* Guard against chip prattle */
local_irq_disable();
for (k = 0; k < Nchips; k++)
if ( (ctrl = SCC_ctrl[k].chan_A) )
{
Outb(ctrl, 0);
OutReg(ctrl,R9,FHWRES); /* force hardware reset */
udelay(50);
}
/* To unload the port must be closed so no real IRQ pending */
for (k = 0; k < nr_irqs ; k++)
if (Ivec[k].used) free_irq(k, NULL);
local_irq_enable();
/* Now clean up */
for (k = 0; k < Nchips*2; k++)
{
scc = &SCC_Info[k];
if (scc->ctrl)
{
release_region(scc->ctrl, 1);
release_region(scc->data, 1);
}
if (scc->dev)
{
unregister_netdev(scc->dev);
free_netdev(scc->dev);
}
}
if (Vector_Latch)
release_region(Vector_Latch, 1);
remove_proc_entry("z8530drv", init_net.proc_net);
}
MODULE_AUTHOR("Joerg Reuter <[email protected]>");
MODULE_DESCRIPTION("AX.25 Device Driver for Z8530 based HDLC cards");
MODULE_LICENSE("GPL");
module_init(scc_init_driver);
module_exit(scc_cleanup_driver);
| linux-master | drivers/net/hamradio/scc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*****************************************************************************/
/*
* hdlcdrv.c -- HDLC packet radio network driver.
*
* Copyright (C) 1996-2000 Thomas Sailer ([email protected])
*
* Please note that the GPL allows you to use the driver, NOT the radio.
* In order to use the radio, you need a license from the communications
* authority of your country.
*
* The driver was derived from Donald Becker's skeleton.c
* Written 1993-94 by Donald Becker.
*
* History:
* 0.1 21.09.1996 Started
* 18.10.1996 Changed to new user space access routines
* (copy_{to,from}_user)
* 0.2 21.11.1996 various small changes
* 0.3 03.03.1997 fixed (hopefully) IP not working with ax.25 as a module
* 0.4 16.04.1997 init code/data tagged
* 0.5 30.07.1997 made HDLC buffers bigger (solves a problem with the
* soundmodem driver)
* 0.6 05.04.1998 add spinlocks
* 0.7 03.08.1999 removed some old compatibility cruft
* 0.8 12.02.2000 adapted to softnet driver interface
*/
/*****************************************************************************/
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/hdlcdrv.h>
#include <linux/random.h>
#include <net/ax25.h>
#include <linux/uaccess.h>
#include <linux/crc-ccitt.h>
/* --------------------------------------------------------------------- */
#define KISS_VERBOSE
/* --------------------------------------------------------------------- */
#define PARAM_TXDELAY 1
#define PARAM_PERSIST 2
#define PARAM_SLOTTIME 3
#define PARAM_TXTAIL 4
#define PARAM_FULLDUP 5
#define PARAM_HARDWARE 6
#define PARAM_RETURN 255
/* --------------------------------------------------------------------- */
/*
* the CRC routines are stolen from WAMPES
* by Dieter Deyke
*/
/*---------------------------------------------------------------------------*/
static inline void append_crc_ccitt(unsigned char *buffer, int len)
{
unsigned int crc = crc_ccitt(0xffff, buffer, len) ^ 0xffff;
buffer += len;
*buffer++ = crc;
*buffer++ = crc >> 8;
}
/*---------------------------------------------------------------------------*/
static inline int check_crc_ccitt(const unsigned char *buf, int cnt)
{
return (crc_ccitt(0xffff, buf, cnt) & 0xffff) == 0xf0b8;
}
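/* Note on the magic constant used above: 0xf0b8 is the well-known "good
 * FCS" residue of the CRC-CCITT used for HDLC/AX.25 frames. Running the
 * CRC over a buffer that already carries the two FCS bytes appended by
 * append_crc_ccitt() always yields this value, i.e. after
 *
 *	append_crc_ccitt(buf, len);
 *
 * check_crc_ccitt(buf, len + 2) is true.
 */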
/*---------------------------------------------------------------------------*/
#if 0
static int calc_crc_ccitt(const unsigned char *buf, int cnt)
{
unsigned int crc = 0xffff;
for (; cnt > 0; cnt--)
crc = (crc >> 8) ^ crc_ccitt_table[(crc ^ *buf++) & 0xff];
crc ^= 0xffff;
return crc & 0xffff;
}
#endif
/* ---------------------------------------------------------------------- */
#define tenms_to_2flags(s,tenms) ((tenms * s->par.bitrate) / 100 / 16)
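/* Worked example for the macro above (illustration only): tx_delay is
 * given in 10 ms steps, so at 1200 bit/s a tx_delay of 15 (150 ms) becomes
 *
 *	(15 * 1200) / 100 / 16 = 11
 *
 * i.e. eleven 0x7e7e flag pairs, roughly 147 ms on air.
 */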
/* ---------------------------------------------------------------------- */
/*
* The HDLC routines
*/
static int hdlc_rx_add_bytes(struct hdlcdrv_state *s, unsigned int bits,
int num)
{
int added = 0;
while (s->hdlcrx.rx_state && num >= 8) {
if (s->hdlcrx.len >= sizeof(s->hdlcrx.buffer)) {
s->hdlcrx.rx_state = 0;
return 0;
}
*s->hdlcrx.bp++ = bits >> (32-num);
s->hdlcrx.len++;
num -= 8;
added += 8;
}
return added;
}
static void hdlc_rx_flag(struct net_device *dev, struct hdlcdrv_state *s)
{
struct sk_buff *skb;
int pkt_len;
unsigned char *cp;
if (s->hdlcrx.len < 4)
return;
if (!check_crc_ccitt(s->hdlcrx.buffer, s->hdlcrx.len))
return;
pkt_len = s->hdlcrx.len - 2 + 1; /* KISS kludge */
if (!(skb = dev_alloc_skb(pkt_len))) {
printk("%s: memory squeeze, dropping packet\n", dev->name);
dev->stats.rx_dropped++;
return;
}
cp = skb_put(skb, pkt_len);
*cp++ = 0; /* KISS kludge */
memcpy(cp, s->hdlcrx.buffer, pkt_len - 1);
skb->protocol = ax25_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
}
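/* hdlcdrv_receiver(): de-frames the bit stream delivered by the modem.
 * Roughly: seven or more consecutive ones abort the current frame, the
 * 01111110 flag delimits frames (a completed frame is handed to
 * hdlc_rx_flag()), and a zero bit inserted after five ones (bit stuffing)
 * is removed again.
 */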
void hdlcdrv_receiver(struct net_device *dev, struct hdlcdrv_state *s)
{
int i;
unsigned int mask1, mask2, mask3, mask4, mask5, mask6, word;
if (!s || s->magic != HDLCDRV_MAGIC)
return;
if (test_and_set_bit(0, &s->hdlcrx.in_hdlc_rx))
return;
while (!hdlcdrv_hbuf_empty(&s->hdlcrx.hbuf)) {
word = hdlcdrv_hbuf_get(&s->hdlcrx.hbuf);
#ifdef HDLCDRV_DEBUG
hdlcdrv_add_bitbuffer_word(&s->bitbuf_hdlc, word);
#endif /* HDLCDRV_DEBUG */
s->hdlcrx.bitstream >>= 16;
s->hdlcrx.bitstream |= word << 16;
s->hdlcrx.bitbuf >>= 16;
s->hdlcrx.bitbuf |= word << 16;
s->hdlcrx.numbits += 16;
for(i = 15, mask1 = 0x1fc00, mask2 = 0x1fe00, mask3 = 0x0fc00,
mask4 = 0x1f800, mask5 = 0xf800, mask6 = 0xffff;
i >= 0;
i--, mask1 <<= 1, mask2 <<= 1, mask3 <<= 1, mask4 <<= 1,
mask5 <<= 1, mask6 = (mask6 << 1) | 1) {
if ((s->hdlcrx.bitstream & mask1) == mask1)
s->hdlcrx.rx_state = 0; /* abort received */
else if ((s->hdlcrx.bitstream & mask2) == mask3) {
/* flag received */
if (s->hdlcrx.rx_state) {
hdlc_rx_add_bytes(s, s->hdlcrx.bitbuf
<< (8+i),
s->hdlcrx.numbits
-8-i);
hdlc_rx_flag(dev, s);
}
s->hdlcrx.len = 0;
s->hdlcrx.bp = s->hdlcrx.buffer;
s->hdlcrx.rx_state = 1;
s->hdlcrx.numbits = i;
} else if ((s->hdlcrx.bitstream & mask4) == mask5) {
/* stuffed bit */
s->hdlcrx.numbits--;
s->hdlcrx.bitbuf = (s->hdlcrx.bitbuf & (~mask6)) |
((s->hdlcrx.bitbuf & mask6) << 1);
}
}
s->hdlcrx.numbits -= hdlc_rx_add_bytes(s, s->hdlcrx.bitbuf,
s->hdlcrx.numbits);
}
clear_bit(0, &s->hdlcrx.in_hdlc_rx);
}
/* ---------------------------------------------------------------------- */
static inline void do_kiss_params(struct hdlcdrv_state *s,
unsigned char *data, unsigned long len)
{
#ifdef KISS_VERBOSE
#define PKP(a,b) printk(KERN_INFO "hdlcdrv.c: channel params: " a "\n", b)
#else /* KISS_VERBOSE */
#define PKP(a,b)
#endif /* KISS_VERBOSE */
if (len < 2)
return;
switch(data[0]) {
case PARAM_TXDELAY:
s->ch_params.tx_delay = data[1];
PKP("TX delay = %ums", 10 * s->ch_params.tx_delay);
break;
case PARAM_PERSIST:
s->ch_params.ppersist = data[1];
PKP("p persistence = %u", s->ch_params.ppersist);
break;
case PARAM_SLOTTIME:
s->ch_params.slottime = data[1];
PKP("slot time = %ums", s->ch_params.slottime);
break;
case PARAM_TXTAIL:
s->ch_params.tx_tail = data[1];
PKP("TX tail = %ums", s->ch_params.tx_tail);
break;
case PARAM_FULLDUP:
s->ch_params.fulldup = !!data[1];
PKP("%s duplex", s->ch_params.fulldup ? "full" : "half");
break;
default:
break;
}
#undef PKP
}
/* ---------------------------------------------------------------------- */
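/* hdlcdrv_transmitter(): the transmit state machine, roughly: states 0/1
 * emit 0x7e7e flag pairs (txdelay head resp. txtail trailer), state 2
 * shifts out the frame including the CRC appended by append_crc_ccitt()
 * and inserts a zero bit after every run of five ones (bit stuffing).
 */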
void hdlcdrv_transmitter(struct net_device *dev, struct hdlcdrv_state *s)
{
unsigned int mask1, mask2, mask3;
int i;
struct sk_buff *skb;
int pkt_len;
if (!s || s->magic != HDLCDRV_MAGIC)
return;
if (test_and_set_bit(0, &s->hdlctx.in_hdlc_tx))
return;
for (;;) {
if (s->hdlctx.numbits >= 16) {
if (hdlcdrv_hbuf_full(&s->hdlctx.hbuf)) {
clear_bit(0, &s->hdlctx.in_hdlc_tx);
return;
}
hdlcdrv_hbuf_put(&s->hdlctx.hbuf, s->hdlctx.bitbuf);
s->hdlctx.bitbuf >>= 16;
s->hdlctx.numbits -= 16;
}
switch (s->hdlctx.tx_state) {
default:
clear_bit(0, &s->hdlctx.in_hdlc_tx);
return;
case 0:
case 1:
if (s->hdlctx.numflags) {
s->hdlctx.numflags--;
s->hdlctx.bitbuf |=
0x7e7e << s->hdlctx.numbits;
s->hdlctx.numbits += 16;
break;
}
if (s->hdlctx.tx_state == 1) {
clear_bit(0, &s->hdlctx.in_hdlc_tx);
return;
}
if (!(skb = s->skb)) {
int flgs = tenms_to_2flags(s, s->ch_params.tx_tail);
if (flgs < 2)
flgs = 2;
s->hdlctx.tx_state = 1;
s->hdlctx.numflags = flgs;
break;
}
s->skb = NULL;
netif_wake_queue(dev);
pkt_len = skb->len-1; /* strip KISS byte */
if (pkt_len >= HDLCDRV_MAXFLEN || pkt_len < 2) {
s->hdlctx.tx_state = 0;
s->hdlctx.numflags = 1;
dev_kfree_skb_irq(skb);
break;
}
skb_copy_from_linear_data_offset(skb, 1,
s->hdlctx.buffer,
pkt_len);
dev_kfree_skb_irq(skb);
s->hdlctx.bp = s->hdlctx.buffer;
append_crc_ccitt(s->hdlctx.buffer, pkt_len);
s->hdlctx.len = pkt_len+2; /* the appended CRC */
s->hdlctx.tx_state = 2;
s->hdlctx.bitstream = 0;
dev->stats.tx_packets++;
break;
case 2:
if (!s->hdlctx.len) {
s->hdlctx.tx_state = 0;
s->hdlctx.numflags = 1;
break;
}
s->hdlctx.len--;
s->hdlctx.bitbuf |= *s->hdlctx.bp <<
s->hdlctx.numbits;
s->hdlctx.bitstream >>= 8;
s->hdlctx.bitstream |= (*s->hdlctx.bp++) << 16;
mask1 = 0x1f000;
mask2 = 0x10000;
mask3 = 0xffffffff >> (31-s->hdlctx.numbits);
s->hdlctx.numbits += 8;
for(i = 0; i < 8; i++, mask1 <<= 1, mask2 <<= 1,
mask3 = (mask3 << 1) | 1) {
if ((s->hdlctx.bitstream & mask1) != mask1)
continue;
s->hdlctx.bitstream &= ~mask2;
s->hdlctx.bitbuf =
(s->hdlctx.bitbuf & mask3) |
((s->hdlctx.bitbuf &
(~mask3)) << 1);
s->hdlctx.numbits++;
mask3 = (mask3 << 1) | 1;
}
break;
}
}
}
/* ---------------------------------------------------------------------- */
static void start_tx(struct net_device *dev, struct hdlcdrv_state *s)
{
s->hdlctx.tx_state = 0;
s->hdlctx.numflags = tenms_to_2flags(s, s->ch_params.tx_delay);
s->hdlctx.bitbuf = s->hdlctx.bitstream = s->hdlctx.numbits = 0;
hdlcdrv_transmitter(dev, s);
s->hdlctx.ptt = 1;
s->ptt_keyed++;
}
/* ---------------------------------------------------------------------- */
void hdlcdrv_arbitrate(struct net_device *dev, struct hdlcdrv_state *s)
{
if (!s || s->magic != HDLCDRV_MAGIC || s->hdlctx.ptt || !s->skb)
return;
if (s->ch_params.fulldup) {
start_tx(dev, s);
return;
}
if (s->hdlcrx.dcd) {
s->hdlctx.slotcnt = s->ch_params.slottime;
return;
}
if ((--s->hdlctx.slotcnt) > 0)
return;
s->hdlctx.slotcnt = s->ch_params.slottime;
if (get_random_u8() > s->ch_params.ppersist)
return;
start_tx(dev, s);
}
/* --------------------------------------------------------------------- */
/*
* ===================== network driver interface =========================
*/
static netdev_tx_t hdlcdrv_send_packet(struct sk_buff *skb,
struct net_device *dev)
{
struct hdlcdrv_state *sm = netdev_priv(dev);
if (skb->protocol == htons(ETH_P_IP))
return ax25_ip_xmit(skb);
if (skb->data[0] != 0) {
do_kiss_params(sm, skb->data, skb->len);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
if (sm->skb) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
netif_stop_queue(dev);
sm->skb = skb;
return NETDEV_TX_OK;
}
/* --------------------------------------------------------------------- */
static int hdlcdrv_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = (struct sockaddr *)addr;
/* addr is an AX.25 shifted ASCII mac address */
dev_addr_set(dev, sa->sa_data);
return 0;
}
/* --------------------------------------------------------------------- */
/*
* Open/initialize the board. This is called (in the current kernel)
* sometime after booting when the 'ifconfig' program is run.
*
* This routine should set everything up anew at each open, even
* registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
*/
static int hdlcdrv_open(struct net_device *dev)
{
struct hdlcdrv_state *s = netdev_priv(dev);
int i;
if (!s->ops || !s->ops->open)
return -ENODEV;
/*
* initialise some variables
*/
s->opened = 1;
s->hdlcrx.hbuf.rd = s->hdlcrx.hbuf.wr = 0;
s->hdlcrx.in_hdlc_rx = 0;
s->hdlcrx.rx_state = 0;
s->hdlctx.hbuf.rd = s->hdlctx.hbuf.wr = 0;
s->hdlctx.in_hdlc_tx = 0;
s->hdlctx.tx_state = 1;
s->hdlctx.numflags = 0;
s->hdlctx.bitstream = s->hdlctx.bitbuf = s->hdlctx.numbits = 0;
s->hdlctx.ptt = 0;
s->hdlctx.slotcnt = s->ch_params.slottime;
s->hdlctx.calibrate = 0;
i = s->ops->open(dev);
if (i)
return i;
netif_start_queue(dev);
return 0;
}
/* --------------------------------------------------------------------- */
/*
* The inverse routine to hdlcdrv_open().
*/
static int hdlcdrv_close(struct net_device *dev)
{
struct hdlcdrv_state *s = netdev_priv(dev);
int i = 0;
netif_stop_queue(dev);
if (s->ops && s->ops->close)
i = s->ops->close(dev);
dev_kfree_skb(s->skb);
s->skb = NULL;
s->opened = 0;
return i;
}
/* --------------------------------------------------------------------- */
static int hdlcdrv_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd)
{
struct hdlcdrv_state *s = netdev_priv(dev);
struct hdlcdrv_ioctl bi;
if (cmd != SIOCDEVPRIVATE)
return -ENOIOCTLCMD;
if (in_compat_syscall()) /* to be implemented */
return -ENOIOCTLCMD;
if (copy_from_user(&bi, data, sizeof(bi)))
return -EFAULT;
switch (bi.cmd) {
default:
if (s->ops && s->ops->ioctl)
return s->ops->ioctl(dev, data, &bi, cmd);
return -ENOIOCTLCMD;
case HDLCDRVCTL_GETCHANNELPAR:
bi.data.cp.tx_delay = s->ch_params.tx_delay;
bi.data.cp.tx_tail = s->ch_params.tx_tail;
bi.data.cp.slottime = s->ch_params.slottime;
bi.data.cp.ppersist = s->ch_params.ppersist;
bi.data.cp.fulldup = s->ch_params.fulldup;
break;
case HDLCDRVCTL_SETCHANNELPAR:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
s->ch_params.tx_delay = bi.data.cp.tx_delay;
s->ch_params.tx_tail = bi.data.cp.tx_tail;
s->ch_params.slottime = bi.data.cp.slottime;
s->ch_params.ppersist = bi.data.cp.ppersist;
s->ch_params.fulldup = bi.data.cp.fulldup;
s->hdlctx.slotcnt = 1;
return 0;
case HDLCDRVCTL_GETMODEMPAR:
bi.data.mp.iobase = dev->base_addr;
bi.data.mp.irq = dev->irq;
bi.data.mp.dma = dev->dma;
bi.data.mp.dma2 = s->ptt_out.dma2;
bi.data.mp.seriobase = s->ptt_out.seriobase;
bi.data.mp.pariobase = s->ptt_out.pariobase;
bi.data.mp.midiiobase = s->ptt_out.midiiobase;
break;
case HDLCDRVCTL_SETMODEMPAR:
if ((!capable(CAP_SYS_RAWIO)) || netif_running(dev))
return -EACCES;
dev->base_addr = bi.data.mp.iobase;
dev->irq = bi.data.mp.irq;
dev->dma = bi.data.mp.dma;
s->ptt_out.dma2 = bi.data.mp.dma2;
s->ptt_out.seriobase = bi.data.mp.seriobase;
s->ptt_out.pariobase = bi.data.mp.pariobase;
s->ptt_out.midiiobase = bi.data.mp.midiiobase;
return 0;
case HDLCDRVCTL_GETSTAT:
bi.data.cs.ptt = hdlcdrv_ptt(s);
bi.data.cs.dcd = s->hdlcrx.dcd;
bi.data.cs.ptt_keyed = s->ptt_keyed;
bi.data.cs.tx_packets = dev->stats.tx_packets;
bi.data.cs.tx_errors = dev->stats.tx_errors;
bi.data.cs.rx_packets = dev->stats.rx_packets;
bi.data.cs.rx_errors = dev->stats.rx_errors;
break;
case HDLCDRVCTL_OLDGETSTAT:
bi.data.ocs.ptt = hdlcdrv_ptt(s);
bi.data.ocs.dcd = s->hdlcrx.dcd;
bi.data.ocs.ptt_keyed = s->ptt_keyed;
break;
case HDLCDRVCTL_CALIBRATE:
		if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (s->par.bitrate <= 0)
return -EINVAL;
if (bi.data.calibrate > INT_MAX / s->par.bitrate)
return -EINVAL;
s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
return 0;
case HDLCDRVCTL_GETSAMPLES:
#ifndef HDLCDRV_DEBUG
return -EPERM;
#else /* HDLCDRV_DEBUG */
if (s->bitbuf_channel.rd == s->bitbuf_channel.wr)
return -EAGAIN;
bi.data.bits =
s->bitbuf_channel.buffer[s->bitbuf_channel.rd];
s->bitbuf_channel.rd = (s->bitbuf_channel.rd+1) %
sizeof(s->bitbuf_channel.buffer);
break;
#endif /* HDLCDRV_DEBUG */
case HDLCDRVCTL_GETBITS:
#ifndef HDLCDRV_DEBUG
return -EPERM;
#else /* HDLCDRV_DEBUG */
if (s->bitbuf_hdlc.rd == s->bitbuf_hdlc.wr)
return -EAGAIN;
bi.data.bits =
s->bitbuf_hdlc.buffer[s->bitbuf_hdlc.rd];
s->bitbuf_hdlc.rd = (s->bitbuf_hdlc.rd+1) %
sizeof(s->bitbuf_hdlc.buffer);
break;
#endif /* HDLCDRV_DEBUG */
case HDLCDRVCTL_DRIVERNAME:
if (s->ops && s->ops->drvname) {
strscpy(bi.data.drivername, s->ops->drvname,
sizeof(bi.data.drivername));
break;
}
bi.data.drivername[0] = '\0';
break;
}
if (copy_to_user(data, &bi, sizeof(bi)))
return -EFAULT;
return 0;
}
/* --------------------------------------------------------------------- */
static const struct net_device_ops hdlcdrv_netdev = {
.ndo_open = hdlcdrv_open,
.ndo_stop = hdlcdrv_close,
.ndo_start_xmit = hdlcdrv_send_packet,
.ndo_siocdevprivate = hdlcdrv_siocdevprivate,
.ndo_set_mac_address = hdlcdrv_set_mac_address,
};
/*
* Initialize fields in hdlcdrv
*/
static void hdlcdrv_setup(struct net_device *dev)
{
static const struct hdlcdrv_channel_params dflt_ch_params = {
20, 2, 10, 40, 0
};
struct hdlcdrv_state *s = netdev_priv(dev);
/*
* initialize the hdlcdrv_state struct
*/
s->ch_params = dflt_ch_params;
s->ptt_keyed = 0;
spin_lock_init(&s->hdlcrx.hbuf.lock);
s->hdlcrx.hbuf.rd = s->hdlcrx.hbuf.wr = 0;
s->hdlcrx.in_hdlc_rx = 0;
s->hdlcrx.rx_state = 0;
spin_lock_init(&s->hdlctx.hbuf.lock);
s->hdlctx.hbuf.rd = s->hdlctx.hbuf.wr = 0;
s->hdlctx.in_hdlc_tx = 0;
s->hdlctx.tx_state = 1;
s->hdlctx.numflags = 0;
s->hdlctx.bitstream = s->hdlctx.bitbuf = s->hdlctx.numbits = 0;
s->hdlctx.ptt = 0;
s->hdlctx.slotcnt = s->ch_params.slottime;
s->hdlctx.calibrate = 0;
#ifdef HDLCDRV_DEBUG
s->bitbuf_channel.rd = s->bitbuf_channel.wr = 0;
s->bitbuf_channel.shreg = 0x80;
s->bitbuf_hdlc.rd = s->bitbuf_hdlc.wr = 0;
s->bitbuf_hdlc.shreg = 0x80;
#endif /* HDLCDRV_DEBUG */
/* Fill in the fields of the device structure */
s->skb = NULL;
dev->netdev_ops = &hdlcdrv_netdev;
dev->header_ops = &ax25_header_ops;
dev->type = ARPHRD_AX25; /* AF_AX25 device */
dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&ax25_defaddr);
dev->tx_queue_len = 16;
}
/* --------------------------------------------------------------------- */
struct net_device *hdlcdrv_register(const struct hdlcdrv_ops *ops,
unsigned int privsize, const char *ifname,
unsigned int baseaddr, unsigned int irq,
unsigned int dma)
{
struct net_device *dev;
struct hdlcdrv_state *s;
int err;
if (privsize < sizeof(struct hdlcdrv_state))
privsize = sizeof(struct hdlcdrv_state);
dev = alloc_netdev(privsize, ifname, NET_NAME_UNKNOWN, hdlcdrv_setup);
if (!dev)
return ERR_PTR(-ENOMEM);
/*
* initialize part of the hdlcdrv_state struct
*/
s = netdev_priv(dev);
s->magic = HDLCDRV_MAGIC;
s->ops = ops;
dev->base_addr = baseaddr;
dev->irq = irq;
dev->dma = dma;
err = register_netdev(dev);
if (err < 0) {
printk(KERN_WARNING "hdlcdrv: cannot register net "
"device %s\n", dev->name);
free_netdev(dev);
dev = ERR_PTR(err);
}
return dev;
}
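/*
 * Usage sketch for hdlcdrv_register() (a hypothetical "foo" modem driver,
 * not taken from an in-tree user): the hardware driver supplies an ops
 * table plus the size of its own state structure, which conventionally
 * embeds struct hdlcdrv_state as its first member:
 *
 *	static const struct hdlcdrv_ops foo_ops = {
 *		.drvname = "foo",
 *		.open    = foo_open,
 *		.close   = foo_close,
 *		.ioctl   = foo_ioctl,
 *	};
 *
 *	dev = hdlcdrv_register(&foo_ops, sizeof(struct foo_state),
 *			       "foo%d", iobase, irq, dma);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *
 * and later tears everything down again with hdlcdrv_unregister(dev).
 */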
/* --------------------------------------------------------------------- */
void hdlcdrv_unregister(struct net_device *dev)
{
struct hdlcdrv_state *s = netdev_priv(dev);
BUG_ON(s->magic != HDLCDRV_MAGIC);
if (s->opened && s->ops->close)
s->ops->close(dev);
unregister_netdev(dev);
free_netdev(dev);
}
/* --------------------------------------------------------------------- */
EXPORT_SYMBOL(hdlcdrv_receiver);
EXPORT_SYMBOL(hdlcdrv_transmitter);
EXPORT_SYMBOL(hdlcdrv_arbitrate);
EXPORT_SYMBOL(hdlcdrv_register);
EXPORT_SYMBOL(hdlcdrv_unregister);
/* --------------------------------------------------------------------- */
static int __init hdlcdrv_init_driver(void)
{
printk(KERN_INFO "hdlcdrv: (C) 1996-2000 Thomas Sailer HB9JNX/AE4WA\n");
printk(KERN_INFO "hdlcdrv: version 0.8\n");
return 0;
}
/* --------------------------------------------------------------------- */
static void __exit hdlcdrv_cleanup_driver(void)
{
printk(KERN_INFO "hdlcdrv: cleanup\n");
}
/* --------------------------------------------------------------------- */
MODULE_AUTHOR("Thomas M. Sailer, [email protected], [email protected]");
MODULE_DESCRIPTION("Packet Radio network interface HDLC encoder/decoder");
MODULE_LICENSE("GPL");
module_init(hdlcdrv_init_driver);
module_exit(hdlcdrv_cleanup_driver);
/* --------------------------------------------------------------------- */
| linux-master | drivers/net/hamradio/hdlcdrv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) Hans Alblas PE1AYX <[email protected]>
* Copyright (C) 2004, 05 Ralf Baechle DL5RB <[email protected]>
* Copyright (C) 2004, 05 Thomas Osterried DL9SAU <[email protected]>
*/
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/crc16.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/major.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/jiffies.h>
#include <linux/refcount.h>
#include <net/ax25.h>
#define AX_MTU 236
/* some arch define END as assembly function ending, just undef it */
#undef END
/* SLIP/KISS protocol characters. */
#define END 0300 /* indicates end of frame */
#define ESC 0333 /* indicates byte stuffing */
#define ESC_END 0334 /* ESC ESC_END means END 'data' */
#define ESC_ESC 0335 /* ESC ESC_ESC means ESC 'data' */
struct mkiss {
struct tty_struct *tty; /* ptr to TTY structure */
struct net_device *dev; /* easy for intr handling */
/* These are pointers to the malloc()ed frame buffers. */
spinlock_t buflock;/* lock for rbuf and xbuf */
unsigned char *rbuff; /* receiver buffer */
int rcount; /* received chars counter */
unsigned char *xbuff; /* transmitter buffer */
unsigned char *xhead; /* pointer to next byte to XMIT */
int xleft; /* bytes left in XMIT queue */
/* Detailed SLIP statistics. */
int mtu; /* Our mtu (to spot changes!) */
int buffsize; /* Max buffers sizes */
unsigned long flags; /* Flag values/ mode etc */
/* long req'd: used by set_bit --RR */
#define AXF_INUSE 0 /* Channel in use */
#define AXF_ESCAPE 1 /* ESC received */
#define AXF_ERROR 2 /* Parity, etc. error */
#define AXF_KEEPTEST 3 /* Keepalive test flag */
#define AXF_OUTWAIT 4 /* an output packet is waiting */
int mode;
int crcmode; /* MW: for FlexNet, SMACK etc. */
int crcauto; /* CRC auto mode */
#define CRC_MODE_NONE 0
#define CRC_MODE_FLEX 1
#define CRC_MODE_SMACK 2
#define CRC_MODE_FLEX_TEST 3
#define CRC_MODE_SMACK_TEST 4
refcount_t refcnt;
struct completion dead;
};
/*---------------------------------------------------------------------------*/
static const unsigned short crc_flex_table[] = {
0x0f87, 0x1e0e, 0x2c95, 0x3d1c, 0x49a3, 0x582a, 0x6ab1, 0x7b38,
0x83cf, 0x9246, 0xa0dd, 0xb154, 0xc5eb, 0xd462, 0xe6f9, 0xf770,
0x1f06, 0x0e8f, 0x3c14, 0x2d9d, 0x5922, 0x48ab, 0x7a30, 0x6bb9,
0x934e, 0x82c7, 0xb05c, 0xa1d5, 0xd56a, 0xc4e3, 0xf678, 0xe7f1,
0x2e85, 0x3f0c, 0x0d97, 0x1c1e, 0x68a1, 0x7928, 0x4bb3, 0x5a3a,
0xa2cd, 0xb344, 0x81df, 0x9056, 0xe4e9, 0xf560, 0xc7fb, 0xd672,
0x3e04, 0x2f8d, 0x1d16, 0x0c9f, 0x7820, 0x69a9, 0x5b32, 0x4abb,
0xb24c, 0xa3c5, 0x915e, 0x80d7, 0xf468, 0xe5e1, 0xd77a, 0xc6f3,
0x4d83, 0x5c0a, 0x6e91, 0x7f18, 0x0ba7, 0x1a2e, 0x28b5, 0x393c,
0xc1cb, 0xd042, 0xe2d9, 0xf350, 0x87ef, 0x9666, 0xa4fd, 0xb574,
0x5d02, 0x4c8b, 0x7e10, 0x6f99, 0x1b26, 0x0aaf, 0x3834, 0x29bd,
0xd14a, 0xc0c3, 0xf258, 0xe3d1, 0x976e, 0x86e7, 0xb47c, 0xa5f5,
0x6c81, 0x7d08, 0x4f93, 0x5e1a, 0x2aa5, 0x3b2c, 0x09b7, 0x183e,
0xe0c9, 0xf140, 0xc3db, 0xd252, 0xa6ed, 0xb764, 0x85ff, 0x9476,
0x7c00, 0x6d89, 0x5f12, 0x4e9b, 0x3a24, 0x2bad, 0x1936, 0x08bf,
0xf048, 0xe1c1, 0xd35a, 0xc2d3, 0xb66c, 0xa7e5, 0x957e, 0x84f7,
0x8b8f, 0x9a06, 0xa89d, 0xb914, 0xcdab, 0xdc22, 0xeeb9, 0xff30,
0x07c7, 0x164e, 0x24d5, 0x355c, 0x41e3, 0x506a, 0x62f1, 0x7378,
0x9b0e, 0x8a87, 0xb81c, 0xa995, 0xdd2a, 0xcca3, 0xfe38, 0xefb1,
0x1746, 0x06cf, 0x3454, 0x25dd, 0x5162, 0x40eb, 0x7270, 0x63f9,
0xaa8d, 0xbb04, 0x899f, 0x9816, 0xeca9, 0xfd20, 0xcfbb, 0xde32,
0x26c5, 0x374c, 0x05d7, 0x145e, 0x60e1, 0x7168, 0x43f3, 0x527a,
0xba0c, 0xab85, 0x991e, 0x8897, 0xfc28, 0xeda1, 0xdf3a, 0xceb3,
0x3644, 0x27cd, 0x1556, 0x04df, 0x7060, 0x61e9, 0x5372, 0x42fb,
0xc98b, 0xd802, 0xea99, 0xfb10, 0x8faf, 0x9e26, 0xacbd, 0xbd34,
0x45c3, 0x544a, 0x66d1, 0x7758, 0x03e7, 0x126e, 0x20f5, 0x317c,
0xd90a, 0xc883, 0xfa18, 0xeb91, 0x9f2e, 0x8ea7, 0xbc3c, 0xadb5,
0x5542, 0x44cb, 0x7650, 0x67d9, 0x1366, 0x02ef, 0x3074, 0x21fd,
0xe889, 0xf900, 0xcb9b, 0xda12, 0xaead, 0xbf24, 0x8dbf, 0x9c36,
0x64c1, 0x7548, 0x47d3, 0x565a, 0x22e5, 0x336c, 0x01f7, 0x107e,
0xf808, 0xe981, 0xdb1a, 0xca93, 0xbe2c, 0xafa5, 0x9d3e, 0x8cb7,
0x7440, 0x65c9, 0x5752, 0x46db, 0x3264, 0x23ed, 0x1176, 0x00ff
};
static unsigned short calc_crc_flex(unsigned char *cp, int size)
{
unsigned short crc = 0xffff;
while (size--)
crc = (crc << 8) ^ crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
return crc;
}
static int check_crc_flex(unsigned char *cp, int size)
{
unsigned short crc = 0xffff;
if (size < 3)
return -1;
while (size--)
crc = (crc << 8) ^ crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
if ((crc & 0xffff) != 0x7070)
return -1;
return 0;
}
static int check_crc_16(unsigned char *cp, int size)
{
unsigned short crc = 0x0000;
if (size < 3)
return -1;
crc = crc16(0, cp, size);
if (crc != 0x0000)
return -1;
return 0;
}
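/*
 * A note on the two residue checks above (illustrative sketch): both run the
 * CRC over the payload *and* the trailing CRC bytes and test for a fixed
 * remainder - 0x7070 for FlexNet, 0 for the reflected CRC-16 used by SMACK.
 * A SMACK frame that passes check_crc_16() is built roughly like this
 * (mirroring what ax_encaps()/kiss_esc_crc() below do before escaping):
 *
 *	u16 crc = crc16(0, buf, dlen);
 *	buf[dlen]     = crc & 0xff;	(low byte goes first on the wire)
 *	buf[dlen + 1] = crc >> 8;
 *
 * after which crc16(0, buf, dlen + 2) is 0 and check_crc_16(buf, dlen + 2)
 * returns 0.
 */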
/*
* Standard encapsulation
*/
static int kiss_esc(unsigned char *s, unsigned char *d, int len)
{
unsigned char *ptr = d;
unsigned char c;
/*
* Send an initial END character to flush out any data that may have
* accumulated in the receiver due to line noise.
*/
*ptr++ = END;
while (len-- > 0) {
switch (c = *s++) {
case END:
*ptr++ = ESC;
*ptr++ = ESC_END;
break;
case ESC:
*ptr++ = ESC;
*ptr++ = ESC_ESC;
break;
default:
*ptr++ = c;
break;
}
}
*ptr++ = END;
return ptr - d;
}
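/*
 * Worked example for kiss_esc() (hypothetical three-byte frame): the input
 * { 0x00, 0xc0, 0xdb } - a data-frame command byte followed by one in-band
 * END and one in-band ESC - is encoded as
 *
 *	c0 00 db dc db dd c0
 *
 * i.e. the frame is delimited by END (0xc0) on both sides, END in the
 * payload becomes ESC ESC_END and ESC becomes ESC ESC_ESC, and the return
 * value is the escaped length (7 here).  In the worst case the output is
 * 2 * len + 2 bytes long.
 */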
/*
* MW:
 * OK, it's ugly, but tell me a better solution without copying the
* packet to a temporary buffer :-)
*/
static int kiss_esc_crc(unsigned char *s, unsigned char *d, unsigned short crc,
int len)
{
unsigned char *ptr = d;
unsigned char c=0;
*ptr++ = END;
while (len > 0) {
if (len > 2)
c = *s++;
else if (len > 1)
c = crc >> 8;
else
c = crc & 0xff;
len--;
switch (c) {
case END:
*ptr++ = ESC;
*ptr++ = ESC_END;
break;
case ESC:
*ptr++ = ESC;
*ptr++ = ESC_ESC;
break;
default:
*ptr++ = c;
break;
}
}
*ptr++ = END;
return ptr - d;
}
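/*
 * The only difference from kiss_esc(): the caller passes len two larger than
 * the payload and the final two positions are taken from the crc argument,
 * most significant byte of the passed value first.  ax_encaps() below relies
 * on that - for SMACK it hands in swab16(crc16(...)) so the real CRC-16 goes
 * out low byte first, while the FlexNet CRC is passed unswapped and goes out
 * high byte first.
 */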
/* Send one completely decapsulated AX.25 packet to the AX.25 layer. */
static void ax_bump(struct mkiss *ax)
{
struct sk_buff *skb;
int count;
spin_lock_bh(&ax->buflock);
if (ax->rbuff[0] > 0x0f) {
if (ax->rbuff[0] & 0x80) {
if (check_crc_16(ax->rbuff, ax->rcount) < 0) {
ax->dev->stats.rx_errors++;
spin_unlock_bh(&ax->buflock);
return;
}
if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
printk(KERN_INFO
"mkiss: %s: Switching to crc-smack\n",
ax->dev->name);
ax->crcmode = CRC_MODE_SMACK;
}
ax->rcount -= 2;
*ax->rbuff &= ~0x80;
} else if (ax->rbuff[0] & 0x20) {
if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
ax->dev->stats.rx_errors++;
spin_unlock_bh(&ax->buflock);
return;
}
if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
printk(KERN_INFO
"mkiss: %s: Switching to crc-flexnet\n",
ax->dev->name);
ax->crcmode = CRC_MODE_FLEX;
}
ax->rcount -= 2;
/*
			 * dl9sau bugfix: the trailing two FlexNet CRC bytes
			 * will not be passed to the kernel, so we have to
			 * correct the kissparm signature, because it
			 * indicates a CRC although none is left
*/
*ax->rbuff &= ~0x20;
}
}
count = ax->rcount;
if ((skb = dev_alloc_skb(count)) == NULL) {
printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n",
ax->dev->name);
ax->dev->stats.rx_dropped++;
spin_unlock_bh(&ax->buflock);
return;
}
skb_put_data(skb, ax->rbuff, count);
skb->protocol = ax25_type_trans(skb, ax->dev);
netif_rx(skb);
ax->dev->stats.rx_packets++;
ax->dev->stats.rx_bytes += count;
spin_unlock_bh(&ax->buflock);
}
static void kiss_unesc(struct mkiss *ax, unsigned char s)
{
switch (s) {
case END:
/* drop keeptest bit = VSV */
if (test_bit(AXF_KEEPTEST, &ax->flags))
clear_bit(AXF_KEEPTEST, &ax->flags);
if (!test_and_clear_bit(AXF_ERROR, &ax->flags) && (ax->rcount > 2))
ax_bump(ax);
clear_bit(AXF_ESCAPE, &ax->flags);
ax->rcount = 0;
return;
case ESC:
set_bit(AXF_ESCAPE, &ax->flags);
return;
case ESC_ESC:
if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
s = ESC;
break;
case ESC_END:
if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
s = END;
break;
}
spin_lock_bh(&ax->buflock);
if (!test_bit(AXF_ERROR, &ax->flags)) {
if (ax->rcount < ax->buffsize) {
ax->rbuff[ax->rcount++] = s;
spin_unlock_bh(&ax->buflock);
return;
}
ax->dev->stats.rx_over_errors++;
set_bit(AXF_ERROR, &ax->flags);
}
spin_unlock_bh(&ax->buflock);
}
static int ax_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr_ax25 *sa = addr;
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
__dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
return 0;
}
/*---------------------------------------------------------------------------*/
static void ax_changedmtu(struct mkiss *ax)
{
struct net_device *dev = ax->dev;
unsigned char *xbuff, *rbuff, *oxbuff, *orbuff;
int len;
len = dev->mtu * 2;
/*
	 * allow for arrival of larger UDP packets, even if we say not to;
	 * this also works around a bug in which SunOS sends 512-byte packets
	 * even with an MSS of 128
*/
if (len < 576 * 2)
len = 576 * 2;
xbuff = kmalloc(len + 4, GFP_ATOMIC);
rbuff = kmalloc(len + 4, GFP_ATOMIC);
if (xbuff == NULL || rbuff == NULL) {
printk(KERN_ERR "mkiss: %s: unable to grow ax25 buffers, "
"MTU change cancelled.\n",
ax->dev->name);
dev->mtu = ax->mtu;
kfree(xbuff);
kfree(rbuff);
return;
}
spin_lock_bh(&ax->buflock);
oxbuff = ax->xbuff;
ax->xbuff = xbuff;
orbuff = ax->rbuff;
ax->rbuff = rbuff;
if (ax->xleft) {
if (ax->xleft <= len) {
memcpy(ax->xbuff, ax->xhead, ax->xleft);
} else {
ax->xleft = 0;
dev->stats.tx_dropped++;
}
}
ax->xhead = ax->xbuff;
if (ax->rcount) {
if (ax->rcount <= len) {
memcpy(ax->rbuff, orbuff, ax->rcount);
} else {
ax->rcount = 0;
dev->stats.rx_over_errors++;
set_bit(AXF_ERROR, &ax->flags);
}
}
ax->mtu = dev->mtu + 73;
ax->buffsize = len;
spin_unlock_bh(&ax->buflock);
kfree(oxbuff);
kfree(orbuff);
}
/* Encapsulate one AX.25 packet and stuff into a TTY queue. */
static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
{
struct mkiss *ax = netdev_priv(dev);
unsigned char *p;
int actual, count;
if (ax->mtu != ax->dev->mtu + 73) /* Someone has been ifconfigging */
ax_changedmtu(ax);
if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */
printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name);
dev->stats.tx_dropped++;
netif_start_queue(dev);
return;
}
p = icp;
spin_lock_bh(&ax->buflock);
if ((*p & 0x0f) != 0) {
		/* Configuration command (kissparms(1)).
* Protocol spec says: never append CRC.
* This fixes a very old bug in the linux
* kiss driver. -- dl9sau */
switch (*p & 0xff) {
case 0x85:
/* command from userspace especially for us,
* not for delivery to the tnc */
if (len > 1) {
int cmd = (p[1] & 0xff);
switch(cmd) {
case 3:
ax->crcmode = CRC_MODE_SMACK;
break;
case 2:
ax->crcmode = CRC_MODE_FLEX;
break;
case 1:
ax->crcmode = CRC_MODE_NONE;
break;
case 0:
default:
ax->crcmode = CRC_MODE_SMACK_TEST;
cmd = 0;
}
ax->crcauto = (cmd ? 0 : 1);
printk(KERN_INFO "mkiss: %s: crc mode set to %d\n",
ax->dev->name, cmd);
}
spin_unlock_bh(&ax->buflock);
netif_start_queue(dev);
return;
default:
count = kiss_esc(p, ax->xbuff, len);
}
} else {
unsigned short crc;
switch (ax->crcmode) {
case CRC_MODE_SMACK_TEST:
ax->crcmode = CRC_MODE_FLEX_TEST;
printk(KERN_INFO "mkiss: %s: Trying crc-smack\n", ax->dev->name);
fallthrough;
case CRC_MODE_SMACK:
*p |= 0x80;
crc = swab16(crc16(0, p, len));
count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
break;
case CRC_MODE_FLEX_TEST:
ax->crcmode = CRC_MODE_NONE;
printk(KERN_INFO "mkiss: %s: Trying crc-flexnet\n", ax->dev->name);
fallthrough;
case CRC_MODE_FLEX:
*p |= 0x20;
crc = calc_crc_flex(p, len);
count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
break;
default:
count = kiss_esc(p, ax->xbuff, len);
}
}
spin_unlock_bh(&ax->buflock);
set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
actual = ax->tty->ops->write(ax->tty, ax->xbuff, count);
dev->stats.tx_packets++;
dev->stats.tx_bytes += actual;
netif_trans_update(ax->dev);
ax->xleft = count - actual;
ax->xhead = ax->xbuff + actual;
}
/* Encapsulate an AX.25 packet and kick it into a TTY queue. */
static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mkiss *ax = netdev_priv(dev);
if (skb->protocol == htons(ETH_P_IP))
return ax25_ip_xmit(skb);
if (!netif_running(dev)) {
printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name);
return NETDEV_TX_BUSY;
}
if (netif_queue_stopped(dev)) {
/*
		 * Maybe we should check the transmitter timeout here?
* 14 Oct 1994 Dmitry Gorodchanin.
*/
if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
/* 20 sec timeout not reached */
return NETDEV_TX_BUSY;
}
printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name,
(tty_chars_in_buffer(ax->tty) || ax->xleft) ?
"bad line quality" : "driver error");
ax->xleft = 0;
clear_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
netif_start_queue(dev);
}
/* We were not busy, so we are now... :-) */
netif_stop_queue(dev);
ax_encaps(dev, skb->data, skb->len);
kfree_skb(skb);
return NETDEV_TX_OK;
}
static int ax_open_dev(struct net_device *dev)
{
struct mkiss *ax = netdev_priv(dev);
if (ax->tty == NULL)
return -ENODEV;
return 0;
}
/* Open the low-level part of the AX25 channel. Easy! */
static int ax_open(struct net_device *dev)
{
struct mkiss *ax = netdev_priv(dev);
unsigned long len;
if (ax->tty == NULL)
return -ENODEV;
/*
* Allocate the frame buffers:
*
* rbuff Receive buffer.
* xbuff Transmit buffer.
*/
len = dev->mtu * 2;
/*
	 * allow for arrival of larger UDP packets, even if we say not to;
	 * this also works around a bug in which SunOS sends 512-byte packets
	 * even with an MSS of 128
*/
if (len < 576 * 2)
len = 576 * 2;
if ((ax->rbuff = kmalloc(len + 4, GFP_KERNEL)) == NULL)
goto norbuff;
if ((ax->xbuff = kmalloc(len + 4, GFP_KERNEL)) == NULL)
goto noxbuff;
ax->mtu = dev->mtu + 73;
ax->buffsize = len;
ax->rcount = 0;
ax->xleft = 0;
ax->flags &= (1 << AXF_INUSE); /* Clear ESCAPE & ERROR flags */
spin_lock_init(&ax->buflock);
return 0;
noxbuff:
kfree(ax->rbuff);
norbuff:
return -ENOMEM;
}
/* Close the low-level part of the AX25 channel. Easy! */
static int ax_close(struct net_device *dev)
{
struct mkiss *ax = netdev_priv(dev);
if (ax->tty)
clear_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
netif_stop_queue(dev);
return 0;
}
static const struct net_device_ops ax_netdev_ops = {
.ndo_open = ax_open_dev,
.ndo_stop = ax_close,
.ndo_start_xmit = ax_xmit,
.ndo_set_mac_address = ax_set_mac_address,
};
static void ax_setup(struct net_device *dev)
{
/* Finish setting up the DEVICE info. */
dev->mtu = AX_MTU;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_AX25;
dev->tx_queue_len = 10;
dev->header_ops = &ax25_header_ops;
dev->netdev_ops = &ax_netdev_ops;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&ax25_defaddr);
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
}
/*
* We have a potential race on dereferencing tty->disc_data, because the tty
* layer provides no locking at all - thus one cpu could be running
 * mkiss_receive_buf while another calls mkiss_close, which zeroes
 * tty->disc_data and frees the memory that mkiss_receive_buf is using.  The
 * best way to fix this is to use a rwlock in the tty struct, but for now we
 * use a single global rwlock for all ttys using this line discipline.
*/
static DEFINE_RWLOCK(disc_data_lock);
static struct mkiss *mkiss_get(struct tty_struct *tty)
{
struct mkiss *ax;
read_lock(&disc_data_lock);
ax = tty->disc_data;
if (ax)
refcount_inc(&ax->refcnt);
read_unlock(&disc_data_lock);
return ax;
}
static void mkiss_put(struct mkiss *ax)
{
if (refcount_dec_and_test(&ax->refcnt))
complete(&ax->dead);
}
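/*
 * Sketch of how the two helpers above are used by the callers further down
 * in this file (mkiss_ioctl, mkiss_receive_buf, mkiss_write_wakeup):
 *
 *	struct mkiss *ax = mkiss_get(tty);	(takes a reference, may be NULL)
 *
 *	if (!ax)
 *		return;
 *	... use ax and ax->dev ...
 *	mkiss_put(ax);
 *
 * mkiss_close() drops the initial reference and, if other users are still
 * active, sleeps on ax->dead until the final mkiss_put() completes it, so
 * the mkiss state cannot be freed under a concurrent caller.
 */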
static int crc_force = 0; /* Can be overridden with insmod */
static int mkiss_open(struct tty_struct *tty)
{
struct net_device *dev;
struct mkiss *ax;
int err;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
dev = alloc_netdev(sizeof(struct mkiss), "ax%d", NET_NAME_UNKNOWN,
ax_setup);
if (!dev) {
err = -ENOMEM;
goto out;
}
ax = netdev_priv(dev);
ax->dev = dev;
spin_lock_init(&ax->buflock);
refcount_set(&ax->refcnt, 1);
init_completion(&ax->dead);
ax->tty = tty;
tty->disc_data = ax;
tty->receive_room = 65535;
tty_driver_flush_buffer(tty);
/* Restore default settings */
dev->type = ARPHRD_AX25;
/* Perform the low-level AX25 initialization. */
err = ax_open(ax->dev);
if (err)
goto out_free_netdev;
err = register_netdev(dev);
if (err)
goto out_free_buffers;
	/* after register_netdev() - otherwise the printk calls would smash the kernel */
switch (crc_force) {
case 3:
ax->crcmode = CRC_MODE_SMACK;
printk(KERN_INFO "mkiss: %s: crc mode smack forced.\n",
ax->dev->name);
break;
case 2:
ax->crcmode = CRC_MODE_FLEX;
printk(KERN_INFO "mkiss: %s: crc mode flexnet forced.\n",
ax->dev->name);
break;
case 1:
ax->crcmode = CRC_MODE_NONE;
printk(KERN_INFO "mkiss: %s: crc mode disabled.\n",
ax->dev->name);
break;
case 0:
default:
crc_force = 0;
printk(KERN_INFO "mkiss: %s: crc mode is auto.\n",
ax->dev->name);
ax->crcmode = CRC_MODE_SMACK_TEST;
}
ax->crcauto = (crc_force ? 0 : 1);
netif_start_queue(dev);
/* Done. We have linked the TTY line to a channel. */
return 0;
out_free_buffers:
kfree(ax->rbuff);
kfree(ax->xbuff);
out_free_netdev:
free_netdev(dev);
out:
return err;
}
static void mkiss_close(struct tty_struct *tty)
{
struct mkiss *ax;
write_lock_irq(&disc_data_lock);
ax = tty->disc_data;
tty->disc_data = NULL;
write_unlock_irq(&disc_data_lock);
if (!ax)
return;
/*
	 * We have now ensured that nobody can start using ax from now on, but
* we have to wait for all existing users to finish.
*/
if (!refcount_dec_and_test(&ax->refcnt))
wait_for_completion(&ax->dead);
/*
* Halt the transmit queue so that a new transmit cannot scribble
* on our buffers
*/
netif_stop_queue(ax->dev);
unregister_netdev(ax->dev);
/* Free all AX25 frame buffers after unreg. */
kfree(ax->rbuff);
kfree(ax->xbuff);
ax->tty = NULL;
free_netdev(ax->dev);
}
/* Perform I/O control on an active ax25 channel. */
static int mkiss_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct mkiss *ax = mkiss_get(tty);
struct net_device *dev;
unsigned int tmp, err;
/* First make sure we're connected. */
if (ax == NULL)
return -ENXIO;
dev = ax->dev;
switch (cmd) {
case SIOCGIFNAME:
err = copy_to_user((void __user *) arg, ax->dev->name,
strlen(ax->dev->name) + 1) ? -EFAULT : 0;
break;
case SIOCGIFENCAP:
err = put_user(4, (int __user *) arg);
break;
case SIOCSIFENCAP:
if (get_user(tmp, (int __user *) arg)) {
err = -EFAULT;
break;
}
ax->mode = tmp;
dev->addr_len = AX25_ADDR_LEN;
dev->hard_header_len = AX25_KISS_HEADER_LEN +
AX25_MAX_HEADER_LEN + 3;
dev->type = ARPHRD_AX25;
err = 0;
break;
case SIOCSIFHWADDR: {
char addr[AX25_ADDR_LEN];
if (copy_from_user(&addr,
(void __user *) arg, AX25_ADDR_LEN)) {
err = -EFAULT;
break;
}
netif_tx_lock_bh(dev);
__dev_addr_set(dev, addr, AX25_ADDR_LEN);
netif_tx_unlock_bh(dev);
err = 0;
break;
}
default:
err = -ENOIOCTLCMD;
}
mkiss_put(ax);
return err;
}
/*
* Handle the 'receiver data ready' interrupt.
* This function is called by the 'tty_io' module in the kernel when
* a block of data has been received, which can now be decapsulated
* and sent on to the AX.25 layer for further processing.
*/
static void mkiss_receive_buf(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
struct mkiss *ax = mkiss_get(tty);
if (!ax)
return;
/*
* Argh! mtu change time! - costs us the packet part received
* at the change
*/
if (ax->mtu != ax->dev->mtu + 73)
ax_changedmtu(ax);
/* Read the characters out of the buffer */
while (count--) {
if (fp != NULL && *fp++) {
if (!test_and_set_bit(AXF_ERROR, &ax->flags))
ax->dev->stats.rx_errors++;
cp++;
continue;
}
kiss_unesc(ax, *cp++);
}
mkiss_put(ax);
tty_unthrottle(tty);
}
/*
* Called by the driver when there's room for more data. If we have
* more packets to send, we send them here.
*/
static void mkiss_write_wakeup(struct tty_struct *tty)
{
struct mkiss *ax = mkiss_get(tty);
int actual;
if (!ax)
return;
if (ax->xleft <= 0) {
/* Now serial buffer is almost free & we can start
* transmission of another packet
*/
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
netif_wake_queue(ax->dev);
goto out;
}
actual = tty->ops->write(tty, ax->xhead, ax->xleft);
ax->xleft -= actual;
ax->xhead += actual;
out:
mkiss_put(ax);
}
static struct tty_ldisc_ops ax_ldisc = {
.owner = THIS_MODULE,
.num = N_AX25,
.name = "mkiss",
.open = mkiss_open,
.close = mkiss_close,
.ioctl = mkiss_ioctl,
.receive_buf = mkiss_receive_buf,
.write_wakeup = mkiss_write_wakeup
};
static const char banner[] __initconst = KERN_INFO \
"mkiss: AX.25 Multikiss, Hans Albas PE1AYX\n";
static const char msg_regfail[] __initconst = KERN_ERR \
"mkiss: can't register line discipline (err = %d)\n";
static int __init mkiss_init_driver(void)
{
int status;
printk(banner);
status = tty_register_ldisc(&ax_ldisc);
if (status != 0)
printk(msg_regfail, status);
return status;
}
static void __exit mkiss_exit_driver(void)
{
tty_unregister_ldisc(&ax_ldisc);
}
MODULE_AUTHOR("Ralf Baechle DL5RB <[email protected]>");
MODULE_DESCRIPTION("KISS driver for AX.25 over TTYs");
module_param(crc_force, int, 0);
MODULE_PARM_DESC(crc_force, "crc [0 = auto | 1 = none | 2 = flexnet | 3 = smack]");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_AX25);
module_init(mkiss_init_driver);
module_exit(mkiss_exit_driver);
| linux-master | drivers/net/hamradio/mkiss.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* G8BPQ compatible "AX.25 via ethernet" driver release 004
*
* This code REQUIRES 2.0.0 or higher/ NET3.029
*
* This is a "pseudo" network driver to allow AX.25 over Ethernet
* using G8BPQ encapsulation. It has been extracted from the protocol
* implementation because
*
* - things got unreadable within the protocol stack
* - to cure the protocol stack from "feature-ism"
* - a protocol implementation shouldn't need to know on
* which hardware it is running
* - user-level programs like the AX.25 utilities shouldn't
* need to know about the hardware.
* - IP over ethernet encapsulated AX.25 was impossible
* - rxecho.c did not work
* - to have room for extensions
* - it just deserves to "live" as an own driver
*
* This driver can use any ethernet destination address, and can be
* limited to accept frames from one dedicated ethernet card only.
*
* Note that the driver sets up the BPQ devices automagically on
* startup or (if started before the "insmod" of an ethernet device)
* on "ifconfig up". It hopefully will remove the BPQ on "rmmod"ing
* the ethernet device (in fact: as soon as another ethernet or bpq
* device gets "ifconfig"ured).
*
* I have heard that several people are thinking of experiments
* with highspeed packet radio using existing ethernet cards.
* Well, this driver is prepared for this purpose, just add
* your tx key control and a txdelay / tailtime algorithm,
* probably some buffering, and /voila/...
*
* History
* BPQ 001 Joerg(DL1BKE) Extracted BPQ code from AX.25
* protocol stack and added my own
* yet existing patches
* BPQ 002 Joerg(DL1BKE) Scan network device list on
* startup.
* BPQ 003 Joerg(DL1BKE) Ethernet destination address
* and accepted source address
* can be configured by an ioctl()
* call.
* Fixed to match Linux networking
* changes - 2.1.15.
* BPQ 004 Joerg(DL1BKE) Fixed to not lock up on ifconfig.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <net/ip.h>
#include <net/arp.h>
#include <net/net_namespace.h>
#include <linux/bpqether.h>
static const char banner[] __initconst = KERN_INFO \
"AX.25: bpqether driver version 004\n";
static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
static int bpq_device_event(struct notifier_block *, unsigned long, void *);
static struct packet_type bpq_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_BPQ),
.func = bpq_rcv,
};
static struct notifier_block bpq_dev_notifier = {
.notifier_call = bpq_device_event,
};
struct bpqdev {
struct list_head bpq_list; /* list of bpq devices chain */
struct net_device *ethdev; /* link to ethernet device */
struct net_device *axdev; /* bpq device (bpq#) */
char dest_addr[6]; /* ether destination address */
char acpt_addr[6]; /* accept ether frames from this address only */
};
static LIST_HEAD(bpq_devices);
/*
* bpqether network devices are paired with ethernet devices below them, so
* form a special "super class" of normal ethernet devices; split their locks
* off into a separate class since they always nest.
*/
static struct lock_class_key bpq_netdev_xmit_lock_key;
static struct lock_class_key bpq_netdev_addr_lock_key;
static void bpq_set_lockdep_class_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
{
lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
}
static void bpq_set_lockdep_class(struct net_device *dev)
{
lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
}
/* ------------------------------------------------------------------------ */
/*
* Get the ethernet device for a BPQ device
*/
static inline struct net_device *bpq_get_ether_dev(struct net_device *dev)
{
struct bpqdev *bpq = netdev_priv(dev);
return bpq ? bpq->ethdev : NULL;
}
/*
* Get the BPQ device for the ethernet device
*/
static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
{
struct bpqdev *bpq;
list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list,
lockdep_rtnl_is_held()) {
if (bpq->ethdev == dev)
return bpq->axdev;
}
return NULL;
}
static inline int dev_is_ethdev(struct net_device *dev)
{
return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
}
/* ------------------------------------------------------------------------ */
/*
* Receive an AX.25 frame via an ethernet interface.
*/
static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
{
int len;
char * ptr;
struct ethhdr *eth;
struct bpqdev *bpq;
if (!net_eq(dev_net(dev), &init_net))
goto drop;
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
return NET_RX_DROP;
if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
goto drop;
rcu_read_lock();
dev = bpq_get_ax25_dev(dev);
if (dev == NULL || !netif_running(dev))
goto drop_unlock;
/*
* if we want to accept frames from just one ethernet device
* we check the source address of the sender.
*/
bpq = netdev_priv(dev);
eth = eth_hdr(skb);
if (!(bpq->acpt_addr[0] & 0x01) &&
!ether_addr_equal(eth->h_source, bpq->acpt_addr))
goto drop_unlock;
if (skb_cow(skb, sizeof(struct ethhdr)))
goto drop_unlock;
len = skb->data[0] + skb->data[1] * 256 - 5;
skb_pull(skb, 2); /* Remove the length bytes */
skb_trim(skb, len); /* Set the length of the data */
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
ptr = skb_push(skb, 1);
*ptr = 0;
skb->protocol = ax25_type_trans(skb, dev);
netif_rx(skb);
unlock:
rcu_read_unlock();
return 0;
drop_unlock:
kfree_skb(skb);
goto unlock;
drop:
kfree_skb(skb);
return 0;
}
/*
* Send an AX.25 frame via an ethernet interface
*/
static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned char *ptr;
struct bpqdev *bpq;
struct net_device *orig_dev;
int size;
if (skb->protocol == htons(ETH_P_IP))
return ax25_ip_xmit(skb);
/*
* Just to be *really* sure not to send anything if the interface
* is down, the ethernet device may have gone.
*/
if (!netif_running(dev)) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
skb_pull(skb, 1); /* Drop KISS byte */
size = skb->len;
/*
	 * We're about to mess with the skb, which may still be shared with the
* generic networking code so unshare and ensure it's got enough
* space for the BPQ headers.
*/
if (skb_cow(skb, AX25_BPQ_HEADER_LEN)) {
if (net_ratelimit())
pr_err("bpqether: out of memory\n");
kfree_skb(skb);
return NETDEV_TX_OK;
}
ptr = skb_push(skb, 2); /* Make space for length */
*ptr++ = (size + 5) % 256;
*ptr++ = (size + 5) / 256;
bpq = netdev_priv(dev);
orig_dev = dev;
if ((dev = bpq_get_ether_dev(dev)) == NULL) {
orig_dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
skb->protocol = ax25_type_trans(skb, dev);
skb_reset_network_header(skb);
dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0);
dev->stats.tx_packets++;
dev->stats.tx_bytes+=skb->len;
dev_queue_xmit(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
/*
* Set AX.25 callsign
*/
static int bpq_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = (struct sockaddr *)addr;
dev_addr_set(dev, sa->sa_data);
return 0;
}
/* Ioctl commands
*
* SIOCSBPQETHOPT reserved for enhancements
* SIOCSBPQETHADDR set the destination and accepted
* source ethernet address (broadcast
* or multicast: accept all)
*/
static int bpq_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd)
{
struct bpq_ethaddr __user *ethaddr = data;
struct bpqdev *bpq = netdev_priv(dev);
struct bpq_req req;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
switch (cmd) {
case SIOCSBPQETHOPT:
if (copy_from_user(&req, data, sizeof(struct bpq_req)))
return -EFAULT;
switch (req.cmd) {
case SIOCGBPQETHPARAM:
case SIOCSBPQETHPARAM:
default:
return -EINVAL;
}
break;
case SIOCSBPQETHADDR:
if (copy_from_user(bpq->dest_addr, ethaddr->destination, ETH_ALEN))
return -EFAULT;
if (copy_from_user(bpq->acpt_addr, ethaddr->accept, ETH_ALEN))
return -EFAULT;
break;
default:
return -EINVAL;
}
return 0;
}
/*
* open/close a device
*/
static int bpq_open(struct net_device *dev)
{
netif_start_queue(dev);
return 0;
}
static int bpq_close(struct net_device *dev)
{
netif_stop_queue(dev);
return 0;
}
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
/*
* Proc filesystem
*/
static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
int i = 1;
struct bpqdev *bpqdev;
rcu_read_lock();
if (*pos == 0)
return SEQ_START_TOKEN;
list_for_each_entry_rcu(bpqdev, &bpq_devices, bpq_list) {
if (i == *pos)
return bpqdev;
}
return NULL;
}
static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *p;
struct bpqdev *bpqdev = v;
++*pos;
if (v == SEQ_START_TOKEN)
p = rcu_dereference(list_next_rcu(&bpq_devices));
else
p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
return (p == &bpq_devices) ? NULL
: list_entry(p, struct bpqdev, bpq_list);
}
static void bpq_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static int bpq_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"dev ether destination accept from\n");
else {
const struct bpqdev *bpqdev = v;
seq_printf(seq, "%-5s %-10s %pM ",
bpqdev->axdev->name, bpqdev->ethdev->name,
bpqdev->dest_addr);
if (is_multicast_ether_addr(bpqdev->acpt_addr))
seq_printf(seq, "*\n");
else
seq_printf(seq, "%pM\n", bpqdev->acpt_addr);
}
return 0;
}
static const struct seq_operations bpq_seqops = {
.start = bpq_seq_start,
.next = bpq_seq_next,
.stop = bpq_seq_stop,
.show = bpq_seq_show,
};
#endif
/* ------------------------------------------------------------------------ */
static const struct net_device_ops bpq_netdev_ops = {
.ndo_open = bpq_open,
.ndo_stop = bpq_close,
.ndo_start_xmit = bpq_xmit,
.ndo_set_mac_address = bpq_set_mac_address,
.ndo_siocdevprivate = bpq_siocdevprivate,
};
static void bpq_setup(struct net_device *dev)
{
dev->netdev_ops = &bpq_netdev_ops;
dev->needs_free_netdev = true;
dev->flags = 0;
dev->features = NETIF_F_LLTX; /* Allow recursion */
#if IS_ENABLED(CONFIG_AX25)
dev->header_ops = &ax25_header_ops;
#endif
dev->type = ARPHRD_AX25;
dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
dev->mtu = AX25_DEF_PACLEN;
dev->addr_len = AX25_ADDR_LEN;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
/*
* Setup a new device.
*/
static int bpq_new_device(struct net_device *edev)
{
int err;
struct net_device *ndev;
struct bpqdev *bpq;
ndev = alloc_netdev(sizeof(struct bpqdev), "bpq%d", NET_NAME_UNKNOWN,
bpq_setup);
if (!ndev)
return -ENOMEM;
bpq = netdev_priv(ndev);
dev_hold(edev);
bpq->ethdev = edev;
bpq->axdev = ndev;
eth_broadcast_addr(bpq->dest_addr);
eth_broadcast_addr(bpq->acpt_addr);
err = register_netdevice(ndev);
if (err)
goto error;
bpq_set_lockdep_class(ndev);
/* List protected by RTNL */
list_add_rcu(&bpq->bpq_list, &bpq_devices);
return 0;
error:
dev_put(edev);
free_netdev(ndev);
return err;
}
static void bpq_free_device(struct net_device *ndev)
{
struct bpqdev *bpq = netdev_priv(ndev);
dev_put(bpq->ethdev);
list_del_rcu(&bpq->bpq_list);
unregister_netdevice(ndev);
}
/*
* Handle device status changes.
*/
static int bpq_device_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (!dev_is_ethdev(dev) && !bpq_get_ax25_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP: /* new ethernet device -> new BPQ interface */
if (bpq_get_ax25_dev(dev) == NULL)
bpq_new_device(dev);
break;
case NETDEV_DOWN: /* ethernet device closed -> close BPQ interface */
if ((dev = bpq_get_ax25_dev(dev)) != NULL)
dev_close(dev);
break;
case NETDEV_UNREGISTER: /* ethernet device removed -> free BPQ interface */
if ((dev = bpq_get_ax25_dev(dev)) != NULL)
bpq_free_device(dev);
break;
default:
break;
}
return NOTIFY_DONE;
}
/* ------------------------------------------------------------------------ */
/*
* Initialize driver. To be called from af_ax25 if not compiled as a
* module
*/
static int __init bpq_init_driver(void)
{
#ifdef CONFIG_PROC_FS
if (!proc_create_seq("bpqether", 0444, init_net.proc_net, &bpq_seqops)) {
printk(KERN_ERR
"bpq: cannot create /proc/net/bpqether entry.\n");
return -ENOENT;
}
#endif /* CONFIG_PROC_FS */
dev_add_pack(&bpq_packet_type);
register_netdevice_notifier(&bpq_dev_notifier);
printk(banner);
return 0;
}
static void __exit bpq_cleanup_driver(void)
{
struct bpqdev *bpq;
dev_remove_pack(&bpq_packet_type);
unregister_netdevice_notifier(&bpq_dev_notifier);
remove_proc_entry("bpqether", init_net.proc_net);
rtnl_lock();
while (!list_empty(&bpq_devices)) {
bpq = list_entry(bpq_devices.next, struct bpqdev, bpq_list);
bpq_free_device(bpq->axdev);
}
rtnl_unlock();
}
MODULE_AUTHOR("Joerg Reuter DL1BKE <[email protected]>");
MODULE_DESCRIPTION("Transmit and receive AX.25 packets over Ethernet");
MODULE_LICENSE("GPL");
module_init(bpq_init_driver);
module_exit(bpq_cleanup_driver);
| linux-master | drivers/net/hamradio/bpqether.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* 6pack.c This module implements the 6pack protocol for kernel-based
* devices like TTY. It interfaces between a raw TTY and the
* kernel's AX.25 protocol layers.
*
* Authors: Andreas Könsgen <[email protected]>
* Ralf Baechle DL5RB <[email protected]>
*
* Quite a lot of stuff "stolen" by Joerg Reuter from slip.c, written by
*
* Laurence Culhane, <[email protected]>
* Fred N. van Kempen, <[email protected]>
*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/spinlock.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/semaphore.h>
#include <linux/refcount.h>
#define SIXPACK_VERSION "Revision: 0.3.0"
/* sixpack priority commands */
#define SIXP_SEOF 0x40 /* start and end of a 6pack frame */
#define SIXP_TX_URUN 0x48 /* transmit overrun */
#define SIXP_RX_ORUN 0x50 /* receive overrun */
#define SIXP_RX_BUF_OVL 0x58 /* receive buffer overflow */
#define SIXP_CHKSUM 0xFF /* valid checksum of a 6pack frame */
/* masks to get certain bits out of the status bytes sent by the TNC */
#define SIXP_CMD_MASK 0xC0
#define SIXP_CHN_MASK 0x07
#define SIXP_PRIO_CMD_MASK 0x80
#define SIXP_STD_CMD_MASK 0x40
#define SIXP_PRIO_DATA_MASK 0x38
#define SIXP_TX_MASK 0x20
#define SIXP_RX_MASK 0x10
#define SIXP_RX_DCD_MASK 0x18
#define SIXP_LEDS_ON 0x78
#define SIXP_LEDS_OFF 0x60
#define SIXP_CON 0x08
#define SIXP_STA 0x10
#define SIXP_FOUND_TNC 0xe9
#define SIXP_CON_ON 0x68
#define SIXP_DCD_MASK 0x08
#define SIXP_DAMA_OFF 0
/* default level 2 parameters */
#define SIXP_TXDELAY 25 /* 250 ms */
#define SIXP_PERSIST 50 /* in 256ths */
#define SIXP_SLOTTIME 10 /* 100 ms */
#define SIXP_INIT_RESYNC_TIMEOUT	(3*HZ/2)	/* 1.5 s */
#define SIXP_RESYNC_TIMEOUT		(5*HZ)		/* 5 s */
/* 6pack configuration. */
#define SIXP_NRUNIT 31 /* MAX number of 6pack channels */
#define SIXP_MTU 256 /* Default MTU */
enum sixpack_flags {
SIXPF_ERROR, /* Parity, etc. error */
};
struct sixpack {
/* Various fields. */
struct tty_struct *tty; /* ptr to TTY structure */
struct net_device *dev; /* easy for intr handling */
/* These are pointers to the malloc()ed frame buffers. */
unsigned char *rbuff; /* receiver buffer */
int rcount; /* received chars counter */
unsigned char *xbuff; /* transmitter buffer */
unsigned char *xhead; /* next byte to XMIT */
int xleft; /* bytes left in XMIT queue */
unsigned char raw_buf[4];
unsigned char cooked_buf[400];
unsigned int rx_count;
unsigned int rx_count_cooked;
spinlock_t rxlock;
int mtu; /* Our mtu (to spot changes!) */
int buffsize; /* Max buffers sizes */
unsigned long flags; /* Flag values/ mode etc */
unsigned char mode; /* 6pack mode */
/* 6pack stuff */
unsigned char tx_delay;
unsigned char persistence;
unsigned char slottime;
unsigned char duplex;
unsigned char led_state;
unsigned char status;
unsigned char status1;
unsigned char status2;
unsigned char tx_enable;
unsigned char tnc_state;
struct timer_list tx_t;
struct timer_list resync_t;
refcount_t refcnt;
struct completion dead;
spinlock_t lock;
};
#define AX25_6PACK_HEADER_LEN 0
static void sixpack_decode(struct sixpack *, const unsigned char[], int);
static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
/*
* Perform the persistence/slottime algorithm for CSMA access. If the
* persistence check was successful, write the data to the serial driver.
* Note that in case of DAMA operation, the data is not sent here.
*/
static void sp_xmit_on_air(struct timer_list *t)
{
struct sixpack *sp = from_timer(sp, t, tx_t);
int actual, when = sp->slottime;
static unsigned char random;
random = random * 17 + 41;
if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistence)) {
sp->led_state = 0x70;
sp->tty->ops->write(sp->tty, &sp->led_state, 1);
sp->tx_enable = 1;
actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2);
sp->xleft -= actual;
sp->xhead += actual;
sp->led_state = 0x60;
sp->tty->ops->write(sp->tty, &sp->led_state, 1);
sp->status2 = 0;
} else
mod_timer(&sp->tx_t, jiffies + ((when + 1) * HZ) / 100);
}
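/*
 * Note on the persistence algorithm above (numbers are illustrative): the
 * tiny generator random = random * 17 + 41 wraps modulo 256, so with DCD
 * clear the prepared frame in xbuff is written out with probability
 * persistence/256 per slot; otherwise the timer is re-armed for
 * (slottime + 1) * 10 ms.  With the defaults defined above (SIXP_PERSIST 50,
 * SIXP_SLOTTIME 10) that is roughly a 20 % chance every ~110 ms.
 */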
/* ----> 6pack timer interrupt handler and friends. <---- */
/* Encapsulate one AX.25 frame and stuff into a TTY queue. */
static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
{
unsigned char *msg, *p = icp;
int actual, count;
if (len > sp->mtu) { /* sp->mtu = AX25_MTU = max. PACLEN = 256 */
msg = "oversized transmit packet!";
goto out_drop;
}
if (p[0] > 5) {
msg = "invalid KISS command";
goto out_drop;
}
if ((p[0] != 0) && (len > 2)) {
msg = "KISS control packet too long";
goto out_drop;
}
if ((p[0] == 0) && (len < 15)) {
msg = "bad AX.25 packet to transmit";
goto out_drop;
}
count = encode_sixpack(p, sp->xbuff, len, sp->tx_delay);
set_bit(TTY_DO_WRITE_WAKEUP, &sp->tty->flags);
switch (p[0]) {
case 1: sp->tx_delay = p[1];
return;
case 2: sp->persistence = p[1];
return;
case 3: sp->slottime = p[1];
return;
case 4: /* ignored */
return;
case 5: sp->duplex = p[1];
return;
}
if (p[0] != 0)
return;
/*
	 * In full-duplex or DAMA operation we do not care about the state of
	 * the DCD or about any timers, as the determination of the
* correct time to send is the job of the AX.25 layer. We send
* immediately after data has arrived.
*/
if (sp->duplex == 1) {
sp->led_state = 0x70;
sp->tty->ops->write(sp->tty, &sp->led_state, 1);
sp->tx_enable = 1;
actual = sp->tty->ops->write(sp->tty, sp->xbuff, count);
sp->xleft = count - actual;
sp->xhead = sp->xbuff + actual;
sp->led_state = 0x60;
sp->tty->ops->write(sp->tty, &sp->led_state, 1);
} else {
sp->xleft = count;
sp->xhead = sp->xbuff;
sp->status2 = count;
sp_xmit_on_air(&sp->tx_t);
}
return;
out_drop:
sp->dev->stats.tx_dropped++;
netif_start_queue(sp->dev);
if (net_ratelimit())
printk(KERN_DEBUG "%s: %s - dropped.\n", sp->dev->name, msg);
}
/* Encapsulate an AX.25 frame and kick it into a TTY queue. */
static netdev_tx_t sp_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sixpack *sp = netdev_priv(dev);
if (skb->protocol == htons(ETH_P_IP))
return ax25_ip_xmit(skb);
spin_lock_bh(&sp->lock);
/* We were not busy, so we are now... :-) */
netif_stop_queue(dev);
dev->stats.tx_bytes += skb->len;
sp_encaps(sp, skb->data, skb->len);
spin_unlock_bh(&sp->lock);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static int sp_open_dev(struct net_device *dev)
{
struct sixpack *sp = netdev_priv(dev);
if (sp->tty == NULL)
return -ENODEV;
return 0;
}
/* Close the low-level part of the 6pack channel. */
static int sp_close(struct net_device *dev)
{
struct sixpack *sp = netdev_priv(dev);
spin_lock_bh(&sp->lock);
if (sp->tty) {
/* TTY discipline is running. */
clear_bit(TTY_DO_WRITE_WAKEUP, &sp->tty->flags);
}
netif_stop_queue(dev);
spin_unlock_bh(&sp->lock);
return 0;
}
static int sp_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr_ax25 *sa = addr;
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
__dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
return 0;
}
static const struct net_device_ops sp_netdev_ops = {
.ndo_open = sp_open_dev,
.ndo_stop = sp_close,
.ndo_start_xmit = sp_xmit,
.ndo_set_mac_address = sp_set_mac_address,
};
static void sp_setup(struct net_device *dev)
{
/* Finish setting up the DEVICE info. */
dev->netdev_ops = &sp_netdev_ops;
dev->mtu = SIXP_MTU;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->header_ops = &ax25_header_ops;
dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_AX25;
dev->tx_queue_len = 10;
/* Only activated in AX.25 mode */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
dev_addr_set(dev, (u8 *)&ax25_defaddr);
dev->flags = 0;
}
/*
 * Send one completely decapsulated frame to the AX.25 layer.  This is the
 * routine that hands the received data to the kernel AX.25 code; 'cmd' is
 * the KISS command and is zero for AX.25 data.
 */
static void sp_bump(struct sixpack *sp, char cmd)
{
struct sk_buff *skb;
int count;
unsigned char *ptr;
count = sp->rcount + 1;
sp->dev->stats.rx_bytes += count;
if ((skb = dev_alloc_skb(count + 1)) == NULL)
goto out_mem;
ptr = skb_put(skb, count + 1);
*ptr++ = cmd; /* KISS command */
memcpy(ptr, sp->cooked_buf + 1, count);
skb->protocol = ax25_type_trans(skb, sp->dev);
netif_rx(skb);
sp->dev->stats.rx_packets++;
return;
out_mem:
sp->dev->stats.rx_dropped++;
}
/* ----------------------------------------------------------------------- */
/*
* We have a potential race on dereferencing tty->disc_data, because the tty
* layer provides no locking at all - thus one cpu could be running
* sixpack_receive_buf while another calls sixpack_close, which zeroes
* tty->disc_data and frees the memory that sixpack_receive_buf is using. The
* best way to fix this is to use a rwlock in the tty struct, but for now we
* use a single global rwlock for all ttys in ppp line discipline.
*/
static DEFINE_RWLOCK(disc_data_lock);
static struct sixpack *sp_get(struct tty_struct *tty)
{
struct sixpack *sp;
read_lock(&disc_data_lock);
sp = tty->disc_data;
if (sp)
refcount_inc(&sp->refcnt);
read_unlock(&disc_data_lock);
return sp;
}
static void sp_put(struct sixpack *sp)
{
if (refcount_dec_and_test(&sp->refcnt))
complete(&sp->dead);
}
/*
* Called by the TTY driver when there's room for more data. If we have
* more packets to send, we send them here.
*/
static void sixpack_write_wakeup(struct tty_struct *tty)
{
struct sixpack *sp = sp_get(tty);
int actual;
if (!sp)
return;
if (sp->xleft <= 0) {
/* Now serial buffer is almost free & we can start
* transmission of another packet */
sp->dev->stats.tx_packets++;
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
sp->tx_enable = 0;
netif_wake_queue(sp->dev);
goto out;
}
if (sp->tx_enable) {
actual = tty->ops->write(tty, sp->xhead, sp->xleft);
sp->xleft -= actual;
sp->xhead += actual;
}
out:
sp_put(sp);
}
/* ----------------------------------------------------------------------- */
/*
* Handle the 'receiver data ready' interrupt.
* This function is called by the tty module in the kernel when
* a block of 6pack data has been received, which can now be decapsulated
* and sent on to some IP layer for further processing.
*/
static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
struct sixpack *sp;
int count1;
if (!count)
return;
sp = sp_get(tty);
if (!sp)
return;
/* Read the characters out of the buffer */
count1 = count;
while (count) {
count--;
if (fp && *fp++) {
if (!test_and_set_bit(SIXPF_ERROR, &sp->flags))
sp->dev->stats.rx_errors++;
continue;
}
}
sixpack_decode(sp, cp, count1);
sp_put(sp);
tty_unthrottle(tty);
}
/*
* Try to resync the TNC. Called by the resync timer defined in
* decode_prio_command
*/
#define TNC_UNINITIALIZED 0
#define TNC_UNSYNC_STARTUP 1
#define TNC_UNSYNCED 2
#define TNC_IN_SYNC 3
static void __tnc_set_sync_state(struct sixpack *sp, int new_tnc_state)
{
char *msg;
switch (new_tnc_state) {
	default:		/* silence gcc's 'msg may be used uninitialized' warning */
case TNC_UNSYNC_STARTUP:
msg = "Synchronizing with TNC";
break;
case TNC_UNSYNCED:
msg = "Lost synchronization with TNC\n";
break;
case TNC_IN_SYNC:
msg = "Found TNC";
break;
}
sp->tnc_state = new_tnc_state;
printk(KERN_INFO "%s: %s\n", sp->dev->name, msg);
}
static inline void tnc_set_sync_state(struct sixpack *sp, int new_tnc_state)
{
int old_tnc_state = sp->tnc_state;
if (old_tnc_state != new_tnc_state)
__tnc_set_sync_state(sp, new_tnc_state);
}
static void resync_tnc(struct timer_list *t)
{
struct sixpack *sp = from_timer(sp, t, resync_t);
static char resync_cmd = 0xe8;
/* clear any data that might have been received */
sp->rx_count = 0;
sp->rx_count_cooked = 0;
/* reset state machine */
sp->status = 1;
sp->status1 = 1;
sp->status2 = 0;
/* resync the TNC */
sp->led_state = 0x60;
sp->tty->ops->write(sp->tty, &sp->led_state, 1);
sp->tty->ops->write(sp->tty, &resync_cmd, 1);
/* Start resync timer again -- the TNC might be still absent */
mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
}
static inline int tnc_init(struct sixpack *sp)
{
unsigned char inbyte = 0xe8;
tnc_set_sync_state(sp, TNC_UNSYNC_STARTUP);
sp->tty->ops->write(sp->tty, &inbyte, 1);
mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
return 0;
}
/*
* Open the high-level part of the 6pack channel.
* This function is called by the TTY module when the
* 6pack line discipline is called for. Because we are
* sure the tty line exists, we only have to link it to
 * a free 6pack channel...
*/
static int sixpack_open(struct tty_struct *tty)
{
char *rbuff = NULL, *xbuff = NULL;
struct net_device *dev;
struct sixpack *sp;
unsigned long len;
int err = 0;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
dev = alloc_netdev(sizeof(struct sixpack), "sp%d", NET_NAME_UNKNOWN,
sp_setup);
if (!dev) {
err = -ENOMEM;
goto out;
}
sp = netdev_priv(dev);
sp->dev = dev;
spin_lock_init(&sp->lock);
spin_lock_init(&sp->rxlock);
refcount_set(&sp->refcnt, 1);
init_completion(&sp->dead);
/* !!! length of the buffers. MTU is IP MTU, not PACLEN! */
len = dev->mtu * 2;
rbuff = kmalloc(len + 4, GFP_KERNEL);
xbuff = kmalloc(len + 4, GFP_KERNEL);
if (rbuff == NULL || xbuff == NULL) {
err = -ENOBUFS;
goto out_free;
}
spin_lock_bh(&sp->lock);
sp->tty = tty;
sp->rbuff = rbuff;
sp->xbuff = xbuff;
sp->mtu = AX25_MTU + 73;
sp->buffsize = len;
sp->rcount = 0;
sp->rx_count = 0;
sp->rx_count_cooked = 0;
sp->xleft = 0;
sp->flags = 0; /* Clear ESCAPE & ERROR flags */
sp->duplex = 0;
sp->tx_delay = SIXP_TXDELAY;
sp->persistence = SIXP_PERSIST;
sp->slottime = SIXP_SLOTTIME;
sp->led_state = 0x60;
sp->status = 1;
sp->status1 = 1;
sp->status2 = 0;
sp->tx_enable = 0;
netif_start_queue(dev);
timer_setup(&sp->tx_t, sp_xmit_on_air, 0);
timer_setup(&sp->resync_t, resync_tnc, 0);
spin_unlock_bh(&sp->lock);
/* Done. We have linked the TTY line to a channel. */
tty->disc_data = sp;
tty->receive_room = 65536;
/* Now we're ready to register. */
err = register_netdev(dev);
if (err)
goto out_free;
tnc_init(sp);
return 0;
out_free:
kfree(xbuff);
kfree(rbuff);
free_netdev(dev);
out:
return err;
}
/*
* Close down a 6pack channel.
* This means flushing out any pending queues, and then restoring the
* TTY line discipline to what it was before it got hooked to 6pack
* (which usually is TTY again).
*/
static void sixpack_close(struct tty_struct *tty)
{
struct sixpack *sp;
write_lock_irq(&disc_data_lock);
sp = tty->disc_data;
tty->disc_data = NULL;
write_unlock_irq(&disc_data_lock);
if (!sp)
return;
/*
 * We have now ensured that nobody can start using sp from now on, but
* we have to wait for all existing users to finish.
*/
if (!refcount_dec_and_test(&sp->refcnt))
wait_for_completion(&sp->dead);
/* We must stop the queue to avoid potentially scribbling
* on the free buffers. The sp->dead completion is not sufficient
* to protect us from sp->xbuff access.
*/
netif_stop_queue(sp->dev);
unregister_netdev(sp->dev);
del_timer_sync(&sp->tx_t);
del_timer_sync(&sp->resync_t);
/* Free all 6pack frame buffers after unreg. */
kfree(sp->rbuff);
kfree(sp->xbuff);
free_netdev(sp->dev);
}
/* Perform I/O control on an active 6pack channel. */
static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct sixpack *sp = sp_get(tty);
struct net_device *dev;
unsigned int tmp, err;
if (!sp)
return -ENXIO;
dev = sp->dev;
switch (cmd) {
case SIOCGIFNAME:
err = copy_to_user((void __user *) arg, dev->name,
strlen(dev->name) + 1) ? -EFAULT : 0;
break;
case SIOCGIFENCAP:
err = put_user(0, (int __user *) arg);
break;
case SIOCSIFENCAP:
if (get_user(tmp, (int __user *) arg)) {
err = -EFAULT;
break;
}
sp->mode = tmp;
dev->addr_len = AX25_ADDR_LEN;
dev->hard_header_len = AX25_KISS_HEADER_LEN +
AX25_MAX_HEADER_LEN + 3;
dev->type = ARPHRD_AX25;
err = 0;
break;
case SIOCSIFHWADDR: {
char addr[AX25_ADDR_LEN];
if (copy_from_user(&addr,
(void __user *)arg, AX25_ADDR_LEN)) {
err = -EFAULT;
break;
}
netif_tx_lock_bh(dev);
__dev_addr_set(dev, &addr, AX25_ADDR_LEN);
netif_tx_unlock_bh(dev);
err = 0;
break;
}
default:
err = tty_mode_ioctl(tty, cmd, arg);
}
sp_put(sp);
return err;
}
static struct tty_ldisc_ops sp_ldisc = {
.owner = THIS_MODULE,
.num = N_6PACK,
.name = "6pack",
.open = sixpack_open,
.close = sixpack_close,
.ioctl = sixpack_ioctl,
.receive_buf = sixpack_receive_buf,
.write_wakeup = sixpack_write_wakeup,
};
/* Initialize 6pack control device -- register 6pack line discipline */
static const char msg_banner[] __initconst = KERN_INFO \
"AX.25: 6pack driver, " SIXPACK_VERSION "\n";
static const char msg_regfail[] __initconst = KERN_ERR \
"6pack: can't register line discipline (err = %d)\n";
static int __init sixpack_init_driver(void)
{
int status;
printk(msg_banner);
/* Register the provided line protocol discipline */
status = tty_register_ldisc(&sp_ldisc);
if (status)
printk(msg_regfail, status);
return status;
}
static void __exit sixpack_exit_driver(void)
{
tty_unregister_ldisc(&sp_ldisc);
}
/* encode an AX.25 packet into 6pack */
static int encode_sixpack(unsigned char *tx_buf, unsigned char *tx_buf_raw,
int length, unsigned char tx_delay)
{
int count = 0;
unsigned char checksum = 0, buf[400];
int raw_count = 0;
tx_buf_raw[raw_count++] = SIXP_PRIO_CMD_MASK | SIXP_TX_MASK;
tx_buf_raw[raw_count++] = SIXP_SEOF;
buf[0] = tx_delay;
for (count = 1; count < length; count++)
buf[count] = tx_buf[count];
for (count = 0; count < length; count++)
checksum += buf[count];
buf[length] = (unsigned char) 0xff - checksum;
for (count = 0; count <= length; count++) {
if ((count % 3) == 0) {
tx_buf_raw[raw_count++] = (buf[count] & 0x3f);
tx_buf_raw[raw_count] = ((buf[count] >> 2) & 0x30);
} else if ((count % 3) == 1) {
tx_buf_raw[raw_count++] |= (buf[count] & 0x0f);
tx_buf_raw[raw_count] = ((buf[count] >> 2) & 0x3c);
} else {
tx_buf_raw[raw_count++] |= (buf[count] & 0x03);
tx_buf_raw[raw_count++] = (buf[count] >> 2);
}
}
if ((length % 3) != 2)
raw_count++;
tx_buf_raw[raw_count++] = SIXP_SEOF;
return raw_count;
}
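/*
 * Worked example of the 3:4 expansion above (illustration only): the
 * payload bytes a = 0xa5, b = 0x5a, c = 0xff are packed into four
 * 6-bit bytes as
 *
 *	out0 =   a & 0x3f                       = 0x25
 *	out1 = ((a >> 2) & 0x30) | (b & 0x0f)   = 0x2a
 *	out2 = ((b >> 2) & 0x3c) | (c & 0x03)   = 0x17
 *	out3 =   c >> 2                         = 0x3f
 *
 * decode_data() below reassembles a, b and c from exactly these pieces.
 */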
/* decode 4 sixpack-encoded bytes into 3 data bytes */
static void decode_data(struct sixpack *sp, unsigned char inbyte)
{
unsigned char *buf;
if (sp->rx_count != 3) {
sp->raw_buf[sp->rx_count++] = inbyte;
return;
}
if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) {
pr_err("6pack: cooked buffer overrun, data loss\n");
sp->rx_count = 0;
return;
}
buf = sp->raw_buf;
sp->cooked_buf[sp->rx_count_cooked++] =
buf[0] | ((buf[1] << 2) & 0xc0);
sp->cooked_buf[sp->rx_count_cooked++] =
(buf[1] & 0x0f) | ((buf[2] << 2) & 0xf0);
sp->cooked_buf[sp->rx_count_cooked++] =
(buf[2] & 0x03) | (inbyte << 2);
sp->rx_count = 0;
}
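/*
 * Continuing the example from encode_sixpack() (illustration only):
 * feeding the four 6-bit bytes 0x25 0x2a 0x17 0x3f into this routine
 * yields
 *
 *	cooked0 =  0x25 | ((0x2a << 2) & 0xc0)          = 0xa5
 *	cooked1 = (0x2a & 0x0f) | ((0x17 << 2) & 0xf0)  = 0x5a
 *	cooked2 = (0x17 & 0x03) | (0x3f << 2)           = 0xff
 *
 * i.e. the original payload bytes a, b and c.
 */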
/* identify and execute a 6pack priority command byte */
static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
{
int actual;
if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? */
/* RX and DCD flags can only be set in the same prio command,
 * if the DCD flag has been set without the RX flag in the previous
 * prio command. If DCD has not been set before, something in the
 * transmission has gone wrong. In this case, RX and DCD are
 * cleared in order to prevent the decode_data routine from
 * reading further data that might be corrupt.
 */
if (((sp->status & SIXP_DCD_MASK) == 0) &&
((cmd & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK)) {
if (sp->status != 1)
printk(KERN_DEBUG "6pack: protocol violation\n");
else
sp->status = 0;
cmd &= ~SIXP_RX_DCD_MASK;
}
sp->status = cmd & SIXP_PRIO_DATA_MASK;
} else { /* output watchdog char if idle */
if ((sp->status2 != 0) && (sp->duplex == 1)) {
sp->led_state = 0x70;
sp->tty->ops->write(sp->tty, &sp->led_state, 1);
sp->tx_enable = 1;
actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2);
sp->xleft -= actual;
sp->xhead += actual;
sp->led_state = 0x60;
sp->status2 = 0;
}
}
/* needed to trigger the TNC watchdog */
sp->tty->ops->write(sp->tty, &sp->led_state, 1);
/* if the state byte has been received, the TNC is present,
 * so the resync timer can be reset. */
if (sp->tnc_state == TNC_IN_SYNC)
mod_timer(&sp->resync_t, jiffies + SIXP_INIT_RESYNC_TIMEOUT);
sp->status1 = cmd & SIXP_PRIO_DATA_MASK;
}
/* identify and execute a standard 6pack command byte */
static void decode_std_command(struct sixpack *sp, unsigned char cmd)
{
unsigned char checksum = 0, rest = 0;
short i;
switch (cmd & SIXP_CMD_MASK) { /* normal command */
case SIXP_SEOF:
if ((sp->rx_count == 0) && (sp->rx_count_cooked == 0)) {
if ((sp->status & SIXP_RX_DCD_MASK) ==
SIXP_RX_DCD_MASK) {
sp->led_state = 0x68;
sp->tty->ops->write(sp->tty, &sp->led_state, 1);
}
} else {
sp->led_state = 0x60;
/* fill trailing bytes with zeroes */
sp->tty->ops->write(sp->tty, &sp->led_state, 1);
spin_lock_bh(&sp->rxlock);
rest = sp->rx_count;
if (rest != 0)
for (i = rest; i <= 3; i++)
decode_data(sp, 0);
if (rest == 2)
sp->rx_count_cooked -= 2;
else if (rest == 3)
sp->rx_count_cooked -= 1;
for (i = 0; i < sp->rx_count_cooked; i++)
checksum += sp->cooked_buf[i];
if (checksum != SIXP_CHKSUM) {
printk(KERN_DEBUG "6pack: bad checksum %2.2x\n", checksum);
} else {
sp->rcount = sp->rx_count_cooked-2;
sp_bump(sp, 0);
}
sp->rx_count_cooked = 0;
spin_unlock_bh(&sp->rxlock);
}
break;
case SIXP_TX_URUN: printk(KERN_DEBUG "6pack: TX underrun\n");
break;
case SIXP_RX_ORUN: printk(KERN_DEBUG "6pack: RX overrun\n");
break;
case SIXP_RX_BUF_OVL:
printk(KERN_DEBUG "6pack: RX buffer overflow\n");
}
}
/* decode a 6pack packet */
static void
sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count)
{
unsigned char inbyte;
int count1;
for (count1 = 0; count1 < count; count1++) {
inbyte = pre_rbuff[count1];
if (inbyte == SIXP_FOUND_TNC) {
tnc_set_sync_state(sp, TNC_IN_SYNC);
del_timer(&sp->resync_t);
}
if ((inbyte & SIXP_PRIO_CMD_MASK) != 0)
decode_prio_command(sp, inbyte);
else if ((inbyte & SIXP_STD_CMD_MASK) != 0)
decode_std_command(sp, inbyte);
else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) {
spin_lock_bh(&sp->rxlock);
decode_data(sp, inbyte);
spin_unlock_bh(&sp->rxlock);
}
}
}
MODULE_AUTHOR("Ralf Baechle DO1GRB <[email protected]>");
MODULE_DESCRIPTION("6pack driver for AX.25");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_6PACK);
module_init(sixpack_init_driver);
module_exit(sixpack_exit_driver);
| linux-master | drivers/net/hamradio/6pack.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*****************************************************************************/
/*
* baycom_ser_fdx.c -- baycom ser12 fullduplex radio modem driver.
*
* Copyright (C) 1996-2000 Thomas Sailer ([email protected])
*
* Please note that the GPL allows you to use the driver, NOT the radio.
* In order to use the radio, you need a license from the communications
* authority of your country.
*
* Supported modems
*
* ser12: This is a very simple 1200 baud AFSK modem. The modem consists only
* of a modulator/demodulator chip, usually a TI TCM3105. The computer
* is responsible for regenerating the receiver bit clock, as well as
* for handling the HDLC protocol. The modem connects to a serial port,
* hence the name. Since the serial port is not used as an async serial
* port, the kernel driver for serial ports cannot be used, and this
* driver only supports standard serial hardware (8250, 16450, 16550A)
*
* This modem usually draws its supply current out of the otherwise unused
* TXD pin of the serial port. Thus a contiguous stream of 0x00-bytes
* is transmitted to achieve a positive supply voltage.
*
* hsk: This is a 4800 baud FSK modem, designed for TNC use. It works fine
* in 'baycom-mode' :-) In contrast to the TCM3105 modem, power is
* externally supplied. So there's no need to provide the 0x00-byte-stream
* when receiving or idle, which drastically reduces interrupt load.
*
* Command line options (insmod command line)
*
* mode ser# hardware DCD
* ser#* software DCD
* ser#+ hardware DCD, inverted signal at DCD pin
 * '#' denotes the baud rate / 100, e.g. ser12* is '1200 baud, soft DCD'
* iobase base address of the port; common values are 0x3f8, 0x2f8, 0x3e8, 0x2e8
* baud baud rate (between 300 and 4800)
* irq interrupt line of the port; common values are 4,3
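 *
 * A typical invocation matching the built-in defaults further below
 * might look like (illustrative only):
 *
 *	insmod baycom_ser_fdx mode=ser12* iobase=0x3f8 irq=4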
*
* History:
* 0.1 26.06.1996 Adapted from baycom.c and made network driver interface
* 18.10.1996 Changed to new user space access routines (copy_{to,from}_user)
* 0.3 26.04.1997 init code/data tagged
* 0.4 08.07.1997 alternative ser12 decoding algorithm (uses delta CTS ints)
* 0.5 11.11.1997 ser12/par96 split into separate files
* 0.6 24.01.1998 Thorsten Kranzkowski, dl8bcu and Thomas Sailer:
* reduced interrupt load in transmit case
* reworked receiver
* 0.7 03.08.1999 adapt to Linus' new __setup/__initcall
* 0.8 10.08.1999 use module_init/module_exit
* 0.9 12.02.2000 adapted to softnet driver interface
* 0.10 03.07.2000 fix interface name handling
*/
/*****************************************************************************/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hdlcdrv.h>
#include <linux/baycom.h>
#include <linux/jiffies.h>
#include <linux/time64.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
/* --------------------------------------------------------------------- */
#define BAYCOM_DEBUG
/* --------------------------------------------------------------------- */
static const char bc_drvname[] = "baycom_ser_fdx";
static const char bc_drvinfo[] = KERN_INFO "baycom_ser_fdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
"baycom_ser_fdx: version 0.10\n";
/* --------------------------------------------------------------------- */
#define NR_PORTS 4
static struct net_device *baycom_device[NR_PORTS];
/* --------------------------------------------------------------------- */
#define RBR(iobase) (iobase+0)
#define THR(iobase) (iobase+0)
#define IER(iobase) (iobase+1)
#define IIR(iobase) (iobase+2)
#define FCR(iobase) (iobase+2)
#define LCR(iobase) (iobase+3)
#define MCR(iobase) (iobase+4)
#define LSR(iobase) (iobase+5)
#define MSR(iobase) (iobase+6)
#define SCR(iobase) (iobase+7)
#define DLL(iobase) (iobase+0)
#define DLM(iobase) (iobase+1)
#define SER12_EXTENT 8
/* ---------------------------------------------------------------------- */
/*
 * Information that needs to be kept for each board.
*/
struct baycom_state {
struct hdlcdrv_state hdrv;
unsigned int baud, baud_us, baud_arbdiv, baud_uartdiv, baud_dcdtimeout;
int opt_dcd;
struct modem_state {
unsigned char flags;
unsigned char ptt;
unsigned int shreg;
struct modem_state_ser12 {
unsigned char tx_bit;
unsigned char last_rxbit;
int dcd_sum0, dcd_sum1, dcd_sum2;
int dcd_time;
unsigned int pll_time;
unsigned int txshreg;
} ser12;
} modem;
#ifdef BAYCOM_DEBUG
struct debug_vals {
unsigned long last_jiffies;
unsigned cur_intcnt;
unsigned last_intcnt;
int cur_pllcorr;
int last_pllcorr;
} debug_vals;
#endif /* BAYCOM_DEBUG */
};
/* --------------------------------------------------------------------- */
static inline void baycom_int_freq(struct baycom_state *bc)
{
#ifdef BAYCOM_DEBUG
unsigned long cur_jiffies = jiffies;
/*
* measure the interrupt frequency
*/
bc->debug_vals.cur_intcnt++;
if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
bc->debug_vals.last_jiffies = cur_jiffies;
bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
bc->debug_vals.cur_intcnt = 0;
bc->debug_vals.last_pllcorr = bc->debug_vals.cur_pllcorr;
bc->debug_vals.cur_pllcorr = 0;
}
#endif /* BAYCOM_DEBUG */
}
/* --------------------------------------------------------------------- */
/*
* ===================== SER12 specific routines =========================
*/
/* --------------------------------------------------------------------- */
static inline void ser12_set_divisor(struct net_device *dev,
unsigned int divisor)
{
outb(0x81, LCR(dev->base_addr)); /* DLAB = 1 */
outb(divisor, DLL(dev->base_addr));
outb(divisor >> 8, DLM(dev->base_addr));
outb(0x01, LCR(dev->base_addr)); /* word length = 6 */
/*
* make sure the next interrupt is generated;
* 0 must be used to power the modem; the modem draws its
* power from the TxD line
*/
outb(0x00, THR(dev->base_addr));
/*
* it is important not to set the divider while transmitting;
 * this reportedly makes some UARTs generate interrupts
 * in the hundreds of thousands per second region
* Reported by: [email protected] (Ignacio Arenaza Nuno)
*/
}
static __inline__ void ser12_rx(struct net_device *dev, struct baycom_state *bc, struct timespec64 *ts, unsigned char curs)
{
int timediff;
int bdus8 = bc->baud_us >> 3;
int bdus4 = bc->baud_us >> 2;
int bdus2 = bc->baud_us >> 1;
timediff = 1000000 + ts->tv_nsec / NSEC_PER_USEC -
bc->modem.ser12.pll_time;
while (timediff >= 500000)
timediff -= 1000000;
while (timediff >= bdus2) {
timediff -= bc->baud_us;
bc->modem.ser12.pll_time += bc->baud_us;
bc->modem.ser12.dcd_time--;
/* first check if there is room to add a bit */
if (bc->modem.shreg & 1) {
hdlcdrv_putbits(&bc->hdrv, (bc->modem.shreg >> 1) ^ 0xffff);
bc->modem.shreg = 0x10000;
}
/* add a one bit */
bc->modem.shreg >>= 1;
}
if (bc->modem.ser12.dcd_time <= 0) {
if (!bc->opt_dcd)
hdlcdrv_setdcd(&bc->hdrv, (bc->modem.ser12.dcd_sum0 +
bc->modem.ser12.dcd_sum1 +
bc->modem.ser12.dcd_sum2) < 0);
bc->modem.ser12.dcd_sum2 = bc->modem.ser12.dcd_sum1;
bc->modem.ser12.dcd_sum1 = bc->modem.ser12.dcd_sum0;
bc->modem.ser12.dcd_sum0 = 2; /* slight bias */
bc->modem.ser12.dcd_time += 120;
}
if (bc->modem.ser12.last_rxbit != curs) {
bc->modem.ser12.last_rxbit = curs;
bc->modem.shreg |= 0x10000;
/* adjust the PLL */
if (timediff > 0)
bc->modem.ser12.pll_time += bdus8;
else
bc->modem.ser12.pll_time += 1000000 - bdus8;
/* update DCD */
if (abs(timediff) > bdus4)
bc->modem.ser12.dcd_sum0 += 4;
else
bc->modem.ser12.dcd_sum0--;
#ifdef BAYCOM_DEBUG
bc->debug_vals.cur_pllcorr = timediff;
#endif /* BAYCOM_DEBUG */
}
while (bc->modem.ser12.pll_time >= 1000000)
bc->modem.ser12.pll_time -= 1000000;
}
/* --------------------------------------------------------------------- */
static irqreturn_t ser12_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct baycom_state *bc = netdev_priv(dev);
struct timespec64 ts;
unsigned char iir, msr;
unsigned int txcount = 0;
if (!bc || bc->hdrv.magic != HDLCDRV_MAGIC)
return IRQ_NONE;
/* fast way out for shared irq */
if ((iir = inb(IIR(dev->base_addr))) & 1)
return IRQ_NONE;
/* get current time */
ktime_get_ts64(&ts);
msr = inb(MSR(dev->base_addr));
/* delta DCD */
if ((msr & 8) && bc->opt_dcd)
hdlcdrv_setdcd(&bc->hdrv, !((msr ^ bc->opt_dcd) & 0x80));
do {
switch (iir & 6) {
case 6:
inb(LSR(dev->base_addr));
break;
case 4:
inb(RBR(dev->base_addr));
break;
case 2:
/*
* make sure the next interrupt is generated;
* 0 must be used to power the modem; the modem draws its
* power from the TxD line
*/
outb(0x00, THR(dev->base_addr));
baycom_int_freq(bc);
txcount++;
/*
* first output the last bit (!) then call HDLC transmitter,
* since this may take quite long
*/
if (bc->modem.ptt)
outb(0x0e | (!!bc->modem.ser12.tx_bit), MCR(dev->base_addr));
else
outb(0x0d, MCR(dev->base_addr)); /* transmitter off */
break;
default:
msr = inb(MSR(dev->base_addr));
/* delta DCD */
if ((msr & 8) && bc->opt_dcd)
hdlcdrv_setdcd(&bc->hdrv, !((msr ^ bc->opt_dcd) & 0x80));
break;
}
iir = inb(IIR(dev->base_addr));
} while (!(iir & 1));
ser12_rx(dev, bc, &ts, msr & 0x10); /* CTS */
if (bc->modem.ptt && txcount) {
if (bc->modem.ser12.txshreg <= 1) {
bc->modem.ser12.txshreg = 0x10000 | hdlcdrv_getbits(&bc->hdrv);
if (!hdlcdrv_ptt(&bc->hdrv)) {
ser12_set_divisor(dev, 115200/100/8);
bc->modem.ptt = 0;
goto end_transmit;
}
}
bc->modem.ser12.tx_bit = !(bc->modem.ser12.tx_bit ^ (bc->modem.ser12.txshreg & 1));
bc->modem.ser12.txshreg >>= 1;
}
end_transmit:
local_irq_enable();
if (!bc->modem.ptt && txcount) {
hdlcdrv_arbitrate(dev, &bc->hdrv);
if (hdlcdrv_ptt(&bc->hdrv)) {
ser12_set_divisor(dev, bc->baud_uartdiv);
bc->modem.ser12.txshreg = 1;
bc->modem.ptt = 1;
}
}
hdlcdrv_transmitter(dev, &bc->hdrv);
hdlcdrv_receiver(dev, &bc->hdrv);
local_irq_disable();
return IRQ_HANDLED;
}
/* --------------------------------------------------------------------- */
enum uart { c_uart_unknown, c_uart_8250,
c_uart_16450, c_uart_16550, c_uart_16550A};
static const char *uart_str[] = {
"unknown", "8250", "16450", "16550", "16550A"
};
static enum uart ser12_check_uart(unsigned int iobase)
{
unsigned char b1,b2,b3;
enum uart u;
enum uart uart_tab[] =
{ c_uart_16450, c_uart_unknown, c_uart_16550, c_uart_16550A };
b1 = inb(MCR(iobase));
outb(b1 | 0x10, MCR(iobase)); /* loopback mode */
b2 = inb(MSR(iobase));
outb(0x1a, MCR(iobase));
b3 = inb(MSR(iobase)) & 0xf0;
outb(b1, MCR(iobase)); /* restore old values */
outb(b2, MSR(iobase));
if (b3 != 0x90)
return c_uart_unknown;
inb(RBR(iobase));
inb(RBR(iobase));
outb(0x01, FCR(iobase)); /* enable FIFOs */
u = uart_tab[(inb(IIR(iobase)) >> 6) & 3];
if (u == c_uart_16450) {
outb(0x5a, SCR(iobase));
b1 = inb(SCR(iobase));
outb(0xa5, SCR(iobase));
b2 = inb(SCR(iobase));
if ((b1 != 0x5a) || (b2 != 0xa5))
u = c_uart_8250;
}
return u;
}
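/*
 * Summary of the probe above (for illustration): bits 7:6 of IIR after
 * enabling the FIFOs index uart_tab[], i.e. 00 -> 16450, 10 -> 16550,
 * 11 -> 16550A; a 16450 whose scratch register does not read back the
 * test patterns 0x5a/0xa5 is downgraded to a plain 8250.
 */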
/* --------------------------------------------------------------------- */
static int ser12_open(struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
enum uart u;
if (!dev || !bc)
return -ENXIO;
if (!dev->base_addr || dev->base_addr > 0xffff-SER12_EXTENT ||
dev->irq < 2 || dev->irq > nr_irqs) {
printk(KERN_INFO "baycom_ser_fdx: invalid portnumber (max %u) "
"or irq (2 <= irq <= %d)\n",
0xffff-SER12_EXTENT, nr_irqs);
return -ENXIO;
}
if (bc->baud < 300 || bc->baud > 4800) {
printk(KERN_INFO "baycom_ser_fdx: invalid baudrate "
"(300...4800)\n");
return -EINVAL;
}
if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser_fdx")) {
printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy\n",
dev->base_addr);
return -EACCES;
}
memset(&bc->modem, 0, sizeof(bc->modem));
bc->hdrv.par.bitrate = bc->baud;
bc->baud_us = 1000000/bc->baud;
bc->baud_uartdiv = (115200/8)/bc->baud;
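/* e.g. at the default 1200 baud this yields baud_us = 833 (one bit
 * time in microseconds) and baud_uartdiv = 14400/1200 = 12
 */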
if ((u = ser12_check_uart(dev->base_addr)) == c_uart_unknown){
release_region(dev->base_addr, SER12_EXTENT);
return -EIO;
}
outb(0, FCR(dev->base_addr)); /* disable FIFOs */
outb(0x0d, MCR(dev->base_addr));
outb(0, IER(dev->base_addr));
if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
"baycom_ser_fdx", dev)) {
release_region(dev->base_addr, SER12_EXTENT);
return -EBUSY;
}
/*
* set the SIO to 6 Bits/character; during receive,
* the baud rate is set to produce 100 ints/sec
* to feed the channel arbitration process,
* during transmit to baud ints/sec to run
* the transmitter
*/
ser12_set_divisor(dev, 115200/100/8);
/*
* enable transmitter empty interrupt and modem status interrupt
*/
outb(0x0a, IER(dev->base_addr));
/*
* make sure the next interrupt is generated;
* 0 must be used to power the modem; the modem draws its
* power from the TxD line
*/
outb(0x00, THR(dev->base_addr));
hdlcdrv_setdcd(&bc->hdrv, 0);
printk(KERN_INFO "%s: ser_fdx at iobase 0x%lx irq %u baud %u uart %s\n",
bc_drvname, dev->base_addr, dev->irq, bc->baud, uart_str[u]);
return 0;
}
/* --------------------------------------------------------------------- */
static int ser12_close(struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
if (!dev || !bc)
return -EINVAL;
/*
* disable interrupts
*/
outb(0, IER(dev->base_addr));
outb(1, MCR(dev->base_addr));
free_irq(dev->irq, dev);
release_region(dev->base_addr, SER12_EXTENT);
printk(KERN_INFO "%s: close ser_fdx at iobase 0x%lx irq %u\n",
bc_drvname, dev->base_addr, dev->irq);
return 0;
}
/* --------------------------------------------------------------------- */
/*
* ===================== hdlcdrv driver interface =========================
*/
/* --------------------------------------------------------------------- */
static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd);
/* --------------------------------------------------------------------- */
static const struct hdlcdrv_ops ser12_ops = {
.drvname = bc_drvname,
.drvinfo = bc_drvinfo,
.open = ser12_open,
.close = ser12_close,
.ioctl = baycom_ioctl,
};
/* --------------------------------------------------------------------- */
static int baycom_setmode(struct baycom_state *bc, const char *modestr)
{
unsigned int baud;
if (!strncmp(modestr, "ser", 3)) {
baud = simple_strtoul(modestr+3, NULL, 10);
if (baud >= 3 && baud <= 48)
bc->baud = baud*100;
}
if (strchr(modestr, '*'))
bc->opt_dcd = 0;
else if (strchr(modestr, '+'))
bc->opt_dcd = -1;
else
bc->opt_dcd = 1;
return 0;
}
/* --------------------------------------------------------------------- */
static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd)
{
struct baycom_state *bc;
struct baycom_ioctl bi;
if (!dev)
return -EINVAL;
bc = netdev_priv(dev);
BUG_ON(bc->hdrv.magic != HDLCDRV_MAGIC);
if (cmd != SIOCDEVPRIVATE)
return -ENOIOCTLCMD;
switch (hi->cmd) {
default:
break;
case HDLCDRVCTL_GETMODE:
sprintf(hi->data.modename, "ser%u", bc->baud / 100);
if (bc->opt_dcd <= 0)
strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : "+");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
case HDLCDRVCTL_SETMODE:
if (netif_running(dev) || !capable(CAP_NET_ADMIN))
return -EACCES;
hi->data.modename[sizeof(hi->data.modename)-1] = '\0';
return baycom_setmode(bc, hi->data.modename);
case HDLCDRVCTL_MODELIST:
strcpy(hi->data.modename, "ser12,ser3,ser24");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
case HDLCDRVCTL_MODEMPARMASK:
return HDLCDRV_PARMASK_IOBASE | HDLCDRV_PARMASK_IRQ;
}
if (copy_from_user(&bi, data, sizeof(bi)))
return -EFAULT;
switch (bi.cmd) {
default:
return -ENOIOCTLCMD;
#ifdef BAYCOM_DEBUG
case BAYCOMCTL_GETDEBUG:
bi.data.dbg.debug1 = bc->hdrv.ptt_keyed;
bi.data.dbg.debug2 = bc->debug_vals.last_intcnt;
bi.data.dbg.debug3 = bc->debug_vals.last_pllcorr;
break;
#endif /* BAYCOM_DEBUG */
}
if (copy_to_user(data, &bi, sizeof(bi)))
return -EFAULT;
return 0;
}
/* --------------------------------------------------------------------- */
/*
* command line settable parameters
*/
static char *mode[NR_PORTS] = { "ser12*", };
static int iobase[NR_PORTS] = { 0x3f8, };
static int irq[NR_PORTS] = { 4, };
static int baud[NR_PORTS] = { [0 ... NR_PORTS-1] = 1200 };
module_param_array(mode, charp, NULL, 0);
MODULE_PARM_DESC(mode, "baycom operating mode; * for software DCD");
module_param_hw_array(iobase, int, ioport, NULL, 0);
MODULE_PARM_DESC(iobase, "baycom io base address");
module_param_hw_array(irq, int, irq, NULL, 0);
MODULE_PARM_DESC(irq, "baycom irq number");
module_param_array(baud, int, NULL, 0);
MODULE_PARM_DESC(baud, "baycom baud rate (300 to 4800)");
MODULE_AUTHOR("Thomas M. Sailer, [email protected], [email protected]");
MODULE_DESCRIPTION("Baycom ser12 full duplex amateur radio modem driver");
MODULE_LICENSE("GPL");
/* --------------------------------------------------------------------- */
static int __init init_baycomserfdx(void)
{
int i, found = 0;
char set_hw = 1;
printk(bc_drvinfo);
/*
* register net devices
*/
for (i = 0; i < NR_PORTS; i++) {
struct net_device *dev;
struct baycom_state *bc;
char ifname[IFNAMSIZ];
sprintf(ifname, "bcsf%d", i);
if (!mode[i])
set_hw = 0;
if (!set_hw)
iobase[i] = irq[i] = 0;
dev = hdlcdrv_register(&ser12_ops,
sizeof(struct baycom_state),
ifname, iobase[i], irq[i], 0);
if (IS_ERR(dev))
break;
bc = netdev_priv(dev);
if (set_hw && baycom_setmode(bc, mode[i]))
set_hw = 0;
bc->baud = baud[i];
found++;
baycom_device[i] = dev;
}
if (!found)
return -ENXIO;
return 0;
}
static void __exit cleanup_baycomserfdx(void)
{
int i;
for (i = 0; i < NR_PORTS; i++) {
struct net_device *dev = baycom_device[i];
if (dev)
hdlcdrv_unregister(dev);
}
}
module_init(init_baycomserfdx);
module_exit(cleanup_baycomserfdx);
/* --------------------------------------------------------------------- */
#ifndef MODULE
/*
* format: baycom_ser_fdx=io,irq,mode
* mode: ser# hardware DCD
* ser#* software DCD
* ser#+ hardware DCD, inverted signal at DCD pin
 * '#' denotes the baud rate / 100, e.g. ser12* is '1200 baud, soft DCD'
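 * example (illustrative): baycom_ser_fdx=0x3f8,4,ser12*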
*/
static int __init baycom_ser_fdx_setup(char *str)
{
static unsigned nr_dev;
int ints[4];
if (nr_dev >= NR_PORTS)
return 0;
str = get_options(str, 4, ints);
if (ints[0] < 2)
return 0;
mode[nr_dev] = str;
iobase[nr_dev] = ints[1];
irq[nr_dev] = ints[2];
if (ints[0] >= 3)
baud[nr_dev] = ints[3];
nr_dev++;
return 1;
}
__setup("baycom_ser_fdx=", baycom_ser_fdx_setup);
#endif /* MODULE */
/* --------------------------------------------------------------------- */
| linux-master | drivers/net/hamradio/baycom_ser_fdx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ppp_deflate.c - interface the zlib procedures for Deflate compression
* and decompression (as used by gzip) to the PPP code.
*
* Copyright 1994-1998 Paul Mackerras.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
#include <linux/zlib.h>
#include <asm/unaligned.h>
/*
* State for a Deflate (de)compressor.
*/
struct ppp_deflate_state {
int seqno;
int w_size;
int unit;
int mru;
int debug;
z_stream strm;
struct compstat stats;
};
#define DEFLATE_OVHD 2 /* Deflate overhead/packet */
static void *z_comp_alloc(unsigned char *options, int opt_len);
static void *z_decomp_alloc(unsigned char *options, int opt_len);
static void z_comp_free(void *state);
static void z_decomp_free(void *state);
static int z_comp_init(void *state, unsigned char *options,
int opt_len,
int unit, int hdrlen, int debug);
static int z_decomp_init(void *state, unsigned char *options,
int opt_len,
int unit, int hdrlen, int mru, int debug);
static int z_compress(void *state, unsigned char *rptr,
unsigned char *obuf,
int isize, int osize);
static void z_incomp(void *state, unsigned char *ibuf, int icnt);
static int z_decompress(void *state, unsigned char *ibuf,
int isize, unsigned char *obuf, int osize);
static void z_comp_reset(void *state);
static void z_decomp_reset(void *state);
static void z_comp_stats(void *state, struct compstat *stats);
/**
* z_comp_free - free the memory used by a compressor
* @arg: pointer to the private state for the compressor.
*/
static void z_comp_free(void *arg)
{
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
if (state) {
zlib_deflateEnd(&state->strm);
vfree(state->strm.workspace);
kfree(state);
}
}
/**
* z_comp_alloc - allocate space for a compressor.
* @options: pointer to CCP option data
* @opt_len: length of the CCP option at @options.
*
 * The @options pointer points to a buffer containing the
* CCP option data for the compression being negotiated. It is
* formatted according to RFC1979, and describes the window
* size that the peer is requesting that we use in compressing
* data to be sent to it.
*
* Returns the pointer to the private state for the compressor,
* or NULL if we could not allocate enough memory.
*/
static void *z_comp_alloc(unsigned char *options, int opt_len)
{
struct ppp_deflate_state *state;
int w_size;
if (opt_len != CILEN_DEFLATE ||
(options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
options[1] != CILEN_DEFLATE ||
DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
options[3] != DEFLATE_CHK_SEQUENCE)
return NULL;
w_size = DEFLATE_SIZE(options[2]);
if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
return NULL;
state = kzalloc(sizeof(*state),
GFP_KERNEL);
if (state == NULL)
return NULL;
state->strm.next_in = NULL;
state->w_size = w_size;
state->strm.workspace = vmalloc(zlib_deflate_workspacesize(-w_size, 8));
if (state->strm.workspace == NULL)
goto out_free;
if (zlib_deflateInit2(&state->strm, Z_DEFAULT_COMPRESSION,
DEFLATE_METHOD_VAL, -w_size, 8, Z_DEFAULT_STRATEGY)
!= Z_OK)
goto out_free;
return (void *) state;
out_free:
z_comp_free(state);
return NULL;
}
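/*
 * For reference, the four CCP option bytes checked above are laid out
 * roughly as follows (the DEFLATE_* macros in <linux/ppp-comp.h> define
 * the exact encoding):
 *
 *	options[0]	CI_DEFLATE or CI_DEFLATE_DRAFT
 *	options[1]	CILEN_DEFLATE
 *	options[2]	window size code combined with DEFLATE_METHOD_VAL
 *	options[3]	DEFLATE_CHK_SEQUENCE
 *
 * e.g. w_size = 15 selects a 32 KB (2^15 byte) history window, handed
 * to zlib_deflateInit2() as -w_size, i.e. raw deflate without a zlib
 * header.
 */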
/**
* z_comp_init - initialize a previously-allocated compressor.
* @arg: pointer to the private state for the compressor
* @options: pointer to the CCP option data describing the
* compression that was negotiated with the peer
* @opt_len: length of the CCP option data at @options
* @unit: PPP unit number for diagnostic messages
* @hdrlen: ignored (present for backwards compatibility)
* @debug: debug flag; if non-zero, debug messages are printed.
*
* The CCP options described by @options must match the options
* specified when the compressor was allocated. The compressor
* history is reset. Returns 0 for failure (CCP options don't
* match) or 1 for success.
*/
static int z_comp_init(void *arg, unsigned char *options, int opt_len,
int unit, int hdrlen, int debug)
{
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
if (opt_len < CILEN_DEFLATE ||
(options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
options[1] != CILEN_DEFLATE ||
DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
DEFLATE_SIZE(options[2]) != state->w_size ||
options[3] != DEFLATE_CHK_SEQUENCE)
return 0;
state->seqno = 0;
state->unit = unit;
state->debug = debug;
zlib_deflateReset(&state->strm);
return 1;
}
/**
* z_comp_reset - reset a previously-allocated compressor.
* @arg: pointer to private state for the compressor.
*
* This clears the history for the compressor and makes it
* ready to start emitting a new compressed stream.
*/
static void z_comp_reset(void *arg)
{
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
state->seqno = 0;
zlib_deflateReset(&state->strm);
}
/**
* z_compress - compress a PPP packet with Deflate compression.
* @arg: pointer to private state for the compressor
* @rptr: uncompressed packet (input)
* @obuf: compressed packet (output)
* @isize: size of uncompressed packet
* @osize: space available at @obuf
*
* Returns the length of the compressed packet, or 0 if the
* packet is incompressible.
*/
static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
int isize, int osize)
{
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
int r, proto, off, olen, oavail;
unsigned char *wptr;
/*
* Check that the protocol is in the range we handle.
*/
proto = PPP_PROTOCOL(rptr);
if (proto > 0x3fff || proto == 0xfd || proto == 0xfb)
return 0;
/* Don't generate compressed packets which are larger than
the uncompressed packet. */
if (osize > isize)
osize = isize;
wptr = obuf;
/*
* Copy over the PPP header and store the 2-byte sequence number.
*/
wptr[0] = PPP_ADDRESS(rptr);
wptr[1] = PPP_CONTROL(rptr);
put_unaligned_be16(PPP_COMP, wptr + 2);
wptr += PPP_HDRLEN;
put_unaligned_be16(state->seqno, wptr);
wptr += DEFLATE_OVHD;
olen = PPP_HDRLEN + DEFLATE_OVHD;
state->strm.next_out = wptr;
state->strm.avail_out = oavail = osize - olen;
++state->seqno;
off = (proto > 0xff) ? 2 : 3; /* skip 1st proto byte if 0 */
rptr += off;
state->strm.next_in = rptr;
state->strm.avail_in = (isize - off);
for (;;) {
r = zlib_deflate(&state->strm, Z_PACKET_FLUSH);
if (r != Z_OK) {
if (state->debug)
printk(KERN_ERR
"z_compress: deflate returned %d\n", r);
break;
}
if (state->strm.avail_out == 0) {
olen += oavail;
state->strm.next_out = NULL;
state->strm.avail_out = oavail = 1000000;
} else {
break; /* all done */
}
}
olen += oavail - state->strm.avail_out;
/*
* See if we managed to reduce the size of the packet.
*/
if (olen < isize && olen <= osize) {
state->stats.comp_bytes += olen;
state->stats.comp_packets++;
} else {
state->stats.inc_bytes += isize;
state->stats.inc_packets++;
olen = 0;
}
state->stats.unc_bytes += isize;
state->stats.unc_packets++;
return olen;
}
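/*
 * Layout of the compressed frame built above (illustrative):
 *
 *	obuf[0]    address byte, copied from the original frame
 *	obuf[1]    control byte, copied from the original frame
 *	obuf[2-3]  protocol field = PPP_COMP, big endian
 *	obuf[4-5]  16-bit sequence number, big endian
 *	obuf[6-]   deflate output
 *
 * i.e. PPP_HDRLEN + DEFLATE_OVHD bytes precede the compressed payload.
 */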
/**
* z_comp_stats - return compression statistics for a compressor
* or decompressor.
* @arg: pointer to private space for the (de)compressor
* @stats: pointer to a struct compstat to receive the result.
*/
static void z_comp_stats(void *arg, struct compstat *stats)
{
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
*stats = state->stats;
}
/**
* z_decomp_free - Free the memory used by a decompressor.
* @arg: pointer to private space for the decompressor.
*/
static void z_decomp_free(void *arg)
{
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
if (state) {
vfree(state->strm.workspace);
kfree(state);
}
}
/**
* z_decomp_alloc - allocate space for a decompressor.
* @options: pointer to CCP option data
* @opt_len: length of the CCP option at @options.
*
 * The @options pointer points to a buffer containing the
* CCP option data for the compression being negotiated. It is
* formatted according to RFC1979, and describes the window
* size that we are requesting the peer to use in compressing
* data to be sent to us.
*
* Returns the pointer to the private state for the decompressor,
* or NULL if we could not allocate enough memory.
*/
static void *z_decomp_alloc(unsigned char *options, int opt_len)
{
struct ppp_deflate_state *state;
int w_size;
if (opt_len != CILEN_DEFLATE ||
(options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
options[1] != CILEN_DEFLATE ||
DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
options[3] != DEFLATE_CHK_SEQUENCE)
return NULL;
w_size = DEFLATE_SIZE(options[2]);
if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
return NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
return NULL;
state->w_size = w_size;
state->strm.next_out = NULL;
state->strm.workspace = vmalloc(zlib_inflate_workspacesize());
if (state->strm.workspace == NULL)
goto out_free;
if (zlib_inflateInit2(&state->strm, -w_size) != Z_OK)
goto out_free;
return (void *) state;
out_free:
z_decomp_free(state);
return NULL;
}
/**
* z_decomp_init - initialize a previously-allocated decompressor.
* @arg: pointer to the private state for the decompressor
* @options: pointer to the CCP option data describing the
* compression that was negotiated with the peer
* @opt_len: length of the CCP option data at @options
* @unit: PPP unit number for diagnostic messages
* @hdrlen: ignored (present for backwards compatibility)
* @mru: maximum length of decompressed packets
* @debug: debug flag; if non-zero, debug messages are printed.
*
* The CCP options described by @options must match the options
* specified when the decompressor was allocated. The decompressor
* history is reset. Returns 0 for failure (CCP options don't
* match) or 1 for success.
*/
static int z_decomp_init(void *arg, unsigned char *options, int opt_len,
int unit, int hdrlen, int mru, int debug)
{
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
if (opt_len < CILEN_DEFLATE ||
(options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
options[1] != CILEN_DEFLATE ||
DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
DEFLATE_SIZE(options[2]) != state->w_size ||
options[3] != DEFLATE_CHK_SEQUENCE)
return 0;
state->seqno = 0;
state->unit = unit;
state->debug = debug;
state->mru = mru;
zlib_inflateReset(&state->strm);
return 1;
}
/**
* z_decomp_reset - reset a previously-allocated decompressor.
* @arg: pointer to private state for the decompressor.
*
* This clears the history for the decompressor and makes it
* ready to receive a new compressed stream.
*/
static void z_decomp_reset(void *arg)
{
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
state->seqno = 0;
zlib_inflateReset(&state->strm);
}
/**
* z_decompress - decompress a Deflate-compressed packet.
* @arg: pointer to private state for the decompressor
* @ibuf: pointer to input (compressed) packet data
* @isize: length of input packet
* @obuf: pointer to space for output (decompressed) packet
* @osize: amount of space available at @obuf
*
* Because of patent problems, we return DECOMP_ERROR for errors
* found by inspecting the input data and for system problems, but
* DECOMP_FATALERROR for any errors which could possibly be said to
* be being detected "after" decompression. For DECOMP_ERROR,
* we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be
* infringing a patent of Motorola's if we do, so we take CCP down
* instead.
*
* Given that the frame has the correct sequence number and a good FCS,
* errors such as invalid codes in the input most likely indicate a
* bug, so we return DECOMP_FATALERROR for them in order to turn off
* compression, even though they are detected by inspecting the input.
*/
static int z_decompress(void *arg, unsigned char *ibuf, int isize,
unsigned char *obuf, int osize)
{
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
int olen, seq, r;
int decode_proto, overflow;
unsigned char overflow_buf[1];
if (isize <= PPP_HDRLEN + DEFLATE_OVHD) {
if (state->debug)
printk(KERN_DEBUG "z_decompress%d: short pkt (%d)\n",
state->unit, isize);
return DECOMP_ERROR;
}
/* Check the sequence number. */
seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
if (seq != (state->seqno & 0xffff)) {
if (state->debug)
printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
state->unit, seq, state->seqno & 0xffff);
return DECOMP_ERROR;
}
++state->seqno;
/*
* Fill in the first part of the PPP header. The protocol field
* comes from the decompressed data.
*/
obuf[0] = PPP_ADDRESS(ibuf);
obuf[1] = PPP_CONTROL(ibuf);
obuf[2] = 0;
/*
* Set up to call inflate. We set avail_out to 1 initially so we can
* look at the first byte of the output and decide whether we have
* a 1-byte or 2-byte protocol field.
*/
state->strm.next_in = ibuf + PPP_HDRLEN + DEFLATE_OVHD;
state->strm.avail_in = isize - (PPP_HDRLEN + DEFLATE_OVHD);
state->strm.next_out = obuf + 3;
state->strm.avail_out = 1;
decode_proto = 1;
overflow = 0;
/*
* Call inflate, supplying more input or output as needed.
*/
for (;;) {
r = zlib_inflate(&state->strm, Z_PACKET_FLUSH);
if (r != Z_OK) {
if (state->debug)
printk(KERN_DEBUG "z_decompress%d: inflate returned %d (%s)\n",
state->unit, r, (state->strm.msg? state->strm.msg: ""));
return DECOMP_FATALERROR;
}
if (state->strm.avail_out != 0)
break; /* all done */
if (decode_proto) {
state->strm.avail_out = osize - PPP_HDRLEN;
if ((obuf[3] & 1) == 0) {
/* 2-byte protocol field */
obuf[2] = obuf[3];
--state->strm.next_out;
++state->strm.avail_out;
}
decode_proto = 0;
} else if (!overflow) {
/*
* We've filled up the output buffer; the only way to
* find out whether inflate has any more characters
* left is to give it another byte of output space.
*/
state->strm.next_out = overflow_buf;
state->strm.avail_out = 1;
overflow = 1;
} else {
if (state->debug)
printk(KERN_DEBUG "z_decompress%d: ran out of mru\n",
state->unit);
return DECOMP_FATALERROR;
}
}
if (decode_proto) {
if (state->debug)
printk(KERN_DEBUG "z_decompress%d: didn't get proto\n",
state->unit);
return DECOMP_ERROR;
}
olen = osize + overflow - state->strm.avail_out;
state->stats.unc_bytes += olen;
state->stats.unc_packets++;
state->stats.comp_bytes += isize;
state->stats.comp_packets++;
return olen;
}
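/*
 * Protocol field example for the code above (illustration only): if the
 * first byte inflate produces is odd (e.g. 0x21 for IP), the peer
 * compressed the protocol field to a single byte, which stays in
 * obuf[3] with obuf[2] left 0; if it is even (e.g. 0x00 of the pair
 * 0x00 0x21), it is moved to obuf[2] and the next decompressed byte
 * becomes obuf[3].
 */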
/**
* z_incomp - add incompressible input data to the history.
* @arg: pointer to private state for the decompressor
* @ibuf: pointer to input packet data
* @icnt: length of input data.
*/
static void z_incomp(void *arg, unsigned char *ibuf, int icnt)
{
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
int proto, r;
/*
* Check that the protocol is one we handle.
*/
proto = PPP_PROTOCOL(ibuf);
if (proto > 0x3fff || proto == 0xfd || proto == 0xfb)
return;
++state->seqno;
/*
 * We start at either the 1st or 2nd byte of the protocol field,
* depending on whether the protocol value is compressible.
*/
state->strm.next_in = ibuf + 3;
state->strm.avail_in = icnt - 3;
if (proto > 0xff) {
--state->strm.next_in;
++state->strm.avail_in;
}
r = zlib_inflateIncomp(&state->strm);
if (r != Z_OK) {
/* gak! */
if (state->debug) {
printk(KERN_DEBUG "z_incomp%d: inflateIncomp returned %d (%s)\n",
state->unit, r, (state->strm.msg? state->strm.msg: ""));
}
return;
}
/*
* Update stats.
*/
state->stats.inc_bytes += icnt;
state->stats.inc_packets++;
state->stats.unc_bytes += icnt;
state->stats.unc_packets++;
}
/*************************************************************
* Module interface table
*************************************************************/
/* These are in ppp_generic.c */
extern int ppp_register_compressor (struct compressor *cp);
extern void ppp_unregister_compressor (struct compressor *cp);
/*
* Procedures exported to if_ppp.c.
*/
static struct compressor ppp_deflate = {
.compress_proto = CI_DEFLATE,
.comp_alloc = z_comp_alloc,
.comp_free = z_comp_free,
.comp_init = z_comp_init,
.comp_reset = z_comp_reset,
.compress = z_compress,
.comp_stat = z_comp_stats,
.decomp_alloc = z_decomp_alloc,
.decomp_free = z_decomp_free,
.decomp_init = z_decomp_init,
.decomp_reset = z_decomp_reset,
.decompress = z_decompress,
.incomp = z_incomp,
.decomp_stat = z_comp_stats,
.owner = THIS_MODULE
};
static struct compressor ppp_deflate_draft = {
.compress_proto = CI_DEFLATE_DRAFT,
.comp_alloc = z_comp_alloc,
.comp_free = z_comp_free,
.comp_init = z_comp_init,
.comp_reset = z_comp_reset,
.compress = z_compress,
.comp_stat = z_comp_stats,
.decomp_alloc = z_decomp_alloc,
.decomp_free = z_decomp_free,
.decomp_init = z_decomp_init,
.decomp_reset = z_decomp_reset,
.decompress = z_decompress,
.incomp = z_incomp,
.decomp_stat = z_comp_stats,
.owner = THIS_MODULE
};
static int __init deflate_init(void)
{
int rc;
rc = ppp_register_compressor(&ppp_deflate);
if (rc)
return rc;
rc = ppp_register_compressor(&ppp_deflate_draft);
if (rc) {
ppp_unregister_compressor(&ppp_deflate);
return rc;
}
pr_info("PPP Deflate Compression module registered\n");
return 0;
}
static void __exit deflate_cleanup(void)
{
ppp_unregister_compressor(&ppp_deflate);
ppp_unregister_compressor(&ppp_deflate_draft);
}
module_init(deflate_init);
module_exit(deflate_cleanup);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE));
MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE_DRAFT));
| linux-master | drivers/net/ppp/ppp_deflate.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PPP synchronous tty channel driver for Linux.
*
* This is a ppp channel driver that can be used with tty device drivers
* that are frame oriented, such as synchronous HDLC devices.
*
* Complete PPP frames without encoding/decoding are exchanged between
* the channel driver and the device driver.
*
* The async map IOCTL codes are implemented to keep the user mode
* applications happy if they call them. Synchronous PPP does not use
* the async maps.
*
* Copyright 1999 Paul Mackerras.
*
* Also touched by the grubby hands of Paul Fulghum [email protected]
*
* This driver provides the encapsulation and framing for sending
* and receiving PPP frames over sync serial lines. It relies on
* the generic PPP layer to give it frames to send and to process
* received frames. It implements the PPP line discipline.
*
* Part of the code in this driver was inspired by the old async-only
* PPP driver, written by Michael Callahan and Al Longyear, and
* subsequently hacked by Paul Mackerras.
*
* ==FILEVERSION 20040616==
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#define PPP_VERSION "2.4.2"
/* Structure for storing local state. */
struct syncppp {
struct tty_struct *tty;
unsigned int flags;
unsigned int rbits;
int mru;
spinlock_t xmit_lock;
spinlock_t recv_lock;
unsigned long xmit_flags;
u32 xaccm[8];
u32 raccm;
unsigned int bytes_sent;
unsigned int bytes_rcvd;
struct sk_buff *tpkt;
unsigned long last_xmit;
struct sk_buff_head rqueue;
struct tasklet_struct tsk;
refcount_t refcnt;
struct completion dead_cmp;
struct ppp_channel chan; /* interface to generic ppp layer */
};
/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP 0
#define XMIT_FULL 1
/* Bits in rbits */
#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
#define PPPSYNC_MAX_RQLEN 32 /* arbitrary */
/*
* Prototypes.
*/
static struct sk_buff* ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *);
static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
unsigned long arg);
static void ppp_sync_process(struct tasklet_struct *t);
static int ppp_sync_push(struct syncppp *ap);
static void ppp_sync_flush_output(struct syncppp *ap);
static void ppp_sync_input(struct syncppp *ap, const u8 *buf, const u8 *flags,
int count);
static const struct ppp_channel_ops sync_ops = {
.start_xmit = ppp_sync_send,
.ioctl = ppp_sync_ioctl,
};
/*
* Utility procedure to print a buffer in hex/ascii
*/
static void
ppp_print_buffer (const char *name, const __u8 *buf, int count)
{
if (name != NULL)
printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);
print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, count);
}
/*
* Routines implementing the synchronous PPP line discipline.
*/
/*
* We have a potential race on dereferencing tty->disc_data,
* because the tty layer provides no locking at all - thus one
* cpu could be running ppp_synctty_receive while another
* calls ppp_synctty_close, which zeroes tty->disc_data and
* frees the memory that ppp_synctty_receive is using. The best
* way to fix this is to use a rwlock in the tty struct, but for now
* we use a single global rwlock for all ttys in ppp line discipline.
*
* FIXME: Fixed in tty_io nowadays.
*/
static DEFINE_RWLOCK(disc_data_lock);
static struct syncppp *sp_get(struct tty_struct *tty)
{
struct syncppp *ap;
read_lock(&disc_data_lock);
ap = tty->disc_data;
if (ap != NULL)
refcount_inc(&ap->refcnt);
read_unlock(&disc_data_lock);
return ap;
}
static void sp_put(struct syncppp *ap)
{
if (refcount_dec_and_test(&ap->refcnt))
complete(&ap->dead_cmp);
}
/*
* Called when a tty is put into sync-PPP line discipline.
*/
static int
ppp_sync_open(struct tty_struct *tty)
{
struct syncppp *ap;
int err;
int speed;
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
ap = kzalloc(sizeof(*ap), GFP_KERNEL);
err = -ENOMEM;
if (!ap)
goto out;
/* initialize the syncppp structure */
ap->tty = tty;
ap->mru = PPP_MRU;
spin_lock_init(&ap->xmit_lock);
spin_lock_init(&ap->recv_lock);
ap->xaccm[0] = ~0U;
ap->xaccm[3] = 0x60000000U;
ap->raccm = ~0U;
skb_queue_head_init(&ap->rqueue);
tasklet_setup(&ap->tsk, ppp_sync_process);
refcount_set(&ap->refcnt, 1);
init_completion(&ap->dead_cmp);
ap->chan.private = ap;
ap->chan.ops = &sync_ops;
ap->chan.mtu = PPP_MRU;
ap->chan.hdrlen = 2; /* for A/C bytes */
speed = tty_get_baud_rate(tty);
ap->chan.speed = speed;
err = ppp_register_channel(&ap->chan);
if (err)
goto out_free;
tty->disc_data = ap;
tty->receive_room = 65536;
return 0;
out_free:
kfree(ap);
out:
return err;
}
/*
* Called when the tty is put into another line discipline
* or it hangs up. We have to wait for any cpu currently
* executing in any of the other ppp_synctty_* routines to
* finish before we can call ppp_unregister_channel and free
* the syncppp struct. This routine must be called from
* process context, not interrupt or softirq context.
*/
static void
ppp_sync_close(struct tty_struct *tty)
{
struct syncppp *ap;
write_lock_irq(&disc_data_lock);
ap = tty->disc_data;
tty->disc_data = NULL;
write_unlock_irq(&disc_data_lock);
if (!ap)
return;
/*
* We have now ensured that nobody can start using ap from now
* on, but we have to wait for all existing users to finish.
* Note that ppp_unregister_channel ensures that no calls to
* our channel ops (i.e. ppp_sync_send/ioctl) are in progress
* by the time it returns.
*/
if (!refcount_dec_and_test(&ap->refcnt))
wait_for_completion(&ap->dead_cmp);
tasklet_kill(&ap->tsk);
ppp_unregister_channel(&ap->chan);
skb_queue_purge(&ap->rqueue);
kfree_skb(ap->tpkt);
kfree(ap);
}
/*
* Called on tty hangup in process context.
*
* Wait for I/O to driver to complete and unregister PPP channel.
* This is already done by the close routine, so just call that.
*/
static void ppp_sync_hangup(struct tty_struct *tty)
{
ppp_sync_close(tty);
}
/*
* Read does nothing - no data is ever available this way.
* Pppd reads and writes packets via /dev/ppp instead.
*/
static ssize_t
ppp_sync_read(struct tty_struct *tty, struct file *file, u8 *buf, size_t count,
void **cookie, unsigned long offset)
{
return -EAGAIN;
}
/*
* Write on the tty does nothing, the packets all come in
* from the ppp generic stuff.
*/
static ssize_t
ppp_sync_write(struct tty_struct *tty, struct file *file, const u8 *buf,
size_t count)
{
return -EAGAIN;
}
static int
ppp_synctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
struct syncppp *ap = sp_get(tty);
int __user *p = (int __user *)arg;
int err, val;
if (!ap)
return -ENXIO;
err = -EFAULT;
switch (cmd) {
case PPPIOCGCHAN:
err = -EFAULT;
if (put_user(ppp_channel_index(&ap->chan), p))
break;
err = 0;
break;
case PPPIOCGUNIT:
err = -EFAULT;
if (put_user(ppp_unit_number(&ap->chan), p))
break;
err = 0;
break;
case TCFLSH:
/* flush our buffers and the serial port's buffer */
if (arg == TCIOFLUSH || arg == TCOFLUSH)
ppp_sync_flush_output(ap);
err = n_tty_ioctl_helper(tty, cmd, arg);
break;
case FIONREAD:
val = 0;
if (put_user(val, p))
break;
err = 0;
break;
default:
err = tty_mode_ioctl(tty, cmd, arg);
break;
}
sp_put(ap);
return err;
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_sync_receive(struct tty_struct *tty, const u8 *buf, const u8 *cflags,
size_t count)
{
struct syncppp *ap = sp_get(tty);
unsigned long flags;
if (!ap)
return;
spin_lock_irqsave(&ap->recv_lock, flags);
ppp_sync_input(ap, buf, cflags, count);
spin_unlock_irqrestore(&ap->recv_lock, flags);
if (!skb_queue_empty(&ap->rqueue))
tasklet_schedule(&ap->tsk);
sp_put(ap);
tty_unthrottle(tty);
}
static void
ppp_sync_wakeup(struct tty_struct *tty)
{
struct syncppp *ap = sp_get(tty);
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
if (!ap)
return;
set_bit(XMIT_WAKEUP, &ap->xmit_flags);
tasklet_schedule(&ap->tsk);
sp_put(ap);
}
static struct tty_ldisc_ops ppp_sync_ldisc = {
.owner = THIS_MODULE,
.num = N_SYNC_PPP,
.name = "pppsync",
.open = ppp_sync_open,
.close = ppp_sync_close,
.hangup = ppp_sync_hangup,
.read = ppp_sync_read,
.write = ppp_sync_write,
.ioctl = ppp_synctty_ioctl,
.receive_buf = ppp_sync_receive,
.write_wakeup = ppp_sync_wakeup,
};
static int __init
ppp_sync_init(void)
{
int err;
err = tty_register_ldisc(&ppp_sync_ldisc);
if (err != 0)
printk(KERN_ERR "PPP_sync: error %d registering line disc.\n",
err);
return err;
}
/*
* The following routines provide the PPP channel interface.
*/
static int
ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
struct syncppp *ap = chan->private;
int err, val;
u32 accm[8];
void __user *argp = (void __user *)arg;
u32 __user *p = argp;
err = -EFAULT;
switch (cmd) {
case PPPIOCGFLAGS:
val = ap->flags | ap->rbits;
if (put_user(val, (int __user *) argp))
break;
err = 0;
break;
case PPPIOCSFLAGS:
if (get_user(val, (int __user *) argp))
break;
ap->flags = val & ~SC_RCV_BITS;
spin_lock_irq(&ap->recv_lock);
ap->rbits = val & SC_RCV_BITS;
spin_unlock_irq(&ap->recv_lock);
err = 0;
break;
case PPPIOCGASYNCMAP:
if (put_user(ap->xaccm[0], p))
break;
err = 0;
break;
case PPPIOCSASYNCMAP:
if (get_user(ap->xaccm[0], p))
break;
err = 0;
break;
case PPPIOCGRASYNCMAP:
if (put_user(ap->raccm, p))
break;
err = 0;
break;
case PPPIOCSRASYNCMAP:
if (get_user(ap->raccm, p))
break;
err = 0;
break;
case PPPIOCGXASYNCMAP:
if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
break;
err = 0;
break;
case PPPIOCSXASYNCMAP:
if (copy_from_user(accm, argp, sizeof(accm)))
break;
accm[2] &= ~0x40000000U; /* can't escape 0x5e */
accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
err = 0;
break;
case PPPIOCGMRU:
if (put_user(ap->mru, (int __user *) argp))
break;
err = 0;
break;
case PPPIOCSMRU:
if (get_user(val, (int __user *) argp))
break;
if (val < PPP_MRU)
val = PPP_MRU;
ap->mru = val;
err = 0;
break;
default:
err = -ENOTTY;
}
return err;
}
/*
* This is called at softirq level to deliver received packets
* to the ppp_generic code, and to tell the ppp_generic code
* if we can accept more output now.
*/
static void ppp_sync_process(struct tasklet_struct *t)
{
struct syncppp *ap = from_tasklet(ap, t, tsk);
struct sk_buff *skb;
/* process received packets */
while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
if (skb->len == 0) {
/* zero length buffers indicate error */
ppp_input_error(&ap->chan, 0);
kfree_skb(skb);
}
else
ppp_input(&ap->chan, skb);
}
/* try to push more stuff out */
if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_sync_push(ap))
ppp_output_wakeup(&ap->chan);
}
/*
* Procedures for encapsulation and framing.
*/
static struct sk_buff*
ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
{
int proto;
unsigned char *data;
int islcp;
data = skb->data;
proto = get_unaligned_be16(data);
/* LCP packets with codes between 1 (configure-request)
* and 7 (code-reject) must be sent as though no options
* have been negotiated.
*/
islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
/* compress protocol field if option enabled */
if (data[0] == 0 && (ap->flags & SC_COMP_PROT) && !islcp)
skb_pull(skb,1);
/* prepend address/control fields if necessary */
if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
if (skb_headroom(skb) < 2) {
struct sk_buff *npkt = dev_alloc_skb(skb->len + 2);
if (npkt == NULL) {
kfree_skb(skb);
return NULL;
}
skb_reserve(npkt,2);
skb_copy_from_linear_data(skb,
skb_put(npkt, skb->len), skb->len);
consume_skb(skb);
skb = npkt;
}
skb_push(skb,2);
skb->data[0] = PPP_ALLSTATIONS;
skb->data[1] = PPP_UI;
}
ap->last_xmit = jiffies;
if (skb && ap->flags & SC_LOG_OUTPKT)
ppp_print_buffer ("send buffer", skb->data, skb->len);
return skb;
}
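/*
 * Illustration (a sketch; the byte values are examples): for an
 * outbound IPv4 frame, the header handling above produces
 *
 *   00 21 45 ...        protocol 0x0021 (IP), uncompressed
 *   21 45 ...           after SC_COMP_PROT drops the leading 0x00
 *   ff 03 21 45 ...     with PPP_ALLSTATIONS/PPP_UI prepended when
 *                       SC_COMP_AC has not been negotiated
 *
 * LCP negotiation frames (codes 1-7) always keep both the
 * address/control bytes and the full protocol field.
 */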
/*
* Transmit-side routines.
*/
/*
* Send a packet to the peer over a sync tty line.
* Returns 1 iff the packet was accepted.
* If the packet was not accepted, we will call ppp_output_wakeup
* at some later time.
*/
static int
ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb)
{
struct syncppp *ap = chan->private;
ppp_sync_push(ap);
if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
return 0; /* already full */
skb = ppp_sync_txmunge(ap, skb);
if (skb != NULL)
ap->tpkt = skb;
else
clear_bit(XMIT_FULL, &ap->xmit_flags);
ppp_sync_push(ap);
return 1;
}
/*
* Push as much data as possible out to the tty.
*/
static int
ppp_sync_push(struct syncppp *ap)
{
int sent, done = 0;
struct tty_struct *tty = ap->tty;
int tty_stuffed = 0;
if (!spin_trylock_bh(&ap->xmit_lock))
return 0;
for (;;) {
if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
tty_stuffed = 0;
if (!tty_stuffed && ap->tpkt) {
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
sent = tty->ops->write(tty, ap->tpkt->data, ap->tpkt->len);
if (sent < 0)
goto flush; /* error, e.g. loss of CD */
if (sent < ap->tpkt->len) {
tty_stuffed = 1;
} else {
consume_skb(ap->tpkt);
ap->tpkt = NULL;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
continue;
}
/* haven't made any progress */
spin_unlock_bh(&ap->xmit_lock);
if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
(!tty_stuffed && ap->tpkt)))
break;
if (!spin_trylock_bh(&ap->xmit_lock))
break;
}
return done;
flush:
if (ap->tpkt) {
kfree_skb(ap->tpkt);
ap->tpkt = NULL;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
spin_unlock_bh(&ap->xmit_lock);
return done;
}
/*
* Flush output from our internal buffers.
* Called for the TCFLSH ioctl.
*/
static void
ppp_sync_flush_output(struct syncppp *ap)
{
int done = 0;
spin_lock_bh(&ap->xmit_lock);
if (ap->tpkt != NULL) {
kfree_skb(ap->tpkt);
ap->tpkt = NULL;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
spin_unlock_bh(&ap->xmit_lock);
if (done)
ppp_output_wakeup(&ap->chan);
}
/*
* Receive-side routines.
*/
/* called when the tty driver has data for us.
*
* Data is frame oriented: each call to ppp_sync_input is considered
* a whole frame. If the 1st flag byte is non-zero then the whole
* frame is considered to be in error and is tossed.
*/
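/*
 * Illustration (a sketch; values are examples): a synchronous serial
 * driver hands one complete HDLC frame to the line discipline per
 * receive_buf() call, e.g.
 *
 *   buf   = ff 03 00 21 45 ...   (addr/ctrl, protocol, IP payload)
 *   flags = NULL, or a buffer whose first byte is 0 on success
 *   count = length of the frame in bytes
 *
 * A non-zero first flag byte makes ppp_sync_input() discard the frame
 * and queue a zero-length skb as an error indication.
 */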
static void
ppp_sync_input(struct syncppp *ap, const u8 *buf, const u8 *flags, int count)
{
struct sk_buff *skb;
unsigned char *p;
if (count == 0)
return;
if (ap->flags & SC_LOG_INPKT)
ppp_print_buffer ("receive buffer", buf, count);
/* stuff the chars in the skb */
skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
if (!skb) {
printk(KERN_ERR "PPPsync: no memory (input pkt)\n");
goto err;
}
/* Try to get the payload 4-byte aligned */
if (buf[0] != PPP_ALLSTATIONS)
skb_reserve(skb, 2 + (buf[0] & 1));
if (flags && *flags) {
/* error flag set, ignore frame */
goto err;
} else if (count > skb_tailroom(skb)) {
/* packet overflowed MRU */
goto err;
}
skb_put_data(skb, buf, count);
/* strip address/control field if present */
p = skb->data;
if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
/* chop off address/control */
if (skb->len < 3)
goto err;
p = skb_pull(skb, 2);
}
/* PPP packet length should be >= 2 bytes when protocol field is not
* compressed.
*/
if (!(p[0] & 0x01) && skb->len < 2)
goto err;
/* queue the frame to be processed */
skb_queue_tail(&ap->rqueue, skb);
return;
err:
/* queue zero length packet as error indication */
if (skb || (skb = dev_alloc_skb(0))) {
skb_trim(skb, 0);
skb_queue_tail(&ap->rqueue, skb);
}
}
static void __exit
ppp_sync_cleanup(void)
{
tty_unregister_ldisc(&ppp_sync_ldisc);
}
module_init(ppp_sync_init);
module_exit(ppp_sync_cleanup);
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_SYNC_PPP);
| linux-master | drivers/net/ppp/ppp_synctty.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/** -*- linux-c -*- ***********************************************************
* Linux PPP over Ethernet (PPPoX/PPPoE) Sockets
*
* PPPoX --- Generic PPP encapsulation socket family
* PPPoE --- PPP over Ethernet (RFC 2516)
*
* Version: 0.7.0
*
* 070228 : Fix to allow multiple sessions with same remote MAC and same
* session id by including the local device ifindex in the
* tuple identifying a session. This also ensures packets can't
* be injected into a session from interfaces other than the one
* specified by userspace. Florian Zumbiehl <[email protected]>
* (Oh, BTW, this one is YYMMDD, in case you were wondering ...)
* 220102 : Fix module use count on failure in pppoe_create, pppox_sk -acme
* 030700 : Fixed connect logic to allow for disconnect.
* 270700 : Fixed potential SMP problems; we must protect against
* simultaneous invocation of ppp_input
* and ppp_unregister_channel.
* 040800 : Respect reference count mechanisms on net-devices.
* 200800 : fix kfree(skb) in pppoe_rcv (acme)
* Module reference count is decremented in the right spot now,
* guards against sock_put not actually freeing the sk
* in pppoe_release.
* 051000 : Initialization cleanup.
* 111100 : Fix recvmsg.
* 050101 : Fix PADT processing.
* 140501 : Use pppoe_rcv_core to handle all backlog. (Alexey)
* 170701 : Do not lock_sock with rwlock held. (DaveM)
* Ignore discovery frames if user has socket
* locked. (DaveM)
* Ignore return value of dev_queue_xmit in __pppoe_xmit
* or else we may kfree an SKB twice. (DaveM)
* 190701 : When doing copies of skb's in __pppoe_xmit, always delete
* the original skb that was passed in on success, never on
* failure. Delete the copy of the skb on failure to avoid
* a memory leak.
* 081001 : Misc. cleanup (licence string, non-blocking, prevent
* reference of device on close).
* 121301 : New ppp channels interface; cannot unregister a channel
* from interrupts. Thus, we mark the socket as a ZOMBIE
* and do the unregistration later.
* 081002 : seq_file support for proc stuff -acme
* 111602 : Merge all 2.4 fixes into 2.5/2.6 tree. Label 2.5/2.6
* as version 0.7. Spacing cleanup.
* Author: Michal Ostrowski <[email protected]>
* Contributors:
* Arnaldo Carvalho de Melo <[email protected]>
* David S. Miller ([email protected])
*
* License:
*/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/if_pppox.h>
#include <linux/ppp_channel.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/notifier.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#define PPPOE_HASH_BITS CONFIG_PPPOE_HASH_BITS
#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1)
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
static const struct proto_ops pppoe_ops;
static const struct ppp_channel_ops pppoe_chan_ops;
/* per-net private data for this module */
static unsigned int pppoe_net_id __read_mostly;
struct pppoe_net {
/*
* We could use a _single_ hash table for all nets by
* injecting the net id into the hash, but that would
* lengthen the hash chains and add a few extra
* comparisons; keeping a table per net also means
* less lock contention on SMP.
*/
struct pppox_sock *hash_table[PPPOE_HASH_SIZE];
rwlock_t hash_lock;
};
/*
* PPPoE could be in the following stages:
* 1) Discovery stage (to obtain remote MAC and Session ID)
* 2) Session stage (MAC and SID are known)
*
* Ethernet frames have a special tag for this but
* we use simpler approach based on session id
*/
static inline bool stage_session(__be16 sid)
{
return sid != 0;
}
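/*
 * Illustration of the usual call flow (a sketch, not code from this
 * file): the discovery exchange (PADI/PADO/PADR/PADS) is handled
 * entirely in userspace, typically over a PF_PACKET socket; the kernel
 * only watches for PADT frames in pppoe_disc_rcv(). Once the access
 * concentrator assigns a non-zero session id, userspace connects a
 * PX_PROTO_OE socket with that sid, stage_session() becomes true, and
 * pppoe_connect() binds the socket into the session hash table.
 */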
static inline struct pppoe_net *pppoe_pernet(struct net *net)
{
return net_generic(net, pppoe_net_id);
}
static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b)
{
return a->sid == b->sid && ether_addr_equal(a->remote, b->remote);
}
static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr)
{
return a->sid == sid && ether_addr_equal(a->remote, addr);
}
#if 8 % PPPOE_HASH_BITS
#error 8 must be a multiple of PPPOE_HASH_BITS
#endif
static int hash_item(__be16 sid, unsigned char *addr)
{
unsigned char hash = 0;
unsigned int i;
for (i = 0; i < ETH_ALEN; i++)
hash ^= addr[i];
for (i = 0; i < sizeof(sid_t) * 8; i += 8)
hash ^= (__force __u32)sid >> i;
for (i = 8; (i >>= 1) >= PPPOE_HASH_BITS;)
hash ^= hash >> i;
return hash & PPPOE_HASH_MASK;
}
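/*
 * Worked example (illustrative only, assuming 4 hash bits): for remote
 * 00:11:22:33:44:55 and session id 0x1234, the MAC bytes XOR to 0x11
 * and the two sid bytes contribute 0x12 ^ 0x34 = 0x26, giving 0x37;
 * the folding loop then XORs in 0x37 >> 4, yielding 0x34, and the final
 * mask keeps 0x34 & 0x0f = 4 as the bucket index.
 */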
/**********************************************************************
*
* Set/get/delete/rehash items (internal versions)
*
**********************************************************************/
static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid,
unsigned char *addr, int ifindex)
{
int hash = hash_item(sid, addr);
struct pppox_sock *ret;
ret = pn->hash_table[hash];
while (ret) {
if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
ret->pppoe_ifindex == ifindex)
return ret;
ret = ret->next;
}
return NULL;
}
static int __set_item(struct pppoe_net *pn, struct pppox_sock *po)
{
int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
struct pppox_sock *ret;
ret = pn->hash_table[hash];
while (ret) {
if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) &&
ret->pppoe_ifindex == po->pppoe_ifindex)
return -EALREADY;
ret = ret->next;
}
po->next = pn->hash_table[hash];
pn->hash_table[hash] = po;
return 0;
}
static void __delete_item(struct pppoe_net *pn, __be16 sid,
char *addr, int ifindex)
{
int hash = hash_item(sid, addr);
struct pppox_sock *ret, **src;
ret = pn->hash_table[hash];
src = &pn->hash_table[hash];
while (ret) {
if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
ret->pppoe_ifindex == ifindex) {
*src = ret->next;
break;
}
src = &ret->next;
ret = ret->next;
}
}
/**********************************************************************
*
* Set/get/delete/rehash items
*
**********************************************************************/
static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid,
unsigned char *addr, int ifindex)
{
struct pppox_sock *po;
read_lock_bh(&pn->hash_lock);
po = __get_item(pn, sid, addr, ifindex);
if (po)
sock_hold(sk_pppox(po));
read_unlock_bh(&pn->hash_lock);
return po;
}
static inline struct pppox_sock *get_item_by_addr(struct net *net,
struct sockaddr_pppox *sp)
{
struct net_device *dev;
struct pppoe_net *pn;
struct pppox_sock *pppox_sock = NULL;
int ifindex;
rcu_read_lock();
dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
if (dev) {
ifindex = dev->ifindex;
pn = pppoe_pernet(net);
pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
sp->sa_addr.pppoe.remote, ifindex);
}
rcu_read_unlock();
return pppox_sock;
}
static inline void delete_item(struct pppoe_net *pn, __be16 sid,
char *addr, int ifindex)
{
write_lock_bh(&pn->hash_lock);
__delete_item(pn, sid, addr, ifindex);
write_unlock_bh(&pn->hash_lock);
}
/***************************************************************************
*
* Handler for device events.
* Certain device events require that sockets be unconnected.
*
**************************************************************************/
static void pppoe_flush_dev(struct net_device *dev)
{
struct pppoe_net *pn;
int i;
pn = pppoe_pernet(dev_net(dev));
write_lock_bh(&pn->hash_lock);
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
struct pppox_sock *po = pn->hash_table[i];
struct sock *sk;
while (po) {
while (po && po->pppoe_dev != dev) {
po = po->next;
}
if (!po)
break;
sk = sk_pppox(po);
/* We always grab the socket lock, followed by the
* hash_lock, in that order. Since we should hold the
* sock lock while doing any unbinding, we need to
* release the lock we're holding. Hold a reference to
* the sock so it doesn't disappear as we're jumping
* between locks.
*/
sock_hold(sk);
write_unlock_bh(&pn->hash_lock);
lock_sock(sk);
if (po->pppoe_dev == dev &&
sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
pppox_unbind_sock(sk);
sk->sk_state_change(sk);
po->pppoe_dev = NULL;
dev_put(dev);
}
release_sock(sk);
sock_put(sk);
/* Restart the process from the start of the current
* hash chain. We dropped locks so the world may have
* changed from underneath us.
*/
BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
write_lock_bh(&pn->hash_lock);
po = pn->hash_table[i];
}
}
write_unlock_bh(&pn->hash_lock);
}
static int pppoe_device_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
/* Only look at sockets that are using this specific device. */
switch (event) {
case NETDEV_CHANGEADDR:
case NETDEV_CHANGEMTU:
/* A change in mtu or address is a bad thing, requiring
* LCP re-negotiation.
*/
case NETDEV_GOING_DOWN:
case NETDEV_DOWN:
/* Find every socket on this device and kill it. */
pppoe_flush_dev(dev);
break;
default:
break;
}
return NOTIFY_DONE;
}
static struct notifier_block pppoe_notifier = {
.notifier_call = pppoe_device_event,
};
/************************************************************************
*
* Do the real work of receiving a PPPoE Session frame.
*
***********************************************************************/
static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
{
struct pppox_sock *po = pppox_sk(sk);
struct pppox_sock *relay_po;
/* Backlog receive. Semantics of backlog rcv preclude any code from
* executing in lock_sock()/release_sock() bounds; meaning sk->sk_state
* can't change.
*/
if (skb->pkt_type == PACKET_OTHERHOST)
goto abort_kfree;
if (sk->sk_state & PPPOX_BOUND) {
ppp_input(&po->chan, skb);
} else if (sk->sk_state & PPPOX_RELAY) {
relay_po = get_item_by_addr(sock_net(sk),
&po->pppoe_relay);
if (relay_po == NULL)
goto abort_kfree;
if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0)
goto abort_put;
if (!__pppoe_xmit(sk_pppox(relay_po), skb))
goto abort_put;
sock_put(sk_pppox(relay_po));
} else {
if (sock_queue_rcv_skb(sk, skb))
goto abort_kfree;
}
return NET_RX_SUCCESS;
abort_put:
sock_put(sk_pppox(relay_po));
abort_kfree:
kfree_skb(skb);
return NET_RX_DROP;
}
/************************************************************************
*
* Receive wrapper called in BH context.
*
***********************************************************************/
static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct pppoe_hdr *ph;
struct pppox_sock *po;
struct pppoe_net *pn;
int len;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
goto out;
if (skb_mac_header_len(skb) < ETH_HLEN)
goto drop;
if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
goto drop;
ph = pppoe_hdr(skb);
len = ntohs(ph->length);
skb_pull_rcsum(skb, sizeof(*ph));
if (skb->len < len)
goto drop;
if (pskb_trim_rcsum(skb, len))
goto drop;
ph = pppoe_hdr(skb);
pn = pppoe_pernet(dev_net(dev));
/* Note that get_item does a sock_hold(), so sk_pppox(po)
* is known to be safe.
*/
po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
if (!po)
goto drop;
return sk_receive_skb(sk_pppox(po), skb, 0);
drop:
kfree_skb(skb);
out:
return NET_RX_DROP;
}
static void pppoe_unbind_sock_work(struct work_struct *work)
{
struct pppox_sock *po = container_of(work, struct pppox_sock,
proto.pppoe.padt_work);
struct sock *sk = sk_pppox(po);
lock_sock(sk);
if (po->pppoe_dev) {
dev_put(po->pppoe_dev);
po->pppoe_dev = NULL;
}
pppox_unbind_sock(sk);
release_sock(sk);
sock_put(sk);
}
/************************************************************************
*
* Receive a PPPoE Discovery frame.
* This is solely for detection of PADT frames
*
***********************************************************************/
static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct pppoe_hdr *ph;
struct pppox_sock *po;
struct pppoe_net *pn;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
goto out;
if (skb->pkt_type != PACKET_HOST)
goto abort;
if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
goto abort;
ph = pppoe_hdr(skb);
if (ph->code != PADT_CODE)
goto abort;
pn = pppoe_pernet(dev_net(dev));
po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
if (po)
if (!schedule_work(&po->proto.pppoe.padt_work))
sock_put(sk_pppox(po));
abort:
kfree_skb(skb);
out:
return NET_RX_SUCCESS; /* Lies... :-) */
}
static struct packet_type pppoes_ptype __read_mostly = {
.type = cpu_to_be16(ETH_P_PPP_SES),
.func = pppoe_rcv,
};
static struct packet_type pppoed_ptype __read_mostly = {
.type = cpu_to_be16(ETH_P_PPP_DISC),
.func = pppoe_disc_rcv,
};
static struct proto pppoe_sk_proto __read_mostly = {
.name = "PPPOE",
.owner = THIS_MODULE,
.obj_size = sizeof(struct pppox_sock),
};
/***********************************************************************
*
* Initialize a new struct sock.
*
**********************************************************************/
static int pppoe_create(struct net *net, struct socket *sock, int kern)
{
struct sock *sk;
sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, kern);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sock->state = SS_UNCONNECTED;
sock->ops = &pppoe_ops;
sk->sk_backlog_rcv = pppoe_rcv_core;
sk->sk_state = PPPOX_NONE;
sk->sk_type = SOCK_STREAM;
sk->sk_family = PF_PPPOX;
sk->sk_protocol = PX_PROTO_OE;
INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work,
pppoe_unbind_sock_work);
return 0;
}
static int pppoe_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct pppox_sock *po;
struct pppoe_net *pn;
struct net *net = NULL;
if (!sk)
return 0;
lock_sock(sk);
if (sock_flag(sk, SOCK_DEAD)) {
release_sock(sk);
return -EBADF;
}
po = pppox_sk(sk);
if (po->pppoe_dev) {
dev_put(po->pppoe_dev);
po->pppoe_dev = NULL;
}
pppox_unbind_sock(sk);
/* Signal the death of the socket. */
sk->sk_state = PPPOX_DEAD;
net = sock_net(sk);
pn = pppoe_pernet(net);
/*
* protect "po" from concurrent updates
* on pppoe_flush_dev
*/
delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
po->pppoe_ifindex);
sock_orphan(sk);
sock->sk = NULL;
skb_queue_purge(&sk->sk_receive_queue);
release_sock(sk);
sock_put(sk);
return 0;
}
static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
int sockaddr_len, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
struct pppox_sock *po = pppox_sk(sk);
struct net_device *dev = NULL;
struct pppoe_net *pn;
struct net *net = NULL;
int error;
lock_sock(sk);
error = -EINVAL;
if (sockaddr_len != sizeof(struct sockaddr_pppox))
goto end;
if (sp->sa_protocol != PX_PROTO_OE)
goto end;
/* Check for already bound sockets */
error = -EBUSY;
if ((sk->sk_state & PPPOX_CONNECTED) &&
stage_session(sp->sa_addr.pppoe.sid))
goto end;
/* Check for already disconnected sockets, on attempts to disconnect */
error = -EALREADY;
if ((sk->sk_state & PPPOX_DEAD) &&
!stage_session(sp->sa_addr.pppoe.sid))
goto end;
error = 0;
/* Delete the old binding */
if (stage_session(po->pppoe_pa.sid)) {
pppox_unbind_sock(sk);
pn = pppoe_pernet(sock_net(sk));
delete_item(pn, po->pppoe_pa.sid,
po->pppoe_pa.remote, po->pppoe_ifindex);
if (po->pppoe_dev) {
dev_put(po->pppoe_dev);
po->pppoe_dev = NULL;
}
po->pppoe_ifindex = 0;
memset(&po->pppoe_pa, 0, sizeof(po->pppoe_pa));
memset(&po->pppoe_relay, 0, sizeof(po->pppoe_relay));
memset(&po->chan, 0, sizeof(po->chan));
po->next = NULL;
po->num = 0;
sk->sk_state = PPPOX_NONE;
}
/* Re-bind in session stage only */
if (stage_session(sp->sa_addr.pppoe.sid)) {
error = -ENODEV;
net = sock_net(sk);
dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
if (!dev)
goto err_put;
po->pppoe_dev = dev;
po->pppoe_ifindex = dev->ifindex;
pn = pppoe_pernet(net);
if (!(dev->flags & IFF_UP)) {
goto err_put;
}
memcpy(&po->pppoe_pa,
&sp->sa_addr.pppoe,
sizeof(struct pppoe_addr));
write_lock_bh(&pn->hash_lock);
error = __set_item(pn, po);
write_unlock_bh(&pn->hash_lock);
if (error < 0)
goto err_put;
po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
dev->hard_header_len);
po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
po->chan.ops = &pppoe_chan_ops;
error = ppp_register_net_channel(dev_net(dev), &po->chan);
if (error) {
delete_item(pn, po->pppoe_pa.sid,
po->pppoe_pa.remote, po->pppoe_ifindex);
goto err_put;
}
sk->sk_state = PPPOX_CONNECTED;
}
po->num = sp->sa_addr.pppoe.sid;
end:
release_sock(sk);
return error;
err_put:
if (po->pppoe_dev) {
dev_put(po->pppoe_dev);
po->pppoe_dev = NULL;
}
goto end;
}
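/*
 * Note on the channel MTU computed above (an illustration): each
 * session frame carries the 6-byte PPPoE header plus the 2-byte PPP
 * protocol field, so with a standard 1500-byte Ethernet MTU the channel
 * advertises 1500 - 6 - 2 = 1492 bytes to the generic PPP layer.
 */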
static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
int len = sizeof(struct sockaddr_pppox);
struct sockaddr_pppox sp;
sp.sa_family = AF_PPPOX;
sp.sa_protocol = PX_PROTO_OE;
memcpy(&sp.sa_addr.pppoe, &pppox_sk(sock->sk)->pppoe_pa,
sizeof(struct pppoe_addr));
memcpy(uaddr, &sp, len);
return len;
}
static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct sock *sk = sock->sk;
struct pppox_sock *po = pppox_sk(sk);
int val;
int err;
switch (cmd) {
case PPPIOCGMRU:
err = -ENXIO;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
if (put_user(po->pppoe_dev->mtu -
sizeof(struct pppoe_hdr) -
PPP_HDRLEN,
(int __user *)arg))
break;
err = 0;
break;
case PPPIOCSMRU:
err = -ENXIO;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
if (get_user(val, (int __user *)arg))
break;
if (val < (po->pppoe_dev->mtu
- sizeof(struct pppoe_hdr)
- PPP_HDRLEN))
err = 0;
else
err = -EINVAL;
break;
case PPPIOCSFLAGS:
err = -EFAULT;
if (get_user(val, (int __user *)arg))
break;
err = 0;
break;
case PPPOEIOCSFWD:
{
struct pppox_sock *relay_po;
err = -EBUSY;
if (sk->sk_state & (PPPOX_BOUND | PPPOX_DEAD))
break;
err = -ENOTCONN;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
/* The PPPoE address from the user specifies an outbound
PPPoE address to which frames are forwarded */
err = -EFAULT;
if (copy_from_user(&po->pppoe_relay,
(void __user *)arg,
sizeof(struct sockaddr_pppox)))
break;
err = -EINVAL;
if (po->pppoe_relay.sa_family != AF_PPPOX ||
po->pppoe_relay.sa_protocol != PX_PROTO_OE)
break;
/* Check that the socket referenced by the address
actually exists. */
relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay);
if (!relay_po)
break;
sock_put(sk_pppox(relay_po));
sk->sk_state |= PPPOX_RELAY;
err = 0;
break;
}
case PPPOEIOCDFWD:
err = -EALREADY;
if (!(sk->sk_state & PPPOX_RELAY))
break;
sk->sk_state &= ~PPPOX_RELAY;
err = 0;
break;
default:
err = -ENOTTY;
}
return err;
}
static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
size_t total_len)
{
struct sk_buff *skb;
struct sock *sk = sock->sk;
struct pppox_sock *po = pppox_sk(sk);
int error;
struct pppoe_hdr hdr;
struct pppoe_hdr *ph;
struct net_device *dev;
char *start;
int hlen;
lock_sock(sk);
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
error = -ENOTCONN;
goto end;
}
hdr.ver = 1;
hdr.type = 1;
hdr.code = 0;
hdr.sid = po->num;
dev = po->pppoe_dev;
error = -EMSGSIZE;
if (total_len > (dev->mtu + dev->hard_header_len))
goto end;
hlen = LL_RESERVED_SPACE(dev);
skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
dev->needed_tailroom, 0, GFP_KERNEL);
if (!skb) {
error = -ENOMEM;
goto end;
}
/* Reserve space for headers. */
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
ph = skb_put(skb, total_len + sizeof(struct pppoe_hdr));
start = (char *)&ph->tag[0];
error = memcpy_from_msg(start, m, total_len);
if (error < 0) {
kfree_skb(skb);
goto end;
}
error = total_len;
dev_hard_header(skb, dev, ETH_P_PPP_SES,
po->pppoe_pa.remote, NULL, total_len);
memcpy(ph, &hdr, sizeof(struct pppoe_hdr));
ph->length = htons(total_len);
dev_queue_xmit(skb);
end:
release_sock(sk);
return error;
}
/************************************************************************
*
* xmit function for internal use.
*
***********************************************************************/
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
{
struct pppox_sock *po = pppox_sk(sk);
struct net_device *dev = po->pppoe_dev;
struct pppoe_hdr *ph;
int data_len = skb->len;
/* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP
* xmit operations conclude prior to an unregistration call. Thus
* sk->sk_state cannot change, so we don't need to do lock_sock().
* But, we also can't do a lock_sock since that introduces a potential
* deadlock as we'd reverse the lock ordering used when calling
* ppp_unregister_channel().
*/
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
goto abort;
if (!dev)
goto abort;
/* Copy the data if there is no space for the header or if it's
* read-only.
*/
if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
goto abort;
__skb_push(skb, sizeof(*ph));
skb_reset_network_header(skb);
ph = pppoe_hdr(skb);
ph->ver = 1;
ph->type = 1;
ph->code = 0;
ph->sid = po->num;
ph->length = htons(data_len);
skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
skb->dev = dev;
dev_hard_header(skb, dev, ETH_P_PPP_SES,
po->pppoe_pa.remote, NULL, data_len);
dev_queue_xmit(skb);
return 1;
abort:
kfree_skb(skb);
return 1;
}
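/*
 * Illustration of the resulting wire format (a sketch, not extra code):
 * a session frame built by __pppoe_xmit() or pppoe_sendmsg() looks like
 *
 *   | Ethernet hdr, type 0x8864 | ver=1 type=1 code=0 | sid | length |
 *   | PPP protocol | PPP payload ... |
 *
 * where "length" covers only the PPP protocol field and payload, not
 * the Ethernet or PPPoE headers.
 */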
/************************************************************************
*
* xmit function called by generic PPP driver
* sends PPP frame over PPPoE socket
*
***********************************************************************/
static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
struct sock *sk = chan->private;
return __pppoe_xmit(sk, skb);
}
static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
struct net_device_path *path,
const struct ppp_channel *chan)
{
struct sock *sk = chan->private;
struct pppox_sock *po = pppox_sk(sk);
struct net_device *dev = po->pppoe_dev;
if (sock_flag(sk, SOCK_DEAD) ||
!(sk->sk_state & PPPOX_CONNECTED) || !dev)
return -1;
path->type = DEV_PATH_PPPOE;
path->encap.proto = htons(ETH_P_PPP_SES);
path->encap.id = be16_to_cpu(po->num);
memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
path->dev = ctx->dev;
ctx->dev = dev;
return 0;
}
static const struct ppp_channel_ops pppoe_chan_ops = {
.start_xmit = pppoe_xmit,
.fill_forward_path = pppoe_fill_forward_path,
};
static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
size_t total_len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int error = 0;
if (sk->sk_state & PPPOX_BOUND) {
error = -EIO;
goto end;
}
skb = skb_recv_datagram(sk, flags, &error);
if (error < 0)
goto end;
if (skb) {
total_len = min_t(size_t, total_len, skb->len);
error = skb_copy_datagram_msg(skb, 0, m, total_len);
if (error == 0) {
consume_skb(skb);
return total_len;
}
}
kfree_skb(skb);
end:
return error;
}
#ifdef CONFIG_PROC_FS
static int pppoe_seq_show(struct seq_file *seq, void *v)
{
struct pppox_sock *po;
char *dev_name;
if (v == SEQ_START_TOKEN) {
seq_puts(seq, "Id Address Device\n");
goto out;
}
po = v;
dev_name = po->pppoe_pa.dev;
seq_printf(seq, "%08X %pM %8s\n",
po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name);
out:
return 0;
}
static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos)
{
struct pppox_sock *po;
int i;
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
po = pn->hash_table[i];
while (po) {
if (!pos--)
goto out;
po = po->next;
}
}
out:
return po;
}
static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(pn->hash_lock)
{
struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
loff_t l = *pos;
read_lock_bh(&pn->hash_lock);
return l ? pppoe_get_idx(pn, --l) : SEQ_START_TOKEN;
}
static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
struct pppox_sock *po;
++*pos;
if (v == SEQ_START_TOKEN) {
po = pppoe_get_idx(pn, 0);
goto out;
}
po = v;
if (po->next)
po = po->next;
else {
int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
po = NULL;
while (++hash < PPPOE_HASH_SIZE) {
po = pn->hash_table[hash];
if (po)
break;
}
}
out:
return po;
}
static void pppoe_seq_stop(struct seq_file *seq, void *v)
__releases(pn->hash_lock)
{
struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
read_unlock_bh(&pn->hash_lock);
}
static const struct seq_operations pppoe_seq_ops = {
.start = pppoe_seq_start,
.next = pppoe_seq_next,
.stop = pppoe_seq_stop,
.show = pppoe_seq_show,
};
#endif /* CONFIG_PROC_FS */
static const struct proto_ops pppoe_ops = {
.family = AF_PPPOX,
.owner = THIS_MODULE,
.release = pppoe_release,
.bind = sock_no_bind,
.connect = pppoe_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = pppoe_getname,
.poll = datagram_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.sendmsg = pppoe_sendmsg,
.recvmsg = pppoe_recvmsg,
.mmap = sock_no_mmap,
.ioctl = pppox_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = pppox_compat_ioctl,
#endif
};
static const struct pppox_proto pppoe_proto = {
.create = pppoe_create,
.ioctl = pppoe_ioctl,
.owner = THIS_MODULE,
};
static __net_init int pppoe_init_net(struct net *net)
{
struct pppoe_net *pn = pppoe_pernet(net);
struct proc_dir_entry *pde;
rwlock_init(&pn->hash_lock);
pde = proc_create_net("pppoe", 0444, net->proc_net,
&pppoe_seq_ops, sizeof(struct seq_net_private));
#ifdef CONFIG_PROC_FS
if (!pde)
return -ENOMEM;
#endif
return 0;
}
static __net_exit void pppoe_exit_net(struct net *net)
{
remove_proc_entry("pppoe", net->proc_net);
}
static struct pernet_operations pppoe_net_ops = {
.init = pppoe_init_net,
.exit = pppoe_exit_net,
.id = &pppoe_net_id,
.size = sizeof(struct pppoe_net),
};
static int __init pppoe_init(void)
{
int err;
err = register_pernet_device(&pppoe_net_ops);
if (err)
goto out;
err = proto_register(&pppoe_sk_proto, 0);
if (err)
goto out_unregister_net_ops;
err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto);
if (err)
goto out_unregister_pppoe_proto;
dev_add_pack(&pppoes_ptype);
dev_add_pack(&pppoed_ptype);
register_netdevice_notifier(&pppoe_notifier);
return 0;
out_unregister_pppoe_proto:
proto_unregister(&pppoe_sk_proto);
out_unregister_net_ops:
unregister_pernet_device(&pppoe_net_ops);
out:
return err;
}
static void __exit pppoe_exit(void)
{
unregister_netdevice_notifier(&pppoe_notifier);
dev_remove_pack(&pppoed_ptype);
dev_remove_pack(&pppoes_ptype);
unregister_pppox_proto(PX_PROTO_OE);
proto_unregister(&pppoe_sk_proto);
unregister_pernet_device(&pppoe_net_ops);
}
module_init(pppoe_init);
module_exit(pppoe_exit);
MODULE_AUTHOR("Michal Ostrowski <[email protected]>");
MODULE_DESCRIPTION("PPP over Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OE);
| linux-master | drivers/net/ppp/pppoe.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic PPP layer for Linux.
*
* Copyright 1999-2002 Paul Mackerras.
*
* The generic PPP layer handles the PPP network interfaces, the
* /dev/ppp device, packet and VJ compression, and multilink.
* It talks to PPP `channels' via the interface defined in
* include/linux/ppp_channel.h. Channels provide the basic means for
* sending and receiving PPP frames on some kind of communications
* channel.
*
* Part of the code in this driver was inspired by the old async-only
* PPP driver, written by Michael Callahan and Al Longyear, and
* subsequently hacked by Paul Mackerras.
*
* ==FILEVERSION 20041108==
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <asm/unaligned.h>
#include <net/slhc_vj.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#define PPP_VERSION "2.4.2"
/*
* Network protocols we support.
*/
#define NP_IP 0 /* Internet Protocol V4 */
#define NP_IPV6 1 /* Internet Protocol V6 */
#define NP_IPX 2 /* IPX protocol */
#define NP_AT 3 /* Appletalk protocol */
#define NP_MPLS_UC 4 /* MPLS unicast */
#define NP_MPLS_MC 5 /* MPLS multicast */
#define NUM_NP 6 /* Number of NPs. */
#define MPHDRLEN 6 /* multilink protocol header length */
#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
#define PPP_PROTO_LEN 2
/*
* An instance of /dev/ppp can be associated with either a ppp
* interface unit or a ppp channel. In both cases, file->private_data
* points to one of these.
*/
struct ppp_file {
enum {
INTERFACE=1, CHANNEL
} kind;
struct sk_buff_head xq; /* pppd transmit queue */
struct sk_buff_head rq; /* receive queue for pppd */
wait_queue_head_t rwait; /* for poll on reading /dev/ppp */
refcount_t refcnt; /* # refs (incl /dev/ppp attached) */
int hdrlen; /* space to leave for headers */
int index; /* interface unit / channel number */
int dead; /* unit/channel has been shut down */
};
#define PF_TO_X(pf, X) container_of(pf, X, file)
#define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
/*
* Data structure to hold primary network stats for which
* we want to use 64 bit storage. Other network stats
* are stored in dev->stats of the ppp structure.
*/
struct ppp_link_stats {
u64 rx_packets;
u64 tx_packets;
u64 rx_bytes;
u64 tx_bytes;
};
/*
* Data structure describing one ppp unit.
* A ppp unit corresponds to a ppp network interface device
* and represents a multilink bundle.
* It can have 0 or more ppp channels connected to it.
*/
struct ppp {
struct ppp_file file; /* stuff for read/write/poll 0 */
struct file *owner; /* file that owns this unit 48 */
struct list_head channels; /* list of attached channels 4c */
int n_channels; /* how many channels are attached 54 */
spinlock_t rlock; /* lock for receive side 58 */
spinlock_t wlock; /* lock for transmit side 5c */
int __percpu *xmit_recursion; /* xmit recursion detect */
int mru; /* max receive unit 60 */
unsigned int flags; /* control bits 64 */
unsigned int xstate; /* transmit state bits 68 */
unsigned int rstate; /* receive state bits 6c */
int debug; /* debug flags 70 */
struct slcompress *vj; /* state for VJ header compression */
enum NPmode npmode[NUM_NP]; /* what to do with each net proto 78 */
struct sk_buff *xmit_pending; /* a packet ready to go out 88 */
struct compressor *xcomp; /* transmit packet compressor 8c */
void *xc_state; /* its internal state 90 */
struct compressor *rcomp; /* receive decompressor 94 */
void *rc_state; /* its internal state 98 */
unsigned long last_xmit; /* jiffies when last pkt sent 9c */
unsigned long last_recv; /* jiffies when last pkt rcvd a0 */
struct net_device *dev; /* network interface device a4 */
int closing; /* is device closing down? a8 */
#ifdef CONFIG_PPP_MULTILINK
int nxchan; /* next channel to send something on */
u32 nxseq; /* next sequence number to send */
int mrru; /* MP: max reconst. receive unit */
u32 nextseq; /* MP: seq no of next packet */
u32 minseq; /* MP: min of most recent seqnos */
struct sk_buff_head mrq; /* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
struct bpf_prog *pass_filter; /* filter for packets to pass */
struct bpf_prog *active_filter; /* filter for pkts to reset idle */
#endif /* CONFIG_PPP_FILTER */
struct net *ppp_net; /* the net we belong to */
struct ppp_link_stats stats64; /* 64 bit network stats */
};
/*
* Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
* SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
* SC_MUST_COMP
* Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
* Bits in xstate: SC_COMP_RUN
*/
#define SC_FLAG_BITS (SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
|SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
|SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)
/*
* Private data structure for each channel.
* This includes the data structure used for multilink.
*/
struct channel {
struct ppp_file file; /* stuff for read/write/poll */
struct list_head list; /* link in all/new_channels list */
struct ppp_channel *chan; /* public channel data structure */
struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */
spinlock_t downl; /* protects `chan', file.xq dequeue */
struct ppp *ppp; /* ppp unit we're connected to */
struct net *chan_net; /* the net channel belongs to */
netns_tracker ns_tracker;
struct list_head clist; /* link in list of channels per unit */
rwlock_t upl; /* protects `ppp' and 'bridge' */
struct channel __rcu *bridge; /* "bridged" ppp channel */
#ifdef CONFIG_PPP_MULTILINK
u8 avail; /* flag used in multilink stuff */
u8 had_frag; /* >= 1 fragments have been sent */
u32 lastseq; /* MP: last sequence # received */
int speed; /* speed of the corresponding ppp channel*/
#endif /* CONFIG_PPP_MULTILINK */
};
struct ppp_config {
struct file *file;
s32 unit;
bool ifname_is_set;
};
/*
* SMP locking issues:
* Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
* list and the ppp.n_channels field; you must take both locks
* before modifying either of them.
* The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
* channel.downl.
*/
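/*
 * Illustration (a sketch, not a path taken verbatim by this driver):
 * code that needs several of these locks at once nests them in the
 * documented order, e.g.
 *
 *   write_lock_bh(&pch->upl);
 *   spin_lock_bh(&ppp->wlock);
 *   spin_lock_bh(&ppp->rlock);
 *   spin_lock_bh(&pch->downl);
 *   ...
 *   spin_unlock_bh(&pch->downl);
 *   spin_unlock_bh(&ppp->rlock);
 *   spin_unlock_bh(&ppp->wlock);
 *   write_unlock_bh(&pch->upl);
 *
 * Taking them in any other order risks an ABBA deadlock.
 */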
static DEFINE_MUTEX(ppp_mutex);
static atomic_t ppp_unit_count = ATOMIC_INIT(0);
static atomic_t channel_count = ATOMIC_INIT(0);
/* per-net private data for this module */
static unsigned int ppp_net_id __read_mostly;
struct ppp_net {
/* units to ppp mapping */
struct idr units_idr;
/*
* all_ppp_mutex protects the units_idr mapping.
* It also ensures that finding a ppp unit in the units_idr
* map and updating its file.refcnt field is atomic.
*/
struct mutex all_ppp_mutex;
/* channels */
struct list_head all_channels;
struct list_head new_channels;
int last_channel_index;
/*
* all_channels_lock protects all_channels and
* last_channel_index, and makes finding a channel
* and updating its file.refcnt field atomic.
*/
spinlock_t all_channels_lock;
};
/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb) get_unaligned_be16((skb)->data)
/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN 32
/*
* Maximum number of multilink fragments queued up.
* This has to be large enough to cope with the maximum latency of
* the slowest channel relative to the others. Strictly it should
* depend on the number of channels and their characteristics.
*/
#define PPP_MP_MAX_QLEN 128
/* Multilink header bits. */
#define B 0x80 /* this fragment begins a packet */
#define E 0x40 /* this fragment ends a packet */
/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b) ((s32)((a) - (b)) < 0)
#define seq_after(a, b) ((s32)((a) - (b)) > 0)
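/*
 * Worked example (illustrative): the signed-difference trick keeps the
 * ordering correct across 32-bit wrap-around, e.g.
 *
 *   seq_before(0xfffffffeU, 1) == ((s32)0xfffffffd < 0) == true
 *   seq_after(1, 0xfffffffeU)  == ((s32)0x00000003 > 0) == true
 *
 * so a fragment numbered 1 is treated as newer than one numbered
 * 0xfffffffe, as long as the two differ by less than 2^31.
 */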
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
struct channel *pch);
static void ppp_receive_error(struct ppp *ppp);
static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
struct sk_buff *skb);
#ifdef CONFIG_PPP_MULTILINK
static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
struct channel *pch);
static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
static int ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
static int ppp_create_interface(struct net *net, struct file *file, int *unit);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);
static int unit_get(struct idr *p, void *ptr, int min);
static int unit_set(struct idr *p, void *ptr, int n);
static void unit_put(struct idr *p, int n);
static void *unit_find(struct idr *p, int n);
static void ppp_setup(struct net_device *dev);
static const struct net_device_ops ppp_netdev_ops;
static struct class *ppp_class;
/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
{
return net_generic(net, ppp_net_id);
}
/* Translates a PPP protocol number to a NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)
{
switch (proto) {
case PPP_IP:
return NP_IP;
case PPP_IPV6:
return NP_IPV6;
case PPP_IPX:
return NP_IPX;
case PPP_AT:
return NP_AT;
case PPP_MPLS_UC:
return NP_MPLS_UC;
case PPP_MPLS_MC:
return NP_MPLS_MC;
}
return -EINVAL;
}
/* Translates an NP index into a PPP protocol number */
static const int npindex_to_proto[NUM_NP] = {
PPP_IP,
PPP_IPV6,
PPP_IPX,
PPP_AT,
PPP_MPLS_UC,
PPP_MPLS_MC,
};
/* Translates an ethertype into an NP index */
static inline int ethertype_to_npindex(int ethertype)
{
switch (ethertype) {
case ETH_P_IP:
return NP_IP;
case ETH_P_IPV6:
return NP_IPV6;
case ETH_P_IPX:
return NP_IPX;
case ETH_P_PPPTALK:
case ETH_P_ATALK:
return NP_AT;
case ETH_P_MPLS_UC:
return NP_MPLS_UC;
case ETH_P_MPLS_MC:
return NP_MPLS_MC;
}
return -1;
}
/* Translates an NP index into an ethertype */
static const int npindex_to_ethertype[NUM_NP] = {
ETH_P_IP,
ETH_P_IPV6,
ETH_P_IPX,
ETH_P_PPPTALK,
ETH_P_MPLS_UC,
ETH_P_MPLS_MC,
};
/*
* Locking shorthand.
*/
#define ppp_xmit_lock(ppp) spin_lock_bh(&(ppp)->wlock)
#define ppp_xmit_unlock(ppp) spin_unlock_bh(&(ppp)->wlock)
#define ppp_recv_lock(ppp) spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp) spin_unlock_bh(&(ppp)->rlock)
#define ppp_lock(ppp) do { ppp_xmit_lock(ppp); \
ppp_recv_lock(ppp); } while (0)
#define ppp_unlock(ppp) do { ppp_recv_unlock(ppp); \
ppp_xmit_unlock(ppp); } while (0)
/*
* /dev/ppp device routines.
* The /dev/ppp device is used by pppd to control the ppp unit.
* It supports the read, write, ioctl and poll functions.
* Open instances of /dev/ppp can be in one of three states:
* unattached, attached to a ppp unit, or attached to a ppp channel.
*/
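/*
 * Illustration (a hedged userspace sketch, not part of this file): pppd
 * moves a descriptor from the unattached state to "attached to a ppp
 * unit" roughly like this:
 *
 *   int fd = open("/dev/ppp", O_RDWR);
 *   int unit = -1;                  (-1 lets the kernel pick a unit)
 *   if (ioctl(fd, PPPIOCNEWUNIT, &unit) < 0)
 *           perror("PPPIOCNEWUNIT");
 *
 * after which fd reads and writes PPP frames for interface ppp<unit>.
 * PPPIOCATTACH and PPPIOCATTCHAN (handled in ppp_unattached_ioctl()
 * below) attach to an existing unit or channel instead.
 */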
static int ppp_open(struct inode *inode, struct file *file)
{
/*
* This could (should?) be enforced by the permissions on /dev/ppp.
*/
if (!ns_capable(file->f_cred->user_ns, CAP_NET_ADMIN))
return -EPERM;
return 0;
}
static int ppp_release(struct inode *unused, struct file *file)
{
struct ppp_file *pf = file->private_data;
struct ppp *ppp;
if (pf) {
file->private_data = NULL;
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
rtnl_lock();
if (file == ppp->owner)
unregister_netdevice(ppp->dev);
rtnl_unlock();
}
if (refcount_dec_and_test(&pf->refcnt)) {
switch (pf->kind) {
case INTERFACE:
ppp_destroy_interface(PF_TO_PPP(pf));
break;
case CHANNEL:
ppp_destroy_channel(PF_TO_CHANNEL(pf));
break;
}
}
}
return 0;
}
static ssize_t ppp_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct ppp_file *pf = file->private_data;
DECLARE_WAITQUEUE(wait, current);
ssize_t ret;
struct sk_buff *skb = NULL;
struct iovec iov;
struct iov_iter to;
ret = count;
if (!pf)
return -ENXIO;
add_wait_queue(&pf->rwait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
skb = skb_dequeue(&pf->rq);
if (skb)
break;
ret = 0;
if (pf->dead)
break;
if (pf->kind == INTERFACE) {
/*
* Return 0 (EOF) on an interface that has no
* channels connected, unless it is looping
* network traffic (demand mode).
*/
struct ppp *ppp = PF_TO_PPP(pf);
ppp_recv_lock(ppp);
if (ppp->n_channels == 0 &&
(ppp->flags & SC_LOOP_TRAFFIC) == 0) {
ppp_recv_unlock(ppp);
break;
}
ppp_recv_unlock(ppp);
}
ret = -EAGAIN;
if (file->f_flags & O_NONBLOCK)
break;
ret = -ERESTARTSYS;
if (signal_pending(current))
break;
schedule();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&pf->rwait, &wait);
if (!skb)
goto out;
ret = -EOVERFLOW;
if (skb->len > count)
goto outf;
ret = -EFAULT;
iov.iov_base = buf;
iov.iov_len = count;
iov_iter_init(&to, ITER_DEST, &iov, 1, count);
if (skb_copy_datagram_iter(skb, 0, &to, skb->len))
goto outf;
ret = skb->len;
outf:
kfree_skb(skb);
out:
return ret;
}
static ssize_t ppp_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct ppp_file *pf = file->private_data;
struct sk_buff *skb;
ssize_t ret;
if (!pf)
return -ENXIO;
/* All PPP packets should start with the 2-byte protocol */
if (count < PPP_PROTO_LEN)
return -EINVAL;
ret = -ENOMEM;
skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
if (!skb)
goto out;
skb_reserve(skb, pf->hdrlen);
ret = -EFAULT;
if (copy_from_user(skb_put(skb, count), buf, count)) {
kfree_skb(skb);
goto out;
}
switch (pf->kind) {
case INTERFACE:
ppp_xmit_process(PF_TO_PPP(pf), skb);
break;
case CHANNEL:
skb_queue_tail(&pf->xq, skb);
ppp_channel_push(PF_TO_CHANNEL(pf));
break;
}
ret = count;
out:
return ret;
}
/* No kernel lock - fine */
static __poll_t ppp_poll(struct file *file, poll_table *wait)
{
struct ppp_file *pf = file->private_data;
__poll_t mask;
if (!pf)
return 0;
poll_wait(file, &pf->rwait, wait);
mask = EPOLLOUT | EPOLLWRNORM;
if (skb_peek(&pf->rq))
mask |= EPOLLIN | EPOLLRDNORM;
if (pf->dead)
mask |= EPOLLHUP;
else if (pf->kind == INTERFACE) {
/* see comment in ppp_read */
struct ppp *ppp = PF_TO_PPP(pf);
ppp_recv_lock(ppp);
if (ppp->n_channels == 0 &&
(ppp->flags & SC_LOOP_TRAFFIC) == 0)
mask |= EPOLLIN | EPOLLRDNORM;
ppp_recv_unlock(ppp);
}
return mask;
}
#ifdef CONFIG_PPP_FILTER
static struct bpf_prog *get_filter(struct sock_fprog *uprog)
{
struct sock_fprog_kern fprog;
struct bpf_prog *res = NULL;
int err;
if (!uprog->len)
return NULL;
/* uprog->len is unsigned short, so no overflow here */
fprog.len = uprog->len;
fprog.filter = memdup_user(uprog->filter,
uprog->len * sizeof(struct sock_filter));
if (IS_ERR(fprog.filter))
return ERR_CAST(fprog.filter);
err = bpf_prog_create(&res, &fprog);
kfree(fprog.filter);
return err ? ERR_PTR(err) : res;
}
static struct bpf_prog *ppp_get_filter(struct sock_fprog __user *p)
{
struct sock_fprog uprog;
if (copy_from_user(&uprog, p, sizeof(struct sock_fprog)))
return ERR_PTR(-EFAULT);
return get_filter(&uprog);
}
#ifdef CONFIG_COMPAT
struct sock_fprog32 {
unsigned short len;
compat_caddr_t filter;
};
#define PPPIOCSPASS32 _IOW('t', 71, struct sock_fprog32)
#define PPPIOCSACTIVE32 _IOW('t', 70, struct sock_fprog32)
static struct bpf_prog *compat_ppp_get_filter(struct sock_fprog32 __user *p)
{
struct sock_fprog32 uprog32;
struct sock_fprog uprog;
if (copy_from_user(&uprog32, p, sizeof(struct sock_fprog32)))
return ERR_PTR(-EFAULT);
uprog.len = uprog32.len;
uprog.filter = compat_ptr(uprog32.filter);
return get_filter(&uprog);
}
#endif
#endif
/* Bridge one PPP channel to another.
* When two channels are bridged, ppp_input on one channel is redirected to
* the other's ops->start_xmit handler.
* In order to safely bridge channels we must reject channels which are already
* part of a bridge instance, or which form part of an existing unit.
* Once successfully bridged, each channel holds a reference on the other
* to prevent it being freed while the bridge is extant.
*/
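/*
 * Illustration (a hedged userspace sketch): given two descriptors that
 * have each been attached to a channel with PPPIOCATTCHAN, a bridge is
 * set up by passing one channel's index (as reported by PPPIOCGCHAN on
 * the other descriptor) to
 *
 *   ioctl(fd_a, PPPIOCBRIDGECHAN, &chan_index_b);
 *
 * and torn down again with ioctl(fd_a, PPPIOCUNBRIDGECHAN). These are
 * the PPPIOCBRIDGECHAN/PPPIOCUNBRIDGECHAN cases in ppp_ioctl() below.
 */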
static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
{
write_lock_bh(&pch->upl);
if (pch->ppp ||
rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) {
write_unlock_bh(&pch->upl);
return -EALREADY;
}
refcount_inc(&pchb->file.refcnt);
rcu_assign_pointer(pch->bridge, pchb);
write_unlock_bh(&pch->upl);
write_lock_bh(&pchb->upl);
if (pchb->ppp ||
rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl))) {
write_unlock_bh(&pchb->upl);
goto err_unset;
}
refcount_inc(&pch->file.refcnt);
rcu_assign_pointer(pchb->bridge, pch);
write_unlock_bh(&pchb->upl);
return 0;
err_unset:
write_lock_bh(&pch->upl);
/* Re-read pch->bridge with upl held in case it was modified concurrently */
pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
RCU_INIT_POINTER(pch->bridge, NULL);
write_unlock_bh(&pch->upl);
synchronize_rcu();
if (pchb)
if (refcount_dec_and_test(&pchb->file.refcnt))
ppp_destroy_channel(pchb);
return -EALREADY;
}
static int ppp_unbridge_channels(struct channel *pch)
{
struct channel *pchb, *pchbb;
write_lock_bh(&pch->upl);
pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
if (!pchb) {
write_unlock_bh(&pch->upl);
return -EINVAL;
}
RCU_INIT_POINTER(pch->bridge, NULL);
write_unlock_bh(&pch->upl);
/* Only modify pchb if pchb->bridge points back to pch.
* If not, it implies that there has been a race unbridging (and possibly
* even rebridging) pchb. We should leave pchb alone to avoid either a
* refcount underflow, or breaking another established bridge instance.
*/
write_lock_bh(&pchb->upl);
pchbb = rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl));
if (pchbb == pch)
RCU_INIT_POINTER(pchb->bridge, NULL);
write_unlock_bh(&pchb->upl);
synchronize_rcu();
if (pchbb == pch)
if (refcount_dec_and_test(&pch->file.refcnt))
ppp_destroy_channel(pch);
if (refcount_dec_and_test(&pchb->file.refcnt))
ppp_destroy_channel(pchb);
return 0;
}
static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ppp_file *pf;
struct ppp *ppp;
int err = -EFAULT, val, val2, i;
struct ppp_idle32 idle32;
struct ppp_idle64 idle64;
struct npioctl npi;
int unit, cflags;
struct slcompress *vj;
void __user *argp = (void __user *)arg;
int __user *p = argp;
mutex_lock(&ppp_mutex);
pf = file->private_data;
if (!pf) {
err = ppp_unattached_ioctl(current->nsproxy->net_ns,
pf, file, cmd, arg);
goto out;
}
if (cmd == PPPIOCDETACH) {
/*
* PPPIOCDETACH is no longer supported as it was heavily broken,
* and is only known to have been used by pppd older than
* ppp-2.4.2 (released November 2003).
*/
pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
current->comm, current->pid);
err = -EINVAL;
goto out;
}
if (pf->kind == CHANNEL) {
struct channel *pch, *pchb;
struct ppp_channel *chan;
struct ppp_net *pn;
pch = PF_TO_CHANNEL(pf);
switch (cmd) {
case PPPIOCCONNECT:
if (get_user(unit, p))
break;
err = ppp_connect_channel(pch, unit);
break;
case PPPIOCDISCONN:
err = ppp_disconnect_channel(pch);
break;
case PPPIOCBRIDGECHAN:
if (get_user(unit, p))
break;
err = -ENXIO;
pn = ppp_pernet(current->nsproxy->net_ns);
spin_lock_bh(&pn->all_channels_lock);
pchb = ppp_find_channel(pn, unit);
/* Hold a reference to prevent pchb being freed while
* we establish the bridge.
*/
if (pchb)
refcount_inc(&pchb->file.refcnt);
spin_unlock_bh(&pn->all_channels_lock);
if (!pchb)
break;
err = ppp_bridge_channels(pch, pchb);
/* Drop earlier refcount now bridge establishment is complete */
if (refcount_dec_and_test(&pchb->file.refcnt))
ppp_destroy_channel(pchb);
break;
case PPPIOCUNBRIDGECHAN:
err = ppp_unbridge_channels(pch);
break;
default:
down_read(&pch->chan_sem);
chan = pch->chan;
err = -ENOTTY;
if (chan && chan->ops->ioctl)
err = chan->ops->ioctl(chan, cmd, arg);
up_read(&pch->chan_sem);
}
goto out;
}
if (pf->kind != INTERFACE) {
/* can't happen */
pr_err("PPP: not interface or channel??\n");
err = -EINVAL;
goto out;
}
ppp = PF_TO_PPP(pf);
switch (cmd) {
case PPPIOCSMRU:
if (get_user(val, p))
break;
ppp->mru = val;
err = 0;
break;
case PPPIOCSFLAGS:
if (get_user(val, p))
break;
ppp_lock(ppp);
cflags = ppp->flags & ~val;
#ifdef CONFIG_PPP_MULTILINK
if (!(ppp->flags & SC_MULTILINK) && (val & SC_MULTILINK))
ppp->nextseq = 0;
#endif
ppp->flags = val & SC_FLAG_BITS;
ppp_unlock(ppp);
if (cflags & SC_CCP_OPEN)
ppp_ccp_closed(ppp);
err = 0;
break;
case PPPIOCGFLAGS:
val = ppp->flags | ppp->xstate | ppp->rstate;
if (put_user(val, p))
break;
err = 0;
break;
case PPPIOCSCOMPRESS:
{
struct ppp_option_data data;
if (copy_from_user(&data, argp, sizeof(data)))
err = -EFAULT;
else
err = ppp_set_compress(ppp, &data);
break;
}
case PPPIOCGUNIT:
if (put_user(ppp->file.index, p))
break;
err = 0;
break;
case PPPIOCSDEBUG:
if (get_user(val, p))
break;
ppp->debug = val;
err = 0;
break;
case PPPIOCGDEBUG:
if (put_user(ppp->debug, p))
break;
err = 0;
break;
case PPPIOCGIDLE32:
idle32.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
idle32.recv_idle = (jiffies - ppp->last_recv) / HZ;
if (copy_to_user(argp, &idle32, sizeof(idle32)))
break;
err = 0;
break;
case PPPIOCGIDLE64:
idle64.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
idle64.recv_idle = (jiffies - ppp->last_recv) / HZ;
if (copy_to_user(argp, &idle64, sizeof(idle64)))
break;
err = 0;
break;
case PPPIOCSMAXCID:
if (get_user(val, p))
break;
val2 = 15;
if ((val >> 16) != 0) {
val2 = val >> 16;
val &= 0xffff;
}
vj = slhc_init(val2+1, val+1);
if (IS_ERR(vj)) {
err = PTR_ERR(vj);
break;
}
ppp_lock(ppp);
if (ppp->vj)
slhc_free(ppp->vj);
ppp->vj = vj;
ppp_unlock(ppp);
err = 0;
break;
case PPPIOCGNPMODE:
case PPPIOCSNPMODE:
if (copy_from_user(&npi, argp, sizeof(npi)))
break;
err = proto_to_npindex(npi.protocol);
if (err < 0)
break;
i = err;
if (cmd == PPPIOCGNPMODE) {
err = -EFAULT;
npi.mode = ppp->npmode[i];
if (copy_to_user(argp, &npi, sizeof(npi)))
break;
} else {
ppp->npmode[i] = npi.mode;
/* we may be able to transmit more packets now (??) */
netif_wake_queue(ppp->dev);
}
err = 0;
break;
#ifdef CONFIG_PPP_FILTER
case PPPIOCSPASS:
case PPPIOCSACTIVE:
{
struct bpf_prog *filter = ppp_get_filter(argp);
struct bpf_prog **which;
if (IS_ERR(filter)) {
err = PTR_ERR(filter);
break;
}
if (cmd == PPPIOCSPASS)
which = &ppp->pass_filter;
else
which = &ppp->active_filter;
ppp_lock(ppp);
if (*which)
bpf_prog_destroy(*which);
*which = filter;
ppp_unlock(ppp);
err = 0;
break;
}
#endif /* CONFIG_PPP_FILTER */
#ifdef CONFIG_PPP_MULTILINK
case PPPIOCSMRRU:
if (get_user(val, p))
break;
ppp_recv_lock(ppp);
ppp->mrru = val;
ppp_recv_unlock(ppp);
err = 0;
break;
#endif /* CONFIG_PPP_MULTILINK */
default:
err = -ENOTTY;
}
out:
mutex_unlock(&ppp_mutex);
return err;
}
#ifdef CONFIG_COMPAT
struct ppp_option_data32 {
compat_uptr_t ptr;
u32 length;
compat_int_t transmit;
};
#define PPPIOCSCOMPRESS32 _IOW('t', 77, struct ppp_option_data32)
static long ppp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ppp_file *pf;
int err = -ENOIOCTLCMD;
void __user *argp = (void __user *)arg;
mutex_lock(&ppp_mutex);
pf = file->private_data;
if (pf && pf->kind == INTERFACE) {
struct ppp *ppp = PF_TO_PPP(pf);
switch (cmd) {
#ifdef CONFIG_PPP_FILTER
case PPPIOCSPASS32:
case PPPIOCSACTIVE32:
{
struct bpf_prog *filter = compat_ppp_get_filter(argp);
struct bpf_prog **which;
if (IS_ERR(filter)) {
err = PTR_ERR(filter);
break;
}
if (cmd == PPPIOCSPASS32)
which = &ppp->pass_filter;
else
which = &ppp->active_filter;
ppp_lock(ppp);
if (*which)
bpf_prog_destroy(*which);
*which = filter;
ppp_unlock(ppp);
err = 0;
break;
}
#endif /* CONFIG_PPP_FILTER */
case PPPIOCSCOMPRESS32:
{
struct ppp_option_data32 data32;
if (copy_from_user(&data32, argp, sizeof(data32))) {
err = -EFAULT;
} else {
struct ppp_option_data data = {
.ptr = compat_ptr(data32.ptr),
.length = data32.length,
.transmit = data32.transmit
};
err = ppp_set_compress(ppp, &data);
}
break;
}
}
}
mutex_unlock(&ppp_mutex);
/* all other commands have compatible arguments */
if (err == -ENOIOCTLCMD)
err = ppp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
return err;
}
#endif
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg)
{
int unit, err = -EFAULT;
struct ppp *ppp;
struct channel *chan;
struct ppp_net *pn;
int __user *p = (int __user *)arg;
switch (cmd) {
case PPPIOCNEWUNIT:
/* Create a new ppp unit */
if (get_user(unit, p))
break;
err = ppp_create_interface(net, file, &unit);
if (err < 0)
break;
err = -EFAULT;
if (put_user(unit, p))
break;
err = 0;
break;
case PPPIOCATTACH:
/* Attach to an existing ppp unit */
if (get_user(unit, p))
break;
err = -ENXIO;
pn = ppp_pernet(net);
mutex_lock(&pn->all_ppp_mutex);
ppp = ppp_find_unit(pn, unit);
if (ppp) {
refcount_inc(&ppp->file.refcnt);
file->private_data = &ppp->file;
err = 0;
}
mutex_unlock(&pn->all_ppp_mutex);
break;
case PPPIOCATTCHAN:
if (get_user(unit, p))
break;
err = -ENXIO;
pn = ppp_pernet(net);
spin_lock_bh(&pn->all_channels_lock);
chan = ppp_find_channel(pn, unit);
if (chan) {
refcount_inc(&chan->file.refcnt);
file->private_data = &chan->file;
err = 0;
}
spin_unlock_bh(&pn->all_channels_lock);
break;
default:
err = -ENOTTY;
}
return err;
}
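/* Illustrative userspace usage of the ioctls above (a sketch, not kernel
 * documentation): open /dev/ppp, then
 *
 *	int unit = -1;			// -1 lets the driver pick a free number
 *	ioctl(fd, PPPIOCNEWUNIT, &unit);	// on success, unit holds the number
 *
 * The same descriptor could instead attach to an existing unit with
 * PPPIOCATTACH, or to a channel with PPPIOCATTCHAN.
 */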
static const struct file_operations ppp_device_fops = {
.owner = THIS_MODULE,
.read = ppp_read,
.write = ppp_write,
.poll = ppp_poll,
.unlocked_ioctl = ppp_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ppp_compat_ioctl,
#endif
.open = ppp_open,
.release = ppp_release,
.llseek = noop_llseek,
};
static __net_init int ppp_init_net(struct net *net)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
idr_init(&pn->units_idr);
mutex_init(&pn->all_ppp_mutex);
INIT_LIST_HEAD(&pn->all_channels);
INIT_LIST_HEAD(&pn->new_channels);
spin_lock_init(&pn->all_channels_lock);
return 0;
}
static __net_exit void ppp_exit_net(struct net *net)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
struct net_device *dev;
struct net_device *aux;
struct ppp *ppp;
LIST_HEAD(list);
int id;
rtnl_lock();
for_each_netdev_safe(net, dev, aux) {
if (dev->netdev_ops == &ppp_netdev_ops)
unregister_netdevice_queue(dev, &list);
}
idr_for_each_entry(&pn->units_idr, ppp, id)
/* Skip devices already unregistered by previous loop */
if (!net_eq(dev_net(ppp->dev), net))
unregister_netdevice_queue(ppp->dev, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
mutex_destroy(&pn->all_ppp_mutex);
idr_destroy(&pn->units_idr);
WARN_ON_ONCE(!list_empty(&pn->all_channels));
WARN_ON_ONCE(!list_empty(&pn->new_channels));
}
static struct pernet_operations ppp_net_ops = {
.init = ppp_init_net,
.exit = ppp_exit_net,
.id = &ppp_net_id,
.size = sizeof(struct ppp_net),
};
static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
{
struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
int ret;
mutex_lock(&pn->all_ppp_mutex);
if (unit < 0) {
ret = unit_get(&pn->units_idr, ppp, 0);
if (ret < 0)
goto err;
if (!ifname_is_set) {
while (1) {
snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret);
if (!netdev_name_in_use(ppp->ppp_net, ppp->dev->name))
break;
unit_put(&pn->units_idr, ret);
ret = unit_get(&pn->units_idr, ppp, ret + 1);
if (ret < 0)
goto err;
}
}
} else {
/* Caller asked for a specific unit number. Fail with -EEXIST
* if unavailable. For backward compatibility, return -EEXIST
* too if idr allocation fails; this makes pppd retry without
* requesting a specific unit number.
*/
if (unit_find(&pn->units_idr, unit)) {
ret = -EEXIST;
goto err;
}
ret = unit_set(&pn->units_idr, ppp, unit);
if (ret < 0) {
/* Rewrite error for backward compatibility */
ret = -EEXIST;
goto err;
}
}
ppp->file.index = ret;
if (!ifname_is_set)
snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
mutex_unlock(&pn->all_ppp_mutex);
ret = register_netdevice(ppp->dev);
if (ret < 0)
goto err_unit;
atomic_inc(&ppp_unit_count);
return 0;
err_unit:
mutex_lock(&pn->all_ppp_mutex);
unit_put(&pn->units_idr, ppp->file.index);
err:
mutex_unlock(&pn->all_ppp_mutex);
return ret;
}
static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
const struct ppp_config *conf)
{
struct ppp *ppp = netdev_priv(dev);
int indx;
int err;
int cpu;
ppp->dev = dev;
ppp->ppp_net = src_net;
ppp->mru = PPP_MRU;
ppp->owner = conf->file;
init_ppp_file(&ppp->file, INTERFACE);
ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
for (indx = 0; indx < NUM_NP; ++indx)
ppp->npmode[indx] = NPMODE_PASS;
INIT_LIST_HEAD(&ppp->channels);
spin_lock_init(&ppp->rlock);
spin_lock_init(&ppp->wlock);
ppp->xmit_recursion = alloc_percpu(int);
if (!ppp->xmit_recursion) {
err = -ENOMEM;
goto err1;
}
for_each_possible_cpu(cpu)
(*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
#ifdef CONFIG_PPP_MULTILINK
ppp->minseq = -1;
skb_queue_head_init(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
ppp->pass_filter = NULL;
ppp->active_filter = NULL;
#endif /* CONFIG_PPP_FILTER */
err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
if (err < 0)
goto err2;
conf->file->private_data = &ppp->file;
return 0;
err2:
free_percpu(ppp->xmit_recursion);
err1:
return err;
}
static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
[IFLA_PPP_DEV_FD] = { .type = NLA_S32 },
};
static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
if (!data)
return -EINVAL;
if (!data[IFLA_PPP_DEV_FD])
return -EINVAL;
if (nla_get_s32(data[IFLA_PPP_DEV_FD]) < 0)
return -EBADF;
return 0;
}
static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct ppp_config conf = {
.unit = -1,
.ifname_is_set = true,
};
struct file *file;
int err;
file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD]));
if (!file)
return -EBADF;
/* rtnl_lock is already held here, but ppp_create_interface() locks
* ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids
* possible deadlock due to lock order inversion, at the cost of
* pushing the problem back to userspace.
*/
if (!mutex_trylock(&ppp_mutex)) {
err = -EBUSY;
goto out;
}
if (file->f_op != &ppp_device_fops || file->private_data) {
err = -EBADF;
goto out_unlock;
}
conf.file = file;
/* Don't use device name generated by the rtnetlink layer when ifname
* isn't specified. Let ppp_dev_configure() set the device name using
* the PPP unit identifier as suffix (i.e. ppp<unit_id>). This allows
* userspace to infer the device name using the PPPIOCGUNIT ioctl.
*/
if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
conf.ifname_is_set = false;
err = ppp_dev_configure(src_net, dev, &conf);
out_unlock:
mutex_unlock(&ppp_mutex);
out:
fput(file);
return err;
}
static void ppp_nl_dellink(struct net_device *dev, struct list_head *head)
{
unregister_netdevice_queue(dev, head);
}
static size_t ppp_nl_get_size(const struct net_device *dev)
{
return 0;
}
static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
return 0;
}
static struct net *ppp_nl_get_link_net(const struct net_device *dev)
{
struct ppp *ppp = netdev_priv(dev);
return ppp->ppp_net;
}
static struct rtnl_link_ops ppp_link_ops __read_mostly = {
.kind = "ppp",
.maxtype = IFLA_PPP_MAX,
.policy = ppp_nl_policy,
.priv_size = sizeof(struct ppp),
.setup = ppp_setup,
.validate = ppp_nl_validate,
.newlink = ppp_nl_newlink,
.dellink = ppp_nl_dellink,
.get_size = ppp_nl_get_size,
.fill_info = ppp_nl_fill_info,
.get_link_net = ppp_nl_get_link_net,
};
#define PPP_MAJOR 108
/* Called at boot time if ppp is compiled into the kernel,
or at module load time (from init_module) if compiled as a module. */
static int __init ppp_init(void)
{
int err;
pr_info("PPP generic driver version " PPP_VERSION "\n");
err = register_pernet_device(&ppp_net_ops);
if (err) {
pr_err("failed to register PPP pernet device (%d)\n", err);
goto out;
}
err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
if (err) {
pr_err("failed to register PPP device (%d)\n", err);
goto out_net;
}
ppp_class = class_create("ppp");
if (IS_ERR(ppp_class)) {
err = PTR_ERR(ppp_class);
goto out_chrdev;
}
err = rtnl_link_register(&ppp_link_ops);
if (err) {
pr_err("failed to register rtnetlink PPP handler\n");
goto out_class;
}
/* not a big deal if we fail here :-) */
device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
return 0;
out_class:
class_destroy(ppp_class);
out_chrdev:
unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
unregister_pernet_device(&ppp_net_ops);
out:
return err;
}
/*
* Network interface unit routines.
*/
static netdev_tx_t
ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ppp *ppp = netdev_priv(dev);
int npi, proto;
unsigned char *pp;
npi = ethertype_to_npindex(ntohs(skb->protocol));
if (npi < 0)
goto outf;
/* Drop, accept or reject the packet */
switch (ppp->npmode[npi]) {
case NPMODE_PASS:
break;
case NPMODE_QUEUE:
/* it would be nice to have a way to tell the network
system to queue this one up for later. */
goto outf;
case NPMODE_DROP:
case NPMODE_ERROR:
goto outf;
}
/* Put the 2-byte PPP protocol number on the front,
making sure there is room for the address and control fields. */
if (skb_cow_head(skb, PPP_HDRLEN))
goto outf;
pp = skb_push(skb, 2);
proto = npindex_to_proto[npi];
put_unaligned_be16(proto, pp);
skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
ppp_xmit_process(ppp, skb);
return NETDEV_TX_OK;
outf:
kfree_skb(skb);
++dev->stats.tx_dropped;
return NETDEV_TX_OK;
}
static int
ppp_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *addr, int cmd)
{
struct ppp *ppp = netdev_priv(dev);
int err = -EFAULT;
struct ppp_stats stats;
struct ppp_comp_stats cstats;
char *vers;
switch (cmd) {
case SIOCGPPPSTATS:
ppp_get_stats(ppp, &stats);
if (copy_to_user(addr, &stats, sizeof(stats)))
break;
err = 0;
break;
case SIOCGPPPCSTATS:
memset(&cstats, 0, sizeof(cstats));
if (ppp->xc_state)
ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
if (ppp->rc_state)
ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
if (copy_to_user(addr, &cstats, sizeof(cstats)))
break;
err = 0;
break;
case SIOCGPPPVER:
vers = PPP_VERSION;
if (copy_to_user(addr, vers, strlen(vers) + 1))
break;
err = 0;
break;
default:
err = -EINVAL;
}
return err;
}
static void
ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
{
struct ppp *ppp = netdev_priv(dev);
ppp_recv_lock(ppp);
stats64->rx_packets = ppp->stats64.rx_packets;
stats64->rx_bytes = ppp->stats64.rx_bytes;
ppp_recv_unlock(ppp);
ppp_xmit_lock(ppp);
stats64->tx_packets = ppp->stats64.tx_packets;
stats64->tx_bytes = ppp->stats64.tx_bytes;
ppp_xmit_unlock(ppp);
stats64->rx_errors = dev->stats.rx_errors;
stats64->tx_errors = dev->stats.tx_errors;
stats64->rx_dropped = dev->stats.rx_dropped;
stats64->tx_dropped = dev->stats.tx_dropped;
stats64->rx_length_errors = dev->stats.rx_length_errors;
}
static int ppp_dev_init(struct net_device *dev)
{
struct ppp *ppp;
netdev_lockdep_set_classes(dev);
ppp = netdev_priv(dev);
/* Let the netdevice take a reference on the ppp file. This ensures
* that ppp_destroy_interface() won't run before the device gets
* unregistered.
*/
refcount_inc(&ppp->file.refcnt);
return 0;
}
static void ppp_dev_uninit(struct net_device *dev)
{
struct ppp *ppp = netdev_priv(dev);
struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
ppp_lock(ppp);
ppp->closing = 1;
ppp_unlock(ppp);
mutex_lock(&pn->all_ppp_mutex);
unit_put(&pn->units_idr, ppp->file.index);
mutex_unlock(&pn->all_ppp_mutex);
ppp->owner = NULL;
ppp->file.dead = 1;
wake_up_interruptible(&ppp->file.rwait);
}
static void ppp_dev_priv_destructor(struct net_device *dev)
{
struct ppp *ppp;
ppp = netdev_priv(dev);
if (refcount_dec_and_test(&ppp->file.refcnt))
ppp_destroy_interface(ppp);
}
static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
struct net_device_path *path)
{
struct ppp *ppp = netdev_priv(ctx->dev);
struct ppp_channel *chan;
struct channel *pch;
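/* Describe the first hop of the transmit path for the forwarding-path
 * (flow offload) infrastructure: only a non-multilink unit with a channel
 * that implements fill_forward_path can be followed further down.
 */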
if (ppp->flags & SC_MULTILINK)
return -EOPNOTSUPP;
if (list_empty(&ppp->channels))
return -ENODEV;
pch = list_first_entry(&ppp->channels, struct channel, clist);
chan = pch->chan;
if (!chan->ops->fill_forward_path)
return -EOPNOTSUPP;
return chan->ops->fill_forward_path(ctx, path, chan);
}
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
.ndo_uninit = ppp_dev_uninit,
.ndo_start_xmit = ppp_start_xmit,
.ndo_siocdevprivate = ppp_net_siocdevprivate,
.ndo_get_stats64 = ppp_get_stats64,
.ndo_fill_forward_path = ppp_fill_forward_path,
};
static struct device_type ppp_type = {
.name = "ppp",
};
static void ppp_setup(struct net_device *dev)
{
dev->netdev_ops = &ppp_netdev_ops;
SET_NETDEV_DEVTYPE(dev, &ppp_type);
dev->features |= NETIF_F_LLTX;
dev->hard_header_len = PPP_HDRLEN;
dev->mtu = PPP_MRU;
dev->addr_len = 0;
dev->tx_queue_len = 3;
dev->type = ARPHRD_PPP;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->priv_destructor = ppp_dev_priv_destructor;
netif_keep_dst(dev);
}
/*
* Transmit-side routines.
*/
/* Called to do any work queued up on the transmit side that can now be done */
static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
ppp_xmit_lock(ppp);
if (!ppp->closing) {
ppp_push(ppp);
if (skb)
skb_queue_tail(&ppp->file.xq, skb);
while (!ppp->xmit_pending &&
(skb = skb_dequeue(&ppp->file.xq)))
ppp_send_frame(ppp, skb);
/* If there's no work left to do, tell the core net
code that we can accept some more. */
if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
netif_wake_queue(ppp->dev);
else
netif_stop_queue(ppp->dev);
} else {
kfree_skb(skb);
}
ppp_xmit_unlock(ppp);
}
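/* Feed a packet into the unit's transmit machinery. The per-CPU
 * xmit_recursion counter catches the case where a channel's start_xmit ends
 * up re-entering the same ppp unit on this CPU (e.g. a PPP tunnel carried
 * over another PPP interface), which would otherwise recurse without bound.
 */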
static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
local_bh_disable();
if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
goto err;
(*this_cpu_ptr(ppp->xmit_recursion))++;
__ppp_xmit_process(ppp, skb);
(*this_cpu_ptr(ppp->xmit_recursion))--;
local_bh_enable();
return;
err:
local_bh_enable();
kfree_skb(skb);
if (net_ratelimit())
netdev_err(ppp->dev, "recursion detected\n");
}
static inline struct sk_buff *
pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
{
struct sk_buff *new_skb;
int len;
int new_skb_size = ppp->dev->mtu +
ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
int compressor_skb_size = ppp->dev->mtu +
ppp->xcomp->comp_extra + PPP_HDRLEN;
new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
if (!new_skb) {
if (net_ratelimit())
netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
return NULL;
}
if (ppp->dev->hard_header_len > PPP_HDRLEN)
skb_reserve(new_skb,
ppp->dev->hard_header_len - PPP_HDRLEN);
/* compressor still expects A/C bytes in hdr */
len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
new_skb->data, skb->len + 2,
compressor_skb_size);
if (len > 0 && (ppp->flags & SC_CCP_UP)) {
consume_skb(skb);
skb = new_skb;
skb_put(skb, len);
skb_pull(skb, 2); /* pull off A/C bytes */
} else if (len == 0) {
/* didn't compress, or CCP not up yet */
consume_skb(new_skb);
new_skb = skb;
} else {
/*
* (len < 0)
* MPPE requires that we do not send unencrypted
* frames. The compressor will return -1 if we
* should drop the frame. We cannot simply test
* the compress_proto because MPPE and MPPC share
* the same number.
*/
if (net_ratelimit())
netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
kfree_skb(skb);
consume_skb(new_skb);
new_skb = NULL;
}
return new_skb;
}
/*
* Compress and send a frame.
* The caller should have locked the xmit path,
* and xmit_pending should be 0.
*/
static void
ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
{
int proto = PPP_PROTO(skb);
struct sk_buff *new_skb;
int len;
unsigned char *cp;
skb->dev = ppp->dev;
if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
/* check if we should pass this packet */
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
*(u8 *)skb_push(skb, 2) = 1;
if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev,
"PPP: outbound frame "
"not passed\n");
kfree_skb(skb);
return;
}
/* if this packet passes the active filter, record the time */
if (!(ppp->active_filter &&
bpf_prog_run(ppp->active_filter, skb) == 0))
ppp->last_xmit = jiffies;
skb_pull(skb, 2);
#else
/* for data packets, record the time */
ppp->last_xmit = jiffies;
#endif /* CONFIG_PPP_FILTER */
}
++ppp->stats64.tx_packets;
ppp->stats64.tx_bytes += skb->len - PPP_PROTO_LEN;
switch (proto) {
case PPP_IP:
if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
break;
/* try to do VJ TCP header compression */
new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
GFP_ATOMIC);
if (!new_skb) {
netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
goto drop;
}
skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
cp = skb->data + 2;
len = slhc_compress(ppp->vj, cp, skb->len - 2,
new_skb->data + 2, &cp,
!(ppp->flags & SC_NO_TCP_CCID));
if (cp == skb->data + 2) {
/* didn't compress */
consume_skb(new_skb);
} else {
if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
proto = PPP_VJC_COMP;
cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
} else {
proto = PPP_VJC_UNCOMP;
cp[0] = skb->data[2];
}
consume_skb(skb);
skb = new_skb;
cp = skb_put(skb, len + 2);
cp[0] = 0;
cp[1] = proto;
}
break;
case PPP_CCP:
/* peek at outbound CCP frames */
ppp_ccp_peek(ppp, skb, 0);
break;
}
/* try to do packet compression */
if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
proto != PPP_LCP && proto != PPP_CCP) {
if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
if (net_ratelimit())
netdev_err(ppp->dev,
"ppp: compression required but "
"down - pkt dropped.\n");
goto drop;
}
skb = pad_compress_skb(ppp, skb);
if (!skb)
goto drop;
}
/*
* If we are waiting for traffic (demand dialling),
* queue it up for pppd to receive.
*/
if (ppp->flags & SC_LOOP_TRAFFIC) {
if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
goto drop;
skb_queue_tail(&ppp->file.rq, skb);
wake_up_interruptible(&ppp->file.rwait);
return;
}
ppp->xmit_pending = skb;
ppp_push(ppp);
return;
drop:
kfree_skb(skb);
++ppp->dev->stats.tx_errors;
}
/*
* Try to send the frame in xmit_pending.
* The caller should have the xmit path locked.
*/
static void
ppp_push(struct ppp *ppp)
{
struct list_head *list;
struct channel *pch;
struct sk_buff *skb = ppp->xmit_pending;
if (!skb)
return;
list = &ppp->channels;
if (list_empty(list)) {
/* nowhere to send the packet, just drop it */
ppp->xmit_pending = NULL;
kfree_skb(skb);
return;
}
if ((ppp->flags & SC_MULTILINK) == 0) {
/* not doing multilink: send it down the first channel */
list = list->next;
pch = list_entry(list, struct channel, clist);
spin_lock(&pch->downl);
if (pch->chan) {
if (pch->chan->ops->start_xmit(pch->chan, skb))
ppp->xmit_pending = NULL;
} else {
/* channel got unregistered */
kfree_skb(skb);
ppp->xmit_pending = NULL;
}
spin_unlock(&pch->downl);
return;
}
#ifdef CONFIG_PPP_MULTILINK
/* Multilink: fragment the packet over as many links
as can take the packet at the moment. */
if (!ppp_mp_explode(ppp, skb))
return;
#endif /* CONFIG_PPP_MULTILINK */
ppp->xmit_pending = NULL;
kfree_skb(skb);
}
#ifdef CONFIG_PPP_MULTILINK
static bool mp_protocol_compress __read_mostly = true;
module_param(mp_protocol_compress, bool, 0644);
MODULE_PARM_DESC(mp_protocol_compress,
"compress protocol id in multilink fragments");
/*
* Divide a packet to be transmitted into fragments and
* send them out the individual links.
*/
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
int len, totlen;
int i, bits, hdrlen, mtu;
int flen;
int navail, nfree, nzero;
int nbigger;
int totspeed;
int totfree;
unsigned char *p, *q;
struct list_head *list;
struct channel *pch;
struct sk_buff *frag;
struct ppp_channel *chan;
totspeed = 0; /* total bitrate of the bundle */
nfree = 0; /* # channels which have no packet already queued */
navail = 0; /* total # of usable channels (not deregistered) */
nzero = 0; /* number of channels with zero speed associated */
totfree = 0; /* total # of channels available and
* having no queued packets before
* starting the fragmentation */
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
i = 0;
list_for_each_entry(pch, &ppp->channels, clist) {
if (pch->chan) {
pch->avail = 1;
navail++;
pch->speed = pch->chan->speed;
} else {
pch->avail = 0;
}
if (pch->avail) {
if (skb_queue_empty(&pch->file.xq) ||
!pch->had_frag) {
if (pch->speed == 0)
nzero++;
else
totspeed += pch->speed;
pch->avail = 2;
++nfree;
++totfree;
}
if (!pch->had_frag && i < ppp->nxchan)
ppp->nxchan = i;
}
++i;
}
/*
* Don't start sending this packet unless at least half of
* the channels are free. This gives much better TCP
* performance if we have a lot of channels.
*/
if (nfree == 0 || nfree < navail / 2)
return 0; /* can't take now, leave it in xmit_pending */
/* Do protocol field compression */
p = skb->data;
len = skb->len;
if (*p == 0 && mp_protocol_compress) {
++p;
--len;
}
totlen = len;
nbigger = len % nfree;
/* skip to the channel after the one we last used
and start at that one */
list = &ppp->channels;
for (i = 0; i < ppp->nxchan; ++i) {
list = list->next;
if (list == &ppp->channels) {
i = 0;
break;
}
}
/* create a fragment for each channel */
bits = B;
while (len > 0) {
list = list->next;
if (list == &ppp->channels) {
i = 0;
continue;
}
pch = list_entry(list, struct channel, clist);
++i;
if (!pch->avail)
continue;
/*
* Skip this channel if it has a fragment pending already and
* we haven't given a fragment to all of the free channels.
*/
if (pch->avail == 1) {
if (nfree > 0)
continue;
} else {
pch->avail = 1;
}
/* check the channel's mtu and whether it is still attached. */
spin_lock(&pch->downl);
if (pch->chan == NULL) {
/* can't use this channel, it's being deregistered */
if (pch->speed == 0)
nzero--;
else
totspeed -= pch->speed;
spin_unlock(&pch->downl);
pch->avail = 0;
totlen = len;
totfree--;
nfree--;
if (--navail == 0)
break;
continue;
}
/*
*if the channel speed is not set, divide
*the packet evenly among the free channels;
*otherwise divide it according to the speed
*of the channel we are going to transmit on
*/
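/* Roughly: the speed-based branch below gives this channel a share of the
 * payload plus per-fragment header overhead proportional to
 * pch->speed / totspeed; nbigger spreads the rounding remainder.
 */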
flen = len;
if (nfree > 0) {
if (pch->speed == 0) {
flen = len/nfree;
if (nbigger > 0) {
flen++;
nbigger--;
}
} else {
flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
((totspeed*totfree)/pch->speed)) - hdrlen;
if (nbigger > 0) {
flen += ((totfree - nzero)*pch->speed)/totspeed;
nbigger -= ((totfree - nzero)*pch->speed)/
totspeed;
}
}
nfree--;
}
/*
*check if we are on the last channel or
*we have exceeded the length of the data to
*fragment
*/
if ((nfree <= 0) || (flen > len))
flen = len;
/*
*it is not worth transmitting on slow channels:
*in that case the flen resulting from the
*above formula will be less than or equal to zero.
*Skip the channel in this case
*/
if (flen <= 0) {
pch->avail = 2;
spin_unlock(&pch->downl);
continue;
}
/*
* hdrlen includes the 2-byte PPP protocol field, but the
* MTU counts only the payload excluding the protocol field.
* (RFC1661 Section 2)
*/
mtu = pch->chan->mtu - (hdrlen - 2);
if (mtu < 4)
mtu = 4;
if (flen > mtu)
flen = mtu;
if (flen == len)
bits |= E;
frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
if (!frag)
goto noskb;
q = skb_put(frag, flen + hdrlen);
/* make the MP header */
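/* Long sequence format: B/E flags in q[2], 24-bit sequence number in
 * q[3]..q[5]. Short sequence format (SC_MP_XSHORTSEQ): B/E flags plus the
 * top four sequence bits in q[2], low eight bits in q[3].
 */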
put_unaligned_be16(PPP_MP, q);
if (ppp->flags & SC_MP_XSHORTSEQ) {
q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
q[3] = ppp->nxseq;
} else {
q[2] = bits;
q[3] = ppp->nxseq >> 16;
q[4] = ppp->nxseq >> 8;
q[5] = ppp->nxseq;
}
memcpy(q + hdrlen, p, flen);
/* try to send it down the channel */
chan = pch->chan;
if (!skb_queue_empty(&pch->file.xq) ||
!chan->ops->start_xmit(chan, frag))
skb_queue_tail(&pch->file.xq, frag);
pch->had_frag = 1;
p += flen;
len -= flen;
++ppp->nxseq;
bits = 0;
spin_unlock(&pch->downl);
}
ppp->nxchan = i;
return 1;
noskb:
spin_unlock(&pch->downl);
if (ppp->debug & 1)
netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
++ppp->dev->stats.tx_errors;
++ppp->nxseq;
return 1; /* abandon the frame */
}
#endif /* CONFIG_PPP_MULTILINK */
/* Try to send data out on a channel */
static void __ppp_channel_push(struct channel *pch)
{
struct sk_buff *skb;
struct ppp *ppp;
spin_lock(&pch->downl);
if (pch->chan) {
while (!skb_queue_empty(&pch->file.xq)) {
skb = skb_dequeue(&pch->file.xq);
if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
/* put the packet back and try again later */
skb_queue_head(&pch->file.xq, skb);
break;
}
}
} else {
/* channel got deregistered */
skb_queue_purge(&pch->file.xq);
}
spin_unlock(&pch->downl);
/* see if there is anything from the attached unit to be sent */
if (skb_queue_empty(&pch->file.xq)) {
ppp = pch->ppp;
if (ppp)
__ppp_xmit_process(ppp, NULL);
}
}
static void ppp_channel_push(struct channel *pch)
{
read_lock_bh(&pch->upl);
if (pch->ppp) {
(*this_cpu_ptr(pch->ppp->xmit_recursion))++;
__ppp_channel_push(pch);
(*this_cpu_ptr(pch->ppp->xmit_recursion))--;
} else {
__ppp_channel_push(pch);
}
read_unlock_bh(&pch->upl);
}
/*
* Receive-side routines.
*/
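/* Per-fragment reassembly state (expanded sequence number and begin/end
 * flags), stashed in skb->cb while the fragment sits on the mrq queue.
 */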
struct ppp_mp_skb_parm {
u32 sequence;
u8 BEbits;
};
#define PPP_MP_CB(skb) ((struct ppp_mp_skb_parm *)((skb)->cb))
static inline void
ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
ppp_recv_lock(ppp);
if (!ppp->closing)
ppp_receive_frame(ppp, skb, pch);
else
kfree_skb(skb);
ppp_recv_unlock(ppp);
}
/**
* __ppp_decompress_proto - Decompress protocol field, slim version.
* @skb: Socket buffer where protocol field should be decompressed. It must have
* at least 1 byte of head room and 1 byte of linear data. First byte of
* data must be a protocol field byte.
*
* Decompress protocol field in PPP header if it's compressed, e.g. when
* Protocol-Field-Compression (PFC) was negotiated. No checks w.r.t. skb data
* length are done in this function.
*/
static void __ppp_decompress_proto(struct sk_buff *skb)
{
if (skb->data[0] & 0x01)
*(u8 *)skb_push(skb, 1) = 0x00;
}
/**
* ppp_decompress_proto - Check skb data room and decompress protocol field.
* @skb: Socket buffer where protocol field should be decompressed. First byte
* of data must be a protocol field byte.
*
* Decompress protocol field in PPP header if it's compressed, e.g. when
* Protocol-Field-Compression (PFC) was negotiated. This function also makes
* sure that skb data room is sufficient for Protocol field, before and after
* decompression.
*
* Return: true - decompressed successfully, false - not enough room in skb.
*/
static bool ppp_decompress_proto(struct sk_buff *skb)
{
/* At least one byte should be present (if protocol is compressed) */
if (!pskb_may_pull(skb, 1))
return false;
__ppp_decompress_proto(skb);
/* Protocol field should occupy 2 bytes when not compressed */
return pskb_may_pull(skb, 2);
}
/* Attempt to handle a frame via a bridged channel, if one exists.
* If the channel is bridged, the frame is consumed by the bridge.
* If not, the caller must handle the frame by normal recv mechanisms.
* Returns true if the frame is consumed, false otherwise.
*/
static bool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb)
{
struct channel *pchb;
rcu_read_lock();
pchb = rcu_dereference(pch->bridge);
if (!pchb)
goto out_rcu;
spin_lock(&pchb->downl);
if (!pchb->chan) {
/* channel got unregistered */
kfree_skb(skb);
goto outl;
}
skb_scrub_packet(skb, !net_eq(pch->chan_net, pchb->chan_net));
if (!pchb->chan->ops->start_xmit(pchb->chan, skb))
kfree_skb(skb);
outl:
spin_unlock(&pchb->downl);
out_rcu:
rcu_read_unlock();
/* If pchb is set then we've consumed the packet */
return !!pchb;
}
void
ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
{
struct channel *pch = chan->ppp;
int proto;
if (!pch) {
kfree_skb(skb);
return;
}
/* If the channel is bridged, transmit via the bridge */
if (ppp_channel_bridge_input(pch, skb))
return;
read_lock_bh(&pch->upl);
if (!ppp_decompress_proto(skb)) {
kfree_skb(skb);
if (pch->ppp) {
++pch->ppp->dev->stats.rx_length_errors;
ppp_receive_error(pch->ppp);
}
goto done;
}
proto = PPP_PROTO(skb);
if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
/* put it on the channel queue */
skb_queue_tail(&pch->file.rq, skb);
/* drop old frames if queue too long */
while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
(skb = skb_dequeue(&pch->file.rq)))
kfree_skb(skb);
wake_up_interruptible(&pch->file.rwait);
} else {
ppp_do_recv(pch->ppp, skb, pch);
}
done:
read_unlock_bh(&pch->upl);
}
/* Put a 0-length skb in the receive queue as an error indication */
void
ppp_input_error(struct ppp_channel *chan, int code)
{
struct channel *pch = chan->ppp;
struct sk_buff *skb;
if (!pch)
return;
read_lock_bh(&pch->upl);
if (pch->ppp) {
skb = alloc_skb(0, GFP_ATOMIC);
if (skb) {
skb->len = 0; /* probably unnecessary */
skb->cb[0] = code;
ppp_do_recv(pch->ppp, skb, pch);
}
}
read_unlock_bh(&pch->upl);
}
/*
* We come in here to process a received frame.
* The receive side of the ppp unit is locked.
*/
static void
ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
/* note: a 0-length skb is used as an error indication */
if (skb->len > 0) {
skb_checksum_complete_unset(skb);
#ifdef CONFIG_PPP_MULTILINK
/* XXX do channel-level decompression here */
if (PPP_PROTO(skb) == PPP_MP)
ppp_receive_mp_frame(ppp, skb, pch);
else
#endif /* CONFIG_PPP_MULTILINK */
ppp_receive_nonmp_frame(ppp, skb);
} else {
kfree_skb(skb);
ppp_receive_error(ppp);
}
}
static void
ppp_receive_error(struct ppp *ppp)
{
++ppp->dev->stats.rx_errors;
if (ppp->vj)
slhc_toss(ppp->vj);
}
static void
ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
{
struct sk_buff *ns;
int proto, len, npi;
/*
* Decompress the frame, if compressed.
* Note that some decompressors need to see uncompressed frames
* that come in as well as compressed frames.
*/
if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
(ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
skb = ppp_decompress_frame(ppp, skb);
if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
goto err;
/* At this point the "Protocol" field MUST be decompressed, either in
* ppp_input(), ppp_decompress_frame() or in ppp_receive_mp_frame().
*/
proto = PPP_PROTO(skb);
switch (proto) {
case PPP_VJC_COMP:
/* decompress VJ compressed packets */
if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
goto err;
if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
/* copy to a new sk_buff with more tailroom */
ns = dev_alloc_skb(skb->len + 128);
if (!ns) {
netdev_err(ppp->dev, "PPP: no memory "
"(VJ decomp)\n");
goto err;
}
skb_reserve(ns, 2);
skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
consume_skb(skb);
skb = ns;
} else {
skb->ip_summed = CHECKSUM_NONE;
}
len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
if (len <= 0) {
netdev_printk(KERN_DEBUG, ppp->dev,
"PPP: VJ decompression error\n");
goto err;
}
len += 2;
if (len > skb->len)
skb_put(skb, len - skb->len);
else if (len < skb->len)
skb_trim(skb, len);
proto = PPP_IP;
break;
case PPP_VJC_UNCOMP:
if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
goto err;
/* Until we fix the decompressor, we need to make sure
* the data portion is linear.
*/
if (!pskb_may_pull(skb, skb->len))
goto err;
if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
goto err;
}
proto = PPP_IP;
break;
case PPP_CCP:
ppp_ccp_peek(ppp, skb, 1);
break;
}
++ppp->stats64.rx_packets;
ppp->stats64.rx_bytes += skb->len - 2;
npi = proto_to_npindex(proto);
if (npi < 0) {
/* control or unknown frame - pass it to pppd */
skb_queue_tail(&ppp->file.rq, skb);
/* limit queue length by dropping old frames */
while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
(skb = skb_dequeue(&ppp->file.rq)))
kfree_skb(skb);
/* wake up any process polling or blocking on read */
wake_up_interruptible(&ppp->file.rwait);
} else {
/* network protocol frame - give it to the kernel */
#ifdef CONFIG_PPP_FILTER
/* check if the packet passes the pass and active filters */
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
if (ppp->pass_filter || ppp->active_filter) {
if (skb_unclone(skb, GFP_ATOMIC))
goto err;
*(u8 *)skb_push(skb, 2) = 0;
if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev,
"PPP: inbound frame "
"not passed\n");
kfree_skb(skb);
return;
}
if (!(ppp->active_filter &&
bpf_prog_run(ppp->active_filter, skb) == 0))
ppp->last_recv = jiffies;
__skb_pull(skb, 2);
} else
#endif /* CONFIG_PPP_FILTER */
ppp->last_recv = jiffies;
if ((ppp->dev->flags & IFF_UP) == 0 ||
ppp->npmode[npi] != NPMODE_PASS) {
kfree_skb(skb);
} else {
/* chop off protocol */
skb_pull_rcsum(skb, 2);
skb->dev = ppp->dev;
skb->protocol = htons(npindex_to_ethertype[npi]);
skb_reset_mac_header(skb);
skb_scrub_packet(skb, !net_eq(ppp->ppp_net,
dev_net(ppp->dev)));
netif_rx(skb);
}
}
return;
err:
kfree_skb(skb);
ppp_receive_error(ppp);
}
static struct sk_buff *
ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
{
int proto = PPP_PROTO(skb);
struct sk_buff *ns;
int len;
/* Until we fix all the decompressors, we need to make sure
* the data portion is linear.
*/
if (!pskb_may_pull(skb, skb->len))
goto err;
if (proto == PPP_COMP) {
int obuff_size;
switch (ppp->rcomp->compress_proto) {
case CI_MPPE:
obuff_size = ppp->mru + PPP_HDRLEN + 1;
break;
default:
obuff_size = ppp->mru + PPP_HDRLEN;
break;
}
ns = dev_alloc_skb(obuff_size);
if (!ns) {
netdev_err(ppp->dev, "ppp_decompress_frame: "
"no memory\n");
goto err;
}
/* the decompressor still expects the A/C bytes in the hdr */
len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
skb->len + 2, ns->data, obuff_size);
if (len < 0) {
/* Pass the compressed frame to pppd as an
error indication. */
if (len == DECOMP_FATALERROR)
ppp->rstate |= SC_DC_FERROR;
kfree_skb(ns);
goto err;
}
consume_skb(skb);
skb = ns;
skb_put(skb, len);
skb_pull(skb, 2); /* pull off the A/C bytes */
/* Don't call __ppp_decompress_proto() here, but instead rely on
* corresponding algo (mppe/bsd/deflate) to decompress it.
*/
} else {
/* Uncompressed frame - pass to decompressor so it
can update its dictionary if necessary. */
if (ppp->rcomp->incomp)
ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
skb->len + 2);
}
return skb;
err:
ppp->rstate |= SC_DC_ERROR;
ppp_receive_error(ppp);
return skb;
}
#ifdef CONFIG_PPP_MULTILINK
/*
* Receive a multilink frame.
* We put it on the reconstruction queue and then pull off
* as many completed frames as we can.
*/
static void
ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
u32 mask, seq;
struct channel *ch;
int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
goto err; /* no good, throw it away */
/* Decode sequence number and begin/end bits */
if (ppp->flags & SC_MP_SHORTSEQ) {
seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
mask = 0xfff;
} else {
seq = (skb->data[3] << 16) | (skb->data[4] << 8) | skb->data[5];
mask = 0xffffff;
}
PPP_MP_CB(skb)->BEbits = skb->data[2];
skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */
/*
* Do protocol ID decompression on the first fragment of each packet.
* We have to do that here, because ppp_receive_nonmp_frame() expects
* decompressed protocol field.
*/
if (PPP_MP_CB(skb)->BEbits & B)
__ppp_decompress_proto(skb);
/*
* Expand sequence number to 32 bits, making it as close
* as possible to ppp->minseq.
*/
seq |= ppp->minseq & ~mask;
if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
seq += mask + 1;
else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
seq -= mask + 1; /* should never happen */
PPP_MP_CB(skb)->sequence = seq;
pch->lastseq = seq;
/*
* If this packet comes before the next one we were expecting,
* drop it.
*/
if (seq_before(seq, ppp->nextseq)) {
kfree_skb(skb);
++ppp->dev->stats.rx_dropped;
ppp_receive_error(ppp);
return;
}
/*
* Reevaluate minseq, the minimum over all channels of the
* last sequence number received on each channel. Because of
* the increasing sequence number rule, we know that any fragment
* before `minseq' which hasn't arrived is never going to arrive.
* The list of channels can't change because we have the receive
* side of the ppp unit locked.
*/
list_for_each_entry(ch, &ppp->channels, clist) {
if (seq_before(ch->lastseq, seq))
seq = ch->lastseq;
}
if (seq_before(ppp->minseq, seq))
ppp->minseq = seq;
/* Put the fragment on the reconstruction queue */
ppp_mp_insert(ppp, skb);
/* If the queue is getting long, don't wait any longer for packets
before the start of the queue. */
if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
struct sk_buff *mskb = skb_peek(&ppp->mrq);
if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
ppp->minseq = PPP_MP_CB(mskb)->sequence;
}
/* Pull completed packets off the queue and receive them. */
while ((skb = ppp_mp_reconstruct(ppp))) {
if (pskb_may_pull(skb, 2))
ppp_receive_nonmp_frame(ppp, skb);
else {
++ppp->dev->stats.rx_length_errors;
kfree_skb(skb);
ppp_receive_error(ppp);
}
}
return;
err:
kfree_skb(skb);
ppp_receive_error(ppp);
}
/*
* Insert a fragment on the MP reconstruction queue.
* The queue is ordered by increasing sequence number.
*/
static void
ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
{
struct sk_buff *p;
struct sk_buff_head *list = &ppp->mrq;
u32 seq = PPP_MP_CB(skb)->sequence;
/* N.B. we don't need to lock the list lock because we have the
ppp unit receive-side lock. */
skb_queue_walk(list, p) {
if (seq_before(seq, PPP_MP_CB(p)->sequence))
break;
}
__skb_queue_before(list, p, skb);
}
/*
* Reconstruct a packet from the MP fragment queue.
* We go through increasing sequence numbers until we find a
* complete packet, or we get to the sequence number for a fragment
* which hasn't arrived but might still do so.
*/
static struct sk_buff *
ppp_mp_reconstruct(struct ppp *ppp)
{
u32 seq = ppp->nextseq;
u32 minseq = ppp->minseq;
struct sk_buff_head *list = &ppp->mrq;
struct sk_buff *p, *tmp;
struct sk_buff *head, *tail;
struct sk_buff *skb = NULL;
int lost = 0, len = 0;
if (ppp->mrru == 0) /* do nothing until mrru is set */
return NULL;
head = __skb_peek(list);
tail = NULL;
skb_queue_walk_safe(list, p, tmp) {
again:
if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
/* this can't happen, anyway ignore the skb */
netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
"seq %u < %u\n",
PPP_MP_CB(p)->sequence, seq);
__skb_unlink(p, list);
kfree_skb(p);
continue;
}
if (PPP_MP_CB(p)->sequence != seq) {
u32 oldseq;
/* Fragment `seq' is missing. If it is after
minseq, it might arrive later, so stop here. */
if (seq_after(seq, minseq))
break;
/* Fragment `seq' is lost, keep going. */
lost = 1;
oldseq = seq;
seq = seq_before(minseq, PPP_MP_CB(p)->sequence) ?
minseq + 1 : PPP_MP_CB(p)->sequence;
if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev,
"lost frag %u..%u\n",
oldseq, seq-1);
goto again;
}
/*
* At this point we know that all the fragments from
* ppp->nextseq to seq are either present or lost.
* Also, there are no complete packets in the queue
* that have no missing fragments and end before this
* fragment.
*/
/* B bit set indicates this fragment starts a packet */
if (PPP_MP_CB(p)->BEbits & B) {
head = p;
lost = 0;
len = 0;
}
len += p->len;
/* Got a complete packet yet? */
if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
(PPP_MP_CB(head)->BEbits & B)) {
if (len > ppp->mrru + 2) {
++ppp->dev->stats.rx_length_errors;
netdev_printk(KERN_DEBUG, ppp->dev,
"PPP: reconstructed packet"
" is too long (%d)\n", len);
} else {
tail = p;
break;
}
ppp->nextseq = seq + 1;
}
/*
* If this is the ending fragment of a packet,
* and we haven't found a complete valid packet yet,
* we can discard up to and including this fragment.
*/
if (PPP_MP_CB(p)->BEbits & E) {
struct sk_buff *tmp2;
skb_queue_reverse_walk_from_safe(list, p, tmp2) {
if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev,
"discarding frag %u\n",
PPP_MP_CB(p)->sequence);
__skb_unlink(p, list);
kfree_skb(p);
}
head = skb_peek(list);
if (!head)
break;
}
++seq;
}
/* If we have a complete packet, copy it all into one skb. */
if (tail != NULL) {
/* If we have discarded any fragments,
signal a receive error. */
if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
skb_queue_walk_safe(list, p, tmp) {
if (p == head)
break;
if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev,
"discarding frag %u\n",
PPP_MP_CB(p)->sequence);
__skb_unlink(p, list);
kfree_skb(p);
}
if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev,
" missed pkts %u..%u\n",
ppp->nextseq,
PPP_MP_CB(head)->sequence-1);
++ppp->dev->stats.rx_dropped;
ppp_receive_error(ppp);
}
skb = head;
if (head != tail) {
struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
p = skb_queue_next(list, head);
__skb_unlink(skb, list);
skb_queue_walk_from_safe(list, p, tmp) {
__skb_unlink(p, list);
*fragpp = p;
p->next = NULL;
fragpp = &p->next;
skb->len += p->len;
skb->data_len += p->len;
skb->truesize += p->truesize;
if (p == tail)
break;
}
} else {
__skb_unlink(skb, list);
}
ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
}
return skb;
}
#endif /* CONFIG_PPP_MULTILINK */
/*
* Channel interface.
*/
/* Create a new, unattached ppp channel. */
int ppp_register_channel(struct ppp_channel *chan)
{
return ppp_register_net_channel(current->nsproxy->net_ns, chan);
}
/* Create a new, unattached ppp channel for specified net. */
int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
{
struct channel *pch;
struct ppp_net *pn;
pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
if (!pch)
return -ENOMEM;
pn = ppp_pernet(net);
pch->ppp = NULL;
pch->chan = chan;
pch->chan_net = get_net_track(net, &pch->ns_tracker, GFP_KERNEL);
chan->ppp = pch;
init_ppp_file(&pch->file, CHANNEL);
pch->file.hdrlen = chan->hdrlen;
#ifdef CONFIG_PPP_MULTILINK
pch->lastseq = -1;
#endif /* CONFIG_PPP_MULTILINK */
init_rwsem(&pch->chan_sem);
spin_lock_init(&pch->downl);
rwlock_init(&pch->upl);
spin_lock_bh(&pn->all_channels_lock);
pch->file.index = ++pn->last_channel_index;
list_add(&pch->list, &pn->new_channels);
atomic_inc(&channel_count);
spin_unlock_bh(&pn->all_channels_lock);
return 0;
}
/*
* Return the index of a channel.
*/
int ppp_channel_index(struct ppp_channel *chan)
{
struct channel *pch = chan->ppp;
if (pch)
return pch->file.index;
return -1;
}
/*
* Return the PPP unit number to which a channel is connected.
*/
int ppp_unit_number(struct ppp_channel *chan)
{
struct channel *pch = chan->ppp;
int unit = -1;
if (pch) {
read_lock_bh(&pch->upl);
if (pch->ppp)
unit = pch->ppp->file.index;
read_unlock_bh(&pch->upl);
}
return unit;
}
/*
* Return the PPP device interface name of a channel.
*/
char *ppp_dev_name(struct ppp_channel *chan)
{
struct channel *pch = chan->ppp;
char *name = NULL;
if (pch) {
read_lock_bh(&pch->upl);
if (pch->ppp && pch->ppp->dev)
name = pch->ppp->dev->name;
read_unlock_bh(&pch->upl);
}
return name;
}
/*
* Disconnect a channel from the generic layer.
* This must be called in process context.
*/
void
ppp_unregister_channel(struct ppp_channel *chan)
{
struct channel *pch = chan->ppp;
struct ppp_net *pn;
if (!pch)
return; /* should never happen */
chan->ppp = NULL;
/*
* This ensures that we have returned from any calls into
* the channel's start_xmit or ioctl routine before we proceed.
*/
down_write(&pch->chan_sem);
spin_lock_bh(&pch->downl);
pch->chan = NULL;
spin_unlock_bh(&pch->downl);
up_write(&pch->chan_sem);
ppp_disconnect_channel(pch);
pn = ppp_pernet(pch->chan_net);
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
ppp_unbridge_channels(pch);
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
if (refcount_dec_and_test(&pch->file.refcnt))
ppp_destroy_channel(pch);
}
/*
* Callback from a channel when it can accept more to transmit.
* This should be called at BH/softirq level, not interrupt level.
*/
void
ppp_output_wakeup(struct ppp_channel *chan)
{
struct channel *pch = chan->ppp;
if (!pch)
return;
ppp_channel_push(pch);
}
/*
* Compression control.
*/
/* Process the PPPIOCSCOMPRESS ioctl. */
static int
ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data)
{
int err = -EFAULT;
struct compressor *cp, *ocomp;
void *state, *ostate;
unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
if (data->length > CCP_MAX_OPTION_LENGTH)
goto out;
if (copy_from_user(ccp_option, data->ptr, data->length))
goto out;
err = -EINVAL;
if (data->length < 2 || ccp_option[1] < 2 || ccp_option[1] > data->length)
goto out;
cp = try_then_request_module(
find_compressor(ccp_option[0]),
"ppp-compress-%d", ccp_option[0]);
if (!cp)
goto out;
err = -ENOBUFS;
if (data->transmit) {
state = cp->comp_alloc(ccp_option, data->length);
if (state) {
ppp_xmit_lock(ppp);
ppp->xstate &= ~SC_COMP_RUN;
ocomp = ppp->xcomp;
ostate = ppp->xc_state;
ppp->xcomp = cp;
ppp->xc_state = state;
ppp_xmit_unlock(ppp);
if (ostate) {
ocomp->comp_free(ostate);
module_put(ocomp->owner);
}
err = 0;
} else
module_put(cp->owner);
} else {
state = cp->decomp_alloc(ccp_option, data->length);
if (state) {
ppp_recv_lock(ppp);
ppp->rstate &= ~SC_DECOMP_RUN;
ocomp = ppp->rcomp;
ostate = ppp->rc_state;
ppp->rcomp = cp;
ppp->rc_state = state;
ppp_recv_unlock(ppp);
if (ostate) {
ocomp->decomp_free(ostate);
module_put(ocomp->owner);
}
err = 0;
} else
module_put(cp->owner);
}
out:
return err;
}
/*
* Look at a CCP packet and update our state accordingly.
* We assume the caller has the xmit or recv path locked.
*/
static void
ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
{
unsigned char *dp;
int len;
if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
return; /* no header */
dp = skb->data + 2;
switch (CCP_CODE(dp)) {
case CCP_CONFREQ:
/* A ConfReq starts negotiation of compression
* in one direction of transmission,
* and hence brings it down...but which way?
*
* Remember:
* A ConfReq indicates what the sender would like to receive
*/
if (inbound)
/* He is proposing what I should send */
ppp->xstate &= ~SC_COMP_RUN;
else
/* I am proposing what he should send */
ppp->rstate &= ~SC_DECOMP_RUN;
break;
case CCP_TERMREQ:
case CCP_TERMACK:
/*
* CCP is going down, both directions of transmission
*/
ppp->rstate &= ~SC_DECOMP_RUN;
ppp->xstate &= ~SC_COMP_RUN;
break;
case CCP_CONFACK:
if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
break;
len = CCP_LENGTH(dp);
if (!pskb_may_pull(skb, len + 2))
return; /* too short */
dp += CCP_HDRLEN;
len -= CCP_HDRLEN;
if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
break;
if (inbound) {
/* we will start receiving compressed packets */
if (!ppp->rc_state)
break;
if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
ppp->file.index, 0, ppp->mru, ppp->debug)) {
ppp->rstate |= SC_DECOMP_RUN;
ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
}
} else {
/* we will soon start sending compressed packets */
if (!ppp->xc_state)
break;
if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
ppp->file.index, 0, ppp->debug))
ppp->xstate |= SC_COMP_RUN;
}
break;
case CCP_RESETACK:
/* reset the [de]compressor */
if ((ppp->flags & SC_CCP_UP) == 0)
break;
if (inbound) {
if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
ppp->rcomp->decomp_reset(ppp->rc_state);
ppp->rstate &= ~SC_DC_ERROR;
}
} else {
if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
ppp->xcomp->comp_reset(ppp->xc_state);
}
break;
}
}
/* Free up compression resources. */
static void
ppp_ccp_closed(struct ppp *ppp)
{
void *xstate, *rstate;
struct compressor *xcomp, *rcomp;
ppp_lock(ppp);
ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
ppp->xstate = 0;
xcomp = ppp->xcomp;
xstate = ppp->xc_state;
ppp->xc_state = NULL;
ppp->rstate = 0;
rcomp = ppp->rcomp;
rstate = ppp->rc_state;
ppp->rc_state = NULL;
ppp_unlock(ppp);
if (xstate) {
xcomp->comp_free(xstate);
module_put(xcomp->owner);
}
if (rstate) {
rcomp->decomp_free(rstate);
module_put(rcomp->owner);
}
}
/* List of compressors. */
static LIST_HEAD(compressor_list);
static DEFINE_SPINLOCK(compressor_list_lock);
struct compressor_entry {
struct list_head list;
struct compressor *comp;
};
static struct compressor_entry *
find_comp_entry(int proto)
{
struct compressor_entry *ce;
list_for_each_entry(ce, &compressor_list, list) {
if (ce->comp->compress_proto == proto)
return ce;
}
return NULL;
}
/* Register a compressor */
int
ppp_register_compressor(struct compressor *cp)
{
struct compressor_entry *ce;
int ret;
spin_lock(&compressor_list_lock);
ret = -EEXIST;
if (find_comp_entry(cp->compress_proto))
goto out;
ret = -ENOMEM;
ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
if (!ce)
goto out;
ret = 0;
ce->comp = cp;
list_add(&ce->list, &compressor_list);
out:
spin_unlock(&compressor_list_lock);
return ret;
}
/* Unregister a compressor */
void
ppp_unregister_compressor(struct compressor *cp)
{
struct compressor_entry *ce;
spin_lock(&compressor_list_lock);
ce = find_comp_entry(cp->compress_proto);
if (ce && ce->comp == cp) {
list_del(&ce->list);
kfree(ce);
}
spin_unlock(&compressor_list_lock);
}
/* Find a compressor. */
static struct compressor *
find_compressor(int type)
{
struct compressor_entry *ce;
struct compressor *cp = NULL;
spin_lock(&compressor_list_lock);
ce = find_comp_entry(type);
if (ce) {
cp = ce->comp;
if (!try_module_get(cp->owner))
cp = NULL;
}
spin_unlock(&compressor_list_lock);
return cp;
}
/*
* Miscellaneous stuff.
*/
static void
ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
{
struct slcompress *vj = ppp->vj;
memset(st, 0, sizeof(*st));
st->p.ppp_ipackets = ppp->stats64.rx_packets;
st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
st->p.ppp_ibytes = ppp->stats64.rx_bytes;
st->p.ppp_opackets = ppp->stats64.tx_packets;
st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
st->p.ppp_obytes = ppp->stats64.tx_bytes;
if (!vj)
return;
st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
st->vj.vjs_compressed = vj->sls_o_compressed;
st->vj.vjs_searches = vj->sls_o_searches;
st->vj.vjs_misses = vj->sls_o_misses;
st->vj.vjs_errorin = vj->sls_i_error;
st->vj.vjs_tossed = vj->sls_i_tossed;
st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
st->vj.vjs_compressedin = vj->sls_i_compressed;
}
/*
* Stuff for handling the lists of ppp units and channels
* and for initialization.
*/
/*
* Create a new ppp interface unit. Fails if it can't allocate memory
* or if there is already a unit with the requested number.
* unit == -1 means allocate a new number.
*/
static int ppp_create_interface(struct net *net, struct file *file, int *unit)
{
struct ppp_config conf = {
.file = file,
.unit = *unit,
.ifname_is_set = false,
};
struct net_device *dev;
struct ppp *ppp;
int err;
dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup);
if (!dev) {
err = -ENOMEM;
goto err;
}
dev_net_set(dev, net);
dev->rtnl_link_ops = &ppp_link_ops;
rtnl_lock();
err = ppp_dev_configure(net, dev, &conf);
if (err < 0)
goto err_dev;
ppp = netdev_priv(dev);
*unit = ppp->file.index;
rtnl_unlock();
return 0;
err_dev:
rtnl_unlock();
free_netdev(dev);
err:
return err;
}
/*
* Initialize a ppp_file structure.
*/
static void
init_ppp_file(struct ppp_file *pf, int kind)
{
pf->kind = kind;
skb_queue_head_init(&pf->xq);
skb_queue_head_init(&pf->rq);
refcount_set(&pf->refcnt, 1);
init_waitqueue_head(&pf->rwait);
}
/*
* Free the memory used by a ppp unit. This is only called once
* there are no channels connected to the unit and no file structs
* that reference the unit.
*/
static void ppp_destroy_interface(struct ppp *ppp)
{
atomic_dec(&ppp_unit_count);
if (!ppp->file.dead || ppp->n_channels) {
/* "can't happen" */
netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
"but dead=%d n_channels=%d !\n",
ppp, ppp->file.dead, ppp->n_channels);
return;
}
ppp_ccp_closed(ppp);
if (ppp->vj) {
slhc_free(ppp->vj);
ppp->vj = NULL;
}
skb_queue_purge(&ppp->file.xq);
skb_queue_purge(&ppp->file.rq);
#ifdef CONFIG_PPP_MULTILINK
skb_queue_purge(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
if (ppp->pass_filter) {
bpf_prog_destroy(ppp->pass_filter);
ppp->pass_filter = NULL;
}
if (ppp->active_filter) {
bpf_prog_destroy(ppp->active_filter);
ppp->active_filter = NULL;
}
#endif /* CONFIG_PPP_FILTER */
kfree_skb(ppp->xmit_pending);
free_percpu(ppp->xmit_recursion);
free_netdev(ppp->dev);
}
/*
* Locate an existing ppp unit.
* The caller should have locked the all_ppp_mutex.
*/
static struct ppp *
ppp_find_unit(struct ppp_net *pn, int unit)
{
return unit_find(&pn->units_idr, unit);
}
/*
* Locate an existing ppp channel.
* The caller should have locked the all_channels_lock.
* First we look in the new_channels list, then in the
* all_channels list. If found in the new_channels list,
* we move it to the all_channels list. This is for speed
* when we have a lot of channels in use.
*/
static struct channel *
ppp_find_channel(struct ppp_net *pn, int unit)
{
struct channel *pch;
list_for_each_entry(pch, &pn->new_channels, list) {
if (pch->file.index == unit) {
list_move(&pch->list, &pn->all_channels);
return pch;
}
}
list_for_each_entry(pch, &pn->all_channels, list) {
if (pch->file.index == unit)
return pch;
}
return NULL;
}
/*
* Connect a PPP channel to a PPP interface unit.
*/
static int
ppp_connect_channel(struct channel *pch, int unit)
{
struct ppp *ppp;
struct ppp_net *pn;
int ret = -ENXIO;
int hdrlen;
pn = ppp_pernet(pch->chan_net);
mutex_lock(&pn->all_ppp_mutex);
ppp = ppp_find_unit(pn, unit);
if (!ppp)
goto out;
write_lock_bh(&pch->upl);
ret = -EINVAL;
if (pch->ppp ||
rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl)))
goto outl;
ppp_lock(ppp);
spin_lock_bh(&pch->downl);
if (!pch->chan) {
/* Don't connect unregistered channels */
spin_unlock_bh(&pch->downl);
ppp_unlock(ppp);
ret = -ENOTCONN;
goto outl;
}
spin_unlock_bh(&pch->downl);
if (pch->file.hdrlen > ppp->file.hdrlen)
ppp->file.hdrlen = pch->file.hdrlen;
hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
if (hdrlen > ppp->dev->hard_header_len)
ppp->dev->hard_header_len = hdrlen;
list_add_tail(&pch->clist, &ppp->channels);
++ppp->n_channels;
pch->ppp = ppp;
refcount_inc(&ppp->file.refcnt);
ppp_unlock(ppp);
ret = 0;
outl:
write_unlock_bh(&pch->upl);
out:
mutex_unlock(&pn->all_ppp_mutex);
return ret;
}
/*
* Disconnect a channel from its ppp unit.
*/
static int
ppp_disconnect_channel(struct channel *pch)
{
struct ppp *ppp;
int err = -EINVAL;
write_lock_bh(&pch->upl);
ppp = pch->ppp;
pch->ppp = NULL;
write_unlock_bh(&pch->upl);
if (ppp) {
/* remove it from the ppp unit's list */
ppp_lock(ppp);
list_del(&pch->clist);
if (--ppp->n_channels == 0)
wake_up_interruptible(&ppp->file.rwait);
ppp_unlock(ppp);
if (refcount_dec_and_test(&ppp->file.refcnt))
ppp_destroy_interface(ppp);
err = 0;
}
return err;
}
/*
* Free up the resources used by a ppp channel.
*/
static void ppp_destroy_channel(struct channel *pch)
{
put_net_track(pch->chan_net, &pch->ns_tracker);
pch->chan_net = NULL;
atomic_dec(&channel_count);
if (!pch->file.dead) {
/* "can't happen" */
pr_err("ppp: destroying undead channel %p !\n", pch);
return;
}
skb_queue_purge(&pch->file.xq);
skb_queue_purge(&pch->file.rq);
kfree(pch);
}
static void __exit ppp_cleanup(void)
{
/* should never happen */
if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
pr_err("PPP: removing module but units remain!\n");
rtnl_link_unregister(&ppp_link_ops);
unregister_chrdev(PPP_MAJOR, "ppp");
device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
class_destroy(ppp_class);
unregister_pernet_device(&ppp_net_ops);
}
/*
* Units handling. Caller must protect concurrent access
* by holding all_ppp_mutex
*/
/* associate pointer with specified number */
static int unit_set(struct idr *p, void *ptr, int n)
{
int unit;
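/* idr_alloc() with the range [n, n + 1) either returns exactly n or
 * -ENOSPC when that number is already in use; map the latter to -EINVAL
 * for the caller.
 */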
unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
if (unit == -ENOSPC)
unit = -EINVAL;
return unit;
}
/* get new free unit number and associate pointer with it */
static int unit_get(struct idr *p, void *ptr, int min)
{
return idr_alloc(p, ptr, min, 0, GFP_KERNEL);
}
/* put unit number back to a pool */
static void unit_put(struct idr *p, int n)
{
idr_remove(p, n);
}
/* get pointer associated with the number */
static void *unit_find(struct idr *p, int n)
{
return idr_find(p, n);
}
/* Module/initialization stuff */
module_init(ppp_init);
module_exit(ppp_cleanup);
EXPORT_SYMBOL(ppp_register_net_channel);
EXPORT_SYMBOL(ppp_register_channel);
EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
EXPORT_SYMBOL(ppp_dev_name);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
MODULE_ALIAS_RTNL_LINK("ppp");
MODULE_ALIAS("devname:ppp");
| linux-master | drivers/net/ppp/ppp_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/** -*- linux-c -*- ***********************************************************
* Linux PPP over X/Ethernet (PPPoX/PPPoE) Sockets
*
* PPPoX --- Generic PPP encapsulation socket family
* PPPoE --- PPP over Ethernet (RFC 2516)
*
* Version: 0.5.2
*
* Author: Michal Ostrowski <[email protected]>
*
* 051000 : Initialization cleanup
*
* License:
*/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/init.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/kmod.h>
#include <net/sock.h>
#include <linux/uaccess.h>
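/* Registered PPPoX sub-protocols (PPPoE, PPTP, ...), indexed by their
 * PX_PROTO_* number; entries are installed via register_pppox_proto().
 */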
static const struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1];
int register_pppox_proto(int proto_num, const struct pppox_proto *pp)
{
if (proto_num < 0 || proto_num > PX_MAX_PROTO)
return -EINVAL;
if (pppox_protos[proto_num])
return -EALREADY;
pppox_protos[proto_num] = pp;
return 0;
}
void unregister_pppox_proto(int proto_num)
{
if (proto_num >= 0 && proto_num <= PX_MAX_PROTO)
pppox_protos[proto_num] = NULL;
}
void pppox_unbind_sock(struct sock *sk)
{
/* Clear connection to ppp device, if attached. */
if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED)) {
ppp_unregister_channel(&pppox_sk(sk)->chan);
sk->sk_state = PPPOX_DEAD;
}
}
EXPORT_SYMBOL(register_pppox_proto);
EXPORT_SYMBOL(unregister_pppox_proto);
EXPORT_SYMBOL(pppox_unbind_sock);
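/*
 * Generic PPPoX ioctl: PPPIOCGCHAN reports the ppp channel index of a
 * connected socket (and marks the socket bound); anything else is
 * delegated to the per-protocol ioctl hook, or fails with -ENOTTY.
 */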
int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
struct pppox_sock *po = pppox_sk(sk);
int rc;
lock_sock(sk);
switch (cmd) {
case PPPIOCGCHAN: {
int index;
rc = -ENOTCONN;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
rc = -EINVAL;
index = ppp_channel_index(&po->chan);
		if (put_user(index, (int __user *) arg))
break;
rc = 0;
sk->sk_state |= PPPOX_BOUND;
break;
}
default:
rc = pppox_protos[sk->sk_protocol]->ioctl ?
pppox_protos[sk->sk_protocol]->ioctl(sock, cmd, arg) : -ENOTTY;
}
release_sock(sk);
return rc;
}
EXPORT_SYMBOL(pppox_ioctl);
#ifdef CONFIG_COMPAT
int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
if (cmd == PPPOEIOCSFWD32)
cmd = PPPOEIOCSFWD;
return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
EXPORT_SYMBOL(pppox_compat_ioctl);
#endif
static int pppox_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
int rc = -EPROTOTYPE;
if (protocol < 0 || protocol > PX_MAX_PROTO)
goto out;
rc = -EPROTONOSUPPORT;
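	/* A protocol module that is not yet loaded can be pulled in on
	 * demand: request_module() matches the "net-pf-X-proto-Y" alias
	 * that implementations declare with MODULE_ALIAS_NET_PF_PROTO()
	 * (see e.g. pptp.c).
	 */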
if (!pppox_protos[protocol])
request_module("net-pf-%d-proto-%d", PF_PPPOX, protocol);
if (!pppox_protos[protocol] ||
!try_module_get(pppox_protos[protocol]->owner))
goto out;
rc = pppox_protos[protocol]->create(net, sock, kern);
module_put(pppox_protos[protocol]->owner);
out:
return rc;
}
static const struct net_proto_family pppox_proto_family = {
.family = PF_PPPOX,
.create = pppox_create,
.owner = THIS_MODULE,
};
static int __init pppox_init(void)
{
return sock_register(&pppox_proto_family);
}
static void __exit pppox_exit(void)
{
sock_unregister(PF_PPPOX);
}
module_init(pppox_init);
module_exit(pppox_exit);
MODULE_AUTHOR("Michal Ostrowski <[email protected]>");
MODULE_DESCRIPTION("PPP over Ethernet driver (generic socket layer)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PPPOX);
| linux-master | drivers/net/ppp/pppox.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Point-to-Point Tunneling Protocol for Linux
*
* Authors: Dmitry Kozlov <[email protected]>
*/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/ppp_channel.h>
#include <linux/ppp_defs.h>
#include <linux/if_pppox.h>
#include <linux/ppp-ioctl.h>
#include <linux/notifier.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/rcupdate.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <net/sock.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/gre.h>
#include <net/pptp.h>
#include <linux/uaccess.h>
#define PPTP_DRIVER_VERSION "0.8.5"
#define MAX_CALLID 65535
static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
static struct pppox_sock __rcu **callid_sock;
static DEFINE_SPINLOCK(chan_lock);
static struct proto pptp_sk_proto __read_mostly;
static const struct ppp_channel_ops pptp_chan_ops;
static const struct proto_ops pptp_ops;
static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
{
struct pppox_sock *sock;
struct pptp_opt *opt;
rcu_read_lock();
sock = rcu_dereference(callid_sock[call_id]);
if (sock) {
opt = &sock->proto.pptp;
if (opt->dst_addr.sin_addr.s_addr != s_addr)
sock = NULL;
else
sock_hold(sk_pppox(sock));
}
rcu_read_unlock();
return sock;
}
static int lookup_chan_dst(u16 call_id, __be32 d_addr)
{
struct pppox_sock *sock;
struct pptp_opt *opt;
int i;
rcu_read_lock();
i = 1;
for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
sock = rcu_dereference(callid_sock[i]);
if (!sock)
continue;
opt = &sock->proto.pptp;
if (opt->dst_addr.call_id == call_id &&
opt->dst_addr.sin_addr.s_addr == d_addr)
break;
}
rcu_read_unlock();
return i < MAX_CALLID;
}
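/*
 * Bind a socket to a local call id.  A call id of zero requests
 * automatic assignment: the search starts just after the id handed out
 * last time and wraps around to 1, so ids are recycled as slowly as
 * possible.  A non-zero call id must not already be in use.
 */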
static int add_chan(struct pppox_sock *sock,
struct pptp_addr *sa)
{
static int call_id;
spin_lock(&chan_lock);
if (!sa->call_id) {
call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
if (call_id == MAX_CALLID) {
call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
if (call_id == MAX_CALLID)
goto out_err;
}
sa->call_id = call_id;
} else if (test_bit(sa->call_id, callid_bitmap)) {
goto out_err;
}
sock->proto.pptp.src_addr = *sa;
set_bit(sa->call_id, callid_bitmap);
rcu_assign_pointer(callid_sock[sa->call_id], sock);
spin_unlock(&chan_lock);
return 0;
out_err:
spin_unlock(&chan_lock);
return -1;
}
static void del_chan(struct pppox_sock *sock)
{
spin_lock(&chan_lock);
clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
spin_unlock(&chan_lock);
}
static struct rtable *pptp_route_output(const struct pppox_sock *po,
struct flowi4 *fl4)
{
const struct sock *sk = &po->sk;
struct net *net;
net = sock_net(sk);
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 0,
RT_SCOPE_UNIVERSE, IPPROTO_GRE, 0,
po->proto.pptp.dst_addr.sin_addr.s_addr,
po->proto.pptp.src_addr.sin_addr.s_addr,
0, 0, sock_net_uid(net, sk));
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(net, fl4, sk);
}
static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
struct sock *sk = chan->private;
struct pppox_sock *po = pppox_sk(sk);
struct net *net = sock_net(sk);
struct pptp_opt *opt = &po->proto.pptp;
struct pptp_gre_header *hdr;
unsigned int header_len = sizeof(*hdr);
struct flowi4 fl4;
int islcp;
int len;
unsigned char *data;
__u32 seq_recv;
struct rtable *rt;
struct net_device *tdev;
struct iphdr *iph;
int max_headroom;
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
goto tx_error;
rt = pptp_route_output(po, &fl4);
if (IS_ERR(rt))
goto tx_error;
tdev = rt->dst.dev;
max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;
if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
if (!new_skb) {
ip_rt_put(rt);
goto tx_error;
}
if (skb->sk)
skb_set_owner_w(new_skb, skb->sk);
consume_skb(skb);
skb = new_skb;
}
data = skb->data;
islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
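	/* LCP packets with code values 1 (Configure-Request) through
	 * 7 (Code-Reject) must be sent with uncompressed protocol and
	 * address/control fields, so they are exempted from the header
	 * compression flags negotiated for this channel.
	 */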
/* compress protocol field */
if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp)
skb_pull(skb, 1);
/* Put in the address/control bytes if necessary */
if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
data = skb_push(skb, 2);
data[0] = PPP_ALLSTATIONS;
data[1] = PPP_UI;
}
len = skb->len;
seq_recv = opt->seq_recv;
if (opt->ack_sent == seq_recv)
header_len -= sizeof(hdr->ack);
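	/* The acknowledgment number is an optional field of the enhanced
	 * GRE header: when the peer's latest sequence number has already
	 * been acked there is nothing new to piggyback, so the field is
	 * omitted and the header shrinks by four bytes.
	 */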
/* Push down and install GRE header */
skb_push(skb, header_len);
hdr = (struct pptp_gre_header *)(skb->data);
hdr->gre_hd.flags = GRE_KEY | GRE_VERSION_1 | GRE_SEQ;
hdr->gre_hd.protocol = GRE_PROTO_PPP;
hdr->call_id = htons(opt->dst_addr.call_id);
hdr->seq = htonl(++opt->seq_sent);
if (opt->ack_sent != seq_recv) {
/* send ack with this message */
hdr->gre_hd.flags |= GRE_ACK;
hdr->ack = htonl(seq_recv);
opt->ack_sent = seq_recv;
}
hdr->payload_len = htons(len);
/* Push down and install the IP header. */
skb_reset_transport_header(skb);
skb_push(skb, sizeof(*iph));
skb_reset_network_header(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = sizeof(struct iphdr) >> 2;
if (ip_dont_fragment(sk, &rt->dst))
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->protocol = IPPROTO_GRE;
iph->tos = 0;
iph->daddr = fl4.daddr;
iph->saddr = fl4.saddr;
iph->ttl = ip4_dst_hoplimit(&rt->dst);
iph->tot_len = htons(skb->len);
skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
nf_reset_ct(skb);
skb->ip_summed = CHECKSUM_NONE;
ip_select_ident(net, skb, NULL);
ip_send_check(iph);
ip_local_out(net, skb->sk, skb);
return 1;
tx_error:
kfree_skb(skb);
return 1;
}
static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
{
struct pppox_sock *po = pppox_sk(sk);
struct pptp_opt *opt = &po->proto.pptp;
int headersize, payload_len, seq;
__u8 *payload;
struct pptp_gre_header *header;
if (!(sk->sk_state & PPPOX_CONNECTED)) {
if (sock_queue_rcv_skb(sk, skb))
goto drop;
return NET_RX_SUCCESS;
}
header = (struct pptp_gre_header *)(skb->data);
headersize = sizeof(*header);
/* test if acknowledgement present */
if (GRE_IS_ACK(header->gre_hd.flags)) {
__u32 ack;
if (!pskb_may_pull(skb, headersize))
goto drop;
header = (struct pptp_gre_header *)(skb->data);
/* ack in different place if S = 0 */
ack = GRE_IS_SEQ(header->gre_hd.flags) ? ntohl(header->ack) :
ntohl(header->seq);
if (ack > opt->ack_recv)
opt->ack_recv = ack;
/* also handle sequence number wrap-around */
if (WRAPPED(ack, opt->ack_recv))
opt->ack_recv = ack;
} else {
headersize -= sizeof(header->ack);
}
/* test if payload present */
if (!GRE_IS_SEQ(header->gre_hd.flags))
goto drop;
payload_len = ntohs(header->payload_len);
seq = ntohl(header->seq);
/* check for incomplete packet (length smaller than expected) */
if (!pskb_may_pull(skb, headersize + payload_len))
goto drop;
payload = skb->data + headersize;
/* check for expected sequence number */
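	/* Stale or out-of-window frames are normally dropped; LCP echo
	 * request/reply frames are still passed up so that keepalives
	 * are not lost to reordering on the GRE path.
	 */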
if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
(PPP_PROTOCOL(payload) == PPP_LCP) &&
((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
goto allow_packet;
} else {
opt->seq_recv = seq;
allow_packet:
skb_pull(skb, headersize);
if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
/* chop off address/control */
if (skb->len < 3)
goto drop;
skb_pull(skb, 2);
}
skb->ip_summed = CHECKSUM_NONE;
skb_set_network_header(skb, skb->head-skb->data);
ppp_input(&po->chan, skb);
return NET_RX_SUCCESS;
}
drop:
kfree_skb(skb);
return NET_RX_DROP;
}
static int pptp_rcv(struct sk_buff *skb)
{
struct pppox_sock *po;
struct pptp_gre_header *header;
struct iphdr *iph;
if (skb->pkt_type != PACKET_HOST)
goto drop;
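	/* 12 bytes covers the mandatory part of the enhanced GRE header
	 * (flags, protocol, payload length, call id and sequence number);
	 * the optional acknowledgment field, if present, is pulled later
	 * in pptp_rcv_core().
	 */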
if (!pskb_may_pull(skb, 12))
goto drop;
iph = ip_hdr(skb);
header = (struct pptp_gre_header *)skb->data;
if (header->gre_hd.protocol != GRE_PROTO_PPP || /* PPTP-GRE protocol for PPTP */
GRE_IS_CSUM(header->gre_hd.flags) || /* flag CSUM should be clear */
GRE_IS_ROUTING(header->gre_hd.flags) || /* flag ROUTING should be clear */
!GRE_IS_KEY(header->gre_hd.flags) || /* flag KEY should be set */
(header->gre_hd.flags & GRE_FLAGS)) /* flag Recursion Ctrl should be clear */
/* if invalid, discard this packet */
goto drop;
po = lookup_chan(ntohs(header->call_id), iph->saddr);
if (po) {
skb_dst_drop(skb);
nf_reset_ct(skb);
return sk_receive_skb(sk_pppox(po), skb, 0);
}
drop:
kfree_skb(skb);
return NET_RX_DROP;
}
static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
int sockaddr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
struct pppox_sock *po = pppox_sk(sk);
int error = 0;
if (sockaddr_len < sizeof(struct sockaddr_pppox))
return -EINVAL;
lock_sock(sk);
if (sk->sk_state & PPPOX_DEAD) {
error = -EALREADY;
goto out;
}
if (sk->sk_state & PPPOX_BOUND) {
error = -EBUSY;
goto out;
}
if (add_chan(po, &sp->sa_addr.pptp))
error = -EBUSY;
else
sk->sk_state |= PPPOX_BOUND;
out:
release_sock(sk);
return error;
}
static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
int sockaddr_len, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
struct pppox_sock *po = pppox_sk(sk);
struct pptp_opt *opt = &po->proto.pptp;
struct rtable *rt;
struct flowi4 fl4;
int error = 0;
if (sockaddr_len < sizeof(struct sockaddr_pppox))
return -EINVAL;
if (sp->sa_protocol != PX_PROTO_PPTP)
return -EINVAL;
if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
return -EALREADY;
lock_sock(sk);
/* Check for already bound sockets */
if (sk->sk_state & PPPOX_CONNECTED) {
error = -EBUSY;
goto end;
}
/* Check for already disconnected sockets, on attempts to disconnect */
if (sk->sk_state & PPPOX_DEAD) {
error = -EALREADY;
goto end;
}
if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
error = -EINVAL;
goto end;
}
po->chan.private = sk;
po->chan.ops = &pptp_chan_ops;
rt = pptp_route_output(po, &fl4);
if (IS_ERR(rt)) {
error = -EHOSTUNREACH;
goto end;
}
sk_setup_caps(sk, &rt->dst);
po->chan.mtu = dst_mtu(&rt->dst);
if (!po->chan.mtu)
po->chan.mtu = PPP_MRU;
po->chan.mtu -= PPTP_HEADER_OVERHEAD;
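	/* Leave headroom for the encapsulation added in pptp_xmit() so a
	 * full-sized PPP frame still fits the route MTU; the accounting
	 * is centralised in PPTP_HEADER_OVERHEAD (see <net/pptp.h>).
	 */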
po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
error = ppp_register_channel(&po->chan);
if (error) {
pr_err("PPTP: failed to register PPP channel (%d)\n", error);
goto end;
}
opt->dst_addr = sp->sa_addr.pptp;
sk->sk_state |= PPPOX_CONNECTED;
end:
release_sock(sk);
return error;
}
static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
int len = sizeof(struct sockaddr_pppox);
struct sockaddr_pppox sp;
memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));
sp.sa_family = AF_PPPOX;
sp.sa_protocol = PX_PROTO_PPTP;
sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
memcpy(uaddr, &sp, len);
return len;
}
static int pptp_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct pppox_sock *po;
int error = 0;
if (!sk)
return 0;
lock_sock(sk);
if (sock_flag(sk, SOCK_DEAD)) {
release_sock(sk);
return -EBADF;
}
po = pppox_sk(sk);
del_chan(po);
synchronize_rcu();
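	/* del_chan() unpublished the socket from callid_sock[]; wait for
	 * concurrent RCU readers (pptp_rcv()/lookup_chan()) to finish
	 * before the socket is unbound and released below.
	 */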
pppox_unbind_sock(sk);
sk->sk_state = PPPOX_DEAD;
sock_orphan(sk);
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return error;
}
static void pptp_sock_destruct(struct sock *sk)
{
if (!(sk->sk_state & PPPOX_DEAD)) {
del_chan(pppox_sk(sk));
pppox_unbind_sock(sk);
}
skb_queue_purge(&sk->sk_receive_queue);
dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
}
static int pptp_create(struct net *net, struct socket *sock, int kern)
{
int error = -ENOMEM;
struct sock *sk;
struct pppox_sock *po;
struct pptp_opt *opt;
sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, kern);
if (!sk)
goto out;
sock_init_data(sock, sk);
sock->state = SS_UNCONNECTED;
sock->ops = &pptp_ops;
sk->sk_backlog_rcv = pptp_rcv_core;
sk->sk_state = PPPOX_NONE;
sk->sk_type = SOCK_STREAM;
sk->sk_family = PF_PPPOX;
sk->sk_protocol = PX_PROTO_PPTP;
sk->sk_destruct = pptp_sock_destruct;
po = pppox_sk(sk);
opt = &po->proto.pptp;
opt->seq_sent = 0; opt->seq_recv = 0xffffffff;
opt->ack_recv = 0; opt->ack_sent = 0xffffffff;
error = 0;
out:
return error;
}
static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
unsigned long arg)
{
struct sock *sk = chan->private;
struct pppox_sock *po = pppox_sk(sk);
struct pptp_opt *opt = &po->proto.pptp;
void __user *argp = (void __user *)arg;
int __user *p = argp;
int err, val;
err = -EFAULT;
switch (cmd) {
case PPPIOCGFLAGS:
val = opt->ppp_flags;
if (put_user(val, p))
break;
err = 0;
break;
case PPPIOCSFLAGS:
if (get_user(val, p))
break;
opt->ppp_flags = val & ~SC_RCV_BITS;
err = 0;
break;
default:
err = -ENOTTY;
}
return err;
}
static const struct ppp_channel_ops pptp_chan_ops = {
.start_xmit = pptp_xmit,
.ioctl = pptp_ppp_ioctl,
};
static struct proto pptp_sk_proto __read_mostly = {
.name = "PPTP",
.owner = THIS_MODULE,
.obj_size = sizeof(struct pppox_sock),
};
static const struct proto_ops pptp_ops = {
.family = AF_PPPOX,
.owner = THIS_MODULE,
.release = pptp_release,
.bind = pptp_bind,
.connect = pptp_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = pptp_getname,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.sendmsg = sock_no_sendmsg,
.recvmsg = sock_no_recvmsg,
.mmap = sock_no_mmap,
.ioctl = pppox_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = pppox_compat_ioctl,
#endif
};
static const struct pppox_proto pppox_pptp_proto = {
.create = pptp_create,
.owner = THIS_MODULE,
};
static const struct gre_protocol gre_pptp_protocol = {
.handler = pptp_rcv,
};
static int __init pptp_init_module(void)
{
int err = 0;
pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
callid_sock = vzalloc(array_size(sizeof(void *), (MAX_CALLID + 1)));
if (!callid_sock)
return -ENOMEM;
err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
if (err) {
pr_err("PPTP: can't add gre protocol\n");
goto out_mem_free;
}
err = proto_register(&pptp_sk_proto, 0);
if (err) {
pr_err("PPTP: can't register sk_proto\n");
goto out_gre_del_protocol;
}
err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
if (err) {
pr_err("PPTP: can't register pppox_proto\n");
goto out_unregister_sk_proto;
}
return 0;
out_unregister_sk_proto:
proto_unregister(&pptp_sk_proto);
out_gre_del_protocol:
gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
out_mem_free:
vfree(callid_sock);
return err;
}
static void __exit pptp_exit_module(void)
{
unregister_pppox_proto(PX_PROTO_PPTP);
proto_unregister(&pptp_sk_proto);
gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
vfree(callid_sock);
}
module_init(pptp_init_module);
module_exit(pptp_exit_module);
MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
MODULE_AUTHOR("D. Kozlov ([email protected])");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_PPTP);
| linux-master | drivers/net/ppp/pptp.c |
/*
* Update: The Berkeley copyright was changed, and the change
 * is retroactive to all "true" BSD software (i.e. everything
 * from UCB as opposed to other people's code that just carried
 * the same license). The new copyright doesn't clash with the
 * GPL, so the module-only restriction has been removed.
*/
/* Because this code is derived from the 4.3BSD compress source:
*
* Copyright (c) 1985, 1986 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* James A. Woods, derived from original work by Spencer Thomas
* and Joseph Orost.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This version is for use with contiguous buffers on Linux-derived systems.
*
* ==FILEVERSION 20000226==
*
* NOTE TO MAINTAINERS:
* If you modify this file at all, please set the number above to the
* date of the modification as YYMMDD (year month day).
* bsd_comp.c is shipped with a PPP distribution as well as with
* the kernel; if everyone increases the FILEVERSION number above,
* then scripts can do the right thing when deciding whether to
* install a new bsd_comp.c file. Don't change the format of that
* line otherwise, so the installation script can recognize it.
*
* From: bsd_comp.c,v 1.3 1994/12/08 01:59:58 paulus Exp
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/ppp_defs.h>
#undef PACKETPTR
#define PACKETPTR 1
#include <linux/ppp-comp.h>
#undef PACKETPTR
#include <asm/byteorder.h>
/*
* PPP "BSD compress" compression
* The differences between this compression and the classic BSD LZW
* source are obvious from the requirement that the classic code worked
* with files while this handles arbitrarily long streams that
* are broken into packets. They are:
*
* When the code size expands, a block of junk is not emitted by
* the compressor and not expected by the decompressor.
*
* New codes are not necessarily assigned every time an old
* code is output by the compressor. This is because a packet
* end forces a code to be emitted, but does not imply that a
* new sequence has been seen.
*
* The compression ratio is checked at the first end of a packet
* after the appropriate gap. Besides simplifying and speeding
* things up, this makes it more likely that the transmitter
* and receiver will agree when the dictionary is cleared when
* compression is not going well.
*/
/*
* Macros to extract protocol version and number of bits
* from the third byte of the BSD Compress CCP configuration option.
*/
#define BSD_VERSION(x) ((x) >> 5)
#define BSD_NBITS(x) ((x) & 0x1F)
#define BSD_CURRENT_VERSION 1
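/*
 * For example (illustrative only): a configuration option byte of 0x2c
 * decodes as BSD_VERSION(0x2c) == 1 and BSD_NBITS(0x2c) == 12, i.e.
 * protocol version 1 with a 12-bit maximum code size.
 */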
/*
* A dictionary for doing BSD compress.
*/
struct bsd_dict {
union { /* hash value */
unsigned long fcode;
struct {
#if defined(__LITTLE_ENDIAN) /* Little endian order */
unsigned short prefix; /* preceding code */
unsigned char suffix; /* last character of new code */
unsigned char pad;
#elif defined(__BIG_ENDIAN) /* Big endian order */
unsigned char pad;
unsigned char suffix; /* last character of new code */
unsigned short prefix; /* preceding code */
#else
#error Endianness not defined...
#endif
} hs;
} f;
unsigned short codem1; /* output of hash table -1 */
unsigned short cptr; /* map code to hash table entry */
};
struct bsd_db {
int totlen; /* length of this structure */
unsigned int hsize; /* size of the hash table */
unsigned char hshift; /* used in hash function */
unsigned char n_bits; /* current bits/code */
unsigned char maxbits; /* maximum bits/code */
unsigned char debug; /* non-zero if debug desired */
unsigned char unit; /* ppp unit number */
unsigned short seqno; /* sequence # of next packet */
unsigned int mru; /* size of receive (decompress) bufr */
unsigned int maxmaxcode; /* largest valid code */
unsigned int max_ent; /* largest code in use */
unsigned int in_count; /* uncompressed bytes, aged */
unsigned int bytes_out; /* compressed bytes, aged */
unsigned int ratio; /* recent compression ratio */
unsigned int checkpoint; /* when to next check the ratio */
unsigned int clear_count; /* times dictionary cleared */
unsigned int incomp_count; /* incompressible packets */
unsigned int incomp_bytes; /* incompressible bytes */
unsigned int uncomp_count; /* uncompressed packets */
unsigned int uncomp_bytes; /* uncompressed bytes */
unsigned int comp_count; /* compressed packets */
unsigned int comp_bytes; /* compressed bytes */
unsigned short *lens; /* array of lengths of codes */
struct bsd_dict *dict; /* dictionary */
};
#define BSD_OVHD 2 /* BSD compress overhead/packet */
#define MIN_BSD_BITS 9
#define BSD_INIT_BITS MIN_BSD_BITS
#define MAX_BSD_BITS 15
static void bsd_free (void *state);
static void *bsd_alloc(unsigned char *options, int opt_len, int decomp);
static void *bsd_comp_alloc (unsigned char *options, int opt_len);
static void *bsd_decomp_alloc (unsigned char *options, int opt_len);
static int bsd_init (void *db, unsigned char *options,
int opt_len, int unit, int debug, int decomp);
static int bsd_comp_init (void *state, unsigned char *options,
int opt_len, int unit, int opthdr, int debug);
static int bsd_decomp_init (void *state, unsigned char *options,
int opt_len, int unit, int opthdr, int mru,
int debug);
static void bsd_reset (void *state);
static void bsd_comp_stats (void *state, struct compstat *stats);
static int bsd_compress (void *state, unsigned char *rptr,
unsigned char *obuf, int isize, int osize);
static void bsd_incomp (void *state, unsigned char *ibuf, int icnt);
static int bsd_decompress (void *state, unsigned char *ibuf, int isize,
unsigned char *obuf, int osize);
/* These are in ppp_generic.c */
extern int ppp_register_compressor (struct compressor *cp);
extern void ppp_unregister_compressor (struct compressor *cp);
/*
* the next two codes should not be changed lightly, as they must not
* lie within the contiguous general code space.
*/
#define CLEAR 256 /* table clear output code */
#define FIRST 257 /* first free entry */
#define LAST 255
#define MAXCODE(b) ((1 << (b)) - 1)
#define BADCODEM1 MAXCODE(MAX_BSD_BITS)
#define BSD_HASH(prefix,suffix,hshift) ((((unsigned long)(suffix))<<(hshift)) \
^ (unsigned long)(prefix))
#define BSD_KEY(prefix,suffix) ((((unsigned long)(suffix)) << 16) \
+ (unsigned long)(prefix))
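/*
 * BSD_KEY packs a (prefix, suffix) pair into a single value that is
 * stored in the dictionary entry and compared exactly, while BSD_HASH
 * spreads the same pair across the hash table; collisions are resolved
 * by the secondary displacement probing in bsd_compress() and
 * bsd_decompress().
 */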
#define CHECK_GAP 10000 /* Ratio check interval */
#define RATIO_SCALE_LOG 8
#define RATIO_SCALE (1<<RATIO_SCALE_LOG)
#define RATIO_MAX (0x7fffffff>>RATIO_SCALE_LOG)
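/*
 * The ratio is kept in 8-bit fixed point: 1.0 is RATIO_SCALE (256).
 * For example (illustrative only), 10000 input bytes compressed into
 * 5000 output bytes gives (10000 << RATIO_SCALE_LOG) / 5000 == 512,
 * i.e. a 2:1 ratio.
 */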
/*
* clear the dictionary
*/
static void
bsd_clear(struct bsd_db *db)
{
db->clear_count++;
db->max_ent = FIRST-1;
db->n_bits = BSD_INIT_BITS;
db->bytes_out = 0;
db->in_count = 0;
db->ratio = 0;
db->checkpoint = CHECK_GAP;
}
/*
* If the dictionary is full, then see if it is time to reset it.
*
* Compute the compression ratio using fixed-point arithmetic
* with 8 fractional bits.
*
* Since we have an infinite stream instead of a single file,
* watch only the local compression ratio.
*
* Since both peers must reset the dictionary at the same time even in
* the absence of CLEAR codes (while packets are incompressible), they
* must compute the same ratio.
*/
static int bsd_check (struct bsd_db *db) /* 1=output CLEAR */
{
unsigned int new_ratio;
if (db->in_count >= db->checkpoint)
{
/* age the ratio by limiting the size of the counts */
if (db->in_count >= RATIO_MAX || db->bytes_out >= RATIO_MAX)
{
db->in_count -= (db->in_count >> 2);
db->bytes_out -= (db->bytes_out >> 2);
}
db->checkpoint = db->in_count + CHECK_GAP;
if (db->max_ent >= db->maxmaxcode)
{
/* Reset the dictionary only if the ratio is worse,
* or if it looks as if it has been poisoned
* by incompressible data.
*
* This does not overflow, because
* db->in_count <= RATIO_MAX.
*/
new_ratio = db->in_count << RATIO_SCALE_LOG;
if (db->bytes_out != 0)
{
new_ratio /= db->bytes_out;
}
if (new_ratio < db->ratio || new_ratio < 1 * RATIO_SCALE)
{
bsd_clear (db);
return 1;
}
db->ratio = new_ratio;
}
}
return 0;
}
/*
* Return statistics.
*/
static void bsd_comp_stats (void *state, struct compstat *stats)
{
struct bsd_db *db = (struct bsd_db *) state;
stats->unc_bytes = db->uncomp_bytes;
stats->unc_packets = db->uncomp_count;
stats->comp_bytes = db->comp_bytes;
stats->comp_packets = db->comp_count;
stats->inc_bytes = db->incomp_bytes;
stats->inc_packets = db->incomp_count;
stats->in_count = db->in_count;
stats->bytes_out = db->bytes_out;
}
/*
* Reset state, as on a CCP ResetReq.
*/
static void bsd_reset (void *state)
{
struct bsd_db *db = (struct bsd_db *) state;
bsd_clear(db);
db->seqno = 0;
db->clear_count = 0;
}
/*
* Release the compression structure
*/
static void bsd_free (void *state)
{
struct bsd_db *db = state;
if (!db)
return;
/*
* Release the dictionary
*/
vfree(db->dict);
db->dict = NULL;
/*
* Release the string buffer
*/
vfree(db->lens);
db->lens = NULL;
/*
* Finally release the structure itself.
*/
kfree(db);
}
/*
* Allocate space for a (de) compressor.
*/
static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
{
int bits;
unsigned int hsize, hshift, maxmaxcode;
struct bsd_db *db;
if (opt_len != 3 || options[0] != CI_BSD_COMPRESS || options[1] != 3
|| BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
{
return NULL;
}
bits = BSD_NBITS(options[2]);
switch (bits)
{
case 9: /* needs 82152 for both directions */
case 10: /* needs 84144 */
case 11: /* needs 88240 */
case 12: /* needs 96432 */
hsize = 5003;
hshift = 4;
break;
case 13: /* needs 176784 */
hsize = 9001;
hshift = 5;
break;
case 14: /* needs 353744 */
hsize = 18013;
hshift = 6;
break;
case 15: /* needs 691440 */
hsize = 35023;
hshift = 7;
break;
case 16: /* needs 1366160--far too much, */
/* hsize = 69001; */ /* and 69001 is too big for cptr */
/* hshift = 8; */ /* in struct bsd_db */
/* break; */
default:
return NULL;
}
/*
* Allocate the main control structure for this instance.
*/
maxmaxcode = MAXCODE(bits);
db = kzalloc(sizeof (struct bsd_db),
GFP_KERNEL);
if (!db)
{
return NULL;
}
/*
* Allocate space for the dictionary. This may be more than one page in
* length.
*/
db->dict = vmalloc(array_size(hsize, sizeof(struct bsd_dict)));
if (!db->dict)
{
bsd_free (db);
return NULL;
}
/*
* If this is the compression buffer then there is no length data.
*/
if (!decomp)
{
db->lens = NULL;
}
/*
* For decompression, the length information is needed as well.
*/
else
{
db->lens = vmalloc(array_size(sizeof(db->lens[0]), (maxmaxcode + 1)));
if (!db->lens)
{
bsd_free (db);
return NULL;
}
}
/*
* Initialize the data information for the compression code
*/
db->totlen = sizeof (struct bsd_db) +
(sizeof (struct bsd_dict) * hsize);
db->hsize = hsize;
db->hshift = hshift;
db->maxmaxcode = maxmaxcode;
db->maxbits = bits;
return (void *) db;
}
static void *bsd_comp_alloc (unsigned char *options, int opt_len)
{
return bsd_alloc (options, opt_len, 0);
}
static void *bsd_decomp_alloc (unsigned char *options, int opt_len)
{
return bsd_alloc (options, opt_len, 1);
}
/*
* Initialize the database.
*/
static int bsd_init (void *state, unsigned char *options,
int opt_len, int unit, int debug, int decomp)
{
struct bsd_db *db = state;
int indx;
if ((opt_len != 3) || (options[0] != CI_BSD_COMPRESS) || (options[1] != 3)
|| (BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
|| (BSD_NBITS(options[2]) != db->maxbits)
|| (decomp && db->lens == NULL))
{
return 0;
}
if (decomp)
{
indx = LAST;
do
{
db->lens[indx] = 1;
}
while (indx-- > 0);
}
indx = db->hsize;
while (indx-- != 0)
{
db->dict[indx].codem1 = BADCODEM1;
db->dict[indx].cptr = 0;
}
db->unit = unit;
db->mru = 0;
#ifndef DEBUG
if (debug)
#endif
db->debug = 1;
bsd_reset(db);
return 1;
}
static int bsd_comp_init (void *state, unsigned char *options,
int opt_len, int unit, int opthdr, int debug)
{
return bsd_init (state, options, opt_len, unit, debug, 0);
}
static int bsd_decomp_init (void *state, unsigned char *options,
int opt_len, int unit, int opthdr, int mru,
int debug)
{
return bsd_init (state, options, opt_len, unit, debug, 1);
}
/*
* Obtain pointers to the various structures in the compression tables
*/
#define dict_ptrx(p,idx) &(p->dict[idx])
#define lens_ptrx(p,idx) &(p->lens[idx])
#ifdef DEBUG
static unsigned short *lens_ptr(struct bsd_db *db, int idx)
{
if ((unsigned int) idx > (unsigned int) db->maxmaxcode)
{
printk ("<9>ppp: lens_ptr(%d) > max\n", idx);
idx = 0;
}
return lens_ptrx (db, idx);
}
static struct bsd_dict *dict_ptr(struct bsd_db *db, int idx)
{
if ((unsigned int) idx >= (unsigned int) db->hsize)
{
printk ("<9>ppp: dict_ptr(%d) > max\n", idx);
idx = 0;
}
return dict_ptrx (db, idx);
}
#else
#define lens_ptr(db,idx) lens_ptrx(db,idx)
#define dict_ptr(db,idx) dict_ptrx(db,idx)
#endif
/*
* compress a packet
*
* The result of this function is the size of the compressed
* packet. A zero is returned if the packet was not compressed
* for some reason, such as the size being larger than uncompressed.
*
* One change from the BSD compress command is that when the
* code size expands, we do not output a bunch of padding.
*/
static int bsd_compress (void *state, unsigned char *rptr, unsigned char *obuf,
int isize, int osize)
{
struct bsd_db *db;
int hshift;
unsigned int max_ent;
unsigned int n_bits;
unsigned int bitno;
unsigned long accm;
int ent;
unsigned long fcode;
struct bsd_dict *dictp;
unsigned char c;
int hval;
int disp;
int ilen;
int mxcode;
unsigned char *wptr;
int olen;
#define PUTBYTE(v) \
{ \
++olen; \
if (wptr) \
{ \
*wptr++ = (unsigned char) (v); \
if (olen >= osize) \
{ \
wptr = NULL; \
} \
} \
}
#define OUTPUT(ent) \
{ \
bitno -= n_bits; \
accm |= ((ent) << bitno); \
do \
{ \
PUTBYTE(accm >> 24); \
accm <<= 8; \
bitno += 8; \
} \
while (bitno <= 24); \
}
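/*
 * accm is a 32-bit accumulator into which codes are packed MSB first:
 * OUTPUT() lowers bitno by n_bits, ORs the new code in just below the
 * bits already pending, and drains complete bytes through PUTBYTE()
 * until fewer than eight bits remain buffered.
 */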
/*
* If the protocol is not in the range we're interested in,
* just return without compressing the packet. If it is,
* the protocol becomes the first byte to compress.
*/
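/*
 * Control protocols such as LCP (0xc021) and CCP (0x80fd) fall outside
 * the 0x21..0xf9 range and are therefore always sent uncompressed.
 */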
ent = PPP_PROTOCOL(rptr);
if (ent < 0x21 || ent > 0xf9)
{
return 0;
}
db = (struct bsd_db *) state;
hshift = db->hshift;
max_ent = db->max_ent;
n_bits = db->n_bits;
bitno = 32;
accm = 0;
mxcode = MAXCODE (n_bits);
/* Initialize the output pointers */
wptr = obuf;
olen = PPP_HDRLEN + BSD_OVHD;
if (osize > isize)
{
osize = isize;
}
/* This is the PPP header information */
if (wptr)
{
*wptr++ = PPP_ADDRESS(rptr);
*wptr++ = PPP_CONTROL(rptr);
*wptr++ = 0;
*wptr++ = PPP_COMP;
*wptr++ = db->seqno >> 8;
*wptr++ = db->seqno;
}
/* Skip the input header */
rptr += PPP_HDRLEN;
isize -= PPP_HDRLEN;
ilen = ++isize; /* Low byte of protocol is counted as input */
while (--ilen > 0)
{
c = *rptr++;
fcode = BSD_KEY (ent, c);
hval = BSD_HASH (ent, c, hshift);
dictp = dict_ptr (db, hval);
/* Validate and then check the entry. */
if (dictp->codem1 >= max_ent)
{
goto nomatch;
}
if (dictp->f.fcode == fcode)
{
ent = dictp->codem1 + 1;
continue; /* found (prefix,suffix) */
}
/* continue probing until a match or invalid entry */
disp = (hval == 0) ? 1 : hval;
do
{
hval += disp;
if (hval >= db->hsize)
{
hval -= db->hsize;
}
dictp = dict_ptr (db, hval);
if (dictp->codem1 >= max_ent)
{
goto nomatch;
}
}
while (dictp->f.fcode != fcode);
ent = dictp->codem1 + 1; /* finally found (prefix,suffix) */
continue;
nomatch:
OUTPUT(ent); /* output the prefix */
/* code -> hashtable */
if (max_ent < db->maxmaxcode)
{
struct bsd_dict *dictp2;
struct bsd_dict *dictp3;
int indx;
/* expand code size if needed */
if (max_ent >= mxcode)
{
db->n_bits = ++n_bits;
mxcode = MAXCODE (n_bits);
}
/* Invalidate old hash table entry using
* this code, and then take it over.
*/
dictp2 = dict_ptr (db, max_ent + 1);
indx = dictp2->cptr;
dictp3 = dict_ptr (db, indx);
if (dictp3->codem1 == max_ent)
{
dictp3->codem1 = BADCODEM1;
}
dictp2->cptr = hval;
dictp->codem1 = max_ent;
dictp->f.fcode = fcode;
db->max_ent = ++max_ent;
if (db->lens)
{
unsigned short *len1 = lens_ptr (db, max_ent);
unsigned short *len2 = lens_ptr (db, ent);
*len1 = *len2 + 1;
}
}
ent = c;
}
OUTPUT(ent); /* output the last code */
db->bytes_out += olen - PPP_HDRLEN - BSD_OVHD;
db->uncomp_bytes += isize;
db->in_count += isize;
++db->uncomp_count;
++db->seqno;
if (bitno < 32)
{
++db->bytes_out; /* must be set before calling bsd_check */
}
/*
* Generate the clear command if needed
*/
if (bsd_check(db))
{
OUTPUT (CLEAR);
}
/*
* Pad dribble bits of last code with ones.
* Do not emit a completely useless byte of ones.
*/
if (bitno != 32)
{
PUTBYTE((accm | (0xff << (bitno-8))) >> 24);
}
/*
* Increase code size if we would have without the packet
* boundary because the decompressor will do so.
*/
if (max_ent >= mxcode && max_ent < db->maxmaxcode)
{
db->n_bits++;
}
/* If output length is too large then this is an incomplete frame. */
if (wptr == NULL)
{
++db->incomp_count;
db->incomp_bytes += isize;
olen = 0;
}
else /* Count the number of compressed frames */
{
++db->comp_count;
db->comp_bytes += olen;
}
/* Return the resulting output length */
return olen;
#undef OUTPUT
#undef PUTBYTE
}
/*
* Update the "BSD Compress" dictionary on the receiver for
* incompressible data by pretending to compress the incoming data.
*/
static void bsd_incomp (void *state, unsigned char *ibuf, int icnt)
{
(void) bsd_compress (state, ibuf, (char *) 0, icnt, 0);
}
/*
* Decompress "BSD Compress".
*
* Because of patent problems, we return DECOMP_ERROR for errors
* found by inspecting the input data and for system problems, but
* DECOMP_FATALERROR for any errors which could possibly be said to
* be being detected "after" decompression. For DECOMP_ERROR,
* we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be
* infringing a patent of Motorola's if we do, so we take CCP down
* instead.
*
* Given that the frame has the correct sequence number and a good FCS,
* errors such as invalid codes in the input most likely indicate a
* bug, so we return DECOMP_FATALERROR for them in order to turn off
* compression, even though they are detected by inspecting the input.
*/
static int bsd_decompress (void *state, unsigned char *ibuf, int isize,
unsigned char *obuf, int osize)
{
struct bsd_db *db;
unsigned int max_ent;
unsigned long accm;
unsigned int bitno; /* 1st valid bit in accm */
unsigned int n_bits;
unsigned int tgtbitno; /* bitno when we have a code */
struct bsd_dict *dictp;
int explen;
int seq;
unsigned int incode;
unsigned int oldcode;
unsigned int finchar;
unsigned char *p;
unsigned char *wptr;
int adrs;
int ctrl;
int ilen;
int codelen;
int extra;
db = (struct bsd_db *) state;
max_ent = db->max_ent;
accm = 0;
bitno = 32; /* 1st valid bit in accm */
n_bits = db->n_bits;
tgtbitno = 32 - n_bits; /* bitno when we have a code */
/*
* Save the address/control from the PPP header
* and then get the sequence number.
*/
adrs = PPP_ADDRESS (ibuf);
ctrl = PPP_CONTROL (ibuf);
seq = (ibuf[4] << 8) + ibuf[5];
ibuf += (PPP_HDRLEN + 2);
ilen = isize - (PPP_HDRLEN + 2);
/*
* Check the sequence number and give up if it differs from
* the value we're expecting.
*/
if (seq != db->seqno)
{
if (db->debug)
{
printk("bsd_decomp%d: bad sequence # %d, expected %d\n",
db->unit, seq, db->seqno - 1);
}
return DECOMP_ERROR;
}
++db->seqno;
db->bytes_out += ilen;
/*
* Fill in the ppp header, but not the last byte of the protocol
* (that comes from the decompressed data).
*/
wptr = obuf;
*wptr++ = adrs;
*wptr++ = ctrl;
*wptr++ = 0;
oldcode = CLEAR;
explen = 3;
/*
* Keep the checkpoint correctly so that incompressible packets
* clear the dictionary at the proper times.
*/
for (;;)
{
if (ilen-- <= 0)
{
db->in_count += (explen - 3); /* don't count the header */
break;
}
/*
* Accumulate bytes until we have a complete code.
* Then get the next code, relying on the 32-bit,
* unsigned accm to mask the result.
*/
bitno -= 8;
accm |= *ibuf++ << bitno;
if (tgtbitno < bitno)
{
continue;
}
incode = accm >> tgtbitno;
accm <<= n_bits;
bitno += n_bits;
/*
* The dictionary must only be cleared at the end of a packet.
*/
if (incode == CLEAR)
{
if (ilen > 0)
{
if (db->debug)
{
printk("bsd_decomp%d: bad CLEAR\n", db->unit);
}
return DECOMP_FATALERROR; /* probably a bug */
}
bsd_clear(db);
break;
}
if ((incode > max_ent + 2) || (incode > db->maxmaxcode)
|| (incode > max_ent && oldcode == CLEAR))
{
if (db->debug)
{
printk("bsd_decomp%d: bad code 0x%x oldcode=0x%x ",
db->unit, incode, oldcode);
printk("max_ent=0x%x explen=%d seqno=%d\n",
max_ent, explen, db->seqno);
}
return DECOMP_FATALERROR; /* probably a bug */
}
/* Special case for KwKwK string. */
if (incode > max_ent)
{
finchar = oldcode;
extra = 1;
}
else
{
finchar = incode;
extra = 0;
}
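	/* The classic LZW KwKwK case: a code greater than max_ent refers
	 * to the entry that is being defined by this very packet, i.e.
	 * the previous string followed by its own first character.
	 * finchar is therefore taken from oldcode and the duplicated
	 * character is appended ("extra") after the prefix chain below.
	 */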
codelen = *(lens_ptr (db, finchar));
explen += codelen + extra;
if (explen > osize)
{
if (db->debug)
{
printk("bsd_decomp%d: ran out of mru\n", db->unit);
#ifdef DEBUG
printk(" len=%d, finchar=0x%x, codelen=%d, explen=%d\n",
ilen, finchar, codelen, explen);
#endif
}
return DECOMP_FATALERROR;
}
/*
* Decode this code and install it in the decompressed buffer.
*/
wptr += codelen;
p = wptr;
while (finchar > LAST)
{
struct bsd_dict *dictp2 = dict_ptr (db, finchar);
dictp = dict_ptr (db, dictp2->cptr);
#ifdef DEBUG
if (--codelen <= 0 || dictp->codem1 != finchar-1)
{
if (codelen <= 0)
{
printk("bsd_decomp%d: fell off end of chain ", db->unit);
printk("0x%x at 0x%x by 0x%x, max_ent=0x%x\n",
incode, finchar, dictp2->cptr, max_ent);
}
else
{
if (dictp->codem1 != finchar-1)
{
printk("bsd_decomp%d: bad code chain 0x%x "
"finchar=0x%x ",
db->unit, incode, finchar);
printk("oldcode=0x%x cptr=0x%x codem1=0x%x\n",
oldcode, dictp2->cptr, dictp->codem1);
}
}
return DECOMP_FATALERROR;
}
#endif
*--p = dictp->f.hs.suffix;
finchar = dictp->f.hs.prefix;
}
*--p = finchar;
#ifdef DEBUG
if (--codelen != 0)
{
printk("bsd_decomp%d: short by %d after code 0x%x, max_ent=0x%x\n",
db->unit, codelen, incode, max_ent);
}
#endif
if (extra) /* the KwKwK case again */
{
*wptr++ = finchar;
}
/*
* If not first code in a packet, and
* if not out of code space, then allocate a new code.
*
* Keep the hash table correct so it can be used
* with uncompressed packets.
*/
if (oldcode != CLEAR && max_ent < db->maxmaxcode)
{
struct bsd_dict *dictp2, *dictp3;
unsigned short *lens1, *lens2;
unsigned long fcode;
int hval, disp, indx;
fcode = BSD_KEY(oldcode,finchar);
hval = BSD_HASH(oldcode,finchar,db->hshift);
dictp = dict_ptr (db, hval);
/* look for a free hash table entry */
if (dictp->codem1 < max_ent)
{
disp = (hval == 0) ? 1 : hval;
do
{
hval += disp;
if (hval >= db->hsize)
{
hval -= db->hsize;
}
dictp = dict_ptr (db, hval);
}
while (dictp->codem1 < max_ent);
}
/*
* Invalidate previous hash table entry
* assigned this code, and then take it over
*/
dictp2 = dict_ptr (db, max_ent + 1);
indx = dictp2->cptr;
dictp3 = dict_ptr (db, indx);
if (dictp3->codem1 == max_ent)
{
dictp3->codem1 = BADCODEM1;
}
dictp2->cptr = hval;
dictp->codem1 = max_ent;
dictp->f.fcode = fcode;
db->max_ent = ++max_ent;
/* Update the length of this string. */
lens1 = lens_ptr (db, max_ent);
lens2 = lens_ptr (db, oldcode);
*lens1 = *lens2 + 1;
/* Expand code size if needed. */
if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode)
{
db->n_bits = ++n_bits;
tgtbitno = 32-n_bits;
}
}
oldcode = incode;
}
++db->comp_count;
++db->uncomp_count;
db->comp_bytes += isize - BSD_OVHD - PPP_HDRLEN;
db->uncomp_bytes += explen;
if (bsd_check(db))
{
if (db->debug)
{
printk("bsd_decomp%d: peer should have cleared dictionary on %d\n",
db->unit, db->seqno - 1);
}
}
return explen;
}
/*************************************************************
* Table of addresses for the BSD compression module
*************************************************************/
static struct compressor ppp_bsd_compress = {
.compress_proto = CI_BSD_COMPRESS,
.comp_alloc = bsd_comp_alloc,
.comp_free = bsd_free,
.comp_init = bsd_comp_init,
.comp_reset = bsd_reset,
.compress = bsd_compress,
.comp_stat = bsd_comp_stats,
.decomp_alloc = bsd_decomp_alloc,
.decomp_free = bsd_free,
.decomp_init = bsd_decomp_init,
.decomp_reset = bsd_reset,
.decompress = bsd_decompress,
.incomp = bsd_incomp,
.decomp_stat = bsd_comp_stats,
.owner = THIS_MODULE
};
/*************************************************************
* Module support routines
*************************************************************/
static int __init bsdcomp_init(void)
{
int answer = ppp_register_compressor(&ppp_bsd_compress);
if (answer == 0)
printk(KERN_INFO "PPP BSD Compression module registered\n");
return answer;
}
static void __exit bsdcomp_cleanup(void)
{
ppp_unregister_compressor(&ppp_bsd_compress);
}
module_init(bsdcomp_init);
module_exit(bsdcomp_cleanup);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_BSD_COMPRESS));
| linux-master | drivers/net/ppp/bsd_comp.c |