python_code | repo_name | file_path
---|---|---|
/*
* ppp_mppe.c - interface MPPE to the PPP code.
* This version is for use with Linux kernel 2.6.14+
*
* By Frank Cusack <[email protected]>.
* Copyright (c) 2002,2003,2004 Google, Inc.
* All rights reserved.
*
* License:
* Permission to use, copy, modify, and distribute this software and its
* documentation is hereby granted, provided that the above copyright
* notice appears in all copies. This software is provided without any
* warranty, express or implied.
*
* ALTERNATIVELY, provided that this notice is retained in full, this product
* may be distributed under the terms of the GNU General Public License (GPL),
* in which case the provisions of the GPL apply INSTEAD OF those given above.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*
* Changelog:
* 08/12/05 - Matt Domsch <[email protected]>
* Only need extra skb padding on transmit, not receive.
* 06/18/04 - Matt Domsch <[email protected]>, Oleg Makarenko <[email protected]>
* Use Linux kernel 2.6 arc4 and sha1 routines rather than
* providing our own.
* 2/15/04 - TS: added #include <version.h> and testing for Kernel
* version before using
* MOD_DEC_USAGE_COUNT/MOD_INC_USAGE_COUNT which are
* deprecated in 2.6
*/
#include <crypto/arc4.h>
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
#include <linux/scatterlist.h>
#include <asm/unaligned.h>
#include "ppp_mppe.h"
MODULE_AUTHOR("Frank Cusack <[email protected]>");
MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
MODULE_VERSION("1.0.2");
#define SHA1_PAD_SIZE 40
/*
* kernel crypto API needs its arguments to be in kmalloc'd memory, not in the module
* static data area. That means sha_pad needs to be kmalloc'd.
*/
struct sha_pad {
unsigned char sha_pad1[SHA1_PAD_SIZE];
unsigned char sha_pad2[SHA1_PAD_SIZE];
};
static struct sha_pad *sha_pad;
static inline void sha_pad_init(struct sha_pad *shapad)
{
memset(shapad->sha_pad1, 0x00, sizeof(shapad->sha_pad1));
memset(shapad->sha_pad2, 0xF2, sizeof(shapad->sha_pad2));
}
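/*
 * These are the SHAPad1 (forty 0x00 bytes) and SHAPad2 (forty 0xF2 bytes)
 * constants from RFC 3079, used below when deriving a fresh session key.
 */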
/*
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
struct arc4_ctx arc4;
struct shash_desc *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
unsigned char session_key[MPPE_MAX_KEY_LEN];
unsigned keylen; /* key length in bytes */
/* NB: 128-bit == 16, 40-bit == 8! */
/* If we want to support 56-bit, */
/* the unit has to change to bits */
unsigned char bits; /* MPPE control bits */
unsigned ccount; /* 12-bit coherency count (seqno) */
unsigned stateful; /* stateful mode flag */
int discard; /* stateful mode packet loss flag */
int sanity_errors; /* take down LCP if too many */
int unit;
int debug;
struct compstat stats;
};
/* struct ppp_mppe_state.bits definitions */
#define MPPE_BIT_A 0x80 /* Encryption table was (re)initialized */
#define MPPE_BIT_B 0x40 /* MPPC only (not implemented) */
#define MPPE_BIT_C 0x20 /* MPPC only (not implemented) */
#define MPPE_BIT_D 0x10 /* This is an encrypted frame */
#define MPPE_BIT_FLUSHED MPPE_BIT_A
#define MPPE_BIT_ENCRYPTED MPPE_BIT_D
#define MPPE_BITS(p) ((p)[4] & 0xf0)
#define MPPE_CCOUNT(p) ((((p)[4] & 0x0f) << 8) + (p)[5])
#define MPPE_CCOUNT_SPACE 0x1000 /* The size of the ccount space */
#define MPPE_OVHD 2 /* MPPE overhead/packet */
#define SANITY_MAX 1600 /* Max bogon factor we will tolerate */
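/*
 * Illustrative only (not used by the driver): how the two MPPE overhead
 * bytes that follow the PPP header pack the control bits and the 12-bit
 * coherency count. For example, bits = MPPE_BIT_FLUSHED | MPPE_BIT_ENCRYPTED
 * (0x90) and ccount = 0xabc give p[4] = 0x9a and p[5] = 0xbc, so that
 * MPPE_BITS(p) == 0x90 and MPPE_CCOUNT(p) == 0xabc.
 */
#if 0
static void mppe_example_pack_header(unsigned char *p, unsigned char bits,
unsigned ccount)
{
p[4] = ((ccount >> 8) & 0x0f) | bits; /* high nibble: control bits */
p[5] = ccount & 0xff; /* low 8 bits of ccount */
}
#endif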
/*
* Key Derivation, from RFC 3078, RFC 3079.
* Equivalent to Get_Key() for MS-CHAP as described in RFC 3079.
*/
static void get_new_key_from_sha(struct ppp_mppe_state * state)
{
crypto_shash_init(state->sha1);
crypto_shash_update(state->sha1, state->master_key,
state->keylen);
crypto_shash_update(state->sha1, sha_pad->sha_pad1,
sizeof(sha_pad->sha_pad1));
crypto_shash_update(state->sha1, state->session_key,
state->keylen);
crypto_shash_update(state->sha1, sha_pad->sha_pad2,
sizeof(sha_pad->sha_pad2));
crypto_shash_final(state->sha1, state->sha1_digest);
}
/*
* Perform the MPPE rekey algorithm, from RFC 3078, sec. 7.3.
* Well, not what's written there, but rather what they meant.
*/
static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
get_new_key_from_sha(state);
if (!initial_key) {
arc4_setkey(&state->arc4, state->sha1_digest, state->keylen);
arc4_crypt(&state->arc4, state->session_key, state->sha1_digest,
state->keylen);
} else {
memcpy(state->session_key, state->sha1_digest, state->keylen);
}
if (state->keylen == 8) {
/* See RFC 3078 */
state->session_key[0] = 0xd1;
state->session_key[1] = 0x26;
state->session_key[2] = 0x9e;
}
arc4_setkey(&state->arc4, state->session_key, state->keylen);
}
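/*
 * In RFC 3079 terms the above amounts to (illustrative summary, not code
 * used by the driver): InterimKey = SHA1(MasterKey | SHAPad1 | SessionKey |
 * SHAPad2) truncated to keylen; on a rekey the new SessionKey is the RC4
 * encryption of InterimKey under InterimKey itself, while the very first
 * SessionKey is the InterimKey used directly. For 40-bit keys the three
 * leading bytes are then forced to d1 26 9e so that only 40 bits remain
 * secret.
 */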
/*
* Allocate space for a (de)compressor.
*/
static void *mppe_alloc(unsigned char *options, int optlen)
{
struct ppp_mppe_state *state;
struct crypto_shash *shash;
unsigned int digestsize;
if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
options[0] != CI_MPPE || options[1] != CILEN_MPPE ||
fips_enabled)
goto out;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
goto out;
shash = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(shash))
goto out_free;
state->sha1 = kmalloc(sizeof(*state->sha1) +
crypto_shash_descsize(shash),
GFP_KERNEL);
if (!state->sha1) {
crypto_free_shash(shash);
goto out_free;
}
state->sha1->tfm = shash;
digestsize = crypto_shash_digestsize(shash);
if (digestsize < MPPE_MAX_KEY_LEN)
goto out_free;
state->sha1_digest = kmalloc(digestsize, GFP_KERNEL);
if (!state->sha1_digest)
goto out_free;
/* Save keys. */
memcpy(state->master_key, &options[CILEN_MPPE],
sizeof(state->master_key));
memcpy(state->session_key, state->master_key,
sizeof(state->master_key));
/*
* We defer initial key generation until mppe_init(), as mppe_alloc()
* is called frequently during negotiation.
*/
return (void *)state;
out_free:
kfree(state->sha1_digest);
if (state->sha1) {
crypto_free_shash(state->sha1->tfm);
kfree_sensitive(state->sha1);
}
kfree(state);
out:
return NULL;
}
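/*
 * Rough sketch (assumption, not part of the driver) of the option buffer
 * mppe_alloc() expects from the CCP code: the raw CCP MPPE option (type,
 * length and the CILEN_MPPE - 2 bytes of MPPE option bits that mppe_init()
 * later parses with MPPE_CI_TO_OPTS) followed immediately by the master
 * key. The helper name below is purely illustrative.
 */
#if 0
static void mppe_example_build_options(unsigned char *opts,
const unsigned char *master_key, const unsigned char *ci_bits)
{
opts[0] = CI_MPPE; /* CCP option type */
opts[1] = CILEN_MPPE; /* CCP option length */
memcpy(&opts[2], ci_bits, CILEN_MPPE - 2); /* negotiated MPPE bits */
memcpy(&opts[CILEN_MPPE], master_key, MPPE_MAX_KEY_LEN);
}
#endif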
/*
* Deallocate space for a (de)compressor.
*/
static void mppe_free(void *arg)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
if (state) {
kfree(state->sha1_digest);
crypto_free_shash(state->sha1->tfm);
kfree_sensitive(state->sha1);
kfree_sensitive(state);
}
}
/*
* Initialize (de)compressor state.
*/
static int
mppe_init(void *arg, unsigned char *options, int optlen, int unit, int debug,
const char *debugstr)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
unsigned char mppe_opts;
if (optlen != CILEN_MPPE ||
options[0] != CI_MPPE || options[1] != CILEN_MPPE)
return 0;
MPPE_CI_TO_OPTS(&options[2], mppe_opts);
if (mppe_opts & MPPE_OPT_128)
state->keylen = 16;
else if (mppe_opts & MPPE_OPT_40)
state->keylen = 8;
else {
printk(KERN_WARNING "%s[%d]: unknown key length\n", debugstr,
unit);
return 0;
}
if (mppe_opts & MPPE_OPT_STATEFUL)
state->stateful = 1;
/* Generate the initial session key. */
mppe_rekey(state, 1);
if (debug) {
printk(KERN_DEBUG "%s[%d]: initialized with %d-bit %s mode\n",
debugstr, unit, (state->keylen == 16) ? 128 : 40,
(state->stateful) ? "stateful" : "stateless");
printk(KERN_DEBUG
"%s[%d]: keys: master: %*phN initial session: %*phN\n",
debugstr, unit,
(int)sizeof(state->master_key), state->master_key,
(int)sizeof(state->session_key), state->session_key);
}
/*
* Initialize the coherency count. The initial value is not specified
* in RFC 3078, but we can make a reasonable assumption that it will
* start at 0. Setting it to the max here makes the comp/decomp code
* do the right thing (determined through experiment).
*/
state->ccount = MPPE_CCOUNT_SPACE - 1;
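/* e.g. (0xfff + 1) % MPPE_CCOUNT_SPACE == 0, so the first frame that
* mppe_compress() emits carries ccount 0. */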
/*
* Note that even though we have initialized the key table, we don't
* set the FLUSHED bit. This is contrary to RFC 3078, sec. 3.1.
*/
state->bits = MPPE_BIT_ENCRYPTED;
state->unit = unit;
state->debug = debug;
return 1;
}
static int
mppe_comp_init(void *arg, unsigned char *options, int optlen, int unit,
int hdrlen, int debug)
{
/* ARGSUSED */
return mppe_init(arg, options, optlen, unit, debug, "mppe_comp_init");
}
/*
* We received a CCP Reset-Request (actually, we are sending a Reset-Ack),
* tell the compressor to rekey. Note that we MUST NOT rekey for
* every CCP Reset-Request; we only rekey on the next xmit packet.
* We might get multiple CCP Reset-Requests if our CCP Reset-Ack is lost.
* So, rekeying for every CCP Reset-Request is broken as the peer will not
* know how many times we've rekeyed. (If we rekey and THEN get another
* CCP Reset-Request, we must rekey again.)
*/
static void mppe_comp_reset(void *arg)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
state->bits |= MPPE_BIT_FLUSHED;
}
/*
* Compress (encrypt) a packet.
* It's strange to call this a compressor, since the output is always
* MPPE_OVHD + 2 bytes larger than the input.
*/
static int
mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
int isize, int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
int proto;
/*
* Check that the protocol is in the range we handle.
*/
proto = PPP_PROTOCOL(ibuf);
if (proto < 0x0021 || proto > 0x00fa)
return 0;
/* Make sure we have enough room to generate an encrypted packet. */
if (osize < isize + MPPE_OVHD + 2) {
/* Drop the packet if we should encrypt it, but can't. */
printk(KERN_DEBUG "mppe_compress[%d]: osize too small! "
"(have: %d need: %d)\n", state->unit,
osize, isize + MPPE_OVHD + 2);
return -1;
}
osize = isize + MPPE_OVHD + 2;
/*
* Copy over the PPP header and set control bits.
*/
obuf[0] = PPP_ADDRESS(ibuf);
obuf[1] = PPP_CONTROL(ibuf);
put_unaligned_be16(PPP_COMP, obuf + 2);
obuf += PPP_HDRLEN;
state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
if (state->debug >= 7)
printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
state->ccount);
put_unaligned_be16(state->ccount, obuf);
if (!state->stateful || /* stateless mode */
((state->ccount & 0xff) == 0xff) || /* "flag" packet */
(state->bits & MPPE_BIT_FLUSHED)) { /* CCP Reset-Request */
/* We must rekey */
if (state->debug && state->stateful)
printk(KERN_DEBUG "mppe_compress[%d]: rekeying\n",
state->unit);
mppe_rekey(state, 0);
state->bits |= MPPE_BIT_FLUSHED;
}
obuf[0] |= state->bits;
state->bits &= ~MPPE_BIT_FLUSHED; /* reset for next xmit */
obuf += MPPE_OVHD;
ibuf += 2; /* skip to proto field */
isize -= 2;
arc4_crypt(&state->arc4, obuf, ibuf, isize);
state->stats.unc_bytes += isize;
state->stats.unc_packets++;
state->stats.comp_bytes += osize;
state->stats.comp_packets++;
return osize;
}
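/*
 * For reference: the frame produced above is laid out as
 *
 * [addr] [ctrl] [PPP_COMP (2 bytes)] [bits | 12-bit ccount (2 bytes)]
 * [ARC4(proto + data)]
 *
 * i.e. the original protocol field and payload are encrypted as one unit,
 * which is why the returned osize is isize + MPPE_OVHD + 2.
 */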
/*
* Since every frame grows by MPPE_OVHD + 2 bytes, this is always going
* to look bad ... and the longer the link is up the worse it will get.
*/
static void mppe_comp_stats(void *arg, struct compstat *stats)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
*stats = state->stats;
}
static int
mppe_decomp_init(void *arg, unsigned char *options, int optlen, int unit,
int hdrlen, int mru, int debug)
{
/* ARGSUSED */
return mppe_init(arg, options, optlen, unit, debug, "mppe_decomp_init");
}
/*
* We received a CCP Reset-Ack. Just ignore it.
*/
static void mppe_decomp_reset(void *arg)
{
/* ARGSUSED */
return;
}
/*
* Decompress (decrypt) an MPPE packet.
*/
static int
mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
if (isize <= PPP_HDRLEN + MPPE_OVHD) {
if (state->debug)
printk(KERN_DEBUG
"mppe_decompress[%d]: short pkt (%d)\n",
state->unit, isize);
return DECOMP_ERROR;
}
/*
* Make sure we have enough room to decrypt the packet.
* Note that for our test we only subtract 1 byte whereas in
* mppe_compress() we added 2 bytes (+MPPE_OVHD);
* this is to account for possible PFC.
*/
if (osize < isize - MPPE_OVHD - 1) {
printk(KERN_DEBUG "mppe_decompress[%d]: osize too small! "
"(have: %d need: %d)\n", state->unit,
osize, isize - MPPE_OVHD - 1);
return DECOMP_ERROR;
}
osize = isize - MPPE_OVHD - 2; /* assume no PFC */
ccount = MPPE_CCOUNT(ibuf);
if (state->debug >= 7)
printk(KERN_DEBUG "mppe_decompress[%d]: ccount %d\n",
state->unit, ccount);
/* sanity checks -- terminate with extreme prejudice */
if (!(MPPE_BITS(ibuf) & MPPE_BIT_ENCRYPTED)) {
printk(KERN_DEBUG
"mppe_decompress[%d]: ENCRYPTED bit not set!\n",
state->unit);
state->sanity_errors += 100;
goto sanity_error;
}
if (!state->stateful && !flushed) {
printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in "
"stateless mode!\n", state->unit);
state->sanity_errors += 100;
goto sanity_error;
}
if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {
printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on "
"flag packet!\n", state->unit);
state->sanity_errors += 100;
goto sanity_error;
}
/*
* Check the coherency count.
*/
if (!state->stateful) {
/* Discard late packet */
if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE
> MPPE_CCOUNT_SPACE / 2) {
state->sanity_errors++;
goto sanity_error;
}
/* RFC 3078, sec 8.1. Rekey for every packet. */
while (state->ccount != ccount) {
mppe_rekey(state, 0);
state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
}
} else {
/* RFC 3078, sec 8.2. */
if (!state->discard) {
/* normal state */
state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
if (ccount != state->ccount) {
/*
* (ccount > state->ccount)
* Packet loss detected, enter the discard state.
* Signal the peer to rekey (by sending a CCP Reset-Request).
*/
state->discard = 1;
return DECOMP_ERROR;
}
} else {
/* discard state */
if (!flushed) {
/* ccp.c will be silent (no additional CCP Reset-Requests). */
return DECOMP_ERROR;
} else {
/* Rekey for every missed "flag" packet. */
while ((ccount & ~0xff) !=
(state->ccount & ~0xff)) {
mppe_rekey(state, 0);
state->ccount =
(state->ccount +
256) % MPPE_CCOUNT_SPACE;
}
/* reset */
state->discard = 0;
state->ccount = ccount;
/*
* Another problem with RFC 3078 here. It implies that the
* peer need not send a Reset-Ack packet. But RFC 1962
* requires it. Hopefully, M$ does send a Reset-Ack; even
* though it isn't required for MPPE synchronization, it is
* required to reset CCP state.
*/
}
}
if (flushed)
mppe_rekey(state, 0);
}
/*
* Fill in the first part of the PPP header. The protocol field
* comes from the decrypted data.
*/
obuf[0] = PPP_ADDRESS(ibuf); /* +1 */
obuf[1] = PPP_CONTROL(ibuf); /* +1 */
obuf += 2;
ibuf += PPP_HDRLEN + MPPE_OVHD;
isize -= PPP_HDRLEN + MPPE_OVHD; /* -6 */
/* net osize: isize-4 */
/*
* Decrypt the first byte in order to check if it is
* a compressed or uncompressed protocol field.
*/
arc4_crypt(&state->arc4, obuf, ibuf, 1);
/*
* Do PFC decompression.
* This would be nicer if we were given the actual sk_buff
* instead of a char *.
*/
if ((obuf[0] & 0x01) != 0) {
obuf[1] = obuf[0];
obuf[0] = 0;
obuf++;
osize++;
}
/* And finally, decrypt the rest of the packet. */
arc4_crypt(&state->arc4, obuf + 1, ibuf + 1, isize - 1);
state->stats.unc_bytes += osize;
state->stats.unc_packets++;
state->stats.comp_bytes += isize;
state->stats.comp_packets++;
/* good packet credit */
state->sanity_errors >>= 1;
return osize;
sanity_error:
if (state->sanity_errors < SANITY_MAX)
return DECOMP_ERROR;
else
/* Take LCP down if the peer is sending too many bogons.
* We don't want to do this for a single or just a few
* instances since it could just be due to packet corruption.
*/
return DECOMP_FATALERROR;
}
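/*
 * Illustrative sketch (not part of the driver) of the stateless-mode
 * late-packet test used above: the 12-bit coherency count wraps, so the
 * difference is taken modulo MPPE_CCOUNT_SPACE and anything more than half
 * the space "behind" is treated as late. E.g. with state->ccount == 0xffe,
 * a received ccount of 0x002 gives a distance of 4 (slightly ahead,
 * accepted), while 0xff0 gives 0xff2 (late, discarded).
 */
#if 0
static int mppe_example_ccount_is_late(unsigned have, unsigned got)
{
return ((got - have) % MPPE_CCOUNT_SPACE) > MPPE_CCOUNT_SPACE / 2;
}
#endif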
/*
* Incompressible data has arrived (this should never happen!).
* We should probably drop the link if the protocol is in the range
* of what should be encrypted. At the least, we should drop this
* packet. (How to do this?)
*/
static void mppe_incomp(void *arg, unsigned char *ibuf, int icnt)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
if (state->debug &&
(PPP_PROTOCOL(ibuf) >= 0x0021 && PPP_PROTOCOL(ibuf) <= 0x00fa))
printk(KERN_DEBUG
"mppe_incomp[%d]: incompressible (unencrypted) data! "
"(proto %04x)\n", state->unit, PPP_PROTOCOL(ibuf));
state->stats.inc_bytes += icnt;
state->stats.inc_packets++;
state->stats.unc_bytes += icnt;
state->stats.unc_packets++;
}
/*************************************************************
* Module interface table
*************************************************************/
/*
* Procedures exported to if_ppp.c.
*/
static struct compressor ppp_mppe = {
.compress_proto = CI_MPPE,
.comp_alloc = mppe_alloc,
.comp_free = mppe_free,
.comp_init = mppe_comp_init,
.comp_reset = mppe_comp_reset,
.compress = mppe_compress,
.comp_stat = mppe_comp_stats,
.decomp_alloc = mppe_alloc,
.decomp_free = mppe_free,
.decomp_init = mppe_decomp_init,
.decomp_reset = mppe_decomp_reset,
.decompress = mppe_decompress,
.incomp = mppe_incomp,
.decomp_stat = mppe_comp_stats,
.owner = THIS_MODULE,
.comp_extra = MPPE_PAD,
};
/*
* ppp_mppe_init()
*
* Prior to allowing load, try to load the arc4 and sha1 crypto
* libraries. The actual use will be allocated later, but
* this way the module will fail to insmod if they aren't available.
*/
static int __init ppp_mppe_init(void)
{
int answer;
if (fips_enabled || !crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC))
return -ENODEV;
sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
if (!sha_pad)
return -ENOMEM;
sha_pad_init(sha_pad);
answer = ppp_register_compressor(&ppp_mppe);
if (answer == 0)
printk(KERN_INFO "PPP MPPE Compression module registered\n");
else
kfree(sha_pad);
return answer;
}
static void __exit ppp_mppe_cleanup(void)
{
ppp_unregister_compressor(&ppp_mppe);
kfree(sha_pad);
}
module_init(ppp_mppe_init);
module_exit(ppp_mppe_cleanup);
| linux-master | drivers/net/ppp/ppp_mppe.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PPP async serial channel driver for Linux.
*
* Copyright 1999 Paul Mackerras.
*
* This driver provides the encapsulation and framing for sending
* and receiving PPP frames over async serial lines. It relies on
* the generic PPP layer to give it frames to send and to process
* received frames. It implements the PPP line discipline.
*
* Part of the code in this driver was inspired by the old async-only
* PPP driver, written by Michael Callahan and Al Longyear, and
* subsequently hacked by Paul Mackerras.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/crc-ccitt.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <asm/string.h>
#define PPP_VERSION "2.4.2"
#define OBUFSIZE 4096
/* Structure for storing local state. */
struct asyncppp {
struct tty_struct *tty;
unsigned int flags;
unsigned int state;
unsigned int rbits;
int mru;
spinlock_t xmit_lock;
spinlock_t recv_lock;
unsigned long xmit_flags;
u32 xaccm[8];
u32 raccm;
unsigned int bytes_sent;
unsigned int bytes_rcvd;
struct sk_buff *tpkt;
int tpkt_pos;
u16 tfcs;
unsigned char *optr;
unsigned char *olim;
unsigned long last_xmit;
struct sk_buff *rpkt;
int lcp_fcs;
struct sk_buff_head rqueue;
struct tasklet_struct tsk;
refcount_t refcnt;
struct completion dead;
struct ppp_channel chan; /* interface to generic ppp layer */
unsigned char obuf[OBUFSIZE];
};
/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP 0
#define XMIT_FULL 1
#define XMIT_BUSY 2
/* State bits */
#define SC_TOSS 1
#define SC_ESCAPE 2
#define SC_PREV_ERROR 4
/* Bits in rbits */
#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);
/*
* Prototypes.
*/
static int ppp_async_encode(struct asyncppp *ap);
static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
const u8 *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
unsigned long arg);
static void ppp_async_process(struct tasklet_struct *t);
static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
int len, int inbound);
static const struct ppp_channel_ops async_ops = {
.start_xmit = ppp_async_send,
.ioctl = ppp_async_ioctl,
};
/*
* Routines implementing the PPP line discipline.
*/
/*
* We have a potential race on dereferencing tty->disc_data,
* because the tty layer provides no locking at all - thus one
* cpu could be running ppp_asynctty_receive while another
* calls ppp_asynctty_close, which zeroes tty->disc_data and
* frees the memory that ppp_asynctty_receive is using. The best
* way to fix this is to use a rwlock in the tty struct, but for now
* we use a single global rwlock for all ttys in ppp line discipline.
*
* FIXME: this is no longer true. The _close path for the ldisc is
* now guaranteed to be sane.
*/
static DEFINE_RWLOCK(disc_data_lock);
static struct asyncppp *ap_get(struct tty_struct *tty)
{
struct asyncppp *ap;
read_lock(&disc_data_lock);
ap = tty->disc_data;
if (ap != NULL)
refcount_inc(&ap->refcnt);
read_unlock(&disc_data_lock);
return ap;
}
static void ap_put(struct asyncppp *ap)
{
if (refcount_dec_and_test(&ap->refcnt))
complete(&ap->dead);
}
/*
* Called when a tty is put into PPP line discipline. Called in process
* context.
*/
static int
ppp_asynctty_open(struct tty_struct *tty)
{
struct asyncppp *ap;
int err;
int speed;
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
err = -ENOMEM;
ap = kzalloc(sizeof(*ap), GFP_KERNEL);
if (!ap)
goto out;
/* initialize the asyncppp structure */
ap->tty = tty;
ap->mru = PPP_MRU;
spin_lock_init(&ap->xmit_lock);
spin_lock_init(&ap->recv_lock);
ap->xaccm[0] = ~0U;
ap->xaccm[3] = 0x60000000U;
ap->raccm = ~0U;
ap->optr = ap->obuf;
ap->olim = ap->obuf;
ap->lcp_fcs = -1;
skb_queue_head_init(&ap->rqueue);
tasklet_setup(&ap->tsk, ppp_async_process);
refcount_set(&ap->refcnt, 1);
init_completion(&ap->dead);
ap->chan.private = ap;
ap->chan.ops = &async_ops;
ap->chan.mtu = PPP_MRU;
speed = tty_get_baud_rate(tty);
ap->chan.speed = speed;
err = ppp_register_channel(&ap->chan);
if (err)
goto out_free;
tty->disc_data = ap;
tty->receive_room = 65536;
return 0;
out_free:
kfree(ap);
out:
return err;
}
/*
* Called when the tty is put into another line discipline
* or it hangs up. We have to wait for any cpu currently
* executing in any of the other ppp_asynctty_* routines to
* finish before we can call ppp_unregister_channel and free
* the asyncppp struct. This routine must be called from
* process context, not interrupt or softirq context.
*/
static void
ppp_asynctty_close(struct tty_struct *tty)
{
struct asyncppp *ap;
write_lock_irq(&disc_data_lock);
ap = tty->disc_data;
tty->disc_data = NULL;
write_unlock_irq(&disc_data_lock);
if (!ap)
return;
/*
* We have now ensured that nobody can start using ap from now
* on, but we have to wait for all existing users to finish.
* Note that ppp_unregister_channel ensures that no calls to
* our channel ops (i.e. ppp_async_send/ioctl) are in progress
* by the time it returns.
*/
if (!refcount_dec_and_test(&ap->refcnt))
wait_for_completion(&ap->dead);
tasklet_kill(&ap->tsk);
ppp_unregister_channel(&ap->chan);
kfree_skb(ap->rpkt);
skb_queue_purge(&ap->rqueue);
kfree_skb(ap->tpkt);
kfree(ap);
}
/*
* Called on tty hangup in process context.
*
* Wait for I/O to driver to complete and unregister PPP channel.
* This is already done by the close routine, so just call that.
*/
static void ppp_asynctty_hangup(struct tty_struct *tty)
{
ppp_asynctty_close(tty);
}
/*
* Read does nothing - no data is ever available this way.
* Pppd reads and writes packets via /dev/ppp instead.
*/
static ssize_t
ppp_asynctty_read(struct tty_struct *tty, struct file *file, u8 *buf,
size_t count, void **cookie, unsigned long offset)
{
return -EAGAIN;
}
/*
* Write on the tty does nothing, the packets all come in
* from the ppp generic stuff.
*/
static ssize_t
ppp_asynctty_write(struct tty_struct *tty, struct file *file, const u8 *buf,
size_t count)
{
return -EAGAIN;
}
/*
* Called in process context only. May be re-entered by multiple
* ioctl calling threads.
*/
static int
ppp_asynctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
struct asyncppp *ap = ap_get(tty);
int err, val;
int __user *p = (int __user *)arg;
if (!ap)
return -ENXIO;
err = -EFAULT;
switch (cmd) {
case PPPIOCGCHAN:
err = -EFAULT;
if (put_user(ppp_channel_index(&ap->chan), p))
break;
err = 0;
break;
case PPPIOCGUNIT:
err = -EFAULT;
if (put_user(ppp_unit_number(&ap->chan), p))
break;
err = 0;
break;
case TCFLSH:
/* flush our buffers and the serial port's buffer */
if (arg == TCIOFLUSH || arg == TCOFLUSH)
ppp_async_flush_output(ap);
err = n_tty_ioctl_helper(tty, cmd, arg);
break;
case FIONREAD:
val = 0;
if (put_user(val, p))
break;
err = 0;
break;
default:
/* Try the various mode ioctls */
err = tty_mode_ioctl(tty, cmd, arg);
}
ap_put(ap);
return err;
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_asynctty_receive(struct tty_struct *tty, const u8 *buf, const u8 *cflags,
size_t count)
{
struct asyncppp *ap = ap_get(tty);
unsigned long flags;
if (!ap)
return;
spin_lock_irqsave(&ap->recv_lock, flags);
ppp_async_input(ap, buf, cflags, count);
spin_unlock_irqrestore(&ap->recv_lock, flags);
if (!skb_queue_empty(&ap->rqueue))
tasklet_schedule(&ap->tsk);
ap_put(ap);
tty_unthrottle(tty);
}
static void
ppp_asynctty_wakeup(struct tty_struct *tty)
{
struct asyncppp *ap = ap_get(tty);
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
if (!ap)
return;
set_bit(XMIT_WAKEUP, &ap->xmit_flags);
tasklet_schedule(&ap->tsk);
ap_put(ap);
}
static struct tty_ldisc_ops ppp_ldisc = {
.owner = THIS_MODULE,
.num = N_PPP,
.name = "ppp",
.open = ppp_asynctty_open,
.close = ppp_asynctty_close,
.hangup = ppp_asynctty_hangup,
.read = ppp_asynctty_read,
.write = ppp_asynctty_write,
.ioctl = ppp_asynctty_ioctl,
.receive_buf = ppp_asynctty_receive,
.write_wakeup = ppp_asynctty_wakeup,
};
static int __init
ppp_async_init(void)
{
int err;
err = tty_register_ldisc(&ppp_ldisc);
if (err != 0)
printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
err);
return err;
}
/*
* The following routines provide the PPP channel interface.
*/
static int
ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
struct asyncppp *ap = chan->private;
void __user *argp = (void __user *)arg;
int __user *p = argp;
int err, val;
u32 accm[8];
err = -EFAULT;
switch (cmd) {
case PPPIOCGFLAGS:
val = ap->flags | ap->rbits;
if (put_user(val, p))
break;
err = 0;
break;
case PPPIOCSFLAGS:
if (get_user(val, p))
break;
ap->flags = val & ~SC_RCV_BITS;
spin_lock_irq(&ap->recv_lock);
ap->rbits = val & SC_RCV_BITS;
spin_unlock_irq(&ap->recv_lock);
err = 0;
break;
case PPPIOCGASYNCMAP:
if (put_user(ap->xaccm[0], (u32 __user *)argp))
break;
err = 0;
break;
case PPPIOCSASYNCMAP:
if (get_user(ap->xaccm[0], (u32 __user *)argp))
break;
err = 0;
break;
case PPPIOCGRASYNCMAP:
if (put_user(ap->raccm, (u32 __user *)argp))
break;
err = 0;
break;
case PPPIOCSRASYNCMAP:
if (get_user(ap->raccm, (u32 __user *)argp))
break;
err = 0;
break;
case PPPIOCGXASYNCMAP:
if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
break;
err = 0;
break;
case PPPIOCSXASYNCMAP:
if (copy_from_user(accm, argp, sizeof(accm)))
break;
accm[2] &= ~0x40000000U; /* can't escape 0x5e */
accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
err = 0;
break;
case PPPIOCGMRU:
if (put_user(ap->mru, p))
break;
err = 0;
break;
case PPPIOCSMRU:
if (get_user(val, p))
break;
if (val < PPP_MRU)
val = PPP_MRU;
ap->mru = val;
err = 0;
break;
default:
err = -ENOTTY;
}
return err;
}
/*
* This is called at softirq level to deliver received packets
* to the ppp_generic code, and to tell the ppp_generic code
* if we can accept more output now.
*/
static void ppp_async_process(struct tasklet_struct *t)
{
struct asyncppp *ap = from_tasklet(ap, t, tsk);
struct sk_buff *skb;
/* process received packets */
while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
if (skb->cb[0])
ppp_input_error(&ap->chan, 0);
ppp_input(&ap->chan, skb);
}
/* try to push more stuff out */
if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
ppp_output_wakeup(&ap->chan);
}
/*
* Procedures for encapsulation and framing.
*/
/*
* Procedure to encode the data for async serial transmission.
* Does octet stuffing (escaping), puts the address/control bytes
* on if A/C compression is disabled, and does protocol compression.
* Assumes ap->tpkt != 0 on entry.
* Returns 1 if we finished the current frame, 0 otherwise.
*/
#define PUT_BYTE(ap, buf, c, islcp) do { \
if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
*buf++ = PPP_ESCAPE; \
*buf++ = c ^ PPP_TRANS; \
} else \
*buf++ = c; \
} while (0)
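/*
 * Worked example of the octet stuffing done by PUT_BYTE (illustrative
 * only): ap->xaccm[3] always has bits 29 and 30 set (see ppp_asynctty_open
 * and PPPIOCSXASYNCMAP), so 0x7d and 0x7e never appear literally in the
 * output. A data byte of 0x7e (the flag character) is therefore sent as
 * the two bytes 0x7d, 0x5e (PPP_ESCAPE, then 0x7e ^ PPP_TRANS), and in LCP
 * packets every control character below 0x20 is escaped as well.
 */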
static int
ppp_async_encode(struct asyncppp *ap)
{
int fcs, i, count, c, proto;
unsigned char *buf, *buflim;
unsigned char *data;
int islcp;
buf = ap->obuf;
ap->olim = buf;
ap->optr = buf;
i = ap->tpkt_pos;
data = ap->tpkt->data;
count = ap->tpkt->len;
fcs = ap->tfcs;
proto = get_unaligned_be16(data);
/*
* LCP packets with code values between 1 (configure-request)
* and 7 (code-reject) must be sent as though no options
* had been negotiated.
*/
islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
if (i == 0) {
if (islcp)
async_lcp_peek(ap, data, count, 0);
/*
* Start of a new packet - insert the leading FLAG
* character if necessary.
*/
if (islcp || flag_time == 0 ||
time_after_eq(jiffies, ap->last_xmit + flag_time))
*buf++ = PPP_FLAG;
ap->last_xmit = jiffies;
fcs = PPP_INITFCS;
/*
* Put in the address/control bytes if necessary
*/
if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
PUT_BYTE(ap, buf, 0xff, islcp);
fcs = PPP_FCS(fcs, 0xff);
PUT_BYTE(ap, buf, 0x03, islcp);
fcs = PPP_FCS(fcs, 0x03);
}
}
/*
* Once we put in the last byte, we need to put in the FCS
* and closing flag, so make sure there are at least 7 bytes
* of free space in the output buffer.
*/
buflim = ap->obuf + OBUFSIZE - 6;
while (i < count && buf < buflim) {
c = data[i++];
if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
continue; /* compress protocol field */
fcs = PPP_FCS(fcs, c);
PUT_BYTE(ap, buf, c, islcp);
}
if (i < count) {
/*
* Remember where we are up to in this packet.
*/
ap->olim = buf;
ap->tpkt_pos = i;
ap->tfcs = fcs;
return 0;
}
/*
* We have finished the packet. Add the FCS and flag.
*/
fcs = ~fcs;
c = fcs & 0xff;
PUT_BYTE(ap, buf, c, islcp);
c = (fcs >> 8) & 0xff;
PUT_BYTE(ap, buf, c, islcp);
*buf++ = PPP_FLAG;
ap->olim = buf;
consume_skb(ap->tpkt);
ap->tpkt = NULL;
return 1;
}
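/*
 * For reference, a fully encoded frame as produced above looks like
 * (before octet stuffing):
 *
 * 0x7e 0xff 0x03 proto data ... FCS-lo FCS-hi 0x7e
 *
 * where the leading flag may be omitted if one was sent recently (see
 * flag_time), the 0xff 0x03 address/control bytes are omitted when A/C
 * compression is negotiated, and the FCS is the ones' complement of the
 * CRC accumulated over the address/control, protocol and data bytes
 * actually emitted.
 */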
/*
* Transmit-side routines.
*/
/*
* Send a packet to the peer over an async tty line.
* Returns 1 iff the packet was accepted.
* If the packet was not accepted, we will call ppp_output_wakeup
* at some later time.
*/
static int
ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
{
struct asyncppp *ap = chan->private;
ppp_async_push(ap);
if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
return 0; /* already full */
ap->tpkt = skb;
ap->tpkt_pos = 0;
ppp_async_push(ap);
return 1;
}
/*
* Push as much data as possible out to the tty.
*/
static int
ppp_async_push(struct asyncppp *ap)
{
int avail, sent, done = 0;
struct tty_struct *tty = ap->tty;
int tty_stuffed = 0;
/*
* We can get called recursively here if the tty write
* function calls our wakeup function. This can happen
* for example on a pty with both the master and slave
* set to PPP line discipline.
* We use the XMIT_BUSY bit to detect this and get out,
* leaving the XMIT_WAKEUP bit set to tell the other
* instance that it may be able to write more now.
*/
if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
return 0;
spin_lock_bh(&ap->xmit_lock);
for (;;) {
if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
tty_stuffed = 0;
if (!tty_stuffed && ap->optr < ap->olim) {
avail = ap->olim - ap->optr;
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
sent = tty->ops->write(tty, ap->optr, avail);
if (sent < 0)
goto flush; /* error, e.g. loss of CD */
ap->optr += sent;
if (sent < avail)
tty_stuffed = 1;
continue;
}
if (ap->optr >= ap->olim && ap->tpkt) {
if (ppp_async_encode(ap)) {
/* finished processing ap->tpkt */
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
continue;
}
/*
* We haven't made any progress this time around.
* Clear XMIT_BUSY to let other callers in, but
* after doing so we have to check if anyone set
* XMIT_WAKEUP since we last checked it. If they
* did, we should try again to set XMIT_BUSY and go
* around again in case XMIT_BUSY was still set when
* the other caller tried.
*/
clear_bit(XMIT_BUSY, &ap->xmit_flags);
/* any more work to do? if not, exit the loop */
if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
(!tty_stuffed && ap->tpkt)))
break;
/* more work to do, see if we can do it now */
if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
break;
}
spin_unlock_bh(&ap->xmit_lock);
return done;
flush:
clear_bit(XMIT_BUSY, &ap->xmit_flags);
if (ap->tpkt) {
kfree_skb(ap->tpkt);
ap->tpkt = NULL;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
ap->optr = ap->olim;
spin_unlock_bh(&ap->xmit_lock);
return done;
}
/*
* Flush output from our internal buffers.
* Called for the TCFLSH ioctl. Can be entered in parallel
* but this is covered by the xmit_lock.
*/
static void
ppp_async_flush_output(struct asyncppp *ap)
{
int done = 0;
spin_lock_bh(&ap->xmit_lock);
ap->optr = ap->olim;
if (ap->tpkt != NULL) {
kfree_skb(ap->tpkt);
ap->tpkt = NULL;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
spin_unlock_bh(&ap->xmit_lock);
if (done)
ppp_output_wakeup(&ap->chan);
}
/*
* Receive-side routines.
*/
/* see how many ordinary chars there are at the start of buf */
static inline int
scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
{
int i, c;
for (i = 0; i < count; ++i) {
c = buf[i];
if (c == PPP_ESCAPE || c == PPP_FLAG ||
(c < 0x20 && (ap->raccm & (1 << c)) != 0))
break;
}
return i;
}
/* called when a flag is seen - do end-of-packet processing */
static void
process_input_packet(struct asyncppp *ap)
{
struct sk_buff *skb;
unsigned char *p;
unsigned int len, fcs;
skb = ap->rpkt;
if (ap->state & (SC_TOSS | SC_ESCAPE))
goto err;
if (skb == NULL)
return; /* 0-length packet */
/* check the FCS */
p = skb->data;
len = skb->len;
if (len < 3)
goto err; /* too short */
fcs = PPP_INITFCS;
for (; len > 0; --len)
fcs = PPP_FCS(fcs, *p++);
if (fcs != PPP_GOODFCS)
goto err; /* bad FCS */
skb_trim(skb, skb->len - 2);
/* check for address/control and protocol compression */
p = skb->data;
if (p[0] == PPP_ALLSTATIONS) {
/* chop off address/control */
if (p[1] != PPP_UI || skb->len < 3)
goto err;
p = skb_pull(skb, 2);
}
/* If protocol field is not compressed, it can be LCP packet */
if (!(p[0] & 0x01)) {
unsigned int proto;
if (skb->len < 2)
goto err;
proto = (p[0] << 8) + p[1];
if (proto == PPP_LCP)
async_lcp_peek(ap, p, skb->len, 1);
}
/* queue the frame to be processed */
skb->cb[0] = ap->state;
skb_queue_tail(&ap->rqueue, skb);
ap->rpkt = NULL;
ap->state = 0;
return;
err:
/* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
ap->state = SC_PREV_ERROR;
if (skb) {
/* make skb appear as freshly allocated */
skb_trim(skb, 0);
skb_reserve(skb, - skb_headroom(skb));
}
}
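/*
 * The FCS check above relies on the usual PPP CRC-CCITT property: when the
 * transmitter appends the ones' complement of its running FCS (low byte
 * first, as ppp_async_encode() does), the receiver's FCS computed over the
 * data plus those two bytes comes out to the constant PPP_GOODFCS for an
 * undamaged frame.
 */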
/* Called when the tty driver has data for us. Runs parallel with the
other ldisc functions but will not be re-entered */
static void
ppp_async_input(struct asyncppp *ap, const u8 *buf, const u8 *flags, int count)
{
struct sk_buff *skb;
int c, i, j, n, s, f;
unsigned char *sp;
/* update bits used for 8-bit cleanness detection */
if (~ap->rbits & SC_RCV_BITS) {
s = 0;
for (i = 0; i < count; ++i) {
c = buf[i];
if (flags && flags[i] != 0)
continue;
s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
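/* Fold the byte to a nibble and look up its parity in 0x6996, a
* 16-entry parity table in bit form (bit n is set iff n has odd
* parity). */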
c = ((c >> 4) ^ c) & 0xf;
s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
}
ap->rbits |= s;
}
while (count > 0) {
/* scan through and see how many chars we can do in bulk */
if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
n = 1;
else
n = scan_ordinary(ap, buf, count);
f = 0;
if (flags && (ap->state & SC_TOSS) == 0) {
/* check the flags to see if any char had an error */
for (j = 0; j < n; ++j)
if ((f = flags[j]) != 0)
break;
}
if (f != 0) {
/* start tossing */
ap->state |= SC_TOSS;
} else if (n > 0 && (ap->state & SC_TOSS) == 0) {
/* stuff the chars in the skb */
skb = ap->rpkt;
if (!skb) {
skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
if (!skb)
goto nomem;
ap->rpkt = skb;
}
if (skb->len == 0) {
/* Try to get the payload 4-byte aligned.
* This should match the
* PPP_ALLSTATIONS/PPP_UI/compressed tests in
* process_input_packet, but we do not have
* enough chars here to test buf[1] and buf[2].
*/
if (buf[0] != PPP_ALLSTATIONS)
skb_reserve(skb, 2 + (buf[0] & 1));
}
if (n > skb_tailroom(skb)) {
/* packet overflowed MRU */
ap->state |= SC_TOSS;
} else {
sp = skb_put_data(skb, buf, n);
if (ap->state & SC_ESCAPE) {
sp[0] ^= PPP_TRANS;
ap->state &= ~SC_ESCAPE;
}
}
}
if (n >= count)
break;
c = buf[n];
if (flags != NULL && flags[n] != 0) {
ap->state |= SC_TOSS;
} else if (c == PPP_FLAG) {
process_input_packet(ap);
} else if (c == PPP_ESCAPE) {
ap->state |= SC_ESCAPE;
} else if (I_IXON(ap->tty)) {
if (c == START_CHAR(ap->tty))
start_tty(ap->tty);
else if (c == STOP_CHAR(ap->tty))
stop_tty(ap->tty);
}
/* otherwise it's a char in the recv ACCM */
++n;
buf += n;
if (flags)
flags += n;
count -= n;
}
return;
nomem:
printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
ap->state |= SC_TOSS;
}
/*
* We look at LCP frames going past so that we can notice
* and react to the LCP configure-ack from the peer.
* In the situation where the peer has been sent a configure-ack
* already, LCP is up once it has sent its configure-ack
* so the immediately following packet can be sent with the
* configured LCP options. This allows us to process the following
* packet correctly without pppd needing to respond quickly.
*
* We only respond to the received configure-ack if we have just
* sent a configure-request, and the configure-ack contains the
* same data (this is checked using a 16-bit crc of the data).
*/
#define CONFREQ 1 /* LCP code field values */
#define CONFACK 2
#define LCP_MRU 1 /* LCP option numbers */
#define LCP_ASYNCMAP 2
static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
int len, int inbound)
{
int dlen, fcs, i, code;
u32 val;
data += 2; /* skip protocol bytes */
len -= 2;
if (len < 4) /* 4 = code, ID, length */
return;
code = data[0];
if (code != CONFACK && code != CONFREQ)
return;
dlen = get_unaligned_be16(data + 2);
if (len < dlen)
return; /* packet got truncated or length is bogus */
if (code == (inbound? CONFACK: CONFREQ)) {
/*
* sent confreq or received confack:
* calculate the crc of the data from the ID field on.
*/
fcs = PPP_INITFCS;
for (i = 1; i < dlen; ++i)
fcs = PPP_FCS(fcs, data[i]);
if (!inbound) {
/* outbound confreq - remember the crc for later */
ap->lcp_fcs = fcs;
return;
}
/* received confack, check the crc */
fcs ^= ap->lcp_fcs;
ap->lcp_fcs = -1;
if (fcs != 0)
return;
} else if (inbound)
return; /* not interested in received confreq */
/* process the options in the confack */
data += 4;
dlen -= 4;
/* data[0] is code, data[1] is length */
while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
switch (data[0]) {
case LCP_MRU:
val = get_unaligned_be16(data + 2);
if (inbound)
ap->mru = val;
else
ap->chan.mtu = val;
break;
case LCP_ASYNCMAP:
val = get_unaligned_be32(data + 2);
if (inbound)
ap->raccm = val;
else
ap->xaccm[0] = val;
break;
}
dlen -= data[1];
data += data[1];
}
}
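/*
 * Worked example (illustrative only): an inbound LCP configure-ack such as
 *
 * c0 21 02 01 00 0e 01 04 05 dc 02 06 00 0a 00 00
 * proto code id len MRU=1500 async-map=0x000a0000
 *
 * reaches this function with inbound == 1; if the CRC over the bytes from
 * the ID field onwards matches the one remembered from our last outbound
 * configure-request, ap->mru becomes 1500 and ap->raccm 0x000a0000.
 */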
static void __exit ppp_async_cleanup(void)
{
tty_unregister_ldisc(&ppp_ldisc);
}
module_init(ppp_async_init);
module_exit(ppp_async_cleanup);
| linux-master | drivers/net/ppp/ppp_async.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <[email protected]>
*/
#include "ipvlan.h"
static unsigned int ipvlan_netid __read_mostly;
struct ipvlan_netns {
unsigned int ipvl_nf_hook_refcnt;
};
static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
struct net_device *dev)
{
struct ipvl_addr *addr = NULL;
struct ipvl_port *port;
int addr_type;
void *lyr3h;
if (!dev || !netif_is_ipvlan_port(dev))
goto out;
port = ipvlan_port_get_rcu(dev);
if (!port || port->mode != IPVLAN_MODE_L3S)
goto out;
lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
out:
return addr;
}
static struct sk_buff *ipvlan_l3_rcv(struct net_device *dev,
struct sk_buff *skb, u16 proto)
{
struct ipvl_addr *addr;
struct net_device *sdev;
addr = ipvlan_skb_to_addr(skb, dev);
if (!addr)
goto out;
sdev = addr->master->dev;
switch (proto) {
case AF_INET:
{
struct iphdr *ip4h = ip_hdr(skb);
int err;
err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr,
ip4h->tos, sdev);
if (unlikely(err))
goto out;
break;
}
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
{
struct dst_entry *dst;
struct ipv6hdr *ip6h = ipv6_hdr(skb);
int flags = RT6_LOOKUP_F_HAS_SADDR;
struct flowi6 fl6 = {
.flowi6_iif = sdev->ifindex,
.daddr = ip6h->daddr,
.saddr = ip6h->saddr,
.flowlabel = ip6_flowinfo(ip6h),
.flowi6_mark = skb->mark,
.flowi6_proto = ip6h->nexthdr,
};
skb_dst_drop(skb);
dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6,
skb, flags);
skb_dst_set(skb, dst);
break;
}
#endif
default:
break;
}
out:
return skb;
}
static const struct l3mdev_ops ipvl_l3mdev_ops = {
.l3mdev_l3_rcv = ipvlan_l3_rcv,
};
static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct ipvl_addr *addr;
unsigned int len;
addr = ipvlan_skb_to_addr(skb, skb->dev);
if (!addr)
goto out;
skb->dev = addr->master->dev;
skb->skb_iif = skb->dev->ifindex;
#if IS_ENABLED(CONFIG_IPV6)
if (addr->atype == IPVL_IPV6)
IP6CB(skb)->iif = skb->dev->ifindex;
#endif
len = skb->len + ETH_HLEN;
ipvlan_count_rx(addr->master, len, true, false);
out:
return NF_ACCEPT;
}
static const struct nf_hook_ops ipvl_nfops[] = {
{
.hook = ipvlan_nf_input,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = INT_MAX,
},
#if IS_ENABLED(CONFIG_IPV6)
{
.hook = ipvlan_nf_input,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = INT_MAX,
},
#endif
};
static int ipvlan_register_nf_hook(struct net *net)
{
struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
int err = 0;
if (!vnet->ipvl_nf_hook_refcnt) {
err = nf_register_net_hooks(net, ipvl_nfops,
ARRAY_SIZE(ipvl_nfops));
if (!err)
vnet->ipvl_nf_hook_refcnt = 1;
} else {
vnet->ipvl_nf_hook_refcnt++;
}
return err;
}
static void ipvlan_unregister_nf_hook(struct net *net)
{
struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
if (WARN_ON(!vnet->ipvl_nf_hook_refcnt))
return;
vnet->ipvl_nf_hook_refcnt--;
if (!vnet->ipvl_nf_hook_refcnt)
nf_unregister_net_hooks(net, ipvl_nfops,
ARRAY_SIZE(ipvl_nfops));
}
void ipvlan_migrate_l3s_hook(struct net *oldnet, struct net *newnet)
{
struct ipvlan_netns *old_vnet;
ASSERT_RTNL();
old_vnet = net_generic(oldnet, ipvlan_netid);
if (!old_vnet->ipvl_nf_hook_refcnt)
return;
ipvlan_register_nf_hook(newnet);
ipvlan_unregister_nf_hook(oldnet);
}
static void ipvlan_ns_exit(struct net *net)
{
struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
if (WARN_ON_ONCE(vnet->ipvl_nf_hook_refcnt)) {
vnet->ipvl_nf_hook_refcnt = 0;
nf_unregister_net_hooks(net, ipvl_nfops,
ARRAY_SIZE(ipvl_nfops));
}
}
static struct pernet_operations ipvlan_net_ops = {
.id = &ipvlan_netid,
.size = sizeof(struct ipvlan_netns),
.exit = ipvlan_ns_exit,
};
int ipvlan_l3s_init(void)
{
return register_pernet_subsys(&ipvlan_net_ops);
}
void ipvlan_l3s_cleanup(void)
{
unregister_pernet_subsys(&ipvlan_net_ops);
}
int ipvlan_l3s_register(struct ipvl_port *port)
{
struct net_device *dev = port->dev;
int ret;
ASSERT_RTNL();
ret = ipvlan_register_nf_hook(read_pnet(&port->pnet));
if (!ret) {
dev->l3mdev_ops = &ipvl_l3mdev_ops;
dev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
}
return ret;
}
void ipvlan_l3s_unregister(struct ipvl_port *port)
{
struct net_device *dev = port->dev;
ASSERT_RTNL();
dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
dev->l3mdev_ops = NULL;
}
| linux-master | drivers/net/ipvlan/ipvlan_l3s.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <[email protected]>
*/
#include <linux/ethtool.h>
#include "ipvlan.h"
static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval,
struct netlink_ext_ack *extack)
{
struct ipvl_dev *ipvlan;
unsigned int flags;
int err;
ASSERT_RTNL();
if (port->mode != nval) {
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
flags = ipvlan->dev->flags;
if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
err = dev_change_flags(ipvlan->dev,
flags | IFF_NOARP,
extack);
} else {
err = dev_change_flags(ipvlan->dev,
flags & ~IFF_NOARP,
extack);
}
if (unlikely(err))
goto fail;
}
if (nval == IPVLAN_MODE_L3S) {
/* New mode is L3S */
err = ipvlan_l3s_register(port);
if (err)
goto fail;
} else if (port->mode == IPVLAN_MODE_L3S) {
/* Old mode was L3S */
ipvlan_l3s_unregister(port);
}
port->mode = nval;
}
return 0;
fail:
/* Undo the flags changes that have been done so far. */
list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
flags = ipvlan->dev->flags;
if (port->mode == IPVLAN_MODE_L3 ||
port->mode == IPVLAN_MODE_L3S)
dev_change_flags(ipvlan->dev, flags | IFF_NOARP,
NULL);
else
dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP,
NULL);
}
return err;
}
static int ipvlan_port_create(struct net_device *dev)
{
struct ipvl_port *port;
int err, idx;
port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL);
if (!port)
return -ENOMEM;
write_pnet(&port->pnet, dev_net(dev));
port->dev = dev;
port->mode = IPVLAN_MODE_L3;
INIT_LIST_HEAD(&port->ipvlans);
for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
INIT_HLIST_HEAD(&port->hlhead[idx]);
skb_queue_head_init(&port->backlog);
INIT_WORK(&port->wq, ipvlan_process_multicast);
ida_init(&port->ida);
port->dev_id_start = 1;
err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
if (err)
goto err;
netdev_hold(dev, &port->dev_tracker, GFP_KERNEL);
return 0;
err:
kfree(port);
return err;
}
static void ipvlan_port_destroy(struct net_device *dev)
{
struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
struct sk_buff *skb;
netdev_put(dev, &port->dev_tracker);
if (port->mode == IPVLAN_MODE_L3S)
ipvlan_l3s_unregister(port);
netdev_rx_handler_unregister(dev);
cancel_work_sync(&port->wq);
while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
dev_put(skb->dev);
kfree_skb(skb);
}
ida_destroy(&port->ida);
kfree(port);
}
#define IPVLAN_ALWAYS_ON_OFLOADS \
(NETIF_F_SG | NETIF_F_HW_CSUM | \
NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
#define IPVLAN_ALWAYS_ON \
(IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
#define IPVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \
NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
/* NETIF_F_GSO_ENCAP_ALL and NETIF_F_GSO_SOFTWARE are newly added */
#define IPVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
static int ipvlan_init(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct net_device *phy_dev = ipvlan->phy_dev;
struct ipvl_port *port;
int err;
dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
(phy_dev->state & IPVLAN_STATE_MASK);
dev->features = phy_dev->features & IPVLAN_FEATURES;
dev->features |= IPVLAN_ALWAYS_ON;
dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
dev->hw_enc_features |= dev->features;
netif_inherit_tso_max(dev, phy_dev);
dev->hard_header_len = phy_dev->hard_header_len;
netdev_lockdep_set_classes(dev);
ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
if (!ipvlan->pcpu_stats)
return -ENOMEM;
if (!netif_is_ipvlan_port(phy_dev)) {
err = ipvlan_port_create(phy_dev);
if (err < 0) {
free_percpu(ipvlan->pcpu_stats);
return err;
}
}
port = ipvlan_port_get_rtnl(phy_dev);
port->count += 1;
return 0;
}
static void ipvlan_uninit(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct net_device *phy_dev = ipvlan->phy_dev;
struct ipvl_port *port;
free_percpu(ipvlan->pcpu_stats);
port = ipvlan_port_get_rtnl(phy_dev);
port->count -= 1;
if (!port->count)
ipvlan_port_destroy(port->dev);
}
static int ipvlan_open(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_addr *addr;
if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
ipvlan->port->mode == IPVLAN_MODE_L3S)
dev->flags |= IFF_NOARP;
else
dev->flags &= ~IFF_NOARP;
rcu_read_lock();
list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_add(ipvlan, addr);
rcu_read_unlock();
return 0;
}
static int ipvlan_stop(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct net_device *phy_dev = ipvlan->phy_dev;
struct ipvl_addr *addr;
dev_uc_unsync(phy_dev, dev);
dev_mc_unsync(phy_dev, dev);
rcu_read_lock();
list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_del(addr);
rcu_read_unlock();
return 0;
}
static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
int skblen = skb->len;
int ret;
ret = ipvlan_queue_xmit(skb, dev);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
struct ipvl_pcpu_stats *pcptr;
pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
u64_stats_update_begin(&pcptr->syncp);
u64_stats_inc(&pcptr->tx_pkts);
u64_stats_add(&pcptr->tx_bytes, skblen);
u64_stats_update_end(&pcptr->syncp);
} else {
this_cpu_inc(ipvlan->pcpu_stats->tx_drps);
}
return ret;
}
static netdev_features_t ipvlan_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
features |= NETIF_F_ALL_FOR_ALL;
features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
features = netdev_increment_features(ipvlan->phy_dev->features,
features, features);
features |= IPVLAN_ALWAYS_ON;
features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);
return features;
}
static void ipvlan_change_rx_flags(struct net_device *dev, int change)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct net_device *phy_dev = ipvlan->phy_dev;
if (change & IFF_ALLMULTI)
dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI? 1 : -1);
}
static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
bitmap_fill(ipvlan->mac_filters, IPVLAN_MAC_FILTER_SIZE);
} else {
struct netdev_hw_addr *ha;
DECLARE_BITMAP(mc_filters, IPVLAN_MAC_FILTER_SIZE);
bitmap_zero(mc_filters, IPVLAN_MAC_FILTER_SIZE);
netdev_for_each_mc_addr(ha, dev)
__set_bit(ipvlan_mac_hash(ha->addr), mc_filters);
/* Turn-on broadcast bit irrespective of address family,
* since broadcast is deferred to a work-queue, hence no
* impact on fast-path processing.
*/
__set_bit(ipvlan_mac_hash(dev->broadcast), mc_filters);
bitmap_copy(ipvlan->mac_filters, mc_filters,
IPVLAN_MAC_FILTER_SIZE);
}
dev_uc_sync(ipvlan->phy_dev, dev);
dev_mc_sync(ipvlan->phy_dev, dev);
}
static void ipvlan_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
if (ipvlan->pcpu_stats) {
struct ipvl_pcpu_stats *pcptr;
u64 rx_pkts, rx_bytes, rx_mcast, tx_pkts, tx_bytes;
u32 rx_errs = 0, tx_drps = 0;
u32 strt;
int idx;
for_each_possible_cpu(idx) {
pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx);
do {
strt = u64_stats_fetch_begin(&pcptr->syncp);
rx_pkts = u64_stats_read(&pcptr->rx_pkts);
rx_bytes = u64_stats_read(&pcptr->rx_bytes);
rx_mcast = u64_stats_read(&pcptr->rx_mcast);
tx_pkts = u64_stats_read(&pcptr->tx_pkts);
tx_bytes = u64_stats_read(&pcptr->tx_bytes);
} while (u64_stats_fetch_retry(&pcptr->syncp,
strt));
s->rx_packets += rx_pkts;
s->rx_bytes += rx_bytes;
s->multicast += rx_mcast;
s->tx_packets += tx_pkts;
s->tx_bytes += tx_bytes;
/* u32 values are updated without syncp protection. */
rx_errs += READ_ONCE(pcptr->rx_errs);
tx_drps += READ_ONCE(pcptr->tx_drps);
}
s->rx_errors = rx_errs;
s->rx_dropped = rx_errs;
s->tx_dropped = tx_drps;
}
}
static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct net_device *phy_dev = ipvlan->phy_dev;
return vlan_vid_add(phy_dev, proto, vid);
}
static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct net_device *phy_dev = ipvlan->phy_dev;
vlan_vid_del(phy_dev, proto, vid);
return 0;
}
static int ipvlan_get_iflink(const struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
return ipvlan->phy_dev->ifindex;
}
static const struct net_device_ops ipvlan_netdev_ops = {
.ndo_init = ipvlan_init,
.ndo_uninit = ipvlan_uninit,
.ndo_open = ipvlan_open,
.ndo_stop = ipvlan_stop,
.ndo_start_xmit = ipvlan_start_xmit,
.ndo_fix_features = ipvlan_fix_features,
.ndo_change_rx_flags = ipvlan_change_rx_flags,
.ndo_set_rx_mode = ipvlan_set_multicast_mac_filter,
.ndo_get_stats64 = ipvlan_get_stats64,
.ndo_vlan_rx_add_vid = ipvlan_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ipvlan_vlan_rx_kill_vid,
.ndo_get_iflink = ipvlan_get_iflink,
};
static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned len)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
struct net_device *phy_dev = ipvlan->phy_dev;
/* TODO Probably use a different field than dev_addr so that the
* mac-address on the virtual device is portable and can be carried
* while the packets use the mac-addr on the physical device.
*/
return dev_hard_header(skb, phy_dev, type, daddr,
saddr ? : phy_dev->dev_addr, len);
}
static const struct header_ops ipvlan_header_ops = {
.create = ipvlan_hard_header,
.parse = eth_header_parse,
.cache = eth_header_cache,
.cache_update = eth_header_cache_update,
};
static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
{
ipvlan->dev->mtu = dev->mtu;
}
static bool netif_is_ipvlan(const struct net_device *dev)
{
/* both ipvlan and ipvtap devices use the same netdev_ops */
return dev->netdev_ops == &ipvlan_netdev_ops;
}
static int ipvlan_ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
return __ethtool_get_link_ksettings(ipvlan->phy_dev, cmd);
}
static void ipvlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
strscpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
}
static u32 ipvlan_ethtool_get_msglevel(struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
return ipvlan->msg_enable;
}
static void ipvlan_ethtool_set_msglevel(struct net_device *dev, u32 value)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
ipvlan->msg_enable = value;
}
static const struct ethtool_ops ipvlan_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = ipvlan_ethtool_get_link_ksettings,
.get_drvinfo = ipvlan_ethtool_get_drvinfo,
.get_msglevel = ipvlan_ethtool_get_msglevel,
.set_msglevel = ipvlan_ethtool_set_msglevel,
};
static int ipvlan_nl_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
int err = 0;
if (!data)
return 0;
if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (data[IFLA_IPVLAN_MODE]) {
u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
err = ipvlan_set_port_mode(port, nmode, extack);
}
if (!err && data[IFLA_IPVLAN_FLAGS]) {
u16 flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);
if (flags & IPVLAN_F_PRIVATE)
ipvlan_mark_private(port);
else
ipvlan_clear_private(port);
if (flags & IPVLAN_F_VEPA)
ipvlan_mark_vepa(port);
else
ipvlan_clear_vepa(port);
}
return err;
}
static size_t ipvlan_nl_getsize(const struct net_device *dev)
{
return (0
+ nla_total_size(2) /* IFLA_IPVLAN_MODE */
+ nla_total_size(2) /* IFLA_IPVLAN_FLAGS */
);
}
static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
if (!data)
return 0;
if (data[IFLA_IPVLAN_MODE]) {
u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
if (mode >= IPVLAN_MODE_MAX)
return -EINVAL;
}
if (data[IFLA_IPVLAN_FLAGS]) {
u16 flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);
/* Only two bits are used at this moment. */
if (flags & ~(IPVLAN_F_PRIVATE | IPVLAN_F_VEPA))
return -EINVAL;
/* Also both flags can't be active at the same time. */
if ((flags & (IPVLAN_F_PRIVATE | IPVLAN_F_VEPA)) ==
(IPVLAN_F_PRIVATE | IPVLAN_F_VEPA))
return -EINVAL;
}
return 0;
}
static int ipvlan_nl_fillinfo(struct sk_buff *skb,
const struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
int ret = -EINVAL;
if (!port)
goto err;
ret = -EMSGSIZE;
if (nla_put_u16(skb, IFLA_IPVLAN_MODE, port->mode))
goto err;
if (nla_put_u16(skb, IFLA_IPVLAN_FLAGS, port->flags))
goto err;
return 0;
err:
return ret;
}
int ipvlan_link_new(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_port *port;
struct net_device *phy_dev;
int err;
u16 mode = IPVLAN_MODE_L3;
if (!tb[IFLA_LINK])
return -EINVAL;
phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
if (!phy_dev)
return -ENODEV;
if (netif_is_ipvlan(phy_dev)) {
struct ipvl_dev *tmp = netdev_priv(phy_dev);
phy_dev = tmp->phy_dev;
if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
} else if (!netif_is_ipvlan_port(phy_dev)) {
/* Exit early if the underlying link is invalid or busy */
if (phy_dev->type != ARPHRD_ETHER ||
phy_dev->flags & IFF_LOOPBACK) {
netdev_err(phy_dev,
"Master is either lo or non-ether device\n");
return -EINVAL;
}
if (netdev_is_rx_handler_busy(phy_dev)) {
netdev_err(phy_dev, "Device is already in use.\n");
return -EBUSY;
}
}
ipvlan->phy_dev = phy_dev;
ipvlan->dev = dev;
ipvlan->sfeatures = IPVLAN_FEATURES;
if (!tb[IFLA_MTU])
ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
spin_lock_init(&ipvlan->addrs_lock);
/* TODO Probably put random address here to be presented to the
* world but keep using the physical-dev address for the outgoing
* packets.
*/
eth_hw_addr_set(dev, phy_dev->dev_addr);
dev->priv_flags |= IFF_NO_RX_HANDLER;
err = register_netdevice(dev);
if (err < 0)
return err;
/* ipvlan_init() would have created the port, if required */
port = ipvlan_port_get_rtnl(phy_dev);
ipvlan->port = port;
/* If the port-id base is at the MAX value, then wrap it around and
* begin from 0x1 again. This may be due to a busy system where lots
* of slaves are getting created and deleted.
*/
if (port->dev_id_start == 0xFFFE)
port->dev_id_start = 0x1;
/* Since L2 address is shared among all IPvlan slaves including
 * master, use unique 16-bit dev-ids to differentiate among them.
* Assign IDs between 0x1 and 0xFFFE (used by the master) to each
* slave link [see addrconf_ifid_eui48()].
*/
err = ida_simple_get(&port->ida, port->dev_id_start, 0xFFFE,
GFP_KERNEL);
if (err < 0)
err = ida_simple_get(&port->ida, 0x1, port->dev_id_start,
GFP_KERNEL);
if (err < 0)
goto unregister_netdev;
dev->dev_id = err;
/* Increment id-base to the next slot for the future assignment */
port->dev_id_start = err + 1;
err = netdev_upper_dev_link(phy_dev, dev, extack);
if (err)
goto remove_ida;
	/* Flags are per port and the latest update overrides previous
	 * values. The user has to be consistent in setting them, just
	 * like the mode attribute.
	 */
if (data && data[IFLA_IPVLAN_FLAGS])
port->flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);
if (data && data[IFLA_IPVLAN_MODE])
mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
err = ipvlan_set_port_mode(port, mode, extack);
if (err)
goto unlink_netdev;
list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
netif_stacked_transfer_operstate(phy_dev, dev);
return 0;
unlink_netdev:
netdev_upper_dev_unlink(phy_dev, dev);
remove_ida:
ida_simple_remove(&port->ida, dev->dev_id);
unregister_netdev:
unregister_netdevice(dev);
return err;
}
EXPORT_SYMBOL_GPL(ipvlan_link_new);
void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_addr *addr, *next;
spin_lock_bh(&ipvlan->addrs_lock);
list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
ipvlan_ht_addr_del(addr);
list_del_rcu(&addr->anode);
kfree_rcu(addr, rcu);
}
spin_unlock_bh(&ipvlan->addrs_lock);
ida_simple_remove(&ipvlan->port->ida, dev->dev_id);
list_del_rcu(&ipvlan->pnode);
unregister_netdevice_queue(dev, head);
netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
}
EXPORT_SYMBOL_GPL(ipvlan_link_delete);
void ipvlan_link_setup(struct net_device *dev)
{
ether_setup(dev);
dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
dev->netdev_ops = &ipvlan_netdev_ops;
dev->needs_free_netdev = true;
dev->header_ops = &ipvlan_header_ops;
dev->ethtool_ops = &ipvlan_ethtool_ops;
}
EXPORT_SYMBOL_GPL(ipvlan_link_setup);
static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
{
[IFLA_IPVLAN_MODE] = { .type = NLA_U16 },
[IFLA_IPVLAN_FLAGS] = { .type = NLA_U16 },
};
static struct net *ipvlan_get_link_net(const struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
return dev_net(ipvlan->phy_dev);
}
static struct rtnl_link_ops ipvlan_link_ops = {
.kind = "ipvlan",
.priv_size = sizeof(struct ipvl_dev),
.setup = ipvlan_link_setup,
.newlink = ipvlan_link_new,
.dellink = ipvlan_link_delete,
.get_link_net = ipvlan_get_link_net,
};
int ipvlan_link_register(struct rtnl_link_ops *ops)
{
ops->get_size = ipvlan_nl_getsize;
ops->policy = ipvlan_nl_policy;
ops->validate = ipvlan_nl_validate;
ops->fill_info = ipvlan_nl_fillinfo;
ops->changelink = ipvlan_nl_changelink;
ops->maxtype = IFLA_IPVLAN_MAX;
return rtnl_link_register(ops);
}
EXPORT_SYMBOL_GPL(ipvlan_link_register);
static int ipvlan_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct netdev_notifier_pre_changeaddr_info *prechaddr_info;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct ipvl_dev *ipvlan, *next;
struct ipvl_port *port;
LIST_HEAD(lst_kill);
int err;
if (!netif_is_ipvlan_port(dev))
return NOTIFY_DONE;
port = ipvlan_port_get_rtnl(dev);
switch (event) {
case NETDEV_UP:
case NETDEV_CHANGE:
list_for_each_entry(ipvlan, &port->ipvlans, pnode)
netif_stacked_transfer_operstate(ipvlan->phy_dev,
ipvlan->dev);
break;
case NETDEV_REGISTER: {
struct net *oldnet, *newnet = dev_net(dev);
oldnet = read_pnet(&port->pnet);
if (net_eq(newnet, oldnet))
break;
write_pnet(&port->pnet, newnet);
if (port->mode == IPVLAN_MODE_L3S)
ipvlan_migrate_l3s_hook(oldnet, newnet);
break;
}
case NETDEV_UNREGISTER:
if (dev->reg_state != NETREG_UNREGISTERING)
break;
list_for_each_entry_safe(ipvlan, next, &port->ipvlans, pnode)
ipvlan->dev->rtnl_link_ops->dellink(ipvlan->dev,
&lst_kill);
unregister_netdevice_many(&lst_kill);
break;
case NETDEV_FEAT_CHANGE:
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
netif_inherit_tso_max(ipvlan->dev, dev);
netdev_update_features(ipvlan->dev);
}
break;
case NETDEV_CHANGEMTU:
list_for_each_entry(ipvlan, &port->ipvlans, pnode)
ipvlan_adjust_mtu(ipvlan, dev);
break;
case NETDEV_PRE_CHANGEADDR:
prechaddr_info = ptr;
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
err = dev_pre_changeaddr_notify(ipvlan->dev,
prechaddr_info->dev_addr,
extack);
if (err)
return notifier_from_errno(err);
}
break;
case NETDEV_CHANGEADDR:
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
eth_hw_addr_set(ipvlan->dev, dev->dev_addr);
call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev);
}
break;
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlying device to change its type. */
return NOTIFY_BAD;
}
return NOTIFY_DONE;
}
/* the caller must hold the addrs lock */
static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC);
if (!addr)
return -ENOMEM;
addr->master = ipvlan;
if (!is_v6) {
memcpy(&addr->ip4addr, iaddr, sizeof(struct in_addr));
addr->atype = IPVL_IPV4;
#if IS_ENABLED(CONFIG_IPV6)
} else {
memcpy(&addr->ip6addr, iaddr, sizeof(struct in6_addr));
addr->atype = IPVL_IPV6;
#endif
}
list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
/* If the interface is not up, the address will be added to the hash
* list by ipvlan_open.
*/
if (netif_running(ipvlan->dev))
ipvlan_ht_addr_add(ipvlan, addr);
return 0;
}
static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
spin_lock_bh(&ipvlan->addrs_lock);
addr = ipvlan_find_addr(ipvlan, iaddr, is_v6);
if (!addr) {
spin_unlock_bh(&ipvlan->addrs_lock);
return;
}
ipvlan_ht_addr_del(addr);
list_del_rcu(&addr->anode);
spin_unlock_bh(&ipvlan->addrs_lock);
kfree_rcu(addr, rcu);
}
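/* Helper used by the inet/inet6 address notifiers: only ipvlan (or
 * ipvtap) slaves with a valid port are of interest to us.
 */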
static bool ipvlan_is_valid_dev(const struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
if (!netif_is_ipvlan(dev))
return false;
if (!ipvlan || !ipvlan->port)
return false;
return true;
}
#if IS_ENABLED(CONFIG_IPV6)
static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
int ret = -EINVAL;
spin_lock_bh(&ipvlan->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true))
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv6=%pI6c addr for %s intf\n",
ip6_addr, ipvlan->dev->name);
else
ret = ipvlan_add_addr(ipvlan, ip6_addr, true);
spin_unlock_bh(&ipvlan->addrs_lock);
return ret;
}
static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
return ipvlan_del_addr(ipvlan, ip6_addr, true);
}
static int ipvlan_addr6_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct inet6_ifaddr *if6 = (struct inet6_ifaddr *)ptr;
struct net_device *dev = (struct net_device *)if6->idev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
if (ipvlan_add_addr6(ipvlan, &if6->addr))
return NOTIFY_BAD;
break;
case NETDEV_DOWN:
ipvlan_del_addr6(ipvlan, &if6->addr);
break;
}
return NOTIFY_OK;
}
static int ipvlan_addr6_validator_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct in6_validator_info *i6vi = (struct in6_validator_info *)ptr;
struct net_device *dev = (struct net_device *)i6vi->i6vi_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
if (ipvlan_addr_busy(ipvlan->port, &i6vi->i6vi_addr, true)) {
NL_SET_ERR_MSG(i6vi->extack,
"Address already assigned to an ipvlan device");
return notifier_from_errno(-EADDRINUSE);
}
break;
}
return NOTIFY_OK;
}
#endif
static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
int ret = -EINVAL;
spin_lock_bh(&ipvlan->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false))
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv4=%pI4 on %s intf.\n",
ip4_addr, ipvlan->dev->name);
else
ret = ipvlan_add_addr(ipvlan, ip4_addr, false);
spin_unlock_bh(&ipvlan->addrs_lock);
return ret;
}
static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
return ipvlan_del_addr(ipvlan, ip4_addr, false);
}
static int ipvlan_addr4_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct in_ifaddr *if4 = (struct in_ifaddr *)ptr;
struct net_device *dev = (struct net_device *)if4->ifa_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct in_addr ip4_addr;
if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
ip4_addr.s_addr = if4->ifa_address;
if (ipvlan_add_addr4(ipvlan, &ip4_addr))
return NOTIFY_BAD;
break;
case NETDEV_DOWN:
ip4_addr.s_addr = if4->ifa_address;
ipvlan_del_addr4(ipvlan, &ip4_addr);
break;
}
return NOTIFY_OK;
}
static int ipvlan_addr4_validator_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct in_validator_info *ivi = (struct in_validator_info *)ptr;
struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
if (ipvlan_addr_busy(ipvlan->port, &ivi->ivi_addr, false)) {
NL_SET_ERR_MSG(ivi->extack,
"Address already assigned to an ipvlan device");
return notifier_from_errno(-EADDRINUSE);
}
break;
}
return NOTIFY_OK;
}
static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
.notifier_call = ipvlan_addr4_event,
};
static struct notifier_block ipvlan_addr4_vtor_notifier_block __read_mostly = {
.notifier_call = ipvlan_addr4_validator_event,
};
static struct notifier_block ipvlan_notifier_block __read_mostly = {
.notifier_call = ipvlan_device_event,
};
#if IS_ENABLED(CONFIG_IPV6)
static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
.notifier_call = ipvlan_addr6_event,
};
static struct notifier_block ipvlan_addr6_vtor_notifier_block __read_mostly = {
.notifier_call = ipvlan_addr6_validator_event,
};
#endif
static int __init ipvlan_init_module(void)
{
int err;
ipvlan_init_secret();
register_netdevice_notifier(&ipvlan_notifier_block);
#if IS_ENABLED(CONFIG_IPV6)
register_inet6addr_notifier(&ipvlan_addr6_notifier_block);
register_inet6addr_validator_notifier(
&ipvlan_addr6_vtor_notifier_block);
#endif
register_inetaddr_notifier(&ipvlan_addr4_notifier_block);
register_inetaddr_validator_notifier(&ipvlan_addr4_vtor_notifier_block);
err = ipvlan_l3s_init();
if (err < 0)
goto error;
err = ipvlan_link_register(&ipvlan_link_ops);
if (err < 0) {
ipvlan_l3s_cleanup();
goto error;
}
return 0;
error:
unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
unregister_inetaddr_validator_notifier(
&ipvlan_addr4_vtor_notifier_block);
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
unregister_inet6addr_validator_notifier(
&ipvlan_addr6_vtor_notifier_block);
#endif
unregister_netdevice_notifier(&ipvlan_notifier_block);
return err;
}
static void __exit ipvlan_cleanup_module(void)
{
rtnl_link_unregister(&ipvlan_link_ops);
ipvlan_l3s_cleanup();
unregister_netdevice_notifier(&ipvlan_notifier_block);
unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
unregister_inetaddr_validator_notifier(
&ipvlan_addr4_vtor_notifier_block);
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
unregister_inet6addr_validator_notifier(
&ipvlan_addr6_vtor_notifier_block);
#endif
}
module_init(ipvlan_init_module);
module_exit(ipvlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mahesh Bandewar <[email protected]>");
MODULE_DESCRIPTION("Driver for L3 (IPv6/IPv4) based VLANs");
MODULE_ALIAS_RTNL_LINK("ipvlan");
| linux-master | drivers/net/ipvlan/ipvlan_main.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <[email protected]>
*/
#include "ipvlan.h"
static u32 ipvlan_jhash_secret __read_mostly;
void ipvlan_init_secret(void)
{
net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
unsigned int len, bool success, bool mcast)
{
if (likely(success)) {
struct ipvl_pcpu_stats *pcptr;
pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
u64_stats_update_begin(&pcptr->syncp);
u64_stats_inc(&pcptr->rx_pkts);
u64_stats_add(&pcptr->rx_bytes, len);
if (mcast)
u64_stats_inc(&pcptr->rx_mcast);
u64_stats_update_end(&pcptr->syncp);
} else {
this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
}
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);
#if IS_ENABLED(CONFIG_IPV6)
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
const struct in6_addr *ip6_addr = iaddr;
return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
IPVLAN_HASH_MASK;
}
#else
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
return 0;
}
#endif
static u8 ipvlan_get_v4_hash(const void *iaddr)
{
const struct in_addr *ip4_addr = iaddr;
return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
IPVLAN_HASH_MASK;
}
static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
{
if (!is_v6 && addr->atype == IPVL_IPV4) {
struct in_addr *i4addr = (struct in_addr *)iaddr;
return addr->ip4addr.s_addr == i4addr->s_addr;
#if IS_ENABLED(CONFIG_IPV6)
} else if (is_v6 && addr->atype == IPVL_IPV6) {
struct in6_addr *i6addr = (struct in6_addr *)iaddr;
return ipv6_addr_equal(&addr->ip6addr, i6addr);
#endif
}
return false;
}
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
const void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
u8 hash;
hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
ipvlan_get_v4_hash(iaddr);
hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
if (addr_equal(is_v6, addr, iaddr))
return addr;
return NULL;
}
void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
struct ipvl_port *port = ipvlan->port;
u8 hash;
hash = (addr->atype == IPVL_IPV6) ?
ipvlan_get_v6_hash(&addr->ip6addr) :
ipvlan_get_v4_hash(&addr->ip4addr);
if (hlist_unhashed(&addr->hlnode))
hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}
void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
hlist_del_init_rcu(&addr->hlnode);
}
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
const void *iaddr, bool is_v6)
{
struct ipvl_addr *addr, *ret = NULL;
rcu_read_lock();
list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
if (addr_equal(is_v6, addr, iaddr)) {
ret = addr;
break;
}
}
rcu_read_unlock();
return ret;
}
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
struct ipvl_dev *ipvlan;
bool ret = false;
rcu_read_lock();
list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
ret = true;
break;
}
}
rcu_read_unlock();
return ret;
}
void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
void *lyr3h = NULL;
switch (skb->protocol) {
case htons(ETH_P_ARP): {
struct arphdr *arph;
if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
return NULL;
arph = arp_hdr(skb);
*type = IPVL_ARP;
lyr3h = arph;
break;
}
case htons(ETH_P_IP): {
u32 pktlen;
struct iphdr *ip4h;
if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
return NULL;
ip4h = ip_hdr(skb);
pktlen = skb_ip_totlen(skb);
if (ip4h->ihl < 5 || ip4h->version != 4)
return NULL;
if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
return NULL;
*type = IPVL_IPV4;
lyr3h = ip4h;
break;
}
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6): {
struct ipv6hdr *ip6h;
if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
return NULL;
ip6h = ipv6_hdr(skb);
if (ip6h->version != 6)
return NULL;
*type = IPVL_IPV6;
lyr3h = ip6h;
/* Only Neighbour Solicitation pkts need different treatment */
if (ipv6_addr_any(&ip6h->saddr) &&
ip6h->nexthdr == NEXTHDR_ICMP) {
struct icmp6hdr *icmph;
if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
return NULL;
ip6h = ipv6_hdr(skb);
icmph = (struct icmp6hdr *)(ip6h + 1);
if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
/* Need to access the ipv6 address in body */
if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
+ sizeof(struct in6_addr))))
return NULL;
ip6h = ipv6_hdr(skb);
icmph = (struct icmp6hdr *)(ip6h + 1);
}
*type = IPVL_ICMPV6;
lyr3h = icmph;
}
break;
}
#endif
default:
return NULL;
}
return lyr3h;
}
unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
ipvlan_jhash_secret);
return hash & IPVLAN_MAC_FILTER_MASK;
}
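/* Work-queue handler that drains the per-port backlog and delivers a
 * copy of each multicast/broadcast frame to every slave whose MAC
 * filter and IFF_UP state allow it. TX-originated frames are finally
 * sent out through the master device as well.
 */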
void ipvlan_process_multicast(struct work_struct *work)
{
struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
struct ethhdr *ethh;
struct ipvl_dev *ipvlan;
struct sk_buff *skb, *nskb;
struct sk_buff_head list;
unsigned int len;
unsigned int mac_hash;
int ret;
u8 pkt_type;
bool tx_pkt;
__skb_queue_head_init(&list);
spin_lock_bh(&port->backlog.lock);
skb_queue_splice_tail_init(&port->backlog, &list);
spin_unlock_bh(&port->backlog.lock);
while ((skb = __skb_dequeue(&list)) != NULL) {
struct net_device *dev = skb->dev;
bool consumed = false;
ethh = eth_hdr(skb);
tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
mac_hash = ipvlan_mac_hash(ethh->h_dest);
if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
pkt_type = PACKET_BROADCAST;
else
pkt_type = PACKET_MULTICAST;
rcu_read_lock();
list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
if (tx_pkt && (ipvlan->dev == skb->dev))
continue;
if (!test_bit(mac_hash, ipvlan->mac_filters))
continue;
if (!(ipvlan->dev->flags & IFF_UP))
continue;
ret = NET_RX_DROP;
len = skb->len + ETH_HLEN;
nskb = skb_clone(skb, GFP_ATOMIC);
local_bh_disable();
if (nskb) {
consumed = true;
nskb->pkt_type = pkt_type;
nskb->dev = ipvlan->dev;
if (tx_pkt)
ret = dev_forward_skb(ipvlan->dev, nskb);
else
ret = netif_rx(nskb);
}
ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
local_bh_enable();
}
rcu_read_unlock();
if (tx_pkt) {
/* If the packet originated here, send it out. */
skb->dev = port->dev;
skb->pkt_type = pkt_type;
dev_queue_xmit(skb);
} else {
if (consumed)
consume_skb(skb);
else
kfree_skb(skb);
}
dev_put(dev);
cond_resched();
}
}
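/* Scrub the skb if it is (potentially) crossing network namespaces and
 * retarget it to @dev when one is given.
 */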
static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
bool xnet = true;
if (dev)
xnet = !net_eq(dev_net(skb->dev), dev_net(dev));
skb_scrub_packet(skb, xnet);
if (dev)
skb->dev = dev;
}
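/* Deliver a frame to the slave owning @addr. Locally originated frames
 * are forwarded with dev_forward_skb(); frames from the wire are simply
 * retargeted and RX_HANDLER_ANOTHER is returned so that the stack
 * re-runs receive processing for the slave device.
 */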
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
bool local)
{
struct ipvl_dev *ipvlan = addr->master;
struct net_device *dev = ipvlan->dev;
unsigned int len;
rx_handler_result_t ret = RX_HANDLER_CONSUMED;
bool success = false;
struct sk_buff *skb = *pskb;
len = skb->len + ETH_HLEN;
	/* Only packets exchanged between two local slaves need the
	 * device-up check as well as the skb-share check.
	 */
if (local) {
if (unlikely(!(dev->flags & IFF_UP))) {
kfree_skb(skb);
goto out;
}
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
goto out;
*pskb = skb;
}
if (local) {
skb->pkt_type = PACKET_HOST;
if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
success = true;
} else {
skb->dev = dev;
ret = RX_HANDLER_ANOTHER;
success = true;
}
out:
ipvlan_count_rx(ipvlan, len, success, false);
return ret;
}
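/* Map a parsed IPv4/IPv6/ARP/NDISC header to the owning slave address
 * on this port, keyed on either the destination or the source address
 * depending on @use_dest.
 */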
struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
int addr_type, bool use_dest)
{
struct ipvl_addr *addr = NULL;
switch (addr_type) {
#if IS_ENABLED(CONFIG_IPV6)
case IPVL_IPV6: {
struct ipv6hdr *ip6h;
struct in6_addr *i6addr;
ip6h = (struct ipv6hdr *)lyr3h;
i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
addr = ipvlan_ht_addr_lookup(port, i6addr, true);
break;
}
case IPVL_ICMPV6: {
struct nd_msg *ndmh;
struct in6_addr *i6addr;
		/* Make sure that Neighbour Solicitation ICMPv6 packets
		 * are handled to avoid DAD issues.
		 */
ndmh = (struct nd_msg *)lyr3h;
if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
i6addr = &ndmh->target;
addr = ipvlan_ht_addr_lookup(port, i6addr, true);
}
break;
}
#endif
case IPVL_IPV4: {
struct iphdr *ip4h;
__be32 *i4addr;
ip4h = (struct iphdr *)lyr3h;
i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
addr = ipvlan_ht_addr_lookup(port, i4addr, false);
break;
}
case IPVL_ARP: {
struct arphdr *arph;
unsigned char *arp_ptr;
__be32 dip;
arph = (struct arphdr *)lyr3h;
arp_ptr = (unsigned char *)(arph + 1);
if (use_dest)
arp_ptr += (2 * port->dev->addr_len) + 4;
else
arp_ptr += port->dev->addr_len;
memcpy(&dip, arp_ptr, 4);
addr = ipvlan_ht_addr_lookup(port, &dip, false);
break;
}
}
return addr;
}
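/* Route a locally generated IPv4 packet in the scope of the master
 * device and transmit it via ip_local_out(); the packet is dropped and
 * tx_errors incremented on failure.
 */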
static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
const struct iphdr *ip4h = ip_hdr(skb);
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
struct rtable *rt;
int err, ret = NET_XMIT_DROP;
struct flowi4 fl4 = {
.flowi4_oif = dev->ifindex,
.flowi4_tos = RT_TOS(ip4h->tos),
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
.daddr = ip4h->daddr,
.saddr = ip4h->saddr,
};
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto err;
if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
ip_rt_put(rt);
goto err;
}
skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
err = ip_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
else
ret = NET_XMIT_SUCCESS;
goto out;
err:
dev->stats.tx_errors++;
kfree_skb(skb);
out:
return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
struct dst_entry *dst;
int err, ret = NET_XMIT_DROP;
struct flowi6 fl6 = {
.flowi6_oif = dev->ifindex,
.daddr = ip6h->daddr,
.saddr = ip6h->saddr,
.flowi6_flags = FLOWI_FLAG_ANYSRC,
.flowlabel = ip6_flowinfo(ip6h),
.flowi6_mark = skb->mark,
.flowi6_proto = ip6h->nexthdr,
};
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
ret = dst->error;
dst_release(dst);
goto err;
}
skb_dst_set(skb, dst);
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
err = ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
else
ret = NET_XMIT_SUCCESS;
goto out;
err:
dev->stats.tx_errors++;
kfree_skb(skb);
out:
return ret;
}
#else
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
return NET_XMIT_DROP;
}
#endif
static int ipvlan_process_outbound(struct sk_buff *skb)
{
int ret = NET_XMIT_DROP;
	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
	 * carry an L2 header which needs to be discarded before the packet
	 * is processed further in the net-ns of the main device.
	 */
if (skb_mac_header_was_set(skb)) {
		/* In this mode we don't care about
		 * multicast and broadcast traffic */
struct ethhdr *ethh = eth_hdr(skb);
if (is_multicast_ether_addr(ethh->h_dest)) {
pr_debug_ratelimited(
"Dropped {multi|broad}cast of type=[%x]\n",
ntohs(skb->protocol));
kfree_skb(skb);
goto out;
}
skb_pull(skb, sizeof(*ethh));
skb->mac_header = (typeof(skb->mac_header))~0U;
skb_reset_network_header(skb);
}
if (skb->protocol == htons(ETH_P_IPV6))
ret = ipvlan_process_v6_outbound(skb);
else if (skb->protocol == htons(ETH_P_IP))
ret = ipvlan_process_v4_outbound(skb);
else {
pr_warn_ratelimited("Dropped outbound packet type=%x\n",
ntohs(skb->protocol));
kfree_skb(skb);
}
out:
return ret;
}
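/* Defer multicast/broadcast handling to the per-port work-queue; frames
 * are dropped when the backlog limit is exceeded.
 */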
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
struct sk_buff *skb, bool tx_pkt)
{
if (skb->protocol == htons(ETH_P_PAUSE)) {
kfree_skb(skb);
return;
}
	/* Record whether the deferred packet came from the TX or the RX path.
	 * Deciding this by looking at the packet's MAC addresses would lead to
	 * erroneous decisions (e.g. with loopback mode on the master device or
	 * hair-pin mode on the switch).
	 */
IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;
spin_lock(&port->backlog.lock);
if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
dev_hold(skb->dev);
__skb_queue_tail(&port->backlog, skb);
spin_unlock(&port->backlog.lock);
schedule_work(&port->wq);
} else {
spin_unlock(&port->backlog.lock);
dev_core_stats_rx_dropped_inc(skb->dev);
kfree_skb(skb);
}
}
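/* L3/L3S transmit path: if the destination address belongs to another
 * slave on the same port (and neither VEPA nor private mode forbids
 * it), the frame is delivered locally; otherwise it is handed to the
 * master device's namespace for routing.
 */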
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
void *lyr3h;
struct ipvl_addr *addr;
int addr_type;
lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (!lyr3h)
goto out;
if (!ipvlan_is_vepa(ipvlan->port)) {
addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
if (addr) {
if (ipvlan_is_private(ipvlan->port)) {
consume_skb(skb);
return NET_XMIT_DROP;
}
ipvlan_rcv_frame(addr, &skb, true);
return NET_XMIT_SUCCESS;
}
}
out:
ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
return ipvlan_process_outbound(skb);
}
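/* L2 transmit path: frames addressed to the shared MAC are delivered to
 * the owning slave (or forwarded to the master), multicast/broadcast is
 * deferred to the work-queue, and everything else goes out through the
 * master device.
 */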
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ethhdr *eth = skb_eth_hdr(skb);
struct ipvl_addr *addr;
void *lyr3h;
int addr_type;
if (!ipvlan_is_vepa(ipvlan->port) &&
ether_addr_equal(eth->h_dest, eth->h_source)) {
lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (lyr3h) {
addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
if (addr) {
if (ipvlan_is_private(ipvlan->port)) {
consume_skb(skb);
return NET_XMIT_DROP;
}
ipvlan_rcv_frame(addr, &skb, true);
return NET_XMIT_SUCCESS;
}
}
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
return NET_XMIT_DROP;
		/* The packet definitely does not belong to any of the
		 * virtual devices, but the destination is local. So forward
		 * the skb to the main device. On the RX side we just return
		 * RX_HANDLER_PASS so that it is processed further up the stack.
		 */
dev_forward_skb(ipvlan->phy_dev, skb);
return NET_XMIT_SUCCESS;
} else if (is_multicast_ether_addr(eth->h_dest)) {
skb_reset_mac_header(skb);
ipvlan_skb_crossing_ns(skb, NULL);
ipvlan_multicast_enqueue(ipvlan->port, skb, true);
return NET_XMIT_SUCCESS;
}
skb->dev = ipvlan->phy_dev;
return dev_queue_xmit(skb);
}
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);
if (!port)
goto out;
if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
goto out;
switch(port->mode) {
case IPVLAN_MODE_L2:
return ipvlan_xmit_mode_l2(skb, dev);
case IPVLAN_MODE_L3:
#ifdef CONFIG_IPVLAN_L3S
case IPVLAN_MODE_L3S:
#endif
return ipvlan_xmit_mode_l3(skb, dev);
}
/* Should not reach here */
WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
out:
kfree_skb(skb);
return NET_XMIT_DROP;
}
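/* A frame is considered external unless its source MAC is the master's
 * and its source L3 address belongs to one of the local slaves.
 */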
static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
struct ethhdr *eth = eth_hdr(skb);
struct ipvl_addr *addr;
void *lyr3h;
int addr_type;
if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
return true;
addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
if (addr)
return false;
}
return true;
}
static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
struct ipvl_port *port)
{
void *lyr3h;
int addr_type;
struct ipvl_addr *addr;
struct sk_buff *skb = *pskb;
rx_handler_result_t ret = RX_HANDLER_PASS;
lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
if (addr)
ret = ipvlan_rcv_frame(addr, pskb, false);
out:
return ret;
}
static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
struct ipvl_port *port)
{
struct sk_buff *skb = *pskb;
struct ethhdr *eth = eth_hdr(skb);
rx_handler_result_t ret = RX_HANDLER_PASS;
if (is_multicast_ether_addr(eth->h_dest)) {
if (ipvlan_external_frame(skb, port)) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
			/* External frames are queued for device-local
			 * distribution, but a copy is given to the master
			 * straight away to avoid sending duplicates later
			 * when the work-queue processes this frame. This is
			 * achieved by returning RX_HANDLER_PASS.
			 */
if (nskb) {
ipvlan_skb_crossing_ns(nskb, NULL);
ipvlan_multicast_enqueue(port, nskb, false);
}
}
} else {
/* Perform like l3 mode for non-multicast packet */
ret = ipvlan_handle_mode_l3(pskb, port);
}
return ret;
}
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);
if (!port)
return RX_HANDLER_PASS;
switch (port->mode) {
case IPVLAN_MODE_L2:
return ipvlan_handle_mode_l2(pskb, port);
case IPVLAN_MODE_L3:
return ipvlan_handle_mode_l3(pskb, port);
#ifdef CONFIG_IPVLAN_L3S
case IPVLAN_MODE_L3S:
return RX_HANDLER_PASS;
#endif
}
/* Should not reach here */
WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
| linux-master | drivers/net/ipvlan/ipvlan_core.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/etherdevice.h>
#include "ipvlan.h"
#include <linux/if_vlan.h>
#include <linux/if_tap.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
NETIF_F_TSO6)
static dev_t ipvtap_major;
static struct cdev ipvtap_cdev;
static const void *ipvtap_net_namespace(const struct device *d)
{
const struct net_device *dev = to_net_dev(d->parent);
return dev_net(dev);
}
static struct class ipvtap_class = {
.name = "ipvtap",
.ns_type = &net_ns_type_operations,
.namespace = ipvtap_net_namespace,
};
struct ipvtap_dev {
struct ipvl_dev vlan;
struct tap_dev tap;
};
static void ipvtap_count_tx_dropped(struct tap_dev *tap)
{
struct ipvtap_dev *vlantap = container_of(tap, struct ipvtap_dev, tap);
struct ipvl_dev *vlan = &vlantap->vlan;
this_cpu_inc(vlan->pcpu_stats->tx_drps);
}
static void ipvtap_count_rx_dropped(struct tap_dev *tap)
{
struct ipvtap_dev *vlantap = container_of(tap, struct ipvtap_dev, tap);
struct ipvl_dev *vlan = &vlantap->vlan;
ipvlan_count_rx(vlan, 0, 0, 0);
}
static void ipvtap_update_features(struct tap_dev *tap,
netdev_features_t features)
{
struct ipvtap_dev *vlantap = container_of(tap, struct ipvtap_dev, tap);
struct ipvl_dev *vlan = &vlantap->vlan;
vlan->sfeatures = features;
netdev_update_features(vlan->dev);
}
static int ipvtap_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct ipvtap_dev *vlantap = netdev_priv(dev);
int err;
INIT_LIST_HEAD(&vlantap->tap.queue_list);
/* Since macvlan supports all offloads by default, make
* tap support all offloads also.
*/
vlantap->tap.tap_features = TUN_OFFLOADS;
vlantap->tap.count_tx_dropped = ipvtap_count_tx_dropped;
vlantap->tap.update_features = ipvtap_update_features;
vlantap->tap.count_rx_dropped = ipvtap_count_rx_dropped;
err = netdev_rx_handler_register(dev, tap_handle_frame, &vlantap->tap);
if (err)
return err;
	/* Don't put anything that may fail after ipvlan_link_new()
	 * because we can't undo what it does.
	 */
err = ipvlan_link_new(src_net, dev, tb, data, extack);
if (err) {
netdev_rx_handler_unregister(dev);
return err;
}
vlantap->tap.dev = vlantap->vlan.dev;
return err;
}
static void ipvtap_dellink(struct net_device *dev,
struct list_head *head)
{
struct ipvtap_dev *vlan = netdev_priv(dev);
netdev_rx_handler_unregister(dev);
tap_del_queues(&vlan->tap);
ipvlan_link_delete(dev, head);
}
static void ipvtap_setup(struct net_device *dev)
{
ipvlan_link_setup(dev);
dev->tx_queue_len = TUN_READQ_SIZE;
dev->priv_flags &= ~IFF_NO_QUEUE;
}
static struct rtnl_link_ops ipvtap_link_ops __read_mostly = {
.kind = "ipvtap",
.setup = ipvtap_setup,
.newlink = ipvtap_newlink,
.dellink = ipvtap_dellink,
.priv_size = sizeof(struct ipvtap_dev),
};
static int ipvtap_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct ipvtap_dev *vlantap;
struct device *classdev;
dev_t devt;
int err;
char tap_name[IFNAMSIZ];
if (dev->rtnl_link_ops != &ipvtap_link_ops)
return NOTIFY_DONE;
snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex);
vlantap = netdev_priv(dev);
switch (event) {
case NETDEV_REGISTER:
/* Create the device node here after the network device has
* been registered but before register_netdevice has
* finished running.
*/
err = tap_get_minor(ipvtap_major, &vlantap->tap);
if (err)
return notifier_from_errno(err);
devt = MKDEV(MAJOR(ipvtap_major), vlantap->tap.minor);
classdev = device_create(&ipvtap_class, &dev->dev, devt,
dev, "%s", tap_name);
if (IS_ERR(classdev)) {
tap_free_minor(ipvtap_major, &vlantap->tap);
return notifier_from_errno(PTR_ERR(classdev));
}
err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj,
tap_name);
if (err)
return notifier_from_errno(err);
break;
case NETDEV_UNREGISTER:
		/* vlantap->tap.minor == 0 if NETDEV_REGISTER above failed */
if (vlantap->tap.minor == 0)
break;
sysfs_remove_link(&dev->dev.kobj, tap_name);
devt = MKDEV(MAJOR(ipvtap_major), vlantap->tap.minor);
device_destroy(&ipvtap_class, devt);
tap_free_minor(ipvtap_major, &vlantap->tap);
break;
case NETDEV_CHANGE_TX_QUEUE_LEN:
if (tap_queue_resize(&vlantap->tap))
return NOTIFY_BAD;
break;
}
return NOTIFY_DONE;
}
static struct notifier_block ipvtap_notifier_block __read_mostly = {
.notifier_call = ipvtap_device_event,
};
static int __init ipvtap_init(void)
{
int err;
err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap",
THIS_MODULE);
if (err)
goto out1;
err = class_register(&ipvtap_class);
if (err)
goto out2;
err = register_netdevice_notifier(&ipvtap_notifier_block);
if (err)
goto out3;
err = ipvlan_link_register(&ipvtap_link_ops);
if (err)
goto out4;
return 0;
out4:
unregister_netdevice_notifier(&ipvtap_notifier_block);
out3:
class_unregister(&ipvtap_class);
out2:
tap_destroy_cdev(ipvtap_major, &ipvtap_cdev);
out1:
return err;
}
module_init(ipvtap_init);
static void __exit ipvtap_exit(void)
{
rtnl_link_unregister(&ipvtap_link_ops);
unregister_netdevice_notifier(&ipvtap_notifier_block);
class_unregister(&ipvtap_class);
tap_destroy_cdev(ipvtap_major, &ipvtap_cdev);
}
module_exit(ipvtap_exit);
MODULE_ALIAS_RTNL_LINK("ipvtap");
MODULE_AUTHOR("Sainath Grandhi <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/ipvlan/ipvtap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tracepoints for Thunderbolt/USB4 networking driver
*
* Copyright (C) 2023, Intel Corporation
* Author: Mika Westerberg <[email protected]>
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
| linux-master | drivers/net/thunderbolt/trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Networking over Thunderbolt/USB4 cables using USB4NET protocol
* (formerly Apple ThunderboltIP).
*
* Copyright (C) 2017, Intel Corporation
* Authors: Amir Levy <[email protected]>
* Michael Jamet <[email protected]>
* Mika Westerberg <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>
#include <net/ip6_checksum.h>
#include "trace.h"
/* Protocol timeouts in ms */
#define TBNET_LOGIN_DELAY 4500
#define TBNET_LOGIN_TIMEOUT 500
#define TBNET_LOGOUT_TIMEOUT 1000
#define TBNET_RING_SIZE 256
#define TBNET_LOGIN_RETRIES 60
#define TBNET_LOGOUT_RETRIES 10
#define TBNET_E2E BIT(0)
#define TBNET_MATCH_FRAGS_ID BIT(1)
#define TBNET_64K_FRAMES BIT(2)
#define TBNET_MAX_MTU SZ_64K
#define TBNET_FRAME_SIZE SZ_4K
#define TBNET_MAX_PAYLOAD_SIZE \
(TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))
/* Rx packets need to hold space for skb_shared_info */
#define TBNET_RX_MAX_SIZE \
(TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define TBNET_RX_PAGE_ORDER get_order(TBNET_RX_MAX_SIZE)
#define TBNET_RX_PAGE_SIZE (PAGE_SIZE << TBNET_RX_PAGE_ORDER)
#define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))
/**
* struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
 * @frame_size: size of the data in the frame
 * @frame_index: running index on the frames
 * @frame_id: ID of the frame to match frames to a specific packet
 * @frame_count: how many frames assemble a full packet
*
* Each data frame passed to the high-speed DMA ring has this header. If
* the XDomain network directory announces that %TBNET_MATCH_FRAGS_ID is
* supported then @frame_id is filled, otherwise it stays %0.
*/
struct thunderbolt_ip_frame_header {
__le32 frame_size;
__le16 frame_index;
__le16 frame_id;
__le32 frame_count;
};
enum thunderbolt_ip_frame_pdf {
TBIP_PDF_FRAME_START = 1,
TBIP_PDF_FRAME_END,
};
enum thunderbolt_ip_type {
TBIP_LOGIN,
TBIP_LOGIN_RESPONSE,
TBIP_LOGOUT,
TBIP_STATUS,
};
struct thunderbolt_ip_header {
u32 route_hi;
u32 route_lo;
u32 length_sn;
uuid_t uuid;
uuid_t initiator_uuid;
uuid_t target_uuid;
u32 type;
u32 command_id;
};
#define TBIP_HDR_LENGTH_MASK GENMASK(5, 0)
#define TBIP_HDR_SN_MASK GENMASK(28, 27)
#define TBIP_HDR_SN_SHIFT 27
struct thunderbolt_ip_login {
struct thunderbolt_ip_header hdr;
u32 proto_version;
u32 transmit_path;
u32 reserved[4];
};
#define TBIP_LOGIN_PROTO_VERSION 1
struct thunderbolt_ip_login_response {
struct thunderbolt_ip_header hdr;
u32 status;
u32 receiver_mac[2];
u32 receiver_mac_len;
u32 reserved[4];
};
struct thunderbolt_ip_logout {
struct thunderbolt_ip_header hdr;
};
struct thunderbolt_ip_status {
struct thunderbolt_ip_header hdr;
u32 status;
};
struct tbnet_stats {
u64 tx_packets;
u64 rx_packets;
u64 tx_bytes;
u64 rx_bytes;
u64 rx_errors;
u64 tx_errors;
u64 rx_length_errors;
u64 rx_over_errors;
u64 rx_crc_errors;
u64 rx_missed_errors;
};
struct tbnet_frame {
struct net_device *dev;
struct page *page;
struct ring_frame frame;
};
struct tbnet_ring {
struct tbnet_frame frames[TBNET_RING_SIZE];
unsigned int cons;
unsigned int prod;
struct tb_ring *ring;
};
/**
* struct tbnet - ThunderboltIP network driver private data
* @svc: XDomain service the driver is bound to
* @xd: XDomain the service belongs to
* @handler: ThunderboltIP configuration protocol handler
* @dev: Networking device
* @napi: NAPI structure for Rx polling
* @stats: Network statistics
* @skb: Network packet that is currently processed on Rx path
* @command_id: ID used for next configuration protocol packet
* @login_sent: ThunderboltIP login message successfully sent
* @login_received: ThunderboltIP login message received from the remote
* host
* @local_transmit_path: HopID we are using to send out packets
* @remote_transmit_path: HopID the other end is using to send packets to us
* @connection_lock: Lock serializing access to @login_sent,
* @login_received and @transmit_path.
* @login_retries: Number of login retries currently done
* @login_work: Worker to send ThunderboltIP login packets
* @connected_work: Worker that finalizes the ThunderboltIP connection
* setup and enables DMA paths for high speed data
* transfers
* @disconnect_work: Worker that handles tearing down the ThunderboltIP
* connection
 * @rx_hdr: Copy of the header of the currently processed Rx frame.
 *	    Used when a network packet consists of multiple Thunderbolt
 *	    frames. Kept in the on-wire (little-endian) byte order.
* @rx_ring: Software ring holding Rx frames
* @frame_id: Frame ID use for next Tx packet
* (if %TBNET_MATCH_FRAGS_ID is supported in both ends)
* @tx_ring: Software ring holding Tx frames
*/
struct tbnet {
const struct tb_service *svc;
struct tb_xdomain *xd;
struct tb_protocol_handler handler;
struct net_device *dev;
struct napi_struct napi;
struct tbnet_stats stats;
struct sk_buff *skb;
atomic_t command_id;
bool login_sent;
bool login_received;
int local_transmit_path;
int remote_transmit_path;
struct mutex connection_lock;
int login_retries;
struct delayed_work login_work;
struct work_struct connected_work;
struct work_struct disconnect_work;
struct thunderbolt_ip_frame_header rx_hdr;
struct tbnet_ring rx_ring;
atomic_t frame_id;
struct tbnet_ring tx_ring;
};
/* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */
static const uuid_t tbnet_dir_uuid =
UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
/* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */
static const uuid_t tbnet_svc_uuid =
UUID_INIT(0x798f589e, 0x3616, 0x8a47,
0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);
static struct tb_property_dir *tbnet_dir;
static bool tbnet_e2e = true;
module_param_named(e2e, tbnet_e2e, bool, 0444);
MODULE_PARM_DESC(e2e, "USB4NET full end-to-end flow control (default: true)");
static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
enum thunderbolt_ip_type type, size_t size, u32 command_id)
{
u32 length_sn;
/* Length does not include route_hi/lo and length_sn fields */
length_sn = (size - 3 * 4) / 4;
length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;
hdr->route_hi = upper_32_bits(route);
hdr->route_lo = lower_32_bits(route);
hdr->length_sn = length_sn;
uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
uuid_copy(&hdr->initiator_uuid, initiator_uuid);
uuid_copy(&hdr->target_uuid, target_uuid);
hdr->type = type;
hdr->command_id = command_id;
}
static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
u32 command_id)
{
struct thunderbolt_ip_login_response reply;
struct tb_xdomain *xd = net->xd;
memset(&reply, 0, sizeof(reply));
tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
command_id);
memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
reply.receiver_mac_len = ETH_ALEN;
return tb_xdomain_response(xd, &reply, sizeof(reply),
TB_CFG_PKG_XDOMAIN_RESP);
}
static int tbnet_login_request(struct tbnet *net, u8 sequence)
{
struct thunderbolt_ip_login_response reply;
struct thunderbolt_ip_login request;
struct tb_xdomain *xd = net->xd;
memset(&request, 0, sizeof(request));
tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
xd->remote_uuid, TBIP_LOGIN, sizeof(request),
atomic_inc_return(&net->command_id));
request.proto_version = TBIP_LOGIN_PROTO_VERSION;
request.transmit_path = net->local_transmit_path;
return tb_xdomain_request(xd, &request, sizeof(request),
TB_CFG_PKG_XDOMAIN_RESP, &reply,
sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
TBNET_LOGIN_TIMEOUT);
}
static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
u32 command_id)
{
struct thunderbolt_ip_status reply;
struct tb_xdomain *xd = net->xd;
memset(&reply, 0, sizeof(reply));
tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
xd->remote_uuid, TBIP_STATUS, sizeof(reply),
atomic_inc_return(&net->command_id));
return tb_xdomain_response(xd, &reply, sizeof(reply),
TB_CFG_PKG_XDOMAIN_RESP);
}
static int tbnet_logout_request(struct tbnet *net)
{
struct thunderbolt_ip_logout request;
struct thunderbolt_ip_status reply;
struct tb_xdomain *xd = net->xd;
memset(&request, 0, sizeof(request));
tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
atomic_inc_return(&net->command_id));
return tb_xdomain_request(xd, &request, sizeof(request),
TB_CFG_PKG_XDOMAIN_RESP, &reply,
sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
TBNET_LOGOUT_TIMEOUT);
}
static void start_login(struct tbnet *net)
{
netdev_dbg(net->dev, "login started\n");
mutex_lock(&net->connection_lock);
net->login_sent = false;
net->login_received = false;
mutex_unlock(&net->connection_lock);
queue_delayed_work(system_long_wq, &net->login_work,
msecs_to_jiffies(1000));
}
static void stop_login(struct tbnet *net)
{
cancel_delayed_work_sync(&net->login_work);
cancel_work_sync(&net->connected_work);
netdev_dbg(net->dev, "login stopped\n");
}
static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
{
return tf->frame.size ? : TBNET_FRAME_SIZE;
}
static void tbnet_free_buffers(struct tbnet_ring *ring)
{
unsigned int i;
for (i = 0; i < TBNET_RING_SIZE; i++) {
struct device *dma_dev = tb_ring_dma_device(ring->ring);
struct tbnet_frame *tf = &ring->frames[i];
enum dma_data_direction dir;
unsigned int order;
size_t size;
if (!tf->page)
continue;
if (ring->ring->is_tx) {
dir = DMA_TO_DEVICE;
order = 0;
size = TBNET_FRAME_SIZE;
} else {
dir = DMA_FROM_DEVICE;
order = TBNET_RX_PAGE_ORDER;
size = TBNET_RX_PAGE_SIZE;
}
trace_tbnet_free_frame(i, tf->page, tf->frame.buffer_phy, dir);
if (tf->frame.buffer_phy)
dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
dir);
__free_pages(tf->page, order);
tf->page = NULL;
}
ring->cons = 0;
ring->prod = 0;
}
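/* Tear down the ThunderboltIP connection: optionally send a logout
 * request, stop the rings, free the frame buffers and disable the
 * high-speed DMA paths.
 */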
static void tbnet_tear_down(struct tbnet *net, bool send_logout)
{
netif_carrier_off(net->dev);
netif_stop_queue(net->dev);
stop_login(net);
mutex_lock(&net->connection_lock);
if (net->login_sent && net->login_received) {
int ret, retries = TBNET_LOGOUT_RETRIES;
while (send_logout && retries-- > 0) {
netdev_dbg(net->dev, "sending logout request %u\n",
retries);
ret = tbnet_logout_request(net);
if (ret != -ETIMEDOUT)
break;
}
tb_ring_stop(net->rx_ring.ring);
tb_ring_stop(net->tx_ring.ring);
tbnet_free_buffers(&net->rx_ring);
tbnet_free_buffers(&net->tx_ring);
ret = tb_xdomain_disable_paths(net->xd,
net->local_transmit_path,
net->rx_ring.ring->hop,
net->remote_transmit_path,
net->tx_ring.ring->hop);
if (ret)
netdev_warn(net->dev, "failed to disable DMA paths\n");
tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
net->remote_transmit_path = 0;
}
net->login_retries = 0;
net->login_sent = false;
net->login_received = false;
netdev_dbg(net->dev, "network traffic stopped\n");
mutex_unlock(&net->connection_lock);
}
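/* XDomain handler for incoming ThunderboltIP control packets (login and
 * logout). Returns 1 when the packet was consumed, 0 when it was not
 * meant for this service.
 */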
static int tbnet_handle_packet(const void *buf, size_t size, void *data)
{
const struct thunderbolt_ip_login *pkg = buf;
struct tbnet *net = data;
u32 command_id;
int ret = 0;
u32 sequence;
u64 route;
/* Make sure the packet is for us */
if (size < sizeof(struct thunderbolt_ip_header))
return 0;
if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
return 0;
if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
return 0;
route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
route &= ~BIT_ULL(63);
if (route != net->xd->route)
return 0;
sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
sequence >>= TBIP_HDR_SN_SHIFT;
command_id = pkg->hdr.command_id;
switch (pkg->hdr.type) {
case TBIP_LOGIN:
netdev_dbg(net->dev, "remote login request received\n");
if (!netif_running(net->dev))
break;
ret = tbnet_login_response(net, route, sequence,
pkg->hdr.command_id);
if (!ret) {
netdev_dbg(net->dev, "remote login response sent\n");
mutex_lock(&net->connection_lock);
net->login_received = true;
net->remote_transmit_path = pkg->transmit_path;
			/* If we have reached the maximum number of retries,
			 * or a previous logout occurred, schedule another
			 * round of login retries.
			 */
if (net->login_retries >= TBNET_LOGIN_RETRIES ||
!net->login_sent) {
net->login_retries = 0;
queue_delayed_work(system_long_wq,
&net->login_work, 0);
}
mutex_unlock(&net->connection_lock);
queue_work(system_long_wq, &net->connected_work);
}
break;
case TBIP_LOGOUT:
netdev_dbg(net->dev, "remote logout request received\n");
ret = tbnet_logout_response(net, route, sequence, command_id);
if (!ret) {
netdev_dbg(net->dev, "remote logout response sent\n");
queue_work(system_long_wq, &net->disconnect_work);
}
break;
default:
return 0;
}
if (ret)
netdev_warn(net->dev, "failed to send ThunderboltIP response\n");
return 1;
}
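/* Number of buffers currently available in the software ring
 * (prod - cons, relying on unsigned wrap-around).
 */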
static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
{
return ring->prod - ring->cons;
}
static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
{
struct tbnet_ring *ring = &net->rx_ring;
int ret;
while (nbuffers--) {
struct device *dma_dev = tb_ring_dma_device(ring->ring);
unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
struct tbnet_frame *tf = &ring->frames[index];
dma_addr_t dma_addr;
if (tf->page)
break;
		/* Allocate a page (order > 0) so that it can hold the maximum
		 * ThunderboltIP frame (4kB) plus the additional room for
		 * SKB shared info required by build_skb().
		 */
tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
if (!tf->page) {
ret = -ENOMEM;
goto err_free;
}
dma_addr = dma_map_page(dma_dev, tf->page, 0,
TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dma_dev, dma_addr)) {
ret = -ENOMEM;
goto err_free;
}
tf->frame.buffer_phy = dma_addr;
tf->dev = net->dev;
trace_tbnet_alloc_rx_frame(index, tf->page, dma_addr,
DMA_FROM_DEVICE);
tb_ring_rx(ring->ring, &tf->frame);
ring->prod++;
}
return 0;
err_free:
tbnet_free_buffers(ring);
return ret;
}
static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
{
struct tbnet_ring *ring = &net->tx_ring;
struct device *dma_dev = tb_ring_dma_device(ring->ring);
struct tbnet_frame *tf;
unsigned int index;
if (!tbnet_available_buffers(ring))
return NULL;
index = ring->cons++ & (TBNET_RING_SIZE - 1);
tf = &ring->frames[index];
tf->frame.size = 0;
dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
tbnet_frame_size(tf), DMA_TO_DEVICE);
return tf;
}
static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
bool canceled)
{
struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
struct tbnet *net = netdev_priv(tf->dev);
/* Return buffer to the ring */
net->tx_ring.prod++;
if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
netif_wake_queue(net->dev);
}
static int tbnet_alloc_tx_buffers(struct tbnet *net)
{
struct tbnet_ring *ring = &net->tx_ring;
struct device *dma_dev = tb_ring_dma_device(ring->ring);
unsigned int i;
for (i = 0; i < TBNET_RING_SIZE; i++) {
struct tbnet_frame *tf = &ring->frames[i];
dma_addr_t dma_addr;
tf->page = alloc_page(GFP_KERNEL);
if (!tf->page) {
tbnet_free_buffers(ring);
return -ENOMEM;
}
dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dma_dev, dma_addr)) {
__free_page(tf->page);
tf->page = NULL;
tbnet_free_buffers(ring);
return -ENOMEM;
}
tf->dev = net->dev;
tf->frame.buffer_phy = dma_addr;
tf->frame.callback = tbnet_tx_callback;
tf->frame.sof = TBIP_PDF_FRAME_START;
tf->frame.eof = TBIP_PDF_FRAME_END;
trace_tbnet_alloc_tx_frame(i, tf->page, dma_addr, DMA_TO_DEVICE);
}
ring->cons = 0;
ring->prod = TBNET_RING_SIZE - 1;
return 0;
}
static void tbnet_connected_work(struct work_struct *work)
{
struct tbnet *net = container_of(work, typeof(*net), connected_work);
bool connected;
int ret;
if (netif_carrier_ok(net->dev))
return;
mutex_lock(&net->connection_lock);
connected = net->login_sent && net->login_received;
mutex_unlock(&net->connection_lock);
if (!connected)
return;
netdev_dbg(net->dev, "login successful, enabling paths\n");
ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
if (ret != net->remote_transmit_path) {
netdev_err(net->dev, "failed to allocate Rx HopID\n");
return;
}
	/* Both logins were successful, so enable the rings and the
	 * high-speed DMA paths, and start the network device queue.
*
* Note we enable the DMA paths last to make sure we have primed
* the Rx ring before any incoming packets are allowed to
* arrive.
*/
tb_ring_start(net->tx_ring.ring);
tb_ring_start(net->rx_ring.ring);
ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
if (ret)
goto err_stop_rings;
ret = tbnet_alloc_tx_buffers(net);
if (ret)
goto err_free_rx_buffers;
ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
net->rx_ring.ring->hop,
net->remote_transmit_path,
net->tx_ring.ring->hop);
if (ret) {
netdev_err(net->dev, "failed to enable DMA paths\n");
goto err_free_tx_buffers;
}
netif_carrier_on(net->dev);
netif_start_queue(net->dev);
netdev_dbg(net->dev, "network traffic started\n");
return;
err_free_tx_buffers:
tbnet_free_buffers(&net->tx_ring);
err_free_rx_buffers:
tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
tb_ring_stop(net->rx_ring.ring);
tb_ring_stop(net->tx_ring.ring);
tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
}
static void tbnet_login_work(struct work_struct *work)
{
struct tbnet *net = container_of(work, typeof(*net), login_work.work);
unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
int ret;
if (netif_carrier_ok(net->dev))
return;
netdev_dbg(net->dev, "sending login request, retries=%u\n",
net->login_retries);
ret = tbnet_login_request(net, net->login_retries % 4);
if (ret) {
netdev_dbg(net->dev, "sending login request failed, ret=%d\n",
ret);
if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
queue_delayed_work(system_long_wq, &net->login_work,
delay);
} else {
netdev_info(net->dev, "ThunderboltIP login timed out\n");
}
} else {
netdev_dbg(net->dev, "received login reply\n");
net->login_retries = 0;
mutex_lock(&net->connection_lock);
net->login_sent = true;
mutex_unlock(&net->connection_lock);
queue_work(system_long_wq, &net->connected_work);
}
}
static void tbnet_disconnect_work(struct work_struct *work)
{
struct tbnet *net = container_of(work, typeof(*net), disconnect_work);
tbnet_tear_down(net, false);
}
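/* Validate a received ThunderboltIP frame: check the ring descriptor
 * flags and the frame header, and when in the middle of a multi-frame
 * packet also check consistency against the first fragment.
 */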
static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
const struct thunderbolt_ip_frame_header *hdr)
{
u32 frame_id, frame_count, frame_size, frame_index;
unsigned int size;
if (tf->frame.flags & RING_DESC_CRC_ERROR) {
net->stats.rx_crc_errors++;
return false;
} else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
net->stats.rx_over_errors++;
return false;
}
	/* Should be greater than just the header, i.e. it contains data */
size = tbnet_frame_size(tf);
if (size <= sizeof(*hdr)) {
net->stats.rx_length_errors++;
return false;
}
frame_count = le32_to_cpu(hdr->frame_count);
frame_size = le32_to_cpu(hdr->frame_size);
frame_index = le16_to_cpu(hdr->frame_index);
frame_id = le16_to_cpu(hdr->frame_id);
if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
net->stats.rx_length_errors++;
return false;
}
/* If we are in the middle of a packet, validate the frame
* header against the first fragment of the packet.
*/
if (net->skb && net->rx_hdr.frame_count) {
/* Check the frame count fits the count field */
if (frame_count != le32_to_cpu(net->rx_hdr.frame_count)) {
net->stats.rx_length_errors++;
return false;
}
/* Check that the frame index is incremented correctly
* and that the frame ID matches.
*/
if (frame_index != le16_to_cpu(net->rx_hdr.frame_index) + 1 ||
frame_id != le16_to_cpu(net->rx_hdr.frame_id)) {
net->stats.rx_missed_errors++;
return false;
}
if (net->skb->len + frame_size > TBNET_MAX_MTU) {
net->stats.rx_length_errors++;
return false;
}
return true;
}
/* Start of packet, validate the frame header */
if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
net->stats.rx_length_errors++;
return false;
}
if (frame_index != 0) {
net->stats.rx_missed_errors++;
return false;
}
return true;
}
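/* NAPI poll routine: consumes completed Rx frames, builds the first
* fragment into an skb with build_skb() and appends the rest as page
* fragments, then passes complete packets up via napi_gro_receive().
*/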
static int tbnet_poll(struct napi_struct *napi, int budget)
{
struct tbnet *net = container_of(napi, struct tbnet, napi);
unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
unsigned int rx_packets = 0;
while (rx_packets < budget) {
const struct thunderbolt_ip_frame_header *hdr;
unsigned int hdr_size = sizeof(*hdr);
struct sk_buff *skb = NULL;
struct ring_frame *frame;
struct tbnet_frame *tf;
struct page *page;
bool last = true;
u32 frame_size;
/* Return some buffers to hardware. Doing it one at a time is
* too slow, so allocate MAX_SKB_FRAGS buffers in one go.
*/
if (cleaned_count >= MAX_SKB_FRAGS) {
tbnet_alloc_rx_buffers(net, cleaned_count);
cleaned_count = 0;
}
frame = tb_ring_poll(net->rx_ring.ring);
if (!frame)
break;
dma_unmap_page(dma_dev, frame->buffer_phy,
TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
tf = container_of(frame, typeof(*tf), frame);
page = tf->page;
tf->page = NULL;
net->rx_ring.cons++;
cleaned_count++;
hdr = page_address(page);
if (!tbnet_check_frame(net, tf, hdr)) {
trace_tbnet_invalid_rx_ip_frame(hdr->frame_size,
hdr->frame_id, hdr->frame_index, hdr->frame_count);
__free_pages(page, TBNET_RX_PAGE_ORDER);
dev_kfree_skb_any(net->skb);
net->skb = NULL;
continue;
}
trace_tbnet_rx_ip_frame(hdr->frame_size, hdr->frame_id,
hdr->frame_index, hdr->frame_count);
frame_size = le32_to_cpu(hdr->frame_size);
skb = net->skb;
if (!skb) {
skb = build_skb(page_address(page),
TBNET_RX_PAGE_SIZE);
if (!skb) {
__free_pages(page, TBNET_RX_PAGE_ORDER);
net->stats.rx_errors++;
break;
}
skb_reserve(skb, hdr_size);
skb_put(skb, frame_size);
net->skb = skb;
} else {
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, hdr_size, frame_size,
TBNET_RX_PAGE_SIZE - hdr_size);
}
net->rx_hdr.frame_size = hdr->frame_size;
net->rx_hdr.frame_count = hdr->frame_count;
net->rx_hdr.frame_index = hdr->frame_index;
net->rx_hdr.frame_id = hdr->frame_id;
last = le16_to_cpu(net->rx_hdr.frame_index) ==
le32_to_cpu(net->rx_hdr.frame_count) - 1;
rx_packets++;
net->stats.rx_bytes += frame_size;
if (last) {
skb->protocol = eth_type_trans(skb, net->dev);
trace_tbnet_rx_skb(skb);
napi_gro_receive(&net->napi, skb);
net->skb = NULL;
}
}
net->stats.rx_packets += rx_packets;
if (cleaned_count)
tbnet_alloc_rx_buffers(net, cleaned_count);
if (rx_packets >= budget)
return budget;
napi_complete_done(napi, rx_packets);
/* Re-enable the ring interrupt */
tb_ring_poll_complete(net->rx_ring.ring);
return rx_packets;
}
static void tbnet_start_poll(void *data)
{
struct tbnet *net = data;
napi_schedule(&net->napi);
}
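/* Allocates the Tx/Rx rings and the local transmit HopID, enables NAPI
* and starts the ThunderboltIP login handshake.
*/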
static int tbnet_open(struct net_device *dev)
{
struct tbnet *net = netdev_priv(dev);
struct tb_xdomain *xd = net->xd;
u16 sof_mask, eof_mask;
struct tb_ring *ring;
unsigned int flags;
int hopid;
netif_carrier_off(dev);
ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
RING_FLAG_FRAME);
if (!ring) {
netdev_err(dev, "failed to allocate Tx ring\n");
return -ENOMEM;
}
net->tx_ring.ring = ring;
hopid = tb_xdomain_alloc_out_hopid(xd, -1);
if (hopid < 0) {
netdev_err(dev, "failed to allocate Tx HopID\n");
tb_ring_free(net->tx_ring.ring);
net->tx_ring.ring = NULL;
return hopid;
}
net->local_transmit_path = hopid;
sof_mask = BIT(TBIP_PDF_FRAME_START);
eof_mask = BIT(TBIP_PDF_FRAME_END);
flags = RING_FLAG_FRAME;
/* Only enable full E2E if the other end supports it too */
if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
flags |= RING_FLAG_E2E;
ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags,
net->tx_ring.ring->hop, sof_mask,
eof_mask, tbnet_start_poll, net);
if (!ring) {
netdev_err(dev, "failed to allocate Rx ring\n");
tb_xdomain_release_out_hopid(xd, hopid);
tb_ring_free(net->tx_ring.ring);
net->tx_ring.ring = NULL;
return -ENOMEM;
}
net->rx_ring.ring = ring;
napi_enable(&net->napi);
start_login(net);
return 0;
}
static int tbnet_stop(struct net_device *dev)
{
struct tbnet *net = netdev_priv(dev);
napi_disable(&net->napi);
cancel_work_sync(&net->disconnect_work);
tbnet_tear_down(net, true);
tb_ring_free(net->rx_ring.ring);
net->rx_ring.ring = NULL;
tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path);
tb_ring_free(net->tx_ring.ring);
net->tx_ring.ring = NULL;
return 0;
}
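/* Finalizes the frames of one packet before transmit: fills in the total
* frame count, computes the IP/TCP/UDP checksums in the Tx buffers when
* CHECKSUM_PARTIAL was requested, and DMA-syncs each frame for the device.
*/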
static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
struct tbnet_frame **frames, u32 frame_count)
{
struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
unsigned int i, len, offset = skb_transport_offset(skb);
/* Remove payload length from checksum */
u32 paylen = skb->len - skb_transport_offset(skb);
__wsum wsum = (__force __wsum)htonl(paylen);
__be16 protocol = skb->protocol;
void *data = skb->data;
void *dest = hdr + 1;
__sum16 *tucso;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
/* No need to calculate checksum so we just update the
* total frame count and sync the frames for DMA.
*/
for (i = 0; i < frame_count; i++) {
hdr = page_address(frames[i]->page);
hdr->frame_count = cpu_to_le32(frame_count);
trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
hdr->frame_index, hdr->frame_count);
dma_sync_single_for_device(dma_dev,
frames[i]->frame.buffer_phy,
tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
}
return true;
}
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, vh;
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
if (!vhdr)
return false;
protocol = vhdr->h_vlan_encapsulated_proto;
}
/* data points to the beginning of the packet. Find the absolute
* offsets of the checksum fields within the packet:
* ipcso updates the IP checksum,
* tucso updates the TCP/UDP checksum.
*/
if (protocol == htons(ETH_P_IP)) {
__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);
*ipcso = 0;
*ipcso = ip_fast_csum(dest + skb_network_offset(skb),
ip_hdr(skb)->ihl);
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
else
return false;
*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0,
ip_hdr(skb)->protocol, 0);
} else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
} else if (protocol == htons(ETH_P_IPV6)) {
tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0,
ipv6_hdr(skb)->nexthdr, 0);
} else {
return false;
}
/* First frame was headers, rest of the frames contain data.
* Calculate checksum over each frame.
*/
for (i = 0; i < frame_count; i++) {
hdr = page_address(frames[i]->page);
dest = (void *)(hdr + 1) + offset;
len = le32_to_cpu(hdr->frame_size) - offset;
wsum = csum_partial(dest, len, wsum);
hdr->frame_count = cpu_to_le32(frame_count);
trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
hdr->frame_index, hdr->frame_count);
offset = 0;
}
*tucso = csum_fold(wsum);
/* Checksum is finally calculated and we don't touch the memory
* anymore, so DMA sync the frames now.
*/
for (i = 0; i < frame_count; i++) {
dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
}
return true;
}
static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
unsigned int *len)
{
const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
*len = skb_frag_size(frag);
return kmap_local_page(skb_frag_page(frag)) + skb_frag_off(frag);
}
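/* Splits the skb (which may be a large TSO packet) into ThunderboltIP
* frames of at most TBNET_MAX_PAYLOAD_SIZE bytes, copies the data into
* the Tx buffers and queues the frames on the Tx ring.
*/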
static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct tbnet *net = netdev_priv(dev);
struct tbnet_frame *frames[MAX_SKB_FRAGS];
u16 frame_id = atomic_read(&net->frame_id);
struct thunderbolt_ip_frame_header *hdr;
unsigned int len = skb_headlen(skb);
unsigned int data_len = skb->len;
unsigned int nframes, i;
unsigned int frag = 0;
void *src = skb->data;
u32 frame_index = 0;
bool unmap = false;
void *dest;
trace_tbnet_tx_skb(skb);
nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
if (tbnet_available_buffers(&net->tx_ring) < nframes) {
netif_stop_queue(net->dev);
return NETDEV_TX_BUSY;
}
frames[frame_index] = tbnet_get_tx_buffer(net);
if (!frames[frame_index])
goto err_drop;
hdr = page_address(frames[frame_index]->page);
dest = hdr + 1;
/* If overall packet is bigger than the frame data size */
while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;
hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
hdr->frame_index = cpu_to_le16(frame_index);
hdr->frame_id = cpu_to_le16(frame_id);
do {
if (len > size_left) {
/* Fill the rest of this Tx frame, then break
* and move on to the next frame.
*/
memcpy(dest, src, size_left);
len -= size_left;
dest += size_left;
src += size_left;
break;
}
memcpy(dest, src, len);
size_left -= len;
dest += len;
if (unmap) {
kunmap_local(src);
unmap = false;
}
/* Ensure all fragments have been processed */
if (frag < skb_shinfo(skb)->nr_frags) {
/* Map and then unmap quickly */
src = tbnet_kmap_frag(skb, frag++, &len);
unmap = true;
} else if (unlikely(size_left > 0)) {
goto err_drop;
}
} while (size_left > 0);
data_len -= TBNET_MAX_PAYLOAD_SIZE;
frame_index++;
frames[frame_index] = tbnet_get_tx_buffer(net);
if (!frames[frame_index])
goto err_drop;
hdr = page_address(frames[frame_index]->page);
dest = hdr + 1;
}
hdr->frame_size = cpu_to_le32(data_len);
hdr->frame_index = cpu_to_le16(frame_index);
hdr->frame_id = cpu_to_le16(frame_id);
frames[frame_index]->frame.size = data_len + sizeof(*hdr);
/* In case the remaining data_len is smaller than a frame */
while (len < data_len) {
memcpy(dest, src, len);
data_len -= len;
dest += len;
if (unmap) {
kunmap_local(src);
unmap = false;
}
if (frag < skb_shinfo(skb)->nr_frags) {
src = tbnet_kmap_frag(skb, frag++, &len);
unmap = true;
} else if (unlikely(data_len > 0)) {
goto err_drop;
}
}
memcpy(dest, src, data_len);
if (unmap)
kunmap_local(src);
if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
goto err_drop;
for (i = 0; i < frame_index + 1; i++)
tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);
if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
atomic_inc(&net->frame_id);
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
trace_tbnet_consume_skb(skb);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
err_drop:
/* We can re-use the buffers */
net->tx_ring.cons -= frame_index;
dev_kfree_skb_any(skb);
net->stats.tx_errors++;
return NETDEV_TX_OK;
}
static void tbnet_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct tbnet *net = netdev_priv(dev);
stats->tx_packets = net->stats.tx_packets;
stats->rx_packets = net->stats.rx_packets;
stats->tx_bytes = net->stats.tx_bytes;
stats->rx_bytes = net->stats.rx_bytes;
stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
net->stats.rx_over_errors + net->stats.rx_crc_errors +
net->stats.rx_missed_errors;
stats->tx_errors = net->stats.tx_errors;
stats->rx_length_errors = net->stats.rx_length_errors;
stats->rx_over_errors = net->stats.rx_over_errors;
stats->rx_crc_errors = net->stats.rx_crc_errors;
stats->rx_missed_errors = net->stats.rx_missed_errors;
}
static const struct net_device_ops tbnet_netdev_ops = {
.ndo_open = tbnet_open,
.ndo_stop = tbnet_stop,
.ndo_start_xmit = tbnet_start_xmit,
.ndo_get_stats64 = tbnet_get_stats64,
};
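/* Derives a locally administered unicast MAC address from the physical
* port number and a hash of the local host UUID.
*/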
static void tbnet_generate_mac(struct net_device *dev)
{
const struct tbnet *net = netdev_priv(dev);
const struct tb_xdomain *xd = net->xd;
u8 addr[ETH_ALEN];
u8 phy_port;
u32 hash;
phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));
/* Unicast and locally administered MAC */
addr[0] = phy_port << 4 | 0x02;
hash = jhash2((u32 *)xd->local_uuid, 4, 0);
memcpy(addr + 1, &hash, sizeof(hash));
hash = jhash2((u32 *)xd->local_uuid, 4, hash);
addr[5] = hash & 0xff;
eth_hw_addr_set(dev, addr);
}
static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
{
struct tb_xdomain *xd = tb_service_parent(svc);
struct net_device *dev;
struct tbnet *net;
int ret;
dev = alloc_etherdev(sizeof(*net));
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, &svc->dev);
net = netdev_priv(dev);
INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
INIT_WORK(&net->connected_work, tbnet_connected_work);
INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
mutex_init(&net->connection_lock);
atomic_set(&net->command_id, 0);
atomic_set(&net->frame_id, 0);
net->svc = svc;
net->dev = dev;
net->xd = xd;
tbnet_generate_mac(dev);
strcpy(dev->name, "thunderbolt%d");
dev->netdev_ops = &tbnet_netdev_ops;
/* ThunderboltIP takes advantage of TSO packets but instead of
* segmenting them we just split the packet into Thunderbolt
* frames (maximum payload size of each frame is 4084 bytes) and
* calculate checksum over the whole packet here.
*
* The receiving side does the opposite if the host OS supports
* LRO, otherwise it needs to split the large packet into MTU
* sized smaller packets.
*
* In order to receive large packets from the networking stack,
* we need to announce support for most of the offloading
* features here.
*/
dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->features = dev->hw_features | NETIF_F_HIGHDMA;
dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);
netif_napi_add(dev, &net->napi, tbnet_poll);
/* MTU range: 68 - 65522 */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;
net->handler.uuid = &tbnet_svc_uuid;
net->handler.callback = tbnet_handle_packet;
net->handler.data = net;
tb_register_protocol_handler(&net->handler);
tb_service_set_drvdata(svc, net);
ret = register_netdev(dev);
if (ret) {
tb_unregister_protocol_handler(&net->handler);
free_netdev(dev);
return ret;
}
return 0;
}
static void tbnet_remove(struct tb_service *svc)
{
struct tbnet *net = tb_service_get_drvdata(svc);
unregister_netdev(net->dev);
tb_unregister_protocol_handler(&net->handler);
free_netdev(net->dev);
}
static void tbnet_shutdown(struct tb_service *svc)
{
tbnet_tear_down(tb_service_get_drvdata(svc), true);
}
static int tbnet_suspend(struct device *dev)
{
struct tb_service *svc = tb_to_service(dev);
struct tbnet *net = tb_service_get_drvdata(svc);
stop_login(net);
if (netif_running(net->dev)) {
netif_device_detach(net->dev);
tbnet_tear_down(net, true);
}
tb_unregister_protocol_handler(&net->handler);
return 0;
}
static int tbnet_resume(struct device *dev)
{
struct tb_service *svc = tb_to_service(dev);
struct tbnet *net = tb_service_get_drvdata(svc);
tb_register_protocol_handler(&net->handler);
netif_carrier_off(net->dev);
if (netif_running(net->dev)) {
netif_device_attach(net->dev);
start_login(net);
}
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(tbnet_pm_ops, tbnet_suspend, tbnet_resume);
static const struct tb_service_id tbnet_ids[] = {
{ TB_SERVICE("network", 1) },
{ },
};
MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);
static struct tb_service_driver tbnet_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "thunderbolt-net",
.pm = pm_sleep_ptr(&tbnet_pm_ops),
},
.probe = tbnet_probe,
.remove = tbnet_remove,
.shutdown = tbnet_shutdown,
.id_table = tbnet_ids,
};
static int __init tbnet_init(void)
{
unsigned int flags;
int ret;
tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
if (!tbnet_dir)
return -ENOMEM;
tb_property_add_immediate(tbnet_dir, "prtcid", 1);
tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
flags = TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES;
if (tbnet_e2e)
flags |= TBNET_E2E;
tb_property_add_immediate(tbnet_dir, "prtcstns", flags);
ret = tb_register_property_dir("network", tbnet_dir);
if (ret)
goto err_free_dir;
ret = tb_register_service_driver(&tbnet_driver);
if (ret)
goto err_unregister;
return 0;
err_unregister:
tb_unregister_property_dir("network", tbnet_dir);
err_free_dir:
tb_property_free_dir(tbnet_dir);
return ret;
}
module_init(tbnet_init);
static void __exit tbnet_exit(void)
{
tb_unregister_service_driver(&tbnet_driver);
tb_unregister_property_dir("network", tbnet_dir);
tb_property_free_dir(tbnet_dir);
}
module_exit(tbnet_exit);
MODULE_AUTHOR("Amir Levy <[email protected]>");
MODULE_AUTHOR("Michael Jamet <[email protected]>");
MODULE_AUTHOR("Mika Westerberg <[email protected]>");
MODULE_DESCRIPTION("Thunderbolt/USB4 network driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/thunderbolt/main.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Vxlan vni filter for collect metadata mode
*
* Authors: Roopa Prabhu <[email protected]>
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/rhashtable.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/vxlan.h>
#include "vxlan_private.h"
static inline int vxlan_vni_cmp(struct rhashtable_compare_arg *arg,
const void *ptr)
{
const struct vxlan_vni_node *vnode = ptr;
__be32 vni = *(__be32 *)arg->key;
return vnode->vni != vni;
}
const struct rhashtable_params vxlan_vni_rht_params = {
.head_offset = offsetof(struct vxlan_vni_node, vnode),
.key_offset = offsetof(struct vxlan_vni_node, vni),
.key_len = sizeof(__be32),
.nelem_hint = 3,
.max_size = VXLAN_N_VID,
.obj_cmpfn = vxlan_vni_cmp,
.automatic_shrinking = true,
};
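/* Adds or removes a single VNI node on the per-socket VNI hash lists of
* the device's IPv4 (and, if enabled, IPv6) VXLAN sockets.
*/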
static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
struct vxlan_vni_node *v,
bool del)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_dev_node *node;
struct vxlan_sock *vs;
spin_lock(&vn->sock_lock);
if (del) {
if (!hlist_unhashed(&v->hlist4.hlist))
hlist_del_init_rcu(&v->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
if (!hlist_unhashed(&v->hlist6.hlist))
hlist_del_init_rcu(&v->hlist6.hlist);
#endif
goto out;
}
#if IS_ENABLED(CONFIG_IPV6)
vs = rtnl_dereference(vxlan->vn6_sock);
if (vs && v) {
node = &v->hlist6;
hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
}
#endif
vs = rtnl_dereference(vxlan->vn4_sock);
if (vs && v) {
node = &v->hlist4;
hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
}
out:
spin_unlock(&vn->sock_lock);
}
void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
struct vxlan_sock *vs,
bool ipv6)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
struct vxlan_vni_node *v, *tmp;
struct vxlan_dev_node *node;
if (!vg)
return;
spin_lock(&vn->sock_lock);
list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
node = &v->hlist6;
else
#endif
node = &v->hlist4;
node->vxlan = vxlan;
hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
}
spin_unlock(&vn->sock_lock);
}
void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan)
{
struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_vni_node *v, *tmp;
if (!vg)
return;
spin_lock(&vn->sock_lock);
list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
hlist_del_init_rcu(&v->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
hlist_del_init_rcu(&v->hlist6.hlist);
#endif
}
spin_unlock(&vn->sock_lock);
}
static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode,
struct vxlan_vni_stats *dest)
{
int i;
memset(dest, 0, sizeof(*dest));
for_each_possible_cpu(i) {
struct vxlan_vni_stats_pcpu *pstats;
struct vxlan_vni_stats temp;
unsigned int start;
pstats = per_cpu_ptr(vninode->stats, i);
do {
start = u64_stats_fetch_begin(&pstats->syncp);
memcpy(&temp, &pstats->stats, sizeof(temp));
} while (u64_stats_fetch_retry(&pstats->syncp, start));
dest->rx_packets += temp.rx_packets;
dest->rx_bytes += temp.rx_bytes;
dest->rx_drops += temp.rx_drops;
dest->rx_errors += temp.rx_errors;
dest->tx_packets += temp.tx_packets;
dest->tx_bytes += temp.tx_bytes;
dest->tx_drops += temp.tx_drops;
dest->tx_errors += temp.tx_errors;
}
}
static void vxlan_vnifilter_stats_add(struct vxlan_vni_node *vninode,
int type, unsigned int len)
{
struct vxlan_vni_stats_pcpu *pstats = this_cpu_ptr(vninode->stats);
u64_stats_update_begin(&pstats->syncp);
switch (type) {
case VXLAN_VNI_STATS_RX:
pstats->stats.rx_bytes += len;
pstats->stats.rx_packets++;
break;
case VXLAN_VNI_STATS_RX_DROPS:
pstats->stats.rx_drops++;
break;
case VXLAN_VNI_STATS_RX_ERRORS:
pstats->stats.rx_errors++;
break;
case VXLAN_VNI_STATS_TX:
pstats->stats.tx_bytes += len;
pstats->stats.tx_packets++;
break;
case VXLAN_VNI_STATS_TX_DROPS:
pstats->stats.tx_drops++;
break;
case VXLAN_VNI_STATS_TX_ERRORS:
pstats->stats.tx_errors++;
break;
}
u64_stats_update_end(&pstats->syncp);
}
void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
struct vxlan_vni_node *vninode,
int type, unsigned int len)
{
struct vxlan_vni_node *vnode;
if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
return;
if (vninode) {
vnode = vninode;
} else {
vnode = vxlan_vnifilter_lookup(vxlan, vni);
if (!vnode)
return;
}
vxlan_vnifilter_stats_add(vnode, type, len);
}
static u32 vnirange(struct vxlan_vni_node *vbegin,
struct vxlan_vni_node *vend)
{
return (be32_to_cpu(vend->vni) - be32_to_cpu(vbegin->vni));
}
static size_t vxlan_vnifilter_entry_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct tunnel_msg))
+ nla_total_size(0) /* VXLAN_VNIFILTER_ENTRY */
+ nla_total_size(sizeof(u32)) /* VXLAN_VNIFILTER_ENTRY_START */
+ nla_total_size(sizeof(u32)) /* VXLAN_VNIFILTER_ENTRY_END */
+ nla_total_size(sizeof(struct in6_addr));/* VXLAN_VNIFILTER_ENTRY_GROUP{6} */
}
static int __vnifilter_entry_fill_stats(struct sk_buff *skb,
const struct vxlan_vni_node *vbegin)
{
struct vxlan_vni_stats vstats;
struct nlattr *vstats_attr;
vstats_attr = nla_nest_start(skb, VXLAN_VNIFILTER_ENTRY_STATS);
if (!vstats_attr)
goto out_stats_err;
vxlan_vnifilter_stats_get(vbegin, &vstats);
if (nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_BYTES,
vstats.rx_bytes, VNIFILTER_ENTRY_STATS_PAD) ||
nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_PKTS,
vstats.rx_packets, VNIFILTER_ENTRY_STATS_PAD) ||
nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_DROPS,
vstats.rx_drops, VNIFILTER_ENTRY_STATS_PAD) ||
nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_ERRORS,
vstats.rx_errors, VNIFILTER_ENTRY_STATS_PAD) ||
nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_BYTES,
vstats.tx_bytes, VNIFILTER_ENTRY_STATS_PAD) ||
nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_PKTS,
vstats.tx_packets, VNIFILTER_ENTRY_STATS_PAD) ||
nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_DROPS,
vstats.tx_drops, VNIFILTER_ENTRY_STATS_PAD) ||
nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_ERRORS,
vstats.tx_errors, VNIFILTER_ENTRY_STATS_PAD))
goto out_stats_err;
nla_nest_end(skb, vstats_attr);
return 0;
out_stats_err:
nla_nest_cancel(skb, vstats_attr);
return -EMSGSIZE;
}
static bool vxlan_fill_vni_filter_entry(struct sk_buff *skb,
struct vxlan_vni_node *vbegin,
struct vxlan_vni_node *vend,
bool fill_stats)
{
struct nlattr *ventry;
u32 vs = be32_to_cpu(vbegin->vni);
u32 ve = 0;
if (vbegin != vend)
ve = be32_to_cpu(vend->vni);
ventry = nla_nest_start(skb, VXLAN_VNIFILTER_ENTRY);
if (!ventry)
return false;
if (nla_put_u32(skb, VXLAN_VNIFILTER_ENTRY_START, vs))
goto out_err;
if (ve && nla_put_u32(skb, VXLAN_VNIFILTER_ENTRY_END, ve))
goto out_err;
if (!vxlan_addr_any(&vbegin->remote_ip)) {
if (vbegin->remote_ip.sa.sa_family == AF_INET) {
if (nla_put_in_addr(skb, VXLAN_VNIFILTER_ENTRY_GROUP,
vbegin->remote_ip.sin.sin_addr.s_addr))
goto out_err;
#if IS_ENABLED(CONFIG_IPV6)
} else {
if (nla_put_in6_addr(skb, VXLAN_VNIFILTER_ENTRY_GROUP6,
&vbegin->remote_ip.sin6.sin6_addr))
goto out_err;
#endif
}
}
if (fill_stats && __vnifilter_entry_fill_stats(skb, vbegin))
goto out_err;
nla_nest_end(skb, ventry);
return true;
out_err:
nla_nest_cancel(skb, ventry);
return false;
}
static void vxlan_vnifilter_notify(const struct vxlan_dev *vxlan,
struct vxlan_vni_node *vninode, int cmd)
{
struct tunnel_msg *tmsg;
struct sk_buff *skb;
struct nlmsghdr *nlh;
struct net *net = dev_net(vxlan->dev);
int err = -ENOBUFS;
skb = nlmsg_new(vxlan_vnifilter_entry_nlmsg_size(), GFP_KERNEL);
if (!skb)
goto out_err;
err = -EMSGSIZE;
nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*tmsg), 0);
if (!nlh)
goto out_err;
tmsg = nlmsg_data(nlh);
memset(tmsg, 0, sizeof(*tmsg));
tmsg->family = AF_BRIDGE;
tmsg->ifindex = vxlan->dev->ifindex;
if (!vxlan_fill_vni_filter_entry(skb, vninode, vninode, false))
goto out_err;
nlmsg_end(skb, nlh);
rtnl_notify(skb, net, 0, RTNLGRP_TUNNEL, NULL, GFP_KERNEL);
return;
out_err:
rtnl_set_sk_err(net, RTNLGRP_TUNNEL, err);
kfree_skb(skb);
}
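/* Dumps the VNI filter entries of one vxlan device. Consecutive VNIs that
* share the same remote IP are coalesced into a single start/end range
* unless per-VNI stats were requested.
*/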
static int vxlan_vnifilter_dump_dev(const struct net_device *dev,
struct sk_buff *skb,
struct netlink_callback *cb)
{
struct vxlan_vni_node *tmp, *v, *vbegin = NULL, *vend = NULL;
struct vxlan_dev *vxlan = netdev_priv(dev);
struct tunnel_msg *new_tmsg, *tmsg;
int idx = 0, s_idx = cb->args[1];
struct vxlan_vni_group *vg;
struct nlmsghdr *nlh;
bool dump_stats;
int err = 0;
if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
return -EINVAL;
/* RCU needed because of the vni locking rules (rcu || rtnl) */
vg = rcu_dereference(vxlan->vnigrp);
if (!vg || !vg->num_vnis)
return 0;
tmsg = nlmsg_data(cb->nlh);
dump_stats = !!(tmsg->flags & TUNNEL_MSG_FLAG_STATS);
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
RTM_NEWTUNNEL, sizeof(*new_tmsg), NLM_F_MULTI);
if (!nlh)
return -EMSGSIZE;
new_tmsg = nlmsg_data(nlh);
memset(new_tmsg, 0, sizeof(*new_tmsg));
new_tmsg->family = PF_BRIDGE;
new_tmsg->ifindex = dev->ifindex;
list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
if (idx < s_idx) {
idx++;
continue;
}
if (!vbegin) {
vbegin = v;
vend = v;
continue;
}
if (!dump_stats && vnirange(vend, v) == 1 &&
vxlan_addr_equal(&v->remote_ip, &vend->remote_ip)) {
goto update_end;
} else {
if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend,
dump_stats)) {
err = -EMSGSIZE;
break;
}
idx += vnirange(vbegin, vend) + 1;
vbegin = v;
}
update_end:
vend = v;
}
if (!err && vbegin) {
if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend, dump_stats))
err = -EMSGSIZE;
}
cb->args[1] = err ? idx : 0;
nlmsg_end(skb, nlh);
return err;
}
static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int idx = 0, err = 0, s_idx = cb->args[0];
struct net *net = sock_net(skb->sk);
struct tunnel_msg *tmsg;
struct net_device *dev;
tmsg = nlmsg_data(cb->nlh);
if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) {
NL_SET_ERR_MSG(cb->extack, "Invalid tunnelmsg flags in ancillary header");
return -EINVAL;
}
rcu_read_lock();
if (tmsg->ifindex) {
dev = dev_get_by_index_rcu(net, tmsg->ifindex);
if (!dev) {
err = -ENODEV;
goto out_err;
}
if (!netif_is_vxlan(dev)) {
NL_SET_ERR_MSG(cb->extack,
"The device is not a vxlan device");
err = -EINVAL;
goto out_err;
}
err = vxlan_vnifilter_dump_dev(dev, skb, cb);
/* if the dump completed without an error we return 0 here */
if (err != -EMSGSIZE)
goto out_err;
} else {
for_each_netdev_rcu(net, dev) {
if (!netif_is_vxlan(dev))
continue;
if (idx < s_idx)
goto skip;
err = vxlan_vnifilter_dump_dev(dev, skb, cb);
if (err == -EMSGSIZE)
break;
skip:
idx++;
}
}
cb->args[0] = idx;
rcu_read_unlock();
return skb->len;
out_err:
rcu_read_unlock();
return err;
}
static const struct nla_policy vni_filter_entry_policy[VXLAN_VNIFILTER_ENTRY_MAX + 1] = {
[VXLAN_VNIFILTER_ENTRY_START] = { .type = NLA_U32 },
[VXLAN_VNIFILTER_ENTRY_END] = { .type = NLA_U32 },
[VXLAN_VNIFILTER_ENTRY_GROUP] = { .type = NLA_BINARY,
.len = sizeof_field(struct iphdr, daddr) },
[VXLAN_VNIFILTER_ENTRY_GROUP6] = { .type = NLA_BINARY,
.len = sizeof(struct in6_addr) },
};
static const struct nla_policy vni_filter_policy[VXLAN_VNIFILTER_MAX + 1] = {
[VXLAN_VNIFILTER_ENTRY] = { .type = NLA_NESTED },
};
static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni,
union vxlan_addr *old_remote_ip,
union vxlan_addr *remote_ip,
struct netlink_ext_ack *extack)
{
struct vxlan_rdst *dst = &vxlan->default_dst;
u32 hash_index;
int err = 0;
hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
if (remote_ip && !vxlan_addr_any(remote_ip)) {
err = vxlan_fdb_update(vxlan, all_zeros_mac,
remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
NLM_F_APPEND | NLM_F_CREATE,
vxlan->cfg.dst_port,
vni,
vni,
dst->remote_ifindex,
NTF_SELF, 0, true, extack);
if (err) {
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
}
if (old_remote_ip && !vxlan_addr_any(old_remote_ip)) {
__vxlan_fdb_delete(vxlan, all_zeros_mac,
*old_remote_ip,
vxlan->cfg.dst_port,
vni, vni,
dst->remote_ifindex,
true);
}
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
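/* Updates the default fdb entry and multicast group membership of one VNI
* when its remote IP changes, falling back to the device's default dst
* remote IP when no per-VNI group address is given.
*/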
static int vxlan_vni_update_group(struct vxlan_dev *vxlan,
struct vxlan_vni_node *vninode,
union vxlan_addr *group,
bool create, bool *changed,
struct netlink_ext_ack *extack)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_rdst *dst = &vxlan->default_dst;
union vxlan_addr *newrip = NULL, *oldrip = NULL;
union vxlan_addr old_remote_ip;
int ret = 0;
memcpy(&old_remote_ip, &vninode->remote_ip, sizeof(old_remote_ip));
/* If a per-VNI remote IP is not present, use the vxlan device's
* default dst remote IP for the fdb entry.
*/
if (group && !vxlan_addr_any(group)) {
newrip = group;
} else {
if (!vxlan_addr_any(&dst->remote_ip))
newrip = &dst->remote_ip;
}
/* If an old remote IP exists and there is no new one,
* explicitly delete the old remote IP.
*/
if (!newrip && !vxlan_addr_any(&old_remote_ip))
oldrip = &old_remote_ip;
if (!newrip && !oldrip)
return 0;
if (!create && oldrip && newrip && vxlan_addr_equal(oldrip, newrip))
return 0;
ret = vxlan_update_default_fdb_entry(vxlan, vninode->vni,
oldrip, newrip,
extack);
if (ret)
goto out;
if (group)
memcpy(&vninode->remote_ip, group, sizeof(vninode->remote_ip));
if (vxlan->dev->flags & IFF_UP) {
if (vxlan_addr_multicast(&old_remote_ip) &&
!vxlan_group_used(vn, vxlan, vninode->vni,
&old_remote_ip,
vxlan->default_dst.remote_ifindex)) {
ret = vxlan_igmp_leave(vxlan, &old_remote_ip,
0);
if (ret)
goto out;
}
if (vxlan_addr_multicast(&vninode->remote_ip)) {
ret = vxlan_igmp_join(vxlan, &vninode->remote_ip, 0);
if (ret == -EADDRINUSE)
ret = 0;
if (ret)
goto out;
}
}
*changed = true;
return 0;
out:
return ret;
}
int vxlan_vnilist_update_group(struct vxlan_dev *vxlan,
union vxlan_addr *old_remote_ip,
union vxlan_addr *new_remote_ip,
struct netlink_ext_ack *extack)
{
struct list_head *headp, *hpos;
struct vxlan_vni_group *vg;
struct vxlan_vni_node *vent;
int ret;
vg = rtnl_dereference(vxlan->vnigrp);
headp = &vg->vni_list;
list_for_each_prev(hpos, headp) {
vent = list_entry(hpos, struct vxlan_vni_node, vlist);
if (vxlan_addr_any(&vent->remote_ip)) {
ret = vxlan_update_default_fdb_entry(vxlan, vent->vni,
old_remote_ip,
new_remote_ip,
extack);
if (ret)
return ret;
}
}
return 0;
}
static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
struct vxlan_vni_node *vninode)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_rdst *dst = &vxlan->default_dst;
/* If a per-VNI remote_ip is not present, delete the default
* dst remote_ip previously added for this VNI.
*/
if (!vxlan_addr_any(&vninode->remote_ip) ||
!vxlan_addr_any(&dst->remote_ip))
__vxlan_fdb_delete(vxlan, all_zeros_mac,
(vxlan_addr_any(&vninode->remote_ip) ?
dst->remote_ip : vninode->remote_ip),
vxlan->cfg.dst_port,
vninode->vni, vninode->vni,
dst->remote_ifindex,
true);
if (vxlan->dev->flags & IFF_UP) {
if (vxlan_addr_multicast(&vninode->remote_ip) &&
!vxlan_group_used(vn, vxlan, vninode->vni,
&vninode->remote_ip,
dst->remote_ifindex)) {
vxlan_igmp_leave(vxlan, &vninode->remote_ip, 0);
}
}
}
static int vxlan_vni_update(struct vxlan_dev *vxlan,
struct vxlan_vni_group *vg,
__be32 vni, union vxlan_addr *group,
bool *changed,
struct netlink_ext_ack *extack)
{
struct vxlan_vni_node *vninode;
int ret;
vninode = rhashtable_lookup_fast(&vg->vni_hash, &vni,
vxlan_vni_rht_params);
if (!vninode)
return 0;
ret = vxlan_vni_update_group(vxlan, vninode, group, false, changed,
extack);
if (ret)
return ret;
if (changed)
vxlan_vnifilter_notify(vxlan, vninode, RTM_NEWTUNNEL);
return 0;
}
static void __vxlan_vni_add_list(struct vxlan_vni_group *vg,
struct vxlan_vni_node *v)
{
struct list_head *headp, *hpos;
struct vxlan_vni_node *vent;
headp = &vg->vni_list;
list_for_each_prev(hpos, headp) {
vent = list_entry(hpos, struct vxlan_vni_node, vlist);
if (be32_to_cpu(v->vni) < be32_to_cpu(vent->vni))
continue;
else
break;
}
list_add_rcu(&v->vlist, hpos);
vg->num_vnis++;
}
static void __vxlan_vni_del_list(struct vxlan_vni_group *vg,
struct vxlan_vni_node *v)
{
list_del_rcu(&v->vlist);
vg->num_vnis--;
}
static struct vxlan_vni_node *vxlan_vni_alloc(struct vxlan_dev *vxlan,
__be32 vni)
{
struct vxlan_vni_node *vninode;
vninode = kzalloc(sizeof(*vninode), GFP_KERNEL);
if (!vninode)
return NULL;
vninode->stats = netdev_alloc_pcpu_stats(struct vxlan_vni_stats_pcpu);
if (!vninode->stats) {
kfree(vninode);
return NULL;
}
vninode->vni = vni;
vninode->hlist4.vxlan = vxlan;
#if IS_ENABLED(CONFIG_IPV6)
vninode->hlist6.vxlan = vxlan;
#endif
return vninode;
}
static void vxlan_vni_free(struct vxlan_vni_node *vninode)
{
free_percpu(vninode->stats);
kfree(vninode);
}
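/* Adds a VNI to the device's VNI filter: allocates a node, inserts it
* into the rhashtable and the sorted VNI list, links it to the socket
* lists if the device is up and sets up its default fdb/group state.
*/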
static int vxlan_vni_add(struct vxlan_dev *vxlan,
struct vxlan_vni_group *vg,
u32 vni, union vxlan_addr *group,
struct netlink_ext_ack *extack)
{
struct vxlan_vni_node *vninode;
__be32 v = cpu_to_be32(vni);
bool changed = false;
int err = 0;
if (vxlan_vnifilter_lookup(vxlan, v))
return vxlan_vni_update(vxlan, vg, v, group, &changed, extack);
err = vxlan_vni_in_use(vxlan->net, vxlan, &vxlan->cfg, v);
if (err) {
NL_SET_ERR_MSG(extack, "VNI in use");
return err;
}
vninode = vxlan_vni_alloc(vxlan, v);
if (!vninode)
return -ENOMEM;
err = rhashtable_lookup_insert_fast(&vg->vni_hash,
&vninode->vnode,
vxlan_vni_rht_params);
if (err) {
vxlan_vni_free(vninode);
return err;
}
__vxlan_vni_add_list(vg, vninode);
if (vxlan->dev->flags & IFF_UP)
vxlan_vs_add_del_vninode(vxlan, vninode, false);
err = vxlan_vni_update_group(vxlan, vninode, group, true, &changed,
extack);
if (changed)
vxlan_vnifilter_notify(vxlan, vninode, RTM_NEWTUNNEL);
return err;
}
static void vxlan_vni_node_rcu_free(struct rcu_head *rcu)
{
struct vxlan_vni_node *v;
v = container_of(rcu, struct vxlan_vni_node, rcu);
vxlan_vni_free(v);
}
static int vxlan_vni_del(struct vxlan_dev *vxlan,
struct vxlan_vni_group *vg,
u32 vni, struct netlink_ext_ack *extack)
{
struct vxlan_vni_node *vninode;
__be32 v = cpu_to_be32(vni);
int err = 0;
vg = rtnl_dereference(vxlan->vnigrp);
vninode = rhashtable_lookup_fast(&vg->vni_hash, &v,
vxlan_vni_rht_params);
if (!vninode) {
err = -ENOENT;
goto out;
}
vxlan_vni_delete_group(vxlan, vninode);
err = rhashtable_remove_fast(&vg->vni_hash,
&vninode->vnode,
vxlan_vni_rht_params);
if (err)
goto out;
__vxlan_vni_del_list(vg, vninode);
vxlan_vnifilter_notify(vxlan, vninode, RTM_DELTUNNEL);
if (vxlan->dev->flags & IFF_UP)
vxlan_vs_add_del_vninode(vxlan, vninode, true);
call_rcu(&vninode->rcu, vxlan_vni_node_rcu_free);
return 0;
out:
return err;
}
static int vxlan_vni_add_del(struct vxlan_dev *vxlan, __u32 start_vni,
__u32 end_vni, union vxlan_addr *group,
int cmd, struct netlink_ext_ack *extack)
{
struct vxlan_vni_group *vg;
int v, err = 0;
vg = rtnl_dereference(vxlan->vnigrp);
for (v = start_vni; v <= end_vni; v++) {
switch (cmd) {
case RTM_NEWTUNNEL:
err = vxlan_vni_add(vxlan, vg, v, group, extack);
break;
case RTM_DELTUNNEL:
err = vxlan_vni_del(vxlan, vg, v, extack);
break;
default:
err = -EOPNOTSUPP;
break;
}
if (err)
goto out;
}
return 0;
out:
return err;
}
static int vxlan_process_vni_filter(struct vxlan_dev *vxlan,
struct nlattr *nlvnifilter,
int cmd, struct netlink_ext_ack *extack)
{
struct nlattr *vattrs[VXLAN_VNIFILTER_ENTRY_MAX + 1];
u32 vni_start = 0, vni_end = 0;
union vxlan_addr group;
int err;
err = nla_parse_nested(vattrs,
VXLAN_VNIFILTER_ENTRY_MAX,
nlvnifilter, vni_filter_entry_policy,
extack);
if (err)
return err;
if (vattrs[VXLAN_VNIFILTER_ENTRY_START]) {
vni_start = nla_get_u32(vattrs[VXLAN_VNIFILTER_ENTRY_START]);
vni_end = vni_start;
}
if (vattrs[VXLAN_VNIFILTER_ENTRY_END])
vni_end = nla_get_u32(vattrs[VXLAN_VNIFILTER_ENTRY_END]);
if (!vni_start && !vni_end) {
NL_SET_ERR_MSG_ATTR(extack, nlvnifilter,
"neither vni start nor end found in vni entry");
return -EINVAL;
}
if (vattrs[VXLAN_VNIFILTER_ENTRY_GROUP]) {
group.sin.sin_addr.s_addr =
nla_get_in_addr(vattrs[VXLAN_VNIFILTER_ENTRY_GROUP]);
group.sa.sa_family = AF_INET;
} else if (vattrs[VXLAN_VNIFILTER_ENTRY_GROUP6]) {
group.sin6.sin6_addr =
nla_get_in6_addr(vattrs[VXLAN_VNIFILTER_ENTRY_GROUP6]);
group.sa.sa_family = AF_INET6;
} else {
memset(&group, 0, sizeof(group));
}
if (vxlan_addr_multicast(&group) && !vxlan->default_dst.remote_ifindex) {
NL_SET_ERR_MSG(extack,
"Local interface required for multicast remote group");
return -EINVAL;
}
err = vxlan_vni_add_del(vxlan, vni_start, vni_end, &group, cmd,
extack);
if (err)
return err;
return 0;
}
void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan)
{
struct vxlan_vni_node *v, *tmp;
struct vxlan_vni_group *vg;
vg = rtnl_dereference(vxlan->vnigrp);
list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
rhashtable_remove_fast(&vg->vni_hash, &v->vnode,
vxlan_vni_rht_params);
hlist_del_init_rcu(&v->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
hlist_del_init_rcu(&v->hlist6.hlist);
#endif
__vxlan_vni_del_list(vg, v);
vxlan_vnifilter_notify(vxlan, v, RTM_DELTUNNEL);
call_rcu(&v->rcu, vxlan_vni_node_rcu_free);
}
rhashtable_destroy(&vg->vni_hash);
kfree(vg);
}
int vxlan_vnigroup_init(struct vxlan_dev *vxlan)
{
struct vxlan_vni_group *vg;
int ret;
vg = kzalloc(sizeof(*vg), GFP_KERNEL);
if (!vg)
return -ENOMEM;
ret = rhashtable_init(&vg->vni_hash, &vxlan_vni_rht_params);
if (ret) {
kfree(vg);
return ret;
}
INIT_LIST_HEAD(&vg->vni_list);
rcu_assign_pointer(vxlan->vnigrp, vg);
return 0;
}
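/* Handles RTM_NEWTUNNEL/RTM_DELTUNNEL requests: validates the target
* vxlan device and walks the VXLAN_VNIFILTER_ENTRY attributes, adding or
* deleting the requested VNI ranges.
*/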
static int vxlan_vnifilter_process(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct tunnel_msg *tmsg;
struct vxlan_dev *vxlan;
struct net_device *dev;
struct nlattr *attr;
int err, vnis = 0;
int rem;
/* this should validate the header and check for remaining bytes */
err = nlmsg_parse(nlh, sizeof(*tmsg), NULL, VXLAN_VNIFILTER_MAX,
vni_filter_policy, extack);
if (err < 0)
return err;
tmsg = nlmsg_data(nlh);
dev = __dev_get_by_index(net, tmsg->ifindex);
if (!dev)
return -ENODEV;
if (!netif_is_vxlan(dev)) {
NL_SET_ERR_MSG_MOD(extack, "The device is not a vxlan device");
return -EINVAL;
}
vxlan = netdev_priv(dev);
if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
return -EOPNOTSUPP;
nlmsg_for_each_attr(attr, nlh, sizeof(*tmsg), rem) {
switch (nla_type(attr)) {
case VXLAN_VNIFILTER_ENTRY:
err = vxlan_process_vni_filter(vxlan, attr,
nlh->nlmsg_type, extack);
break;
default:
continue;
}
vnis++;
if (err)
break;
}
if (!vnis) {
NL_SET_ERR_MSG_MOD(extack, "No vnis found to process");
err = -EINVAL;
}
return err;
}
void vxlan_vnifilter_init(void)
{
rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETTUNNEL, NULL,
vxlan_vnifilter_dump, 0);
rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWTUNNEL,
vxlan_vnifilter_process, NULL, 0);
rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELTUNNEL,
vxlan_vnifilter_process, NULL, 0);
}
void vxlan_vnifilter_uninit(void)
{
rtnl_unregister(PF_BRIDGE, RTM_GETTUNNEL);
rtnl_unregister(PF_BRIDGE, RTM_NEWTUNNEL);
rtnl_unregister(PF_BRIDGE, RTM_DELTUNNEL);
}
| linux-master | drivers/net/vxlan/vxlan_vnifilter.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VXLAN: Virtual eXtensible Local Area Network
*
* Copyright (c) 2012-2013 Vyatta Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/gro.h>
#include <net/ipv6_stubs.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/tun_proto.h>
#include <net/vxlan.h>
#include <net/nexthop.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif
#include "vxlan_private.h"
#define VXLAN_VERSION "0.1"
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
/* UDP port for VXLAN traffic.
* The IANA assigned port is 4789, but the Linux default is 8472
* for compatibility with early adopters.
*/
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
unsigned int vxlan_net_id;
const u8 all_zeros_mac[ETH_ALEN + 2];
static struct rtnl_link_ops vxlan_link_ops;
static int vxlan_sock_add(struct vxlan_dev *vxlan);
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
return vs->flags & VXLAN_F_COLLECT_METADATA ||
ip_tunnel_collect_metadata();
}
/* Find VXLAN socket based on network namespace, address family, UDP port,
* enabled unshareable flags and socket device binding (see l3mdev with
* non-default VRF).
*/
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
__be16 port, u32 flags, int ifindex)
{
struct vxlan_sock *vs;
flags &= VXLAN_F_RCV_FLAGS;
hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
if (inet_sk(vs->sock->sk)->inet_sport == port &&
vxlan_get_sk_family(vs) == family &&
vs->flags == flags &&
vs->sock->sk->sk_bound_dev_if == ifindex)
return vs;
}
return NULL;
}
static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs,
int ifindex, __be32 vni,
struct vxlan_vni_node **vninode)
{
struct vxlan_vni_node *vnode;
struct vxlan_dev_node *node;
/* For flow based devices, map all packets to VNI 0 */
if (vs->flags & VXLAN_F_COLLECT_METADATA &&
!(vs->flags & VXLAN_F_VNIFILTER))
vni = 0;
hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
if (!node->vxlan)
continue;
vnode = NULL;
if (node->vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
vnode = vxlan_vnifilter_lookup(node->vxlan, vni);
if (!vnode)
continue;
} else if (node->vxlan->default_dst.remote_vni != vni) {
continue;
}
if (IS_ENABLED(CONFIG_IPV6)) {
const struct vxlan_config *cfg = &node->vxlan->cfg;
if ((cfg->flags & VXLAN_F_IPV6_LINKLOCAL) &&
cfg->remote_ifindex != ifindex)
continue;
}
if (vninode)
*vninode = vnode;
return node->vxlan;
}
return NULL;
}
/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
__be32 vni, sa_family_t family,
__be16 port, u32 flags)
{
struct vxlan_sock *vs;
vs = vxlan_find_sock(net, family, port, flags, ifindex);
if (!vs)
return NULL;
return vxlan_vs_find_vni(vs, ifindex, vni, NULL);
}
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
const struct vxlan_fdb *fdb,
u32 portid, u32 seq, int type, unsigned int flags,
const struct vxlan_rdst *rdst)
{
unsigned long now = jiffies;
struct nda_cacheinfo ci;
bool send_ip, send_eth;
struct nlmsghdr *nlh;
struct nexthop *nh;
struct ndmsg *ndm;
int nh_family;
u32 nh_id;
nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
if (nlh == NULL)
return -EMSGSIZE;
ndm = nlmsg_data(nlh);
memset(ndm, 0, sizeof(*ndm));
send_eth = send_ip = true;
rcu_read_lock();
nh = rcu_dereference(fdb->nh);
if (nh) {
nh_family = nexthop_get_family(nh);
nh_id = nh->id;
}
rcu_read_unlock();
if (type == RTM_GETNEIGH) {
if (rdst) {
send_ip = !vxlan_addr_any(&rdst->remote_ip);
ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
} else if (nh) {
ndm->ndm_family = nh_family;
}
send_eth = !is_zero_ether_addr(fdb->eth_addr);
} else
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_state = fdb->state;
ndm->ndm_ifindex = vxlan->dev->ifindex;
ndm->ndm_flags = fdb->flags;
if (rdst && rdst->offloaded)
ndm->ndm_flags |= NTF_OFFLOADED;
ndm->ndm_type = RTN_UNICAST;
if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
nla_put_s32(skb, NDA_LINK_NETNSID,
peernet2id(dev_net(vxlan->dev), vxlan->net)))
goto nla_put_failure;
if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
goto nla_put_failure;
if (nh) {
if (nla_put_u32(skb, NDA_NH_ID, nh_id))
goto nla_put_failure;
} else if (rdst) {
if (send_ip && vxlan_nla_put_addr(skb, NDA_DST,
&rdst->remote_ip))
goto nla_put_failure;
if (rdst->remote_port &&
rdst->remote_port != vxlan->cfg.dst_port &&
nla_put_be16(skb, NDA_PORT, rdst->remote_port))
goto nla_put_failure;
if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
goto nla_put_failure;
if (rdst->remote_ifindex &&
nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
goto nla_put_failure;
}
if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
nla_put_u32(skb, NDA_SRC_VNI,
be32_to_cpu(fdb->vni)))
goto nla_put_failure;
ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
ci.ndm_confirmed = 0;
ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
ci.ndm_refcnt = 0;
if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static inline size_t vxlan_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ndmsg))
+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
+ nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
+ nla_total_size(sizeof(struct nda_cacheinfo));
}
static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
struct vxlan_rdst *rd, int type)
{
struct net *net = dev_net(vxlan->dev);
struct sk_buff *skb;
int err = -ENOBUFS;
skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
if (err < 0) {
/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
const struct vxlan_fdb *fdb,
const struct vxlan_rdst *rd,
struct netlink_ext_ack *extack,
struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
fdb_info->info.dev = vxlan->dev;
fdb_info->info.extack = extack;
fdb_info->remote_ip = rd->remote_ip;
fdb_info->remote_port = rd->remote_port;
fdb_info->remote_vni = rd->remote_vni;
fdb_info->remote_ifindex = rd->remote_ifindex;
memcpy(fdb_info->eth_addr, fdb->eth_addr, ETH_ALEN);
fdb_info->vni = fdb->vni;
fdb_info->offloaded = rd->offloaded;
fdb_info->added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER;
}
static int vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
struct vxlan_fdb *fdb,
struct vxlan_rdst *rd,
bool adding,
struct netlink_ext_ack *extack)
{
struct switchdev_notifier_vxlan_fdb_info info;
enum switchdev_notifier_type notifier_type;
int ret;
if (WARN_ON(!rd))
return 0;
notifier_type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE
: SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE;
vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, NULL, &info);
ret = call_switchdev_notifiers(notifier_type, vxlan->dev,
&info.info, extack);
return notifier_to_errno(ret);
}
static int vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
struct vxlan_rdst *rd, int type, bool swdev_notify,
struct netlink_ext_ack *extack)
{
int err;
if (swdev_notify && rd) {
switch (type) {
case RTM_NEWNEIGH:
err = vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
true, extack);
if (err)
return err;
break;
case RTM_DELNEIGH:
vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
false, extack);
break;
}
}
__vxlan_fdb_notify(vxlan, fdb, rd, type);
return 0;
}
static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb f = {
.state = NUD_STALE,
};
struct vxlan_rdst remote = {
.remote_ip = *ipa, /* goes to NDA_DST */
.remote_vni = cpu_to_be32(VXLAN_N_VID),
};
vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL);
}
static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
struct vxlan_fdb f = {
.state = NUD_STALE,
};
struct vxlan_rdst remote = { };
memcpy(f.eth_addr, eth_addr, ETH_ALEN);
vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL);
}
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
u64 value = get_unaligned((u64 *)addr);
/* only want 6 bytes */
#ifdef __BIG_ENDIAN
value >>= 16;
#else
value <<= 16;
#endif
return hash_64(value, FDB_HASH_BITS);
}
u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
{
/* use 1 byte of OUI and 3 bytes of NIC */
u32 key = get_unaligned((u32 *)(addr + 2));
return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
}
u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
{
if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
return eth_vni_hash(mac, vni);
else
return eth_hash(mac);
}
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
const u8 *mac, __be32 vni)
{
return &vxlan->fdb_head[fdb_head_index(vxlan, mac, vni)];
}
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
const u8 *mac, __be32 vni)
{
struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
struct vxlan_fdb *f;
hlist_for_each_entry_rcu(f, head, hlist) {
if (ether_addr_equal(mac, f->eth_addr)) {
if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
if (vni == f->vni)
return f;
} else {
return f;
}
}
}
return NULL;
}
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
const u8 *mac, __be32 vni)
{
struct vxlan_fdb *f;
f = __vxlan_find_mac(vxlan, mac, vni);
if (f && f->used != jiffies)
f->used = jiffies;
return f;
}
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port,
__be32 vni, __u32 ifindex)
{
struct vxlan_rdst *rd;
list_for_each_entry(rd, &f->remotes, list) {
if (vxlan_addr_equal(&rd->remote_ip, ip) &&
rd->remote_port == port &&
rd->remote_vni == vni &&
rd->remote_ifindex == ifindex)
return rd;
}
return NULL;
}
int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
u8 eth_addr[ETH_ALEN + 2] = { 0 };
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
int rc = 0;
if (is_multicast_ether_addr(mac) ||
is_zero_ether_addr(mac))
return -EINVAL;
ether_addr_copy(eth_addr, mac);
rcu_read_lock();
f = __vxlan_find_mac(vxlan, eth_addr, vni);
if (!f) {
rc = -ENOENT;
goto out;
}
rdst = first_remote_rcu(f);
vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info);
out:
rcu_read_unlock();
return rc;
}
EXPORT_SYMBOL_GPL(vxlan_fdb_find_uc);
static int vxlan_fdb_notify_one(struct notifier_block *nb,
const struct vxlan_dev *vxlan,
const struct vxlan_fdb *f,
const struct vxlan_rdst *rdst,
struct netlink_ext_ack *extack)
{
struct switchdev_notifier_vxlan_fdb_info fdb_info;
int rc;
vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, extack, &fdb_info);
rc = nb->notifier_call(nb, SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
&fdb_info);
return notifier_to_errno(rc);
}
int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan;
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
unsigned int h;
int rc = 0;
if (!netif_is_vxlan(dev))
return -EINVAL;
vxlan = netdev_priv(dev);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
spin_lock_bh(&vxlan->hash_lock[h]);
hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) {
if (f->vni == vni) {
list_for_each_entry(rdst, &f->remotes, list) {
rc = vxlan_fdb_notify_one(nb, vxlan,
f, rdst,
extack);
if (rc)
goto unlock;
}
}
}
spin_unlock_bh(&vxlan->hash_lock[h]);
}
return 0;
unlock:
spin_unlock_bh(&vxlan->hash_lock[h]);
return rc;
}
EXPORT_SYMBOL_GPL(vxlan_fdb_replay);
void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
{
struct vxlan_dev *vxlan;
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
unsigned int h;
if (!netif_is_vxlan(dev))
return;
vxlan = netdev_priv(dev);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
spin_lock_bh(&vxlan->hash_lock[h]);
hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist)
if (f->vni == vni)
list_for_each_entry(rdst, &f->remotes, list)
rdst->offloaded = false;
spin_unlock_bh(&vxlan->hash_lock[h]);
}
}
EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload);
/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port, __be32 vni,
__u32 ifindex, struct vxlan_rdst *oldrd)
{
struct vxlan_rdst *rd;
rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
if (rd)
return 0;
rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
if (!rd)
return 0;
*oldrd = *rd;
dst_cache_reset(&rd->dst_cache);
rd->remote_ip = *ip;
rd->remote_port = port;
rd->remote_vni = vni;
rd->remote_ifindex = ifindex;
rd->offloaded = false;
return 1;
}
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port, __be32 vni,
__u32 ifindex, struct vxlan_rdst **rdp)
{
struct vxlan_rdst *rd;
rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
if (rd)
return 0;
rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
if (rd == NULL)
return -ENOMEM;
if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
kfree(rd);
return -ENOMEM;
}
rd->remote_ip = *ip;
rd->remote_port = port;
rd->offloaded = false;
rd->remote_vni = vni;
rd->remote_ifindex = ifindex;
list_add_tail_rcu(&rd->list, &f->remotes);
*rdp = rd;
return 1;
}
static bool vxlan_parse_gpe_proto(struct vxlanhdr *hdr, __be16 *protocol)
{
struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)hdr;
/* Need to have Next Protocol set for interfaces in GPE mode. */
if (!gpe->np_applied)
return false;
/* "The initial version is 0. If a receiver does not support the
* version indicated it MUST drop the packet.
*/
if (gpe->version != 0)
return false;
/* "When the O bit is set to 1, the packet is an OAM packet and OAM
* processing MUST occur." However, we don't implement OAM
* processing, thus drop the packet.
*/
if (gpe->oam_flag)
return false;
*protocol = tun_p_to_eth_p(gpe->next_protocol);
if (!*protocol)
return false;
return true;
}
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
unsigned int off,
struct vxlanhdr *vh, size_t hdrlen,
__be32 vni_field,
struct gro_remcsum *grc,
bool nopartial)
{
size_t start, offset;
if (skb->remcsum_offload)
return vh;
if (!NAPI_GRO_CB(skb)->csum_valid)
return NULL;
start = vxlan_rco_start(vni_field);
offset = start + vxlan_rco_offset(vni_field);
vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
start, offset, grc, nopartial);
skb->remcsum_offload = 1;
return vh;
}
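/* Common GRO setup for VXLAN: pulls the VXLAN header, handles remote
* checksum offload when negotiated, and marks held packets whose VXLAN
* flags or VNI differ as belonging to a different flow.
*/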
static struct vxlanhdr *vxlan_gro_prepare_receive(struct sock *sk,
struct list_head *head,
struct sk_buff *skb,
struct gro_remcsum *grc)
{
struct sk_buff *p;
struct vxlanhdr *vh, *vh2;
unsigned int hlen, off_vx;
struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
__be32 flags;
skb_gro_remcsum_init(grc);
off_vx = skb_gro_offset(skb);
hlen = off_vx + sizeof(*vh);
vh = skb_gro_header(skb, hlen, off_vx);
if (unlikely(!vh))
return NULL;
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
flags = vh->vx_flags;
if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
vh->vx_vni, grc,
!!(vs->flags &
VXLAN_F_REMCSUM_NOPARTIAL));
if (!vh)
return NULL;
}
skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
list_for_each_entry(p, head, list) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
vh2 = (struct vxlanhdr *)(p->data + off_vx);
if (vh->vx_flags != vh2->vx_flags ||
vh->vx_vni != vh2->vx_vni) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
}
return vh;
}
static struct sk_buff *vxlan_gro_receive(struct sock *sk,
struct list_head *head,
struct sk_buff *skb)
{
struct sk_buff *pp = NULL;
struct gro_remcsum grc;
int flush = 1;
if (vxlan_gro_prepare_receive(sk, head, skb, &grc)) {
pp = call_gro_receive(eth_gro_receive, head, skb);
flush = 0;
}
skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
return pp;
}
static struct sk_buff *vxlan_gpe_gro_receive(struct sock *sk,
struct list_head *head,
struct sk_buff *skb)
{
const struct packet_offload *ptype;
struct sk_buff *pp = NULL;
struct gro_remcsum grc;
struct vxlanhdr *vh;
__be16 protocol;
int flush = 1;
vh = vxlan_gro_prepare_receive(sk, head, skb, &grc);
if (vh) {
if (!vxlan_parse_gpe_proto(vh, &protocol))
goto out;
ptype = gro_find_receive_by_type(protocol);
if (!ptype)
goto out;
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
}
out:
skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
return pp;
}
static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
/* Sets 'skb->inner_mac_header' since we are always called with
* 'skb->encapsulation' set.
*/
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
static int vxlan_gpe_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
struct vxlanhdr *vh = (struct vxlanhdr *)(skb->data + nhoff);
const struct packet_offload *ptype;
int err = -ENOSYS;
__be16 protocol;
if (!vxlan_parse_gpe_proto(vh, &protocol))
return err;
ptype = gro_find_complete_by_type(protocol);
if (ptype)
err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
return err;
}
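/* Allocate and initialise an FDB entry; the caller is responsible for
* linking it into the hash table (see vxlan_fdb_insert()).
*/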
static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac,
__u16 state, __be32 src_vni,
__u16 ndm_flags)
{
struct vxlan_fdb *f;
f = kmalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
return NULL;
f->state = state;
f->flags = ndm_flags;
f->updated = f->used = jiffies;
f->vni = src_vni;
f->nh = NULL;
RCU_INIT_POINTER(f->vdev, vxlan);
INIT_LIST_HEAD(&f->nh_list);
INIT_LIST_HEAD(&f->remotes);
memcpy(f->eth_addr, mac, ETH_ALEN);
return f;
}
static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac,
__be32 src_vni, struct vxlan_fdb *f)
{
++vxlan->addrcnt;
hlist_add_head_rcu(&f->hlist,
vxlan_fdb_head(vxlan, mac, src_vni));
}
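/* Attach an FDB entry to the nexthop group identified by nhid. The
* nexthop must exist, be an FDB multipath group and match the address
* family of the default remote. Returns 1 if the nexthop was changed,
* 0 if it was already in use, or a negative errno.
*/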
static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
u32 nhid, struct netlink_ext_ack *extack)
{
struct nexthop *old_nh = rtnl_dereference(fdb->nh);
struct nexthop *nh;
int err = -EINVAL;
if (old_nh && old_nh->id == nhid)
return 0;
nh = nexthop_find_by_id(vxlan->net, nhid);
if (!nh) {
NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
goto err_inval;
}
if (!nexthop_get(nh)) {
NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
nh = NULL;
goto err_inval;
}
if (!nexthop_is_fdb(nh)) {
NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop");
goto err_inval;
}
if (!nexthop_is_multipath(nh)) {
NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group");
goto err_inval;
}
/* check nexthop group family */
switch (vxlan->default_dst.remote_ip.sa.sa_family) {
case AF_INET:
if (!nexthop_has_v4(nh)) {
err = -EAFNOSUPPORT;
NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
goto err_inval;
}
break;
case AF_INET6:
if (nexthop_has_v4(nh)) {
err = -EAFNOSUPPORT;
NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
goto err_inval;
}
}
if (old_nh) {
list_del_rcu(&fdb->nh_list);
nexthop_put(old_nh);
}
rcu_assign_pointer(fdb->nh, nh);
list_add_tail_rcu(&fdb->nh_list, &nh->fdb_list);
return 1;
err_inval:
if (nh)
nexthop_put(nh);
return err;
}
int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __be16 port, __be32 src_vni,
__be32 vni, __u32 ifindex, __u16 ndm_flags,
u32 nhid, struct vxlan_fdb **fdb,
struct netlink_ext_ack *extack)
{
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
int rc;
if (vxlan->cfg.addrmax &&
vxlan->addrcnt >= vxlan->cfg.addrmax)
return -ENOSPC;
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
if (!f)
return -ENOMEM;
if (nhid)
rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack);
else
rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
if (rc < 0)
goto errout;
*fdb = f;
return 0;
errout:
kfree(f);
return rc;
}
static void __vxlan_fdb_free(struct vxlan_fdb *f)
{
struct vxlan_rdst *rd, *nd;
struct nexthop *nh;
nh = rcu_dereference_raw(f->nh);
if (nh) {
rcu_assign_pointer(f->nh, NULL);
rcu_assign_pointer(f->vdev, NULL);
nexthop_put(nh);
}
list_for_each_entry_safe(rd, nd, &f->remotes, list) {
dst_cache_destroy(&rd->dst_cache);
kfree(rd);
}
kfree(f);
}
static void vxlan_fdb_free(struct rcu_head *head)
{
struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
__vxlan_fdb_free(f);
}
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
bool do_notify, bool swdev_notify)
{
struct vxlan_rdst *rd;
netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr);
--vxlan->addrcnt;
if (do_notify) {
if (rcu_access_pointer(f->nh))
vxlan_fdb_notify(vxlan, f, NULL, RTM_DELNEIGH,
swdev_notify, NULL);
else
list_for_each_entry(rd, &f->remotes, list)
vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH,
swdev_notify, NULL);
}
hlist_del_rcu(&f->hlist);
list_del_rcu(&f->nh_list);
call_rcu(&f->rcu, vxlan_fdb_free);
}
static void vxlan_dst_free(struct rcu_head *head)
{
struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
dst_cache_destroy(&rd->dst_cache);
kfree(rd);
}
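/* Update an existing FDB entry in place: refresh state and flags,
* replace or append destinations as requested by the netlink flags,
* and send a notification. The destination change is rolled back if
* the notification fails.
*/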
static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
union vxlan_addr *ip,
__u16 state, __u16 flags,
__be16 port, __be32 vni,
__u32 ifindex, __u16 ndm_flags,
struct vxlan_fdb *f, u32 nhid,
bool swdev_notify,
struct netlink_ext_ack *extack)
{
__u16 fdb_flags = (ndm_flags & ~NTF_USE);
struct vxlan_rdst *rd = NULL;
struct vxlan_rdst oldrd;
int notify = 0;
int rc = 0;
int err;
if (nhid && !rcu_access_pointer(f->nh)) {
NL_SET_ERR_MSG(extack,
"Cannot replace an existing non nexthop fdb with a nexthop");
return -EOPNOTSUPP;
}
if (nhid && (flags & NLM_F_APPEND)) {
NL_SET_ERR_MSG(extack,
"Cannot append to a nexthop fdb");
return -EOPNOTSUPP;
}
/* Do not allow an externally learned entry to take over an entry added
* by the user.
*/
if (!(fdb_flags & NTF_EXT_LEARNED) ||
!(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
if (f->state != state) {
f->state = state;
f->updated = jiffies;
notify = 1;
}
if (f->flags != fdb_flags) {
f->flags = fdb_flags;
f->updated = jiffies;
notify = 1;
}
}
if ((flags & NLM_F_REPLACE)) {
/* Only change unicasts */
if (!(is_multicast_ether_addr(f->eth_addr) ||
is_zero_ether_addr(f->eth_addr))) {
if (nhid) {
rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack);
if (rc < 0)
return rc;
} else {
rc = vxlan_fdb_replace(f, ip, port, vni,
ifindex, &oldrd);
}
notify |= rc;
} else {
NL_SET_ERR_MSG(extack, "Cannot replace non-unicast fdb entries");
return -EOPNOTSUPP;
}
}
if ((flags & NLM_F_APPEND) &&
(is_multicast_ether_addr(f->eth_addr) ||
is_zero_ether_addr(f->eth_addr))) {
rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
if (rc < 0)
return rc;
notify |= rc;
}
if (ndm_flags & NTF_USE)
f->used = jiffies;
if (notify) {
if (rd == NULL)
rd = first_remote_rtnl(f);
err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH,
swdev_notify, extack);
if (err)
goto err_notify;
}
return 0;
err_notify:
if (nhid)
return err;
if ((flags & NLM_F_REPLACE) && rc)
*rd = oldrd;
else if ((flags & NLM_F_APPEND) && rc) {
list_del_rcu(&rd->list);
call_rcu(&rd->rcu, vxlan_dst_free);
}
return err;
}
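/* Create a new FDB entry, link it into the hash table and notify
* listeners; the entry is destroyed again if notification fails.
*/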
static int vxlan_fdb_update_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
__be16 port, __be32 src_vni, __be32 vni,
__u32 ifindex, __u16 ndm_flags, u32 nhid,
bool swdev_notify,
struct netlink_ext_ack *extack)
{
__u16 fdb_flags = (ndm_flags & ~NTF_USE);
struct vxlan_fdb *f;
int rc;
/* Disallow replace to add a multicast entry */
if ((flags & NLM_F_REPLACE) &&
(is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
return -EOPNOTSUPP;
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
vni, ifindex, fdb_flags, nhid, &f, extack);
if (rc < 0)
return rc;
vxlan_fdb_insert(vxlan, mac, src_vni, f);
rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
swdev_notify, extack);
if (rc)
goto err_notify;
return 0;
err_notify:
vxlan_fdb_destroy(vxlan, f, false, false);
return rc;
}
/* Add new entry to forwarding table -- assumes lock held */
int vxlan_fdb_update(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
__be16 port, __be32 src_vni, __be32 vni,
__u32 ifindex, __u16 ndm_flags, u32 nhid,
bool swdev_notify,
struct netlink_ext_ack *extack)
{
struct vxlan_fdb *f;
f = __vxlan_find_mac(vxlan, mac, src_vni);
if (f) {
if (flags & NLM_F_EXCL) {
netdev_dbg(vxlan->dev,
"lost race to create %pM\n", mac);
return -EEXIST;
}
return vxlan_fdb_update_existing(vxlan, ip, state, flags, port,
vni, ifindex, ndm_flags, f,
nhid, swdev_notify, extack);
} else {
if (!(flags & NLM_F_CREATE))
return -ENOENT;
return vxlan_fdb_update_create(vxlan, mac, ip, state, flags,
port, src_vni, vni, ifindex,
ndm_flags, nhid, swdev_notify,
extack);
}
}
static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
struct vxlan_rdst *rd, bool swdev_notify)
{
list_del_rcu(&rd->list);
vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify, NULL);
call_rcu(&rd->rcu, vxlan_dst_free);
}
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
__be32 *vni, u32 *ifindex, u32 *nhid,
struct netlink_ext_ack *extack)
{
struct net *net = dev_net(vxlan->dev);
int err;
if (tb[NDA_NH_ID] &&
(tb[NDA_DST] || tb[NDA_VNI] || tb[NDA_IFINDEX] || tb[NDA_PORT])) {
NL_SET_ERR_MSG(extack, "DST, VNI, ifindex and port are mutually exclusive with NH_ID");
return -EINVAL;
}
if (tb[NDA_DST]) {
err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
if (err) {
NL_SET_ERR_MSG(extack, "Unsupported address family");
return err;
}
} else {
union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
if (remote->sa.sa_family == AF_INET) {
ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
} else {
ip->sin6.sin6_addr = in6addr_any;
ip->sa.sa_family = AF_INET6;
#endif
}
}
if (tb[NDA_PORT]) {
if (nla_len(tb[NDA_PORT]) != sizeof(__be16)) {
NL_SET_ERR_MSG(extack, "Invalid vxlan port");
return -EINVAL;
}
*port = nla_get_be16(tb[NDA_PORT]);
} else {
*port = vxlan->cfg.dst_port;
}
if (tb[NDA_VNI]) {
if (nla_len(tb[NDA_VNI]) != sizeof(u32)) {
NL_SET_ERR_MSG(extack, "Invalid vni");
return -EINVAL;
}
*vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
} else {
*vni = vxlan->default_dst.remote_vni;
}
if (tb[NDA_SRC_VNI]) {
if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32)) {
NL_SET_ERR_MSG(extack, "Invalid src vni");
return -EINVAL;
}
*src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI]));
} else {
*src_vni = vxlan->default_dst.remote_vni;
}
if (tb[NDA_IFINDEX]) {
struct net_device *tdev;
if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) {
NL_SET_ERR_MSG(extack, "Invalid ifindex");
return -EINVAL;
}
*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
tdev = __dev_get_by_index(net, *ifindex);
if (!tdev) {
NL_SET_ERR_MSG(extack, "Device not found");
return -EADDRNOTAVAIL;
}
} else {
*ifindex = 0;
}
if (tb[NDA_NH_ID])
*nhid = nla_get_u32(tb[NDA_NH_ID]);
else
*nhid = 0;
return 0;
}
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid, u16 flags,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
/* struct net *net = dev_net(vxlan->dev); */
union vxlan_addr ip;
__be16 port;
__be32 src_vni, vni;
u32 ifindex, nhid;
u32 hash_index;
int err;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
pr_info("RTM_NEWNEIGH with invalid state %#x\n",
ndm->ndm_state);
return -EINVAL;
}
if (!tb || (!tb[NDA_DST] && !tb[NDA_NH_ID]))
return -EINVAL;
err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex,
&nhid, extack);
if (err)
return err;
if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
return -EAFNOSUPPORT;
hash_index = fdb_head_index(vxlan, addr, src_vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
port, src_vni, vni, ifindex,
ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
nhid, true, extack);
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
const unsigned char *addr, union vxlan_addr ip,
__be16 port, __be32 src_vni, __be32 vni,
u32 ifindex, bool swdev_notify)
{
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
int err = -ENOENT;
f = vxlan_find_mac(vxlan, addr, src_vni);
if (!f)
return err;
if (!vxlan_addr_any(&ip)) {
rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
if (!rd)
goto out;
}
/* remove a destination if it's not the only one on the list,
* otherwise destroy the fdb entry
*/
if (rd && !list_is_singular(&f->remotes)) {
vxlan_fdb_dst_destroy(vxlan, f, rd, swdev_notify);
goto out;
}
vxlan_fdb_destroy(vxlan, f, true, swdev_notify);
out:
return 0;
}
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
union vxlan_addr ip;
__be32 src_vni, vni;
u32 ifindex, nhid;
u32 hash_index;
__be16 port;
int err;
err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex,
&nhid, extack);
if (err)
return err;
hash_index = fdb_head_index(vxlan, addr, src_vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
true);
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev, int *idx)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
unsigned int h;
int err = 0;
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct vxlan_fdb *f;
rcu_read_lock();
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
struct vxlan_rdst *rd;
if (rcu_access_pointer(f->nh)) {
if (*idx < cb->args[2])
goto skip_nh;
err = vxlan_fdb_info(skb, vxlan, f,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI, NULL);
if (err < 0) {
rcu_read_unlock();
goto out;
}
skip_nh:
*idx += 1;
continue;
}
list_for_each_entry_rcu(rd, &f->remotes, list) {
if (*idx < cb->args[2])
goto skip;
err = vxlan_fdb_info(skb, vxlan, f,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI, rd);
if (err < 0) {
rcu_read_unlock();
goto out;
}
skip:
*idx += 1;
}
}
rcu_read_unlock();
}
out:
return err;
}
static int vxlan_fdb_get(struct sk_buff *skb,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
u16 vid, u32 portid, u32 seq,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
__be32 vni;
int err;
if (tb[NDA_VNI])
vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
else
vni = vxlan->default_dst.remote_vni;
rcu_read_lock();
f = __vxlan_find_mac(vxlan, addr, vni);
if (!f) {
NL_SET_ERR_MSG(extack, "Fdb entry not found");
err = -ENOENT;
goto errout;
}
err = vxlan_fdb_info(skb, vxlan, f, portid, seq,
RTM_NEWNEIGH, 0, first_remote_rcu(f));
errout:
rcu_read_unlock();
return err;
}
/* Watch incoming packets to learn mapping between Ethernet address
* and Tunnel endpoint.
* Return true if packet is bogus and should be dropped.
*/
static bool vxlan_snoop(struct net_device *dev,
union vxlan_addr *src_ip, const u8 *src_mac,
u32 src_ifindex, __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
u32 ifindex = 0;
#if IS_ENABLED(CONFIG_IPV6)
if (src_ip->sa.sa_family == AF_INET6 &&
(ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
ifindex = src_ifindex;
#endif
f = vxlan_find_mac(vxlan, src_mac, vni);
if (likely(f)) {
struct vxlan_rdst *rdst = first_remote_rcu(f);
if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) &&
rdst->remote_ifindex == ifindex))
return false;
/* Don't migrate static entries, drop packets */
if (f->state & (NUD_PERMANENT | NUD_NOARP))
return true;
/* Don't override an fdb with nexthop with a learnt entry */
if (rcu_access_pointer(f->nh))
return true;
if (net_ratelimit())
netdev_info(dev,
"%pM migrated from %pIS to %pIS\n",
src_mac, &rdst->remote_ip.sa, &src_ip->sa);
rdst->remote_ip = *src_ip;
f->updated = jiffies;
vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
} else {
u32 hash_index = fdb_head_index(vxlan, src_mac, vni);
/* learned new entry */
spin_lock(&vxlan->hash_lock[hash_index]);
/* close off race between vxlan_flush and incoming packets */
if (netif_running(dev))
vxlan_fdb_update(vxlan, src_mac, src_ip,
NUD_REACHABLE,
NLM_F_EXCL|NLM_F_CREATE,
vxlan->cfg.dst_port,
vni,
vxlan->default_dst.remote_vni,
ifindex, NTF_SELF, 0, true, NULL);
spin_unlock(&vxlan->hash_lock[hash_index]);
}
return false;
}
static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
struct vxlan_net *vn;
if (!vs)
return false;
if (!refcount_dec_and_test(&vs->refcnt))
return false;
vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
spin_lock(&vn->sock_lock);
hlist_del_rcu(&vs->hlist);
udp_tunnel_notify_del_rx_port(vs->sock,
(vs->flags & VXLAN_F_GPE) ?
UDP_TUNNEL_TYPE_VXLAN_GPE :
UDP_TUNNEL_TYPE_VXLAN);
spin_unlock(&vn->sock_lock);
return true;
}
static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
#endif
RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
synchronize_net();
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vs_del_vnigrp(vxlan);
else
vxlan_vs_del_dev(vxlan);
if (__vxlan_sock_release_prep(sock4)) {
udp_tunnel_sock_release(sock4->sock);
kfree(sock4);
}
#if IS_ENABLED(CONFIG_IPV6)
if (__vxlan_sock_release_prep(sock6)) {
udp_tunnel_sock_release(sock6->sock);
kfree(sock6);
}
#endif
}
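/* Remote checksum offload handling on the non-GRO receive path: restore
* the inner checksum and clear the RCO bits from the header. Returns
* false if the packet is too short to process.
*/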
static bool vxlan_remcsum(struct vxlanhdr *unparsed,
struct sk_buff *skb, u32 vxflags)
{
size_t start, offset;
if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
goto out;
start = vxlan_rco_start(unparsed->vx_vni);
offset = start + vxlan_rco_offset(unparsed->vx_vni);
if (!pskb_may_pull(skb, offset + sizeof(u16)))
return false;
skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
!!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
out:
unparsed->vx_flags &= ~VXLAN_HF_RCO;
unparsed->vx_vni &= VXLAN_VNI_MASK;
return true;
}
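/* Extract the Group Based Policy extension: copy the policy ID and
* flags into the metadata (and skb->mark when not in collect-metadata
* mode), then clear the GBP bits from the header.
*/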
static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
struct sk_buff *skb, u32 vxflags,
struct vxlan_metadata *md)
{
struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
struct metadata_dst *tun_dst;
if (!(unparsed->vx_flags & VXLAN_HF_GBP))
goto out;
md->gbp = ntohs(gbp->policy_id);
tun_dst = (struct metadata_dst *)skb_dst(skb);
if (tun_dst) {
tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
tun_dst->u.tun_info.options_len = sizeof(*md);
}
if (gbp->dont_learn)
md->gbp |= VXLAN_GBP_DONT_LEARN;
if (gbp->policy_applied)
md->gbp |= VXLAN_GBP_POLICY_APPLIED;
/* In flow-based mode, GBP is carried in dst_metadata */
if (!(vxflags & VXLAN_F_COLLECT_METADATA))
skb->mark = md->gbp;
out:
unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
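/* Prepare the inner Ethernet frame after decapsulation: reset the MAC
* header, drop looped-back frames, and optionally learn the outer
* source address. Returns false if the frame should be dropped.
*/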
static bool vxlan_set_mac(struct vxlan_dev *vxlan,
struct vxlan_sock *vs,
struct sk_buff *skb, __be32 vni)
{
union vxlan_addr saddr;
u32 ifindex = skb->dev->ifindex;
skb_reset_mac_header(skb);
skb->protocol = eth_type_trans(skb, vxlan->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
/* Ignore packet loops (and multicast echo) */
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;
/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
} else {
saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
saddr.sa.sa_family = AF_INET6;
#endif
}
if ((vxlan->cfg.flags & VXLAN_F_LEARN) &&
vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, ifindex, vni))
return false;
return true;
}
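/* Propagate ECN from the outer IP header to the inner packet. Returns
* false if ECN rules require the packet to be dropped.
*/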
static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
struct sk_buff *skb)
{
int err = 0;
if (vxlan_get_sk_family(vs) == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
else
err = IP6_ECN_decapsulate(oiph, skb);
#endif
if (unlikely(err) && log_ecn_error) {
if (vxlan_get_sk_family(vs) == AF_INET)
net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
&((struct iphdr *)oiph)->saddr,
((struct iphdr *)oiph)->tos);
else
net_info_ratelimited("non-ECT from %pI6\n",
&((struct ipv6hdr *)oiph)->saddr);
}
return err <= 1;
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_vni_node *vninode = NULL;
struct vxlan_dev *vxlan;
struct vxlan_sock *vs;
struct vxlanhdr unparsed;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
__be16 protocol = htons(ETH_P_TEB);
bool raw_proto = false;
void *oiph;
__be32 vni = 0;
/* Need UDP and VXLAN header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
goto drop;
unparsed = *vxlan_hdr(skb);
/* VNI flag always required to be set */
if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
ntohl(vxlan_hdr(skb)->vx_flags),
ntohl(vxlan_hdr(skb)->vx_vni));
/* Return non vxlan pkt */
goto drop;
}
unparsed.vx_flags &= ~VXLAN_HF_VNI;
unparsed.vx_vni &= ~VXLAN_VNI_MASK;
vs = rcu_dereference_sk_user_data(sk);
if (!vs)
goto drop;
vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode);
if (!vxlan)
goto drop;
/* For backwards compatibility, only allow reserved fields to be
* used by VXLAN extensions if explicitly requested.
*/
if (vs->flags & VXLAN_F_GPE) {
if (!vxlan_parse_gpe_proto(&unparsed, &protocol))
goto drop;
unparsed.vx_flags &= ~VXLAN_GPE_USED_BITS;
raw_proto = true;
}
if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
!net_eq(vxlan->net, dev_net(vxlan->dev))))
goto drop;
if (vs->flags & VXLAN_F_REMCSUM_RX)
if (unlikely(!vxlan_remcsum(&unparsed, skb, vs->flags)))
goto drop;
if (vxlan_collect_metadata(vs)) {
struct metadata_dst *tun_dst;
tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
key32_to_tunnel_id(vni), sizeof(*md));
if (!tun_dst)
goto drop;
md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
skb_dst_set(skb, (struct dst_entry *)tun_dst);
} else {
memset(md, 0, sizeof(*md));
}
if (vs->flags & VXLAN_F_GBP)
vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
/* Note that GBP and GPE can never be active together. This is
* ensured in vxlan_dev_configure.
*/
if (unparsed.vx_flags || unparsed.vx_vni) {
/* If there are any unprocessed flags remaining treat
* this as a malformed packet. This behavior diverges from
* VXLAN RFC (RFC7348), which stipulates that bits set in reserved
* fields are to be ignored. The approach here
* maintains compatibility with previous stack code, and also
* is more robust and provides a little more security in
* adding extensions to VXLAN.
*/
goto drop;
}
if (!raw_proto) {
if (!vxlan_set_mac(vxlan, vs, skb, vni))
goto drop;
} else {
skb_reset_mac_header(skb);
skb->dev = vxlan->dev;
skb->pkt_type = PACKET_HOST;
}
oiph = skb_network_header(skb);
skb_reset_network_header(skb);
if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
++vxlan->dev->stats.rx_frame_errors;
++vxlan->dev->stats.rx_errors;
vxlan_vnifilter_count(vxlan, vni, vninode,
VXLAN_VNI_STATS_RX_ERRORS, 0);
goto drop;
}
rcu_read_lock();
if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
rcu_read_unlock();
dev_core_stats_rx_dropped_inc(vxlan->dev);
vxlan_vnifilter_count(vxlan, vni, vninode,
VXLAN_VNI_STATS_RX_DROPS, 0);
goto drop;
}
dev_sw_netstats_rx_add(vxlan->dev, skb->len);
vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len);
gro_cells_receive(&vxlan->gro_cells, skb);
rcu_read_unlock();
return 0;
drop:
/* Consume bad packet */
kfree_skb(skb);
return 0;
}
/* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */
static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_dev *vxlan;
struct vxlan_sock *vs;
struct vxlanhdr *hdr;
__be32 vni;
if (!pskb_may_pull(skb, skb_transport_offset(skb) + VXLAN_HLEN))
return -EINVAL;
hdr = vxlan_hdr(skb);
if (!(hdr->vx_flags & VXLAN_HF_VNI))
return -EINVAL;
vs = rcu_dereference_sk_user_data(sk);
if (!vs)
return -ENOENT;
vni = vxlan_vni(hdr->vx_vni);
vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, NULL);
if (!vxlan)
return -ENOENT;
return 0;
}
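/* ARP proxying: answer ARP requests from the local neighbour table
* instead of flooding them, and raise an L3 miss notification when the
* target is unknown and L3MISS is enabled.
*/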
static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct arphdr *parp;
u8 *arpptr, *sha;
__be32 sip, tip;
struct neighbour *n;
if (dev->flags & IFF_NOARP)
goto out;
if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
dev->stats.tx_dropped++;
goto out;
}
parp = arp_hdr(skb);
if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
parp->ar_pro != htons(ETH_P_IP) ||
parp->ar_op != htons(ARPOP_REQUEST) ||
parp->ar_hln != dev->addr_len ||
parp->ar_pln != 4)
goto out;
arpptr = (u8 *)parp + sizeof(struct arphdr);
sha = arpptr;
arpptr += dev->addr_len; /* sha */
memcpy(&sip, arpptr, sizeof(sip));
arpptr += sizeof(sip);
arpptr += dev->addr_len; /* tha */
memcpy(&tip, arpptr, sizeof(tip));
if (ipv4_is_loopback(tip) ||
ipv4_is_multicast(tip))
goto out;
n = neigh_lookup(&arp_tbl, &tip, dev);
if (n) {
struct vxlan_fdb *f;
struct sk_buff *reply;
if (!(READ_ONCE(n->nud_state) & NUD_CONNECTED)) {
neigh_release(n);
goto out;
}
f = vxlan_find_mac(vxlan, n->ha, vni);
if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
/* bridge-local neighbor */
neigh_release(n);
goto out;
}
reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
n->ha, sha);
neigh_release(n);
if (reply == NULL)
goto out;
skb_reset_mac_header(reply);
__skb_pull(reply, skb_network_offset(reply));
reply->ip_summed = CHECKSUM_UNNECESSARY;
reply->pkt_type = PACKET_HOST;
if (netif_rx(reply) == NET_RX_DROP) {
dev->stats.rx_dropped++;
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = tip,
.sin.sin_family = AF_INET,
};
vxlan_ip_miss(dev, &ipa);
}
out:
consume_skb(skb);
return NETDEV_TX_OK;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
struct neighbour *n, bool isrouter)
{
struct net_device *dev = request->dev;
struct sk_buff *reply;
struct nd_msg *ns, *na;
struct ipv6hdr *pip6;
u8 *daddr;
int na_olen = 8; /* opt hdr + ETH_ALEN for target */
int ns_olen;
int i, len;
if (dev == NULL || !pskb_may_pull(request, request->len))
return NULL;
len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
sizeof(*na) + na_olen + dev->needed_tailroom;
reply = alloc_skb(len, GFP_ATOMIC);
if (reply == NULL)
return NULL;
reply->protocol = htons(ETH_P_IPV6);
reply->dev = dev;
skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
skb_push(reply, sizeof(struct ethhdr));
skb_reset_mac_header(reply);
ns = (struct nd_msg *)(ipv6_hdr(request) + 1);
daddr = eth_hdr(request)->h_source;
ns_olen = request->len - skb_network_offset(request) -
sizeof(struct ipv6hdr) - sizeof(*ns);
for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
if (!ns->opt[i + 1]) {
kfree_skb(reply);
return NULL;
}
if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
break;
}
}
/* Ethernet header */
ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
reply->protocol = htons(ETH_P_IPV6);
skb_pull(reply, sizeof(struct ethhdr));
skb_reset_network_header(reply);
skb_put(reply, sizeof(struct ipv6hdr));
/* IPv6 header */
pip6 = ipv6_hdr(reply);
memset(pip6, 0, sizeof(struct ipv6hdr));
pip6->version = 6;
pip6->priority = ipv6_hdr(request)->priority;
pip6->nexthdr = IPPROTO_ICMPV6;
pip6->hop_limit = 255;
pip6->daddr = ipv6_hdr(request)->saddr;
pip6->saddr = *(struct in6_addr *)n->primary_key;
skb_pull(reply, sizeof(struct ipv6hdr));
skb_reset_transport_header(reply);
/* Neighbor Advertisement */
na = skb_put_zero(reply, sizeof(*na) + na_olen);
na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
na->icmph.icmp6_router = isrouter;
na->icmph.icmp6_override = 1;
na->icmph.icmp6_solicited = 1;
na->target = ns->target;
ether_addr_copy(&na->opt[2], n->ha);
na->opt[0] = ND_OPT_TARGET_LL_ADDR;
na->opt[1] = na_olen >> 3;
na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
csum_partial(na, sizeof(*na)+na_olen, 0));
pip6->payload_len = htons(sizeof(*na)+na_olen);
skb_push(reply, sizeof(struct ipv6hdr));
reply->ip_summed = CHECKSUM_UNNECESSARY;
return reply;
}
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
const struct in6_addr *daddr;
const struct ipv6hdr *iphdr;
struct inet6_dev *in6_dev;
struct neighbour *n;
struct nd_msg *msg;
rcu_read_lock();
in6_dev = __in6_dev_get(dev);
if (!in6_dev)
goto out;
iphdr = ipv6_hdr(skb);
daddr = &iphdr->daddr;
msg = (struct nd_msg *)(iphdr + 1);
if (ipv6_addr_loopback(daddr) ||
ipv6_addr_is_multicast(&msg->target))
goto out;
n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
if (n) {
struct vxlan_fdb *f;
struct sk_buff *reply;
if (!(READ_ONCE(n->nud_state) & NUD_CONNECTED)) {
neigh_release(n);
goto out;
}
f = vxlan_find_mac(vxlan, n->ha, vni);
if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
/* bridge-local neighbor */
neigh_release(n);
goto out;
}
reply = vxlan_na_create(skb, n,
!!(f ? f->flags & NTF_ROUTER : 0));
neigh_release(n);
if (reply == NULL)
goto out;
if (netif_rx(reply) == NET_RX_DROP) {
dev->stats.rx_dropped++;
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin6.sin6_addr = msg->target,
.sin6.sin6_family = AF_INET6,
};
vxlan_ip_miss(dev, &ipa);
}
out:
rcu_read_unlock();
consume_skb(skb);
return NETDEV_TX_OK;
}
#endif
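/* Route short-circuiting (RSC): if the inner destination IP resolves to
* a neighbour known on this device, rewrite the destination MAC so the
* frame is forwarded directly. Returns true if the frame was rewritten.
*/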
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct neighbour *n;
if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
return false;
n = NULL;
switch (ntohs(eth_hdr(skb)->h_proto)) {
case ETH_P_IP:
{
struct iphdr *pip;
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
return false;
pip = ip_hdr(skb);
n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = pip->daddr,
.sin.sin_family = AF_INET,
};
vxlan_ip_miss(dev, &ipa);
return false;
}
break;
}
#if IS_ENABLED(CONFIG_IPV6)
case ETH_P_IPV6:
{
struct ipv6hdr *pip6;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
return false;
pip6 = ipv6_hdr(skb);
n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin6.sin6_addr = pip6->daddr,
.sin6.sin6_family = AF_INET6,
};
vxlan_ip_miss(dev, &ipa);
return false;
}
break;
}
#endif
default:
return false;
}
if (n) {
bool diff;
diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
if (diff) {
memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
dev->addr_len);
memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
}
neigh_release(n);
return diff;
}
return false;
}
static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, __be16 protocol)
{
struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;
gpe->np_applied = 1;
gpe->next_protocol = tun_p_from_eth_p(protocol);
if (!gpe->next_protocol)
return -EPFNOSUPPORT;
return 0;
}
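/* Make room for and push the VXLAN header (plus optional RCO, GBP and
* GPE extensions), set up tunnel offloads and record the inner
* protocol.
*/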
static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
int iphdr_len, __be32 vni,
struct vxlan_metadata *md, u32 vxflags,
bool udp_sum)
{
struct vxlanhdr *vxh;
int min_headroom;
int err;
int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
__be16 inner_protocol = htons(ETH_P_TEB);
if ((vxflags & VXLAN_F_REMCSUM_TX) &&
skb->ip_summed == CHECKSUM_PARTIAL) {
int csum_start = skb_checksum_start_offset(skb);
if (csum_start <= VXLAN_MAX_REMCSUM_START &&
!(csum_start & VXLAN_RCO_SHIFT_MASK) &&
(skb->csum_offset == offsetof(struct udphdr, check) ||
skb->csum_offset == offsetof(struct tcphdr, check)))
type |= SKB_GSO_TUNNEL_REMCSUM;
}
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ VXLAN_HLEN + iphdr_len;
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
if (unlikely(err))
return err;
err = iptunnel_handle_offloads(skb, type);
if (err)
return err;
vxh = __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = VXLAN_HF_VNI;
vxh->vx_vni = vxlan_vni_field(vni);
if (type & SKB_GSO_TUNNEL_REMCSUM) {
unsigned int start;
start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
vxh->vx_flags |= VXLAN_HF_RCO;
if (!skb_is_gso(skb)) {
skb->ip_summed = CHECKSUM_NONE;
skb->encapsulation = 0;
}
}
if (vxflags & VXLAN_F_GBP)
vxlan_build_gbp_hdr(vxh, md);
if (vxflags & VXLAN_F_GPE) {
err = vxlan_build_gpe_hdr(vxh, skb->protocol);
if (err < 0)
return err;
inner_protocol = skb->protocol;
}
skb_set_inner_protocol(skb, inner_protocol);
return 0;
}
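/* Resolve the IPv4 route for the outer header, using the per-destination
* dst_cache when possible and rejecting routes that loop back through
* the VXLAN device itself.
*/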
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
struct vxlan_sock *sock4,
struct sk_buff *skb, int oif, u8 tos,
__be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
__u8 flow_flags, struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct rtable *rt = NULL;
struct flowi4 fl4;
if (!sock4)
return ERR_PTR(-EIO);
if (tos && !info)
use_cache = false;
if (use_cache) {
rt = dst_cache_get_ip4(dst_cache, saddr);
if (rt)
return rt;
}
memset(&fl4, 0, sizeof(fl4));
fl4.flowi4_oif = oif;
fl4.flowi4_tos = RT_TOS(tos);
fl4.flowi4_mark = skb->mark;
fl4.flowi4_proto = IPPROTO_UDP;
fl4.daddr = daddr;
fl4.saddr = *saddr;
fl4.fl4_dport = dport;
fl4.fl4_sport = sport;
fl4.flowi4_flags = flow_flags;
rt = ip_route_output_key(vxlan->net, &fl4);
if (!IS_ERR(rt)) {
if (rt->dst.dev == dev) {
netdev_dbg(dev, "circular route to %pI4\n", &daddr);
ip_rt_put(rt);
return ERR_PTR(-ELOOP);
}
*saddr = fl4.saddr;
if (use_cache)
dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
} else {
netdev_dbg(dev, "no route to %pI4\n", &daddr);
return ERR_PTR(-ENETUNREACH);
}
return rt;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
struct net_device *dev,
struct vxlan_sock *sock6,
struct sk_buff *skb, int oif, u8 tos,
__be32 label,
const struct in6_addr *daddr,
struct in6_addr *saddr,
__be16 dport, __be16 sport,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct dst_entry *ndst;
struct flowi6 fl6;
if (!sock6)
return ERR_PTR(-EIO);
if (tos && !info)
use_cache = false;
if (use_cache) {
ndst = dst_cache_get_ip6(dst_cache, saddr);
if (ndst)
return ndst;
}
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = oif;
fl6.daddr = *daddr;
fl6.saddr = *saddr;
fl6.flowlabel = ip6_make_flowinfo(tos, label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
fl6.fl6_dport = dport;
fl6.fl6_sport = sport;
ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk,
&fl6, NULL);
if (IS_ERR(ndst)) {
netdev_dbg(dev, "no route to %pI6\n", daddr);
return ERR_PTR(-ENETUNREACH);
}
if (unlikely(ndst->dev == dev)) {
netdev_dbg(dev, "circular route to %pI6\n", daddr);
dst_release(ndst);
return ERR_PTR(-ELOOP);
}
*saddr = fl6.saddr;
if (use_cache)
dst_cache_set_ip6(dst_cache, ndst, saddr);
return ndst;
}
#endif
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct vxlan_dev *dst_vxlan, __be32 vni,
bool snoop)
{
union vxlan_addr loopback;
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
struct net_device *dev;
int len = skb->len;
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
skb->dev = dst_vxlan->dev;
__skb_pull(skb, skb_network_offset(skb));
if (remote_ip->sa.sa_family == AF_INET) {
loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
loopback.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
} else {
loopback.sin6.sin6_addr = in6addr_loopback;
loopback.sa.sa_family = AF_INET6;
#endif
}
rcu_read_lock();
dev = skb->dev;
if (unlikely(!(dev->flags & IFF_UP))) {
kfree_skb(skb);
goto drop;
}
if ((dst_vxlan->cfg.flags & VXLAN_F_LEARN) && snoop)
vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
dev_sw_netstats_tx_add(src_vxlan->dev, 1, len);
vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len);
if (__netif_rx(skb) == NET_RX_SUCCESS) {
dev_sw_netstats_rx_add(dst_vxlan->dev, len);
vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX,
len);
} else {
drop:
dev->stats.rx_dropped++;
vxlan_vnifilter_count(dst_vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
rcu_read_unlock();
}
static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *vxlan,
union vxlan_addr *daddr,
__be16 dst_port, int dst_ifindex, __be32 vni,
struct dst_entry *dst,
u32 rt_flags)
{
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 rt-flags are checked against RTF_LOCAL, but the value of
* RTF_LOCAL is equal to RTCF_LOCAL. So to keep code simple
* we can use RTCF_LOCAL which works for ipv4 and ipv6 route entry.
*/
BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL);
#endif
/* Bypass encapsulation if the destination is local */
if (rt_flags & RTCF_LOCAL &&
!(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
vxlan->cfg.flags & VXLAN_F_LOCALBYPASS) {
struct vxlan_dev *dst_vxlan;
dst_release(dst);
dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni,
daddr->sa.sa_family, dst_port,
vxlan->cfg.flags);
if (!dst_vxlan) {
dev->stats.tx_errors++;
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
return -ENOENT;
}
vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni, true);
return 1;
}
return 0;
}
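/* Encapsulate and transmit one skb to a single remote: pick the outer
* addresses and TOS/TTL/DF policy, resolve the route, check PMTU, build
* the VXLAN header and hand the packet to the UDP tunnel layer. Local
* destinations are short-circuited through vxlan_encap_bypass().
*/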
void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
__be32 default_vni, struct vxlan_rdst *rdst, bool did_rsc)
{
struct dst_cache *dst_cache;
struct ip_tunnel_info *info;
struct vxlan_dev *vxlan = netdev_priv(dev);
const struct iphdr *old_iph = ip_hdr(skb);
union vxlan_addr *dst;
union vxlan_addr remote_ip, local_ip;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
unsigned int pkt_len = skb->len;
__be16 src_port = 0, dst_port;
struct dst_entry *ndst = NULL;
__u8 tos, ttl, flow_flags = 0;
int ifindex;
int err;
u32 flags = vxlan->cfg.flags;
bool udp_sum = false;
bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
__be32 vni = 0;
#if IS_ENABLED(CONFIG_IPV6)
__be32 label;
#endif
info = skb_tunnel_info(skb);
if (rdst) {
dst = &rdst->remote_ip;
if (vxlan_addr_any(dst)) {
if (did_rsc) {
/* short-circuited back to local bridge */
vxlan_encap_bypass(skb, vxlan, vxlan,
default_vni, true);
return;
}
goto drop;
}
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
vni = (rdst->remote_vni) ? : default_vni;
ifindex = rdst->remote_ifindex;
local_ip = vxlan->cfg.saddr;
dst_cache = &rdst->dst_cache;
md->gbp = skb->mark;
if (flags & VXLAN_F_TTL_INHERIT) {
ttl = ip_tunnel_get_ttl(old_iph, skb);
} else {
ttl = vxlan->cfg.ttl;
if (!ttl && vxlan_addr_multicast(dst))
ttl = 1;
}
tos = vxlan->cfg.tos;
if (tos == 1)
tos = ip_tunnel_get_dsfield(old_iph, skb);
if (dst->sa.sa_family == AF_INET)
udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
else
udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
#if IS_ENABLED(CONFIG_IPV6)
label = vxlan->cfg.label;
#endif
} else {
if (!info) {
WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
dev->name);
goto drop;
}
remote_ip.sa.sa_family = ip_tunnel_info_af(info);
if (remote_ip.sa.sa_family == AF_INET) {
remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
} else {
remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
}
dst = &remote_ip;
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
flow_flags = info->key.flow_flags;
vni = tunnel_id_to_key32(info->key.tun_id);
ifindex = 0;
dst_cache = &info->dst_cache;
if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
if (info->options_len < sizeof(*md))
goto drop;
md = ip_tunnel_info_opts(info);
}
ttl = info->key.ttl;
tos = info->key.tos;
#if IS_ENABLED(CONFIG_IPV6)
label = info->key.label;
#endif
udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
}
src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
vxlan->cfg.port_max, true);
rcu_read_lock();
if (dst->sa.sa_family == AF_INET) {
struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
struct rtable *rt;
__be16 df = 0;
if (!ifindex)
ifindex = sock4->sock->sk->sk_bound_dev_if;
rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
dst->sin.sin_addr.s_addr,
&local_ip.sin.sin_addr.s_addr,
dst_port, src_port, flow_flags,
dst_cache, info);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
goto tx_error;
}
if (!info) {
/* Bypass encapsulation if the destination is local */
err = encap_bypass_if_local(skb, dev, vxlan, dst,
dst_port, ifindex, vni,
&rt->dst, rt->rt_flags);
if (err)
goto out_unlock;
if (vxlan->cfg.df == VXLAN_DF_SET) {
df = htons(IP_DF);
} else if (vxlan->cfg.df == VXLAN_DF_INHERIT) {
struct ethhdr *eth = eth_hdr(skb);
if (ntohs(eth->h_proto) == ETH_P_IPV6 ||
(ntohs(eth->h_proto) == ETH_P_IP &&
old_iph->frag_off & htons(IP_DF)))
df = htons(IP_DF);
}
} else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
df = htons(IP_DF);
}
ndst = &rt->dst;
err = skb_tunnel_check_pmtu(skb, ndst, vxlan_headroom(flags & VXLAN_F_GPE),
netif_is_any_bridge_port(dev));
if (err < 0) {
goto tx_error;
} else if (err) {
if (info) {
struct ip_tunnel_info *unclone;
struct in_addr src, dst;
unclone = skb_tunnel_info_unclone(skb);
if (unlikely(!unclone))
goto tx_error;
src = remote_ip.sin.sin_addr;
dst = local_ip.sin.sin_addr;
unclone->key.u.ipv4.src = src.s_addr;
unclone->key.u.ipv4.dst = dst.s_addr;
}
vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
dst_release(ndst);
goto out_unlock;
}
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
vni, md, flags, udp_sum);
if (err < 0)
goto tx_error;
udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr,
dst->sin.sin_addr.s_addr, tos, ttl, df,
src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
if (!ifindex)
ifindex = sock6->sock->sk->sk_bound_dev_if;
ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos,
label, &dst->sin6.sin6_addr,
&local_ip.sin6.sin6_addr,
dst_port, src_port,
dst_cache, info);
if (IS_ERR(ndst)) {
err = PTR_ERR(ndst);
ndst = NULL;
goto tx_error;
}
if (!info) {
u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
err = encap_bypass_if_local(skb, dev, vxlan, dst,
dst_port, ifindex, vni,
ndst, rt6i_flags);
if (err)
goto out_unlock;
}
err = skb_tunnel_check_pmtu(skb, ndst,
vxlan_headroom((flags & VXLAN_F_GPE) | VXLAN_F_IPV6),
netif_is_any_bridge_port(dev));
if (err < 0) {
goto tx_error;
} else if (err) {
if (info) {
struct ip_tunnel_info *unclone;
struct in6_addr src, dst;
unclone = skb_tunnel_info_unclone(skb);
if (unlikely(!unclone))
goto tx_error;
src = remote_ip.sin6.sin6_addr;
dst = local_ip.sin6.sin6_addr;
unclone->key.u.ipv6.src = src;
unclone->key.u.ipv6.dst = dst;
}
vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
dst_release(ndst);
goto out_unlock;
}
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
vni, md, flags, udp_sum);
if (err < 0)
goto tx_error;
udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
&local_ip.sin6.sin6_addr,
&dst->sin6.sin6_addr, tos, ttl,
label, src_port, dst_port, !udp_sum);
#endif
}
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len);
out_unlock:
rcu_read_unlock();
return;
drop:
dev->stats.tx_dropped++;
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
return;
tx_error:
rcu_read_unlock();
if (err == -ELOOP)
dev->stats.collisions++;
else if (err == -ENETUNREACH)
dev->stats.tx_carrier_errors++;
dst_release(ndst);
dev->stats.tx_errors++;
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
}
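/* Transmit via an FDB nexthop group: select one path based on the flow
* hash and forward the skb through vxlan_xmit_one().
*/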
static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
struct vxlan_fdb *f, __be32 vni, bool did_rsc)
{
struct vxlan_rdst nh_rdst;
struct nexthop *nh;
bool do_xmit;
u32 hash;
memset(&nh_rdst, 0, sizeof(struct vxlan_rdst));
hash = skb_get_hash(skb);
rcu_read_lock();
nh = rcu_dereference(f->nh);
if (!nh) {
rcu_read_unlock();
goto drop;
}
do_xmit = vxlan_fdb_nh_path_select(nh, hash, &nh_rdst);
rcu_read_unlock();
if (likely(do_xmit))
vxlan_xmit_one(skb, dev, vni, &nh_rdst, did_rsc);
else
goto drop;
return;
drop:
dev->stats.tx_dropped++;
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
}
static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev,
u32 nhid, __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst nh_rdst;
struct nexthop *nh;
bool do_xmit;
u32 hash;
memset(&nh_rdst, 0, sizeof(struct vxlan_rdst));
hash = skb_get_hash(skb);
rcu_read_lock();
nh = nexthop_find_by_id(dev_net(dev), nhid);
if (unlikely(!nh || !nexthop_is_fdb(nh) || !nexthop_is_multipath(nh))) {
rcu_read_unlock();
goto drop;
}
do_xmit = vxlan_fdb_nh_path_select(nh, hash, &nh_rdst);
rcu_read_unlock();
if (vxlan->cfg.saddr.sa.sa_family != nh_rdst.remote_ip.sa.sa_family)
goto drop;
if (likely(do_xmit))
vxlan_xmit_one(skb, dev, vni, &nh_rdst, false);
else
goto drop;
return NETDEV_TX_OK;
drop:
dev->stats.tx_dropped++;
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
/* Transmit local packets over VXLAN
*
* The outer IP header inherits ECN and DF from the inner header.
* The outer UDP destination is the VXLAN-assigned port.
* The source port is based on a hash of the flow.
*/
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *rdst, *fdst = NULL;
const struct ip_tunnel_info *info;
bool did_rsc = false;
struct vxlan_fdb *f;
struct ethhdr *eth;
__be32 vni = 0;
u32 nhid = 0;
info = skb_tunnel_info(skb);
skb_reset_mac_header(skb);
if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
info->mode & IP_TUNNEL_INFO_TX) {
vni = tunnel_id_to_key32(info->key.tun_id);
nhid = info->key.nhid;
} else {
if (info && info->mode & IP_TUNNEL_INFO_TX)
vxlan_xmit_one(skb, dev, vni, NULL, false);
else
kfree_skb(skb);
return NETDEV_TX_OK;
}
}
if (vxlan->cfg.flags & VXLAN_F_PROXY) {
eth = eth_hdr(skb);
if (ntohs(eth->h_proto) == ETH_P_ARP)
return arp_reduce(dev, skb, vni);
#if IS_ENABLED(CONFIG_IPV6)
else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
pskb_may_pull(skb, sizeof(struct ipv6hdr) +
sizeof(struct nd_msg)) &&
ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1);
if (m->icmph.icmp6_code == 0 &&
m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
return neigh_reduce(dev, skb, vni);
}
#endif
}
if (nhid)
return vxlan_xmit_nhid(skb, dev, nhid, vni);
if (vxlan->cfg.flags & VXLAN_F_MDB) {
struct vxlan_mdb_entry *mdb_entry;
rcu_read_lock();
mdb_entry = vxlan_mdb_entry_skb_get(vxlan, skb, vni);
if (mdb_entry) {
netdev_tx_t ret;
ret = vxlan_mdb_xmit(vxlan, mdb_entry, skb);
rcu_read_unlock();
return ret;
}
rcu_read_unlock();
}
eth = eth_hdr(skb);
f = vxlan_find_mac(vxlan, eth->h_dest, vni);
did_rsc = false;
if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
(ntohs(eth->h_proto) == ETH_P_IP ||
ntohs(eth->h_proto) == ETH_P_IPV6)) {
did_rsc = route_shortcircuit(dev, skb);
if (did_rsc)
f = vxlan_find_mac(vxlan, eth->h_dest, vni);
}
if (f == NULL) {
f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f == NULL) {
if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);
dev->stats.tx_dropped++;
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb(skb);
return NETDEV_TX_OK;
}
}
if (rcu_access_pointer(f->nh)) {
vxlan_xmit_nh(skb, dev, f,
(vni ? : vxlan->default_dst.remote_vni), did_rsc);
} else {
list_for_each_entry_rcu(rdst, &f->remotes, list) {
struct sk_buff *skb1;
if (!fdst) {
fdst = rdst;
continue;
}
skb1 = skb_clone(skb, GFP_ATOMIC);
if (skb1)
vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
}
if (fdst)
vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
else
kfree_skb(skb);
}
return NETDEV_TX_OK;
}
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(struct timer_list *t)
{
struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer);
unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
unsigned int h;
if (!netif_running(vxlan->dev))
return;
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;
spin_lock(&vxlan->hash_lock[h]);
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
unsigned long timeout;
if (f->state & (NUD_PERMANENT | NUD_NOARP))
continue;
if (f->flags & NTF_EXT_LEARNED)
continue;
timeout = f->used + vxlan->cfg.age_interval * HZ;
if (time_before_eq(timeout, jiffies)) {
netdev_dbg(vxlan->dev,
"garbage collect %pM\n",
f->eth_addr);
f->state = NUD_STALE;
vxlan_fdb_destroy(vxlan, f, true, true);
} else if (time_before(timeout, next_timer))
next_timer = timeout;
}
spin_unlock(&vxlan->hash_lock[h]);
}
mod_timer(&vxlan->age_timer, next_timer);
}
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
spin_lock(&vn->sock_lock);
hlist_del_init_rcu(&vxlan->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
hlist_del_init_rcu(&vxlan->hlist6.hlist);
#endif
spin_unlock(&vn->sock_lock);
}
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
struct vxlan_dev_node *node)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
__be32 vni = vxlan->default_dst.remote_vni;
node->vxlan = vxlan;
spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
spin_unlock(&vn->sock_lock);
}
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vnigroup_init(vxlan);
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats) {
err = -ENOMEM;
goto err_vnigroup_uninit;
}
err = gro_cells_init(&vxlan->gro_cells, dev);
if (err)
goto err_free_percpu;
err = vxlan_mdb_init(vxlan);
if (err)
goto err_gro_cells_destroy;
return 0;
err_gro_cells_destroy:
gro_cells_destroy(&vxlan->gro_cells);
err_free_percpu:
free_percpu(dev->tstats);
err_vnigroup_uninit:
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vnigroup_uninit(vxlan);
return err;
}
static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
{
struct vxlan_fdb *f;
u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f)
vxlan_fdb_destroy(vxlan, f, true, true);
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}
static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
vxlan_mdb_fini(vxlan);
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vnigroup_uninit(vxlan);
gro_cells_destroy(&vxlan->gro_cells);
vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
int ret;
ret = vxlan_sock_add(vxlan);
if (ret < 0)
return ret;
ret = vxlan_multicast_join(vxlan);
if (ret) {
vxlan_sock_release(vxlan);
return ret;
}
if (vxlan->cfg.age_interval)
mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
return ret;
}
/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
{
unsigned int h;
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;
spin_lock_bh(&vxlan->hash_lock[h]);
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
continue;
/* the all_zeros_mac entry is deleted at vxlan_uninit */
if (is_zero_ether_addr(f->eth_addr) &&
f->vni == vxlan->cfg.vni)
continue;
vxlan_fdb_destroy(vxlan, f, true, true);
}
spin_unlock_bh(&vxlan->hash_lock[h]);
}
}
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
vxlan_multicast_leave(vxlan);
del_timer_sync(&vxlan->age_timer);
vxlan_flush(vxlan, false);
vxlan_sock_release(vxlan);
return 0;
}
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
dst->remote_ifindex);
/* This check is different from dev->max_mtu, because it looks at
* the lowerdev->mtu rather than the static dev->max_mtu.
*/
if (lowerdev) {
int max_mtu = lowerdev->mtu - vxlan_headroom(vxlan->cfg.flags);
if (new_mtu > max_mtu)
return -EINVAL;
}
dev->mtu = new_mtu;
return 0;
}
static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct ip_tunnel_info *info = skb_tunnel_info(skb);
__be16 sport, dport;
sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
vxlan->cfg.port_max, true);
dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
if (ip_tunnel_info_af(info) == AF_INET) {
struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
struct rtable *rt;
rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
&info->key.u.ipv4.src, dport, sport,
info->key.flow_flags, &info->dst_cache,
info);
if (IS_ERR(rt))
return PTR_ERR(rt);
ip_rt_put(rt);
} else {
#if IS_ENABLED(CONFIG_IPV6)
struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
struct dst_entry *ndst;
ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
info->key.label, &info->key.u.ipv6.dst,
&info->key.u.ipv6.src, dport, sport,
&info->dst_cache, info);
if (IS_ERR(ndst))
return PTR_ERR(ndst);
dst_release(ndst);
#else /* !CONFIG_IPV6 */
return -EPFNOSUPPORT;
#endif
}
info->key.tp_src = sport;
info->key.tp_dst = dport;
return 0;
}
static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_init = vxlan_init,
.ndo_uninit = vxlan_uninit,
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_rx_mode = vxlan_set_multicast_list,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_fdb_add = vxlan_fdb_add,
.ndo_fdb_del = vxlan_fdb_delete,
.ndo_fdb_dump = vxlan_fdb_dump,
.ndo_fdb_get = vxlan_fdb_get,
.ndo_mdb_add = vxlan_mdb_add,
.ndo_mdb_del = vxlan_mdb_del,
.ndo_mdb_dump = vxlan_mdb_dump,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
};
static const struct net_device_ops vxlan_netdev_raw_ops = {
.ndo_init = vxlan_init,
.ndo_uninit = vxlan_uninit,
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
};
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
.name = "vxlan",
};
/* Calls the ndo_udp_tunnel_add of the caller in order to
* supply the listening VXLAN udp ports. Callers are expected
* to implement the ndo_udp_tunnel_add.
*/
static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
{
struct vxlan_sock *vs;
struct net *net = dev_net(dev);
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
unsigned int i;
spin_lock(&vn->sock_lock);
for (i = 0; i < PORT_HASH_SIZE; ++i) {
hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
unsigned short type;
if (vs->flags & VXLAN_F_GPE)
type = UDP_TUNNEL_TYPE_VXLAN_GPE;
else
type = UDP_TUNNEL_TYPE_VXLAN;
if (push)
udp_tunnel_push_rx_port(dev, vs->sock, type);
else
udp_tunnel_drop_rx_port(dev, vs->sock, type);
}
}
spin_unlock(&vn->sock_lock);
}
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
unsigned int h;
eth_hw_addr_random(dev);
ether_setup(dev);
dev->needs_free_netdev = true;
SET_NETDEV_DEVTYPE(dev, &vxlan_type);
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->vlan_features = dev->features;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE | IFF_CHANGE_PROTO_DOWN;
/* MTU range: 68 - 65535 */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU;
INIT_LIST_HEAD(&vxlan->next);
timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
vxlan->dev = dev;
for (h = 0; h < FDB_HASH_SIZE; ++h) {
spin_lock_init(&vxlan->hash_lock[h]);
INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
}
static void vxlan_ether_setup(struct net_device *dev)
{
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->netdev_ops = &vxlan_netdev_ether_ops;
}
static void vxlan_raw_setup(struct net_device *dev)
{
dev->header_ops = NULL;
dev->type = ARPHRD_NONE;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->netdev_ops = &vxlan_netdev_raw_ops;
}
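/* Netlink policy for the IFLA_VXLAN_* attributes. As a rough,
 * illustrative example (iproute2 syntax may differ between versions),
 * a command such as
 *	ip link add vxlan0 type vxlan id 42 dev eth0 group 239.1.1.1 \
 *		dstport 4789 ttl 16
 * arrives here as IFLA_VXLAN_ID, IFLA_VXLAN_LINK, IFLA_VXLAN_GROUP,
 * IFLA_VXLAN_PORT and IFLA_VXLAN_TTL attributes and is validated
 * against this table.
 */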
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_UNSPEC] = { .strict_start_type = IFLA_VXLAN_LOCALBYPASS },
[IFLA_VXLAN_ID] = { .type = NLA_U32 },
[IFLA_VXLAN_GROUP] = { .len = sizeof_field(struct iphdr, daddr) },
[IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
[IFLA_VXLAN_LINK] = { .type = NLA_U32 },
[IFLA_VXLAN_LOCAL] = { .len = sizeof_field(struct iphdr, saddr) },
[IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
[IFLA_VXLAN_TOS] = { .type = NLA_U8 },
[IFLA_VXLAN_TTL] = { .type = NLA_U8 },
[IFLA_VXLAN_LABEL] = { .type = NLA_U32 },
[IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
[IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
[IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
[IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
[IFLA_VXLAN_RSC] = { .type = NLA_U8 },
[IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
[IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
[IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 },
[IFLA_VXLAN_PORT] = { .type = NLA_U16 },
[IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
[IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
[IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
[IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
[IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
[IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
[IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG },
[IFLA_VXLAN_DF] = { .type = NLA_U8 },
[IFLA_VXLAN_VNIFILTER] = { .type = NLA_U8 },
[IFLA_VXLAN_LOCALBYPASS] = NLA_POLICY_MAX(NLA_U8, 1),
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
"Provided link layer address is not Ethernet");
return -EINVAL;
}
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
"Provided Ethernet address is not unicast");
return -EADDRNOTAVAIL;
}
}
if (tb[IFLA_MTU]) {
u32 mtu = nla_get_u32(tb[IFLA_MTU]);
if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
"MTU must be between 68 and 65535");
return -EINVAL;
}
}
if (!data) {
NL_SET_ERR_MSG(extack,
"Required attributes not provided to perform the operation");
return -EINVAL;
}
if (data[IFLA_VXLAN_ID]) {
u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
if (id >= VXLAN_N_VID) {
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID],
"VXLAN ID must be lower than 16777216");
return -ERANGE;
}
}
if (data[IFLA_VXLAN_PORT_RANGE]) {
const struct ifla_vxlan_port_range *p
= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
if (ntohs(p->high) < ntohs(p->low)) {
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_PORT_RANGE],
"Invalid source port range");
return -EINVAL;
}
}
if (data[IFLA_VXLAN_DF]) {
enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]);
if (df < 0 || df > VXLAN_DF_MAX) {
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_DF],
"Invalid DF attribute");
return -EINVAL;
}
}
return 0;
}
static void vxlan_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
strscpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}
static int vxlan_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
dst->remote_ifindex);
if (!lowerdev) {
cmd->base.duplex = DUPLEX_UNKNOWN;
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
return 0;
}
return __ethtool_get_link_ksettings(lowerdev, cmd);
}
static const struct ethtool_ops vxlan_ethtool_ops = {
.get_drvinfo = vxlan_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = vxlan_get_link_ksettings,
};
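/* Open the underlay kernel UDP socket (IPv4 or IPv6) bound to @port
 * and, if @ifindex is non-zero, to that device.
 */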
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
__be16 port, u32 flags, int ifindex)
{
struct socket *sock;
struct udp_port_cfg udp_conf;
int err;
memset(&udp_conf, 0, sizeof(udp_conf));
if (ipv6) {
udp_conf.family = AF_INET6;
udp_conf.use_udp6_rx_checksums =
!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
udp_conf.ipv6_v6only = 1;
} else {
udp_conf.family = AF_INET;
}
udp_conf.local_udp_port = port;
udp_conf.bind_ifindex = ifindex;
/* Open UDP socket */
err = udp_sock_create(net, &udp_conf, &sock);
if (err < 0)
return ERR_PTR(err);
udp_allow_gso(sock->sk);
return sock;
}
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
__be16 port, u32 flags,
int ifindex)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
struct socket *sock;
unsigned int h;
struct udp_tunnel_sock_cfg tunnel_cfg;
vs = kzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
return ERR_PTR(-ENOMEM);
for (h = 0; h < VNI_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vs->vni_list[h]);
sock = vxlan_create_sock(net, ipv6, port, flags, ifindex);
if (IS_ERR(sock)) {
kfree(vs);
return ERR_CAST(sock);
}
vs->sock = sock;
refcount_set(&vs->refcnt, 1);
vs->flags = (flags & VXLAN_F_RCV_FLAGS);
spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
udp_tunnel_notify_add_rx_port(sock,
(vs->flags & VXLAN_F_GPE) ?
UDP_TUNNEL_TYPE_VXLAN_GPE :
UDP_TUNNEL_TYPE_VXLAN);
spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */
memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
tunnel_cfg.sk_user_data = vs;
tunnel_cfg.encap_type = 1;
tunnel_cfg.encap_rcv = vxlan_rcv;
tunnel_cfg.encap_err_lookup = vxlan_err_lookup;
tunnel_cfg.encap_destroy = NULL;
if (vs->flags & VXLAN_F_GPE) {
tunnel_cfg.gro_receive = vxlan_gpe_gro_receive;
tunnel_cfg.gro_complete = vxlan_gpe_gro_complete;
} else {
tunnel_cfg.gro_receive = vxlan_gro_receive;
tunnel_cfg.gro_complete = vxlan_gro_complete;
}
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
return vs;
}
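/* Attach the device to a vxlan_sock for the given address family.
 * Unless socket sharing is disabled, try to reuse an existing socket
 * with matching port, receive flags and bound device (taking a
 * reference); otherwise create a new one. The device is then hashed
 * off the socket, either per VNI group (VNI filtering in metadata
 * mode) or as a single VNI node.
 */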
static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
struct vxlan_sock *vs = NULL;
struct vxlan_dev_node *node;
int l3mdev_index = 0;
if (vxlan->cfg.remote_ifindex)
l3mdev_index = l3mdev_master_upper_ifindex_by_index(
vxlan->net, vxlan->cfg.remote_ifindex);
if (!vxlan->cfg.no_share) {
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
vxlan->cfg.dst_port, vxlan->cfg.flags,
l3mdev_index);
if (vs && !refcount_inc_not_zero(&vs->refcnt)) {
spin_unlock(&vn->sock_lock);
return -EBUSY;
}
spin_unlock(&vn->sock_lock);
}
if (!vs)
vs = vxlan_socket_create(vxlan->net, ipv6,
vxlan->cfg.dst_port, vxlan->cfg.flags,
l3mdev_index);
if (IS_ERR(vs))
return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6) {
rcu_assign_pointer(vxlan->vn6_sock, vs);
node = &vxlan->hlist6;
} else
#endif
{
rcu_assign_pointer(vxlan->vn4_sock, vs);
node = &vxlan->hlist4;
}
if (metadata && (vxlan->cfg.flags & VXLAN_F_VNIFILTER))
vxlan_vs_add_vnigrp(vxlan, vs, ipv6);
else
vxlan_vs_add_dev(vs, vxlan, node);
return 0;
}
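/* In collect-metadata mode the device must be able to receive over
 * both address families, so an IPv6 socket is opened (tolerating
 * -EAFNOSUPPORT) in addition to the IPv4 one; otherwise only the
 * configured family's socket is created.
 */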
static int vxlan_sock_add(struct vxlan_dev *vxlan)
{
bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata;
bool ipv4 = !ipv6 || metadata;
int ret = 0;
RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
#if IS_ENABLED(CONFIG_IPV6)
RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
if (ipv6) {
ret = __vxlan_sock_add(vxlan, true);
if (ret < 0 && ret != -EAFNOSUPPORT)
ipv4 = false;
}
#endif
if (ipv4)
ret = __vxlan_sock_add(vxlan, false);
if (ret < 0)
vxlan_sock_release(vxlan);
return ret;
}
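/* A VNI may only be used once per destination UDP port, receive-flag
 * combination and address family within a netns (and, for IPv6
 * link-local tunnels, per lower device). Return -EEXIST if another
 * device already claims the VNI.
 */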
int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan,
struct vxlan_config *conf, __be32 vni)
{
struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
struct vxlan_dev *tmp;
list_for_each_entry(tmp, &vn->vxlan_list, next) {
if (tmp == vxlan)
continue;
if (tmp->cfg.flags & VXLAN_F_VNIFILTER) {
if (!vxlan_vnifilter_lookup(tmp, vni))
continue;
} else if (tmp->cfg.vni != vni) {
continue;
}
if (tmp->cfg.dst_port != conf->dst_port)
continue;
if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
(conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
continue;
if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
tmp->cfg.remote_ifindex != conf->remote_ifindex)
continue;
return -EEXIST;
}
return 0;
}
static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
struct net_device **lower,
struct vxlan_dev *old,
struct netlink_ext_ack *extack)
{
bool use_ipv6 = false;
if (conf->flags & VXLAN_F_GPE) {
/* For now, allow GPE only together with
 * COLLECT_METADATA. This can be relaxed later; in that
 * case, the other side of the point-to-point link will
 * have to be provided.
 */
if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
!(conf->flags & VXLAN_F_COLLECT_METADATA)) {
NL_SET_ERR_MSG(extack,
"VXLAN GPE does not support this combination of attributes");
return -EINVAL;
}
}
if (!conf->remote_ip.sa.sa_family && !conf->saddr.sa.sa_family) {
/* Unless IPv6 is explicitly requested, assume IPv4 */
conf->remote_ip.sa.sa_family = AF_INET;
conf->saddr.sa.sa_family = AF_INET;
} else if (!conf->remote_ip.sa.sa_family) {
conf->remote_ip.sa.sa_family = conf->saddr.sa.sa_family;
} else if (!conf->saddr.sa.sa_family) {
conf->saddr.sa.sa_family = conf->remote_ip.sa.sa_family;
}
if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family) {
NL_SET_ERR_MSG(extack,
"Local and remote address must be from the same family");
return -EINVAL;
}
if (vxlan_addr_multicast(&conf->saddr)) {
NL_SET_ERR_MSG(extack, "Local address cannot be multicast");
return -EINVAL;
}
if (conf->saddr.sa.sa_family == AF_INET6) {
if (!IS_ENABLED(CONFIG_IPV6)) {
NL_SET_ERR_MSG(extack,
"IPv6 support not enabled in the kernel");
return -EPFNOSUPPORT;
}
use_ipv6 = true;
conf->flags |= VXLAN_F_IPV6;
if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) {
int local_type =
ipv6_addr_type(&conf->saddr.sin6.sin6_addr);
int remote_type =
ipv6_addr_type(&conf->remote_ip.sin6.sin6_addr);
if (local_type & IPV6_ADDR_LINKLOCAL) {
if (!(remote_type & IPV6_ADDR_LINKLOCAL) &&
(remote_type != IPV6_ADDR_ANY)) {
NL_SET_ERR_MSG(extack,
"Invalid combination of local and remote address scopes");
return -EINVAL;
}
conf->flags |= VXLAN_F_IPV6_LINKLOCAL;
} else {
if (remote_type ==
(IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)) {
NL_SET_ERR_MSG(extack,
"Invalid combination of local and remote address scopes");
return -EINVAL;
}
conf->flags &= ~VXLAN_F_IPV6_LINKLOCAL;
}
}
}
if (conf->label && !use_ipv6) {
NL_SET_ERR_MSG(extack,
"Label attribute only applies to IPv6 VXLAN devices");
return -EINVAL;
}
if (conf->remote_ifindex) {
struct net_device *lowerdev;
lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
if (!lowerdev) {
NL_SET_ERR_MSG(extack,
"Invalid local interface, device not found");
return -ENODEV;
}
#if IS_ENABLED(CONFIG_IPV6)
if (use_ipv6) {
struct inet6_dev *idev = __in6_dev_get(lowerdev);
if (idev && idev->cnf.disable_ipv6) {
NL_SET_ERR_MSG(extack,
"IPv6 support disabled by administrator");
return -EPERM;
}
}
#endif
*lower = lowerdev;
} else {
if (vxlan_addr_multicast(&conf->remote_ip)) {
NL_SET_ERR_MSG(extack,
"Local interface required for multicast remote destination");
return -EINVAL;
}
#if IS_ENABLED(CONFIG_IPV6)
if (conf->flags & VXLAN_F_IPV6_LINKLOCAL) {
NL_SET_ERR_MSG(extack,
"Local interface required for link-local local/remote addresses");
return -EINVAL;
}
#endif
*lower = NULL;
}
if (!conf->dst_port) {
if (conf->flags & VXLAN_F_GPE)
conf->dst_port = htons(IANA_VXLAN_GPE_UDP_PORT);
else
conf->dst_port = htons(vxlan_port);
}
if (!conf->age_interval)
conf->age_interval = FDB_AGE_DEFAULT;
if (vxlan_vni_in_use(src_net, old, conf, conf->vni)) {
NL_SET_ERR_MSG(extack,
"A VXLAN device with the specified VNI already exists");
return -EEXIST;
}
return 0;
}
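/* Apply a validated config to the device: select Ethernet or raw
 * (GPE) mode, record the default destination, and derive headroom and
 * MTU limits from the lower device. As a worked example (assuming a
 * plain IPv4 underlay without GPE), vxlan_headroom() accounts for
 * 20 (IPv4) + 8 (UDP) + 8 (VXLAN) + 14 (inner Ethernet) = 50 bytes,
 * so a lower device MTU of 1500 yields a maximum VXLAN MTU of 1450.
 */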
static void vxlan_config_apply(struct net_device *dev,
struct vxlan_config *conf,
struct net_device *lowerdev,
struct net *src_net,
bool changelink)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
unsigned short needed_headroom = ETH_HLEN;
int max_mtu = ETH_MAX_MTU;
u32 flags = conf->flags;
if (!changelink) {
if (flags & VXLAN_F_GPE)
vxlan_raw_setup(dev);
else
vxlan_ether_setup(dev);
if (conf->mtu)
dev->mtu = conf->mtu;
vxlan->net = src_net;
}
dst->remote_vni = conf->vni;
memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
if (lowerdev) {
dst->remote_ifindex = conf->remote_ifindex;
netif_inherit_tso_max(dev, lowerdev);
needed_headroom = lowerdev->hard_header_len;
needed_headroom += lowerdev->needed_headroom;
dev->needed_tailroom = lowerdev->needed_tailroom;
max_mtu = lowerdev->mtu - vxlan_headroom(flags);
if (max_mtu < ETH_MIN_MTU)
max_mtu = ETH_MIN_MTU;
if (!changelink && !conf->mtu)
dev->mtu = max_mtu;
}
if (dev->mtu > max_mtu)
dev->mtu = max_mtu;
if (flags & VXLAN_F_COLLECT_METADATA)
flags |= VXLAN_F_IPV6;
needed_headroom += vxlan_headroom(flags);
dev->needed_headroom = needed_headroom;
memcpy(&vxlan->cfg, conf, sizeof(*conf));
}
static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
struct vxlan_config *conf, bool changelink,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct net_device *lowerdev;
int ret;
ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack);
if (ret)
return ret;
vxlan_config_apply(dev, conf, lowerdev, src_net, changelink);
return 0;
}
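/* Create and register a new VXLAN netdevice from a validated config:
 * pre-allocate the all-zeros-MAC default FDB entry when a default
 * remote is configured, register the netdev, link it above the lower
 * device, and only then insert and announce the default FDB entry
 * (see the error-path comment below for the ordering rationale).
 */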
static int __vxlan_dev_create(struct net *net, struct net_device *dev,
struct vxlan_config *conf,
struct netlink_ext_ack *extack)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct net_device *remote_dev = NULL;
struct vxlan_fdb *f = NULL;
bool unregister = false;
struct vxlan_rdst *dst;
int err;
dst = &vxlan->default_dst;
err = vxlan_dev_configure(net, dev, conf, false, extack);
if (err)
return err;
dev->ethtool_ops = &vxlan_ethtool_ops;
/* create an fdb entry for a valid default destination */
if (!vxlan_addr_any(&dst->remote_ip)) {
err = vxlan_fdb_create(vxlan, all_zeros_mac,
&dst->remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
vxlan->cfg.dst_port,
dst->remote_vni,
dst->remote_vni,
dst->remote_ifindex,
NTF_SELF, 0, &f, extack);
if (err)
return err;
}
err = register_netdevice(dev);
if (err)
goto errout;
unregister = true;
if (dst->remote_ifindex) {
remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
if (!remote_dev) {
err = -ENODEV;
goto errout;
}
err = netdev_upper_dev_link(remote_dev, dev, extack);
if (err)
goto errout;
}
err = rtnl_configure_link(dev, NULL, 0, NULL);
if (err < 0)
goto unlink;
if (f) {
vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f);
/* notify default fdb entry */
err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
RTM_NEWNEIGH, true, extack);
if (err) {
vxlan_fdb_destroy(vxlan, f, false, false);
if (remote_dev)
netdev_upper_dev_unlink(remote_dev, dev);
goto unregister;
}
}
list_add(&vxlan->next, &vn->vxlan_list);
if (remote_dev)
dst->remote_dev = remote_dev;
return 0;
unlink:
if (remote_dev)
netdev_upper_dev_unlink(remote_dev, dev);
errout:
/* unregister_netdevice() destroys the default FDB entry with deletion
* notification. But the addition notification was not sent yet, so
* destroy the entry by hand here.
*/
if (f)
__vxlan_fdb_free(f);
unregister:
if (unregister)
unregister_netdevice(dev);
return err;
}
/* Set/clear flags based on attribute */
static int vxlan_nl2flag(struct vxlan_config *conf, struct nlattr *tb[],
int attrtype, unsigned long mask, bool changelink,
bool changelink_supported,
struct netlink_ext_ack *extack)
{
unsigned long flags;
if (!tb[attrtype])
return 0;
if (changelink && !changelink_supported) {
vxlan_flag_attr_error(attrtype, extack);
return -EOPNOTSUPP;
}
if (vxlan_policy[attrtype].type == NLA_FLAG)
flags = conf->flags | mask;
else if (nla_get_u8(tb[attrtype]))
flags = conf->flags | mask;
else
flags = conf->flags & ~mask;
conf->flags = flags;
return 0;
}
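/* Translate the IFLA_VXLAN_* netlink attributes into a vxlan_config.
 * For a changelink operation the existing configuration is the
 * starting point, and attributes that cannot be changed on a live
 * device (VNI, port, port range, MTU, most flags) are rejected with
 * -EOPNOTSUPP.
 */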
static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
struct net_device *dev, struct vxlan_config *conf,
bool changelink, struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
int err = 0;
memset(conf, 0, sizeof(*conf));
/* for a changelink operation, start from the existing cfg */
if (changelink)
memcpy(conf, &vxlan->cfg, sizeof(*conf));
if (data[IFLA_VXLAN_ID]) {
__be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
if (changelink && (vni != conf->vni)) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID], "Cannot change VNI");
return -EOPNOTSUPP;
}
conf->vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
}
if (data[IFLA_VXLAN_GROUP]) {
if (changelink && (conf->remote_ip.sa.sa_family != AF_INET)) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP], "New group address family does not match old group");
return -EOPNOTSUPP;
}
conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
conf->remote_ip.sa.sa_family = AF_INET;
} else if (data[IFLA_VXLAN_GROUP6]) {
if (!IS_ENABLED(CONFIG_IPV6)) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP6], "IPv6 support not enabled in the kernel");
return -EPFNOSUPPORT;
}
if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6)) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP6], "New group address family does not match old group");
return -EOPNOTSUPP;
}
conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
conf->remote_ip.sa.sa_family = AF_INET6;
}
if (data[IFLA_VXLAN_LOCAL]) {
if (changelink && (conf->saddr.sa.sa_family != AF_INET)) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL], "New local address family does not match old");
return -EOPNOTSUPP;
}
conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
conf->saddr.sa.sa_family = AF_INET;
} else if (data[IFLA_VXLAN_LOCAL6]) {
if (!IS_ENABLED(CONFIG_IPV6)) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL6], "IPv6 support not enabled in the kernel");
return -EPFNOSUPPORT;
}
if (changelink && (conf->saddr.sa.sa_family != AF_INET6)) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL6], "New local address family does not match old");
return -EOPNOTSUPP;
}
/* TODO: respect scope id */
conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
conf->saddr.sa.sa_family = AF_INET6;
}
if (data[IFLA_VXLAN_LINK])
conf->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
if (data[IFLA_VXLAN_TOS])
conf->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
if (data[IFLA_VXLAN_TTL])
conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
if (data[IFLA_VXLAN_TTL_INHERIT]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_TTL_INHERIT,
VXLAN_F_TTL_INHERIT, changelink, false,
extack);
if (err)
return err;
}
if (data[IFLA_VXLAN_LABEL])
conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
IPV6_FLOWLABEL_MASK;
if (data[IFLA_VXLAN_LEARNING]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_LEARNING,
VXLAN_F_LEARN, changelink, true,
extack);
if (err)
return err;
} else if (!changelink) {
/* default to learn on a new device */
conf->flags |= VXLAN_F_LEARN;
}
if (data[IFLA_VXLAN_AGEING])
conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
if (data[IFLA_VXLAN_PROXY]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_PROXY,
VXLAN_F_PROXY, changelink, false,
extack);
if (err)
return err;
}
if (data[IFLA_VXLAN_RSC]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_RSC,
VXLAN_F_RSC, changelink, false,
extack);
if (err)
return err;
}
if (data[IFLA_VXLAN_L2MISS]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_L2MISS,
VXLAN_F_L2MISS, changelink, false,
extack);
if (err)
return err;
}
if (data[IFLA_VXLAN_L3MISS]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_L3MISS,
VXLAN_F_L3MISS, changelink, false,
extack);
if (err)
return err;
}
if (data[IFLA_VXLAN_LIMIT]) {
if (changelink) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LIMIT],
"Cannot change limit");
return -EOPNOTSUPP;
}
conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
}
if (data[IFLA_VXLAN_COLLECT_METADATA]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_COLLECT_METADATA,
VXLAN_F_COLLECT_METADATA, changelink, false,
extack);
if (err)
return err;
}
if (data[IFLA_VXLAN_PORT_RANGE]) {
if (!changelink) {
const struct ifla_vxlan_port_range *p
= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
conf->port_min = ntohs(p->low);
conf->port_max = ntohs(p->high);
} else {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE],
"Cannot change port range");
return -EOPNOTSUPP;
}
}
if (data[IFLA_VXLAN_PORT]) {
if (changelink) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT],
"Cannot change port");
return -EOPNOTSUPP;
}
conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
}
if (data[IFLA_VXLAN_UDP_CSUM]) {
if (changelink) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_UDP_CSUM],
"Cannot change UDP_CSUM flag");
return -EOPNOTSUPP;
}
if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
}
if (data[IFLA_VXLAN_LOCALBYPASS]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_LOCALBYPASS,
VXLAN_F_LOCALBYPASS, changelink,
true, extack);
if (err)
return err;
} else if (!changelink) {
/* default to local bypass on a new device */
conf->flags |= VXLAN_F_LOCALBYPASS;
}
if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
VXLAN_F_UDP_ZERO_CSUM6_TX, changelink,
false, extack);
if (err)
return err;
}
if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
VXLAN_F_UDP_ZERO_CSUM6_RX, changelink,
false, extack);
if (err)
return err;
}
if (data[IFLA_VXLAN_REMCSUM_TX]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_TX,
VXLAN_F_REMCSUM_TX, changelink, false,
extack);
if (err)
return err;
}
if (data[IFLA_VXLAN_REMCSUM_RX]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_RX,
VXLAN_F_REMCSUM_RX, changelink, false,
extack);
if (err)
return err;
}
if (data[IFLA_VXLAN_GBP]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_GBP,
VXLAN_F_GBP, changelink, false, extack);
if (err)
return err;
}
if (data[IFLA_VXLAN_GPE]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_GPE,
VXLAN_F_GPE, changelink, false,
extack);
if (err)
return err;
}
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_NOPARTIAL,
VXLAN_F_REMCSUM_NOPARTIAL, changelink,
false, extack);
if (err)
return err;
}
if (tb[IFLA_MTU]) {
if (changelink) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
"Cannot change mtu");
return -EOPNOTSUPP;
}
conf->mtu = nla_get_u32(tb[IFLA_MTU]);
}
if (data[IFLA_VXLAN_DF])
conf->df = nla_get_u8(data[IFLA_VXLAN_DF]);
if (data[IFLA_VXLAN_VNIFILTER]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_VNIFILTER,
VXLAN_F_VNIFILTER, changelink, false,
extack);
if (err)
return err;
if ((conf->flags & VXLAN_F_VNIFILTER) &&
!(conf->flags & VXLAN_F_COLLECT_METADATA)) {
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_VNIFILTER],
"vxlan vnifilter only valid in collect metadata mode");
return -EINVAL;
}
}
return 0;
}
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct vxlan_config conf;
int err;
err = vxlan_nl2conf(tb, data, dev, &conf, false, extack);
if (err)
return err;
return __vxlan_dev_create(src_net, dev, &conf, extack);
}
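/* Handle "ip link set ... type vxlan ..." on an existing device. When
 * the default remote address changes, the new all-zeros-MAC FDB entry
 * is installed before the old one is removed (both under the FDB hash
 * lock), and the upper/lower linkage to the lower device is switched
 * over with the netdev_adjacent_change_{prepare,commit,abort} helpers.
 */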
static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct net_device *lowerdev;
struct vxlan_config conf;
struct vxlan_rdst *dst;
int err;
dst = &vxlan->default_dst;
err = vxlan_nl2conf(tb, data, dev, &conf, true, extack);
if (err)
return err;
err = vxlan_config_validate(vxlan->net, &conf, &lowerdev,
vxlan, extack);
if (err)
return err;
if (dst->remote_dev == lowerdev)
lowerdev = NULL;
err = netdev_adjacent_change_prepare(dst->remote_dev, lowerdev, dev,
extack);
if (err)
return err;
/* handle default dst entry */
if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
if (!vxlan_addr_any(&conf.remote_ip)) {
err = vxlan_fdb_update(vxlan, all_zeros_mac,
&conf.remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
NLM_F_APPEND | NLM_F_CREATE,
vxlan->cfg.dst_port,
conf.vni, conf.vni,
conf.remote_ifindex,
NTF_SELF, 0, true, extack);
if (err) {
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
netdev_adjacent_change_abort(dst->remote_dev,
lowerdev, dev);
return err;
}
}
if (!vxlan_addr_any(&dst->remote_ip))
__vxlan_fdb_delete(vxlan, all_zeros_mac,
dst->remote_ip,
vxlan->cfg.dst_port,
dst->remote_vni,
dst->remote_vni,
dst->remote_ifindex,
true);
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
/* If this is a VNI filtering device, also update the FDB
 * entries of all VNIs that were using the default remote IP.
 */
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
err = vxlan_vnilist_update_group(vxlan, &dst->remote_ip,
&conf.remote_ip, extack);
if (err) {
netdev_adjacent_change_abort(dst->remote_dev,
lowerdev, dev);
return err;
}
}
}
if (conf.age_interval != vxlan->cfg.age_interval)
mod_timer(&vxlan->age_timer, jiffies);
netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev);
if (lowerdev && lowerdev != dst->remote_dev)
dst->remote_dev = lowerdev;
vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
return 0;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
vxlan_flush(vxlan, true);
list_del(&vxlan->next);
unregister_netdevice_queue(dev, head);
if (vxlan->default_dst.remote_dev)
netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev);
}
static size_t vxlan_get_size(const struct net_device *dev)
{
return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_DF */
nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LOCALBYPASS */
nla_total_size(0) + /* IFLA_VXLAN_GBP */
nla_total_size(0) + /* IFLA_VXLAN_GPE */
nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_VNIFILTER */
0;
}
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
const struct vxlan_dev *vxlan = netdev_priv(dev);
const struct vxlan_rdst *dst = &vxlan->default_dst;
struct ifla_vxlan_port_range ports = {
.low = htons(vxlan->cfg.port_min),
.high = htons(vxlan->cfg.port_max),
};
if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
goto nla_put_failure;
if (!vxlan_addr_any(&dst->remote_ip)) {
if (dst->remote_ip.sa.sa_family == AF_INET) {
if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
dst->remote_ip.sin.sin_addr.s_addr))
goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
} else {
if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
&dst->remote_ip.sin6.sin6_addr))
goto nla_put_failure;
#endif
}
}
if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
goto nla_put_failure;
if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
vxlan->cfg.saddr.sin.sin_addr.s_addr))
goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
} else {
if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
&vxlan->cfg.saddr.sin6.sin6_addr))
goto nla_put_failure;
#endif
}
}
if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
!!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
!!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
nla_put_u8(skb, IFLA_VXLAN_PROXY,
!!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
nla_put_u8(skb, IFLA_VXLAN_RSC,
!!(vxlan->cfg.flags & VXLAN_F_RSC)) ||
nla_put_u8(skb, IFLA_VXLAN_L2MISS,
!!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_L3MISS,
!!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
!!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) ||
nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
!!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
!!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
!!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
!!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)) ||
nla_put_u8(skb, IFLA_VXLAN_LOCALBYPASS,
!!(vxlan->cfg.flags & VXLAN_F_LOCALBYPASS)))
goto nla_put_failure;
if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
goto nla_put_failure;
if (vxlan->cfg.flags & VXLAN_F_GBP &&
nla_put_flag(skb, IFLA_VXLAN_GBP))
goto nla_put_failure;
if (vxlan->cfg.flags & VXLAN_F_GPE &&
nla_put_flag(skb, IFLA_VXLAN_GPE))
goto nla_put_failure;
if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL &&
nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
goto nla_put_failure;
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER &&
nla_put_u8(skb, IFLA_VXLAN_VNIFILTER,
!!(vxlan->cfg.flags & VXLAN_F_VNIFILTER)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static struct net *vxlan_get_link_net(const struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
return vxlan->net;
}
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
.kind = "vxlan",
.maxtype = IFLA_VXLAN_MAX,
.policy = vxlan_policy,
.priv_size = sizeof(struct vxlan_dev),
.setup = vxlan_setup,
.validate = vxlan_validate,
.newlink = vxlan_newlink,
.changelink = vxlan_changelink,
.dellink = vxlan_dellink,
.get_size = vxlan_get_size,
.fill_info = vxlan_fill_info,
.get_link_net = vxlan_get_link_net,
};
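/* Create a VXLAN device on behalf of another kernel module, typically
 * in collect-metadata mode (Open vSwitch uses this entry point). A
 * rough, illustrative sketch of a caller (names and values are only
 * an example):
 *
 *	struct vxlan_config conf = {
 *		.flags		= VXLAN_F_COLLECT_METADATA,
 *		.dst_port	= htons(IANA_VXLAN_UDP_PORT),
 *	};
 *	struct net_device *dev;
 *
 *	dev = vxlan_dev_create(net, "vxlan_sys_4789", NET_NAME_USER, &conf);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */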
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type,
struct vxlan_config *conf)
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
int err;
memset(&tb, 0, sizeof(tb));
dev = rtnl_create_link(net, name, name_assign_type,
&vxlan_link_ops, tb, NULL);
if (IS_ERR(dev))
return dev;
err = __vxlan_dev_create(net, dev, conf, NULL);
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
}
err = rtnl_configure_link(dev, NULL, 0, NULL);
if (err < 0) {
LIST_HEAD(list_kill);
vxlan_dellink(dev, &list_kill);
unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
}
return dev;
}
EXPORT_SYMBOL_GPL(vxlan_dev_create);
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
struct net_device *dev)
{
struct vxlan_dev *vxlan, *next;
LIST_HEAD(list_kill);
list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
struct vxlan_rdst *dst = &vxlan->default_dst;
/* In case we created the vxlan device with a carrier
 * and we lose the carrier due to a module unload, we
 * also need to remove the vxlan device. In other
 * cases it is not necessary: remote_ifindex is 0 here,
 * so nothing matches.
 */
if (dst->remote_ifindex == dev->ifindex)
vxlan_dellink(vxlan->dev, &list_kill);
}
unregister_netdevice_many(&list_kill);
}
static int vxlan_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
if (event == NETDEV_UNREGISTER)
vxlan_handle_lowerdev_unregister(vn, dev);
else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
vxlan_offload_rx_ports(dev, true);
else if (event == NETDEV_UDP_TUNNEL_DROP_INFO)
vxlan_offload_rx_ports(dev, false);
return NOTIFY_DONE;
}
static struct notifier_block vxlan_notifier_block __read_mostly = {
.notifier_call = vxlan_netdevice_event,
};
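/* Switchdev notification helper: mark the matching remote of an
 * existing FDB entry as offloaded (or not), so that user space can
 * see which entries were programmed into an underlying switch.
 */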
static void
vxlan_fdb_offloaded_set(struct net_device *dev,
struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
u32 hash_index;
hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
goto out;
rdst = vxlan_fdb_find_rdst(f, &fdb_info->remote_ip,
fdb_info->remote_port,
fdb_info->remote_vni,
fdb_info->remote_ifindex);
if (!rdst)
goto out;
rdst->offloaded = fdb_info->offloaded;
out:
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}
static int
vxlan_fdb_external_learn_add(struct net_device *dev,
struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct netlink_ext_ack *extack;
u32 hash_index;
int err;
hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
extack = switchdev_notifier_info_to_extack(&fdb_info->info);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
NUD_REACHABLE,
NLM_F_CREATE | NLM_F_REPLACE,
fdb_info->remote_port,
fdb_info->vni,
fdb_info->remote_vni,
fdb_info->remote_ifindex,
NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
0, false, extack);
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
static int
vxlan_fdb_external_learn_del(struct net_device *dev,
struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
u32 hash_index;
int err = 0;
hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
err = -ENOENT;
else if (f->flags & NTF_EXT_LEARNED)
err = __vxlan_fdb_delete(vxlan, fdb_info->eth_addr,
fdb_info->remote_ip,
fdb_info->remote_port,
fdb_info->vni,
fdb_info->remote_vni,
fdb_info->remote_ifindex,
false);
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
static int vxlan_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct switchdev_notifier_vxlan_fdb_info *fdb_info;
int err = 0;
switch (event) {
case SWITCHDEV_VXLAN_FDB_OFFLOADED:
vxlan_fdb_offloaded_set(dev, ptr);
break;
case SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE:
fdb_info = ptr;
err = vxlan_fdb_external_learn_add(dev, fdb_info);
if (err) {
err = notifier_from_errno(err);
break;
}
fdb_info->offloaded = true;
vxlan_fdb_offloaded_set(dev, fdb_info);
break;
case SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE:
fdb_info = ptr;
err = vxlan_fdb_external_learn_del(dev, fdb_info);
if (err) {
err = notifier_from_errno(err);
break;
}
fdb_info->offloaded = false;
vxlan_fdb_offloaded_set(dev, fdb_info);
break;
}
return err;
}
static struct notifier_block vxlan_switchdev_notifier_block __read_mostly = {
.notifier_call = vxlan_switchdev_event,
};
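/* A nexthop object referenced by FDB entries is being deleted: walk
 * its fdb_list and destroy every VXLAN FDB entry that still points at
 * it.
 */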
static void vxlan_fdb_nh_flush(struct nexthop *nh)
{
struct vxlan_fdb *fdb;
struct vxlan_dev *vxlan;
u32 hash_index;
rcu_read_lock();
list_for_each_entry_rcu(fdb, &nh->fdb_list, nh_list) {
vxlan = rcu_dereference(fdb->vdev);
WARN_ON(!vxlan);
hash_index = fdb_head_index(vxlan, fdb->eth_addr,
vxlan->default_dst.remote_vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
if (!hlist_unhashed(&fdb->hlist))
vxlan_fdb_destroy(vxlan, fdb, false, false);
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}
rcu_read_unlock();
}
static int vxlan_nexthop_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct nh_notifier_info *info = ptr;
struct nexthop *nh;
if (event != NEXTHOP_EVENT_DEL)
return NOTIFY_DONE;
nh = nexthop_find_by_id(info->net, info->id);
if (!nh)
return NOTIFY_DONE;
vxlan_fdb_nh_flush(nh);
return NOTIFY_DONE;
}
static __net_init int vxlan_init_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
unsigned int h;
INIT_LIST_HEAD(&vn->vxlan_list);
spin_lock_init(&vn->sock_lock);
vn->nexthop_notifier_block.notifier_call = vxlan_nexthop_event;
for (h = 0; h < PORT_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vn->sock_list[h]);
return register_nexthop_notifier(net, &vn->nexthop_notifier_block,
NULL);
}
static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan, *next;
struct net_device *dev, *aux;
for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == &vxlan_link_ops)
unregister_netdevice_queue(dev, head);
list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
/* If vxlan->dev is in the same netns, it has already been added
* to the list by the previous loop.
*/
if (!net_eq(dev_net(vxlan->dev), net))
unregister_netdevice_queue(vxlan->dev, head);
}
}
static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
{
struct net *net;
LIST_HEAD(list);
unsigned int h;
list_for_each_entry(net, net_list, exit_list) {
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
}
rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
vxlan_destroy_tunnels(net, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
list_for_each_entry(net, net_list, exit_list) {
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
for (h = 0; h < PORT_HASH_SIZE; ++h)
WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
}
}
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
.exit_batch = vxlan_exit_batch_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
};
static int __init vxlan_init_module(void)
{
int rc;
get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
rc = register_pernet_subsys(&vxlan_net_ops);
if (rc)
goto out1;
rc = register_netdevice_notifier(&vxlan_notifier_block);
if (rc)
goto out2;
rc = register_switchdev_notifier(&vxlan_switchdev_notifier_block);
if (rc)
goto out3;
rc = rtnl_link_register(&vxlan_link_ops);
if (rc)
goto out4;
vxlan_vnifilter_init();
return 0;
out4:
unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
out3:
unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
unregister_pernet_subsys(&vxlan_net_ops);
out1:
return rc;
}
late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
vxlan_vnifilter_uninit();
rtnl_link_unregister(&vxlan_link_ops);
unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
unregister_netdevice_notifier(&vxlan_notifier_block);
unregister_pernet_subsys(&vxlan_net_ops);
/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <[email protected]>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");
| linux-master | drivers/net/vxlan/vxlan_core.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/if_bridge.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rhashtable.h>
#include <linux/rhashtable-types.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/netlink.h>
#include <net/vxlan.h>
#include "vxlan_private.h"
struct vxlan_mdb_entry_key {
union vxlan_addr src;
union vxlan_addr dst;
__be32 vni;
};
struct vxlan_mdb_entry {
struct rhash_head rhnode;
struct list_head remotes;
struct vxlan_mdb_entry_key key;
struct hlist_node mdb_node;
struct rcu_head rcu;
};
#define VXLAN_MDB_REMOTE_F_BLOCKED BIT(0)
struct vxlan_mdb_remote {
struct list_head list;
struct vxlan_rdst __rcu *rd;
u8 flags;
u8 filter_mode;
u8 rt_protocol;
struct hlist_head src_list;
struct rcu_head rcu;
};
#define VXLAN_SGRP_F_DELETE BIT(0)
struct vxlan_mdb_src_entry {
struct hlist_node node;
union vxlan_addr addr;
u8 flags;
};
struct vxlan_mdb_dump_ctx {
long reserved;
long entry_idx;
long remote_idx;
};
struct vxlan_mdb_config_src_entry {
union vxlan_addr addr;
struct list_head node;
};
struct vxlan_mdb_config {
struct vxlan_dev *vxlan;
struct vxlan_mdb_entry_key group;
struct list_head src_list;
union vxlan_addr remote_ip;
u32 remote_ifindex;
__be32 remote_vni;
__be16 remote_port;
u16 nlflags;
u8 flags;
u8 filter_mode;
u8 rt_protocol;
};
static const struct rhashtable_params vxlan_mdb_rht_params = {
.head_offset = offsetof(struct vxlan_mdb_entry, rhnode),
.key_offset = offsetof(struct vxlan_mdb_entry, key),
.key_len = sizeof(struct vxlan_mdb_entry_key),
.automatic_shrinking = true,
};
static int __vxlan_mdb_add(const struct vxlan_mdb_config *cfg,
struct netlink_ext_ack *extack);
static int __vxlan_mdb_del(const struct vxlan_mdb_config *cfg,
struct netlink_ext_ack *extack);
static void vxlan_br_mdb_entry_fill(const struct vxlan_dev *vxlan,
const struct vxlan_mdb_entry *mdb_entry,
const struct vxlan_mdb_remote *remote,
struct br_mdb_entry *e)
{
const union vxlan_addr *dst = &mdb_entry->key.dst;
memset(e, 0, sizeof(*e));
e->ifindex = vxlan->dev->ifindex;
e->state = MDB_PERMANENT;
if (remote->flags & VXLAN_MDB_REMOTE_F_BLOCKED)
e->flags |= MDB_FLAGS_BLOCKED;
switch (dst->sa.sa_family) {
case AF_INET:
e->addr.u.ip4 = dst->sin.sin_addr.s_addr;
e->addr.proto = htons(ETH_P_IP);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
e->addr.u.ip6 = dst->sin6.sin6_addr;
e->addr.proto = htons(ETH_P_IPV6);
break;
#endif
}
}
static int vxlan_mdb_entry_info_fill_srcs(struct sk_buff *skb,
const struct vxlan_mdb_remote *remote)
{
struct vxlan_mdb_src_entry *ent;
struct nlattr *nest;
if (hlist_empty(&remote->src_list))
return 0;
nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
if (!nest)
return -EMSGSIZE;
hlist_for_each_entry(ent, &remote->src_list, node) {
struct nlattr *nest_ent;
nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
if (!nest_ent)
goto out_cancel_err;
if (vxlan_nla_put_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
&ent->addr) ||
nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER, 0))
goto out_cancel_err;
nla_nest_end(skb, nest_ent);
}
nla_nest_end(skb, nest);
return 0;
out_cancel_err:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
static int vxlan_mdb_entry_info_fill(const struct vxlan_dev *vxlan,
struct sk_buff *skb,
const struct vxlan_mdb_entry *mdb_entry,
const struct vxlan_mdb_remote *remote)
{
struct vxlan_rdst *rd = rtnl_dereference(remote->rd);
struct br_mdb_entry e;
struct nlattr *nest;
nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY_INFO);
if (!nest)
return -EMSGSIZE;
vxlan_br_mdb_entry_fill(vxlan, mdb_entry, remote, &e);
if (nla_put_nohdr(skb, sizeof(e), &e) ||
nla_put_u32(skb, MDBA_MDB_EATTR_TIMER, 0))
goto nest_err;
if (!vxlan_addr_any(&mdb_entry->key.src) &&
vxlan_nla_put_addr(skb, MDBA_MDB_EATTR_SOURCE, &mdb_entry->key.src))
goto nest_err;
if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, remote->rt_protocol) ||
nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE, remote->filter_mode) ||
vxlan_mdb_entry_info_fill_srcs(skb, remote) ||
vxlan_nla_put_addr(skb, MDBA_MDB_EATTR_DST, &rd->remote_ip))
goto nest_err;
if (rd->remote_port && rd->remote_port != vxlan->cfg.dst_port &&
nla_put_u16(skb, MDBA_MDB_EATTR_DST_PORT,
be16_to_cpu(rd->remote_port)))
goto nest_err;
if (rd->remote_vni != vxlan->default_dst.remote_vni &&
nla_put_u32(skb, MDBA_MDB_EATTR_VNI, be32_to_cpu(rd->remote_vni)))
goto nest_err;
if (rd->remote_ifindex &&
nla_put_u32(skb, MDBA_MDB_EATTR_IFINDEX, rd->remote_ifindex))
goto nest_err;
if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) &&
mdb_entry->key.vni && nla_put_u32(skb, MDBA_MDB_EATTR_SRC_VNI,
be32_to_cpu(mdb_entry->key.vni)))
goto nest_err;
nla_nest_end(skb, nest);
return 0;
nest_err:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
static int vxlan_mdb_entry_fill(const struct vxlan_dev *vxlan,
struct sk_buff *skb,
struct vxlan_mdb_dump_ctx *ctx,
const struct vxlan_mdb_entry *mdb_entry)
{
int remote_idx = 0, s_remote_idx = ctx->remote_idx;
struct vxlan_mdb_remote *remote;
struct nlattr *nest;
int err = 0;
nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
if (!nest)
return -EMSGSIZE;
list_for_each_entry(remote, &mdb_entry->remotes, list) {
if (remote_idx < s_remote_idx)
goto skip;
err = vxlan_mdb_entry_info_fill(vxlan, skb, mdb_entry, remote);
if (err)
break;
skip:
remote_idx++;
}
ctx->remote_idx = err ? remote_idx : 0;
nla_nest_end(skb, nest);
return err;
}
static int vxlan_mdb_fill(const struct vxlan_dev *vxlan, struct sk_buff *skb,
struct vxlan_mdb_dump_ctx *ctx)
{
int entry_idx = 0, s_entry_idx = ctx->entry_idx;
struct vxlan_mdb_entry *mdb_entry;
struct nlattr *nest;
int err = 0;
nest = nla_nest_start_noflag(skb, MDBA_MDB);
if (!nest)
return -EMSGSIZE;
hlist_for_each_entry(mdb_entry, &vxlan->mdb_list, mdb_node) {
if (entry_idx < s_entry_idx)
goto skip;
err = vxlan_mdb_entry_fill(vxlan, skb, ctx, mdb_entry);
if (err)
break;
skip:
entry_idx++;
}
ctx->entry_idx = err ? entry_idx : 0;
nla_nest_end(skb, nest);
return err;
}
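/* RTM_GETMDB dump handler. A dump may span several netlink messages;
 * the current position (entry index and per-entry remote index) is
 * kept in the netlink_callback context so that the next call resumes
 * where the previous buffer filled up.
 */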
int vxlan_mdb_dump(struct net_device *dev, struct sk_buff *skb,
struct netlink_callback *cb)
{
struct vxlan_mdb_dump_ctx *ctx = (void *)cb->ctx;
struct vxlan_dev *vxlan = netdev_priv(dev);
struct br_port_msg *bpm;
struct nlmsghdr *nlh;
int err;
ASSERT_RTNL();
NL_ASSERT_DUMP_CTX_FITS(struct vxlan_mdb_dump_ctx);
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, RTM_NEWMDB, sizeof(*bpm),
NLM_F_MULTI);
if (!nlh)
return -EMSGSIZE;
bpm = nlmsg_data(nlh);
memset(bpm, 0, sizeof(*bpm));
bpm->family = AF_BRIDGE;
bpm->ifindex = dev->ifindex;
err = vxlan_mdb_fill(vxlan, skb, ctx);
nlmsg_end(skb, nlh);
cb->seq = vxlan->mdb_seq;
nl_dump_check_consistent(cb, nlh);
return err;
}
static const struct nla_policy
vxlan_mdbe_src_list_entry_pol[MDBE_SRCATTR_MAX + 1] = {
[MDBE_SRCATTR_ADDRESS] = NLA_POLICY_RANGE(NLA_BINARY,
sizeof(struct in_addr),
sizeof(struct in6_addr)),
};
static const struct nla_policy
vxlan_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = {
[MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(vxlan_mdbe_src_list_entry_pol),
};
static struct netlink_range_validation vni_range = {
.max = VXLAN_N_VID - 1,
};
static const struct nla_policy vxlan_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
sizeof(struct in_addr),
sizeof(struct in6_addr)),
[MDBE_ATTR_GROUP_MODE] = NLA_POLICY_RANGE(NLA_U8, MCAST_EXCLUDE,
MCAST_INCLUDE),
[MDBE_ATTR_SRC_LIST] = NLA_POLICY_NESTED(vxlan_mdbe_src_list_pol),
[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
[MDBE_ATTR_DST] = NLA_POLICY_RANGE(NLA_BINARY,
sizeof(struct in_addr),
sizeof(struct in6_addr)),
[MDBE_ATTR_DST_PORT] = { .type = NLA_U16 },
[MDBE_ATTR_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range),
[MDBE_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
[MDBE_ATTR_SRC_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range),
};
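/* As a rough, illustrative example (exact bridge(8) syntax may vary),
 * a request such as
 *	bridge mdb add dev vxlan0 port vxlan0 grp 239.1.1.1 permanent \
 *		dst 198.51.100.1 dst_port 4789 vni 100
 * is parsed against the policies above: the group address lands in the
 * br_mdb_entry, while the remote destination, port and VNI arrive as
 * MDBE_ATTR_* attributes.
 */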
static bool vxlan_mdb_is_valid_source(const struct nlattr *attr, __be16 proto,
struct netlink_ext_ack *extack)
{
switch (proto) {
case htons(ETH_P_IP):
if (nla_len(attr) != sizeof(struct in_addr)) {
NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
return false;
}
if (ipv4_is_multicast(nla_get_in_addr(attr))) {
NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
return false;
}
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6): {
struct in6_addr src;
if (nla_len(attr) != sizeof(struct in6_addr)) {
NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
return false;
}
src = nla_get_in6_addr(attr);
if (ipv6_addr_is_multicast(&src)) {
NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
return false;
}
break;
}
#endif
default:
NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
return false;
}
return true;
}
static void vxlan_mdb_config_group_set(struct vxlan_mdb_config *cfg,
const struct br_mdb_entry *entry,
const struct nlattr *source_attr)
{
struct vxlan_mdb_entry_key *group = &cfg->group;
switch (entry->addr.proto) {
case htons(ETH_P_IP):
group->dst.sa.sa_family = AF_INET;
group->dst.sin.sin_addr.s_addr = entry->addr.u.ip4;
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
group->dst.sa.sa_family = AF_INET6;
group->dst.sin6.sin6_addr = entry->addr.u.ip6;
break;
#endif
}
if (source_attr)
vxlan_nla_get_addr(&group->src, source_attr);
}
static bool vxlan_mdb_is_star_g(const struct vxlan_mdb_entry_key *group)
{
return !vxlan_addr_any(&group->dst) && vxlan_addr_any(&group->src);
}
static bool vxlan_mdb_is_sg(const struct vxlan_mdb_entry_key *group)
{
return !vxlan_addr_any(&group->dst) && !vxlan_addr_any(&group->src);
}
static int vxlan_mdb_config_src_entry_init(struct vxlan_mdb_config *cfg,
__be16 proto,
const struct nlattr *src_entry,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[MDBE_SRCATTR_MAX + 1];
struct vxlan_mdb_config_src_entry *src;
int err;
err = nla_parse_nested(tb, MDBE_SRCATTR_MAX, src_entry,
vxlan_mdbe_src_list_entry_pol, extack);
if (err)
return err;
if (NL_REQ_ATTR_CHECK(extack, src_entry, tb, MDBE_SRCATTR_ADDRESS))
return -EINVAL;
if (!vxlan_mdb_is_valid_source(tb[MDBE_SRCATTR_ADDRESS], proto,
extack))
return -EINVAL;
src = kzalloc(sizeof(*src), GFP_KERNEL);
if (!src)
return -ENOMEM;
err = vxlan_nla_get_addr(&src->addr, tb[MDBE_SRCATTR_ADDRESS]);
if (err)
goto err_free_src;
list_add_tail(&src->node, &cfg->src_list);
return 0;
err_free_src:
kfree(src);
return err;
}
static void
vxlan_mdb_config_src_entry_fini(struct vxlan_mdb_config_src_entry *src)
{
list_del(&src->node);
kfree(src);
}
static int vxlan_mdb_config_src_list_init(struct vxlan_mdb_config *cfg,
__be16 proto,
const struct nlattr *src_list,
struct netlink_ext_ack *extack)
{
struct vxlan_mdb_config_src_entry *src, *tmp;
struct nlattr *src_entry;
int rem, err;
nla_for_each_nested(src_entry, src_list, rem) {
err = vxlan_mdb_config_src_entry_init(cfg, proto, src_entry,
extack);
if (err)
goto err_src_entry_init;
}
return 0;
err_src_entry_init:
list_for_each_entry_safe_reverse(src, tmp, &cfg->src_list, node)
vxlan_mdb_config_src_entry_fini(src);
return err;
}
static void vxlan_mdb_config_src_list_fini(struct vxlan_mdb_config *cfg)
{
struct vxlan_mdb_config_src_entry *src, *tmp;
list_for_each_entry_safe_reverse(src, tmp, &cfg->src_list, node)
vxlan_mdb_config_src_entry_fini(src);
}
static int vxlan_mdb_config_attrs_init(struct vxlan_mdb_config *cfg,
const struct br_mdb_entry *entry,
const struct nlattr *set_attrs,
struct netlink_ext_ack *extack)
{
struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
int err;
err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX, set_attrs,
vxlan_mdbe_attrs_pol, extack);
if (err)
return err;
if (NL_REQ_ATTR_CHECK(extack, set_attrs, mdbe_attrs, MDBE_ATTR_DST)) {
NL_SET_ERR_MSG_MOD(extack, "Missing remote destination IP address");
return -EINVAL;
}
if (mdbe_attrs[MDBE_ATTR_SOURCE] &&
!vxlan_mdb_is_valid_source(mdbe_attrs[MDBE_ATTR_SOURCE],
entry->addr.proto, extack))
return -EINVAL;
vxlan_mdb_config_group_set(cfg, entry, mdbe_attrs[MDBE_ATTR_SOURCE]);
/* rtnetlink code only validates that IPv4 group address is
* multicast.
*/
if (!vxlan_addr_is_multicast(&cfg->group.dst) &&
!vxlan_addr_any(&cfg->group.dst)) {
NL_SET_ERR_MSG_MOD(extack, "Group address is not multicast");
return -EINVAL;
}
if (vxlan_addr_any(&cfg->group.dst) &&
mdbe_attrs[MDBE_ATTR_SOURCE]) {
NL_SET_ERR_MSG_MOD(extack, "Source cannot be specified for the all-zeros entry");
return -EINVAL;
}
if (vxlan_mdb_is_sg(&cfg->group))
cfg->filter_mode = MCAST_INCLUDE;
if (mdbe_attrs[MDBE_ATTR_GROUP_MODE]) {
if (!vxlan_mdb_is_star_g(&cfg->group)) {
NL_SET_ERR_MSG_MOD(extack, "Filter mode can only be set for (*, G) entries");
return -EINVAL;
}
cfg->filter_mode = nla_get_u8(mdbe_attrs[MDBE_ATTR_GROUP_MODE]);
}
if (mdbe_attrs[MDBE_ATTR_SRC_LIST]) {
if (!vxlan_mdb_is_star_g(&cfg->group)) {
NL_SET_ERR_MSG_MOD(extack, "Source list can only be set for (*, G) entries");
return -EINVAL;
}
if (!mdbe_attrs[MDBE_ATTR_GROUP_MODE]) {
NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set without filter mode");
return -EINVAL;
}
err = vxlan_mdb_config_src_list_init(cfg, entry->addr.proto,
mdbe_attrs[MDBE_ATTR_SRC_LIST],
extack);
if (err)
return err;
}
if (vxlan_mdb_is_star_g(&cfg->group) && list_empty(&cfg->src_list) &&
cfg->filter_mode == MCAST_INCLUDE) {
NL_SET_ERR_MSG_MOD(extack, "Cannot add (*, G) INCLUDE with an empty source list");
return -EINVAL;
}
if (mdbe_attrs[MDBE_ATTR_RTPROT])
cfg->rt_protocol = nla_get_u8(mdbe_attrs[MDBE_ATTR_RTPROT]);
err = vxlan_nla_get_addr(&cfg->remote_ip, mdbe_attrs[MDBE_ATTR_DST]);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Invalid remote destination address");
goto err_src_list_fini;
}
if (mdbe_attrs[MDBE_ATTR_DST_PORT])
cfg->remote_port =
cpu_to_be16(nla_get_u16(mdbe_attrs[MDBE_ATTR_DST_PORT]));
if (mdbe_attrs[MDBE_ATTR_VNI])
cfg->remote_vni =
cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_VNI]));
if (mdbe_attrs[MDBE_ATTR_IFINDEX]) {
cfg->remote_ifindex =
nla_get_s32(mdbe_attrs[MDBE_ATTR_IFINDEX]);
if (!__dev_get_by_index(cfg->vxlan->net, cfg->remote_ifindex)) {
NL_SET_ERR_MSG_MOD(extack, "Outgoing interface not found");
err = -EINVAL;
goto err_src_list_fini;
}
}
if (mdbe_attrs[MDBE_ATTR_SRC_VNI])
cfg->group.vni =
cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_SRC_VNI]));
return 0;
err_src_list_fini:
vxlan_mdb_config_src_list_fini(cfg);
return err;
}
static int vxlan_mdb_config_init(struct vxlan_mdb_config *cfg,
struct net_device *dev, struct nlattr *tb[],
u16 nlmsg_flags,
struct netlink_ext_ack *extack)
{
struct br_mdb_entry *entry = nla_data(tb[MDBA_SET_ENTRY]);
struct vxlan_dev *vxlan = netdev_priv(dev);
memset(cfg, 0, sizeof(*cfg));
cfg->vxlan = vxlan;
cfg->group.vni = vxlan->default_dst.remote_vni;
INIT_LIST_HEAD(&cfg->src_list);
cfg->nlflags = nlmsg_flags;
cfg->filter_mode = MCAST_EXCLUDE;
cfg->rt_protocol = RTPROT_STATIC;
cfg->remote_vni = vxlan->default_dst.remote_vni;
cfg->remote_port = vxlan->cfg.dst_port;
if (entry->ifindex != dev->ifindex) {
NL_SET_ERR_MSG_MOD(extack, "Port net device must be the VXLAN net device");
return -EINVAL;
}
/* State is not part of the entry key and can be ignored on deletion
* requests.
*/
if ((nlmsg_flags & (NLM_F_CREATE | NLM_F_REPLACE)) &&
entry->state != MDB_PERMANENT) {
NL_SET_ERR_MSG_MOD(extack, "MDB entry must be permanent");
return -EINVAL;
}
if (entry->flags) {
NL_SET_ERR_MSG_MOD(extack, "Invalid MDB entry flags");
return -EINVAL;
}
if (entry->vid) {
NL_SET_ERR_MSG_MOD(extack, "VID must not be specified");
return -EINVAL;
}
if (entry->addr.proto != htons(ETH_P_IP) &&
entry->addr.proto != htons(ETH_P_IPV6)) {
NL_SET_ERR_MSG_MOD(extack, "Group address must be an IPv4 / IPv6 address");
return -EINVAL;
}
if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY_ATTRS)) {
NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY_ATTRS attribute");
return -EINVAL;
}
return vxlan_mdb_config_attrs_init(cfg, entry, tb[MDBA_SET_ENTRY_ATTRS],
extack);
}
static void vxlan_mdb_config_fini(struct vxlan_mdb_config *cfg)
{
vxlan_mdb_config_src_list_fini(cfg);
}
static struct vxlan_mdb_entry *
vxlan_mdb_entry_lookup(struct vxlan_dev *vxlan,
const struct vxlan_mdb_entry_key *group)
{
return rhashtable_lookup_fast(&vxlan->mdb_tbl, group,
vxlan_mdb_rht_params);
}
static struct vxlan_mdb_remote *
vxlan_mdb_remote_lookup(const struct vxlan_mdb_entry *mdb_entry,
const union vxlan_addr *addr)
{
struct vxlan_mdb_remote *remote;
list_for_each_entry(remote, &mdb_entry->remotes, list) {
struct vxlan_rdst *rd = rtnl_dereference(remote->rd);
if (vxlan_addr_equal(addr, &rd->remote_ip))
return remote;
}
return NULL;
}
static void vxlan_mdb_rdst_free(struct rcu_head *head)
{
struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
dst_cache_destroy(&rd->dst_cache);
kfree(rd);
}
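/* Allocate an RCU-protected remote destination (rdst) and populate it from
 * the user-supplied configuration. The data path dereferences it when
 * replicating packets to this remote.
 */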
static int vxlan_mdb_remote_rdst_init(const struct vxlan_mdb_config *cfg,
struct vxlan_mdb_remote *remote)
{
struct vxlan_rdst *rd;
int err;
rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return -ENOMEM;
err = dst_cache_init(&rd->dst_cache, GFP_KERNEL);
if (err)
goto err_free_rdst;
rd->remote_ip = cfg->remote_ip;
rd->remote_port = cfg->remote_port;
rd->remote_vni = cfg->remote_vni;
rd->remote_ifindex = cfg->remote_ifindex;
rcu_assign_pointer(remote->rd, rd);
return 0;
err_free_rdst:
kfree(rd);
return err;
}
static void vxlan_mdb_remote_rdst_fini(struct vxlan_rdst *rd)
{
call_rcu(&rd->rcu, vxlan_mdb_rdst_free);
}
static int vxlan_mdb_remote_init(const struct vxlan_mdb_config *cfg,
struct vxlan_mdb_remote *remote)
{
int err;
err = vxlan_mdb_remote_rdst_init(cfg, remote);
if (err)
return err;
remote->flags = cfg->flags;
remote->filter_mode = cfg->filter_mode;
remote->rt_protocol = cfg->rt_protocol;
INIT_HLIST_HEAD(&remote->src_list);
return 0;
}
static void vxlan_mdb_remote_fini(struct vxlan_dev *vxlan,
struct vxlan_mdb_remote *remote)
{
WARN_ON_ONCE(!hlist_empty(&remote->src_list));
vxlan_mdb_remote_rdst_fini(rtnl_dereference(remote->rd));
}
static struct vxlan_mdb_src_entry *
vxlan_mdb_remote_src_entry_lookup(const struct vxlan_mdb_remote *remote,
const union vxlan_addr *addr)
{
struct vxlan_mdb_src_entry *ent;
hlist_for_each_entry(ent, &remote->src_list, node) {
if (vxlan_addr_equal(&ent->addr, addr))
return ent;
}
return NULL;
}
static struct vxlan_mdb_src_entry *
vxlan_mdb_remote_src_entry_add(struct vxlan_mdb_remote *remote,
const union vxlan_addr *addr)
{
struct vxlan_mdb_src_entry *ent;
ent = kzalloc(sizeof(*ent), GFP_KERNEL);
if (!ent)
return NULL;
ent->addr = *addr;
hlist_add_head(&ent->node, &remote->src_list);
return ent;
}
static void
vxlan_mdb_remote_src_entry_del(struct vxlan_mdb_src_entry *ent)
{
hlist_del(&ent->node);
kfree(ent);
}
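/* Each source address of a (*, G) entry is also installed as an (S, G)
 * entry pointing to the same remote, so that the data path can match on
 * the source. With an EXCLUDE filter mode the (S, G) entry is marked as
 * blocked, which suppresses forwarding to this remote for that source.
 */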
static int
vxlan_mdb_remote_src_fwd_add(const struct vxlan_mdb_config *cfg,
const union vxlan_addr *addr,
struct netlink_ext_ack *extack)
{
struct vxlan_mdb_config sg_cfg;
memset(&sg_cfg, 0, sizeof(sg_cfg));
sg_cfg.vxlan = cfg->vxlan;
sg_cfg.group.src = *addr;
sg_cfg.group.dst = cfg->group.dst;
sg_cfg.group.vni = cfg->group.vni;
INIT_LIST_HEAD(&sg_cfg.src_list);
sg_cfg.remote_ip = cfg->remote_ip;
sg_cfg.remote_ifindex = cfg->remote_ifindex;
sg_cfg.remote_vni = cfg->remote_vni;
sg_cfg.remote_port = cfg->remote_port;
sg_cfg.nlflags = cfg->nlflags;
sg_cfg.filter_mode = MCAST_INCLUDE;
if (cfg->filter_mode == MCAST_EXCLUDE)
sg_cfg.flags = VXLAN_MDB_REMOTE_F_BLOCKED;
sg_cfg.rt_protocol = cfg->rt_protocol;
return __vxlan_mdb_add(&sg_cfg, extack);
}
static void
vxlan_mdb_remote_src_fwd_del(struct vxlan_dev *vxlan,
const struct vxlan_mdb_entry_key *group,
const struct vxlan_mdb_remote *remote,
const union vxlan_addr *addr)
{
struct vxlan_rdst *rd = rtnl_dereference(remote->rd);
struct vxlan_mdb_config sg_cfg;
memset(&sg_cfg, 0, sizeof(sg_cfg));
sg_cfg.vxlan = vxlan;
sg_cfg.group.src = *addr;
sg_cfg.group.dst = group->dst;
sg_cfg.group.vni = group->vni;
INIT_LIST_HEAD(&sg_cfg.src_list);
sg_cfg.remote_ip = rd->remote_ip;
__vxlan_mdb_del(&sg_cfg, NULL);
}
static int
vxlan_mdb_remote_src_add(const struct vxlan_mdb_config *cfg,
struct vxlan_mdb_remote *remote,
const struct vxlan_mdb_config_src_entry *src,
struct netlink_ext_ack *extack)
{
struct vxlan_mdb_src_entry *ent;
int err;
ent = vxlan_mdb_remote_src_entry_lookup(remote, &src->addr);
if (!ent) {
ent = vxlan_mdb_remote_src_entry_add(remote, &src->addr);
if (!ent)
return -ENOMEM;
} else if (!(cfg->nlflags & NLM_F_REPLACE)) {
NL_SET_ERR_MSG_MOD(extack, "Source entry already exists");
return -EEXIST;
}
err = vxlan_mdb_remote_src_fwd_add(cfg, &ent->addr, extack);
if (err)
goto err_src_del;
/* Clear flags in case source entry was marked for deletion as part of
* replace flow.
*/
ent->flags = 0;
return 0;
err_src_del:
vxlan_mdb_remote_src_entry_del(ent);
return err;
}
static void vxlan_mdb_remote_src_del(struct vxlan_dev *vxlan,
const struct vxlan_mdb_entry_key *group,
const struct vxlan_mdb_remote *remote,
struct vxlan_mdb_src_entry *ent)
{
vxlan_mdb_remote_src_fwd_del(vxlan, group, remote, &ent->addr);
vxlan_mdb_remote_src_entry_del(ent);
}
static int vxlan_mdb_remote_srcs_add(const struct vxlan_mdb_config *cfg,
struct vxlan_mdb_remote *remote,
struct netlink_ext_ack *extack)
{
struct vxlan_mdb_config_src_entry *src;
struct vxlan_mdb_src_entry *ent;
struct hlist_node *tmp;
int err;
list_for_each_entry(src, &cfg->src_list, node) {
err = vxlan_mdb_remote_src_add(cfg, remote, src, extack);
if (err)
goto err_src_del;
}
return 0;
err_src_del:
hlist_for_each_entry_safe(ent, tmp, &remote->src_list, node)
vxlan_mdb_remote_src_del(cfg->vxlan, &cfg->group, remote, ent);
return err;
}
static void vxlan_mdb_remote_srcs_del(struct vxlan_dev *vxlan,
const struct vxlan_mdb_entry_key *group,
struct vxlan_mdb_remote *remote)
{
struct vxlan_mdb_src_entry *ent;
struct hlist_node *tmp;
hlist_for_each_entry_safe(ent, tmp, &remote->src_list, node)
vxlan_mdb_remote_src_del(vxlan, group, remote, ent);
}
static size_t
vxlan_mdb_nlmsg_src_list_size(const struct vxlan_mdb_entry_key *group,
const struct vxlan_mdb_remote *remote)
{
struct vxlan_mdb_src_entry *ent;
size_t nlmsg_size;
if (hlist_empty(&remote->src_list))
return 0;
/* MDBA_MDB_EATTR_SRC_LIST */
nlmsg_size = nla_total_size(0);
hlist_for_each_entry(ent, &remote->src_list, node) {
/* MDBA_MDB_SRCLIST_ENTRY */
nlmsg_size += nla_total_size(0) +
/* MDBA_MDB_SRCATTR_ADDRESS */
nla_total_size(vxlan_addr_size(&group->dst)) +
/* MDBA_MDB_SRCATTR_TIMER */
nla_total_size(sizeof(u8));
}
return nlmsg_size;
}
static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan,
const struct vxlan_mdb_entry *mdb_entry,
const struct vxlan_mdb_remote *remote)
{
const struct vxlan_mdb_entry_key *group = &mdb_entry->key;
struct vxlan_rdst *rd = rtnl_dereference(remote->rd);
size_t nlmsg_size;
nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
/* MDBA_MDB */
nla_total_size(0) +
/* MDBA_MDB_ENTRY */
nla_total_size(0) +
/* MDBA_MDB_ENTRY_INFO */
nla_total_size(sizeof(struct br_mdb_entry)) +
/* MDBA_MDB_EATTR_TIMER */
nla_total_size(sizeof(u32));
/* MDBA_MDB_EATTR_SOURCE */
if (vxlan_mdb_is_sg(group))
nlmsg_size += nla_total_size(vxlan_addr_size(&group->dst));
/* MDBA_MDB_EATTR_RTPROT */
nlmsg_size += nla_total_size(sizeof(u8));
/* MDBA_MDB_EATTR_SRC_LIST */
nlmsg_size += vxlan_mdb_nlmsg_src_list_size(group, remote);
/* MDBA_MDB_EATTR_GROUP_MODE */
nlmsg_size += nla_total_size(sizeof(u8));
/* MDBA_MDB_EATTR_DST */
nlmsg_size += nla_total_size(vxlan_addr_size(&rd->remote_ip));
/* MDBA_MDB_EATTR_DST_PORT */
if (rd->remote_port && rd->remote_port != vxlan->cfg.dst_port)
nlmsg_size += nla_total_size(sizeof(u16));
/* MDBA_MDB_EATTR_VNI */
if (rd->remote_vni != vxlan->default_dst.remote_vni)
nlmsg_size += nla_total_size(sizeof(u32));
/* MDBA_MDB_EATTR_IFINDEX */
if (rd->remote_ifindex)
nlmsg_size += nla_total_size(sizeof(u32));
/* MDBA_MDB_EATTR_SRC_VNI */
if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && group->vni)
nlmsg_size += nla_total_size(sizeof(u32));
return nlmsg_size;
}
static int vxlan_mdb_nlmsg_fill(const struct vxlan_dev *vxlan,
struct sk_buff *skb,
const struct vxlan_mdb_entry *mdb_entry,
const struct vxlan_mdb_remote *remote,
int type)
{
struct nlattr *mdb_nest, *mdb_entry_nest;
struct br_port_msg *bpm;
struct nlmsghdr *nlh;
nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
if (!nlh)
return -EMSGSIZE;
bpm = nlmsg_data(nlh);
memset(bpm, 0, sizeof(*bpm));
bpm->family = AF_BRIDGE;
bpm->ifindex = vxlan->dev->ifindex;
mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB);
if (!mdb_nest)
goto cancel;
mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
if (!mdb_entry_nest)
goto cancel;
if (vxlan_mdb_entry_info_fill(vxlan, skb, mdb_entry, remote))
goto cancel;
nla_nest_end(skb, mdb_entry_nest);
nla_nest_end(skb, mdb_nest);
nlmsg_end(skb, nlh);
return 0;
cancel:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static void vxlan_mdb_remote_notify(const struct vxlan_dev *vxlan,
const struct vxlan_mdb_entry *mdb_entry,
const struct vxlan_mdb_remote *remote,
int type)
{
struct net *net = dev_net(vxlan->dev);
struct sk_buff *skb;
int err = -ENOBUFS;
skb = nlmsg_new(vxlan_mdb_nlmsg_size(vxlan, mdb_entry, remote),
GFP_KERNEL);
if (!skb)
goto errout;
err = vxlan_mdb_nlmsg_fill(vxlan, skb, mdb_entry, remote, type);
if (err) {
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_KERNEL);
return;
errout:
rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
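/* Replace the remote's source list: mark all current source entries for
 * deletion, add the new sources (which clears the mark on entries that are
 * kept) and delete whatever is still marked. On failure the marks are
 * cleared so that the old state is preserved.
 */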
static int
vxlan_mdb_remote_srcs_replace(const struct vxlan_mdb_config *cfg,
const struct vxlan_mdb_entry *mdb_entry,
struct vxlan_mdb_remote *remote,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = cfg->vxlan;
struct vxlan_mdb_src_entry *ent;
struct hlist_node *tmp;
int err;
hlist_for_each_entry(ent, &remote->src_list, node)
ent->flags |= VXLAN_SGRP_F_DELETE;
err = vxlan_mdb_remote_srcs_add(cfg, remote, extack);
if (err)
goto err_clear_delete;
hlist_for_each_entry_safe(ent, tmp, &remote->src_list, node) {
if (ent->flags & VXLAN_SGRP_F_DELETE)
vxlan_mdb_remote_src_del(vxlan, &mdb_entry->key, remote,
ent);
}
return 0;
err_clear_delete:
hlist_for_each_entry(ent, &remote->src_list, node)
ent->flags &= ~VXLAN_SGRP_F_DELETE;
return err;
}
static int vxlan_mdb_remote_replace(const struct vxlan_mdb_config *cfg,
const struct vxlan_mdb_entry *mdb_entry,
struct vxlan_mdb_remote *remote,
struct netlink_ext_ack *extack)
{
struct vxlan_rdst *new_rd, *old_rd = rtnl_dereference(remote->rd);
struct vxlan_dev *vxlan = cfg->vxlan;
int err;
err = vxlan_mdb_remote_rdst_init(cfg, remote);
if (err)
return err;
new_rd = rtnl_dereference(remote->rd);
err = vxlan_mdb_remote_srcs_replace(cfg, mdb_entry, remote, extack);
if (err)
goto err_rdst_reset;
WRITE_ONCE(remote->flags, cfg->flags);
WRITE_ONCE(remote->filter_mode, cfg->filter_mode);
remote->rt_protocol = cfg->rt_protocol;
vxlan_mdb_remote_notify(vxlan, mdb_entry, remote, RTM_NEWMDB);
vxlan_mdb_remote_rdst_fini(old_rd);
return 0;
err_rdst_reset:
rcu_assign_pointer(remote->rd, old_rd);
vxlan_mdb_remote_rdst_fini(new_rd);
return err;
}
static int vxlan_mdb_remote_add(const struct vxlan_mdb_config *cfg,
struct vxlan_mdb_entry *mdb_entry,
struct netlink_ext_ack *extack)
{
struct vxlan_mdb_remote *remote;
int err;
remote = vxlan_mdb_remote_lookup(mdb_entry, &cfg->remote_ip);
if (remote) {
if (!(cfg->nlflags & NLM_F_REPLACE)) {
NL_SET_ERR_MSG_MOD(extack, "Replace not specified and MDB remote entry already exists");
return -EEXIST;
}
return vxlan_mdb_remote_replace(cfg, mdb_entry, remote, extack);
}
if (!(cfg->nlflags & NLM_F_CREATE)) {
NL_SET_ERR_MSG_MOD(extack, "Create not specified and entry does not exist");
return -ENOENT;
}
remote = kzalloc(sizeof(*remote), GFP_KERNEL);
if (!remote)
return -ENOMEM;
err = vxlan_mdb_remote_init(cfg, remote);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to initialize remote MDB entry");
goto err_free_remote;
}
err = vxlan_mdb_remote_srcs_add(cfg, remote, extack);
if (err)
goto err_remote_fini;
list_add_rcu(&remote->list, &mdb_entry->remotes);
vxlan_mdb_remote_notify(cfg->vxlan, mdb_entry, remote, RTM_NEWMDB);
return 0;
err_remote_fini:
vxlan_mdb_remote_fini(cfg->vxlan, remote);
err_free_remote:
kfree(remote);
return err;
}
static void vxlan_mdb_remote_del(struct vxlan_dev *vxlan,
struct vxlan_mdb_entry *mdb_entry,
struct vxlan_mdb_remote *remote)
{
vxlan_mdb_remote_notify(vxlan, mdb_entry, remote, RTM_DELMDB);
list_del_rcu(&remote->list);
vxlan_mdb_remote_srcs_del(vxlan, &mdb_entry->key, remote);
vxlan_mdb_remote_fini(vxlan, remote);
kfree_rcu(remote, rcu);
}
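/* Find the MDB entry for the given group key or create a new one and link
 * it into the rhashtable and the mdb_list. The VXLAN_F_MDB device flag is
 * set when the first entry is added and cleared again when the last one is
 * removed.
 */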
static struct vxlan_mdb_entry *
vxlan_mdb_entry_get(struct vxlan_dev *vxlan,
const struct vxlan_mdb_entry_key *group)
{
struct vxlan_mdb_entry *mdb_entry;
int err;
mdb_entry = vxlan_mdb_entry_lookup(vxlan, group);
if (mdb_entry)
return mdb_entry;
mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
if (!mdb_entry)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&mdb_entry->remotes);
memcpy(&mdb_entry->key, group, sizeof(mdb_entry->key));
hlist_add_head(&mdb_entry->mdb_node, &vxlan->mdb_list);
err = rhashtable_lookup_insert_fast(&vxlan->mdb_tbl,
&mdb_entry->rhnode,
vxlan_mdb_rht_params);
if (err)
goto err_free_entry;
if (hlist_is_singular_node(&mdb_entry->mdb_node, &vxlan->mdb_list))
vxlan->cfg.flags |= VXLAN_F_MDB;
return mdb_entry;
err_free_entry:
hlist_del(&mdb_entry->mdb_node);
kfree(mdb_entry);
return ERR_PTR(err);
}
static void vxlan_mdb_entry_put(struct vxlan_dev *vxlan,
struct vxlan_mdb_entry *mdb_entry)
{
if (!list_empty(&mdb_entry->remotes))
return;
if (hlist_is_singular_node(&mdb_entry->mdb_node, &vxlan->mdb_list))
vxlan->cfg.flags &= ~VXLAN_F_MDB;
rhashtable_remove_fast(&vxlan->mdb_tbl, &mdb_entry->rhnode,
vxlan_mdb_rht_params);
hlist_del(&mdb_entry->mdb_node);
kfree_rcu(mdb_entry, rcu);
}
static int __vxlan_mdb_add(const struct vxlan_mdb_config *cfg,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = cfg->vxlan;
struct vxlan_mdb_entry *mdb_entry;
int err;
mdb_entry = vxlan_mdb_entry_get(vxlan, &cfg->group);
if (IS_ERR(mdb_entry))
return PTR_ERR(mdb_entry);
err = vxlan_mdb_remote_add(cfg, mdb_entry, extack);
if (err)
goto err_entry_put;
vxlan->mdb_seq++;
return 0;
err_entry_put:
vxlan_mdb_entry_put(vxlan, mdb_entry);
return err;
}
static int __vxlan_mdb_del(const struct vxlan_mdb_config *cfg,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = cfg->vxlan;
struct vxlan_mdb_entry *mdb_entry;
struct vxlan_mdb_remote *remote;
mdb_entry = vxlan_mdb_entry_lookup(vxlan, &cfg->group);
if (!mdb_entry) {
NL_SET_ERR_MSG_MOD(extack, "Did not find MDB entry");
return -ENOENT;
}
remote = vxlan_mdb_remote_lookup(mdb_entry, &cfg->remote_ip);
if (!remote) {
NL_SET_ERR_MSG_MOD(extack, "Did not find MDB remote entry");
return -ENOENT;
}
vxlan_mdb_remote_del(vxlan, mdb_entry, remote);
vxlan_mdb_entry_put(vxlan, mdb_entry);
vxlan->mdb_seq++;
return 0;
}
int vxlan_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
struct netlink_ext_ack *extack)
{
struct vxlan_mdb_config cfg;
int err;
ASSERT_RTNL();
err = vxlan_mdb_config_init(&cfg, dev, tb, nlmsg_flags, extack);
if (err)
return err;
err = __vxlan_mdb_add(&cfg, extack);
vxlan_mdb_config_fini(&cfg);
return err;
}
int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct vxlan_mdb_config cfg;
int err;
ASSERT_RTNL();
err = vxlan_mdb_config_init(&cfg, dev, tb, 0, extack);
if (err)
return err;
err = __vxlan_mdb_del(&cfg, extack);
vxlan_mdb_config_fini(&cfg);
return err;
}
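/* Look up the MDB entry used for forwarding a packet. The lookup order is
 * (S, G), then (*, G) and finally the catch-all all-zeros entry, which is
 * skipped for link-local multicast destinations.
 */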
struct vxlan_mdb_entry *vxlan_mdb_entry_skb_get(struct vxlan_dev *vxlan,
struct sk_buff *skb,
__be32 src_vni)
{
struct vxlan_mdb_entry *mdb_entry;
struct vxlan_mdb_entry_key group;
if (!is_multicast_ether_addr(eth_hdr(skb)->h_dest) ||
is_broadcast_ether_addr(eth_hdr(skb)->h_dest))
return NULL;
/* When not in collect metadata mode, 'src_vni' is zero, but MDB
* entries are stored with the VNI of the VXLAN device.
*/
if (!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA))
src_vni = vxlan->default_dst.remote_vni;
memset(&group, 0, sizeof(group));
group.vni = src_vni;
switch (skb->protocol) {
case htons(ETH_P_IP):
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
return NULL;
group.dst.sa.sa_family = AF_INET;
group.dst.sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
group.src.sa.sa_family = AF_INET;
group.src.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
return NULL;
group.dst.sa.sa_family = AF_INET6;
group.dst.sin6.sin6_addr = ipv6_hdr(skb)->daddr;
group.src.sa.sa_family = AF_INET6;
group.src.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
break;
#endif
default:
return NULL;
}
mdb_entry = vxlan_mdb_entry_lookup(vxlan, &group);
if (mdb_entry)
return mdb_entry;
memset(&group.src, 0, sizeof(group.src));
mdb_entry = vxlan_mdb_entry_lookup(vxlan, &group);
if (mdb_entry)
return mdb_entry;
/* No (S, G) or (*, G) found. Look up the all-zeros entry, but only if
* the destination IP address is not link-local multicast since we want
* to transmit such traffic together with broadcast and unknown unicast
* traffic.
*/
switch (skb->protocol) {
case htons(ETH_P_IP):
if (ipv4_is_local_multicast(group.dst.sin.sin_addr.s_addr))
return NULL;
group.dst.sin.sin_addr.s_addr = 0;
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
if (ipv6_addr_type(&group.dst.sin6.sin6_addr) &
IPV6_ADDR_LINKLOCAL)
return NULL;
memset(&group.dst.sin6.sin6_addr, 0,
sizeof(group.dst.sin6.sin6_addr));
break;
#endif
default:
return NULL;
}
return vxlan_mdb_entry_lookup(vxlan, &group);
}
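/* Replicate the packet to all eligible remotes of the MDB entry. Remotes
 * that are blocked or whose (*, G) filter mode is INCLUDE are skipped. A
 * clone is sent to every eligible remote except the first one, which
 * consumes the original skb.
 */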
netdev_tx_t vxlan_mdb_xmit(struct vxlan_dev *vxlan,
const struct vxlan_mdb_entry *mdb_entry,
struct sk_buff *skb)
{
struct vxlan_mdb_remote *remote, *fremote = NULL;
__be32 src_vni = mdb_entry->key.vni;
list_for_each_entry_rcu(remote, &mdb_entry->remotes, list) {
struct sk_buff *skb1;
if ((vxlan_mdb_is_star_g(&mdb_entry->key) &&
READ_ONCE(remote->filter_mode) == MCAST_INCLUDE) ||
(READ_ONCE(remote->flags) & VXLAN_MDB_REMOTE_F_BLOCKED))
continue;
if (!fremote) {
fremote = remote;
continue;
}
skb1 = skb_clone(skb, GFP_ATOMIC);
if (skb1)
vxlan_xmit_one(skb1, vxlan->dev, src_vni,
rcu_dereference(remote->rd), false);
}
if (fremote)
vxlan_xmit_one(skb, vxlan->dev, src_vni,
rcu_dereference(fremote->rd), false);
else
kfree_skb(skb);
return NETDEV_TX_OK;
}
static void vxlan_mdb_check_empty(void *ptr, void *arg)
{
WARN_ON_ONCE(1);
}
static void vxlan_mdb_remotes_flush(struct vxlan_dev *vxlan,
struct vxlan_mdb_entry *mdb_entry)
{
struct vxlan_mdb_remote *remote, *tmp;
list_for_each_entry_safe(remote, tmp, &mdb_entry->remotes, list)
vxlan_mdb_remote_del(vxlan, mdb_entry, remote);
}
static void vxlan_mdb_entries_flush(struct vxlan_dev *vxlan)
{
struct vxlan_mdb_entry *mdb_entry;
struct hlist_node *tmp;
/* The removal of an entry cannot trigger the removal of another entry
* since entries are always added to the head of the list.
*/
hlist_for_each_entry_safe(mdb_entry, tmp, &vxlan->mdb_list, mdb_node) {
vxlan_mdb_remotes_flush(vxlan, mdb_entry);
vxlan_mdb_entry_put(vxlan, mdb_entry);
}
}
int vxlan_mdb_init(struct vxlan_dev *vxlan)
{
int err;
err = rhashtable_init(&vxlan->mdb_tbl, &vxlan_mdb_rht_params);
if (err)
return err;
INIT_HLIST_HEAD(&vxlan->mdb_list);
return 0;
}
void vxlan_mdb_fini(struct vxlan_dev *vxlan)
{
vxlan_mdb_entries_flush(vxlan);
WARN_ON_ONCE(vxlan->cfg.flags & VXLAN_F_MDB);
rhashtable_free_and_destroy(&vxlan->mdb_tbl, vxlan_mdb_check_empty,
NULL);
}
| linux-master | drivers/net/vxlan/vxlan_mdb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VXLAN multicast group handling
*
*/
#include <linux/kernel.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/igmp.h>
#include <net/vxlan.h>
#include "vxlan_private.h"
/* Update multicast group membership when the first VNI on a
 * multicast address is brought up.
 */
int vxlan_igmp_join(struct vxlan_dev *vxlan, union vxlan_addr *rip,
int rifindex)
{
union vxlan_addr *ip = (rip ? : &vxlan->default_dst.remote_ip);
int ifindex = (rifindex ? : vxlan->default_dst.remote_ifindex);
int ret = -EINVAL;
struct sock *sk;
if (ip->sa.sa_family == AF_INET) {
struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,
};
sk = sock4->sock->sk;
lock_sock(sk);
ret = ip_mc_join_group(sk, &mreq);
release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
sk = sock6->sock->sk;
lock_sock(sk);
ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
&ip->sin6.sin6_addr);
release_sock(sk);
#endif
}
return ret;
}
int vxlan_igmp_leave(struct vxlan_dev *vxlan, union vxlan_addr *rip,
int rifindex)
{
union vxlan_addr *ip = (rip ? : &vxlan->default_dst.remote_ip);
int ifindex = (rifindex ? : vxlan->default_dst.remote_ifindex);
int ret = -EINVAL;
struct sock *sk;
if (ip->sa.sa_family == AF_INET) {
struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,
};
sk = sock4->sock->sk;
lock_sock(sk);
ret = ip_mc_leave_group(sk, &mreq);
release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
sk = sock6->sock->sk;
lock_sock(sk);
ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
&ip->sin6.sin6_addr);
release_sock(sk);
#endif
}
return ret;
}
static bool vxlan_group_used_match(union vxlan_addr *ip, int ifindex,
union vxlan_addr *rip, int rifindex)
{
if (!vxlan_addr_multicast(rip))
return false;
if (!vxlan_addr_equal(rip, ip))
return false;
if (rifindex != ifindex)
return false;
return true;
}
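/* With VNI filtering enabled, the group is considered used if it matches
 * either the default destination or the multicast remote of any VNI in the
 * VNI group.
 */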
static bool vxlan_group_used_by_vnifilter(struct vxlan_dev *vxlan,
union vxlan_addr *ip, int ifindex)
{
struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
struct vxlan_vni_node *v, *tmp;
if (vxlan_group_used_match(ip, ifindex,
&vxlan->default_dst.remote_ip,
vxlan->default_dst.remote_ifindex))
return true;
list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
if (!vxlan_addr_multicast(&v->remote_ip))
continue;
if (vxlan_group_used_match(ip, ifindex,
&v->remote_ip,
vxlan->default_dst.remote_ifindex))
return true;
}
return false;
}
/* See if the multicast group is already in use by another VXLAN device */
bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev,
__be32 vni, union vxlan_addr *rip, int rifindex)
{
union vxlan_addr *ip = (rip ? : &dev->default_dst.remote_ip);
int ifindex = (rifindex ? : dev->default_dst.remote_ifindex);
struct vxlan_dev *vxlan;
struct vxlan_sock *sock4;
#if IS_ENABLED(CONFIG_IPV6)
struct vxlan_sock *sock6;
#endif
unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
sock4 = rtnl_dereference(dev->vn4_sock);
/* The vxlan_sock is only used by dev, so leaving the group has
 * no effect on other vxlan devices.
 */
if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
return false;
#if IS_ENABLED(CONFIG_IPV6)
sock6 = rtnl_dereference(dev->vn6_sock);
if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
return false;
#endif
list_for_each_entry(vxlan, &vn->vxlan_list, next) {
if (!netif_running(vxlan->dev) || vxlan == dev)
continue;
if (family == AF_INET &&
rtnl_dereference(vxlan->vn4_sock) != sock4)
continue;
#if IS_ENABLED(CONFIG_IPV6)
if (family == AF_INET6 &&
rtnl_dereference(vxlan->vn6_sock) != sock6)
continue;
#endif
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
if (!vxlan_group_used_by_vnifilter(vxlan, ip, ifindex))
continue;
} else {
if (!vxlan_group_used_match(ip, ifindex,
&vxlan->default_dst.remote_ip,
vxlan->default_dst.remote_ifindex))
continue;
}
return true;
}
return false;
}
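/* Join the multicast group of every VNI in the VNI group, skipping
 * non-multicast remotes and those equal to the default destination (which
 * the caller has already joined). On failure, leave all groups joined so
 * far.
 */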
static int vxlan_multicast_join_vnigrp(struct vxlan_dev *vxlan)
{
struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
struct vxlan_vni_node *v, *tmp, *vgood = NULL;
int ret = 0;
list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
if (!vxlan_addr_multicast(&v->remote_ip))
continue;
/* skip if address is same as default address */
if (vxlan_addr_equal(&v->remote_ip,
&vxlan->default_dst.remote_ip))
continue;
ret = vxlan_igmp_join(vxlan, &v->remote_ip, 0);
if (ret == -EADDRINUSE)
ret = 0;
if (ret)
goto out;
vgood = v;
}
out:
if (ret) {
list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
if (!vxlan_addr_multicast(&v->remote_ip))
continue;
if (vxlan_addr_equal(&v->remote_ip,
&vxlan->default_dst.remote_ip))
continue;
vxlan_igmp_leave(vxlan, &v->remote_ip, 0);
if (v == vgood)
break;
}
}
return ret;
}
static int vxlan_multicast_leave_vnigrp(struct vxlan_dev *vxlan)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
struct vxlan_vni_node *v, *tmp;
int last_err = 0, ret;
list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
if (vxlan_addr_multicast(&v->remote_ip) &&
!vxlan_group_used(vn, vxlan, v->vni, &v->remote_ip,
0)) {
ret = vxlan_igmp_leave(vxlan, &v->remote_ip, 0);
if (ret)
last_err = ret;
}
}
return last_err;
}
int vxlan_multicast_join(struct vxlan_dev *vxlan)
{
int ret = 0;
if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
ret = vxlan_igmp_join(vxlan, &vxlan->default_dst.remote_ip,
vxlan->default_dst.remote_ifindex);
if (ret == -EADDRINUSE)
ret = 0;
if (ret)
return ret;
}
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
return vxlan_multicast_join_vnigrp(vxlan);
return 0;
}
int vxlan_multicast_leave(struct vxlan_dev *vxlan)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
int ret = 0;
if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
!vxlan_group_used(vn, vxlan, 0, NULL, 0)) {
ret = vxlan_igmp_leave(vxlan, &vxlan->default_dst.remote_ip,
vxlan->default_dst.remote_ifindex);
if (ret)
return ret;
}
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
return vxlan_multicast_leave_vnigrp(vxlan);
return 0;
}
| linux-master | drivers/net/vxlan/vxlan_multicast.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Distributed Switch Architecture loopback driver
*
* Copyright (C) 2016, Florian Fainelli <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/export.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/if_bridge.h>
#include <linux/dsa/loop.h>
#include <net/dsa.h>
#include "dsa_loop.h"
static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
[DSA_LOOP_PHY_READ_OK] = { "phy_read_ok", },
[DSA_LOOP_PHY_READ_ERR] = { "phy_read_err", },
[DSA_LOOP_PHY_WRITE_OK] = { "phy_write_ok", },
[DSA_LOOP_PHY_WRITE_ERR] = { "phy_write_err", },
};
static struct phy_device *phydevs[PHY_MAX_ADDR];
enum dsa_loop_devlink_resource_id {
DSA_LOOP_DEVLINK_PARAM_ID_VTU,
};
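/* devlink occupancy getter for the VLAN table: count the entries that have
 * at least one member port.
 */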
static u64 dsa_loop_devlink_vtu_get(void *priv)
{
struct dsa_loop_priv *ps = priv;
unsigned int i, count = 0;
struct dsa_loop_vlan *vl;
for (i = 0; i < ARRAY_SIZE(ps->vlans); i++) {
vl = &ps->vlans[i];
if (vl->members)
count++;
}
return count;
}
static int dsa_loop_setup_devlink_resources(struct dsa_switch *ds)
{
struct devlink_resource_size_params size_params;
struct dsa_loop_priv *ps = ds->priv;
int err;
devlink_resource_size_params_init(&size_params, ARRAY_SIZE(ps->vlans),
ARRAY_SIZE(ps->vlans),
1, DEVLINK_RESOURCE_UNIT_ENTRY);
err = dsa_devlink_resource_register(ds, "VTU", ARRAY_SIZE(ps->vlans),
DSA_LOOP_DEVLINK_PARAM_ID_VTU,
DEVLINK_RESOURCE_ID_PARENT_TOP,
&size_params);
if (err)
goto out;
dsa_devlink_resource_occ_get_register(ds,
DSA_LOOP_DEVLINK_PARAM_ID_VTU,
dsa_loop_devlink_vtu_get, ps);
return 0;
out:
dsa_devlink_resources_unregister(ds);
return err;
}
static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
{
dev_dbg(ds->dev, "%s: port: %d\n", __func__, port);
return DSA_TAG_PROTO_NONE;
}
static int dsa_loop_setup(struct dsa_switch *ds)
{
struct dsa_loop_priv *ps = ds->priv;
unsigned int i;
for (i = 0; i < ds->num_ports; i++)
memcpy(ps->ports[i].mib, dsa_loop_mibs,
sizeof(dsa_loop_mibs));
dev_dbg(ds->dev, "%s\n", __func__);
return dsa_loop_setup_devlink_resources(ds);
}
static void dsa_loop_teardown(struct dsa_switch *ds)
{
dsa_devlink_resources_unregister(ds);
}
static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
if (sset != ETH_SS_STATS && sset != ETH_SS_PHY_STATS)
return 0;
return __DSA_LOOP_CNT_MAX;
}
static void dsa_loop_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data)
{
struct dsa_loop_priv *ps = ds->priv;
unsigned int i;
if (stringset != ETH_SS_STATS && stringset != ETH_SS_PHY_STATS)
return;
for (i = 0; i < __DSA_LOOP_CNT_MAX; i++)
memcpy(data + i * ETH_GSTRING_LEN,
ps->ports[port].mib[i].name, ETH_GSTRING_LEN);
}
static void dsa_loop_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct dsa_loop_priv *ps = ds->priv;
unsigned int i;
for (i = 0; i < __DSA_LOOP_CNT_MAX; i++)
data[i] = ps->ports[port].mib[i].val;
}
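/* PHY reads and writes are forwarded to the real MDIO bus behind the switch
 * and accounted in the per-port MIB counters.
 */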
static int dsa_loop_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct dsa_loop_priv *ps = ds->priv;
struct mii_bus *bus = ps->bus;
int ret;
ret = mdiobus_read_nested(bus, ps->port_base + port, regnum);
if (ret < 0)
ps->ports[port].mib[DSA_LOOP_PHY_READ_ERR].val++;
else
ps->ports[port].mib[DSA_LOOP_PHY_READ_OK].val++;
return ret;
}
static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
int regnum, u16 value)
{
struct dsa_loop_priv *ps = ds->priv;
struct mii_bus *bus = ps->bus;
int ret;
ret = mdiobus_write_nested(bus, ps->port_base + port, regnum, value);
if (ret < 0)
ps->ports[port].mib[DSA_LOOP_PHY_WRITE_ERR].val++;
else
ps->ports[port].mib[DSA_LOOP_PHY_WRITE_OK].val++;
return ret;
}
static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
__func__, port, bridge.dev->name);
return 0;
}
static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
__func__, port, bridge.dev->name);
}
static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
dev_dbg(ds->dev, "%s: port: %d, state: %d\n",
__func__, port, state);
}
static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
dev_dbg(ds->dev, "%s: port: %d, vlan_filtering: %d\n",
__func__, port, vlan_filtering);
return 0;
}
static int dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct dsa_loop_priv *ps = ds->priv;
struct mii_bus *bus = ps->bus;
struct dsa_loop_vlan *vl;
if (vlan->vid >= ARRAY_SIZE(ps->vlans))
return -ERANGE;
/* Just do a sleeping operation to make lockdep checks effective */
mdiobus_read(bus, ps->port_base + port, MII_BMSR);
vl = &ps->vlans[vlan->vid];
vl->members |= BIT(port);
if (untagged)
vl->untagged |= BIT(port);
else
vl->untagged &= ~BIT(port);
dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
__func__, port, vlan->vid, untagged ? "un" : "", pvid);
if (pvid)
ps->ports[port].pvid = vlan->vid;
return 0;
}
static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct dsa_loop_priv *ps = ds->priv;
u16 pvid = ps->ports[port].pvid;
struct mii_bus *bus = ps->bus;
struct dsa_loop_vlan *vl;
/* Just do a sleeping operation to make lockdep checks effective */
mdiobus_read(bus, ps->port_base + port, MII_BMSR);
vl = &ps->vlans[vlan->vid];
vl->members &= ~BIT(port);
if (untagged)
vl->untagged &= ~BIT(port);
if (pvid == vlan->vid)
pvid = 1;
dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
__func__, port, vlan->vid, untagged ? "un" : "", pvid);
ps->ports[port].pvid = pvid;
return 0;
}
static int dsa_loop_port_change_mtu(struct dsa_switch *ds, int port,
int new_mtu)
{
struct dsa_loop_priv *priv = ds->priv;
priv->ports[port].mtu = new_mtu;
return 0;
}
static int dsa_loop_port_max_mtu(struct dsa_switch *ds, int port)
{
return ETH_MAX_MTU;
}
static const struct dsa_switch_ops dsa_loop_driver = {
.get_tag_protocol = dsa_loop_get_protocol,
.setup = dsa_loop_setup,
.teardown = dsa_loop_teardown,
.get_strings = dsa_loop_get_strings,
.get_ethtool_stats = dsa_loop_get_ethtool_stats,
.get_sset_count = dsa_loop_get_sset_count,
.get_ethtool_phy_stats = dsa_loop_get_ethtool_stats,
.phy_read = dsa_loop_phy_read,
.phy_write = dsa_loop_phy_write,
.port_bridge_join = dsa_loop_port_bridge_join,
.port_bridge_leave = dsa_loop_port_bridge_leave,
.port_stp_state_set = dsa_loop_port_stp_state_set,
.port_vlan_filtering = dsa_loop_port_vlan_filtering,
.port_vlan_add = dsa_loop_port_vlan_add,
.port_vlan_del = dsa_loop_port_vlan_del,
.port_change_mtu = dsa_loop_port_change_mtu,
.port_max_mtu = dsa_loop_port_max_mtu,
};
static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
{
struct dsa_loop_pdata *pdata = mdiodev->dev.platform_data;
struct dsa_loop_priv *ps;
struct dsa_switch *ds;
int ret;
if (!pdata)
return -ENODEV;
ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
ds->dev = &mdiodev->dev;
ds->num_ports = DSA_LOOP_NUM_PORTS;
ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL);
if (!ps)
return -ENOMEM;
ps->netdev = dev_get_by_name(&init_net, pdata->netdev);
if (!ps->netdev)
return -EPROBE_DEFER;
pdata->cd.netdev[DSA_LOOP_CPU_PORT] = &ps->netdev->dev;
ds->dev = &mdiodev->dev;
ds->ops = &dsa_loop_driver;
ds->priv = ps;
ps->bus = mdiodev->bus;
dev_set_drvdata(&mdiodev->dev, ds);
ret = dsa_register_switch(ds);
if (!ret)
dev_info(&mdiodev->dev, "%s: 0x%0x\n",
pdata->name, pdata->enabled_ports);
return ret;
}
static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
struct dsa_loop_priv *ps;
if (!ds)
return;
ps = ds->priv;
dsa_unregister_switch(ds);
dev_put(ps->netdev);
}
static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
if (!ds)
return;
dsa_switch_shutdown(ds);
dev_set_drvdata(&mdiodev->dev, NULL);
}
static struct mdio_driver dsa_loop_drv = {
.mdiodrv.driver = {
.name = "dsa-loop",
},
.probe = dsa_loop_drv_probe,
.remove = dsa_loop_drv_remove,
.shutdown = dsa_loop_drv_shutdown,
};
#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
static void dsa_loop_phydevs_unregister(void)
{
unsigned int i;
for (i = 0; i < NUM_FIXED_PHYS; i++)
if (!IS_ERR(phydevs[i])) {
fixed_phy_unregister(phydevs[i]);
phy_device_free(phydevs[i]);
}
}
static int __init dsa_loop_init(void)
{
struct fixed_phy_status status = {
.link = 1,
.speed = SPEED_100,
.duplex = DUPLEX_FULL,
};
unsigned int i, ret;
for (i = 0; i < NUM_FIXED_PHYS; i++)
phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
ret = mdio_driver_register(&dsa_loop_drv);
if (ret)
dsa_loop_phydevs_unregister();
return ret;
}
module_init(dsa_loop_init);
static void __exit dsa_loop_exit(void)
{
mdio_driver_unregister(&dsa_loop_drv);
dsa_loop_phydevs_unregister();
}
module_exit(dsa_loop_exit);
MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Fainelli");
MODULE_DESCRIPTION("DSA loopback driver");
| linux-master | drivers/net/dsa/dsa_loop.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
*
* Copyright (C) 2010 Lantiq Deutschland
* Copyright (C) 2012 John Crispin <[email protected]>
* Copyright (C) 2017 - 2019 Hauke Mehrtens <[email protected]>
*
 * The VLAN and bridge model the GSWIP hardware uses does not directly
 * match the model DSA uses.
 *
 * The hardware has 64 possible table entries for bridges with one VLAN
 * ID, one flow ID and a list of ports for each bridge. All entries which
 * match the same flow ID are combined in the MAC learning table; they
 * act as one global bridge.
 * The hardware does not support VLAN filtering on the port, but only on
 * the bridge; this driver converts the DSA model to the hardware model.
 *
 * The CPU gets all the exception frames which do not match any forwarding
 * rule, and the CPU port is also added to all bridges. This makes it
 * possible to handle all the special cases easily in software.
 * At initialization the driver allocates one bridge table entry for each
 * switch port, which is used when the port is not part of an explicit
 * bridge. This prevents frames from being forwarded between all LAN
 * ports by default.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <net/dsa.h>
#include <dt-bindings/mips/lantiq_rcu_gphy.h>
#include "lantiq_pce.h"
/* GSWIP MDIO Registers */
#define GSWIP_MDIO_GLOB 0x00
#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
#define GSWIP_MDIO_CTRL 0x08
#define GSWIP_MDIO_CTRL_BUSY BIT(12)
#define GSWIP_MDIO_CTRL_RD BIT(11)
#define GSWIP_MDIO_CTRL_WR BIT(10)
#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
#define GSWIP_MDIO_READ 0x09
#define GSWIP_MDIO_WRITE 0x0A
#define GSWIP_MDIO_MDC_CFG0 0x0B
#define GSWIP_MDIO_MDC_CFG1 0x0C
#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
#define GSWIP_MDIO_PHY_LINK_UP 0x2000
#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
GSWIP_MDIO_PHY_FCONRX_MASK | \
GSWIP_MDIO_PHY_FCONTX_MASK | \
GSWIP_MDIO_PHY_LINK_MASK | \
GSWIP_MDIO_PHY_SPEED_MASK | \
GSWIP_MDIO_PHY_FDUP_MASK)
/* GSWIP MII Registers */
#define GSWIP_MII_CFGp(p) (0x2 * (p))
#define GSWIP_MII_CFG_RESET BIT(15)
#define GSWIP_MII_CFG_EN BIT(14)
#define GSWIP_MII_CFG_ISOLATE BIT(13)
#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
#define GSWIP_MII_CFG_RMII_CLK BIT(7)
#define GSWIP_MII_CFG_MODE_MIIP 0x0
#define GSWIP_MII_CFG_MODE_MIIM 0x1
#define GSWIP_MII_CFG_MODE_RMIIP 0x2
#define GSWIP_MII_CFG_MODE_RMIIM 0x3
#define GSWIP_MII_CFG_MODE_RGMII 0x4
#define GSWIP_MII_CFG_MODE_GMII 0x9
#define GSWIP_MII_CFG_MODE_MASK 0xf
#define GSWIP_MII_CFG_RATE_M2P5 0x00
#define GSWIP_MII_CFG_RATE_M25 0x10
#define GSWIP_MII_CFG_RATE_M125 0x20
#define GSWIP_MII_CFG_RATE_M50 0x30
#define GSWIP_MII_CFG_RATE_AUTO 0x40
#define GSWIP_MII_CFG_RATE_MASK 0x70
#define GSWIP_MII_PCDU0 0x01
#define GSWIP_MII_PCDU1 0x03
#define GSWIP_MII_PCDU5 0x05
#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
/* GSWIP Core Registers */
#define GSWIP_SWRES 0x000
#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
#define GSWIP_VERSION 0x013
#define GSWIP_VERSION_REV_SHIFT 0
#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
#define GSWIP_VERSION_MOD_SHIFT 8
#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
#define GSWIP_VERSION_2_0 0x100
#define GSWIP_VERSION_2_1 0x021
#define GSWIP_VERSION_2_2 0x122
#define GSWIP_VERSION_2_2_ETC 0x022
#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
#define GSWIP_BM_RAM_ADDR 0x044
#define GSWIP_BM_RAM_CTRL 0x045
#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
#define GSWIP_BM_QUEUE_GCTRL 0x04A
#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
/* buffer management Port Configuration Register */
#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingress Special Tag RMON count */
/* buffer management Port Control Register */
#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
/* PCE */
#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
#define GSWIP_PCE_TBL_MASK 0x448
#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
#define GSWIP_PCE_TBL_ADDR 0x44E
#define GSWIP_PCE_TBL_CTRL 0x44F
#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
#define GSWIP_PCE_GCTRL_0 0x456
#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
#define GSWIP_PCE_GCTRL_1 0x457
#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
#define GSWIP_MAC_FLEN 0x8C5
#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
/* Ethernet Switch Fetch DMA Port Control Register */
#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
/* Ethernet Switch Store DMA Port Control Register */
#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
#define GSWIP_TABLE_ACTIVE_VLAN 0x01
#define GSWIP_TABLE_VLAN_MAPPING 0x02
#define GSWIP_TABLE_MAC_BRIDGE 0x0b
#define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static, non-aging entry */
#define XRX200_GPHY_FW_ALIGN (16 * 1024)
/* Maximum packet size supported by the switch. In theory this should be 10240,
* but long packets currently cause lock-ups with an MTU of over 2526. Medium
* packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
* over 2526), hence an MTU value of 2400 seems safe. This issue only affects
* packet reception. This is probably caused by the PPA engine, which is on the
* RX part of the device. Packet transmission works properly up to 10240.
*/
#define GSWIP_MAX_PACKET_LENGTH 2400
struct gswip_hw_info {
int max_ports;
int cpu_port;
const struct dsa_switch_ops *ops;
};
struct xway_gphy_match_data {
char *fe_firmware_name;
char *ge_firmware_name;
};
struct gswip_gphy_fw {
struct clk *clk_gate;
struct reset_control *reset;
u32 fw_addr_offset;
char *fw_name;
};
struct gswip_vlan {
struct net_device *bridge;
u16 vid;
u8 fid;
};
struct gswip_priv {
__iomem void *gswip;
__iomem void *mdio;
__iomem void *mii;
const struct gswip_hw_info *hw_info;
const struct xway_gphy_match_data *gphy_fw_name_cfg;
struct dsa_switch *ds;
struct device *dev;
struct regmap *rcu_regmap;
struct gswip_vlan vlans[64];
int num_gphy_fw;
struct gswip_gphy_fw *gphy_fw;
u32 port_vlan_filter;
struct mutex pce_table_lock;
};
struct gswip_pce_table_entry {
u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index
u16 table; // PCE_TBL_CTRL.ADDR = pData->table
u16 key[8];
u16 val[5];
u16 mask;
u8 gmap;
bool type;
bool valid;
bool key_mode;
};
struct gswip_rmon_cnt_desc {
unsigned int size;
unsigned int offset;
const char *name;
};
#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
/** Receive Packet Count (only packets that are accepted and not discarded). */
MIB_DESC(1, 0x1F, "RxGoodPkts"),
MIB_DESC(1, 0x23, "RxUnicastPkts"),
MIB_DESC(1, 0x22, "RxMulticastPkts"),
MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
MIB_DESC(1, 0x20, "RxGoodPausePkts"),
MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
MIB_DESC(1, 0x12, "Rx64BytePkts"),
MIB_DESC(1, 0x13, "Rx127BytePkts"),
MIB_DESC(1, 0x14, "Rx255BytePkts"),
MIB_DESC(1, 0x15, "Rx511BytePkts"),
MIB_DESC(1, 0x16, "Rx1023BytePkts"),
/** Receive Size 1024-1522 (or more, if configured) Packet Count. */
MIB_DESC(1, 0x17, "RxMaxBytePkts"),
MIB_DESC(1, 0x18, "RxDroppedPkts"),
MIB_DESC(1, 0x19, "RxFilteredPkts"),
MIB_DESC(2, 0x24, "RxGoodBytes"),
MIB_DESC(2, 0x26, "RxBadBytes"),
MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
MIB_DESC(1, 0x0C, "TxGoodPkts"),
MIB_DESC(1, 0x06, "TxUnicastPkts"),
MIB_DESC(1, 0x07, "TxMulticastPkts"),
MIB_DESC(1, 0x00, "Tx64BytePkts"),
MIB_DESC(1, 0x01, "Tx127BytePkts"),
MIB_DESC(1, 0x02, "Tx255BytePkts"),
MIB_DESC(1, 0x03, "Tx511BytePkts"),
MIB_DESC(1, 0x04, "Tx1023BytePkts"),
/** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
MIB_DESC(1, 0x05, "TxMaxBytePkts"),
MIB_DESC(1, 0x08, "TxSingleCollCount"),
MIB_DESC(1, 0x09, "TxMultCollCount"),
MIB_DESC(1, 0x0A, "TxLateCollCount"),
MIB_DESC(1, 0x0B, "TxExcessCollCount"),
MIB_DESC(1, 0x0D, "TxPauseCount"),
MIB_DESC(1, 0x10, "TxDroppedPkts"),
MIB_DESC(2, 0x0E, "TxGoodBytes"),
};
static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
{
return __raw_readl(priv->gswip + (offset * 4));
}
static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
{
__raw_writel(val, priv->gswip + (offset * 4));
}
static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
u32 offset)
{
u32 val = gswip_switch_r(priv, offset);
val &= ~(clear);
val |= set;
gswip_switch_w(priv, val, offset);
}
static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
u32 cleared)
{
u32 val;
return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
(val & cleared) == 0, 20, 50000);
}
static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
{
return __raw_readl(priv->mdio + (offset * 4));
}
static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
{
__raw_writel(val, priv->mdio + (offset * 4));
}
static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
u32 offset)
{
u32 val = gswip_mdio_r(priv, offset);
val &= ~(clear);
val |= set;
gswip_mdio_w(priv, val, offset);
}
static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
{
return __raw_readl(priv->mii + (offset * 4));
}
static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
{
__raw_writel(val, priv->mii + (offset * 4));
}
static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
u32 offset)
{
u32 val = gswip_mii_r(priv, offset);
val &= ~(clear);
val |= set;
gswip_mii_w(priv, val, offset);
}
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
/* There's no MII_CFG register for the CPU port */
if (!dsa_is_cpu_port(priv->ds, port))
gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
}
static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
switch (port) {
case 0:
gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
break;
case 1:
gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
break;
case 5:
gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
break;
}
}
static int gswip_mdio_poll(struct gswip_priv *priv)
{
int cnt = 100;
while (likely(cnt--)) {
u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
return 0;
usleep_range(20, 40);
}
return -ETIMEDOUT;
}
static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
{
struct gswip_priv *priv = bus->priv;
int err;
err = gswip_mdio_poll(priv);
if (err) {
dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
return err;
}
gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
GSWIP_MDIO_CTRL);
return 0;
}
static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
{
struct gswip_priv *priv = bus->priv;
int err;
err = gswip_mdio_poll(priv);
if (err) {
dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
return err;
}
gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
GSWIP_MDIO_CTRL);
err = gswip_mdio_poll(priv);
if (err) {
dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
return err;
}
return gswip_mdio_r(priv, GSWIP_MDIO_READ);
}
static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
struct dsa_switch *ds = priv->ds;
int err;
ds->slave_mii_bus = mdiobus_alloc();
if (!ds->slave_mii_bus)
return -ENOMEM;
ds->slave_mii_bus->priv = priv;
ds->slave_mii_bus->read = gswip_mdio_rd;
ds->slave_mii_bus->write = gswip_mdio_wr;
ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
dev_name(priv->dev));
ds->slave_mii_bus->parent = priv->dev;
ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
if (err)
mdiobus_free(ds->slave_mii_bus);
return err;
}
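/* The PCE tables are accessed indirectly: the entry index and table ID are
 * written to the address/control registers, the BAS bit starts the access
 * and is polled until the hardware clears it, after which the key, value
 * and mask registers hold the entry.
 */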
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
struct gswip_pce_table_entry *tbl)
{
int i;
int err;
u16 crtl;
u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
mutex_lock(&priv->pce_table_lock);
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
if (err) {
mutex_unlock(&priv->pce_table_lock);
return err;
}
gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
GSWIP_PCE_TBL_CTRL);
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
if (err) {
mutex_unlock(&priv->pce_table_lock);
return err;
}
for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
mutex_unlock(&priv->pce_table_lock);
return 0;
}
static int gswip_pce_table_entry_write(struct gswip_priv *priv,
struct gswip_pce_table_entry *tbl)
{
int i;
int err;
u16 crtl;
u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
mutex_lock(&priv->pce_table_lock);
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
if (err) {
mutex_unlock(&priv->pce_table_lock);
return err;
}
gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
tbl->table | addr_mode,
GSWIP_PCE_TBL_CTRL);
for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
tbl->table | addr_mode,
GSWIP_PCE_TBL_CTRL);
gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
GSWIP_PCE_TBL_CTRL_GMAP_MASK);
if (tbl->type)
crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
if (tbl->valid)
crtl |= GSWIP_PCE_TBL_CTRL_VLD;
crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
crtl |= GSWIP_PCE_TBL_CTRL_BAS;
gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
mutex_unlock(&priv->pce_table_lock);
return err;
}
/* Add the LAN port into a bridge with the CPU port by
* default. This prevents automatic forwarding of
 * packets between the LAN ports when no explicit
* bridge is configured.
*/
static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
{
struct gswip_pce_table_entry vlan_active = {0,};
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int cpu_port = priv->hw_info->cpu_port;
unsigned int max_ports = priv->hw_info->max_ports;
int err;
if (port >= max_ports) {
dev_err(priv->dev, "single port for %i supported\n", port);
return -EIO;
}
vlan_active.index = port + 1;
vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
vlan_active.key[0] = 0; /* vid */
vlan_active.val[0] = port + 1 /* fid */;
vlan_active.valid = add;
err = gswip_pce_table_entry_write(priv, &vlan_active);
if (err) {
dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
return err;
}
if (!add)
return 0;
vlan_mapping.index = port + 1;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
vlan_mapping.val[0] = 0 /* vid */;
vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
vlan_mapping.val[2] = 0;
err = gswip_pce_table_entry_write(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
return err;
}
return 0;
}
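/* Illustrative sketch (annotation, not part of the original driver): with
 * the usual cpu_port = 6 layout used by the hw_info structures below,
 * calling this helper for user port 2 with add = true programs
 *   vlan_active.index = 3, key[0] (vid) = 0, val[0] (fid) = 3
 *   vlan_mapping.index = 3, val[1] = BIT(2) | BIT(6) = 0x44
 * i.e. a private FID per port whose member map contains only the port
 * itself and the CPU port, so LAN-to-LAN forwarding stays disabled until
 * an explicit bridge is configured.
 */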
static int gswip_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct gswip_priv *priv = ds->priv;
int err;
if (!dsa_is_user_port(ds, port))
return 0;
if (!dsa_is_cpu_port(ds, port)) {
err = gswip_add_single_port_br(priv, port, true);
if (err)
return err;
}
/* RMON Counter Enable for port */
gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
/* enable port fetch/store dma & VLAN Modification */
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
GSWIP_FDMA_PCTRLp(port));
gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
GSWIP_SDMA_PCTRLp(port));
if (!dsa_is_cpu_port(ds, port)) {
u32 mdio_phy = 0;
if (phydev)
mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
GSWIP_MDIO_PHYp(port));
}
return 0;
}
static void gswip_port_disable(struct dsa_switch *ds, int port)
{
struct gswip_priv *priv = ds->priv;
if (!dsa_is_user_port(ds, port))
return;
gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
GSWIP_FDMA_PCTRLp(port));
gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
GSWIP_SDMA_PCTRLp(port));
}
static int gswip_pce_load_microcode(struct gswip_priv *priv)
{
int i;
int err;
gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);
for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
GSWIP_PCE_TBL_VAL(0));
gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
GSWIP_PCE_TBL_VAL(1));
gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
GSWIP_PCE_TBL_VAL(2));
gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
GSWIP_PCE_TBL_VAL(3));
/* start the table access: */
gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
GSWIP_PCE_TBL_CTRL);
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
if (err)
return err;
}
/* tell the switch that the microcode is loaded */
gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
GSWIP_PCE_GCTRL_0);
return 0;
}
static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
/* Do not allow changing the VLAN filtering options while the port is in a bridge */
if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) {
NL_SET_ERR_MSG_MOD(extack,
"Dynamic toggling of vlan_filtering not supported");
return -EIO;
}
if (vlan_filtering) {
/* Use port based VLAN tag */
gswip_switch_mask(priv,
GSWIP_PCE_VCTRL_VSR,
GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
GSWIP_PCE_VCTRL_VEMR,
GSWIP_PCE_VCTRL(port));
gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
GSWIP_PCE_PCTRL_0p(port));
} else {
/* Use port based VLAN tag */
gswip_switch_mask(priv,
GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
GSWIP_PCE_VCTRL_VEMR,
GSWIP_PCE_VCTRL_VSR,
GSWIP_PCE_VCTRL(port));
gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
GSWIP_PCE_PCTRL_0p(port));
}
return 0;
}
static int gswip_setup(struct dsa_switch *ds)
{
struct gswip_priv *priv = ds->priv;
unsigned int cpu_port = priv->hw_info->cpu_port;
int i;
int err;
gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
usleep_range(5000, 10000);
gswip_switch_w(priv, 0, GSWIP_SWRES);
/* disable port fetch/store dma on all ports */
for (i = 0; i < priv->hw_info->max_ports; i++) {
gswip_port_disable(ds, i);
gswip_port_vlan_filtering(ds, i, false, NULL);
}
/* enable Switch */
gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
err = gswip_pce_load_microcode(priv);
if (err) {
dev_err(priv->dev, "writing PCE microcode failed, %i", err);
return err;
}
/* Default unknown Broadcast/Multicast/Unicast port maps */
gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
/* Deactivate MDIO PHY auto polling. Some PHYs such as the AR8030 have
 * an interoperability problem with this auto polling mechanism because
 * their status registers report a different link state than the actual
 * one. The AR8030 has the BMSR_ESTATEN bit set as well as
 * ESTATUS_1000_TFULL and ESTATUS_1000_XFULL, which makes the auto
 * polling state machine consider the link to be negotiated at 1 Gbit/s.
 * Since the PHY itself is a Fast Ethernet RMII PHY, this leaves the
 * switch port completely dead (neither RX nor TX work).
 * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
 * GPHY, external RGMII PEF7071/7072) traffic would stop. Sometimes it
 * would work fine for a few minutes to hours and then stop; on other
 * devices no traffic could be sent or received at all.
 * Testing shows that these problems go away when PHY auto polling is
 * disabled.
 */
gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
/* Configure the MDIO Clock 2.5 MHz */
gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
/* Disable the xMII interface and clear its isolation bit */
for (i = 0; i < priv->hw_info->max_ports; i++)
gswip_mii_mask_cfg(priv,
GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
0, i);
/* enable special tag insertion on cpu port */
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
GSWIP_FDMA_PCTRLp(cpu_port));
/* accept special tag in ingress direction */
gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
GSWIP_PCE_PCTRL_0p(cpu_port));
gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
GSWIP_BM_QUEUE_GCTRL);
/* VLAN aware Switching */
gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
/* Flush MAC Table */
gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
GSWIP_PCE_GCTRL_0_MTFL);
if (err) {
dev_err(priv->dev, "MAC flushing didn't finish\n");
return err;
}
ds->mtu_enforcement_ingress = true;
gswip_port_enable(ds, cpu_port, NULL);
ds->configure_vlan_while_not_filtering = false;
return 0;
}
static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_GSWIP;
}
static int gswip_vlan_active_create(struct gswip_priv *priv,
struct net_device *bridge,
int fid, u16 vid)
{
struct gswip_pce_table_entry vlan_active = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
int idx = -1;
int err;
int i;
/* Look for a free slot */
for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (!priv->vlans[i].bridge) {
idx = i;
break;
}
}
if (idx == -1)
return -ENOSPC;
if (fid == -1)
fid = idx;
vlan_active.index = idx;
vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
vlan_active.key[0] = vid;
vlan_active.val[0] = fid;
vlan_active.valid = true;
err = gswip_pce_table_entry_write(priv, &vlan_active);
if (err) {
dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
return err;
}
priv->vlans[idx].bridge = bridge;
priv->vlans[idx].vid = vid;
priv->vlans[idx].fid = fid;
return idx;
}
static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
{
struct gswip_pce_table_entry vlan_active = {0,};
int err;
vlan_active.index = idx;
vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
vlan_active.valid = false;
err = gswip_pce_table_entry_write(priv, &vlan_active);
if (err)
dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
priv->vlans[idx].bridge = NULL;
return err;
}
static int gswip_vlan_add_unaware(struct gswip_priv *priv,
struct net_device *bridge, int port)
{
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
unsigned int cpu_port = priv->hw_info->cpu_port;
bool active_vlan_created = false;
int idx = -1;
int i;
int err;
/* Check if there is already a page for this bridge */
for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge) {
idx = i;
break;
}
}
/* If this bridge is not programmed yet, add an Active VLAN table
* entry in a free slot and prepare the VLAN mapping table entry.
*/
if (idx == -1) {
idx = gswip_vlan_active_create(priv, bridge, -1, 0);
if (idx < 0)
return idx;
active_vlan_created = true;
vlan_mapping.index = idx;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
/* VLAN ID byte, maps to the VLAN ID of vlan active table */
vlan_mapping.val[0] = 0;
} else {
/* Read the existing VLAN mapping entry from the switch */
vlan_mapping.index = idx;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
err = gswip_pce_table_entry_read(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
err);
return err;
}
}
/* Update the VLAN mapping entry and write it to the switch */
vlan_mapping.val[1] |= BIT(cpu_port);
vlan_mapping.val[1] |= BIT(port);
err = gswip_pce_table_entry_write(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
/* In case an Active VLAN was created, delete it again */
if (active_vlan_created)
gswip_vlan_active_remove(priv, idx);
return err;
}
gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
return 0;
}
static int gswip_vlan_add_aware(struct gswip_priv *priv,
struct net_device *bridge, int port,
u16 vid, bool untagged,
bool pvid)
{
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
unsigned int cpu_port = priv->hw_info->cpu_port;
bool active_vlan_created = false;
int idx = -1;
int fid = -1;
int i;
int err;
/* Check if there is already a page for this bridge */
for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge) {
if (fid != -1 && fid != priv->vlans[i].fid)
dev_err(priv->dev, "one bridge with multiple flow ids\n");
fid = priv->vlans[i].fid;
if (priv->vlans[i].vid == vid) {
idx = i;
break;
}
}
}
/* If this bridge is not programmed yet, add an Active VLAN table
* entry in a free slot and prepare the VLAN mapping table entry.
*/
if (idx == -1) {
idx = gswip_vlan_active_create(priv, bridge, fid, vid);
if (idx < 0)
return idx;
active_vlan_created = true;
vlan_mapping.index = idx;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
/* VLAN ID byte, maps to the VLAN ID of vlan active table */
vlan_mapping.val[0] = vid;
} else {
/* Read the existing VLAN mapping entry from the switch */
vlan_mapping.index = idx;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
err = gswip_pce_table_entry_read(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
err);
return err;
}
}
vlan_mapping.val[0] = vid;
/* Update the VLAN mapping entry and write it to the switch */
vlan_mapping.val[1] |= BIT(cpu_port);
vlan_mapping.val[2] |= BIT(cpu_port);
vlan_mapping.val[1] |= BIT(port);
if (untagged)
vlan_mapping.val[2] &= ~BIT(port);
else
vlan_mapping.val[2] |= BIT(port);
err = gswip_pce_table_entry_write(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
/* In case an Active VLAN was created, delete it again */
if (active_vlan_created)
gswip_vlan_active_remove(priv, idx);
return err;
}
if (pvid)
gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));
return 0;
}
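/* Reading of the mapping entry layout used above (an annotation added for
 * clarity, not from the original source): val[0] holds the VLAN ID,
 * val[1] is the member port map and val[2] the tagged port map. A port
 * added as untagged therefore gets its bit set in val[1] but cleared in
 * val[2], while the CPU port is always kept tagged (set in both maps).
 */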
static int gswip_vlan_remove(struct gswip_priv *priv,
struct net_device *bridge, int port,
u16 vid, bool pvid, bool vlan_aware)
{
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
unsigned int cpu_port = priv->hw_info->cpu_port;
int idx = -1;
int i;
int err;
/* Check if there is already a page for this bridge */
for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge &&
(!vlan_aware || priv->vlans[i].vid == vid)) {
idx = i;
break;
}
}
if (idx == -1) {
dev_err(priv->dev, "bridge to leave does not exists\n");
return -ENOENT;
}
vlan_mapping.index = idx;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
err = gswip_pce_table_entry_read(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
return err;
}
vlan_mapping.val[1] &= ~BIT(port);
vlan_mapping.val[2] &= ~BIT(port);
err = gswip_pce_table_entry_write(priv, &vlan_mapping);
if (err) {
dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
return err;
}
/* In case all ports are removed from the bridge, remove the VLAN */
if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
err = gswip_vlan_active_remove(priv, idx);
if (err) {
dev_err(priv->dev, "failed to write active VLAN: %d\n",
err);
return err;
}
}
/* GSWIP 2.2 (GRX300) and later program here the VID directly. */
if (pvid)
gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
return 0;
}
static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
struct net_device *br = bridge.dev;
struct gswip_priv *priv = ds->priv;
int err;
/* When the bridge uses VLAN filtering we have to configure VLAN
* specific bridges. No bridge is configured here.
*/
if (!br_vlan_enabled(br)) {
err = gswip_vlan_add_unaware(priv, br, port);
if (err)
return err;
priv->port_vlan_filter &= ~BIT(port);
} else {
priv->port_vlan_filter |= BIT(port);
}
return gswip_add_single_port_br(priv, port, false);
}
static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct net_device *br = bridge.dev;
struct gswip_priv *priv = ds->priv;
gswip_add_single_port_br(priv, port, true);
/* When the bridge uses VLAN filtering we have to configure VLAN
* specific bridges. No bridge is configured here.
*/
if (!br_vlan_enabled(br))
gswip_vlan_remove(priv, br, port, 0, true, false);
}
static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
unsigned int max_ports = priv->hw_info->max_ports;
int pos = max_ports;
int i, idx = -1;
/* We only support VLAN filtering on bridges */
if (!dsa_is_cpu_port(ds, port) && !bridge)
return -EOPNOTSUPP;
/* Check if there is already a page for this VLAN */
for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge &&
priv->vlans[i].vid == vlan->vid) {
idx = i;
break;
}
}
/* If this VLAN is not programmed yet, we have to reserve
 * one entry in the VLAN table. Make sure we continue the
 * search from the next position.
*/
if (idx == -1) {
/* Look for a free slot */
for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
if (!priv->vlans[pos].bridge) {
idx = pos;
pos++;
break;
}
}
if (idx == -1) {
NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
return -ENOSPC;
}
}
return 0;
}
static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
int err;
err = gswip_port_vlan_prepare(ds, port, vlan, extack);
if (err)
return err;
/* We have to receive all packets on the CPU port and should not
 * do any VLAN filtering here. This is also called with a NULL
 * bridge, in which case we do not know which bridge to configure
 * this for.
*/
if (dsa_is_cpu_port(ds, port))
return 0;
return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
untagged, pvid);
}
static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
/* We have to receive all packets on the CPU port and should not
 * do any VLAN filtering here. This is also called with a NULL
 * bridge, in which case we do not know which bridge to configure
 * this for.
*/
if (dsa_is_cpu_port(ds, port))
return 0;
return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
}
static void gswip_port_fast_age(struct dsa_switch *ds, int port)
{
struct gswip_priv *priv = ds->priv;
struct gswip_pce_table_entry mac_bridge = {0,};
int i;
int err;
for (i = 0; i < 2048; i++) {
mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
mac_bridge.index = i;
err = gswip_pce_table_entry_read(priv, &mac_bridge);
if (err) {
dev_err(priv->dev, "failed to read mac bridge: %d\n",
err);
return;
}
if (!mac_bridge.valid)
continue;
if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
continue;
if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
continue;
mac_bridge.valid = false;
err = gswip_pce_table_entry_write(priv, &mac_bridge);
if (err) {
dev_err(priv->dev, "failed to write mac bridge: %d\n",
err);
return;
}
}
}
static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
struct gswip_priv *priv = ds->priv;
u32 stp_state;
switch (state) {
case BR_STATE_DISABLED:
gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
GSWIP_SDMA_PCTRLp(port));
return;
case BR_STATE_BLOCKING:
case BR_STATE_LISTENING:
stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
break;
case BR_STATE_LEARNING:
stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
break;
case BR_STATE_FORWARDING:
stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
break;
default:
dev_err(priv->dev, "invalid STP state: %d\n", state);
return;
}
gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
GSWIP_SDMA_PCTRLp(port));
gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
GSWIP_PCE_PCTRL_0p(port));
}
static int gswip_port_fdb(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid, bool add)
{
struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
struct gswip_pce_table_entry mac_bridge = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
int fid = -1;
int i;
int err;
if (!bridge)
return -EINVAL;
for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge) {
fid = priv->vlans[i].fid;
break;
}
}
if (fid == -1) {
dev_err(priv->dev, "Port not part of a bridge\n");
return -EINVAL;
}
mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
mac_bridge.key_mode = true;
mac_bridge.key[0] = addr[5] | (addr[4] << 8);
mac_bridge.key[1] = addr[3] | (addr[2] << 8);
mac_bridge.key[2] = addr[1] | (addr[0] << 8);
mac_bridge.key[3] = fid;
mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
mac_bridge.valid = add;
err = gswip_pce_table_entry_write(priv, &mac_bridge);
if (err)
dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
return err;
}
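/* Worked example of the key packing above (illustrative comment only):
 * for the MAC address 00:11:22:33:44:55 the bridge table key becomes
 *   key[0] = 0x4455, key[1] = 0x2233, key[2] = 0x0011,
 * with key[3] carrying the FID looked up from the bridge's VLAN entry.
 */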
static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
return gswip_port_fdb(ds, port, addr, vid, true);
}
static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
return gswip_port_fdb(ds, port, addr, vid, false);
}
static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct gswip_priv *priv = ds->priv;
struct gswip_pce_table_entry mac_bridge = {0,};
unsigned char addr[6];
int i;
int err;
for (i = 0; i < 2048; i++) {
mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
mac_bridge.index = i;
err = gswip_pce_table_entry_read(priv, &mac_bridge);
if (err) {
dev_err(priv->dev,
"failed to read mac bridge entry %d: %d\n",
i, err);
return err;
}
if (!mac_bridge.valid)
continue;
addr[5] = mac_bridge.key[0] & 0xff;
addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
addr[3] = mac_bridge.key[1] & 0xff;
addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
addr[1] = mac_bridge.key[2] & 0xff;
addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
if (mac_bridge.val[0] & BIT(port)) {
err = cb(addr, 0, true, data);
if (err)
return err;
}
} else {
if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
err = cb(addr, 0, false, data);
if (err)
return err;
}
}
}
return 0;
}
static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
{
/* Includes 8 bytes for special header. */
return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
}
static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct gswip_priv *priv = ds->priv;
int cpu_port = priv->hw_info->cpu_port;
/* The CPU port always has the maximum MTU of the user ports, so use it
 * to set the switch frame size, including the 8-byte special header.
*/
if (port == cpu_port) {
new_mtu += 8;
gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
GSWIP_MAC_FLEN);
}
/* Enable MLEN for ports with non-standard MTUs, including the special
* header on the CPU port added above.
*/
if (new_mtu != ETH_DATA_LEN)
gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
GSWIP_MAC_CTRL_2p(port));
else
gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
GSWIP_MAC_CTRL_2p(port));
return 0;
}
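/* Frame length arithmetic used above, as a worked example (comment added
 * for illustration): with a new_mtu of ETH_DATA_LEN (1500) on the CPU
 * port, 8 bytes of special tag are added first, so GSWIP_MAC_FLEN is
 * written with VLAN_ETH_HLEN + 1508 + ETH_FCS_LEN = 18 + 1508 + 4 =
 * 1530 bytes. Any MTU other than the plain 1500 bytes also sets the
 * per-port MLEN bit so the MAC accepts the longer frames.
 */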
static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
switch (port) {
case 0:
case 1:
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_MII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_REVMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_RMII,
config->supported_interfaces);
break;
case 2:
case 3:
case 4:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
case 5:
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
}
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000;
}
static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
switch (port) {
case 0:
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_RMII,
config->supported_interfaces);
break;
case 1:
case 2:
case 3:
case 4:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
case 5:
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_RMII,
config->supported_interfaces);
break;
}
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000;
}
static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
{
u32 mdio_phy;
if (link)
mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
else
mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
GSWIP_MDIO_PHYp(port));
}
static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
phy_interface_t interface)
{
u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
switch (speed) {
case SPEED_10:
mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
if (interface == PHY_INTERFACE_MODE_RMII)
mii_cfg = GSWIP_MII_CFG_RATE_M50;
else
mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
break;
case SPEED_100:
mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
if (interface == PHY_INTERFACE_MODE_RMII)
mii_cfg = GSWIP_MII_CFG_RATE_M50;
else
mii_cfg = GSWIP_MII_CFG_RATE_M25;
mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
break;
case SPEED_1000:
mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
mii_cfg = GSWIP_MII_CFG_RATE_M125;
mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
break;
}
gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
GSWIP_MDIO_PHYp(port));
gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
GSWIP_MAC_CTRL_0p(port));
}
static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
{
u32 mac_ctrl_0, mdio_phy;
if (duplex == DUPLEX_FULL) {
mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
} else {
mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
}
gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
GSWIP_MAC_CTRL_0p(port));
gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
GSWIP_MDIO_PHYp(port));
}
static void gswip_port_set_pause(struct gswip_priv *priv, int port,
bool tx_pause, bool rx_pause)
{
u32 mac_ctrl_0, mdio_phy;
if (tx_pause && rx_pause) {
mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
GSWIP_MDIO_PHY_FCONRX_EN;
} else if (tx_pause) {
mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
GSWIP_MDIO_PHY_FCONRX_DIS;
} else if (rx_pause) {
mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
GSWIP_MDIO_PHY_FCONRX_EN;
} else {
mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
GSWIP_MDIO_PHY_FCONRX_DIS;
}
gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
gswip_mdio_mask(priv,
GSWIP_MDIO_PHY_FCONTX_MASK |
GSWIP_MDIO_PHY_FCONRX_MASK,
mdio_phy, GSWIP_MDIO_PHYp(port));
}
static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
struct gswip_priv *priv = ds->priv;
u32 miicfg = 0;
miicfg |= GSWIP_MII_CFG_LDCLKDIS;
switch (state->interface) {
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_INTERNAL:
miicfg |= GSWIP_MII_CFG_MODE_MIIM;
break;
case PHY_INTERFACE_MODE_REVMII:
miicfg |= GSWIP_MII_CFG_MODE_MIIP;
break;
case PHY_INTERFACE_MODE_RMII:
miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
miicfg |= GSWIP_MII_CFG_MODE_RGMII;
break;
case PHY_INTERFACE_MODE_GMII:
miicfg |= GSWIP_MII_CFG_MODE_GMII;
break;
default:
dev_err(ds->dev,
"Unsupported interface: %d\n", state->interface);
return;
}
gswip_mii_mask_cfg(priv,
GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
miicfg, port);
switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII_ID:
gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
break;
case PHY_INTERFACE_MODE_RGMII_RXID:
gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
break;
default:
break;
}
}
static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
struct gswip_priv *priv = ds->priv;
gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
if (!dsa_is_cpu_port(ds, port))
gswip_port_set_link(priv, port, false);
}
static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct gswip_priv *priv = ds->priv;
if (!dsa_is_cpu_port(ds, port)) {
gswip_port_set_link(priv, port, true);
gswip_port_set_speed(priv, port, speed, interface);
gswip_port_set_duplex(priv, port, duplex);
gswip_port_set_pause(priv, port, tx_pause, rx_pause);
}
gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
{
int i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
ETH_GSTRING_LEN);
}
static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
u32 index)
{
u32 result;
int err;
gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
GSWIP_BM_RAM_CTRL_OPMOD,
table | GSWIP_BM_RAM_CTRL_BAS,
GSWIP_BM_RAM_CTRL);
err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
GSWIP_BM_RAM_CTRL_BAS);
if (err) {
dev_err(priv->dev, "timeout while reading table: %u, index: %u",
table, index);
return 0;
}
result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;
return result;
}
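/* Example of the RAM read above (illustrative annotation): the BM RAM
 * returns the counter in two 16-bit halves, VAL(0) low and VAL(1) high.
 * If VAL(0) reads 0x5678 and VAL(1) reads 0x1234, the function returns
 * 0x12345678. gswip_get_ethtool_stats() then stacks two such 32-bit
 * words for the 64-bit RMON counters (rmon_cnt->size == 2).
 */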
static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct gswip_priv *priv = ds->priv;
const struct gswip_rmon_cnt_desc *rmon_cnt;
int i;
u64 high;
for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
rmon_cnt = &gswip_rmon_cnt[i];
data[i] = gswip_bcm_ram_entry_read(priv, port,
rmon_cnt->offset);
if (rmon_cnt->size == 2) {
high = gswip_bcm_ram_entry_read(priv, port,
rmon_cnt->offset + 1);
data[i] |= high << 32;
}
}
}
static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
if (sset != ETH_SS_STATS)
return 0;
return ARRAY_SIZE(gswip_rmon_cnt);
}
static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.get_tag_protocol = gswip_get_tag_protocol,
.setup = gswip_setup,
.port_enable = gswip_port_enable,
.port_disable = gswip_port_disable,
.port_bridge_join = gswip_port_bridge_join,
.port_bridge_leave = gswip_port_bridge_leave,
.port_fast_age = gswip_port_fast_age,
.port_vlan_filtering = gswip_port_vlan_filtering,
.port_vlan_add = gswip_port_vlan_add,
.port_vlan_del = gswip_port_vlan_del,
.port_stp_state_set = gswip_port_stp_state_set,
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
.port_change_mtu = gswip_port_change_mtu,
.port_max_mtu = gswip_port_max_mtu,
.phylink_get_caps = gswip_xrx200_phylink_get_caps,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
.phylink_mac_link_up = gswip_phylink_mac_link_up,
.get_strings = gswip_get_strings,
.get_ethtool_stats = gswip_get_ethtool_stats,
.get_sset_count = gswip_get_sset_count,
};
static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
.get_tag_protocol = gswip_get_tag_protocol,
.setup = gswip_setup,
.port_enable = gswip_port_enable,
.port_disable = gswip_port_disable,
.port_bridge_join = gswip_port_bridge_join,
.port_bridge_leave = gswip_port_bridge_leave,
.port_fast_age = gswip_port_fast_age,
.port_vlan_filtering = gswip_port_vlan_filtering,
.port_vlan_add = gswip_port_vlan_add,
.port_vlan_del = gswip_port_vlan_del,
.port_stp_state_set = gswip_port_stp_state_set,
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
.port_change_mtu = gswip_port_change_mtu,
.port_max_mtu = gswip_port_max_mtu,
.phylink_get_caps = gswip_xrx300_phylink_get_caps,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
.phylink_mac_link_up = gswip_phylink_mac_link_up,
.get_strings = gswip_get_strings,
.get_ethtool_stats = gswip_get_ethtool_stats,
.get_sset_count = gswip_get_sset_count,
};
static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
.fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
.ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
};
static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
.fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
.ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
};
static const struct xway_gphy_match_data xrx300_gphy_data = {
.fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
.ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
};
static const struct of_device_id xway_gphy_match[] __maybe_unused = {
{ .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
{ .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
{ .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
{ .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
{ .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
{},
};
static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
{
struct device *dev = priv->dev;
const struct firmware *fw;
void *fw_addr;
dma_addr_t dma_addr;
dma_addr_t dev_addr;
size_t size;
int ret;
ret = clk_prepare_enable(gphy_fw->clk_gate);
if (ret)
return ret;
reset_control_assert(gphy_fw->reset);
/* The vendor BSP uses a 200ms delay after asserting the reset line.
* Without this some users are observing that the PHY is not coming up
* on the MDIO bus.
*/
msleep(200);
ret = request_firmware(&fw, gphy_fw->fw_name, dev);
if (ret) {
dev_err(dev, "failed to load firmware: %s, error: %i\n",
gphy_fw->fw_name, ret);
return ret;
}
/* GPHY cores need the firmware code in a persistent and contiguous
* memory area with a 16 kB boundary aligned start address.
*/
size = fw->size + XRX200_GPHY_FW_ALIGN;
fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
if (fw_addr) {
fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
memcpy(fw_addr, fw->data, fw->size);
} else {
dev_err(dev, "failed to alloc firmware memory\n");
release_firmware(fw);
return -ENOMEM;
}
release_firmware(fw);
ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
if (ret)
return ret;
reset_control_deassert(gphy_fw->reset);
return ret;
}
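/* Alignment sketch for the firmware buffer handling above (comment added
 * for illustration; assumes XRX200_GPHY_FW_ALIGN is the 16 kB boundary
 * mentioned in the comment): the buffer is over-allocated by the
 * alignment amount, then the CPU pointer and the DMA address are each
 * rounded up, e.g. a dma_addr of 0x80001000 yields a dev_addr of
 * 0x80004000, which is what gets written to the RCU register.
 */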
static int gswip_gphy_fw_probe(struct gswip_priv *priv,
struct gswip_gphy_fw *gphy_fw,
struct device_node *gphy_fw_np, int i)
{
struct device *dev = priv->dev;
u32 gphy_mode;
int ret;
char gphyname[10];
snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
if (IS_ERR(gphy_fw->clk_gate)) {
dev_err(dev, "Failed to lookup gate clock\n");
return PTR_ERR(gphy_fw->clk_gate);
}
ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
if (ret)
return ret;
ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
/* Default to GE mode */
if (ret)
gphy_mode = GPHY_MODE_GE;
switch (gphy_mode) {
case GPHY_MODE_FE:
gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
break;
case GPHY_MODE_GE:
gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
break;
default:
dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
return -EINVAL;
}
gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
if (IS_ERR(gphy_fw->reset))
return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
"Failed to lookup gphy reset\n");
return gswip_gphy_fw_load(priv, gphy_fw);
}
static void gswip_gphy_fw_remove(struct gswip_priv *priv,
struct gswip_gphy_fw *gphy_fw)
{
int ret;
/* check if the device was fully probed */
if (!gphy_fw->fw_name)
return;
ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
if (ret)
dev_err(priv->dev, "can not reset GPHY FW pointer");
clk_disable_unprepare(gphy_fw->clk_gate);
reset_control_put(gphy_fw->reset);
}
static int gswip_gphy_fw_list(struct gswip_priv *priv,
struct device_node *gphy_fw_list_np, u32 version)
{
struct device *dev = priv->dev;
struct device_node *gphy_fw_np;
const struct of_device_id *match;
int err;
int i = 0;
/* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
* GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
* needs a different GPHY firmware.
*/
if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
switch (version) {
case GSWIP_VERSION_2_0:
priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
break;
case GSWIP_VERSION_2_1:
priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
break;
default:
dev_err(dev, "unknown GSWIP version: 0x%x", version);
return -ENOENT;
}
}
match = of_match_node(xway_gphy_match, gphy_fw_list_np);
if (match && match->data)
priv->gphy_fw_name_cfg = match->data;
if (!priv->gphy_fw_name_cfg) {
dev_err(dev, "GPHY compatible type not supported");
return -ENOENT;
}
priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
if (!priv->num_gphy_fw)
return -ENOENT;
priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
"lantiq,rcu");
if (IS_ERR(priv->rcu_regmap))
return PTR_ERR(priv->rcu_regmap);
priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
sizeof(*priv->gphy_fw),
GFP_KERNEL | __GFP_ZERO);
if (!priv->gphy_fw)
return -ENOMEM;
for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
gphy_fw_np, i);
if (err) {
of_node_put(gphy_fw_np);
goto remove_gphy;
}
i++;
}
/* The standalone PHY11G requires 300ms to be fully
* initialized and ready for any MDIO communication after being
* taken out of reset. For the SoC-internal GPHY variant there
* is no (known) documentation for the minimum time after a
* reset. Use the same value as for the standalone variant as
* some users have reported internal PHYs not being detected
* without any delay.
*/
msleep(300);
return 0;
remove_gphy:
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
return err;
}
static int gswip_probe(struct platform_device *pdev)
{
struct gswip_priv *priv;
struct device_node *np, *mdio_np, *gphy_fw_np;
struct device *dev = &pdev->dev;
int err;
int i;
u32 version;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->gswip = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->gswip))
return PTR_ERR(priv->gswip);
priv->mdio = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->mdio))
return PTR_ERR(priv->mdio);
priv->mii = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(priv->mii))
return PTR_ERR(priv->mii);
priv->hw_info = of_device_get_match_data(dev);
if (!priv->hw_info)
return -EINVAL;
priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
priv->ds->dev = dev;
priv->ds->num_ports = priv->hw_info->max_ports;
priv->ds->priv = priv;
priv->ds->ops = priv->hw_info->ops;
priv->dev = dev;
mutex_init(&priv->pce_table_lock);
version = gswip_switch_r(priv, GSWIP_VERSION);
np = dev->of_node;
switch (version) {
case GSWIP_VERSION_2_0:
case GSWIP_VERSION_2_1:
if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
return -EINVAL;
break;
case GSWIP_VERSION_2_2:
case GSWIP_VERSION_2_2_ETC:
if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
!of_device_is_compatible(np, "lantiq,xrx330-gswip"))
return -EINVAL;
break;
default:
dev_err(dev, "unknown GSWIP version: 0x%x", version);
return -ENOENT;
}
/* bring up the mdio bus */
gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
if (gphy_fw_np) {
err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
of_node_put(gphy_fw_np);
if (err) {
dev_err(dev, "gphy fw probe failed\n");
return err;
}
}
/* bring up the mdio bus */
mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
if (mdio_np) {
err = gswip_mdio(priv, mdio_np);
if (err) {
dev_err(dev, "mdio probe failed\n");
goto put_mdio_node;
}
}
err = dsa_register_switch(priv->ds);
if (err) {
dev_err(dev, "dsa switch register failed: %i\n", err);
goto mdio_bus;
}
if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
priv->hw_info->cpu_port);
err = -EINVAL;
goto disable_switch;
}
platform_set_drvdata(pdev, priv);
dev_info(dev, "probed GSWIP version %lx mod %lx\n",
(version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
(version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
return 0;
disable_switch:
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
dsa_unregister_switch(priv->ds);
mdio_bus:
if (mdio_np) {
mdiobus_unregister(priv->ds->slave_mii_bus);
mdiobus_free(priv->ds->slave_mii_bus);
}
put_mdio_node:
of_node_put(mdio_np);
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
return err;
}
static int gswip_remove(struct platform_device *pdev)
{
struct gswip_priv *priv = platform_get_drvdata(pdev);
int i;
if (!priv)
return 0;
/* disable the switch */
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
dsa_unregister_switch(priv->ds);
if (priv->ds->slave_mii_bus) {
mdiobus_unregister(priv->ds->slave_mii_bus);
of_node_put(priv->ds->slave_mii_bus->dev.of_node);
mdiobus_free(priv->ds->slave_mii_bus);
}
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
return 0;
}
static void gswip_shutdown(struct platform_device *pdev)
{
struct gswip_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return;
dsa_switch_shutdown(priv->ds);
platform_set_drvdata(pdev, NULL);
}
static const struct gswip_hw_info gswip_xrx200 = {
.max_ports = 7,
.cpu_port = 6,
.ops = &gswip_xrx200_switch_ops,
};
static const struct gswip_hw_info gswip_xrx300 = {
.max_ports = 7,
.cpu_port = 6,
.ops = &gswip_xrx300_switch_ops,
};
static const struct of_device_id gswip_of_match[] = {
{ .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
{ .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
{ .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
{},
};
MODULE_DEVICE_TABLE(of, gswip_of_match);
static struct platform_driver gswip_driver = {
.probe = gswip_probe,
.remove = gswip_remove,
.shutdown = gswip_shutdown,
.driver = {
.name = "gswip",
.of_match_table = gswip_of_match,
},
};
module_platform_driver(gswip_driver);
MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
MODULE_AUTHOR("Hauke Mehrtens <[email protected]>");
MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/lantiq_gswip.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/gpio/consumer.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/pcs/pcs-mtk-lynxi.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
#include <net/dsa.h>
#include "mt7530.h"
static int
mt7530_regmap_write(void *context, unsigned int reg, unsigned int val)
{
struct mii_bus *bus = context;
u16 page, r, lo, hi;
int ret;
page = (reg >> 6) & 0x3ff;
r = (reg >> 2) & 0xf;
lo = val & 0xffff;
hi = val >> 16;
/* MT7530 uses 31 as the pseudo port */
ret = bus->write(bus, 0x1f, 0x1f, page);
if (ret < 0)
return ret;
ret = bus->write(bus, 0x1f, r, lo);
if (ret < 0)
return ret;
ret = bus->write(bus, 0x1f, 0x10, hi);
return ret;
}
static int
mt7530_regmap_read(void *context, unsigned int reg, unsigned int *val)
{
struct mii_bus *bus = context;
u16 page, r, lo, hi;
int ret;
page = (reg >> 6) & 0x3ff;
r = (reg >> 2) & 0xf;
/* MT7530 uses 31 as the pseudo port */
ret = bus->write(bus, 0x1f, 0x1f, page);
if (ret < 0)
return ret;
lo = bus->read(bus, 0x1f, r);
hi = bus->read(bus, 0x1f, 0x10);
*val = (hi << 16) | (lo & 0xffff);
return 0;
}
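/* Address split example for the two accessors above (illustrative
 * comment): a 32-bit switch register such as 0x1f0c is broken into
 *   page = (0x1f0c >> 6) & 0x3ff = 0x7c and r = (0x1f0c >> 2) & 0xf = 0x3.
 * The page is latched through pseudo-PHY 0x1f, register 0x1f, then the
 * two 16-bit halves travel through register r (low) and 0x10 (high).
 */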
static void
mt7530_mdio_regmap_lock(void *mdio_lock)
{
mutex_lock_nested(mdio_lock, MDIO_MUTEX_NESTED);
}
static void
mt7530_mdio_regmap_unlock(void *mdio_lock)
{
mutex_unlock(mdio_lock);
}
static const struct regmap_bus mt7530_regmap_bus = {
.reg_write = mt7530_regmap_write,
.reg_read = mt7530_regmap_read,
};
static int
mt7531_create_sgmii(struct mt7530_priv *priv, bool dual_sgmii)
{
struct regmap_config *mt7531_pcs_config[2] = {};
struct phylink_pcs *pcs;
struct regmap *regmap;
int i, ret = 0;
/* MT7531AE has two SGMII units for port 5 and port 6
* MT7531BE has only one SGMII unit for port 6
*/
for (i = dual_sgmii ? 0 : 1; i < 2; i++) {
mt7531_pcs_config[i] = devm_kzalloc(priv->dev,
sizeof(struct regmap_config),
GFP_KERNEL);
if (!mt7531_pcs_config[i]) {
ret = -ENOMEM;
break;
}
mt7531_pcs_config[i]->name = i ? "port6" : "port5";
mt7531_pcs_config[i]->reg_bits = 16;
mt7531_pcs_config[i]->val_bits = 32;
mt7531_pcs_config[i]->reg_stride = 4;
mt7531_pcs_config[i]->reg_base = MT7531_SGMII_REG_BASE(5 + i);
mt7531_pcs_config[i]->max_register = 0x17c;
mt7531_pcs_config[i]->lock = mt7530_mdio_regmap_lock;
mt7531_pcs_config[i]->unlock = mt7530_mdio_regmap_unlock;
mt7531_pcs_config[i]->lock_arg = &priv->bus->mdio_lock;
regmap = devm_regmap_init(priv->dev,
&mt7530_regmap_bus, priv->bus,
mt7531_pcs_config[i]);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
break;
}
pcs = mtk_pcs_lynxi_create(priv->dev, regmap,
MT7531_PHYA_CTRL_SIGNAL3, 0);
if (!pcs) {
ret = -ENXIO;
break;
}
priv->ports[5 + i].sgmii_pcs = pcs;
}
if (ret && i)
mtk_pcs_lynxi_destroy(priv->ports[5].sgmii_pcs);
return ret;
}
static const struct of_device_id mt7530_of_match[] = {
{ .compatible = "mediatek,mt7621", .data = &mt753x_table[ID_MT7621], },
{ .compatible = "mediatek,mt7530", .data = &mt753x_table[ID_MT7530], },
{ .compatible = "mediatek,mt7531", .data = &mt753x_table[ID_MT7531], },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mt7530_of_match);
static int
mt7530_probe(struct mdio_device *mdiodev)
{
static struct regmap_config *regmap_config;
struct mt7530_priv *priv;
struct device_node *dn;
int ret;
dn = mdiodev->dev.of_node;
priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
ret = mt7530_probe_common(priv);
if (ret)
return ret;
/* Use the mediatek,mcm property to distinguish the hardware type, which
 * causes small differences in the power-on sequence.
 * Without MCM the switch works as a standalone integrated circuit and a
 * GPIO pin is used to complete the reset; in the MCM case the reset is
 * instead handled through memory-mapped register access provided via
 * syscon.
*/
priv->mcm = of_property_read_bool(dn, "mediatek,mcm");
if (priv->mcm) {
dev_info(&mdiodev->dev, "MT7530 adapts as multi-chip module\n");
priv->rstc = devm_reset_control_get(&mdiodev->dev, "mcm");
if (IS_ERR(priv->rstc)) {
dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
return PTR_ERR(priv->rstc);
}
} else {
priv->reset = devm_gpiod_get_optional(&mdiodev->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(priv->reset)) {
dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
return PTR_ERR(priv->reset);
}
}
if (priv->id == ID_MT7530) {
priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
if (IS_ERR(priv->core_pwr))
return PTR_ERR(priv->core_pwr);
priv->io_pwr = devm_regulator_get(&mdiodev->dev, "io");
if (IS_ERR(priv->io_pwr))
return PTR_ERR(priv->io_pwr);
}
regmap_config = devm_kzalloc(&mdiodev->dev, sizeof(*regmap_config),
GFP_KERNEL);
if (!regmap_config)
return -ENOMEM;
regmap_config->reg_bits = 16;
regmap_config->val_bits = 32;
regmap_config->reg_stride = 4;
regmap_config->max_register = MT7530_CREV;
regmap_config->disable_locking = true;
priv->regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus,
priv->bus, regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
if (priv->id == ID_MT7531)
priv->create_sgmii = mt7531_create_sgmii;
return dsa_register_switch(priv->ds);
}
static void
mt7530_remove(struct mdio_device *mdiodev)
{
struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
int ret = 0, i;
if (!priv)
return;
ret = regulator_disable(priv->core_pwr);
if (ret < 0)
dev_err(priv->dev,
"Failed to disable core power: %d\n", ret);
ret = regulator_disable(priv->io_pwr);
if (ret < 0)
dev_err(priv->dev, "Failed to disable io pwr: %d\n",
ret);
mt7530_remove_common(priv);
for (i = 0; i < 2; ++i)
mtk_pcs_lynxi_destroy(priv->ports[5 + i].sgmii_pcs);
}
static void mt7530_shutdown(struct mdio_device *mdiodev)
{
struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
dsa_switch_shutdown(priv->ds);
dev_set_drvdata(&mdiodev->dev, NULL);
}
static struct mdio_driver mt7530_mdio_driver = {
.probe = mt7530_probe,
.remove = mt7530_remove,
.shutdown = mt7530_shutdown,
.mdiodrv.driver = {
.name = "mt7530-mdio",
.of_match_table = mt7530_of_match,
},
};
mdio_module_driver(mt7530_mdio_driver);
MODULE_AUTHOR("Sean Wang <[email protected]>");
MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch (MDIO)");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/mt7530-mdio.c |
// SPDX-License-Identifier: GPL-2.0
/* DSA driver for:
* Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
* Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
* Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
* Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
*
 * This driver takes control of the switch chip connected over a
 * CPU-attached address bus and configures it to route packets around
 * when connected to a CPU port.
*
* Copyright (C) 2019 Pawel Dembicki <[email protected]>
* Based on vitesse-vsc-spi.c by:
 * Copyright (C) 2018 Linus Walleij <[email protected]>
* Includes portions of code from the firmware uploader by:
* Copyright (C) 2009 Gabor Juhos <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include "vitesse-vsc73xx.h"
#define VSC73XX_CMD_PLATFORM_BLOCK_SHIFT 14
#define VSC73XX_CMD_PLATFORM_BLOCK_MASK 0x7
#define VSC73XX_CMD_PLATFORM_SUBBLOCK_SHIFT 10
#define VSC73XX_CMD_PLATFORM_SUBBLOCK_MASK 0xf
#define VSC73XX_CMD_PLATFORM_REGISTER_SHIFT 2
/*
* struct vsc73xx_platform - VSC73xx Platform state container
*/
struct vsc73xx_platform {
struct platform_device *pdev;
void __iomem *base_addr;
struct vsc73xx vsc;
};
static const struct vsc73xx_ops vsc73xx_platform_ops;
static u32 vsc73xx_make_addr(u8 block, u8 subblock, u8 reg)
{
u32 ret;
ret = (block & VSC73XX_CMD_PLATFORM_BLOCK_MASK)
<< VSC73XX_CMD_PLATFORM_BLOCK_SHIFT;
ret |= (subblock & VSC73XX_CMD_PLATFORM_SUBBLOCK_MASK)
<< VSC73XX_CMD_PLATFORM_SUBBLOCK_SHIFT;
ret |= reg << VSC73XX_CMD_PLATFORM_REGISTER_SHIFT;
return ret;
}
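/* Worked example of the address encoding above (illustrative comment):
 * block 1, subblock 0, reg 0x1 maps to
 *   (1 << 14) | (0 << 10) | (0x1 << 2) = 0x4004,
 * i.e. an offset into the CPU-attached memory window rather than an SPI
 * command byte as in the SPI variant of this driver.
 */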
static int vsc73xx_platform_read(struct vsc73xx *vsc, u8 block, u8 subblock,
u8 reg, u32 *val)
{
struct vsc73xx_platform *vsc_platform = vsc->priv;
u32 offset;
if (!vsc73xx_is_addr_valid(block, subblock))
return -EINVAL;
offset = vsc73xx_make_addr(block, subblock, reg);
/* By default the VSC73xx runs in big-endian mode.
* (See "Register Addressing" section 5.5.3 in the VSC7385 manual.)
*/
*val = ioread32be(vsc_platform->base_addr + offset);
return 0;
}
static int vsc73xx_platform_write(struct vsc73xx *vsc, u8 block, u8 subblock,
u8 reg, u32 val)
{
struct vsc73xx_platform *vsc_platform = vsc->priv;
u32 offset;
if (!vsc73xx_is_addr_valid(block, subblock))
return -EINVAL;
offset = vsc73xx_make_addr(block, subblock, reg);
iowrite32be(val, vsc_platform->base_addr + offset);
return 0;
}
static int vsc73xx_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct vsc73xx_platform *vsc_platform;
int ret;
vsc_platform = devm_kzalloc(dev, sizeof(*vsc_platform), GFP_KERNEL);
if (!vsc_platform)
return -ENOMEM;
platform_set_drvdata(pdev, vsc_platform);
vsc_platform->pdev = pdev;
vsc_platform->vsc.dev = dev;
vsc_platform->vsc.priv = vsc_platform;
vsc_platform->vsc.ops = &vsc73xx_platform_ops;
/* obtain I/O memory space */
vsc_platform->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vsc_platform->base_addr)) {
dev_err(&pdev->dev, "cannot request I/O memory space\n");
ret = -ENXIO;
return ret;
}
return vsc73xx_probe(&vsc_platform->vsc);
}
static int vsc73xx_platform_remove(struct platform_device *pdev)
{
struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
if (!vsc_platform)
return 0;
vsc73xx_remove(&vsc_platform->vsc);
return 0;
}
static void vsc73xx_platform_shutdown(struct platform_device *pdev)
{
struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
if (!vsc_platform)
return;
vsc73xx_shutdown(&vsc_platform->vsc);
platform_set_drvdata(pdev, NULL);
}
static const struct vsc73xx_ops vsc73xx_platform_ops = {
.read = vsc73xx_platform_read,
.write = vsc73xx_platform_write,
};
static const struct of_device_id vsc73xx_of_match[] = {
{
.compatible = "vitesse,vsc7385",
},
{
.compatible = "vitesse,vsc7388",
},
{
.compatible = "vitesse,vsc7395",
},
{
.compatible = "vitesse,vsc7398",
},
{ },
};
MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
static struct platform_driver vsc73xx_platform_driver = {
.probe = vsc73xx_platform_probe,
.remove = vsc73xx_platform_remove,
.shutdown = vsc73xx_platform_shutdown,
.driver = {
.name = "vsc73xx-platform",
.of_match_table = vsc73xx_of_match,
},
};
module_platform_driver(vsc73xx_platform_driver);
MODULE_AUTHOR("Pawel Dembicki <[email protected]>");
MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 Platform driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/vitesse-vsc73xx-platform.c |
// SPDX-License-Identifier: GPL-2.0
/* DSA driver for:
* Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
* Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
* Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
* Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
*
* This driver takes control of the switch chip over SPI and
 * configures it to route packets around when connected to a CPU port.
*
 * Copyright (C) 2018 Linus Walleij <[email protected]>
* Includes portions of code from the firmware uploader by:
* Copyright (C) 2009 Gabor Juhos <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>
#include "vitesse-vsc73xx.h"
#define VSC73XX_CMD_SPI_MODE_READ 0
#define VSC73XX_CMD_SPI_MODE_WRITE 1
#define VSC73XX_CMD_SPI_MODE_SHIFT 4
#define VSC73XX_CMD_SPI_BLOCK_SHIFT 5
#define VSC73XX_CMD_SPI_BLOCK_MASK 0x7
#define VSC73XX_CMD_SPI_SUBBLOCK_MASK 0xf
/*
* struct vsc73xx_spi - VSC73xx SPI state container
*/
struct vsc73xx_spi {
struct spi_device *spi;
struct mutex lock; /* Protects SPI traffic */
struct vsc73xx vsc;
};
static const struct vsc73xx_ops vsc73xx_spi_ops;
static u8 vsc73xx_make_addr(u8 mode, u8 block, u8 subblock)
{
u8 ret;
ret =
(block & VSC73XX_CMD_SPI_BLOCK_MASK) << VSC73XX_CMD_SPI_BLOCK_SHIFT;
ret |= (mode & 1) << VSC73XX_CMD_SPI_MODE_SHIFT;
ret |= subblock & VSC73XX_CMD_SPI_SUBBLOCK_MASK;
return ret;
}
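/* Worked example of the command byte built above (illustrative comment):
 * a read (mode 0) of block 1, subblock 0 gives
 *   (1 << 5) | (0 << 4) | 0 = 0x20,
 * sent as cmd[0], followed by the register number in cmd[1] and two
 * padding bytes; the 32-bit value is then clocked back big-endian in a
 * second transfer.
 */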
static int vsc73xx_spi_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
u32 *val)
{
struct vsc73xx_spi *vsc_spi = vsc->priv;
struct spi_transfer t[2];
struct spi_message m;
u8 cmd[4];
u8 buf[4];
int ret;
if (!vsc73xx_is_addr_valid(block, subblock))
return -EINVAL;
spi_message_init(&m);
memset(&t, 0, sizeof(t));
t[0].tx_buf = cmd;
t[0].len = sizeof(cmd);
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
t[1].len = sizeof(buf);
spi_message_add_tail(&t[1], &m);
cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_SPI_MODE_READ, block, subblock);
cmd[1] = reg;
cmd[2] = 0;
cmd[3] = 0;
mutex_lock(&vsc_spi->lock);
ret = spi_sync(vsc_spi->spi, &m);
mutex_unlock(&vsc_spi->lock);
if (ret)
return ret;
*val = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
return 0;
}
static int vsc73xx_spi_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
u32 val)
{
struct vsc73xx_spi *vsc_spi = vsc->priv;
struct spi_transfer t[2];
struct spi_message m;
u8 cmd[2];
u8 buf[4];
int ret;
if (!vsc73xx_is_addr_valid(block, subblock))
return -EINVAL;
spi_message_init(&m);
memset(&t, 0, sizeof(t));
t[0].tx_buf = cmd;
t[0].len = sizeof(cmd);
spi_message_add_tail(&t[0], &m);
t[1].tx_buf = buf;
t[1].len = sizeof(buf);
spi_message_add_tail(&t[1], &m);
cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_SPI_MODE_WRITE, block, subblock);
cmd[1] = reg;
buf[0] = (val >> 24) & 0xff;
buf[1] = (val >> 16) & 0xff;
buf[2] = (val >> 8) & 0xff;
buf[3] = val & 0xff;
mutex_lock(&vsc_spi->lock);
ret = spi_sync(vsc_spi->spi, &m);
mutex_unlock(&vsc_spi->lock);
return ret;
}
static int vsc73xx_spi_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct vsc73xx_spi *vsc_spi;
int ret;
vsc_spi = devm_kzalloc(dev, sizeof(*vsc_spi), GFP_KERNEL);
if (!vsc_spi)
return -ENOMEM;
spi_set_drvdata(spi, vsc_spi);
vsc_spi->spi = spi_dev_get(spi);
vsc_spi->vsc.dev = dev;
vsc_spi->vsc.priv = vsc_spi;
vsc_spi->vsc.ops = &vsc73xx_spi_ops;
mutex_init(&vsc_spi->lock);
spi->mode = SPI_MODE_0;
spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret < 0) {
dev_err(dev, "spi setup failed.\n");
return ret;
}
return vsc73xx_probe(&vsc_spi->vsc);
}
static void vsc73xx_spi_remove(struct spi_device *spi)
{
struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
if (!vsc_spi)
return;
vsc73xx_remove(&vsc_spi->vsc);
}
static void vsc73xx_spi_shutdown(struct spi_device *spi)
{
struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
if (!vsc_spi)
return;
vsc73xx_shutdown(&vsc_spi->vsc);
spi_set_drvdata(spi, NULL);
}
static const struct vsc73xx_ops vsc73xx_spi_ops = {
.read = vsc73xx_spi_read,
.write = vsc73xx_spi_write,
};
static const struct of_device_id vsc73xx_of_match[] = {
{
.compatible = "vitesse,vsc7385",
},
{
.compatible = "vitesse,vsc7388",
},
{
.compatible = "vitesse,vsc7395",
},
{
.compatible = "vitesse,vsc7398",
},
{ },
};
MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
static const struct spi_device_id vsc73xx_spi_ids[] = {
{ "vsc7385" },
{ "vsc7388" },
{ "vsc7395" },
{ "vsc7398" },
{ },
};
MODULE_DEVICE_TABLE(spi, vsc73xx_spi_ids);
static struct spi_driver vsc73xx_spi_driver = {
.probe = vsc73xx_spi_probe,
.remove = vsc73xx_spi_remove,
.shutdown = vsc73xx_spi_shutdown,
.id_table = vsc73xx_spi_ids,
.driver = {
.name = "vsc73xx-spi",
.of_match_table = vsc73xx_of_match,
},
};
module_spi_driver(vsc73xx_spi_driver);
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 SPI driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/vitesse-vsc73xx-spi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Broadcom Starfighter 2 DSA switch CFP support
*
* Copyright (C) 2016, Broadcom
*/
#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>
#include <net/switchdev.h>
#include <uapi/linux/if_bridge.h>
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
struct cfp_rule {
int port;
struct ethtool_rx_flow_spec fs;
struct list_head next;
};
struct cfp_udf_slice_layout {
u8 slices[UDFS_PER_SLICE];
u32 mask_value;
u32 base_offset;
};
struct cfp_udf_layout {
struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};
static const u8 zero_slice[UDFS_PER_SLICE] = { };
/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
.udfs = {
[1] = {
.slices = {
/* End of L2, byte offset 12, src IP[0:15] */
CFG_UDF_EOL2 | 6,
/* End of L2, byte offset 14, src IP[16:31] */
CFG_UDF_EOL2 | 7,
/* End of L2, byte offset 16, dst IP[0:15] */
CFG_UDF_EOL2 | 8,
/* End of L2, byte offset 18, dst IP[16:31] */
CFG_UDF_EOL2 | 9,
/* End of L3, byte offset 0, src port */
CFG_UDF_EOL3 | 0,
/* End of L3, byte offset 2, dst port */
CFG_UDF_EOL3 | 1,
0, 0, 0
},
.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
},
},
};
/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
.udfs = {
[0] = {
.slices = {
/* End of L2, byte offset 8, src IP[0:15] */
CFG_UDF_EOL2 | 4,
/* End of L2, byte offset 10, src IP[16:31] */
CFG_UDF_EOL2 | 5,
/* End of L2, byte offset 12, src IP[32:47] */
CFG_UDF_EOL2 | 6,
/* End of L2, byte offset 14, src IP[48:63] */
CFG_UDF_EOL2 | 7,
/* End of L2, byte offset 16, src IP[64:79] */
CFG_UDF_EOL2 | 8,
/* End of L2, byte offset 18, src IP[80:95] */
CFG_UDF_EOL2 | 9,
/* End of L2, byte offset 20, src IP[96:111] */
CFG_UDF_EOL2 | 10,
/* End of L2, byte offset 22, src IP[112:127] */
CFG_UDF_EOL2 | 11,
/* End of L3, byte offset 0, src port */
CFG_UDF_EOL3 | 0,
},
.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
.base_offset = CORE_UDF_0_B_0_8_PORT_0,
},
[3] = {
.slices = {
/* End of L2, byte offset 24, dst IP[0:15] */
CFG_UDF_EOL2 | 12,
/* End of L2, byte offset 26, dst IP[16:31] */
CFG_UDF_EOL2 | 13,
/* End of L2, byte offset 28, dst IP[32:47] */
CFG_UDF_EOL2 | 14,
/* End of L2, byte offset 30, dst IP[48:63] */
CFG_UDF_EOL2 | 15,
/* End of L2, byte offset 32, dst IP[64:79] */
CFG_UDF_EOL2 | 16,
/* End of L2, byte offset 34, dst IP[80:95] */
CFG_UDF_EOL2 | 17,
/* End of L2, byte offset 36, dst IP[96:111] */
CFG_UDF_EOL2 | 18,
/* End of L2, byte offset 38, dst IP[112:127] */
CFG_UDF_EOL2 | 19,
/* End of L3, byte offset 2, dst port */
CFG_UDF_EOL3 | 1,
},
.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
.base_offset = CORE_UDF_0_D_0_11_PORT_0,
},
},
};
static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
unsigned int i, count = 0;
for (i = 0; i < UDFS_PER_SLICE; i++) {
if (layout[i] != 0)
count++;
}
return count;
}
static inline u32 udf_upper_bits(int num_udf)
{
return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}
static inline u32 udf_lower_bits(int num_udf)
{
return (u8)GENMASK(num_udf - 1, 0);
}
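/* Worked example of the two helpers above: an IPv6 half rule uses all
* nine UDFs in its slice, so num_udf = 9 gives udf_lower_bits() = 0xff
* (UDF_Valid[7:0]) and udf_upper_bits() = 0x1 (UDF_Valid[8]); an IPv4
* rule uses six UDFs, so num_udf = 6 gives 0x3f and 0 respectively.
*/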
static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
unsigned int start)
{
const struct cfp_udf_slice_layout *slice_layout;
unsigned int slice_idx;
for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
slice_layout = &l->udfs[slice_idx];
if (memcmp(slice_layout->slices, zero_slice,
sizeof(zero_slice)))
break;
}
return slice_idx;
}
static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
const struct cfp_udf_layout *layout,
unsigned int slice_num)
{
u32 offset = layout->udfs[slice_num].base_offset;
unsigned int i;
for (i = 0; i < UDFS_PER_SLICE; i++)
core_writel(priv, layout->udfs[slice_num].slices[i],
offset + i * 4);
}
static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
unsigned int timeout = 1000;
u32 reg;
reg = core_readl(priv, CORE_CFP_ACC);
reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
reg |= OP_STR_DONE | op;
core_writel(priv, reg, CORE_CFP_ACC);
do {
reg = core_readl(priv, CORE_CFP_ACC);
if (!(reg & OP_STR_DONE))
break;
cpu_relax();
} while (timeout--);
if (!timeout)
return -ETIMEDOUT;
return 0;
}
static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
unsigned int addr)
{
u32 reg;
WARN_ON(addr >= priv->num_cfp_rules);
reg = core_readl(priv, CORE_CFP_ACC);
reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
reg |= addr << XCESS_ADDR_SHIFT;
core_writel(priv, reg, CORE_CFP_ACC);
}
static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
/* Entry #0 is reserved */
return priv->num_cfp_rules - 1;
}
static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
unsigned int rule_index,
int src_port,
unsigned int port_num,
unsigned int queue_num,
bool fwd_map_change)
{
int ret;
u32 reg;
/* Replace the ARL-derived destination with the DST_MAP-derived one,
* defining which port and queue this should be forwarded to.
*/
if (fwd_map_change)
reg = CHANGE_FWRD_MAP_IB_REP_ARL |
BIT(port_num + DST_MAP_IB_SHIFT) |
CHANGE_TC | queue_num << NEW_TC_SHIFT;
else
reg = 0;
/* Enable looping back to the original port */
if (src_port == port_num)
reg |= LOOP_BK_EN;
core_writel(priv, reg, CORE_ACT_POL_DATA0);
/* Set classification ID that needs to be put in Broadcom tag */
core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);
core_writel(priv, 0, CORE_ACT_POL_DATA2);
/* Configure policer RAM now */
ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
if (ret) {
pr_err("Policer entry at %d failed\n", rule_index);
return ret;
}
/* Disable the policer */
core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);
/* Now the rate meter */
ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
if (ret) {
pr_err("Meter entry at %d failed\n", rule_index);
return ret;
}
return 0;
}
static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
struct flow_dissector_key_ipv4_addrs *addrs,
struct flow_dissector_key_ports *ports,
const __be16 vlan_tci,
unsigned int slice_num, u8 num_udf,
bool mask)
{
u32 reg, offset;
/* UDF_Valid[7:0] [31:24]
* S-Tag [23:8]
* C-Tag [7:0]
*/
reg = udf_lower_bits(num_udf) << 24 | be16_to_cpu(vlan_tci) >> 8;
if (mask)
core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
else
core_writel(priv, reg, CORE_CFP_DATA_PORT(5));
/* C-Tag [31:24]
* UDF_n_A8 [23:8]
* UDF_n_A7 [7:0]
*/
reg = (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
if (mask)
offset = CORE_CFP_MASK_PORT(4);
else
offset = CORE_CFP_DATA_PORT(4);
core_writel(priv, reg, offset);
/* UDF_n_A7 [31:24]
* UDF_n_A6 [23:8]
* UDF_n_A5 [7:0]
*/
reg = be16_to_cpu(ports->dst) >> 8;
if (mask)
offset = CORE_CFP_MASK_PORT(3);
else
offset = CORE_CFP_DATA_PORT(3);
core_writel(priv, reg, offset);
/* UDF_n_A5 [31:24]
* UDF_n_A4 [23:8]
* UDF_n_A3 [7:0]
*/
reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
(u32)be16_to_cpu(ports->src) << 8 |
(be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
if (mask)
offset = CORE_CFP_MASK_PORT(2);
else
offset = CORE_CFP_DATA_PORT(2);
core_writel(priv, reg, offset);
/* UDF_n_A3 [31:24]
* UDF_n_A2 [23:8]
* UDF_n_A1 [7:0]
*/
reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
(u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
(be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
if (mask)
offset = CORE_CFP_MASK_PORT(1);
else
offset = CORE_CFP_DATA_PORT(1);
core_writel(priv, reg, offset);
/* UDF_n_A1 [31:24]
* UDF_n_A0 [23:8]
* Reserved [7:4]
* Slice ID [3:2]
* Slice valid [1:0]
*/
reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
(u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
SLICE_NUM(slice_num) | SLICE_VALID;
if (mask)
offset = CORE_CFP_MASK_PORT(0);
else
offset = CORE_CFP_DATA_PORT(0);
core_writel(priv, reg, offset);
}
static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
unsigned int port_num,
unsigned int queue_num,
struct ethtool_rx_flow_spec *fs)
{
__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
struct ethtool_rx_flow_spec_input input = {};
const struct cfp_udf_layout *layout;
unsigned int slice_num, rule_index;
struct ethtool_rx_flow_rule *flow;
struct flow_match_ipv4_addrs ipv4;
struct flow_match_ports ports;
struct flow_match_ip ip;
u8 ip_proto, ip_frag;
u8 num_udf;
u32 reg;
int ret;
switch (fs->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
ip_proto = IPPROTO_TCP;
break;
case UDP_V4_FLOW:
ip_proto = IPPROTO_UDP;
break;
default:
return -EINVAL;
}
ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
/* Extract VLAN TCI */
if (fs->flow_type & FLOW_EXT) {
vlan_tci = fs->h_ext.vlan_tci;
vlan_m_tci = fs->m_ext.vlan_tci;
}
/* Locate the first rule available */
if (fs->location == RX_CLS_LOC_ANY)
rule_index = find_first_zero_bit(priv->cfp.used,
priv->num_cfp_rules);
else
rule_index = fs->location;
if (rule_index > bcm_sf2_cfp_rule_size(priv))
return -ENOSPC;
input.fs = fs;
flow = ethtool_rx_flow_rule_create(&input);
if (IS_ERR(flow))
return PTR_ERR(flow);
flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
flow_rule_match_ports(flow->rule, &ports);
flow_rule_match_ip(flow->rule, &ip);
layout = &udf_tcpip4_layout;
/* We only use one UDF slice for now */
slice_num = bcm_sf2_get_slice_number(layout, 0);
if (slice_num == UDF_NUM_SLICES) {
ret = -EINVAL;
goto out_err_flow_rule;
}
num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
/* Apply the UDF layout for this filter */
bcm_sf2_cfp_udf_set(priv, layout, slice_num);
/* Apply to all packets received through this port */
core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));
/* Source port map match */
core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
/* S-Tag status [31:30]
* C-Tag status [29:28]
* L2 framing [27:26]
* L3 framing [25:24]
* IP ToS [23:16]
* IP proto [15:08]
* IP Fragm [7]
* Non 1st frag [6]
* IP Authen [5]
* TTL range [4:3]
* PPPoE session [2]
* Reserved [1]
* UDF_Valid[8] [0]
*/
core_writel(priv, ip.key->tos << IPTOS_SHIFT |
ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
udf_upper_bits(num_udf),
CORE_CFP_DATA_PORT(6));
/* Mask with the specific layout for IPv4 packets */
core_writel(priv, layout->udfs[slice_num].mask_value |
udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));
/* Program the match and the mask */
bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, vlan_tci,
slice_num, num_udf, false);
bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, vlan_m_tci,
SLICE_NUM_MASK, num_udf, true);
/* Insert into TCAM now */
bcm_sf2_cfp_rule_addr_set(priv, rule_index);
ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
if (ret) {
pr_err("TCAM entry at addr %d failed\n", rule_index);
goto out_err_flow_rule;
}
/* Insert into Action and policer RAMs now */
ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num,
queue_num, true);
if (ret)
goto out_err_flow_rule;
/* Turn on CFP for this rule now */
reg = core_readl(priv, CORE_CFP_CTL_REG);
reg |= BIT(port);
core_writel(priv, reg, CORE_CFP_CTL_REG);
/* Flag the rule as being used and return it */
set_bit(rule_index, priv->cfp.used);
set_bit(rule_index, priv->cfp.unique);
fs->location = rule_index;
return 0;
out_err_flow_rule:
ethtool_rx_flow_rule_destroy(flow);
return ret;
}
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
const __be32 *ip6_addr, const __be16 port,
const __be16 vlan_tci,
unsigned int slice_num, u32 udf_bits,
bool mask)
{
u32 reg, tmp, val, offset;
/* UDF_Valid[7:0] [31:24]
* S-Tag [23:8]
* C-Tag [7:0]
*/
reg = udf_bits << 24 | be16_to_cpu(vlan_tci) >> 8;
if (mask)
core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
else
core_writel(priv, reg, CORE_CFP_DATA_PORT(5));
/* C-Tag [31:24]
* UDF_n_B8 [23:8] (port)
* UDF_n_B7 (upper) [7:0] (addr[15:8])
*/
reg = be32_to_cpu(ip6_addr[3]);
val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
val |= (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
if (mask)
offset = CORE_CFP_MASK_PORT(4);
else
offset = CORE_CFP_DATA_PORT(4);
core_writel(priv, val, offset);
/* UDF_n_B7 (lower) [31:24] (addr[7:0])
* UDF_n_B6 [23:8] (addr[31:16])
* UDF_n_B5 (upper) [7:0] (addr[47:40])
*/
tmp = be32_to_cpu(ip6_addr[2]);
val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
((tmp >> 8) & 0xff);
if (mask)
offset = CORE_CFP_MASK_PORT(3);
else
offset = CORE_CFP_DATA_PORT(3);
core_writel(priv, val, offset);
/* UDF_n_B5 (lower) [31:24] (addr[39:32])
* UDF_n_B4 [23:8] (addr[63:48])
* UDF_n_B3 (upper) [7:0] (addr[79:72])
*/
reg = be32_to_cpu(ip6_addr[1]);
val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
((reg >> 8) & 0xff);
if (mask)
offset = CORE_CFP_MASK_PORT(2);
else
offset = CORE_CFP_DATA_PORT(2);
core_writel(priv, val, offset);
/* UDF_n_B3 (lower) [31:24] (addr[71:64])
* UDF_n_B2 [23:8] (addr[95:80])
* UDF_n_B1 (upper) [7:0] (addr[111:104])
*/
tmp = be32_to_cpu(ip6_addr[0]);
val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
((tmp >> 8) & 0xff);
if (mask)
offset = CORE_CFP_MASK_PORT(1);
else
offset = CORE_CFP_DATA_PORT(1);
core_writel(priv, val, offset);
/* UDF_n_B1 (lower) [31:24] (addr[103:96])
* UDF_n_B0 [23:8] (addr[127:112])
* Reserved [7:4]
* Slice ID [3:2]
* Slice valid [1:0]
*/
reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
SLICE_NUM(slice_num) | SLICE_VALID;
if (mask)
offset = CORE_CFP_MASK_PORT(0);
else
offset = CORE_CFP_DATA_PORT(0);
core_writel(priv, reg, offset);
}
static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
int port, u32 location)
{
struct cfp_rule *rule;
list_for_each_entry(rule, &priv->cfp.rules_list, next) {
if (rule->port == port && rule->fs.location == location)
return rule;
}
return NULL;
}
static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
struct ethtool_rx_flow_spec *fs)
{
struct cfp_rule *rule = NULL;
size_t fs_size = 0;
int ret = 1;
if (list_empty(&priv->cfp.rules_list))
return ret;
list_for_each_entry(rule, &priv->cfp.rules_list, next) {
ret = 1;
if (rule->port != port)
continue;
if (rule->fs.flow_type != fs->flow_type ||
rule->fs.ring_cookie != fs->ring_cookie ||
rule->fs.h_ext.data[0] != fs->h_ext.data[0])
continue;
switch (fs->flow_type & ~FLOW_EXT) {
case TCP_V6_FLOW:
case UDP_V6_FLOW:
fs_size = sizeof(struct ethtool_tcpip6_spec);
break;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
fs_size = sizeof(struct ethtool_tcpip4_spec);
break;
default:
continue;
}
ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
/* Compare VLAN TCI values as well */
if (rule->fs.flow_type & FLOW_EXT) {
ret |= rule->fs.h_ext.vlan_tci != fs->h_ext.vlan_tci;
ret |= rule->fs.m_ext.vlan_tci != fs->m_ext.vlan_tci;
}
if (ret == 0)
break;
}
return ret;
}
static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
unsigned int port_num,
unsigned int queue_num,
struct ethtool_rx_flow_spec *fs)
{
__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
struct ethtool_rx_flow_spec_input input = {};
unsigned int slice_num, rule_index[2];
const struct cfp_udf_layout *layout;
struct ethtool_rx_flow_rule *flow;
struct flow_match_ipv6_addrs ipv6;
struct flow_match_ports ports;
u8 ip_proto, ip_frag;
int ret = 0;
u8 num_udf;
u32 reg;
switch (fs->flow_type & ~FLOW_EXT) {
case TCP_V6_FLOW:
ip_proto = IPPROTO_TCP;
break;
case UDP_V6_FLOW:
ip_proto = IPPROTO_UDP;
break;
default:
return -EINVAL;
}
ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
/* Extract VLAN TCI */
if (fs->flow_type & FLOW_EXT) {
vlan_tci = fs->h_ext.vlan_tci;
vlan_m_tci = fs->m_ext.vlan_tci;
}
layout = &udf_tcpip6_layout;
slice_num = bcm_sf2_get_slice_number(layout, 0);
if (slice_num == UDF_NUM_SLICES)
return -EINVAL;
num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
/* Negotiate two indexes, one for the second half which we are chained
* from, which is what we will return to user-space, and a second one
* which is used to store its first half. That first half does not
* allow any choice of placement, so it just needs to find the next
* available bit. We return the second half as fs->location because
* that helps with the rule lookup later on: since the second half is
* chained from its first half, we can easily identify IPv6 CFP rules
* by checking whether they carry a CHAIN_ID.
*
* We also want the second half to have a lower rule_index than its
* first half because the HW search is by incrementing addresses.
*/
if (fs->location == RX_CLS_LOC_ANY)
rule_index[1] = find_first_zero_bit(priv->cfp.used,
priv->num_cfp_rules);
else
rule_index[1] = fs->location;
if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
return -ENOSPC;
/* Flag it as used (cleared on error path) such that we can immediately
* obtain a second one to chain from.
*/
set_bit(rule_index[1], priv->cfp.used);
rule_index[0] = find_first_zero_bit(priv->cfp.used,
priv->num_cfp_rules);
if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
ret = -ENOSPC;
goto out_err;
}
input.fs = fs;
flow = ethtool_rx_flow_rule_create(&input);
if (IS_ERR(flow)) {
ret = PTR_ERR(flow);
goto out_err;
}
flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
flow_rule_match_ports(flow->rule, &ports);
/* Apply the UDF layout for this filter */
bcm_sf2_cfp_udf_set(priv, layout, slice_num);
/* Apply to all packets received through this port */
core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));
/* Source port map match */
core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
/* S-Tag status [31:30]
* C-Tag status [29:28]
* L2 framing [27:26]
* L3 framing [25:24]
* IP ToS [23:16]
* IP proto [15:08]
* IP Fragm [7]
* Non 1st frag [6]
* IP Authen [5]
* TTL range [4:3]
* PPPoE session [2]
* Reserved [1]
* UDF_Valid[8] [0]
*/
reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
core_writel(priv, reg, CORE_CFP_DATA_PORT(6));
/* Mask with the specific layout for IPv6 packets including
* UDF_Valid[8]
*/
reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
core_writel(priv, reg, CORE_CFP_MASK_PORT(6));
/* Slice the IPv6 source address and port */
bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
ports.key->src, vlan_tci, slice_num,
udf_lower_bits(num_udf), false);
bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
ports.mask->src, vlan_m_tci, SLICE_NUM_MASK,
udf_lower_bits(num_udf), true);
/* Insert into TCAM now because we need to insert a second rule */
bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
if (ret) {
pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
goto out_err_flow_rule;
}
/* Insert into Action and policer RAMs now */
ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num,
queue_num, false);
if (ret)
goto out_err_flow_rule;
/* Now deal with the second slice to chain this rule */
slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
if (slice_num == UDF_NUM_SLICES) {
ret = -EINVAL;
goto out_err_flow_rule;
}
num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
/* Apply the UDF layout for this filter */
bcm_sf2_cfp_udf_set(priv, layout, slice_num);
/* Chained rule, source port match is coming from the rule we are
* chained from.
*/
core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
core_writel(priv, 0, CORE_CFP_MASK_PORT(7));
/*
* CHAIN ID [31:24] chain to previous slice
* Reserved [23:20]
* UDF_Valid[11:8] [19:16]
* UDF_Valid[7:0] [15:8]
* UDF_n_D11 [7:0]
*/
reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
udf_lower_bits(num_udf) << 8;
core_writel(priv, reg, CORE_CFP_DATA_PORT(6));
/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
udf_lower_bits(num_udf) << 8;
core_writel(priv, reg, CORE_CFP_MASK_PORT(6));
bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
ports.key->dst, 0, slice_num,
0, false);
bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
ports.key->dst, 0, SLICE_NUM_MASK,
0, true);
/* Insert into TCAM now */
bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);
ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
if (ret) {
pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
goto out_err_flow_rule;
}
/* Insert into Action and policer RAMs now, set chain ID to
* the one we are chained to
*/
ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num,
queue_num, true);
if (ret)
goto out_err_flow_rule;
/* Turn on CFP for this rule now */
reg = core_readl(priv, CORE_CFP_CTL_REG);
reg |= BIT(port);
core_writel(priv, reg, CORE_CFP_CTL_REG);
/* Flag the second half rule as being used now, return it as the
* location, and flag it as unique while dumping rules
*/
set_bit(rule_index[0], priv->cfp.used);
set_bit(rule_index[1], priv->cfp.unique);
fs->location = rule_index[1];
return ret;
out_err_flow_rule:
ethtool_rx_flow_rule_destroy(flow);
out_err:
clear_bit(rule_index[1], priv->cfp.used);
return ret;
}
static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
struct ethtool_rx_flow_spec *fs)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
__u64 ring_cookie = fs->ring_cookie;
struct switchdev_obj_port_vlan vlan;
unsigned int queue_num, port_num;
u16 vid;
int ret;
/* This rule is a Wake-on-LAN filter and we must specifically
* target the CPU port in order for it to work.
*/
if (ring_cookie == RX_CLS_FLOW_WAKE)
ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;
/* We do not support discarding packets; check that the
* destination port is enabled and that we are within the
* number of ports supported by the switch
*/
port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;
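/* Illustrative mapping (assuming SF2_NUM_EGRESS_QUEUES is 8): a
* ring_cookie of 18 selects destination port 18 / 8 = 2 and, further
* below, queue 18 % 8 = 2.
*/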
if (ring_cookie == RX_CLS_FLOW_DISC ||
!(dsa_is_user_port(ds, port_num) ||
dsa_is_cpu_port(ds, port_num)) ||
port_num >= priv->hw_params.num_ports)
return -EINVAL;
/* If the rule is matching a particular VLAN, make sure that we honor
* the matching and have it tagged or untagged on the destination port;
* we do this on egress with a VLAN entry. The egress tagging attribute
* is expected to be provided in h_ext.data[1] bit 0. A 1 means untagged,
* a 0 means tagged.
*/
if (fs->flow_type & FLOW_EXT) {
/* We cannot support matching multiple VLAN IDs yet */
if ((be16_to_cpu(fs->m_ext.vlan_tci) & VLAN_VID_MASK) !=
VLAN_VID_MASK)
return -EINVAL;
vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
vlan.vid = vid;
if (be32_to_cpu(fs->h_ext.data[1]) & 1)
vlan.flags = BRIDGE_VLAN_INFO_UNTAGGED;
else
vlan.flags = 0;
ret = ds->ops->port_vlan_add(ds, port_num, &vlan, NULL);
if (ret)
return ret;
}
/*
* We have a small oddity where Port 6 just does not have a
* valid bit here (so we subtract one).
*/
queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
if (port_num >= 7)
port_num -= 1;
switch (fs->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
queue_num, fs);
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
queue_num, fs);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
struct ethtool_rx_flow_spec *fs)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct cfp_rule *rule = NULL;
int ret = -EINVAL;
/* Check for unsupported extensions */
if (fs->flow_type & FLOW_MAC_EXT)
return -EINVAL;
if (fs->location != RX_CLS_LOC_ANY &&
fs->location > bcm_sf2_cfp_rule_size(priv))
return -EINVAL;
if ((fs->flow_type & FLOW_EXT) &&
!(ds->ops->port_vlan_add || ds->ops->port_vlan_del))
return -EOPNOTSUPP;
if (fs->location != RX_CLS_LOC_ANY &&
test_bit(fs->location, priv->cfp.used))
return -EBUSY;
ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
if (ret == 0)
return -EEXIST;
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
return -ENOMEM;
ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
if (ret) {
kfree(rule);
return ret;
}
rule->port = port;
memcpy(&rule->fs, fs, sizeof(*fs));
list_add_tail(&rule->next, &priv->cfp.rules_list);
return ret;
}
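/* Illustrative user-space usage (hypothetical interface name and values,
* not taken from this driver's documentation):
*
*   ethtool -N sw0p1 flow-type tcp4 dst-port 80 action 16 loc 1
*
* requests a TCP/IPv4 rule on user port sw0p1 with ring_cookie 16, which
* the insert path above maps to destination port 2, queue 0 when there
* are eight egress queues per port.
*/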
static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
u32 loc, u32 *next_loc)
{
int ret;
u32 reg;
/* Indicate which rule we want to read */
bcm_sf2_cfp_rule_addr_set(priv, loc);
ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
if (ret)
return ret;
/* Check if this is possibly an IPv6 rule that would
* indicate we need to delete its companion rule
* as well
*/
reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
if (next_loc)
*next_loc = (reg >> 24) & CHAIN_ID_MASK;
/* Clear its valid bits */
reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
reg &= ~SLICE_VALID;
core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
/* Write back this entry into the TCAM now */
ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
if (ret)
return ret;
clear_bit(loc, priv->cfp.used);
clear_bit(loc, priv->cfp.unique);
return 0;
}
static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
u32 loc)
{
u32 next_loc = 0;
int ret;
ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
if (ret)
return ret;
/* If this was an IPv6 rule, delete its companion rule too */
if (next_loc)
ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);
return ret;
}
static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
struct cfp_rule *rule;
int ret;
if (loc > bcm_sf2_cfp_rule_size(priv))
return -EINVAL;
/* Refuse deleting unused rules, and those that are not unique since
* that could leave IPv6 rules with only one of their chained rules in
* the table.
*/
if (!test_bit(loc, priv->cfp.unique) || loc == 0)
return -EINVAL;
rule = bcm_sf2_cfp_rule_find(priv, port, loc);
if (!rule)
return -EINVAL;
ret = bcm_sf2_cfp_rule_remove(priv, port, loc);
list_del(&rule->next);
kfree(rule);
return ret;
}
static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
unsigned int i;
for (i = 0; i < sizeof(flow->m_u); i++)
flow->m_u.hdata[i] ^= 0xff;
flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
flow->m_ext.data[0] ^= cpu_to_be32(~0);
flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
struct ethtool_rxnfc *nfc)
{
struct cfp_rule *rule;
rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
if (!rule)
return -EINVAL;
memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));
bcm_sf2_invert_masks(&nfc->fs);
/* Put the TCAM size here */
nfc->data = bcm_sf2_cfp_rule_size(priv);
return 0;
}
/* Report all active rule locations by walking the bitmap of unique rules */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int port, struct ethtool_rxnfc *nfc,
u32 *rule_locs)
{
unsigned int index = 1, rules_cnt = 0;
for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
rule_locs[rules_cnt] = index;
rules_cnt++;
}
/* Put the TCAM size here */
nfc->data = bcm_sf2_cfp_rule_size(priv);
nfc->rule_cnt = rules_cnt;
return 0;
}
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
mutex_lock(&priv->cfp.lock);
switch (nfc->cmd) {
case ETHTOOL_GRXCLSRLCNT:
/* Subtract the default, unusable rule */
nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
priv->num_cfp_rules) - 1;
/* We support specifying rule locations */
nfc->data |= RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRULE:
ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
break;
case ETHTOOL_GRXCLSRLALL:
ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
break;
default:
ret = -EOPNOTSUPP;
break;
}
mutex_unlock(&priv->cfp.lock);
if (ret)
return ret;
/* Pass up the commands to the attached master network device */
if (p->ethtool_ops->get_rxnfc) {
ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
if (ret == -EOPNOTSUPP)
ret = 0;
}
return ret;
}
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
mutex_lock(&priv->cfp.lock);
switch (nfc->cmd) {
case ETHTOOL_SRXCLSRLINS:
ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
break;
case ETHTOOL_SRXCLSRLDEL:
ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
break;
default:
ret = -EOPNOTSUPP;
break;
}
mutex_unlock(&priv->cfp.lock);
if (ret)
return ret;
/* Pass up the commands to the attached master network device.
* This can fail, so rollback the operation if we need to.
*/
if (p->ethtool_ops->set_rxnfc) {
ret = p->ethtool_ops->set_rxnfc(p, nfc);
if (ret && ret != -EOPNOTSUPP) {
mutex_lock(&priv->cfp.lock);
bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
mutex_unlock(&priv->cfp.lock);
} else {
ret = 0;
}
}
return ret;
}
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
unsigned int timeout = 1000;
u32 reg;
reg = core_readl(priv, CORE_CFP_ACC);
reg |= TCAM_RESET;
core_writel(priv, reg, CORE_CFP_ACC);
do {
reg = core_readl(priv, CORE_CFP_ACC);
if (!(reg & TCAM_RESET))
break;
cpu_relax();
} while (timeout--);
if (!timeout)
return -ETIMEDOUT;
return 0;
}
void bcm_sf2_cfp_exit(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct cfp_rule *rule, *n;
if (list_empty(&priv->cfp.rules_list))
return;
list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
}
int bcm_sf2_cfp_resume(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct cfp_rule *rule;
int ret = 0;
u32 reg;
if (list_empty(&priv->cfp.rules_list))
return ret;
reg = core_readl(priv, CORE_CFP_CTL_REG);
reg &= ~CFP_EN_MAP_MASK;
core_writel(priv, reg, CORE_CFP_CTL_REG);
ret = bcm_sf2_cfp_rst(priv);
if (ret)
return ret;
list_for_each_entry(rule, &priv->cfp.rules_list, next) {
ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
rule->fs.location);
if (ret) {
dev_err(ds->dev, "failed to remove rule\n");
return ret;
}
ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
if (ret) {
dev_err(ds->dev, "failed to restore rule\n");
return ret;
}
}
return ret;
}
static const struct bcm_sf2_cfp_stat {
unsigned int offset;
unsigned int ram_loc;
const char *name;
} bcm_sf2_cfp_stats[] = {
{
.offset = CORE_STAT_GREEN_CNTR,
.ram_loc = GREEN_STAT_RAM,
.name = "Green"
},
{
.offset = CORE_STAT_YELLOW_CNTR,
.ram_loc = YELLOW_STAT_RAM,
.name = "Yellow"
},
{
.offset = CORE_STAT_RED_CNTR,
.ram_loc = RED_STAT_RAM,
.name = "Red"
},
};
void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
char buf[ETH_GSTRING_LEN];
unsigned int i, j, iter;
if (stringset != ETH_SS_STATS)
return;
for (i = 1; i < priv->num_cfp_rules; i++) {
for (j = 0; j < s; j++) {
snprintf(buf, sizeof(buf),
"CFP%03d_%sCntr",
i, bcm_sf2_cfp_stats[j].name);
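/* e.g. rule 1 with the "Green" counter yields "CFP001_GreenCntr" */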
iter = (i - 1) * s + j;
strscpy(data + iter * ETH_GSTRING_LEN,
buf, ETH_GSTRING_LEN);
}
}
}
void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
const struct bcm_sf2_cfp_stat *stat;
unsigned int i, j, iter;
struct cfp_rule *rule;
int ret;
mutex_lock(&priv->cfp.lock);
for (i = 1; i < priv->num_cfp_rules; i++) {
rule = bcm_sf2_cfp_rule_find(priv, port, i);
if (!rule)
continue;
for (j = 0; j < s; j++) {
stat = &bcm_sf2_cfp_stats[j];
bcm_sf2_cfp_rule_addr_set(priv, i);
ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ);
if (ret)
continue;
iter = (i - 1) * s + j;
data[iter] = core_readl(priv, stat->offset);
}
}
mutex_unlock(&priv->cfp.lock);
}
int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
if (sset != ETH_SS_STATS)
return 0;
/* 3 counters per CFP rules */
return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats);
}
| linux-master | drivers/net/dsa/bcm_sf2_cfp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Mediatek MT7530 DSA Switch driver
* Copyright (C) 2017 Sean Wang <[email protected]>
*/
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <net/dsa.h>
#include "mt7530.h"
static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
{
return container_of(pcs, struct mt753x_pcs, pcs);
}
/* String, offset, and register size in 32-bit words (2 for 64-bit counters) */
static const struct mt7530_mib_desc mt7530_mib[] = {
MIB_DESC(1, 0x00, "TxDrop"),
MIB_DESC(1, 0x04, "TxCrcErr"),
MIB_DESC(1, 0x08, "TxUnicast"),
MIB_DESC(1, 0x0c, "TxMulticast"),
MIB_DESC(1, 0x10, "TxBroadcast"),
MIB_DESC(1, 0x14, "TxCollision"),
MIB_DESC(1, 0x18, "TxSingleCollision"),
MIB_DESC(1, 0x1c, "TxMultipleCollision"),
MIB_DESC(1, 0x20, "TxDeferred"),
MIB_DESC(1, 0x24, "TxLateCollision"),
MIB_DESC(1, 0x28, "TxExcessiveCollistion"),
MIB_DESC(1, 0x2c, "TxPause"),
MIB_DESC(1, 0x30, "TxPktSz64"),
MIB_DESC(1, 0x34, "TxPktSz65To127"),
MIB_DESC(1, 0x38, "TxPktSz128To255"),
MIB_DESC(1, 0x3c, "TxPktSz256To511"),
MIB_DESC(1, 0x40, "TxPktSz512To1023"),
MIB_DESC(1, 0x44, "Tx1024ToMax"),
MIB_DESC(2, 0x48, "TxBytes"),
MIB_DESC(1, 0x60, "RxDrop"),
MIB_DESC(1, 0x64, "RxFiltering"),
MIB_DESC(1, 0x68, "RxUnicast"),
MIB_DESC(1, 0x6c, "RxMulticast"),
MIB_DESC(1, 0x70, "RxBroadcast"),
MIB_DESC(1, 0x74, "RxAlignErr"),
MIB_DESC(1, 0x78, "RxCrcErr"),
MIB_DESC(1, 0x7c, "RxUnderSizeErr"),
MIB_DESC(1, 0x80, "RxFragErr"),
MIB_DESC(1, 0x84, "RxOverSzErr"),
MIB_DESC(1, 0x88, "RxJabberErr"),
MIB_DESC(1, 0x8c, "RxPause"),
MIB_DESC(1, 0x90, "RxPktSz64"),
MIB_DESC(1, 0x94, "RxPktSz65To127"),
MIB_DESC(1, 0x98, "RxPktSz128To255"),
MIB_DESC(1, 0x9c, "RxPktSz256To511"),
MIB_DESC(1, 0xa0, "RxPktSz512To1023"),
MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"),
MIB_DESC(2, 0xa8, "RxBytes"),
MIB_DESC(1, 0xb0, "RxCtrlDrop"),
MIB_DESC(1, 0xb4, "RxIngressDrop"),
MIB_DESC(1, 0xb8, "RxArlDrop"),
};
/* Since phy_device has not yet been created and
* phy_{read,write}_mmd_indirect is not available, we provide our own
* core_{read,write}_mmd_indirect with core_{clear,write,set} wrappers
* to access the switch core registers.
*/
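/* The indirect (Clause 22) MMD access sequence implemented below is:
* write the MMD device address to MII_MMD_CTRL, write the target
* register number to MII_MMD_DATA, switch MII_MMD_CTRL to data mode
* without post-increment, then read or write the payload through
* MII_MMD_DATA.
*/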
static int
core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
{
struct mii_bus *bus = priv->bus;
int value, ret;
/* Write the desired MMD Devad */
ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
if (ret < 0)
goto err;
/* Write the desired MMD register address */
ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
if (ret < 0)
goto err;
/* Select the Function : DATA with no post increment */
ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
if (ret < 0)
goto err;
/* Read the content of the MMD's selected register */
value = bus->read(bus, 0, MII_MMD_DATA);
return value;
err:
dev_err(&bus->dev, "failed to read mmd register\n");
return ret;
}
static int
core_write_mmd_indirect(struct mt7530_priv *priv, int prtad,
int devad, u32 data)
{
struct mii_bus *bus = priv->bus;
int ret;
/* Write the desired MMD Devad */
ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
if (ret < 0)
goto err;
/* Write the desired MMD register address */
ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
if (ret < 0)
goto err;
/* Select the Function : DATA with no post increment */
ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
if (ret < 0)
goto err;
/* Write the data into MMD's selected register */
ret = bus->write(bus, 0, MII_MMD_DATA, data);
err:
if (ret < 0)
dev_err(&bus->dev,
"failed to write mmd register\n");
return ret;
}
static void
mt7530_mutex_lock(struct mt7530_priv *priv)
{
if (priv->bus)
mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
}
static void
mt7530_mutex_unlock(struct mt7530_priv *priv)
{
if (priv->bus)
mutex_unlock(&priv->bus->mdio_lock);
}
static void
core_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
mt7530_mutex_lock(priv);
core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
mt7530_mutex_unlock(priv);
}
static void
core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
{
u32 val;
mt7530_mutex_lock(priv);
val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2);
val &= ~mask;
val |= set;
core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
mt7530_mutex_unlock(priv);
}
static void
core_set(struct mt7530_priv *priv, u32 reg, u32 val)
{
core_rmw(priv, reg, 0, val);
}
static void
core_clear(struct mt7530_priv *priv, u32 reg, u32 val)
{
core_rmw(priv, reg, val, 0);
}
static int
mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
int ret;
ret = regmap_write(priv->regmap, reg, val);
if (ret < 0)
dev_err(priv->dev,
"failed to write mt7530 register\n");
return ret;
}
static u32
mt7530_mii_read(struct mt7530_priv *priv, u32 reg)
{
int ret;
u32 val;
ret = regmap_read(priv->regmap, reg, &val);
if (ret) {
WARN_ON_ONCE(1);
dev_err(priv->dev,
"failed to read mt7530 register\n");
return 0;
}
return val;
}
static void
mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
mt7530_mutex_lock(priv);
mt7530_mii_write(priv, reg, val);
mt7530_mutex_unlock(priv);
}
static u32
_mt7530_unlocked_read(struct mt7530_dummy_poll *p)
{
return mt7530_mii_read(p->priv, p->reg);
}
static u32
_mt7530_read(struct mt7530_dummy_poll *p)
{
u32 val;
mt7530_mutex_lock(p->priv);
val = mt7530_mii_read(p->priv, p->reg);
mt7530_mutex_unlock(p->priv);
return val;
}
static u32
mt7530_read(struct mt7530_priv *priv, u32 reg)
{
struct mt7530_dummy_poll p;
INIT_MT7530_DUMMY_POLL(&p, priv, reg);
return _mt7530_read(&p);
}
static void
mt7530_rmw(struct mt7530_priv *priv, u32 reg,
u32 mask, u32 set)
{
mt7530_mutex_lock(priv);
regmap_update_bits(priv->regmap, reg, mask, set);
mt7530_mutex_unlock(priv);
}
static void
mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val)
{
mt7530_rmw(priv, reg, val, val);
}
static void
mt7530_clear(struct mt7530_priv *priv, u32 reg, u32 val)
{
mt7530_rmw(priv, reg, val, 0);
}
static int
mt7530_fdb_cmd(struct mt7530_priv *priv, enum mt7530_fdb_cmd cmd, u32 *rsp)
{
u32 val;
int ret;
struct mt7530_dummy_poll p;
/* Set the command operating upon the MAC address entries */
val = ATC_BUSY | ATC_MAT(0) | cmd;
mt7530_write(priv, MT7530_ATC, val);
INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC);
ret = readx_poll_timeout(_mt7530_read, &p, val,
!(val & ATC_BUSY), 20, 20000);
if (ret < 0) {
dev_err(priv->dev, "reset timeout\n");
return ret;
}
/* Additional sanity for read command if the specified
* entry is invalid
*/
val = mt7530_read(priv, MT7530_ATC);
if ((cmd == MT7530_FDB_READ) && (val & ATC_INVALID))
return -EINVAL;
if (rsp)
*rsp = val;
return 0;
}
static void
mt7530_fdb_read(struct mt7530_priv *priv, struct mt7530_fdb *fdb)
{
u32 reg[3];
int i;
/* Read from ARL table into an array */
for (i = 0; i < 3; i++) {
reg[i] = mt7530_read(priv, MT7530_TSRA1 + (i * 4));
dev_dbg(priv->dev, "%s(%d) reg[%d]=0x%x\n",
__func__, __LINE__, i, reg[i]);
}
fdb->vid = (reg[1] >> CVID) & CVID_MASK;
fdb->aging = (reg[2] >> AGE_TIMER) & AGE_TIMER_MASK;
fdb->port_mask = (reg[2] >> PORT_MAP) & PORT_MAP_MASK;
fdb->mac[0] = (reg[0] >> MAC_BYTE_0) & MAC_BYTE_MASK;
fdb->mac[1] = (reg[0] >> MAC_BYTE_1) & MAC_BYTE_MASK;
fdb->mac[2] = (reg[0] >> MAC_BYTE_2) & MAC_BYTE_MASK;
fdb->mac[3] = (reg[0] >> MAC_BYTE_3) & MAC_BYTE_MASK;
fdb->mac[4] = (reg[1] >> MAC_BYTE_4) & MAC_BYTE_MASK;
fdb->mac[5] = (reg[1] >> MAC_BYTE_5) & MAC_BYTE_MASK;
fdb->noarp = ((reg[2] >> ENT_STATUS) & ENT_STATUS_MASK) == STATIC_ENT;
}
static void
mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
u8 port_mask, const u8 *mac,
u8 aging, u8 type)
{
u32 reg[3] = { 0 };
int i;
reg[1] |= vid & CVID_MASK;
reg[1] |= ATA2_IVL;
reg[1] |= ATA2_FID(FID_BRIDGED);
reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER;
reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP;
/* STATIC_ENT indicates that the entry is static and won't
* be aged out; STATIC_EMP is specified when erasing an
* entry
*/
reg[2] |= (type & ENT_STATUS_MASK) << ENT_STATUS;
reg[1] |= mac[5] << MAC_BYTE_5;
reg[1] |= mac[4] << MAC_BYTE_4;
reg[0] |= mac[3] << MAC_BYTE_3;
reg[0] |= mac[2] << MAC_BYTE_2;
reg[0] |= mac[1] << MAC_BYTE_1;
reg[0] |= mac[0] << MAC_BYTE_0;
/* Write array into the ARL table */
for (i = 0; i < 3; i++)
mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
}
/* Set up switch core clock for MT7530 */
static void mt7530_pll_setup(struct mt7530_priv *priv)
{
/* Disable core clock */
core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
/* Disable PLL */
core_write(priv, CORE_GSWPLL_GRP1, 0);
/* Set core clock into 500Mhz */
core_write(priv, CORE_GSWPLL_GRP2,
RG_GSWPLL_POSDIV_500M(1) |
RG_GSWPLL_FBKDIV_500M(25));
/* Enable PLL */
core_write(priv, CORE_GSWPLL_GRP1,
RG_GSWPLL_EN_PRE |
RG_GSWPLL_POSDIV_200M(2) |
RG_GSWPLL_FBKDIV_200M(32));
udelay(20);
/* Enable core clock */
core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
}
/* If port 6 is available as a CPU port, always prefer that as the default,
* otherwise don't care.
*/
static struct dsa_port *
mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
{
struct dsa_port *cpu_dp = dsa_to_port(ds, 6);
if (dsa_port_is_cpu(cpu_dp))
return cpu_dp;
return NULL;
}
/* Setup port 6 interface mode and TRGMII TX circuit */
static int
mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
u32 ncpo1, ssc_delta, trgint, xtal;
xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
if (xtal == HWTRAP_XTAL_20MHZ) {
dev_err(priv->dev,
"%s: MT7530 with a 20MHz XTAL is not supported!\n",
__func__);
return -EINVAL;
}
switch (interface) {
case PHY_INTERFACE_MODE_RGMII:
trgint = 0;
break;
case PHY_INTERFACE_MODE_TRGMII:
trgint = 1;
if (xtal == HWTRAP_XTAL_25MHZ)
ssc_delta = 0x57;
else
ssc_delta = 0x87;
if (priv->id == ID_MT7621) {
/* PLL frequency: 125MHz: 1.0GBit */
if (xtal == HWTRAP_XTAL_40MHZ)
ncpo1 = 0x0640;
if (xtal == HWTRAP_XTAL_25MHZ)
ncpo1 = 0x0a00;
} else { /* PLL frequency: 250MHz: 2.0Gbit */
if (xtal == HWTRAP_XTAL_40MHZ)
ncpo1 = 0x0c80;
if (xtal == HWTRAP_XTAL_25MHZ)
ncpo1 = 0x1400;
}
break;
default:
dev_err(priv->dev, "xMII interface %d not supported\n",
interface);
return -EINVAL;
}
mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
P6_INTF_MODE(trgint));
if (trgint) {
/* Disable the MT7530 TRGMII clocks */
core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
/* Setup the MT7530 TRGMII Tx Clock */
core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
core_write(priv, CORE_PLL_GROUP4,
RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
RG_SYSPLL_BIAS_LPF_EN);
core_write(priv, CORE_PLL_GROUP2,
RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
RG_SYSPLL_POSDIV(1));
core_write(priv, CORE_PLL_GROUP7,
RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
/* Enable the MT7530 TRGMII clocks */
core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
}
return 0;
}
static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
{
u32 val;
val = mt7530_read(priv, MT7531_TOP_SIG_SR);
return (val & PAD_DUAL_SGMII_EN) != 0;
}
static int
mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
{
return 0;
}
static void
mt7531_pll_setup(struct mt7530_priv *priv)
{
u32 top_sig;
u32 hwstrap;
u32 xtal;
u32 val;
if (mt7531_dual_sgmii_supported(priv))
return;
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
hwstrap = mt7530_read(priv, MT7531_HWTRAP);
if ((val & CHIP_REV_M) > 0)
xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ :
HWTRAP_XTAL_FSEL_25MHZ;
else
xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK;
/* Step 1 : Disable MT7531 COREPLL */
val = mt7530_read(priv, MT7531_PLLGP_EN);
val &= ~EN_COREPLL;
mt7530_write(priv, MT7531_PLLGP_EN, val);
/* Step 2: switch to XTAL output */
val = mt7530_read(priv, MT7531_PLLGP_EN);
val |= SW_CLKSW;
mt7530_write(priv, MT7531_PLLGP_EN, val);
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val &= ~RG_COREPLL_EN;
mt7530_write(priv, MT7531_PLLGP_CR0, val);
/* Step 3: disable PLLGP and enable program PLLGP */
val = mt7530_read(priv, MT7531_PLLGP_EN);
val |= SW_PLLGP;
mt7530_write(priv, MT7531_PLLGP_EN, val);
/* Step 4: program COREPLL output frequency to 500MHz */
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val &= ~RG_COREPLL_POSDIV_M;
val |= 2 << RG_COREPLL_POSDIV_S;
mt7530_write(priv, MT7531_PLLGP_CR0, val);
usleep_range(25, 35);
switch (xtal) {
case HWTRAP_XTAL_FSEL_25MHZ:
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val &= ~RG_COREPLL_SDM_PCW_M;
val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
mt7530_write(priv, MT7531_PLLGP_CR0, val);
break;
case HWTRAP_XTAL_FSEL_40MHZ:
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val &= ~RG_COREPLL_SDM_PCW_M;
val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
mt7530_write(priv, MT7531_PLLGP_CR0, val);
break;
}
/* Set feedback divide ratio update signal to high */
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val |= RG_COREPLL_SDM_PCW_CHG;
mt7530_write(priv, MT7531_PLLGP_CR0, val);
/* Wait for at least 16 XTAL clocks */
usleep_range(10, 20);
/* Step 5: set feedback divide ratio update signal to low */
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val &= ~RG_COREPLL_SDM_PCW_CHG;
mt7530_write(priv, MT7531_PLLGP_CR0, val);
/* Enable 325M clock for SGMII */
mt7530_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);
/* Enable 250SSC clock for RGMII */
mt7530_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);
/* Step 6: Enable MT7531 PLL */
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val |= RG_COREPLL_EN;
mt7530_write(priv, MT7531_PLLGP_CR0, val);
val = mt7530_read(priv, MT7531_PLLGP_EN);
val |= EN_COREPLL;
mt7530_write(priv, MT7531_PLLGP_EN, val);
usleep_range(25, 35);
}
static void
mt7530_mib_reset(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_FLUSH);
mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
}
static int mt7530_phy_read_c22(struct mt7530_priv *priv, int port, int regnum)
{
return mdiobus_read_nested(priv->bus, port, regnum);
}
static int mt7530_phy_write_c22(struct mt7530_priv *priv, int port, int regnum,
u16 val)
{
return mdiobus_write_nested(priv->bus, port, regnum, val);
}
static int mt7530_phy_read_c45(struct mt7530_priv *priv, int port,
int devad, int regnum)
{
return mdiobus_c45_read_nested(priv->bus, port, devad, regnum);
}
static int mt7530_phy_write_c45(struct mt7530_priv *priv, int port, int devad,
int regnum, u16 val)
{
return mdiobus_c45_write_nested(priv->bus, port, devad, regnum, val);
}
static int
mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
int regnum)
{
struct mt7530_dummy_poll p;
u32 reg, val;
int ret;
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
mt7530_mutex_lock(priv);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
if (ret < 0) {
dev_err(priv->dev, "poll timeout\n");
goto out;
}
reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
MT7531_MDIO_DEV_ADDR(devad) | regnum;
mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
if (ret < 0) {
dev_err(priv->dev, "poll timeout\n");
goto out;
}
reg = MT7531_MDIO_CL45_READ | MT7531_MDIO_PHY_ADDR(port) |
MT7531_MDIO_DEV_ADDR(devad);
mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
if (ret < 0) {
dev_err(priv->dev, "poll timeout\n");
goto out;
}
ret = val & MT7531_MDIO_RW_DATA_MASK;
out:
mt7530_mutex_unlock(priv);
return ret;
}
static int
mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
int regnum, u16 data)
{
struct mt7530_dummy_poll p;
u32 val, reg;
int ret;
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
mt7530_mutex_lock(priv);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
if (ret < 0) {
dev_err(priv->dev, "poll timeout\n");
goto out;
}
reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
MT7531_MDIO_DEV_ADDR(devad) | regnum;
mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
if (ret < 0) {
dev_err(priv->dev, "poll timeout\n");
goto out;
}
reg = MT7531_MDIO_CL45_WRITE | MT7531_MDIO_PHY_ADDR(port) |
MT7531_MDIO_DEV_ADDR(devad) | data;
mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
if (ret < 0) {
dev_err(priv->dev, "poll timeout\n");
goto out;
}
out:
mt7530_mutex_unlock(priv);
return ret;
}
static int
mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
{
struct mt7530_dummy_poll p;
int ret;
u32 val;
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
mt7530_mutex_lock(priv);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
if (ret < 0) {
dev_err(priv->dev, "poll timeout\n");
goto out;
}
val = MT7531_MDIO_CL22_READ | MT7531_MDIO_PHY_ADDR(port) |
MT7531_MDIO_REG_ADDR(regnum);
mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
if (ret < 0) {
dev_err(priv->dev, "poll timeout\n");
goto out;
}
ret = val & MT7531_MDIO_RW_DATA_MASK;
out:
mt7530_mutex_unlock(priv);
return ret;
}
static int
mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
u16 data)
{
struct mt7530_dummy_poll p;
int ret;
u32 reg;
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
mt7530_mutex_lock(priv);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
!(reg & MT7531_PHY_ACS_ST), 20, 100000);
if (ret < 0) {
dev_err(priv->dev, "poll timeout\n");
goto out;
}
reg = MT7531_MDIO_CL22_WRITE | MT7531_MDIO_PHY_ADDR(port) |
MT7531_MDIO_REG_ADDR(regnum) | data;
mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
!(reg & MT7531_PHY_ACS_ST), 20, 100000);
if (ret < 0) {
dev_err(priv->dev, "poll timeout\n");
goto out;
}
out:
mt7530_mutex_unlock(priv);
return ret;
}
static int
mt753x_phy_read_c22(struct mii_bus *bus, int port, int regnum)
{
struct mt7530_priv *priv = bus->priv;
return priv->info->phy_read_c22(priv, port, regnum);
}
static int
mt753x_phy_read_c45(struct mii_bus *bus, int port, int devad, int regnum)
{
struct mt7530_priv *priv = bus->priv;
return priv->info->phy_read_c45(priv, port, devad, regnum);
}
static int
mt753x_phy_write_c22(struct mii_bus *bus, int port, int regnum, u16 val)
{
struct mt7530_priv *priv = bus->priv;
return priv->info->phy_write_c22(priv, port, regnum, val);
}
static int
mt753x_phy_write_c45(struct mii_bus *bus, int port, int devad, int regnum,
u16 val)
{
struct mt7530_priv *priv = bus->priv;
return priv->info->phy_write_c45(priv, port, devad, regnum, val);
}
static void
mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
{
int i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++)
strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name,
ETH_GSTRING_LEN);
}
static void
mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct mt7530_priv *priv = ds->priv;
const struct mt7530_mib_desc *mib;
u32 reg, i;
u64 hi;
for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) {
mib = &mt7530_mib[i];
reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset;
data[i] = mt7530_read(priv, reg);
if (mib->size == 2) {
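/* e.g. TxBytes/RxBytes are 64-bit: low word at mib->offset,
* high word in the following 32-bit register
*/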
hi = mt7530_read(priv, reg + 4);
data[i] |= hi << 32;
}
}
}
static int
mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
if (sset != ETH_SS_STATS)
return 0;
return ARRAY_SIZE(mt7530_mib);
}
static int
mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
struct mt7530_priv *priv = ds->priv;
unsigned int secs = msecs / 1000;
unsigned int tmp_age_count;
unsigned int error = -1;
unsigned int age_count;
unsigned int age_unit;
/* Applied timer is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds */
if (secs < 1 || secs > (AGE_CNT_MAX + 1) * (AGE_UNIT_MAX + 1))
return -ERANGE;
/* iterate through all possible age_count to find the closest pair */
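/* For example, a requested ageing time of 300 seconds is matched exactly
* on the first iteration with AGE_CNT = 0 and AGE_UNIT = 299, since
* (0 + 1) * (299 + 1) = 300 (assuming AGE_UNIT_MAX is at least 299).
*/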
for (tmp_age_count = 0; tmp_age_count <= AGE_CNT_MAX; ++tmp_age_count) {
unsigned int tmp_age_unit = secs / (tmp_age_count + 1) - 1;
if (tmp_age_unit <= AGE_UNIT_MAX) {
unsigned int tmp_error = secs -
(tmp_age_count + 1) * (tmp_age_unit + 1);
/* found a closer pair */
if (error > tmp_error) {
error = tmp_error;
age_count = tmp_age_count;
age_unit = tmp_age_unit;
}
/* found the exact match, so break the loop */
if (!error)
break;
}
}
mt7530_write(priv, MT7530_AAC, AGE_CNT(age_count) | AGE_UNIT(age_unit));
return 0;
}
static const char *p5_intf_modes(unsigned int p5_interface)
{
switch (p5_interface) {
case P5_DISABLED:
return "DISABLED";
case P5_INTF_SEL_PHY_P0:
return "PHY P0";
case P5_INTF_SEL_PHY_P4:
return "PHY P4";
case P5_INTF_SEL_GMAC5:
return "GMAC5";
case P5_INTF_SEL_GMAC5_SGMII:
return "GMAC5_SGMII";
default:
return "unknown";
}
}
static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
u8 tx_delay = 0;
int val;
mutex_lock(&priv->reg_mutex);
val = mt7530_read(priv, MT7530_MHWTRAP);
val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS;
val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL;
switch (priv->p5_intf_sel) {
case P5_INTF_SEL_PHY_P0:
/* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */
val |= MHWTRAP_PHY0_SEL;
fallthrough;
case P5_INTF_SEL_PHY_P4:
/* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */
val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS;
/* Setup the MAC by default for the cpu port */
mt7530_write(priv, MT7530_PMCR_P(5), 0x56300);
break;
case P5_INTF_SEL_GMAC5:
/* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */
val &= ~MHWTRAP_P5_DIS;
break;
case P5_DISABLED:
interface = PHY_INTERFACE_MODE_NA;
break;
default:
dev_err(ds->dev, "Unsupported p5_intf_sel %d\n",
priv->p5_intf_sel);
goto unlock_exit;
}
/* Setup RGMII settings */
if (phy_interface_mode_is_rgmii(interface)) {
val |= MHWTRAP_P5_RGMII_MODE;
/* P5 RGMII RX Clock Control: delay setting for 1000M */
mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN);
/* Don't set delay in DSA mode */
if (!dsa_is_dsa_port(priv->ds, 5) &&
(interface == PHY_INTERFACE_MODE_RGMII_TXID ||
interface == PHY_INTERFACE_MODE_RGMII_ID))
tx_delay = 4; /* n * 0.5 ns */
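/* i.e. 4 * 0.5 ns = 2 ns of internal TX delay */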
/* P5 RGMII TX Clock Control: delay x */
mt7530_write(priv, MT7530_P5RGMIITXCR,
CSR_RGMII_TXC_CFG(0x10 + tx_delay));
/* reduce P5 RGMII Tx driving, 8mA */
mt7530_write(priv, MT7530_IO_DRV_CR,
P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1));
}
mt7530_write(priv, MT7530_MHWTRAP, val);
dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n",
val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));
priv->p5_interface = interface;
unlock_exit:
mutex_unlock(&priv->reg_mutex);
}
static void
mt753x_trap_frames(struct mt7530_priv *priv)
{
/* Trap BPDUs to the CPU port(s) */
mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
MT753X_BPDU_CPU_ONLY);
/* Trap 802.1X PAE frames to the CPU port(s) */
mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK,
MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY));
/* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
}
static int
mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
int ret;
/* Set up the maximum capability of the CPU port first */
if (priv->info->cpu_port_config) {
ret = priv->info->cpu_port_config(ds, port);
if (ret)
return ret;
}
/* Enable Mediatek header mode on the cpu port */
mt7530_write(priv, MT7530_PVC_P(port),
PORT_SPEC_TAG);
/* Enable flooding on the CPU port */
mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
UNU_FFP(BIT(port)));
/* Set CPU port number */
if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
/* Add the CPU port to the CPU port bitmap for MT7531 and the switch on
* the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
* is affine to the inbound user port.
*/
if (priv->id == ID_MT7531 || priv->id == ID_MT7988)
mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
/* CPU port gets connected to all user ports of
* the switch.
*/
mt7530_write(priv, MT7530_PCR_P(port),
PCR_MATRIX(dsa_user_ports(priv->ds)));
/* Set to fallback mode for independent VLAN learning */
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_FALLBACK_MODE);
return 0;
}
static int
mt7530_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
/* Allow the user port to be connected to the CPU port, and restore
 * the port matrix if the port is a member of a bridge.
 */
if (dsa_port_is_user(dp)) {
struct dsa_port *cpu_dp = dp->cpu_dp;
priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index));
}
priv->ports[port].enable = true;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
priv->ports[port].pm);
mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
return 0;
}
static void
mt7530_port_disable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
/* Clear the hardware port matrix; the saved copy is restored the
 * next time the port is enabled.
 */
priv->ports[port].enable = false;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
}
static int
mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct mt7530_priv *priv = ds->priv;
int length;
u32 val;
/* When a new MTU is set, DSA always sets the CPU port's MTU to the
 * largest MTU of the slave ports. Because the switch only has a global
 * RX length register, handling the CPU port here is enough.
 */
if (!dsa_is_cpu_port(ds, port))
return 0;
mt7530_mutex_lock(priv);
val = mt7530_mii_read(priv, MT7530_GMACCR);
val &= ~MAX_RX_PKT_LEN_MASK;
/* RX length also includes Ethernet header, MTK tag, and FCS length */
length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN;
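/* e.g. the default MTU of 1500 gives 1500 + 14 + 4 + 4 = 1522 and
 * selects MAX_RX_PKT_LEN_1522 below
 */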
if (length <= 1522) {
val |= MAX_RX_PKT_LEN_1522;
} else if (length <= 1536) {
val |= MAX_RX_PKT_LEN_1536;
} else if (length <= 1552) {
val |= MAX_RX_PKT_LEN_1552;
} else {
val &= ~MAX_RX_JUMBO_MASK;
val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024));
val |= MAX_RX_PKT_LEN_JUMBO;
}
mt7530_mii_write(priv, MT7530_GMACCR, val);
mt7530_mutex_unlock(priv);
return 0;
}
static int
mt7530_port_max_mtu(struct dsa_switch *ds, int port)
{
return MT7530_MAX_MTU;
}
static void
mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
struct mt7530_priv *priv = ds->priv;
u32 stp_state;
switch (state) {
case BR_STATE_DISABLED:
stp_state = MT7530_STP_DISABLED;
break;
case BR_STATE_BLOCKING:
stp_state = MT7530_STP_BLOCKING;
break;
case BR_STATE_LISTENING:
stp_state = MT7530_STP_LISTENING;
break;
case BR_STATE_LEARNING:
stp_state = MT7530_STP_LEARNING;
break;
case BR_STATE_FORWARDING:
default:
stp_state = MT7530_STP_FORWARDING;
break;
}
mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK(FID_BRIDGED),
FID_PST(FID_BRIDGED, stp_state));
}
static int
mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
BR_BCAST_FLOOD))
return -EINVAL;
return 0;
}
static int
mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct mt7530_priv *priv = ds->priv;
if (flags.mask & BR_LEARNING)
mt7530_rmw(priv, MT7530_PSC_P(port), SA_DIS,
flags.val & BR_LEARNING ? 0 : SA_DIS);
if (flags.mask & BR_FLOOD)
mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)),
flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0);
if (flags.mask & BR_MCAST_FLOOD)
mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)),
flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0);
if (flags.mask & BR_BCAST_FLOOD)
mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)),
flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0);
return 0;
}
static int
mt7530_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge, bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
struct dsa_port *cpu_dp = dp->cpu_dp;
u32 port_bitmap = BIT(cpu_dp->index);
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
dsa_switch_for_each_user_port(other_dp, ds) {
int other_port = other_dp->index;
if (dp == other_dp)
continue;
/* Add this port to the port matrix of the other ports in the
 * same bridge. If the port is disabled, the port matrix is only
 * stored and not written to hardware until the port is enabled.
 */
if (!dsa_port_offloads_bridge(other_dp, &bridge))
continue;
if (priv->ports[other_port].enable)
mt7530_set(priv, MT7530_PCR_P(other_port),
PCR_MATRIX(BIT(port)));
priv->ports[other_port].pm |= PCR_MATRIX(BIT(port));
port_bitmap |= BIT(other_port);
}
/* Add all the other ports to this port's matrix. */
if (priv->ports[port].enable)
mt7530_rmw(priv, MT7530_PCR_P(port),
PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap));
priv->ports[port].pm |= PCR_MATRIX(port_bitmap);
/* Set to fallback mode for independent VLAN learning */
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_FALLBACK_MODE);
mutex_unlock(&priv->reg_mutex);
return 0;
}
static void
mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
bool all_user_ports_removed = true;
int i;
/* This is called after .port_bridge_leave when leaving a VLAN-aware
* bridge. Don't set standalone ports to fallback mode.
*/
if (dsa_port_bridge_dev_get(dsa_to_port(ds, port)))
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_FALLBACK_MODE);
mt7530_rmw(priv, MT7530_PVC_P(port),
VLAN_ATTR_MASK | PVC_EG_TAG_MASK | ACC_FRM_MASK,
VLAN_ATTR(MT7530_VLAN_TRANSPARENT) |
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT) |
MT7530_VLAN_ACC_ALL);
/* Set PVID to 0 */
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
G0_PORT_VID_DEF);
for (i = 0; i < MT7530_NUM_PORTS; i++) {
if (dsa_is_user_port(ds, i) &&
dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
all_user_ports_removed = false;
break;
}
}
/* Restore the CPU port to its VLAN-unaware defaults once all user
 * ports behind it have left VLAN filtering mode.
 */
if (all_user_ports_removed) {
struct dsa_port *dp = dsa_to_port(ds, port);
struct dsa_port *cpu_dp = dp->cpu_dp;
mt7530_write(priv, MT7530_PCR_P(cpu_dp->index),
PCR_MATRIX(dsa_user_ports(priv->ds)));
mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG
| PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
}
static void
mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
/* Security mode makes packet forwarding subject to the VLAN table
 * lookup.
 */
if (dsa_is_user_port(ds, port)) {
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_SECURITY_MODE);
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
G0_PORT_VID(priv->ports[port].pvid));
/* Only accept tagged frames if PVID is not set */
if (!priv->ports[port].pvid)
mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
MT7530_VLAN_ACC_TAGGED);
/* Set the port attribute to "user" so the VID of incoming packets
 * is recognized before the VLAN table entry is fetched.
 */
mt7530_rmw(priv, MT7530_PVC_P(port),
VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
VLAN_ATTR(MT7530_VLAN_USER) |
PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
} else {
/* Also set CPU ports to the "user" VLAN port attribute, to
* allow VLAN classification, but keep the EG_TAG attribute as
* "consistent" (i.o.w. don't change its value) for packets
* received by the switch from the CPU, so that tagged packets
* are forwarded to user ports as tagged, and untagged as
* untagged.
*/
mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
VLAN_ATTR(MT7530_VLAN_USER));
}
}
static void
mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
struct dsa_port *cpu_dp = dp->cpu_dp;
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
dsa_switch_for_each_user_port(other_dp, ds) {
int other_port = other_dp->index;
if (dp == other_dp)
continue;
/* Remove this port from the port matrix of the other ports in
 * the same bridge. If the port is disabled, the port matrix is
 * only stored and not written to hardware until the port is
 * enabled.
 */
if (!dsa_port_offloads_bridge(other_dp, &bridge))
continue;
if (priv->ports[other_port].enable)
mt7530_clear(priv, MT7530_PCR_P(other_port),
PCR_MATRIX(BIT(port)));
priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port));
}
/* Set the cpu port to be the only one in the port matrix of
* this port.
*/
if (priv->ports[port].enable)
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
PCR_MATRIX(BIT(cpu_dp->index)));
priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index));
/* When a port is removed from the bridge, restore it to its
 * initial-boot default: a VLAN-unaware port.
 */
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_MATRIX_MODE);
mutex_unlock(&priv->reg_mutex);
}
static int
mt7530_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
int ret;
u8 port_mask = BIT(port);
mutex_lock(&priv->reg_mutex);
mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT);
ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
mutex_unlock(&priv->reg_mutex);
return ret;
}
static int
mt7530_port_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
int ret;
u8 port_mask = BIT(port);
mutex_lock(&priv->reg_mutex);
mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP);
ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
mutex_unlock(&priv->reg_mutex);
return ret;
}
static int
mt7530_port_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct mt7530_priv *priv = ds->priv;
struct mt7530_fdb _fdb = { 0 };
int cnt = MT7530_NUM_FDB_RECORDS;
int ret = 0;
u32 rsp = 0;
mutex_lock(&priv->reg_mutex);
ret = mt7530_fdb_cmd(priv, MT7530_FDB_START, &rsp);
if (ret < 0)
goto err;
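/* Walk the address table: stop after MT7530_NUM_FDB_RECORDS entries,
 * when the hardware signals the end of the table, or when the
 * next-search command fails.
 */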
do {
if (rsp & ATC_SRCH_HIT) {
mt7530_fdb_read(priv, &_fdb);
if (_fdb.port_mask & BIT(port)) {
ret = cb(_fdb.mac, _fdb.vid, _fdb.noarp,
data);
if (ret < 0)
break;
}
}
} while (--cnt &&
!(rsp & ATC_SRCH_END) &&
!mt7530_fdb_cmd(priv, MT7530_FDB_NEXT, &rsp));
err:
mutex_unlock(&priv->reg_mutex);
return 0;
}
static int
mt7530_port_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
u16 vid = mdb->vid;
u8 port_mask = 0;
int ret;
mutex_lock(&priv->reg_mutex);
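/* Read back any existing entry for this {MAC, VID} pair, add this
 * port to its port map and write the entry back as a static entry.
 */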
mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
& PORT_MAP_MASK;
port_mask |= BIT(port);
mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT);
ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
mutex_unlock(&priv->reg_mutex);
return ret;
}
static int
mt7530_port_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
u16 vid = mdb->vid;
u8 port_mask = 0;
int ret;
mutex_lock(&priv->reg_mutex);
mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
& PORT_MAP_MASK;
port_mask &= ~BIT(port);
mt7530_fdb_write(priv, vid, port_mask, addr, -1,
port_mask ? STATIC_ENT : STATIC_EMP);
ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
mutex_unlock(&priv->reg_mutex);
return ret;
}
static int
mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
{
struct mt7530_dummy_poll p;
u32 val;
int ret;
val = VTCR_BUSY | VTCR_FUNC(cmd) | vid;
mt7530_write(priv, MT7530_VTCR, val);
INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR);
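/* Poll every 20 us, for at most 20 ms, for the VLAN table command
 * to complete
 */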
ret = readx_poll_timeout(_mt7530_read, &p, val,
!(val & VTCR_BUSY), 20, 20000);
if (ret < 0) {
dev_err(priv->dev, "poll timeout\n");
return ret;
}
val = mt7530_read(priv, MT7530_VTCR);
if (val & VTCR_INVALID) {
dev_err(priv->dev, "read VTCR invalid\n");
return -EINVAL;
}
return 0;
}
static int
mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct dsa_port *cpu_dp = dp->cpu_dp;
if (vlan_filtering) {
/* The port stays VLAN-unaware when the bridge is set up without
 * vlan_filtering. Otherwise, both the port and the corresponding
 * CPU port must be configured as VLAN-aware ports.
 */
mt7530_port_set_vlan_aware(ds, port);
mt7530_port_set_vlan_aware(ds, cpu_dp->index);
} else {
mt7530_port_set_vlan_unaware(ds, port);
}
return 0;
}
static void
mt7530_hw_vlan_add(struct mt7530_priv *priv,
struct mt7530_hw_vlan_entry *entry)
{
struct dsa_port *dp = dsa_to_port(priv->ds, entry->port);
u8 new_members;
u32 val;
new_members = entry->old_members | BIT(entry->port);
/* Mark the entry valid with independent learning and per-VLAN egress
 * tagging, and add the port as one of the port members.
 */
val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | FID(FID_BRIDGED) |
VLAN_VALID;
mt7530_write(priv, MT7530_VAWD1, val);
/* Decide whether outgoing packets from this port are tagged for the
 * VLAN.
 * The CPU port is always treated as a tagged member so it can serve
 * multiple VLANs, and it uses the "stack" egress mode so the VLAN tag
 * is appended after the hardware special tag used as the DSA tag.
 */
if (dsa_port_is_cpu(dp))
val = MT7530_VLAN_EGRESS_STACK;
else if (entry->untagged)
val = MT7530_VLAN_EGRESS_UNTAG;
else
val = MT7530_VLAN_EGRESS_TAG;
mt7530_rmw(priv, MT7530_VAWD2,
ETAG_CTRL_P_MASK(entry->port),
ETAG_CTRL_P(entry->port, val));
}
static void
mt7530_hw_vlan_del(struct mt7530_priv *priv,
struct mt7530_hw_vlan_entry *entry)
{
u8 new_members;
u32 val;
new_members = entry->old_members & ~BIT(entry->port);
val = mt7530_read(priv, MT7530_VAWD1);
if (!(val & VLAN_VALID)) {
dev_err(priv->dev,
"Cannot be deleted due to invalid entry\n");
return;
}
if (new_members) {
val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
VLAN_VALID;
mt7530_write(priv, MT7530_VAWD1, val);
} else {
mt7530_write(priv, MT7530_VAWD1, 0);
mt7530_write(priv, MT7530_VAWD2, 0);
}
}
static void
mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid,
struct mt7530_hw_vlan_entry *entry,
mt7530_vlan_op vlan_op)
{
u32 val;
/* Fetch entry */
mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid);
val = mt7530_read(priv, MT7530_VAWD1);
entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK;
/* Manipulate entry */
vlan_op(priv, entry);
/* Flush result to hardware */
mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);
}
static int
mt7530_setup_vlan0(struct mt7530_priv *priv)
{
u32 val;
/* Validate the entry with independent learning, keep the original
* ingress tag attribute.
*/
val = IVL_MAC | EG_CON | PORT_MEM(MT7530_ALL_MEMBERS) | FID(FID_BRIDGED) |
VLAN_VALID;
mt7530_write(priv, MT7530_VAWD1, val);
return mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, 0);
}
static int
mt7530_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct mt7530_hw_vlan_entry new_entry;
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
mt7530_hw_vlan_update(priv, vlan->vid, &new_entry, mt7530_hw_vlan_add);
if (pvid) {
priv->ports[port].pvid = vlan->vid;
/* Accept all frames if PVID is set */
mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
MT7530_VLAN_ACC_ALL);
/* Only configure PVID if VLAN filtering is enabled */
if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
mt7530_rmw(priv, MT7530_PPBV1_P(port),
G0_PORT_VID_MASK,
G0_PORT_VID(vlan->vid));
} else if (vlan->vid && priv->ports[port].pvid == vlan->vid) {
/* This VLAN is overwritten without PVID, so unset it */
priv->ports[port].pvid = G0_PORT_VID_DEF;
/* Only accept tagged frames if the port is VLAN-aware */
if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
MT7530_VLAN_ACC_TAGGED);
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
G0_PORT_VID_DEF);
}
mutex_unlock(&priv->reg_mutex);
return 0;
}
static int
mt7530_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct mt7530_hw_vlan_entry target_entry;
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
mt7530_hw_vlan_entry_init(&target_entry, port, 0);
mt7530_hw_vlan_update(priv, vlan->vid, &target_entry,
mt7530_hw_vlan_del);
/* Restore the default PVID whenever the PVID port is removed from
 * the VLAN.
 */
if (priv->ports[port].pvid == vlan->vid) {
priv->ports[port].pvid = G0_PORT_VID_DEF;
/* Only accept tagged frames if the port is VLAN-aware */
if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
MT7530_VLAN_ACC_TAGGED);
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
G0_PORT_VID_DEF);
}
mutex_unlock(&priv->reg_mutex);
return 0;
}
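/* The monitor (mirror destination) port lives in a different register
 * field on MT7531 than on MT7530/MT7621; these helpers hide that
 * difference.
 */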
static int mt753x_mirror_port_get(unsigned int id, u32 val)
{
return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
MIRROR_PORT(val);
}
static int mt753x_mirror_port_set(unsigned int id, u32 val)
{
return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
MIRROR_PORT(val);
}
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
{
struct mt7530_priv *priv = ds->priv;
int monitor_port;
u32 val;
/* Check for existent entry */
if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
return -EEXIST;
val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
/* MT7530 only supports one monitor port */
monitor_port = mt753x_mirror_port_get(priv->id, val);
if (val & MT753X_MIRROR_EN(priv->id) &&
monitor_port != mirror->to_local_port)
return -EEXIST;
val |= MT753X_MIRROR_EN(priv->id);
val &= ~MT753X_MIRROR_MASK(priv->id);
val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port);
mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
val = mt7530_read(priv, MT7530_PCR_P(port));
if (ingress) {
val |= PORT_RX_MIR;
priv->mirror_rx |= BIT(port);
} else {
val |= PORT_TX_MIR;
priv->mirror_tx |= BIT(port);
}
mt7530_write(priv, MT7530_PCR_P(port), val);
return 0;
}
static void mt753x_port_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
struct mt7530_priv *priv = ds->priv;
u32 val;
val = mt7530_read(priv, MT7530_PCR_P(port));
if (mirror->ingress) {
val &= ~PORT_RX_MIR;
priv->mirror_rx &= ~BIT(port);
} else {
val &= ~PORT_TX_MIR;
priv->mirror_tx &= ~BIT(port);
}
mt7530_write(priv, MT7530_PCR_P(port), val);
if (!priv->mirror_rx && !priv->mirror_tx) {
val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
val &= ~MT753X_MIRROR_EN(priv->id);
mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
}
}
static enum dsa_tag_protocol
mtk_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_MTK;
}
#ifdef CONFIG_GPIOLIB
static inline u32
mt7530_gpio_to_bit(unsigned int offset)
{
/* Map GPIO offset to register bit
* [ 2: 0] port 0 LED 0..2 as GPIO 0..2
* [ 6: 4] port 1 LED 0..2 as GPIO 3..5
* [10: 8] port 2 LED 0..2 as GPIO 6..8
* [14:12] port 3 LED 0..2 as GPIO 9..11
* [18:16] port 4 LED 0..2 as GPIO 12..14
*/
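/* e.g. GPIO offset 7 (port 2, LED 1) maps to bit 7 + 7/3 = 9 */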
return BIT(offset + offset / 3);
}
static int
mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset)
{
struct mt7530_priv *priv = gpiochip_get_data(gc);
u32 bit = mt7530_gpio_to_bit(offset);
return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit);
}
static void
mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct mt7530_priv *priv = gpiochip_get_data(gc);
u32 bit = mt7530_gpio_to_bit(offset);
if (value)
mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
else
mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
}
static int
mt7530_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
{
struct mt7530_priv *priv = gpiochip_get_data(gc);
u32 bit = mt7530_gpio_to_bit(offset);
return (mt7530_read(priv, MT7530_LED_GPIO_DIR) & bit) ?
GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}
static int
mt7530_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
{
struct mt7530_priv *priv = gpiochip_get_data(gc);
u32 bit = mt7530_gpio_to_bit(offset);
mt7530_clear(priv, MT7530_LED_GPIO_OE, bit);
mt7530_clear(priv, MT7530_LED_GPIO_DIR, bit);
return 0;
}
static int
mt7530_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
{
struct mt7530_priv *priv = gpiochip_get_data(gc);
u32 bit = mt7530_gpio_to_bit(offset);
mt7530_set(priv, MT7530_LED_GPIO_DIR, bit);
if (value)
mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
else
mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
mt7530_set(priv, MT7530_LED_GPIO_OE, bit);
return 0;
}
static int
mt7530_setup_gpio(struct mt7530_priv *priv)
{
struct device *dev = priv->dev;
struct gpio_chip *gc;
gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
if (!gc)
return -ENOMEM;
mt7530_write(priv, MT7530_LED_GPIO_OE, 0);
mt7530_write(priv, MT7530_LED_GPIO_DIR, 0);
mt7530_write(priv, MT7530_LED_IO_MODE, 0);
gc->label = "mt7530";
gc->parent = dev;
gc->owner = THIS_MODULE;
gc->get_direction = mt7530_gpio_get_direction;
gc->direction_input = mt7530_gpio_direction_input;
gc->direction_output = mt7530_gpio_direction_output;
gc->get = mt7530_gpio_get;
gc->set = mt7530_gpio_set;
gc->base = -1;
gc->ngpio = 15;
gc->can_sleep = true;
return devm_gpiochip_add_data(dev, gc, priv);
}
#endif /* CONFIG_GPIOLIB */
static irqreturn_t
mt7530_irq_thread_fn(int irq, void *dev_id)
{
struct mt7530_priv *priv = dev_id;
bool handled = false;
u32 val;
int p;
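/* Read the pending PHY interrupt status, write it back to acknowledge
 * it, and dispatch a nested interrupt for each PHY that has a bit set.
 */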
mt7530_mutex_lock(priv);
val = mt7530_mii_read(priv, MT7530_SYS_INT_STS);
mt7530_mii_write(priv, MT7530_SYS_INT_STS, val);
mt7530_mutex_unlock(priv);
for (p = 0; p < MT7530_NUM_PHYS; p++) {
if (BIT(p) & val) {
unsigned int irq;
irq = irq_find_mapping(priv->irq_domain, p);
handle_nested_irq(irq);
handled = true;
}
}
return IRQ_RETVAL(handled);
}
static void
mt7530_irq_mask(struct irq_data *d)
{
struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
priv->irq_enable &= ~BIT(d->hwirq);
}
static void
mt7530_irq_unmask(struct irq_data *d)
{
struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
priv->irq_enable |= BIT(d->hwirq);
}
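/* The switch registers sit behind a slow bus, so irq_mask/irq_unmask
 * only update the irq_enable shadow word; the hardware register is
 * written once in irq_bus_sync_unlock() while the bus lock is held.
 */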
static void
mt7530_irq_bus_lock(struct irq_data *d)
{
struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
mt7530_mutex_lock(priv);
}
static void
mt7530_irq_bus_sync_unlock(struct irq_data *d)
{
struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
mt7530_mutex_unlock(priv);
}
static struct irq_chip mt7530_irq_chip = {
.name = KBUILD_MODNAME,
.irq_mask = mt7530_irq_mask,
.irq_unmask = mt7530_irq_unmask,
.irq_bus_lock = mt7530_irq_bus_lock,
.irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock,
};
static int
mt7530_irq_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(irq, domain->host_data);
irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq);
irq_set_nested_thread(irq, true);
irq_set_noprobe(irq);
return 0;
}
static const struct irq_domain_ops mt7530_irq_domain_ops = {
.map = mt7530_irq_map,
.xlate = irq_domain_xlate_onecell,
};
static void
mt7988_irq_mask(struct irq_data *d)
{
struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
priv->irq_enable &= ~BIT(d->hwirq);
mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
}
static void
mt7988_irq_unmask(struct irq_data *d)
{
struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
priv->irq_enable |= BIT(d->hwirq);
mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
}
static struct irq_chip mt7988_irq_chip = {
.name = KBUILD_MODNAME,
.irq_mask = mt7988_irq_mask,
.irq_unmask = mt7988_irq_unmask,
};
static int
mt7988_irq_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(irq, domain->host_data);
irq_set_chip_and_handler(irq, &mt7988_irq_chip, handle_simple_irq);
irq_set_nested_thread(irq, true);
irq_set_noprobe(irq);
return 0;
}
static const struct irq_domain_ops mt7988_irq_domain_ops = {
.map = mt7988_irq_map,
.xlate = irq_domain_xlate_onecell,
};
static void
mt7530_setup_mdio_irq(struct mt7530_priv *priv)
{
struct dsa_switch *ds = priv->ds;
int p;
for (p = 0; p < MT7530_NUM_PHYS; p++) {
if (BIT(p) & ds->phys_mii_mask) {
unsigned int irq;
irq = irq_create_mapping(priv->irq_domain, p);
ds->slave_mii_bus->irq[p] = irq;
}
}
}
static int
mt7530_setup_irq(struct mt7530_priv *priv)
{
struct device *dev = priv->dev;
struct device_node *np = dev->of_node;
int ret;
if (!of_property_read_bool(np, "interrupt-controller")) {
dev_info(dev, "no interrupt support\n");
return 0;
}
priv->irq = of_irq_get(np, 0);
if (priv->irq <= 0) {
dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq);
return priv->irq ? : -EINVAL;
}
if (priv->id == ID_MT7988)
priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
&mt7988_irq_domain_ops,
priv);
else
priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
&mt7530_irq_domain_ops,
priv);
if (!priv->irq_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
/* This register must be set for MT7530 to properly fire interrupts */
if (priv->id != ID_MT7531)
mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
IRQF_ONESHOT, KBUILD_MODNAME, priv);
if (ret) {
irq_domain_remove(priv->irq_domain);
dev_err(dev, "failed to request IRQ: %d\n", ret);
return ret;
}
return 0;
}
static void
mt7530_free_mdio_irq(struct mt7530_priv *priv)
{
int p;
for (p = 0; p < MT7530_NUM_PHYS; p++) {
if (BIT(p) & priv->ds->phys_mii_mask) {
unsigned int irq;
irq = irq_find_mapping(priv->irq_domain, p);
irq_dispose_mapping(irq);
}
}
}
static void
mt7530_free_irq_common(struct mt7530_priv *priv)
{
free_irq(priv->irq, priv);
irq_domain_remove(priv->irq_domain);
}
static void
mt7530_free_irq(struct mt7530_priv *priv)
{
mt7530_free_mdio_irq(priv);
mt7530_free_irq_common(priv);
}
static int
mt7530_setup_mdio(struct mt7530_priv *priv)
{
struct dsa_switch *ds = priv->ds;
struct device *dev = priv->dev;
struct mii_bus *bus;
static int idx;
int ret;
bus = devm_mdiobus_alloc(dev);
if (!bus)
return -ENOMEM;
ds->slave_mii_bus = bus;
bus->priv = priv;
bus->name = KBUILD_MODNAME "-mii";
snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
bus->read = mt753x_phy_read_c22;
bus->write = mt753x_phy_write_c22;
bus->read_c45 = mt753x_phy_read_c45;
bus->write_c45 = mt753x_phy_write_c45;
bus->parent = dev;
bus->phy_mask = ~ds->phys_mii_mask;
if (priv->irq)
mt7530_setup_mdio_irq(priv);
ret = devm_mdiobus_register(dev, bus);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
if (priv->irq)
mt7530_free_mdio_irq(priv);
}
return ret;
}
static int
mt7530_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
struct device_node *dn = NULL;
struct device_node *phy_node;
struct device_node *mac_np;
struct mt7530_dummy_poll p;
phy_interface_t interface;
struct dsa_port *cpu_dp;
u32 id, val;
int ret, i;
/* The parent node of the DSA master netdev holds the common system
 * controller and also contains the two GMAC nodes, each represented
 * as a netdev instance.
 */
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
dn = cpu_dp->master->dev.of_node->parent;
/* It doesn't matter which CPU port is found first,
* their masters should share the same parent OF node
*/
break;
}
if (!dn) {
dev_err(ds->dev, "parent OF node of DSA master not found");
return -EINVAL;
}
ds->assisted_learning_on_cpu_port = true;
ds->mtu_enforcement_ingress = true;
if (priv->id == ID_MT7530) {
regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
ret = regulator_enable(priv->core_pwr);
if (ret < 0) {
dev_err(priv->dev,
"Failed to enable core power: %d\n", ret);
return ret;
}
regulator_set_voltage(priv->io_pwr, 3300000, 3300000);
ret = regulator_enable(priv->io_pwr);
if (ret < 0) {
dev_err(priv->dev, "Failed to enable io pwr: %d\n",
ret);
return ret;
}
}
/* Reset the whole chip through the GPIO pin or memory-mapped
 * registers, depending on the type of hardware.
 */
if (priv->mcm) {
reset_control_assert(priv->rstc);
usleep_range(1000, 1100);
reset_control_deassert(priv->rstc);
} else {
gpiod_set_value_cansleep(priv->reset, 0);
usleep_range(1000, 1100);
gpiod_set_value_cansleep(priv->reset, 1);
}
/* Wait for the MT7530 to settle after reset */
INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
20, 1000000);
if (ret < 0) {
dev_err(priv->dev, "reset timeout\n");
return ret;
}
id = mt7530_read(priv, MT7530_CREV);
id >>= CHIP_NAME_SHIFT;
if (id != MT7530_ID) {
dev_err(priv->dev, "chip %x can't be supported\n", id);
return -ENODEV;
}
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
mt7530_pll_setup(priv);
/* Lower Tx driving for TRGMII path */
for (i = 0; i < NUM_TRGMII_CTRL; i++)
mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
TD_DM_DRVP(8) | TD_DM_DRVN(8));
for (i = 0; i < NUM_TRGMII_CTRL; i++)
mt7530_rmw(priv, MT7530_TRGMII_RD(i),
RD_TAP_MASK, RD_TAP(16));
/* Enable port 6 */
val = mt7530_read(priv, MT7530_MHWTRAP);
val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
val |= MHWTRAP_MANUAL;
mt7530_write(priv, MT7530_MHWTRAP, val);
priv->p6_interface = PHY_INTERFACE_MODE_NA;
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
for (i = 0; i < MT7530_NUM_PORTS; i++) {
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
/* Disable learning by default on all ports */
mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
if (dsa_is_cpu_port(ds, i)) {
ret = mt753x_cpu_port_enable(ds, i);
if (ret)
return ret;
} else {
mt7530_port_disable(ds, i);
/* Set default PVID to 0 on all user ports */
mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
G0_PORT_VID_DEF);
}
/* Enable consistent egress tag */
mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
if (ret)
return ret;
/* Setup port 5 */
priv->p5_intf_sel = P5_DISABLED;
interface = PHY_INTERFACE_MODE_NA;
if (!dsa_is_unused_port(ds, 5)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface);
if (ret && ret != -ENODEV)
return ret;
} else {
/* Scan the Ethernet nodes: look for GMAC1 and the PHY it uses */
for_each_child_of_node(dn, mac_np) {
if (!of_device_is_compatible(mac_np,
"mediatek,eth-mac"))
continue;
ret = of_property_read_u32(mac_np, "reg", &id);
if (ret < 0 || id != 1)
continue;
phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
if (!phy_node)
continue;
if (phy_node->parent == priv->dev->of_node->parent) {
ret = of_get_phy_mode(mac_np, &interface);
if (ret && ret != -ENODEV) {
of_node_put(mac_np);
of_node_put(phy_node);
return ret;
}
id = of_mdio_parse_addr(ds->dev, phy_node);
if (id == 0)
priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
if (id == 4)
priv->p5_intf_sel = P5_INTF_SEL_PHY_P4;
}
of_node_put(mac_np);
of_node_put(phy_node);
break;
}
}
#ifdef CONFIG_GPIOLIB
if (of_property_read_bool(priv->dev->of_node, "gpio-controller")) {
ret = mt7530_setup_gpio(priv);
if (ret)
return ret;
}
#endif /* CONFIG_GPIOLIB */
mt7530_setup_port5(ds, interface);
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
return ret;
return 0;
}
static int
mt7531_setup_common(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
int ret, i;
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
/* Disable flooding on all ports */
mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK |
UNU_FFP_MASK);
for (i = 0; i < MT7530_NUM_PORTS; i++) {
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
/* Disable learning by default on all ports */
mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
if (dsa_is_cpu_port(ds, i)) {
ret = mt753x_cpu_port_enable(ds, i);
if (ret)
return ret;
} else {
mt7530_port_disable(ds, i);
/* Set default PVID to 0 on all user ports */
mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
G0_PORT_VID_DEF);
}
/* Enable consistent egress tag */
mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
return ret;
return 0;
}
static int
mt7531_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
struct mt7530_dummy_poll p;
u32 val, id;
int ret, i;
/* Reset the whole chip through the GPIO pin or memory-mapped
 * registers, depending on the type of hardware.
 */
if (priv->mcm) {
reset_control_assert(priv->rstc);
usleep_range(1000, 1100);
reset_control_deassert(priv->rstc);
} else {
gpiod_set_value_cansleep(priv->reset, 0);
usleep_range(1000, 1100);
gpiod_set_value_cansleep(priv->reset, 1);
}
/* Wait for the switch to settle after reset */
INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
20, 1000000);
if (ret < 0) {
dev_err(priv->dev, "reset timeout\n");
return ret;
}
id = mt7530_read(priv, MT7531_CREV);
id >>= CHIP_NAME_SHIFT;
if (id != MT7531_ID) {
dev_err(priv->dev, "chip %x can't be supported\n", id);
return -ENODEV;
}
/* all MACs must be forced link-down before sw reset */
for (i = 0; i < MT7530_NUM_PORTS; i++)
mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
mt7531_pll_setup(priv);
if (mt7531_dual_sgmii_supported(priv)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
/* Allow ds->slave_mii_bus to access the external PHY. */
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
MT7531_EXT_P_MDC_11);
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
MT7531_EXT_P_MDIO_12);
} else {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
}
dev_dbg(ds->dev, "P5 support %s interface\n",
p5_intf_modes(priv->p5_intf_sel));
mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
MT7531_GPIO0_INTERRUPT);
/* Let phylink decide the interface later. */
priv->p5_interface = PHY_INTERFACE_MODE_NA;
priv->p6_interface = PHY_INTERFACE_MODE_NA;
/* Enable the PHY core PLL. Since no phy_device has been created yet
 * that could be passed to phy_[read,write]_mmd_indirect(), use our
 * own mt7531_ind_c45_phy_[read,write]() helpers for the access.
 */
val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
MDIO_MMD_VEND2, CORE_PLL_GROUP4);
val |= MT7531_PHY_PLL_BYPASS_MODE;
val &= ~MT7531_PHY_PLL_OFF;
mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
CORE_PLL_GROUP4, val);
mt7531_setup_common(ds);
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
if (ret)
return ret;
ds->assisted_learning_on_cpu_port = true;
ds->mtu_enforcement_ingress = true;
return 0;
}
static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
switch (port) {
case 0 ... 4: /* Internal phy */
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
break;
case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_MII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
break;
case 6: /* 1st cpu port */
__set_bit(PHY_INTERFACE_MODE_RGMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_TRGMII,
config->supported_interfaces);
break;
}
}
static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
{
return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
}
static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
switch (port) {
case 0 ... 4: /* Internal phy */
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
break;
case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
if (mt7531_is_rgmii_port(priv, port)) {
phy_interface_set_rgmii(config->supported_interfaces);
break;
}
fallthrough;
case 6: /* 1st cpu port supports sgmii/8023z only */
__set_bit(PHY_INTERFACE_MODE_SGMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_2500BASEX,
config->supported_interfaces);
config->mac_capabilities |= MAC_2500FD;
break;
}
}
static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
phy_interface_zero(config->supported_interfaces);
switch (port) {
case 0 ... 4: /* Internal phy */
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10000FD;
}
}
static int
mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state)
{
struct mt7530_priv *priv = ds->priv;
return priv->info->pad_setup(ds, state->interface);
}
static int
mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
/* Only need to setup port5. */
if (port != 5)
return 0;
mt7530_setup_port5(priv->ds, interface);
return 0;
}
static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
phy_interface_t interface,
struct phy_device *phydev)
{
u32 val;
if (!mt7531_is_rgmii_port(priv, port)) {
dev_err(priv->dev, "RGMII mode is not available for port %d\n",
port);
return -EINVAL;
}
val = mt7530_read(priv, MT7531_CLKGEN_CTRL);
val |= GP_CLK_EN;
val &= ~GP_MODE_MASK;
val |= GP_MODE(MT7531_GP_MODE_RGMII);
val &= ~CLK_SKEW_IN_MASK;
val |= CLK_SKEW_IN(MT7531_CLK_SKEW_NO_CHG);
val &= ~CLK_SKEW_OUT_MASK;
val |= CLK_SKEW_OUT(MT7531_CLK_SKEW_NO_CHG);
val |= TXCLK_NO_REVERSE | RXCLK_NO_DELAY;
/* Do not adjust the RGMII delay when a vendor PHY driver is present. */
if (!phydev || phy_driver_is_genphy(phydev)) {
val &= ~(TXCLK_NO_REVERSE | RXCLK_NO_DELAY);
switch (interface) {
case PHY_INTERFACE_MODE_RGMII:
val |= TXCLK_NO_REVERSE;
val |= RXCLK_NO_DELAY;
break;
case PHY_INTERFACE_MODE_RGMII_RXID:
val |= TXCLK_NO_REVERSE;
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
val |= RXCLK_NO_DELAY;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
break;
default:
return -EINVAL;
}
}
mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
return 0;
}
static bool mt753x_is_mac_port(u32 port)
{
return (port == 5 || port == 6);
}
static int
mt7988_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
if (dsa_is_cpu_port(ds, port) &&
interface == PHY_INTERFACE_MODE_INTERNAL)
return 0;
return -EINVAL;
}
static int
mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
struct phy_device *phydev;
struct dsa_port *dp;
if (!mt753x_is_mac_port(port)) {
dev_err(priv->dev, "port %d is not a MAC port\n", port);
return -EINVAL;
}
switch (interface) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
dp = dsa_to_port(ds, port);
phydev = dp->slave->phydev;
return mt7531_rgmii_setup(priv, port, interface, phydev);
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
/* handled in SGMII PCS driver */
return 0;
default:
return -EINVAL;
}
return -EINVAL;
}
static int
mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct mt7530_priv *priv = ds->priv;
return priv->info->mac_port_config(ds, port, mode, state->interface);
}
static struct phylink_pcs *
mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
switch (interface) {
case PHY_INTERFACE_MODE_TRGMII:
return &priv->pcs[port].pcs;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
return priv->ports[port].sgmii_pcs;
default:
return NULL;
}
}
static void
mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct mt7530_priv *priv = ds->priv;
u32 mcr_cur, mcr_new;
switch (port) {
case 0 ... 4: /* Internal phy */
if (state->interface != PHY_INTERFACE_MODE_GMII &&
state->interface != PHY_INTERFACE_MODE_INTERNAL)
goto unsupported;
break;
case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
if (priv->p5_interface == state->interface)
break;
if (mt753x_mac_config(ds, port, mode, state) < 0)
goto unsupported;
if (priv->p5_intf_sel != P5_DISABLED)
priv->p5_interface = state->interface;
break;
case 6: /* 1st cpu port */
if (priv->p6_interface == state->interface)
break;
mt753x_pad_setup(ds, state);
if (mt753x_mac_config(ds, port, mode, state) < 0)
goto unsupported;
priv->p6_interface = state->interface;
break;
default:
unsupported:
dev_err(ds->dev, "%s: unsupported %s port: %i\n",
__func__, phy_modes(state->interface), port);
return;
}
mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
mcr_new = mcr_cur;
mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
PMCR_BACKPR_EN | PMCR_FORCE_MODE_ID(priv->id);
/* Are we connected to an external PHY? */
if (port == 5 && dsa_is_user_port(ds, 5))
mcr_new |= PMCR_EXT_PHY;
if (mcr_new != mcr_cur)
mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
}
static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
}
static void mt753x_phylink_pcs_link_up(struct phylink_pcs *pcs,
unsigned int mode,
phy_interface_t interface,
int speed, int duplex)
{
if (pcs->ops->pcs_link_up)
pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex);
}
static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct mt7530_priv *priv = ds->priv;
u32 mcr;
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
/* MT753x MAC works in 1G full duplex mode for all up-clocked
* variants.
*/
if (interface == PHY_INTERFACE_MODE_INTERNAL ||
interface == PHY_INTERFACE_MODE_TRGMII ||
(phy_interface_mode_is_8023z(interface))) {
speed = SPEED_1000;
duplex = DUPLEX_FULL;
}
switch (speed) {
case SPEED_1000:
mcr |= PMCR_FORCE_SPEED_1000;
break;
case SPEED_100:
mcr |= PMCR_FORCE_SPEED_100;
break;
}
if (duplex == DUPLEX_FULL) {
mcr |= PMCR_FORCE_FDX;
if (tx_pause)
mcr |= PMCR_TX_FC_EN;
if (rx_pause)
mcr |= PMCR_RX_FC_EN;
}
if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
switch (speed) {
case SPEED_1000:
mcr |= PMCR_FORCE_EEE1G;
break;
case SPEED_100:
mcr |= PMCR_FORCE_EEE100;
break;
}
}
mt7530_set(priv, MT7530_PMCR_P(port), mcr);
}
static int
mt7531_cpu_port_config(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
phy_interface_t interface;
int speed;
int ret;
switch (port) {
case 5:
if (mt7531_is_rgmii_port(priv, port))
interface = PHY_INTERFACE_MODE_RGMII;
else
interface = PHY_INTERFACE_MODE_2500BASEX;
priv->p5_interface = interface;
break;
case 6:
interface = PHY_INTERFACE_MODE_2500BASEX;
priv->p6_interface = interface;
break;
default:
return -EINVAL;
}
if (interface == PHY_INTERFACE_MODE_2500BASEX)
speed = SPEED_2500;
else
speed = SPEED_1000;
ret = mt7531_mac_config(ds, port, MLO_AN_FIXED, interface);
if (ret)
return ret;
mt7530_write(priv, MT7530_PMCR_P(port),
PMCR_CPU_PORT_SETTING(priv->id));
mt753x_phylink_pcs_link_up(&priv->pcs[port].pcs, MLO_AN_FIXED,
interface, speed, DUPLEX_FULL);
mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
speed, DUPLEX_FULL, true, true);
return 0;
}
static int
mt7988_cpu_port_config(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
mt7530_write(priv, MT7530_PMCR_P(port),
PMCR_CPU_PORT_SETTING(priv->id));
mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED,
PHY_INTERFACE_MODE_INTERNAL, NULL,
SPEED_10000, DUPLEX_FULL, true, true);
return 0;
}
static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
/* This switch only supports full-duplex at 1Gbps */
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD;
priv->info->mac_port_get_caps(ds, port, config);
}
static int mt753x_pcs_validate(struct phylink_pcs *pcs,
unsigned long *supported,
const struct phylink_link_state *state)
{
/* Autonegotiation is not supported in TRGMII nor 802.3z modes */
if (state->interface == PHY_INTERFACE_MODE_TRGMII ||
phy_interface_mode_is_8023z(state->interface))
phylink_clear(supported, Autoneg);
return 0;
}
static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
int port = pcs_to_mt753x_pcs(pcs)->port;
u32 pmsr;
pmsr = mt7530_read(priv, MT7530_PMSR_P(port));
state->link = (pmsr & PMSR_LINK);
state->an_complete = state->link;
state->duplex = !!(pmsr & PMSR_DPX);
switch (pmsr & PMSR_SPEED_MASK) {
case PMSR_SPEED_10:
state->speed = SPEED_10;
break;
case PMSR_SPEED_100:
state->speed = SPEED_100;
break;
case PMSR_SPEED_1000:
state->speed = SPEED_1000;
break;
default:
state->speed = SPEED_UNKNOWN;
break;
}
state->pause &= ~(MLO_PAUSE_RX | MLO_PAUSE_TX);
if (pmsr & PMSR_RX_FC)
state->pause |= MLO_PAUSE_RX;
if (pmsr & PMSR_TX_FC)
state->pause |= MLO_PAUSE_TX;
}
static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
return 0;
}
static void mt7530_pcs_an_restart(struct phylink_pcs *pcs)
{
}
static const struct phylink_pcs_ops mt7530_pcs_ops = {
.pcs_validate = mt753x_pcs_validate,
.pcs_get_state = mt7530_pcs_get_state,
.pcs_config = mt753x_pcs_config,
.pcs_an_restart = mt7530_pcs_an_restart,
};
static int
mt753x_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
int i, ret;
/* Initialise the PCS devices */
for (i = 0; i < priv->ds->num_ports; i++) {
priv->pcs[i].pcs.ops = priv->info->pcs_ops;
priv->pcs[i].pcs.neg_mode = true;
priv->pcs[i].priv = priv;
priv->pcs[i].port = i;
}
ret = priv->info->sw_setup(ds);
if (ret)
return ret;
ret = mt7530_setup_irq(priv);
if (ret)
return ret;
ret = mt7530_setup_mdio(priv);
if (ret && priv->irq)
mt7530_free_irq_common(priv);
if (priv->create_sgmii) {
ret = priv->create_sgmii(priv, mt7531_dual_sgmii_supported(priv));
if (ret && priv->irq)
mt7530_free_irq(priv);
}
return ret;
}
static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
struct mt7530_priv *priv = ds->priv;
u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port));
e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
e->tx_lpi_timer = GET_LPI_THRESH(eeecr);
return 0;
}
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
struct mt7530_priv *priv = ds->priv;
u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
if (e->tx_lpi_timer > 0xFFF)
return -EINVAL;
set = SET_LPI_THRESH(e->tx_lpi_timer);
if (!e->tx_lpi_enabled)
/* Force LPI Mode without a delay */
set |= LPI_MODE_EN;
mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set);
return 0;
}
static int mt7988_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
{
return 0;
}
static int mt7988_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
/* Reset the switch */
reset_control_assert(priv->rstc);
usleep_range(20, 50);
reset_control_deassert(priv->rstc);
usleep_range(20, 50);
/* Reset the switch PHYs */
mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_PHY_RST);
return mt7531_setup_common(ds);
}
const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
.setup = mt753x_setup,
.preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port,
.get_strings = mt7530_get_strings,
.get_ethtool_stats = mt7530_get_ethtool_stats,
.get_sset_count = mt7530_get_sset_count,
.set_ageing_time = mt7530_set_ageing_time,
.port_enable = mt7530_port_enable,
.port_disable = mt7530_port_disable,
.port_change_mtu = mt7530_port_change_mtu,
.port_max_mtu = mt7530_port_max_mtu,
.port_stp_state_set = mt7530_stp_state_set,
.port_pre_bridge_flags = mt7530_port_pre_bridge_flags,
.port_bridge_flags = mt7530_port_bridge_flags,
.port_bridge_join = mt7530_port_bridge_join,
.port_bridge_leave = mt7530_port_bridge_leave,
.port_fdb_add = mt7530_port_fdb_add,
.port_fdb_del = mt7530_port_fdb_del,
.port_fdb_dump = mt7530_port_fdb_dump,
.port_mdb_add = mt7530_port_mdb_add,
.port_mdb_del = mt7530_port_mdb_del,
.port_vlan_filtering = mt7530_port_vlan_filtering,
.port_vlan_add = mt7530_port_vlan_add,
.port_vlan_del = mt7530_port_vlan_del,
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
.phylink_get_caps = mt753x_phylink_get_caps,
.phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs,
.phylink_mac_config = mt753x_phylink_mac_config,
.phylink_mac_link_down = mt753x_phylink_mac_link_down,
.phylink_mac_link_up = mt753x_phylink_mac_link_up,
.get_mac_eee = mt753x_get_mac_eee,
.set_mac_eee = mt753x_set_mac_eee,
};
EXPORT_SYMBOL_GPL(mt7530_switch_ops);
const struct mt753x_info mt753x_table[] = {
[ID_MT7621] = {
.id = ID_MT7621,
.pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
.phy_read_c22 = mt7530_phy_read_c22,
.phy_write_c22 = mt7530_phy_write_c22,
.phy_read_c45 = mt7530_phy_read_c45,
.phy_write_c45 = mt7530_phy_write_c45,
.pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7530] = {
.id = ID_MT7530,
.pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
.phy_read_c22 = mt7530_phy_read_c22,
.phy_write_c22 = mt7530_phy_write_c22,
.phy_read_c45 = mt7530_phy_read_c45,
.phy_write_c45 = mt7530_phy_write_c45,
.pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7531] = {
.id = ID_MT7531,
.pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7531_setup,
.phy_read_c22 = mt7531_ind_c22_phy_read,
.phy_write_c22 = mt7531_ind_c22_phy_write,
.phy_read_c45 = mt7531_ind_c45_phy_read,
.phy_write_c45 = mt7531_ind_c45_phy_write,
.pad_setup = mt7531_pad_setup,
.cpu_port_config = mt7531_cpu_port_config,
.mac_port_get_caps = mt7531_mac_port_get_caps,
.mac_port_config = mt7531_mac_config,
},
[ID_MT7988] = {
.id = ID_MT7988,
.pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7988_setup,
.phy_read_c22 = mt7531_ind_c22_phy_read,
.phy_write_c22 = mt7531_ind_c22_phy_write,
.phy_read_c45 = mt7531_ind_c45_phy_read,
.phy_write_c45 = mt7531_ind_c45_phy_write,
.pad_setup = mt7988_pad_setup,
.cpu_port_config = mt7988_cpu_port_config,
.mac_port_get_caps = mt7988_mac_port_get_caps,
.mac_port_config = mt7988_mac_config,
},
};
EXPORT_SYMBOL_GPL(mt753x_table);
int
mt7530_probe_common(struct mt7530_priv *priv)
{
struct device *dev = priv->dev;
priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
priv->ds->dev = dev;
priv->ds->num_ports = MT7530_NUM_PORTS;
/* Get the hardware identifier from the devicetree node.
* We will need it for some of the clock and regulator setup.
*/
priv->info = of_device_get_match_data(dev);
if (!priv->info)
return -EINVAL;
/* Sanity-check that the required device operations are all
 * provided.
 */
if (!priv->info->sw_setup || !priv->info->pad_setup ||
!priv->info->phy_read_c22 || !priv->info->phy_write_c22 ||
!priv->info->mac_port_get_caps ||
!priv->info->mac_port_config)
return -EINVAL;
priv->id = priv->info->id;
priv->dev = dev;
priv->ds->priv = priv;
priv->ds->ops = &mt7530_switch_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(dev, priv);
return 0;
}
EXPORT_SYMBOL_GPL(mt7530_probe_common);
void
mt7530_remove_common(struct mt7530_priv *priv)
{
if (priv->irq)
mt7530_free_irq(priv);
dsa_unregister_switch(priv->ds);
mutex_destroy(&priv->reg_mutex);
}
EXPORT_SYMBOL_GPL(mt7530_remove_common);
MODULE_AUTHOR("Sean Wang <[email protected]>");
MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/mt7530.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Pengutronix, Juergen Borleis <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
#include <linux/mutex.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include "lan9303.h"
/* For the LAN9303 and LAN9354, only port 0 is an XMII port. */
#define IS_PORT_XMII(port) ((port) == 0)
#define LAN9303_NUM_PORTS 3
/* 13.2 System Control and Status Registers
* Multiply register number by 4 to get address offset.
*/
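/* e.g. LAN9303_CHIP_REV (0x14) is at byte offset 0x14 * 4 = 0x50 */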
#define LAN9303_CHIP_REV 0x14
# define LAN9303_CHIP_ID 0x9303
# define LAN9352_CHIP_ID 0x9352
# define LAN9353_CHIP_ID 0x9353
# define LAN9354_CHIP_ID 0x9354
# define LAN9355_CHIP_ID 0x9355
#define LAN9303_IRQ_CFG 0x15
# define LAN9303_IRQ_CFG_IRQ_ENABLE BIT(8)
# define LAN9303_IRQ_CFG_IRQ_POL BIT(4)
# define LAN9303_IRQ_CFG_IRQ_TYPE BIT(0)
#define LAN9303_INT_STS 0x16
# define LAN9303_INT_STS_PHY_INT2 BIT(27)
# define LAN9303_INT_STS_PHY_INT1 BIT(26)
#define LAN9303_INT_EN 0x17
# define LAN9303_INT_EN_PHY_INT2_EN BIT(27)
# define LAN9303_INT_EN_PHY_INT1_EN BIT(26)
#define LAN9303_BYTE_ORDER 0x19
#define LAN9303_HW_CFG 0x1D
# define LAN9303_HW_CFG_READY BIT(27)
# define LAN9303_HW_CFG_AMDX_EN_PORT2 BIT(26)
# define LAN9303_HW_CFG_AMDX_EN_PORT1 BIT(25)
#define LAN9303_PMI_DATA 0x29
#define LAN9303_PMI_ACCESS 0x2A
# define LAN9303_PMI_ACCESS_PHY_ADDR(x) (((x) & 0x1f) << 11)
# define LAN9303_PMI_ACCESS_MIIRINDA(x) (((x) & 0x1f) << 6)
# define LAN9303_PMI_ACCESS_MII_BUSY BIT(0)
# define LAN9303_PMI_ACCESS_MII_WRITE BIT(1)
#define LAN9303_MANUAL_FC_1 0x68
#define LAN9303_MANUAL_FC_2 0x69
#define LAN9303_MANUAL_FC_0 0x6a
# define LAN9303_BP_EN BIT(6)
# define LAN9303_RX_FC_EN BIT(2)
# define LAN9303_TX_FC_EN BIT(1)
#define LAN9303_SWITCH_CSR_DATA 0x6b
#define LAN9303_SWITCH_CSR_CMD 0x6c
#define LAN9303_SWITCH_CSR_CMD_BUSY BIT(31)
#define LAN9303_SWITCH_CSR_CMD_RW BIT(30)
#define LAN9303_SWITCH_CSR_CMD_LANES (BIT(19) | BIT(18) | BIT(17) | BIT(16))
#define LAN9303_VIRT_PHY_BASE 0x70
#define LAN9303_VIRT_SPECIAL_CTRL 0x77
#define LAN9303_VIRT_SPECIAL_TURBO BIT(10) /* Turbo MII Enable */
/*13.4 Switch Fabric Control and Status Registers
* Accessed indirectly via SWITCH_CSR_CMD, SWITCH_CSR_DATA.
*/
#define LAN9303_SW_DEV_ID 0x0000
#define LAN9303_SW_RESET 0x0001
#define LAN9303_SW_RESET_RESET BIT(0)
#define LAN9303_SW_IMR 0x0004
#define LAN9303_SW_IPR 0x0005
#define LAN9303_MAC_VER_ID_0 0x0400
#define LAN9303_MAC_RX_CFG_0 0x0401
# define LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES BIT(1)
# define LAN9303_MAC_RX_CFG_X_RX_ENABLE BIT(0)
#define LAN9303_MAC_RX_UNDSZE_CNT_0 0x0410
#define LAN9303_MAC_RX_64_CNT_0 0x0411
#define LAN9303_MAC_RX_127_CNT_0 0x0412
#define LAN9303_MAC_RX_255_CNT_0 0x413
#define LAN9303_MAC_RX_511_CNT_0 0x0414
#define LAN9303_MAC_RX_1023_CNT_0 0x0415
#define LAN9303_MAC_RX_MAX_CNT_0 0x0416
#define LAN9303_MAC_RX_OVRSZE_CNT_0 0x0417
#define LAN9303_MAC_RX_PKTOK_CNT_0 0x0418
#define LAN9303_MAC_RX_CRCERR_CNT_0 0x0419
#define LAN9303_MAC_RX_MULCST_CNT_0 0x041a
#define LAN9303_MAC_RX_BRDCST_CNT_0 0x041b
#define LAN9303_MAC_RX_PAUSE_CNT_0 0x041c
#define LAN9303_MAC_RX_FRAG_CNT_0 0x041d
#define LAN9303_MAC_RX_JABB_CNT_0 0x041e
#define LAN9303_MAC_RX_ALIGN_CNT_0 0x041f
#define LAN9303_MAC_RX_PKTLEN_CNT_0 0x0420
#define LAN9303_MAC_RX_GOODPKTLEN_CNT_0 0x0421
#define LAN9303_MAC_RX_SYMBL_CNT_0 0x0422
#define LAN9303_MAC_RX_CTLFRM_CNT_0 0x0423
#define LAN9303_MAC_TX_CFG_0 0x0440
# define LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT (21 << 2)
# define LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE BIT(1)
# define LAN9303_MAC_TX_CFG_X_TX_ENABLE BIT(0)
#define LAN9303_MAC_TX_DEFER_CNT_0 0x0451
#define LAN9303_MAC_TX_PAUSE_CNT_0 0x0452
#define LAN9303_MAC_TX_PKTOK_CNT_0 0x0453
#define LAN9303_MAC_TX_64_CNT_0 0x0454
#define LAN9303_MAC_TX_127_CNT_0 0x0455
#define LAN9303_MAC_TX_255_CNT_0 0x0456
#define LAN9303_MAC_TX_511_CNT_0 0x0457
#define LAN9303_MAC_TX_1023_CNT_0 0x0458
#define LAN9303_MAC_TX_MAX_CNT_0 0x0459
#define LAN9303_MAC_TX_UNDSZE_CNT_0 0x045a
#define LAN9303_MAC_TX_PKTLEN_CNT_0 0x045c
#define LAN9303_MAC_TX_BRDCST_CNT_0 0x045d
#define LAN9303_MAC_TX_MULCST_CNT_0 0x045e
#define LAN9303_MAC_TX_LATECOL_0 0x045f
#define LAN9303_MAC_TX_EXCOL_CNT_0 0x0460
#define LAN9303_MAC_TX_SNGLECOL_CNT_0 0x0461
#define LAN9303_MAC_TX_MULTICOL_CNT_0 0x0462
#define LAN9303_MAC_TX_TOTALCOL_CNT_0 0x0463
#define LAN9303_MAC_VER_ID_1 0x0800
#define LAN9303_MAC_RX_CFG_1 0x0801
#define LAN9303_MAC_TX_CFG_1 0x0840
#define LAN9303_MAC_VER_ID_2 0x0c00
#define LAN9303_MAC_RX_CFG_2 0x0c01
#define LAN9303_MAC_TX_CFG_2 0x0c40
#define LAN9303_SWE_ALR_CMD 0x1800
# define LAN9303_ALR_CMD_MAKE_ENTRY BIT(2)
# define LAN9303_ALR_CMD_GET_FIRST BIT(1)
# define LAN9303_ALR_CMD_GET_NEXT BIT(0)
#define LAN9303_SWE_ALR_WR_DAT_0 0x1801
#define LAN9303_SWE_ALR_WR_DAT_1 0x1802
# define LAN9303_ALR_DAT1_VALID BIT(26)
# define LAN9303_ALR_DAT1_END_OF_TABL BIT(25)
# define LAN9303_ALR_DAT1_AGE_OVERRID BIT(25)
# define LAN9303_ALR_DAT1_STATIC BIT(24)
# define LAN9303_ALR_DAT1_PORT_BITOFFS 16
# define LAN9303_ALR_DAT1_PORT_MASK (7 << LAN9303_ALR_DAT1_PORT_BITOFFS)
#define LAN9303_SWE_ALR_RD_DAT_0 0x1805
#define LAN9303_SWE_ALR_RD_DAT_1 0x1806
#define LAN9303_SWE_ALR_CMD_STS 0x1808
# define ALR_STS_MAKE_PEND BIT(0)
#define LAN9303_SWE_VLAN_CMD 0x180b
# define LAN9303_SWE_VLAN_CMD_RNW BIT(5)
# define LAN9303_SWE_VLAN_CMD_PVIDNVLAN BIT(4)
#define LAN9303_SWE_VLAN_WR_DATA 0x180c
#define LAN9303_SWE_VLAN_RD_DATA 0x180e
# define LAN9303_SWE_VLAN_MEMBER_PORT2 BIT(17)
# define LAN9303_SWE_VLAN_UNTAG_PORT2 BIT(16)
# define LAN9303_SWE_VLAN_MEMBER_PORT1 BIT(15)
# define LAN9303_SWE_VLAN_UNTAG_PORT1 BIT(14)
# define LAN9303_SWE_VLAN_MEMBER_PORT0 BIT(13)
# define LAN9303_SWE_VLAN_UNTAG_PORT0 BIT(12)
#define LAN9303_SWE_VLAN_CMD_STS 0x1810
#define LAN9303_SWE_GLB_INGRESS_CFG 0x1840
# define LAN9303_SWE_GLB_INGR_IGMP_TRAP BIT(7)
# define LAN9303_SWE_GLB_INGR_IGMP_PORT(p) BIT(10 + p)
#define LAN9303_SWE_PORT_STATE 0x1843
# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT2 (0)
# define LAN9303_SWE_PORT_STATE_LEARNING_PORT2 BIT(5)
# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT2 BIT(4)
# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT1 (0)
# define LAN9303_SWE_PORT_STATE_LEARNING_PORT1 BIT(3)
# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT1 BIT(2)
# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT0 (0)
# define LAN9303_SWE_PORT_STATE_LEARNING_PORT0 BIT(1)
# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT0 BIT(0)
# define LAN9303_SWE_PORT_STATE_DISABLED_PORT0 (3)
#define LAN9303_SWE_PORT_MIRROR 0x1846
# define LAN9303_SWE_PORT_MIRROR_SNIFF_ALL BIT(8)
# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT2 BIT(7)
# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT1 BIT(6)
# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT0 BIT(5)
# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT2 BIT(4)
# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT1 BIT(3)
# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT0 BIT(2)
# define LAN9303_SWE_PORT_MIRROR_ENABLE_RX_MIRRORING BIT(1)
# define LAN9303_SWE_PORT_MIRROR_ENABLE_TX_MIRRORING BIT(0)
# define LAN9303_SWE_PORT_MIRROR_DISABLED 0
#define LAN9303_SWE_INGRESS_PORT_TYPE 0x1847
#define LAN9303_SWE_INGRESS_PORT_TYPE_VLAN 3
#define LAN9303_BM_CFG 0x1c00
#define LAN9303_BM_EGRSS_PORT_TYPE 0x1c0c
# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT2 (BIT(17) | BIT(16))
# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT1 (BIT(9) | BIT(8))
# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0 (BIT(1) | BIT(0))
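/* Per-port switch fabric registers are replicated in blocks of 0x400:
* port 0 at 0x0400, port 1 at 0x0800 and port 2 at 0x0c00.
*/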
#define LAN9303_SWITCH_PORT_REG(port, reg0) (0x400 * (port) + (reg0))
/* the built-in PHYs are of type LAN911X */
#define MII_LAN911X_SPECIAL_MODES 0x12
#define MII_LAN911X_SPECIAL_CONTROL_STATUS 0x1f
static const struct regmap_range lan9303_valid_regs[] = {
regmap_reg_range(0x14, 0x17), /* misc, interrupt */
regmap_reg_range(0x19, 0x19), /* endian test */
regmap_reg_range(0x1d, 0x1d), /* hardware config */
regmap_reg_range(0x23, 0x24), /* general purpose timer */
regmap_reg_range(0x27, 0x27), /* counter */
regmap_reg_range(0x29, 0x2a), /* PMI index regs */
regmap_reg_range(0x68, 0x6a), /* flow control */
regmap_reg_range(0x6b, 0x6c), /* switch fabric indirect regs */
regmap_reg_range(0x6d, 0x6f), /* misc */
regmap_reg_range(0x70, 0x77), /* virtual phy */
regmap_reg_range(0x78, 0x7a), /* GPIO */
regmap_reg_range(0x7c, 0x7e), /* MAC & reset */
regmap_reg_range(0x80, 0xb7), /* switch fabric direct regs (wr only) */
};
static const struct regmap_range lan9303_reserved_ranges[] = {
regmap_reg_range(0x00, 0x13),
regmap_reg_range(0x18, 0x18),
regmap_reg_range(0x1a, 0x1c),
regmap_reg_range(0x1e, 0x22),
regmap_reg_range(0x25, 0x26),
regmap_reg_range(0x28, 0x28),
regmap_reg_range(0x2b, 0x67),
regmap_reg_range(0x7b, 0x7b),
regmap_reg_range(0x7f, 0x7f),
regmap_reg_range(0xb8, 0xff),
};
const struct regmap_access_table lan9303_register_set = {
.yes_ranges = lan9303_valid_regs,
.n_yes_ranges = ARRAY_SIZE(lan9303_valid_regs),
.no_ranges = lan9303_reserved_ranges,
.n_no_ranges = ARRAY_SIZE(lan9303_reserved_ranges),
};
EXPORT_SYMBOL(lan9303_register_set);
/* Flow Control registers indexed by port number */
static unsigned int flow_ctl_reg[] = {
LAN9303_MANUAL_FC_0,
LAN9303_MANUAL_FC_1,
LAN9303_MANUAL_FC_2
};
static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
{
int ret, i;
/* we can lose arbitration for the I2C case, because the device
* tries to detect and read an external EEPROM after reset and acts as
* a master on the shared I2C bus itself. This conflicts with our
* attempts to access the device as a slave at the same moment.
*/
for (i = 0; i < 5; i++) {
ret = regmap_read(regmap, offset, reg);
if (!ret)
return 0;
if (ret != -EAGAIN)
break;
msleep(500);
}
return -EIO;
}
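/* Poll a chip register until all bits in 'mask' are cleared, or return
* -ETIMEDOUT after 25 attempts.
*/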
static int lan9303_read_wait(struct lan9303 *chip, int offset, u32 mask)
{
int i;
for (i = 0; i < 25; i++) {
u32 reg;
int ret;
ret = lan9303_read(chip->regmap, offset, &reg);
if (ret) {
dev_err(chip->dev, "%s failed to read offset %d: %d\n",
__func__, offset, ret);
return ret;
}
if (!(reg & mask))
return 0;
usleep_range(1000, 2000);
}
return -ETIMEDOUT;
}
static int lan9303_virt_phy_reg_read(struct lan9303 *chip, int regnum)
{
int ret;
u32 val;
if (regnum > MII_EXPANSION)
return -EINVAL;
ret = lan9303_read(chip->regmap, LAN9303_VIRT_PHY_BASE + regnum, &val);
if (ret)
return ret;
return val & 0xffff;
}
static int lan9303_virt_phy_reg_write(struct lan9303 *chip, int regnum, u16 val)
{
if (regnum > MII_EXPANSION)
return -EINVAL;
return regmap_write(chip->regmap, LAN9303_VIRT_PHY_BASE + regnum, val);
}
static int lan9303_indirect_phy_wait_for_completion(struct lan9303 *chip)
{
return lan9303_read_wait(chip, LAN9303_PMI_ACCESS,
LAN9303_PMI_ACCESS_MII_BUSY);
}
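/* The port PHYs can be reached indirectly through the PMI_ACCESS/PMI_DATA
* register pair: wait until MII_BUSY is clear, start the MII cycle, wait
* again and then fetch the result from PMI_DATA.
*/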
static int lan9303_indirect_phy_read(struct lan9303 *chip, int addr, int regnum)
{
int ret;
u32 val;
val = LAN9303_PMI_ACCESS_PHY_ADDR(addr);
val |= LAN9303_PMI_ACCESS_MIIRINDA(regnum);
mutex_lock(&chip->indirect_mutex);
ret = lan9303_indirect_phy_wait_for_completion(chip);
if (ret)
goto on_error;
/* start the MII read cycle */
ret = regmap_write(chip->regmap, LAN9303_PMI_ACCESS, val);
if (ret)
goto on_error;
ret = lan9303_indirect_phy_wait_for_completion(chip);
if (ret)
goto on_error;
/* read the result of this operation */
ret = lan9303_read(chip->regmap, LAN9303_PMI_DATA, &val);
if (ret)
goto on_error;
mutex_unlock(&chip->indirect_mutex);
return val & 0xffff;
on_error:
mutex_unlock(&chip->indirect_mutex);
return ret;
}
static int lan9303_indirect_phy_write(struct lan9303 *chip, int addr,
int regnum, u16 val)
{
int ret;
u32 reg;
reg = LAN9303_PMI_ACCESS_PHY_ADDR(addr);
reg |= LAN9303_PMI_ACCESS_MIIRINDA(regnum);
reg |= LAN9303_PMI_ACCESS_MII_WRITE;
mutex_lock(&chip->indirect_mutex);
ret = lan9303_indirect_phy_wait_for_completion(chip);
if (ret)
goto on_error;
/* write the data first... */
ret = regmap_write(chip->regmap, LAN9303_PMI_DATA, val);
if (ret)
goto on_error;
/* ...then start the MII write cycle */
ret = regmap_write(chip->regmap, LAN9303_PMI_ACCESS, reg);
on_error:
mutex_unlock(&chip->indirect_mutex);
return ret;
}
const struct lan9303_phy_ops lan9303_indirect_phy_ops = {
.phy_read = lan9303_indirect_phy_read,
.phy_write = lan9303_indirect_phy_write,
};
EXPORT_SYMBOL_GPL(lan9303_indirect_phy_ops);
static int lan9303_switch_wait_for_completion(struct lan9303 *chip)
{
return lan9303_read_wait(chip, LAN9303_SWITCH_CSR_CMD,
LAN9303_SWITCH_CSR_CMD_BUSY);
}
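/* Switch fabric registers are not mapped directly: writes load
* SWITCH_CSR_DATA and then trigger the transfer through SWITCH_CSR_CMD,
* guarded by the BUSY bit. Reads below work the same way in the other
* direction.
*/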
static int lan9303_write_switch_reg(struct lan9303 *chip, u16 regnum, u32 val)
{
u32 reg;
int ret;
reg = regnum;
reg |= LAN9303_SWITCH_CSR_CMD_LANES;
reg |= LAN9303_SWITCH_CSR_CMD_BUSY;
mutex_lock(&chip->indirect_mutex);
ret = lan9303_switch_wait_for_completion(chip);
if (ret)
goto on_error;
ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_DATA, val);
if (ret) {
dev_err(chip->dev, "Failed to write csr data reg: %d\n", ret);
goto on_error;
}
/* trigger write */
ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_CMD, reg);
if (ret)
dev_err(chip->dev, "Failed to write csr command reg: %d\n",
ret);
on_error:
mutex_unlock(&chip->indirect_mutex);
return ret;
}
static int lan9303_read_switch_reg(struct lan9303 *chip, u16 regnum, u32 *val)
{
u32 reg;
int ret;
reg = regnum;
reg |= LAN9303_SWITCH_CSR_CMD_LANES;
reg |= LAN9303_SWITCH_CSR_CMD_RW;
reg |= LAN9303_SWITCH_CSR_CMD_BUSY;
mutex_lock(&chip->indirect_mutex);
ret = lan9303_switch_wait_for_completion(chip);
if (ret)
goto on_error;
/* trigger read */
ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_CMD, reg);
if (ret) {
dev_err(chip->dev, "Failed to write csr command reg: %d\n",
ret);
goto on_error;
}
ret = lan9303_switch_wait_for_completion(chip);
if (ret)
goto on_error;
ret = lan9303_read(chip->regmap, LAN9303_SWITCH_CSR_DATA, val);
if (ret)
dev_err(chip->dev, "Failed to read csr data reg: %d\n", ret);
on_error:
mutex_unlock(&chip->indirect_mutex);
return ret;
}
static int lan9303_write_switch_reg_mask(struct lan9303 *chip, u16 regnum,
u32 val, u32 mask)
{
int ret;
u32 reg;
ret = lan9303_read_switch_reg(chip, regnum, &reg);
if (ret)
return ret;
reg = (reg & ~mask) | val;
return lan9303_write_switch_reg(chip, regnum, reg);
}
static int lan9303_write_switch_port(struct lan9303 *chip, int port,
u16 regnum, u32 val)
{
return lan9303_write_switch_reg(
chip, LAN9303_SWITCH_PORT_REG(port, regnum), val);
}
static int lan9303_read_switch_port(struct lan9303 *chip, int port,
u16 regnum, u32 *val)
{
return lan9303_read_switch_reg(
chip, LAN9303_SWITCH_PORT_REG(port, regnum), val);
}
static int lan9303_detect_phy_setup(struct lan9303 *chip)
{
int reg;
/* Calculate chip->phy_addr_base:
* Depending on the 'phy_addr_sel_strap' setting, the three PHYs use
* IDs 0-1-2 or IDs 1-2-3. We cannot read back the 'phy_addr_sel_strap'
* setting directly, so we need to test which configuration is active:
* Special reg 18 of phy 3 reads as 0x0000, if 'phy_addr_sel_strap' is 0
* and the IDs are 0-1-2, else it contains something different from
* 0x0000, which means 'phy_addr_sel_strap' is 1 and the IDs are 1-2-3.
* 0xffff is returned on MDIO read with no response.
*/
reg = chip->ops->phy_read(chip, 3, MII_LAN911X_SPECIAL_MODES);
if (reg < 0) {
dev_err(chip->dev, "Failed to detect phy config: %d\n", reg);
return reg;
}
chip->phy_addr_base = reg != 0 && reg != 0xffff;
dev_dbg(chip->dev, "Phy setup '%s' detected\n",
chip->phy_addr_base ? "1-2-3" : "0-1-2");
return 0;
}
/* Map ALR-port bits to port bitmap, and back */
static const int alrport_2_portmap[] = {1, 2, 4, 0, 3, 5, 6, 7 };
static const int portmap_2_alrport[] = {3, 0, 1, 4, 2, 5, 6, 7 };
/* Return pointer to first free ALR cache entry, return NULL if none */
static struct lan9303_alr_cache_entry *
lan9303_alr_cache_find_free(struct lan9303 *chip)
{
int i;
struct lan9303_alr_cache_entry *entr = chip->alr_cache;
for (i = 0; i < LAN9303_NUM_ALR_RECORDS; i++, entr++)
if (entr->port_map == 0)
return entr;
return NULL;
}
/* Return pointer to ALR cache entry matching MAC address */
static struct lan9303_alr_cache_entry *
lan9303_alr_cache_find_mac(struct lan9303 *chip, const u8 *mac_addr)
{
int i;
struct lan9303_alr_cache_entry *entr = chip->alr_cache;
BUILD_BUG_ON_MSG(sizeof(struct lan9303_alr_cache_entry) & 1,
"ether_addr_equal require u16 alignment");
for (i = 0; i < LAN9303_NUM_ALR_RECORDS; i++, entr++)
if (ether_addr_equal(entr->mac_addr, mac_addr))
return entr;
return NULL;
}
static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno, u32 mask)
{
int i;
for (i = 0; i < 25; i++) {
u32 reg;
lan9303_read_switch_reg(chip, regno, &reg);
if (!(reg & mask))
return 0;
usleep_range(1000, 2000);
}
return -ETIMEDOUT;
}
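/* Load a raw ALR entry into the WR_DAT registers, issue MAKE_ENTRY and wait
* until the MAKE operation is no longer pending.
*/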
static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
{
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_0, dat0);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_1, dat1);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
LAN9303_ALR_CMD_MAKE_ENTRY);
lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
return 0;
}
typedef int alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
int portmap, void *ctx);
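/* Walk the ALR table with GET_FIRST/GET_NEXT and call 'cb' for each entry
* until the end-of-table marker is reached or the callback returns non-zero.
*/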
static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
{
int ret = 0, i;
mutex_lock(&chip->alr_mutex);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
LAN9303_ALR_CMD_GET_FIRST);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
for (i = 1; i < LAN9303_NUM_ALR_RECORDS; i++) {
u32 dat0, dat1;
int alrport, portmap;
lan9303_read_switch_reg(chip, LAN9303_SWE_ALR_RD_DAT_0, &dat0);
lan9303_read_switch_reg(chip, LAN9303_SWE_ALR_RD_DAT_1, &dat1);
if (dat1 & LAN9303_ALR_DAT1_END_OF_TABL)
break;
alrport = (dat1 & LAN9303_ALR_DAT1_PORT_MASK) >>
LAN9303_ALR_DAT1_PORT_BITOFFS;
portmap = alrport_2_portmap[alrport];
ret = cb(chip, dat0, dat1, portmap, ctx);
if (ret)
break;
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
LAN9303_ALR_CMD_GET_NEXT);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
}
mutex_unlock(&chip->alr_mutex);
return ret;
}
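/* Extract the MAC address stored in an ALR dat0/dat1 register pair */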
static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
{
mac[0] = (dat0 >> 0) & 0xff;
mac[1] = (dat0 >> 8) & 0xff;
mac[2] = (dat0 >> 16) & 0xff;
mac[3] = (dat0 >> 24) & 0xff;
mac[4] = (dat1 >> 0) & 0xff;
mac[5] = (dat1 >> 8) & 0xff;
}
struct del_port_learned_ctx {
int port;
};
/* Clear learned (non-static) entry on given port */
static int alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
u32 dat1, int portmap, void *ctx)
{
struct del_port_learned_ctx *del_ctx = ctx;
int port = del_ctx->port;
if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC))
return 0;
/* a learned entry has only one port, so we can just delete it */
dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */
lan9303_alr_make_entry_raw(chip, dat0, dat1);
return 0;
}
struct port_fdb_dump_ctx {
int port;
void *data;
dsa_fdb_dump_cb_t *cb;
};
static int alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
u32 dat1, int portmap, void *ctx)
{
struct port_fdb_dump_ctx *dump_ctx = ctx;
u8 mac[ETH_ALEN];
bool is_static;
if ((BIT(dump_ctx->port) & portmap) == 0)
return 0;
alr_reg_to_mac(dat0, dat1, mac);
is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC);
return dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
}
/* Set a static ALR entry. Delete entry if port_map is zero */
static void lan9303_alr_set_entry(struct lan9303 *chip, const u8 *mac,
u8 port_map, bool stp_override)
{
u32 dat0, dat1, alr_port;
dev_dbg(chip->dev, "%s(%pM, %d)\n", __func__, mac, port_map);
dat1 = LAN9303_ALR_DAT1_STATIC;
if (port_map)
dat1 |= LAN9303_ALR_DAT1_VALID;
/* otherwise no ports: delete entry */
if (stp_override)
dat1 |= LAN9303_ALR_DAT1_AGE_OVERRID;
alr_port = portmap_2_alrport[port_map & 7];
dat1 &= ~LAN9303_ALR_DAT1_PORT_MASK;
dat1 |= alr_port << LAN9303_ALR_DAT1_PORT_BITOFFS;
dat0 = 0;
dat0 |= (mac[0] << 0);
dat0 |= (mac[1] << 8);
dat0 |= (mac[2] << 16);
dat0 |= (mac[3] << 24);
dat1 |= (mac[4] << 0);
dat1 |= (mac[5] << 8);
lan9303_alr_make_entry_raw(chip, dat0, dat1);
}
/* Add port to static ALR entry, create new static entry if needed */
static int lan9303_alr_add_port(struct lan9303 *chip, const u8 *mac, int port,
bool stp_override)
{
struct lan9303_alr_cache_entry *entr;
mutex_lock(&chip->alr_mutex);
entr = lan9303_alr_cache_find_mac(chip, mac);
if (!entr) { /* New entry */
entr = lan9303_alr_cache_find_free(chip);
if (!entr) {
mutex_unlock(&chip->alr_mutex);
return -ENOSPC;
}
ether_addr_copy(entr->mac_addr, mac);
}
entr->port_map |= BIT(port);
entr->stp_override = stp_override;
lan9303_alr_set_entry(chip, mac, entr->port_map, stp_override);
mutex_unlock(&chip->alr_mutex);
return 0;
}
/* Delete static port from ALR entry, delete entry if last port */
static int lan9303_alr_del_port(struct lan9303 *chip, const u8 *mac, int port)
{
struct lan9303_alr_cache_entry *entr;
mutex_lock(&chip->alr_mutex);
entr = lan9303_alr_cache_find_mac(chip, mac);
if (!entr)
goto out; /* no static entry found */
entr->port_map &= ~BIT(port);
if (entr->port_map == 0) /* zero means it's free again */
eth_zero_addr(entr->mac_addr);
lan9303_alr_set_entry(chip, mac, entr->port_map, entr->stp_override);
out:
mutex_unlock(&chip->alr_mutex);
return 0;
}
static int lan9303_disable_processing_port(struct lan9303 *chip,
unsigned int port)
{
int ret;
/* disable RX, but otherwise keep the register reset default values */
ret = lan9303_write_switch_port(chip, port, LAN9303_MAC_RX_CFG_0,
LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES);
if (ret)
return ret;
/* disable TX, but otherwise keep the register reset default values */
return lan9303_write_switch_port(chip, port, LAN9303_MAC_TX_CFG_0,
LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT |
LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE);
}
static int lan9303_enable_processing_port(struct lan9303 *chip,
unsigned int port)
{
int ret;
/* enable RX and otherwise keep the register reset default values */
ret = lan9303_write_switch_port(chip, port, LAN9303_MAC_RX_CFG_0,
LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES |
LAN9303_MAC_RX_CFG_X_RX_ENABLE);
if (ret)
return ret;
/* enable TX and otherwise keep the register reset default values */
return lan9303_write_switch_port(chip, port, LAN9303_MAC_TX_CFG_0,
LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT |
LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE |
LAN9303_MAC_TX_CFG_X_TX_ENABLE);
}
/* forward special tagged packets from port 0 to port 1 *or* port 2 */
static int lan9303_setup_tagging(struct lan9303 *chip)
{
int ret;
u32 val;
/* enable defining the destination port via special VLAN tagging
* for port 0
*/
ret = lan9303_write_switch_reg(chip, LAN9303_SWE_INGRESS_PORT_TYPE,
LAN9303_SWE_INGRESS_PORT_TYPE_VLAN);
if (ret)
return ret;
/* tag incoming packets at port 1 and 2 on their way to port 0 to be
* able to discover their source port
*/
val = LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0;
return lan9303_write_switch_reg(chip, LAN9303_BM_EGRSS_PORT_TYPE, val);
}
/* We want the switch to work in a specific way:
* - do not forward packets between port 1 and 2
* - forward everything from port 1 to port 0
* - forward everything from port 2 to port 0
*/
static int lan9303_separate_ports(struct lan9303 *chip)
{
int ret;
lan9303_alr_del_port(chip, eth_stp_addr, 0);
ret = lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_MIRROR,
LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT0 |
LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT1 |
LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT2 |
LAN9303_SWE_PORT_MIRROR_ENABLE_RX_MIRRORING |
LAN9303_SWE_PORT_MIRROR_SNIFF_ALL);
if (ret)
return ret;
/* prevent ports 1 and 2 from forwarding packets on their own */
return lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_STATE,
LAN9303_SWE_PORT_STATE_FORWARDING_PORT0 |
LAN9303_SWE_PORT_STATE_BLOCKING_PORT1 |
LAN9303_SWE_PORT_STATE_BLOCKING_PORT2);
}
static void lan9303_bridge_ports(struct lan9303 *chip)
{
/* ports bridged: remove mirroring */
lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_MIRROR,
LAN9303_SWE_PORT_MIRROR_DISABLED);
lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_STATE,
chip->swe_port_state);
lan9303_alr_add_port(chip, eth_stp_addr, 0, true);
}
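/* The reset GPIO is requested asserted (GPIOD_OUT_HIGH) in
* lan9303_probe_reset_gpio(); here we only wait for 'reset_duration' ms
* before releasing the line again.
*/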
static void lan9303_handle_reset(struct lan9303 *chip)
{
if (!chip->reset_gpio)
return;
if (chip->reset_duration != 0)
msleep(chip->reset_duration);
/* release (deassert) reset and activate the device */
gpiod_set_value_cansleep(chip->reset_gpio, 0);
}
/* stop processing packets for all ports */
static int lan9303_disable_processing(struct lan9303 *chip)
{
int p;
for (p = 1; p < LAN9303_NUM_PORTS; p++) {
int ret = lan9303_disable_processing_port(chip, p);
if (ret)
return ret;
}
return 0;
}
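/* Verify the chip ID (LAN9303 or LAN9354), stop all packet processing and
* detect the PHY address mapping.
*/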
static int lan9303_check_device(struct lan9303 *chip)
{
int ret;
u32 reg;
ret = lan9303_read(chip->regmap, LAN9303_CHIP_REV, &reg);
if (ret) {
dev_err(chip->dev, "failed to read chip revision register: %d\n",
ret);
return ret;
}
if (((reg >> 16) != LAN9303_CHIP_ID) &&
((reg >> 16) != LAN9354_CHIP_ID)) {
dev_err(chip->dev, "unexpected device found: LAN%4.4X\n",
reg >> 16);
return -ENODEV;
}
/* The default state of the LAN9303 device is to forward packets between
* all ports (if not configured differently by an external EEPROM).
* The initial state of a DSA device, however, must be to forward packets
* only between the external ports and the internal (CPU) port, with no
* forwarding between the external ports. In preparation we stop all packet
* handling for now, until the LAN9303 device is re-programmed accordingly.
*/
ret = lan9303_disable_processing(chip);
if (ret)
dev_warn(chip->dev, "failed to disable switching %d\n", ret);
dev_info(chip->dev, "Found LAN%4.4X rev. %u\n", (reg >> 16), reg & 0xffff);
ret = lan9303_detect_phy_setup(chip);
if (ret) {
dev_err(chip->dev,
"failed to discover phy bootstrap setup: %d\n", ret);
return ret;
}
return 0;
}
/* ---------------------------- DSA -----------------------------------*/
static enum dsa_tag_protocol lan9303_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_LAN9303;
}
static int lan9303_setup(struct dsa_switch *ds)
{
struct lan9303 *chip = ds->priv;
int ret;
u32 reg;
/* Make sure that port 0 is the cpu port */
if (!dsa_is_cpu_port(ds, 0)) {
dev_err(chip->dev, "port 0 is not the CPU port\n");
return -EINVAL;
}
/* Virtual Phy: Remove Turbo 200Mbit mode */
ret = lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &reg);
if (ret)
return ret;
/* Clear the TURBO Mode bit if it was set. */
if (reg & LAN9303_VIRT_SPECIAL_TURBO) {
reg &= ~LAN9303_VIRT_SPECIAL_TURBO;
regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, reg);
}
ret = lan9303_setup_tagging(chip);
if (ret)
dev_err(chip->dev, "failed to setup port tagging %d\n", ret);
ret = lan9303_separate_ports(chip);
if (ret)
dev_err(chip->dev, "failed to separate ports %d\n", ret);
ret = lan9303_enable_processing_port(chip, 0);
if (ret)
dev_err(chip->dev, "failed to re-enable switching %d\n", ret);
/* Trap IGMP to port 0 */
ret = lan9303_write_switch_reg_mask(chip, LAN9303_SWE_GLB_INGRESS_CFG,
LAN9303_SWE_GLB_INGR_IGMP_TRAP |
LAN9303_SWE_GLB_INGR_IGMP_PORT(0),
LAN9303_SWE_GLB_INGR_IGMP_PORT(1) |
LAN9303_SWE_GLB_INGR_IGMP_PORT(2));
if (ret)
dev_err(chip->dev, "failed to setup IGMP trap %d\n", ret);
return 0;
}
struct lan9303_mib_desc {
unsigned int offset; /* offset of first MAC */
const char *name;
};
static const struct lan9303_mib_desc lan9303_mib[] = {
{ .offset = LAN9303_MAC_RX_BRDCST_CNT_0, .name = "RxBroad", },
{ .offset = LAN9303_MAC_RX_PAUSE_CNT_0, .name = "RxPause", },
{ .offset = LAN9303_MAC_RX_MULCST_CNT_0, .name = "RxMulti", },
{ .offset = LAN9303_MAC_RX_PKTOK_CNT_0, .name = "RxOk", },
{ .offset = LAN9303_MAC_RX_CRCERR_CNT_0, .name = "RxCrcErr", },
{ .offset = LAN9303_MAC_RX_ALIGN_CNT_0, .name = "RxAlignErr", },
{ .offset = LAN9303_MAC_RX_JABB_CNT_0, .name = "RxJabber", },
{ .offset = LAN9303_MAC_RX_FRAG_CNT_0, .name = "RxFragment", },
{ .offset = LAN9303_MAC_RX_64_CNT_0, .name = "Rx64Byte", },
{ .offset = LAN9303_MAC_RX_127_CNT_0, .name = "Rx128Byte", },
{ .offset = LAN9303_MAC_RX_255_CNT_0, .name = "Rx256Byte", },
{ .offset = LAN9303_MAC_RX_511_CNT_0, .name = "Rx512Byte", },
{ .offset = LAN9303_MAC_RX_1023_CNT_0, .name = "Rx1024Byte", },
{ .offset = LAN9303_MAC_RX_MAX_CNT_0, .name = "RxMaxByte", },
{ .offset = LAN9303_MAC_RX_PKTLEN_CNT_0, .name = "RxByteCnt", },
{ .offset = LAN9303_MAC_RX_SYMBL_CNT_0, .name = "RxSymbolCnt", },
{ .offset = LAN9303_MAC_RX_CTLFRM_CNT_0, .name = "RxCfs", },
{ .offset = LAN9303_MAC_RX_OVRSZE_CNT_0, .name = "RxOverFlow", },
{ .offset = LAN9303_MAC_TX_UNDSZE_CNT_0, .name = "TxShort", },
{ .offset = LAN9303_MAC_TX_BRDCST_CNT_0, .name = "TxBroad", },
{ .offset = LAN9303_MAC_TX_PAUSE_CNT_0, .name = "TxPause", },
{ .offset = LAN9303_MAC_TX_MULCST_CNT_0, .name = "TxMulti", },
{ .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "RxShort", },
{ .offset = LAN9303_MAC_TX_64_CNT_0, .name = "Tx64Byte", },
{ .offset = LAN9303_MAC_TX_127_CNT_0, .name = "Tx128Byte", },
{ .offset = LAN9303_MAC_TX_255_CNT_0, .name = "Tx256Byte", },
{ .offset = LAN9303_MAC_TX_511_CNT_0, .name = "Tx512Byte", },
{ .offset = LAN9303_MAC_TX_1023_CNT_0, .name = "Tx1024Byte", },
{ .offset = LAN9303_MAC_TX_MAX_CNT_0, .name = "TxMaxByte", },
{ .offset = LAN9303_MAC_TX_PKTLEN_CNT_0, .name = "TxByteCnt", },
{ .offset = LAN9303_MAC_TX_PKTOK_CNT_0, .name = "TxOk", },
{ .offset = LAN9303_MAC_TX_TOTALCOL_CNT_0, .name = "TxCollision", },
{ .offset = LAN9303_MAC_TX_MULTICOL_CNT_0, .name = "TxMultiCol", },
{ .offset = LAN9303_MAC_TX_SNGLECOL_CNT_0, .name = "TxSingleCol", },
{ .offset = LAN9303_MAC_TX_EXCOL_CNT_0, .name = "TxExcCol", },
{ .offset = LAN9303_MAC_TX_DEFER_CNT_0, .name = "TxDefer", },
{ .offset = LAN9303_MAC_TX_LATECOL_0, .name = "TxLateCol", },
};
static void lan9303_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data)
{
unsigned int u;
if (stringset != ETH_SS_STATS)
return;
for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name,
ETH_GSTRING_LEN);
}
}
static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct lan9303 *chip = ds->priv;
unsigned int u;
for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
u32 reg;
int ret;
ret = lan9303_read_switch_port(
chip, port, lan9303_mib[u].offset, &reg);
if (ret) {
dev_warn(chip->dev, "Reading status port %d reg %u failed\n",
port, lan9303_mib[u].offset);
reg = 0;
}
data[u] = reg;
}
}
static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
if (sset != ETH_SS_STATS)
return 0;
return ARRAY_SIZE(lan9303_mib);
}
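/* MDIO accessors used by the DSA core: phy_addr_base addresses the virtual
* PHY of the xMII port, the next two addresses map to the port 1 and port 2
* PHYs.
*/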
static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
struct lan9303 *chip = ds->priv;
int phy_base = chip->phy_addr_base;
if (phy == phy_base)
return lan9303_virt_phy_reg_read(chip, regnum);
if (phy > phy_base + 2)
return -ENODEV;
return chip->ops->phy_read(chip, phy, regnum);
}
static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
u16 val)
{
struct lan9303 *chip = ds->priv;
int phy_base = chip->phy_addr_base;
if (phy == phy_base)
return lan9303_virt_phy_reg_write(chip, regnum, val);
if (phy > phy_base + 2)
return -ENODEV;
return chip->ops->phy_write(chip, phy, regnum, val);
}
static int lan9303_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
if (!dsa_port_is_user(dp))
return 0;
vlan_vid_add(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
return lan9303_enable_processing_port(chip, port);
}
static void lan9303_port_disable(struct dsa_switch *ds, int port)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
if (!dsa_port_is_user(dp))
return;
vlan_vid_del(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
}
static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(port %d)\n", __func__, port);
if (dsa_port_bridge_same(dsa_to_port(ds, 1), dsa_to_port(ds, 2))) {
lan9303_bridge_ports(chip);
chip->is_bridged = true; /* unleash stp_state_set() */
}
return 0;
}
static void lan9303_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(port %d)\n", __func__, port);
if (chip->is_bridged) {
lan9303_separate_ports(chip);
chip->is_bridged = false;
}
}
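/* SWE_PORT_STATE holds a 2-bit state field per port. The new state is
* cached in swe_port_state and only written to the hardware while the ports
* are bridged, since touching the register earlier would break port
* separation.
*/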
static void lan9303_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
int portmask, portstate;
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(port %d, state %d)\n",
__func__, port, state);
switch (state) {
case BR_STATE_DISABLED:
portstate = LAN9303_SWE_PORT_STATE_DISABLED_PORT0;
break;
case BR_STATE_BLOCKING:
case BR_STATE_LISTENING:
portstate = LAN9303_SWE_PORT_STATE_BLOCKING_PORT0;
break;
case BR_STATE_LEARNING:
portstate = LAN9303_SWE_PORT_STATE_LEARNING_PORT0;
break;
case BR_STATE_FORWARDING:
portstate = LAN9303_SWE_PORT_STATE_FORWARDING_PORT0;
break;
default:
portstate = LAN9303_SWE_PORT_STATE_DISABLED_PORT0;
dev_err(chip->dev, "unknown stp state: port %d, state %d\n",
port, state);
}
portmask = 0x3 << (port * 2);
portstate <<= (port * 2);
chip->swe_port_state = (chip->swe_port_state & ~portmask) | portstate;
if (chip->is_bridged)
lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_STATE,
chip->swe_port_state);
/* else: touching SWE_PORT_STATE would break port separation */
}
static void lan9303_port_fast_age(struct dsa_switch *ds, int port)
{
struct lan9303 *chip = ds->priv;
struct del_port_learned_ctx del_ctx = {
.port = port,
};
dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
lan9303_alr_loop(chip, alr_loop_cb_del_port_learned, &del_ctx);
}
static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
return lan9303_alr_add_port(chip, addr, port, false);
}
static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
lan9303_alr_del_port(chip, addr, port);
return 0;
}
static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct lan9303 *chip = ds->priv;
struct port_fdb_dump_ctx dump_ctx = {
.port = port,
.data = data,
.cb = cb,
};
dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
return lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
}
static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb)
{
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, mdb->addr,
mdb->vid);
if (mdb->vid)
return -EOPNOTSUPP;
if (lan9303_alr_cache_find_mac(chip, mdb->addr))
return 0;
if (!lan9303_alr_cache_find_free(chip))
return -ENOSPC;
return 0;
}
static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
int err;
err = lan9303_port_mdb_prepare(ds, port, mdb);
if (err)
return err;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, mdb->addr,
mdb->vid);
return lan9303_alr_add_port(chip, mdb->addr, port, false);
}
static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, mdb->addr,
mdb->vid);
if (mdb->vid)
return -EOPNOTSUPP;
lan9303_alr_del_port(chip, mdb->addr, port);
return 0;
}
static void lan9303_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d) entered.", __func__, port);
config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
MAC_SYM_PAUSE;
if (port == 0) {
__set_bit(PHY_INTERFACE_MODE_RMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_MII,
config->supported_interfaces);
} else {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
/* Compatibility for phylib's default interface type when the
* phy-mode property is absent
*/
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
}
}
static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev, int speed,
int duplex, bool tx_pause,
bool rx_pause)
{
struct lan9303 *chip = ds->priv;
u32 ctl;
u32 reg;
/* On this device, we are only interested in doing something here if
* this is the xMII port. All other ports are 10/100 PHYs using MDIO
* to control their link settings.
*/
if (!IS_PORT_XMII(port))
return;
/* Disable auto-negotiation and force the speed/duplex settings. */
ctl = lan9303_phy_read(ds, port, MII_BMCR);
ctl &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
if (speed == SPEED_100)
ctl |= BMCR_SPEED100;
if (duplex == DUPLEX_FULL)
ctl |= BMCR_FULLDPLX;
lan9303_phy_write(ds, port, MII_BMCR, ctl);
/* Force the flow control settings. */
lan9303_read(chip->regmap, flow_ctl_reg[port], &reg);
reg &= ~(LAN9303_BP_EN | LAN9303_RX_FC_EN | LAN9303_TX_FC_EN);
if (rx_pause)
reg |= (LAN9303_RX_FC_EN | LAN9303_BP_EN);
if (tx_pause)
reg |= LAN9303_TX_FC_EN;
regmap_write(chip->regmap, flow_ctl_reg[port], reg);
}
static const struct dsa_switch_ops lan9303_switch_ops = {
.get_tag_protocol = lan9303_get_tag_protocol,
.setup = lan9303_setup,
.get_strings = lan9303_get_strings,
.phy_read = lan9303_phy_read,
.phy_write = lan9303_phy_write,
.phylink_get_caps = lan9303_phylink_get_caps,
.phylink_mac_link_up = lan9303_phylink_mac_link_up,
.get_ethtool_stats = lan9303_get_ethtool_stats,
.get_sset_count = lan9303_get_sset_count,
.port_enable = lan9303_port_enable,
.port_disable = lan9303_port_disable,
.port_bridge_join = lan9303_port_bridge_join,
.port_bridge_leave = lan9303_port_bridge_leave,
.port_stp_state_set = lan9303_port_stp_state_set,
.port_fast_age = lan9303_port_fast_age,
.port_fdb_add = lan9303_port_fdb_add,
.port_fdb_del = lan9303_port_fdb_del,
.port_fdb_dump = lan9303_port_fdb_dump,
.port_mdb_add = lan9303_port_mdb_add,
.port_mdb_del = lan9303_port_mdb_del,
};
static int lan9303_register_switch(struct lan9303 *chip)
{
int base;
chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
if (!chip->ds)
return -ENOMEM;
chip->ds->dev = chip->dev;
chip->ds->num_ports = LAN9303_NUM_PORTS;
chip->ds->priv = chip;
chip->ds->ops = &lan9303_switch_ops;
base = chip->phy_addr_base;
chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
return dsa_register_switch(chip->ds);
}
static int lan9303_probe_reset_gpio(struct lan9303 *chip,
struct device_node *np)
{
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(chip->reset_gpio))
return PTR_ERR(chip->reset_gpio);
if (!chip->reset_gpio) {
dev_dbg(chip->dev, "No reset GPIO defined\n");
return 0;
}
chip->reset_duration = 200;
if (np) {
of_property_read_u32(np, "reset-duration",
&chip->reset_duration);
} else {
dev_dbg(chip->dev, "reset duration defaults to 200 ms\n");
}
/* A sane reset duration should not be longer than 1s */
if (chip->reset_duration > 1000)
chip->reset_duration = 1000;
return 0;
}
int lan9303_probe(struct lan9303 *chip, struct device_node *np)
{
int ret;
u32 reg;
mutex_init(&chip->indirect_mutex);
mutex_init(&chip->alr_mutex);
ret = lan9303_probe_reset_gpio(chip, np);
if (ret)
return ret;
lan9303_handle_reset(chip);
/* First read from the device. This is a dummy read to ensure MDIO
* access is in 32-bit sync.
*/
ret = lan9303_read(chip->regmap, LAN9303_BYTE_ORDER, &reg);
if (ret) {
dev_err(chip->dev, "failed to access the device: %d\n",
ret);
if (!chip->reset_gpio) {
dev_dbg(chip->dev,
"hint: maybe failed due to missing reset GPIO\n");
}
return ret;
}
ret = lan9303_check_device(chip);
if (ret)
return ret;
ret = lan9303_register_switch(chip);
if (ret) {
dev_dbg(chip->dev, "Failed to register switch: %d\n", ret);
return ret;
}
return 0;
}
EXPORT_SYMBOL(lan9303_probe);
int lan9303_remove(struct lan9303 *chip)
{
int rc;
rc = lan9303_disable_processing(chip);
if (rc != 0)
dev_warn(chip->dev, "shutting down failed\n");
dsa_unregister_switch(chip->ds);
/* assert reset to the whole device to prevent it from doing anything */
gpiod_set_value_cansleep(chip->reset_gpio, 1);
return 0;
}
EXPORT_SYMBOL(lan9303_remove);
void lan9303_shutdown(struct lan9303 *chip)
{
dsa_switch_shutdown(chip->ds);
}
EXPORT_SYMBOL(lan9303_shutdown);
MODULE_AUTHOR("Juergen Borleis <[email protected]>");
MODULE_DESCRIPTION("Core driver for SMSC/Microchip LAN9303 three port ethernet switch");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/lan9303-core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Pengutronix, Juergen Borleis <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include "lan9303.h"
struct lan9303_i2c {
struct i2c_client *device;
struct lan9303 chip;
};
static const struct regmap_config lan9303_i2c_regmap_config = {
.reg_bits = 8,
.val_bits = 32,
.reg_stride = 1,
.can_multi_write = true,
.max_register = 0x0ff, /* address bits 0..1 are not used */
.reg_format_endian = REGMAP_ENDIAN_LITTLE,
.volatile_table = &lan9303_register_set,
.wr_table = &lan9303_register_set,
.rd_table = &lan9303_register_set,
.cache_type = REGCACHE_NONE,
};
static int lan9303_i2c_probe(struct i2c_client *client)
{
struct lan9303_i2c *sw_dev;
int ret;
sw_dev = devm_kzalloc(&client->dev, sizeof(struct lan9303_i2c),
GFP_KERNEL);
if (!sw_dev)
return -ENOMEM;
sw_dev->chip.regmap = devm_regmap_init_i2c(client,
&lan9303_i2c_regmap_config);
if (IS_ERR(sw_dev->chip.regmap)) {
ret = PTR_ERR(sw_dev->chip.regmap);
dev_err(&client->dev, "Failed to allocate register map: %d\n",
ret);
return ret;
}
/* link forward and backward */
sw_dev->device = client;
i2c_set_clientdata(client, sw_dev);
sw_dev->chip.dev = &client->dev;
sw_dev->chip.ops = &lan9303_indirect_phy_ops;
ret = lan9303_probe(&sw_dev->chip, client->dev.of_node);
if (ret != 0)
return ret;
dev_info(&client->dev, "LAN9303 I2C driver loaded successfully\n");
return 0;
}
static void lan9303_i2c_remove(struct i2c_client *client)
{
struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
if (!sw_dev)
return;
lan9303_remove(&sw_dev->chip);
}
static void lan9303_i2c_shutdown(struct i2c_client *client)
{
struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
if (!sw_dev)
return;
lan9303_shutdown(&sw_dev->chip);
i2c_set_clientdata(client, NULL);
}
/*-------------------------------------------------------------------------*/
static const struct i2c_device_id lan9303_i2c_id[] = {
{ "lan9303", 0 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, lan9303_i2c_id);
static const struct of_device_id lan9303_i2c_of_match[] = {
{ .compatible = "smsc,lan9303-i2c", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lan9303_i2c_of_match);
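/* A minimal (hypothetical) device tree fragment for matching this driver;
* real bindings also describe the ports, and the I2C address below is only
* an example that depends on the board wiring:
*
*	switch@a {
*		compatible = "smsc,lan9303-i2c";
*		reg = <0xa>;
*	};
*/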
static struct i2c_driver lan9303_i2c_driver = {
.driver = {
.name = "LAN9303_I2C",
.of_match_table = lan9303_i2c_of_match,
},
.probe = lan9303_i2c_probe,
.remove = lan9303_i2c_remove,
.shutdown = lan9303_i2c_shutdown,
.id_table = lan9303_i2c_id,
};
module_i2c_driver(lan9303_i2c_driver);
MODULE_AUTHOR("Juergen Borleis <[email protected]>");
MODULE_DESCRIPTION("Driver for SMSC/Microchip LAN9303 three port ethernet switch in I2C managed mode");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/lan9303_i2c.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/phy.h>
#include <net/dsa.h>
#include "dsa_loop.h"
static struct dsa_loop_pdata dsa_loop_pdata = {
.cd = {
.port_names[0] = "lan1",
.port_names[1] = "lan2",
.port_names[2] = "lan3",
.port_names[3] = "lan4",
.port_names[DSA_LOOP_CPU_PORT] = "cpu",
},
.name = "DSA mockup driver",
.enabled_ports = 0x1f,
.netdev = "eth0",
};
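/* Attach the platform data above to the mockup switch device at MDIO
* address 31 on the "fixed-0" bus.
*/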
static const struct mdio_board_info bdinfo = {
.bus_id = "fixed-0",
.modalias = "dsa-loop",
.mdio_addr = 31,
.platform_data = &dsa_loop_pdata,
};
static int __init dsa_loop_bdinfo_init(void)
{
return mdiobus_register_board_info(&bdinfo, 1);
}
arch_initcall(dsa_loop_bdinfo_init)
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/dsa_loop_bdinfo.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* net/dsa/mv88e6060.c - Driver for Marvell 88e6060 switch chips
* Copyright (c) 2008-2009 Marvell Semiconductor
*/
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <net/dsa.h>
#include "mv88e6060.h"
static int reg_read(struct mv88e6060_priv *priv, int addr, int reg)
{
return mdiobus_read_nested(priv->bus, priv->sw_addr + addr, reg);
}
static int reg_write(struct mv88e6060_priv *priv, int addr, int reg, u16 val)
{
return mdiobus_write_nested(priv->bus, priv->sw_addr + addr, reg, val);
}
static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
{
int ret;
ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID);
if (ret >= 0) {
if (ret == PORT_SWITCH_ID_6060)
return "Marvell 88E6060 (A0)";
if (ret == PORT_SWITCH_ID_6060_R1 ||
ret == PORT_SWITCH_ID_6060_R2)
return "Marvell 88E6060 (B0)";
if ((ret & PORT_SWITCH_ID_6060_MASK) == PORT_SWITCH_ID_6060)
return "Marvell 88E6060";
}
return NULL;
}
static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol m)
{
return DSA_TAG_PROTO_TRAILER;
}
static int mv88e6060_switch_reset(struct mv88e6060_priv *priv)
{
int i;
int ret;
unsigned long timeout;
/* Set all ports to the disabled state. */
for (i = 0; i < MV88E6060_PORTS; i++) {
ret = reg_read(priv, REG_PORT(i), PORT_CONTROL);
if (ret < 0)
return ret;
ret = reg_write(priv, REG_PORT(i), PORT_CONTROL,
ret & ~PORT_CONTROL_STATE_MASK);
if (ret)
return ret;
}
/* Wait for transmit queues to drain. */
usleep_range(2000, 4000);
/* Reset the switch. */
ret = reg_write(priv, REG_GLOBAL, GLOBAL_ATU_CONTROL,
GLOBAL_ATU_CONTROL_SWRESET |
GLOBAL_ATU_CONTROL_LEARNDIS);
if (ret)
return ret;
/* Wait up to one second for reset to complete. */
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
ret = reg_read(priv, REG_GLOBAL, GLOBAL_STATUS);
if (ret < 0)
return ret;
if (ret & GLOBAL_STATUS_INIT_READY)
break;
usleep_range(1000, 2000);
}
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
return 0;
}
static int mv88e6060_setup_global(struct mv88e6060_priv *priv)
{
int ret;
/* Disable discarding of frames with excessive collisions,
* set the maximum frame size to 1536 bytes, and mask all
* interrupt sources.
*/
ret = reg_write(priv, REG_GLOBAL, GLOBAL_CONTROL,
GLOBAL_CONTROL_MAX_FRAME_1536);
if (ret)
return ret;
/* Disable automatic address learning.
*/
return reg_write(priv, REG_GLOBAL, GLOBAL_ATU_CONTROL,
GLOBAL_ATU_CONTROL_LEARNDIS);
}
static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
{
int addr = REG_PORT(p);
int ret;
if (dsa_is_unused_port(priv->ds, p))
return 0;
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
* port, enable Ingress and Egress Trailer tagging mode.
*/
ret = reg_write(priv, addr, PORT_CONTROL,
dsa_is_cpu_port(priv->ds, p) ?
PORT_CONTROL_TRAILER |
PORT_CONTROL_INGRESS_MODE |
PORT_CONTROL_STATE_FORWARDING :
PORT_CONTROL_STATE_FORWARDING);
if (ret)
return ret;
/* Port based VLAN map: give each port its own address
* database, allow the CPU port to talk to each of the 'real'
* ports, and allow each of the 'real' ports to only talk to
* the CPU port.
*/
ret = reg_write(priv, addr, PORT_VLAN_MAP,
((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
(dsa_is_cpu_port(priv->ds, p) ?
dsa_user_ports(priv->ds) :
BIT(dsa_to_port(priv->ds, p)->cpu_dp->index)));
if (ret)
return ret;
/* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
* a port bitmap that has only the bit for this port set and
* the other bits clear.
*/
return reg_write(priv, addr, PORT_ASSOC_VECTOR, BIT(p));
}
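/* Assign a random MAC address to the switch itself via the GLOBAL_MAC
* registers; the DiffAddr bit is cleared so all ports transmit the same
* source address.
*/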
static int mv88e6060_setup_addr(struct mv88e6060_priv *priv)
{
u8 addr[ETH_ALEN];
int ret;
u16 val;
eth_random_addr(addr);
val = addr[0] << 8 | addr[1];
/* The multicast bit is always transmitted as a zero, so the switch uses
* bit 8 for "DiffAddr", where 0 means all ports transmit the same SA.
*/
val &= 0xfeff;
ret = reg_write(priv, REG_GLOBAL, GLOBAL_MAC_01, val);
if (ret)
return ret;
ret = reg_write(priv, REG_GLOBAL, GLOBAL_MAC_23,
(addr[2] << 8) | addr[3]);
if (ret)
return ret;
return reg_write(priv, REG_GLOBAL, GLOBAL_MAC_45,
(addr[4] << 8) | addr[5]);
}
static int mv88e6060_setup(struct dsa_switch *ds)
{
struct mv88e6060_priv *priv = ds->priv;
int ret;
int i;
priv->ds = ds;
ret = mv88e6060_switch_reset(priv);
if (ret < 0)
return ret;
/* @@@ initialise atu */
ret = mv88e6060_setup_global(priv);
if (ret < 0)
return ret;
ret = mv88e6060_setup_addr(priv);
if (ret < 0)
return ret;
for (i = 0; i < MV88E6060_PORTS; i++) {
ret = mv88e6060_setup_port(priv, i);
if (ret < 0)
return ret;
}
return 0;
}
static int mv88e6060_port_to_phy_addr(int port)
{
if (port >= 0 && port < MV88E6060_PORTS)
return port;
return -1;
}
static int mv88e6060_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct mv88e6060_priv *priv = ds->priv;
int addr;
addr = mv88e6060_port_to_phy_addr(port);
if (addr == -1)
return 0xffff;
return reg_read(priv, addr, regnum);
}
static int
mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
{
struct mv88e6060_priv *priv = ds->priv;
int addr;
addr = mv88e6060_port_to_phy_addr(port);
if (addr == -1)
return 0xffff;
return reg_write(priv, addr, regnum, val);
}
static void mv88e6060_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
unsigned long *interfaces = config->supported_interfaces;
struct mv88e6060_priv *priv = ds->priv;
int addr = REG_PORT(port);
int ret;
ret = reg_read(priv, addr, PORT_STATUS);
if (ret < 0) {
dev_err(ds->dev,
"port %d: unable to read status register: %pe\n",
port, ERR_PTR(ret));
return;
}
/* If the port is configured in SNI mode (acts as a 10Mbps PHY),
* it should have phy-mode = "sni", but that doesn't yet exist, so
* forcibly fail validation until the need arises to introduce it.
*/
if (!(ret & PORT_STATUS_PORTMODE)) {
dev_warn(ds->dev, "port %d: SNI mode not supported\n", port);
return;
}
config->mac_capabilities = MAC_100 | MAC_10 | MAC_SYM_PAUSE;
if (port >= 4) {
/* Ports 4 and 5 can support MII, REVMII and REVRMII modes */
__set_bit(PHY_INTERFACE_MODE_MII, interfaces);
__set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
__set_bit(PHY_INTERFACE_MODE_REVRMII, interfaces);
}
if (port <= 4) {
/* Ports 0 to 3 have internal PHYs, and port 4 can optionally
* use an internal PHY.
*/
/* Internal PHY */
__set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
/* Default phylib interface mode */
__set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
}
}
static const struct dsa_switch_ops mv88e6060_switch_ops = {
.get_tag_protocol = mv88e6060_get_tag_protocol,
.setup = mv88e6060_setup,
.phy_read = mv88e6060_phy_read,
.phy_write = mv88e6060_phy_write,
.phylink_get_caps = mv88e6060_phylink_get_caps,
};
static int mv88e6060_probe(struct mdio_device *mdiodev)
{
struct device *dev = &mdiodev->dev;
struct mv88e6060_priv *priv;
struct dsa_switch *ds;
const char *name;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->bus = mdiodev->bus;
priv->sw_addr = mdiodev->addr;
name = mv88e6060_get_name(priv->bus, priv->sw_addr);
if (!name)
return -ENODEV;
dev_info(dev, "switch %s detected\n", name);
ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
ds->dev = dev;
ds->num_ports = MV88E6060_PORTS;
ds->priv = priv;
ds->dev = dev;
ds->ops = &mv88e6060_switch_ops;
dev_set_drvdata(dev, ds);
return dsa_register_switch(ds);
}
static void mv88e6060_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
if (!ds)
return;
dsa_unregister_switch(ds);
}
static void mv88e6060_shutdown(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
if (!ds)
return;
dsa_switch_shutdown(ds);
dev_set_drvdata(&mdiodev->dev, NULL);
}
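/* A minimal (hypothetical) device tree fragment for matching this driver;
* real bindings also describe the ports, and the MDIO address below is only
* an example that depends on the board's address strapping:
*
*	switch@10 {
*		compatible = "marvell,mv88e6060";
*		reg = <0x10>;
*	};
*/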
static const struct of_device_id mv88e6060_of_match[] = {
{
.compatible = "marvell,mv88e6060",
},
{ /* sentinel */ },
};
static struct mdio_driver mv88e6060_driver = {
.probe = mv88e6060_probe,
.remove = mv88e6060_remove,
.shutdown = mv88e6060_shutdown,
.mdiodrv.driver = {
.name = "mv88e6060",
.of_match_table = mv88e6060_of_match,
},
};
mdio_module_driver(mv88e6060_driver);
MODULE_AUTHOR("Lennert Buytenhek <[email protected]>");
MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mv88e6060");
| linux-master | drivers/net/dsa/mv88e6060.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 Schneider-Electric
*
* Clément Léger <[email protected]>
*/
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>
#include "rzn1_a5psw.h"
struct a5psw_stats {
u16 offset;
const char name[ETH_GSTRING_LEN];
};
#define STAT_DESC(_offset) { \
.offset = A5PSW_##_offset, \
.name = __stringify(_offset), \
}
static const struct a5psw_stats a5psw_stats[] = {
STAT_DESC(aFramesTransmittedOK),
STAT_DESC(aFramesReceivedOK),
STAT_DESC(aFrameCheckSequenceErrors),
STAT_DESC(aAlignmentErrors),
STAT_DESC(aOctetsTransmittedOK),
STAT_DESC(aOctetsReceivedOK),
STAT_DESC(aTxPAUSEMACCtrlFrames),
STAT_DESC(aRxPAUSEMACCtrlFrames),
STAT_DESC(ifInErrors),
STAT_DESC(ifOutErrors),
STAT_DESC(ifInUcastPkts),
STAT_DESC(ifInMulticastPkts),
STAT_DESC(ifInBroadcastPkts),
STAT_DESC(ifOutDiscards),
STAT_DESC(ifOutUcastPkts),
STAT_DESC(ifOutMulticastPkts),
STAT_DESC(ifOutBroadcastPkts),
STAT_DESC(etherStatsDropEvents),
STAT_DESC(etherStatsOctets),
STAT_DESC(etherStatsPkts),
STAT_DESC(etherStatsUndersizePkts),
STAT_DESC(etherStatsOversizePkts),
STAT_DESC(etherStatsPkts64Octets),
STAT_DESC(etherStatsPkts65to127Octets),
STAT_DESC(etherStatsPkts128to255Octets),
STAT_DESC(etherStatsPkts256to511Octets),
STAT_DESC(etherStatsPkts1024to1518Octets),
STAT_DESC(etherStatsPkts1519toXOctets),
STAT_DESC(etherStatsJabbers),
STAT_DESC(etherStatsFragments),
STAT_DESC(VLANReceived),
STAT_DESC(VLANTransmitted),
STAT_DESC(aDeferred),
STAT_DESC(aMultipleCollisions),
STAT_DESC(aSingleCollisions),
STAT_DESC(aLateCollisions),
STAT_DESC(aExcessiveCollisions),
STAT_DESC(aCarrierSenseErrors),
};
static void a5psw_reg_writel(struct a5psw *a5psw, int offset, u32 value)
{
writel(value, a5psw->base + offset);
}
static u32 a5psw_reg_readl(struct a5psw *a5psw, int offset)
{
return readl(a5psw->base + offset);
}
static void a5psw_reg_rmw(struct a5psw *a5psw, int offset, u32 mask, u32 val)
{
u32 reg;
spin_lock(&a5psw->reg_lock);
reg = a5psw_reg_readl(a5psw, offset);
reg &= ~mask;
reg |= val;
a5psw_reg_writel(a5psw, offset, reg);
spin_unlock(&a5psw->reg_lock);
}
static enum dsa_tag_protocol a5psw_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_RZN1_A5PSW;
}
static void a5psw_port_pattern_set(struct a5psw *a5psw, int port, int pattern,
bool enable)
{
u32 rx_match = 0;
if (enable)
rx_match |= A5PSW_RXMATCH_CONFIG_PATTERN(pattern);
a5psw_reg_rmw(a5psw, A5PSW_RXMATCH_CONFIG(port),
A5PSW_RXMATCH_CONFIG_PATTERN(pattern), rx_match);
}
static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
{
/* Enable "management forward" pattern matching, this will forward
* packets from this port only towards the management port and thus
* isolate the port.
*/
a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
}
static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable)
{
u32 mask = A5PSW_PORT_ENA_TX(port);
u32 reg = enable ? mask : 0;
/* Even though the port TX is disabled through TXENA bit in the
* PORT_ENA register, it can still send BPDUs. This depends on the tag
* configuration added when sending packets from the CPU port to the
* switch port. Indeed, when using forced forwarding without filtering,
* even disabled ports will be able to send packets that are tagged.
* This allows to implement STP support when ports are in a state where
* forwarding traffic should be stopped but BPDUs should still be sent.
*/
a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg);
}
static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
{
u32 port_ena = 0;
if (enable)
port_ena |= A5PSW_PORT_ENA_TX_RX(port);
a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port),
port_ena);
}
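/* Write a lookup engine command to LK_ADDR_CTRL and wait for its BUSY bit
* to clear.
*/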
static int a5psw_lk_execute_ctrl(struct a5psw *a5psw, u32 *ctrl)
{
int ret;
a5psw_reg_writel(a5psw, A5PSW_LK_ADDR_CTRL, *ctrl);
ret = readl_poll_timeout(a5psw->base + A5PSW_LK_ADDR_CTRL, *ctrl,
!(*ctrl & A5PSW_LK_ADDR_CTRL_BUSY),
A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
if (ret)
dev_err(a5psw->dev, "LK_CTRL timeout waiting for BUSY bit\n");
return ret;
}
static void a5psw_port_fdb_flush(struct a5psw *a5psw, int port)
{
u32 ctrl = A5PSW_LK_ADDR_CTRL_DELETE_PORT | BIT(port);
mutex_lock(&a5psw->lk_lock);
a5psw_lk_execute_ctrl(a5psw, &ctrl);
mutex_unlock(&a5psw->lk_lock);
}
static void a5psw_port_authorize_set(struct a5psw *a5psw, int port,
bool authorize)
{
u32 reg = a5psw_reg_readl(a5psw, A5PSW_AUTH_PORT(port));
if (authorize)
reg |= A5PSW_AUTH_PORT_AUTHORIZED;
else
reg &= ~A5PSW_AUTH_PORT_AUTHORIZED;
a5psw_reg_writel(a5psw, A5PSW_AUTH_PORT(port), reg);
}
static void a5psw_port_disable(struct dsa_switch *ds, int port)
{
struct a5psw *a5psw = ds->priv;
a5psw_port_authorize_set(a5psw, port, false);
a5psw_port_enable_set(a5psw, port, false);
}
static int a5psw_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct a5psw *a5psw = ds->priv;
a5psw_port_authorize_set(a5psw, port, true);
a5psw_port_enable_set(a5psw, port, true);
return 0;
}
static int a5psw_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct a5psw *a5psw = ds->priv;
new_mtu += ETH_HLEN + A5PSW_EXTRA_MTU_LEN + ETH_FCS_LEN;
a5psw_reg_writel(a5psw, A5PSW_FRM_LENGTH(port), new_mtu);
return 0;
}
static int a5psw_port_max_mtu(struct dsa_switch *ds, int port)
{
return A5PSW_MAX_MTU;
}
static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
unsigned long *intf = config->supported_interfaces;
config->mac_capabilities = MAC_1000FD;
if (dsa_is_cpu_port(ds, port)) {
/* GMII is used internally and GMAC2 is connected to the switch
* using 1000Mbps Full-Duplex mode only (cf ethernet manual)
*/
__set_bit(PHY_INTERFACE_MODE_GMII, intf);
} else {
config->mac_capabilities |= MAC_100 | MAC_10;
phy_interface_set_rgmii(intf);
__set_bit(PHY_INTERFACE_MODE_RMII, intf);
__set_bit(PHY_INTERFACE_MODE_MII, intf);
}
}
static struct phylink_pcs *
a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct a5psw *a5psw = ds->priv;
if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
return a5psw->pcs[port];
return NULL;
}
static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
struct a5psw *a5psw = ds->priv;
u32 cmd_cfg;
cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
cmd_cfg &= ~(A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA);
a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
}
static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev, int speed,
int duplex, bool tx_pause, bool rx_pause)
{
u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
A5PSW_CMD_CFG_TX_CRC_APPEND;
struct a5psw *a5psw = ds->priv;
if (speed == SPEED_1000)
cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;
if (duplex == DUPLEX_HALF)
cmd_cfg |= A5PSW_CMD_CFG_HD_ENA;
cmd_cfg |= A5PSW_CMD_CFG_CNTL_FRM_ENA;
if (!rx_pause)
cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;
a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
}
static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
struct a5psw *a5psw = ds->priv;
unsigned long rate;
u64 max, tmp;
u32 agetime;
rate = clk_get_rate(a5psw->clk);
max = div64_ul(((u64)A5PSW_LK_AGETIME_MASK * A5PSW_TABLE_ENTRIES * 1024),
rate) * 1000;
if (msecs > max)
return -EINVAL;
tmp = div_u64(rate, MSEC_PER_SEC);
agetime = div_u64(msecs * tmp, 1024 * A5PSW_TABLE_ENTRIES);
a5psw_reg_writel(a5psw, A5PSW_LK_AGETIME, agetime);
return 0;
}
static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
{
u32 mask = A5PSW_INPUT_LEARN_DIS(port);
u32 reg = !learn ? mask : 0;
a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
}
static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
{
u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
u32 reg = block ? mask : 0;
a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
}
static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
bool set)
{
u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
A5PSW_MCAST_DEF_MASK};
int i;
for (i = 0; i < ARRAY_SIZE(offsets); i++)
a5psw_reg_rmw(a5psw, offsets[i], BIT(port),
set ? BIT(port) : 0);
}
static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
bool standalone)
{
a5psw_port_learning_set(a5psw, port, !standalone);
a5psw_flooding_set_resolution(a5psw, port, !standalone);
a5psw_port_mgmtfwd_set(a5psw, port, standalone);
}
static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
struct a5psw *a5psw = ds->priv;
/* We only support 1 bridge device */
if (a5psw->br_dev && bridge.dev != a5psw->br_dev) {
NL_SET_ERR_MSG_MOD(extack,
"Forwarding offload supported for a single bridge");
return -EOPNOTSUPP;
}
a5psw->br_dev = bridge.dev;
a5psw_port_set_standalone(a5psw, port, false);
a5psw->bridged_ports |= BIT(port);
return 0;
}
static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct a5psw *a5psw = ds->priv;
a5psw->bridged_ports &= ~BIT(port);
a5psw_port_set_standalone(a5psw, port, true);
/* No more ports bridged */
if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
a5psw->br_dev = NULL;
}
static int a5psw_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
BR_BCAST_FLOOD))
return -EINVAL;
return 0;
}
static int
a5psw_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct a5psw *a5psw = ds->priv;
u32 val;
/* If a port is set as standalone, we do not want to be able to
* configure flooding or learning, which would result in joining the
* single bridge. This can happen when a port leaves the bridge, in
* which case the DSA core will try to "clear" all flags for the
* standalone port (i.e. enable flooding, disable learning). In that
* case do not fail, but do not apply the flags either.
*/
if (!(a5psw->bridged_ports & BIT(port)))
return 0;
if (flags.mask & BR_LEARNING) {
val = flags.val & BR_LEARNING ? 0 : A5PSW_INPUT_LEARN_DIS(port);
a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN,
A5PSW_INPUT_LEARN_DIS(port), val);
}
if (flags.mask & BR_FLOOD) {
val = flags.val & BR_FLOOD ? BIT(port) : 0;
a5psw_reg_rmw(a5psw, A5PSW_UCAST_DEF_MASK, BIT(port), val);
}
if (flags.mask & BR_MCAST_FLOOD) {
val = flags.val & BR_MCAST_FLOOD ? BIT(port) : 0;
a5psw_reg_rmw(a5psw, A5PSW_MCAST_DEF_MASK, BIT(port), val);
}
if (flags.mask & BR_BCAST_FLOOD) {
val = flags.val & BR_BCAST_FLOOD ? BIT(port) : 0;
a5psw_reg_rmw(a5psw, A5PSW_BCAST_DEF_MASK, BIT(port), val);
}
return 0;
}
static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
bool learning_enabled, rx_enabled, tx_enabled;
struct dsa_port *dp = dsa_to_port(ds, port);
struct a5psw *a5psw = ds->priv;
switch (state) {
case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
case BR_STATE_LISTENING:
rx_enabled = false;
tx_enabled = false;
learning_enabled = false;
break;
case BR_STATE_LEARNING:
rx_enabled = false;
tx_enabled = false;
learning_enabled = dp->learning;
break;
case BR_STATE_FORWARDING:
rx_enabled = true;
tx_enabled = true;
learning_enabled = dp->learning;
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
return;
}
a5psw_port_learning_set(a5psw, port, learning_enabled);
a5psw_port_rx_block_set(a5psw, port, !rx_enabled);
a5psw_port_tx_enable(a5psw, port, tx_enabled);
}
static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
{
struct a5psw *a5psw = ds->priv;
a5psw_port_fdb_flush(a5psw, port);
}
static int a5psw_lk_execute_lookup(struct a5psw *a5psw, union lk_data *lk_data,
u16 *entry)
{
u32 ctrl;
int ret;
a5psw_reg_writel(a5psw, A5PSW_LK_DATA_LO, lk_data->lo);
a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data->hi);
ctrl = A5PSW_LK_ADDR_CTRL_LOOKUP;
ret = a5psw_lk_execute_ctrl(a5psw, &ctrl);
if (ret)
return ret;
*entry = ctrl & A5PSW_LK_ADDR_CTRL_ADDRESS;
return 0;
}
static int a5psw_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct a5psw *a5psw = ds->priv;
union lk_data lk_data = {0};
bool inc_learncount = false;
int ret = 0;
u16 entry;
u32 reg;
ether_addr_copy(lk_data.entry.mac, addr);
lk_data.entry.port_mask = BIT(port);
mutex_lock(&a5psw->lk_lock);
/* Set the value to be written in the lookup table */
ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
if (ret)
goto lk_unlock;
lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
if (!lk_data.entry.valid) {
inc_learncount = true;
/* port_mask set to 0x1f when entry is not valid, clear it */
lk_data.entry.port_mask = 0;
lk_data.entry.prio = 0;
}
lk_data.entry.port_mask |= BIT(port);
lk_data.entry.is_static = 1;
lk_data.entry.valid = 1;
a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
reg = A5PSW_LK_ADDR_CTRL_WRITE | entry;
ret = a5psw_lk_execute_ctrl(a5psw, &reg);
if (ret)
goto lk_unlock;
if (inc_learncount) {
reg = A5PSW_LK_LEARNCOUNT_MODE_INC;
a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
}
lk_unlock:
mutex_unlock(&a5psw->lk_lock);
return ret;
}
static int a5psw_port_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct a5psw *a5psw = ds->priv;
union lk_data lk_data = {0};
bool clear = false;
u16 entry;
u32 reg;
int ret;
ether_addr_copy(lk_data.entry.mac, addr);
mutex_lock(&a5psw->lk_lock);
ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
if (ret)
goto lk_unlock;
lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
/* Our hardware does not associate any VID with the FDB entries, so if
* two entries were added for the same MAC but with different VIDs,
* deleting the first one would also delete the second one. Since there
* is unfortunately nothing we can do about that, do not return an
* error...
*/
if (!lk_data.entry.valid)
goto lk_unlock;
lk_data.entry.port_mask &= ~BIT(port);
/* If there is no more port in the mask, clear the entry */
if (lk_data.entry.port_mask == 0)
clear = true;
a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
reg = entry;
if (clear)
reg |= A5PSW_LK_ADDR_CTRL_CLEAR;
else
reg |= A5PSW_LK_ADDR_CTRL_WRITE;
ret = a5psw_lk_execute_ctrl(a5psw, &reg);
if (ret)
goto lk_unlock;
/* Decrement LEARNCOUNT */
if (clear) {
reg = A5PSW_LK_LEARNCOUNT_MODE_DEC;
a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
}
lk_unlock:
mutex_unlock(&a5psw->lk_lock);
return ret;
}
static int a5psw_port_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct a5psw *a5psw = ds->priv;
union lk_data lk_data;
int i = 0, ret = 0;
u32 reg;
mutex_lock(&a5psw->lk_lock);
for (i = 0; i < A5PSW_TABLE_ENTRIES; i++) {
reg = A5PSW_LK_ADDR_CTRL_READ | A5PSW_LK_ADDR_CTRL_WAIT | i;
ret = a5psw_lk_execute_ctrl(a5psw, &reg);
if (ret)
goto out_unlock;
lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
/* If entry is not valid or does not contain the port, skip */
if (!lk_data.entry.valid ||
!(lk_data.entry.port_mask & BIT(port)))
continue;
lk_data.lo = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_LO);
ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
if (ret)
goto out_unlock;
}
out_unlock:
mutex_unlock(&a5psw->lk_lock);
return ret;
}
static int a5psw_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
u32 mask = BIT(port + A5PSW_VLAN_VERI_SHIFT) |
BIT(port + A5PSW_VLAN_DISC_SHIFT);
u32 val = vlan_filtering ? mask : 0;
struct a5psw *a5psw = ds->priv;
/* Disable/enable vlan tagging */
a5psw_reg_rmw(a5psw, A5PSW_VLAN_IN_MODE_ENA, BIT(port),
vlan_filtering ? BIT(port) : 0);
/* Disable/enable vlan input filtering */
a5psw_reg_rmw(a5psw, A5PSW_VLAN_VERIFY, mask, val);
return 0;
}
static int a5psw_find_vlan_entry(struct a5psw *a5psw, u16 vid)
{
u32 vlan_res;
int i;
/* Find vlan for this port */
for (i = 0; i < A5PSW_VLAN_COUNT; i++) {
vlan_res = a5psw_reg_readl(a5psw, A5PSW_VLAN_RES(i));
if (FIELD_GET(A5PSW_VLAN_RES_VLANID, vlan_res) == vid)
return i;
}
return -1;
}
static int a5psw_new_vlan_res_entry(struct a5psw *a5psw, u16 newvid)
{
u32 vlan_res;
int i;
/* Find a free VLAN entry */
for (i = 0; i < A5PSW_VLAN_COUNT; i++) {
vlan_res = a5psw_reg_readl(a5psw, A5PSW_VLAN_RES(i));
if (!(FIELD_GET(A5PSW_VLAN_RES_PORTMASK, vlan_res))) {
vlan_res = FIELD_PREP(A5PSW_VLAN_RES_VLANID, newvid);
a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(i), vlan_res);
return i;
}
}
return -1;
}
static void a5psw_port_vlan_tagged_cfg(struct a5psw *a5psw,
unsigned int vlan_res_id, int port,
bool set)
{
u32 mask = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_RD_TAGMASK |
BIT(port);
u32 vlan_res_off = A5PSW_VLAN_RES(vlan_res_id);
u32 val = A5PSW_VLAN_RES_WR_TAGMASK, reg;
if (set)
val |= BIT(port);
/* Toggle tag mask read */
a5psw_reg_writel(a5psw, vlan_res_off, A5PSW_VLAN_RES_RD_TAGMASK);
reg = a5psw_reg_readl(a5psw, vlan_res_off);
a5psw_reg_writel(a5psw, vlan_res_off, A5PSW_VLAN_RES_RD_TAGMASK);
reg &= ~mask;
reg |= val;
a5psw_reg_writel(a5psw, vlan_res_off, reg);
}
static void a5psw_port_vlan_cfg(struct a5psw *a5psw, unsigned int vlan_res_id,
int port, bool set)
{
u32 mask = A5PSW_VLAN_RES_WR_TAGMASK | BIT(port);
u32 reg = A5PSW_VLAN_RES_WR_PORTMASK;
if (set)
reg |= BIT(port);
a5psw_reg_rmw(a5psw, A5PSW_VLAN_RES(vlan_res_id), mask, reg);
}
static int a5psw_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
bool tagged = !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct a5psw *a5psw = ds->priv;
u16 vid = vlan->vid;
int vlan_res_id;
vlan_res_id = a5psw_find_vlan_entry(a5psw, vid);
if (vlan_res_id < 0) {
vlan_res_id = a5psw_new_vlan_res_entry(a5psw, vid);
if (vlan_res_id < 0)
return -ENOSPC;
}
a5psw_port_vlan_cfg(a5psw, vlan_res_id, port, true);
if (tagged)
a5psw_port_vlan_tagged_cfg(a5psw, vlan_res_id, port, true);
/* Configure the port to tag with the corresponding VID, but do not
* enable it yet: VLAN port tagging is only turned on once VLAN
* filtering is enabled
*/
if (pvid)
a5psw_reg_writel(a5psw, A5PSW_SYSTEM_TAGINFO(port), vid);
return 0;
}
static int a5psw_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct a5psw *a5psw = ds->priv;
u16 vid = vlan->vid;
int vlan_res_id;
vlan_res_id = a5psw_find_vlan_entry(a5psw, vid);
if (vlan_res_id < 0)
return -EINVAL;
a5psw_port_vlan_cfg(a5psw, vlan_res_id, port, false);
a5psw_port_vlan_tagged_cfg(a5psw, vlan_res_id, port, false);
return 0;
}
static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
{
u32 reg_lo, reg_hi;
reg_lo = a5psw_reg_readl(a5psw, offset + A5PSW_PORT_OFFSET(port));
/* A5PSW_STATS_HIWORD is latched on stat read */
reg_hi = a5psw_reg_readl(a5psw, A5PSW_STATS_HIWORD);
return ((u64)reg_hi << 32) | reg_lo;
}
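/* Example with hypothetical values: if the low word read returns 0x00000002
* and the latched A5PSW_STATS_HIWORD read then returns 0x00000001, the
* 64-bit counter reported upwards is ((u64)0x1 << 32) | 0x2 = 0x100000002.
* Reading the per-port low word first is what latches a coherent high word,
* so the two reads must stay in this order.
*/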
static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
{
unsigned int u;
if (stringset != ETH_SS_STATS)
return;
for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++) {
memcpy(data + u * ETH_GSTRING_LEN, a5psw_stats[u].name,
ETH_GSTRING_LEN);
}
}
static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct a5psw *a5psw = ds->priv;
unsigned int u;
for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
data[u] = a5psw_read_stat(a5psw, a5psw_stats[u].offset, port);
}
static int a5psw_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
if (sset != ETH_SS_STATS)
return 0;
return ARRAY_SIZE(a5psw_stats);
}
static void a5psw_get_eth_mac_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_mac_stats *mac_stats)
{
struct a5psw *a5psw = ds->priv;
#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
mac_stats->FramesTransmittedOK = RD(aFramesTransmittedOK);
mac_stats->SingleCollisionFrames = RD(aSingleCollisions);
mac_stats->MultipleCollisionFrames = RD(aMultipleCollisions);
mac_stats->FramesReceivedOK = RD(aFramesReceivedOK);
mac_stats->FrameCheckSequenceErrors = RD(aFrameCheckSequenceErrors);
mac_stats->AlignmentErrors = RD(aAlignmentErrors);
mac_stats->OctetsTransmittedOK = RD(aOctetsTransmittedOK);
mac_stats->FramesWithDeferredXmissions = RD(aDeferred);
mac_stats->LateCollisions = RD(aLateCollisions);
mac_stats->FramesAbortedDueToXSColls = RD(aExcessiveCollisions);
mac_stats->FramesLostDueToIntMACXmitError = RD(ifOutErrors);
mac_stats->CarrierSenseErrors = RD(aCarrierSenseErrors);
mac_stats->OctetsReceivedOK = RD(aOctetsReceivedOK);
mac_stats->FramesLostDueToIntMACRcvError = RD(ifInErrors);
mac_stats->MulticastFramesXmittedOK = RD(ifOutMulticastPkts);
mac_stats->BroadcastFramesXmittedOK = RD(ifOutBroadcastPkts);
mac_stats->FramesWithExcessiveDeferral = RD(aDeferred);
mac_stats->MulticastFramesReceivedOK = RD(ifInMulticastPkts);
mac_stats->BroadcastFramesReceivedOK = RD(ifInBroadcastPkts);
#undef RD
}
static const struct ethtool_rmon_hist_range a5psw_rmon_ranges[] = {
{ 0, 64 },
{ 65, 127 },
{ 128, 255 },
{ 256, 511 },
{ 512, 1023 },
{ 1024, 1518 },
{ 1519, A5PSW_MAX_MTU },
{}
};
static void a5psw_get_rmon_stats(struct dsa_switch *ds, int port,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges)
{
struct a5psw *a5psw = ds->priv;
#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
rmon_stats->undersize_pkts = RD(etherStatsUndersizePkts);
rmon_stats->oversize_pkts = RD(etherStatsOversizePkts);
rmon_stats->fragments = RD(etherStatsFragments);
rmon_stats->jabbers = RD(etherStatsJabbers);
rmon_stats->hist[0] = RD(etherStatsPkts64Octets);
rmon_stats->hist[1] = RD(etherStatsPkts65to127Octets);
rmon_stats->hist[2] = RD(etherStatsPkts128to255Octets);
rmon_stats->hist[3] = RD(etherStatsPkts256to511Octets);
rmon_stats->hist[4] = RD(etherStatsPkts512to1023Octets);
rmon_stats->hist[5] = RD(etherStatsPkts1024to1518Octets);
rmon_stats->hist[6] = RD(etherStatsPkts1519toXOctets);
#undef RD
*ranges = a5psw_rmon_ranges;
}
static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
struct a5psw *a5psw = ds->priv;
u64 stat;
stat = a5psw_read_stat(a5psw, A5PSW_aTxPAUSEMACCtrlFrames, port);
ctrl_stats->MACControlFramesTransmitted = stat;
stat = a5psw_read_stat(a5psw, A5PSW_aRxPAUSEMACCtrlFrames, port);
ctrl_stats->MACControlFramesReceived = stat;
}
static void a5psw_vlan_setup(struct a5psw *a5psw, int port)
{
u32 reg;
/* Enable TAG always mode for the port; this is actually controlled
* by the VLAN_IN_MODE_ENA field, which will be used for PVID insertion
*/
reg = A5PSW_VLAN_IN_MODE_TAG_ALWAYS;
reg <<= A5PSW_VLAN_IN_MODE_PORT_SHIFT(port);
a5psw_reg_rmw(a5psw, A5PSW_VLAN_IN_MODE, A5PSW_VLAN_IN_MODE_PORT(port),
reg);
/* Set transparent mode for output frame manipulation; this will depend
* on the VLAN_RES configuration mode
*/
reg = A5PSW_VLAN_OUT_MODE_TRANSPARENT;
reg <<= A5PSW_VLAN_OUT_MODE_PORT_SHIFT(port);
a5psw_reg_rmw(a5psw, A5PSW_VLAN_OUT_MODE,
A5PSW_VLAN_OUT_MODE_PORT(port), reg);
}
static int a5psw_setup(struct dsa_switch *ds)
{
struct a5psw *a5psw = ds->priv;
int port, vlan, ret;
struct dsa_port *dp;
u32 reg;
/* Validate that there is only 1 CPU port with index A5PSW_CPU_PORT */
dsa_switch_for_each_cpu_port(dp, ds) {
if (dp->index != A5PSW_CPU_PORT) {
dev_err(a5psw->dev, "Invalid CPU port\n");
return -EINVAL;
}
}
/* Configure management port */
reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE;
a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
/* Set pattern 0 to forward all frame to mgmt port */
a5psw_reg_writel(a5psw, A5PSW_PATTERN_CTRL(A5PSW_PATTERN_MGMTFWD),
A5PSW_PATTERN_CTRL_MGMTFWD);
/* Enable port tagging */
reg = FIELD_PREP(A5PSW_MGMT_TAG_CFG_TAGFIELD, ETH_P_DSA_A5PSW);
reg |= A5PSW_MGMT_TAG_CFG_ENABLE | A5PSW_MGMT_TAG_CFG_ALL_FRAMES;
a5psw_reg_writel(a5psw, A5PSW_MGMT_TAG_CFG, reg);
/* Enable normal switch operation */
reg = A5PSW_LK_ADDR_CTRL_BLOCKING | A5PSW_LK_ADDR_CTRL_LEARNING |
A5PSW_LK_ADDR_CTRL_AGEING | A5PSW_LK_ADDR_CTRL_ALLOW_MIGR |
A5PSW_LK_ADDR_CTRL_CLEAR_TABLE;
a5psw_reg_writel(a5psw, A5PSW_LK_CTRL, reg);
ret = readl_poll_timeout(a5psw->base + A5PSW_LK_CTRL, reg,
!(reg & A5PSW_LK_ADDR_CTRL_CLEAR_TABLE),
A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
if (ret) {
dev_err(a5psw->dev, "Failed to clear lookup table\n");
return ret;
}
/* Reset learn count to 0 */
reg = A5PSW_LK_LEARNCOUNT_MODE_SET;
a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
/* Clear VLAN resource table */
reg = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_WR_TAGMASK;
for (vlan = 0; vlan < A5PSW_VLAN_COUNT; vlan++)
a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(vlan), reg);
/* Reset all ports */
dsa_switch_for_each_port(dp, ds) {
port = dp->index;
/* Reset the port */
a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port),
A5PSW_CMD_CFG_SW_RESET);
/* Enable only CPU port */
a5psw_port_enable_set(a5psw, port, dsa_port_is_cpu(dp));
if (dsa_port_is_unused(dp))
continue;
/* Enable egress flooding and learning for CPU port */
if (dsa_port_is_cpu(dp)) {
a5psw_flooding_set_resolution(a5psw, port, true);
a5psw_port_learning_set(a5psw, port, true);
}
/* Enable standalone mode for user ports */
if (dsa_port_is_user(dp))
a5psw_port_set_standalone(a5psw, port, true);
a5psw_vlan_setup(a5psw, port);
}
return 0;
}
static const struct dsa_switch_ops a5psw_switch_ops = {
.get_tag_protocol = a5psw_get_tag_protocol,
.setup = a5psw_setup,
.port_disable = a5psw_port_disable,
.port_enable = a5psw_port_enable,
.phylink_get_caps = a5psw_phylink_get_caps,
.phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
.phylink_mac_link_down = a5psw_phylink_mac_link_down,
.phylink_mac_link_up = a5psw_phylink_mac_link_up,
.port_change_mtu = a5psw_port_change_mtu,
.port_max_mtu = a5psw_port_max_mtu,
.get_sset_count = a5psw_get_sset_count,
.get_strings = a5psw_get_strings,
.get_ethtool_stats = a5psw_get_ethtool_stats,
.get_eth_mac_stats = a5psw_get_eth_mac_stats,
.get_eth_ctrl_stats = a5psw_get_eth_ctrl_stats,
.get_rmon_stats = a5psw_get_rmon_stats,
.set_ageing_time = a5psw_set_ageing_time,
.port_bridge_join = a5psw_port_bridge_join,
.port_bridge_leave = a5psw_port_bridge_leave,
.port_pre_bridge_flags = a5psw_port_pre_bridge_flags,
.port_bridge_flags = a5psw_port_bridge_flags,
.port_stp_state_set = a5psw_port_stp_state_set,
.port_fast_age = a5psw_port_fast_age,
.port_vlan_filtering = a5psw_port_vlan_filtering,
.port_vlan_add = a5psw_port_vlan_add,
.port_vlan_del = a5psw_port_vlan_del,
.port_fdb_add = a5psw_port_fdb_add,
.port_fdb_del = a5psw_port_fdb_del,
.port_fdb_dump = a5psw_port_fdb_dump,
};
static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
{
u32 status;
int err;
err = readl_poll_timeout(a5psw->base + A5PSW_MDIO_CFG_STATUS, status,
!(status & A5PSW_MDIO_CFG_STATUS_BUSY), 10,
1000 * USEC_PER_MSEC);
if (err)
dev_err(a5psw->dev, "MDIO command timeout\n");
return err;
}
static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
struct a5psw *a5psw = bus->priv;
u32 cmd, status;
int ret;
cmd = A5PSW_MDIO_COMMAND_READ;
cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
ret = a5psw_mdio_wait_busy(a5psw);
if (ret)
return ret;
ret = a5psw_reg_readl(a5psw, A5PSW_MDIO_DATA) & A5PSW_MDIO_DATA_MASK;
status = a5psw_reg_readl(a5psw, A5PSW_MDIO_CFG_STATUS);
if (status & A5PSW_MDIO_CFG_STATUS_READERR)
return -EIO;
return ret;
}
static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
u16 phy_data)
{
struct a5psw *a5psw = bus->priv;
u32 cmd;
cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
a5psw_reg_writel(a5psw, A5PSW_MDIO_DATA, phy_data);
return a5psw_mdio_wait_busy(a5psw);
}
static int a5psw_mdio_config(struct a5psw *a5psw, u32 mdio_freq)
{
unsigned long rate;
unsigned long div;
u32 cfgstatus;
rate = clk_get_rate(a5psw->hclk);
div = ((rate / mdio_freq) / 2);
if (div > FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) ||
div < A5PSW_MDIO_CLK_DIV_MIN) {
dev_err(a5psw->dev, "MDIO clock div %ld out of range\n", div);
return -ERANGE;
}
cfgstatus = FIELD_PREP(A5PSW_MDIO_CFG_STATUS_CLKDIV, div);
a5psw_reg_writel(a5psw, A5PSW_MDIO_CFG_STATUS, cfgstatus);
return 0;
}
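/* Illustrative divider computation (assuming hclk runs at 125 MHz and the
* default 2.5 MHz MDIO frequency; both values are examples): div =
* (125000000 / 2500000) / 2 = 25, which is then checked against
* A5PSW_MDIO_CLK_DIV_MIN and the CLKDIV field width before being programmed
* into A5PSW_MDIO_CFG_STATUS.
*/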
static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
{
struct device *dev = a5psw->dev;
struct mii_bus *bus;
u32 mdio_freq;
int ret;
if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
mdio_freq = A5PSW_MDIO_DEF_FREQ;
ret = a5psw_mdio_config(a5psw, mdio_freq);
if (ret)
return ret;
bus = devm_mdiobus_alloc(dev);
if (!bus)
return -ENOMEM;
bus->name = "a5psw_mdio";
bus->read = a5psw_mdio_read;
bus->write = a5psw_mdio_write;
bus->priv = a5psw;
bus->parent = dev;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
a5psw->mii_bus = bus;
return devm_of_mdiobus_register(dev, bus, node);
}
static void a5psw_pcs_free(struct a5psw *a5psw)
{
int i;
for (i = 0; i < ARRAY_SIZE(a5psw->pcs); i++) {
if (a5psw->pcs[i])
miic_destroy(a5psw->pcs[i]);
}
}
static int a5psw_pcs_get(struct a5psw *a5psw)
{
struct device_node *ports, *port, *pcs_node;
struct phylink_pcs *pcs;
int ret;
u32 reg;
ports = of_get_child_by_name(a5psw->dev->of_node, "ethernet-ports");
if (!ports)
return -EINVAL;
for_each_available_child_of_node(ports, port) {
pcs_node = of_parse_phandle(port, "pcs-handle", 0);
if (!pcs_node)
continue;
if (of_property_read_u32(port, "reg", &reg)) {
ret = -EINVAL;
goto free_pcs;
}
if (reg >= ARRAY_SIZE(a5psw->pcs)) {
ret = -ENODEV;
goto free_pcs;
}
pcs = miic_create(a5psw->dev, pcs_node);
if (IS_ERR(pcs)) {
dev_err(a5psw->dev, "Failed to create PCS for port %d\n",
reg);
ret = PTR_ERR(pcs);
goto free_pcs;
}
a5psw->pcs[reg] = pcs;
of_node_put(pcs_node);
}
of_node_put(ports);
return 0;
free_pcs:
of_node_put(pcs_node);
of_node_put(port);
of_node_put(ports);
a5psw_pcs_free(a5psw);
return ret;
}
static int a5psw_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *mdio;
struct dsa_switch *ds;
struct a5psw *a5psw;
int ret;
a5psw = devm_kzalloc(dev, sizeof(*a5psw), GFP_KERNEL);
if (!a5psw)
return -ENOMEM;
a5psw->dev = dev;
mutex_init(&a5psw->lk_lock);
spin_lock_init(&a5psw->reg_lock);
a5psw->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(a5psw->base))
return PTR_ERR(a5psw->base);
a5psw->bridged_ports = BIT(A5PSW_CPU_PORT);
ret = a5psw_pcs_get(a5psw);
if (ret)
return ret;
a5psw->hclk = devm_clk_get(dev, "hclk");
if (IS_ERR(a5psw->hclk)) {
dev_err(dev, "failed get hclk clock\n");
ret = PTR_ERR(a5psw->hclk);
goto free_pcs;
}
a5psw->clk = devm_clk_get(dev, "clk");
if (IS_ERR(a5psw->clk)) {
dev_err(dev, "failed get clk_switch clock\n");
ret = PTR_ERR(a5psw->clk);
goto free_pcs;
}
ret = clk_prepare_enable(a5psw->clk);
if (ret)
goto free_pcs;
ret = clk_prepare_enable(a5psw->hclk);
if (ret)
goto clk_disable;
mdio = of_get_child_by_name(dev->of_node, "mdio");
if (of_device_is_available(mdio)) {
ret = a5psw_probe_mdio(a5psw, mdio);
if (ret) {
of_node_put(mdio);
dev_err(dev, "Failed to register MDIO: %d\n", ret);
goto hclk_disable;
}
}
of_node_put(mdio);
ds = &a5psw->ds;
ds->dev = dev;
ds->num_ports = A5PSW_PORTS_NUM;
ds->ops = &a5psw_switch_ops;
ds->priv = a5psw;
ret = dsa_register_switch(ds);
if (ret) {
dev_err(dev, "Failed to register DSA switch: %d\n", ret);
goto hclk_disable;
}
return 0;
hclk_disable:
clk_disable_unprepare(a5psw->hclk);
clk_disable:
clk_disable_unprepare(a5psw->clk);
free_pcs:
a5psw_pcs_free(a5psw);
return ret;
}
static int a5psw_remove(struct platform_device *pdev)
{
struct a5psw *a5psw = platform_get_drvdata(pdev);
if (!a5psw)
return 0;
dsa_unregister_switch(&a5psw->ds);
a5psw_pcs_free(a5psw);
clk_disable_unprepare(a5psw->hclk);
clk_disable_unprepare(a5psw->clk);
return 0;
}
static void a5psw_shutdown(struct platform_device *pdev)
{
struct a5psw *a5psw = platform_get_drvdata(pdev);
if (!a5psw)
return;
dsa_switch_shutdown(&a5psw->ds);
platform_set_drvdata(pdev, NULL);
}
static const struct of_device_id a5psw_of_mtable[] = {
{ .compatible = "renesas,rzn1-a5psw", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, a5psw_of_mtable);
static struct platform_driver a5psw_driver = {
.driver = {
.name = "rzn1_a5psw",
.of_match_table = a5psw_of_mtable,
},
.probe = a5psw_probe,
.remove = a5psw_remove,
.shutdown = a5psw_shutdown,
};
module_platform_driver(a5psw_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas RZ/N1 Advanced 5-port Switch driver");
MODULE_AUTHOR("Clément Léger <[email protected]>");
| linux-master | drivers/net/dsa/rzn1_a5psw.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Broadcom Starfighter 2 DSA switch driver
*
* Copyright (C) 2014, Broadcom Corporation
*/
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/mii.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <linux/platform_data/b53.h>
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"
static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
{
switch (priv->type) {
case BCM4908_DEVICE_ID:
switch (port) {
case 7:
return REG_RGMII_11_CNTRL;
default:
break;
}
break;
default:
switch (port) {
case 0:
return REG_RGMII_0_CNTRL;
case 1:
return REG_RGMII_1_CNTRL;
case 2:
return REG_RGMII_2_CNTRL;
default:
break;
}
}
WARN_ONCE(1, "Unsupported port %d\n", port);
/* RO fallback reg */
return REG_SWITCH_STATUS;
}
static u16 bcm_sf2_reg_led_base(struct bcm_sf2_priv *priv, int port)
{
switch (port) {
case 0:
return REG_LED_0_CNTRL;
case 1:
return REG_LED_1_CNTRL;
case 2:
return REG_LED_2_CNTRL;
}
switch (priv->type) {
case BCM4908_DEVICE_ID:
switch (port) {
case 3:
return REG_LED_3_CNTRL;
case 7:
return REG_LED_4_CNTRL;
default:
break;
}
break;
default:
break;
}
WARN_ONCE(1, "Unsupported port %d\n", port);
/* RO fallback reg */
return REG_SWITCH_STATUS;
}
static u32 bcm_sf2_port_override_offset(struct bcm_sf2_priv *priv, int port)
{
switch (priv->type) {
case BCM4908_DEVICE_ID:
case BCM7445_DEVICE_ID:
return port == 8 ? CORE_STS_OVERRIDE_IMP :
CORE_STS_OVERRIDE_GMIIP_PORT(port);
case BCM7278_DEVICE_ID:
return port == 8 ? CORE_STS_OVERRIDE_IMP2 :
CORE_STS_OVERRIDE_GMIIP2_PORT(port);
default:
WARN_ONCE(1, "Unsupported device: %d\n", priv->type);
}
/* RO fallback register */
return REG_SWITCH_STATUS;
}
/* Return the number of active ports, not counting the IMP (CPU) port */
static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int port, count = 0;
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_cpu_port(ds, port))
continue;
if (priv->port_sts[port].enabled)
count++;
}
return count;
}
static void bcm_sf2_recalc_clock(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned long new_rate;
unsigned int ports_active;
/* Frequency in Hz */
static const unsigned long rate_table[] = {
59220000,
60820000,
62500000,
62500000,
};
ports_active = bcm_sf2_num_active_ports(ds);
if (ports_active == 0 || !priv->clk_mdiv)
return;
/* If we overflow our table, just use the recommended operational
* frequency
*/
if (ports_active > ARRAY_SIZE(rate_table))
new_rate = 90000000;
else
new_rate = rate_table[ports_active - 1];
clk_set_rate(priv->clk_mdiv, new_rate);
}
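/* Example: with two user ports enabled, ports_active == 2 and the MDIV
* clock is set to rate_table[1] = 60820000 Hz; with five or more active
* ports the table is exceeded and the recommended 90 MHz operational
* frequency is used instead.
*/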
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int i;
u32 reg;
/* Enable the port memories */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg &= ~P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
/* Enable forwarding */
core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
/* Enable IMP port in dumb mode */
reg = core_readl(priv, CORE_SWITCH_CTRL);
reg |= MII_DUMB_FWDG_EN;
core_writel(priv, reg, CORE_SWITCH_CTRL);
/* Configure Traffic Class to QoS mapping, allow each priority to map
* to a different queue number
*/
reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
reg |= i << (PRT_TO_QID_SHIFT * i);
core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
b53_brcm_hdr_setup(ds, port);
if (port == 8) {
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
reg = core_readl(priv, CORE_IMP_CTL);
reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
reg &= ~(RX_DIS | TX_DIS);
core_writel(priv, reg, CORE_IMP_CTL);
} else {
reg = core_readl(priv, CORE_G_PCTL_PORT(port));
reg &= ~(RX_DIS | TX_DIS);
core_writel(priv, reg, CORE_G_PCTL_PORT(port));
}
priv->port_sts[port].enabled = true;
}
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg;
reg = reg_readl(priv, REG_SPHY_CNTRL);
if (enable) {
reg |= PHY_RESET;
reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
reg_writel(priv, reg, REG_SPHY_CNTRL);
udelay(21);
reg = reg_readl(priv, REG_SPHY_CNTRL);
reg &= ~PHY_RESET;
} else {
reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
reg_writel(priv, reg, REG_SPHY_CNTRL);
mdelay(1);
reg |= CK25_DIS;
}
reg_writel(priv, reg, REG_SPHY_CNTRL);
/* Use PHY-driven LED signaling */
if (!enable) {
u16 led_ctrl = bcm_sf2_reg_led_base(priv, 0);
if (priv->type == BCM7278_DEVICE_ID ||
priv->type == BCM7445_DEVICE_ID) {
reg = reg_led_readl(priv, led_ctrl, 0);
reg |= LED_CNTRL_SPDLNK_SRC_SEL;
reg_led_writel(priv, reg, led_ctrl, 0);
}
}
}
static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
int port)
{
unsigned int off;
switch (port) {
case 7:
off = P7_IRQ_OFF;
break;
case 0:
/* Port 0 interrupts are located on the first bank */
intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
return;
default:
off = P_IRQ_OFF(port);
break;
}
intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}
static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
int port)
{
unsigned int off;
switch (port) {
case 7:
off = P7_IRQ_OFF;
break;
case 0:
/* Port 0 interrupts are located on the first bank */
intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
return;
default:
off = P_IRQ_OFF(port);
break;
}
intrl2_1_mask_set(priv, P_IRQ_MASK(off));
intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int i;
u32 reg;
if (!dsa_is_user_port(ds, port))
return 0;
priv->port_sts[port].enabled = true;
bcm_sf2_recalc_clock(ds);
/* Clear the memory power down */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg &= ~P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
/* Enable Broadcom tags for that port if requested */
if (priv->brcm_tag_mask & BIT(port))
b53_brcm_hdr_setup(ds, port);
/* Configure Traffic Class to QoS mapping, allow each priority to map
* to a different queue number
*/
reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
reg |= i << (PRT_TO_QID_SHIFT * i);
core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
/* Re-enable the GPHY and re-apply workarounds */
if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
bcm_sf2_gphy_enable_set(ds, true);
if (phy) {
/* if phy_stop() has been called before, phy
* will be in halted state, and phy_start()
* will call resume.
*
* the resume path does not configure back
* autoneg settings, and since we hard reset
* the phy manually here, we need to reset the
* state machine also.
*/
phy->state = PHY_READY;
phy_init_hw(phy);
}
}
/* Enable MoCA port interrupts to get notified */
if (port == priv->moca_port)
bcm_sf2_port_intr_enable(priv, port);
/* Set per-queue pause threshold to 32 */
core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));
/* Set ACB threshold to 24 */
for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
reg = acb_readl(priv, ACB_QUEUE_CFG(port *
SF2_NUM_EGRESS_QUEUES + i));
reg &= ~XOFF_THRESHOLD_MASK;
reg |= 24;
acb_writel(priv, reg, ACB_QUEUE_CFG(port *
SF2_NUM_EGRESS_QUEUES + i));
}
return b53_enable_port(ds, port, phy);
}
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg;
/* Disable learning while in WoL mode */
if (priv->wol_ports_mask & (1 << port)) {
reg = core_readl(priv, CORE_DIS_LEARN);
reg |= BIT(port);
core_writel(priv, reg, CORE_DIS_LEARN);
return;
}
if (port == priv->moca_port)
bcm_sf2_port_intr_disable(priv, port);
if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, false);
b53_disable_port(ds, port);
/* Power down the port memory */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg |= P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
priv->port_sts[port].enabled = false;
bcm_sf2_recalc_clock(ds);
}
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
int regnum, u16 val)
{
int ret = 0;
u32 reg;
reg = reg_readl(priv, REG_SWITCH_CNTRL);
reg |= MDIO_MASTER_SEL;
reg_writel(priv, reg, REG_SWITCH_CNTRL);
/* Page << 8 | offset */
reg = 0x70;
reg <<= 2;
core_writel(priv, addr, reg);
/* Page << 8 | offset */
reg = 0x80 << 8 | regnum << 1;
reg <<= 2;
if (op)
ret = core_readl(priv, reg);
else
core_writel(priv, val, reg);
reg = reg_readl(priv, REG_SWITCH_CNTRL);
reg &= ~MDIO_MASTER_SEL;
reg_writel(priv, reg, REG_SWITCH_CNTRL);
return ret & 0xffff;
}
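/* Worked example of the indirect access encoding (register numbers are
* hypothetical): for a read of regnum 2 on the pseudo-PHY, the page/offset
* word is 0x80 << 8 | 2 << 1 = 0x8004, shifted left by 2 to form the
* SWITCH_CORE address 0x20010; the PHY address itself is written beforehand
* through the register at 0x70 << 2. Only the low 16 bits of the core read
* are returned to the caller.
*/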
static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
struct bcm_sf2_priv *priv = bus->priv;
/* Intercept reads from the Broadcom pseudo-PHY address; otherwise, send
* them to our master MDIO bus controller
*/
if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
else
return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}
static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
u16 val)
{
struct bcm_sf2_priv *priv = bus->priv;
/* Intercept writes to the Broadcom pseudo-PHY address; otherwise,
* send them to our master MDIO bus controller
*/
if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
return bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
else
return mdiobus_write_nested(priv->master_mii_bus, addr,
regnum, val);
}
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
struct dsa_switch *ds = dev_id;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
~priv->irq0_mask;
intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
return IRQ_HANDLED;
}
static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
struct dsa_switch *ds = dev_id;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
~priv->irq1_mask;
intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) {
priv->port_sts[7].link = true;
dsa_port_phylink_mac_change(ds, 7, true);
}
if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) {
priv->port_sts[7].link = false;
dsa_port_phylink_mac_change(ds, 7, false);
}
return IRQ_HANDLED;
}
static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
unsigned int timeout = 1000;
u32 reg;
int ret;
/* The watchdog reset does not work on 7278, we need to hit the
* "external" reset line through the reset controller.
*/
if (priv->type == BCM7278_DEVICE_ID) {
ret = reset_control_assert(priv->rcdev);
if (ret)
return ret;
return reset_control_deassert(priv->rcdev);
}
reg = core_readl(priv, CORE_WATCHDOG_CTRL);
reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
core_writel(priv, reg, CORE_WATCHDOG_CTRL);
do {
reg = core_readl(priv, CORE_WATCHDOG_CTRL);
if (!(reg & SOFTWARE_RESET))
break;
usleep_range(1000, 2000);
} while (timeout-- > 0);
if (timeout == 0)
return -ETIMEDOUT;
return 0;
}
static void bcm_sf2_crossbar_setup(struct bcm_sf2_priv *priv)
{
struct device *dev = priv->dev->ds->dev;
int shift;
u32 mask;
u32 reg;
int i;
mask = BIT(priv->num_crossbar_int_ports) - 1;
reg = reg_readl(priv, REG_CROSSBAR);
switch (priv->type) {
case BCM4908_DEVICE_ID:
shift = CROSSBAR_BCM4908_INT_P7 * priv->num_crossbar_int_ports;
reg &= ~(mask << shift);
if (0) /* FIXME */
reg |= CROSSBAR_BCM4908_EXT_SERDES << shift;
else if (priv->int_phy_mask & BIT(7))
reg |= CROSSBAR_BCM4908_EXT_GPHY4 << shift;
else if (phy_interface_mode_is_rgmii(priv->port_sts[7].mode))
reg |= CROSSBAR_BCM4908_EXT_RGMII << shift;
else if (WARN(1, "Invalid port mode\n"))
return;
break;
default:
return;
}
reg_writel(priv, reg, REG_CROSSBAR);
reg = reg_readl(priv, REG_CROSSBAR);
for (i = 0; i < priv->num_crossbar_int_ports; i++) {
shift = i * priv->num_crossbar_int_ports;
dev_dbg(dev, "crossbar int port #%d - ext port #%d\n", i,
(reg >> shift) & mask);
}
}
static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
intrl2_0_mask_set(priv, 0xffffffff);
intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
intrl2_1_mask_set(priv, 0xffffffff);
intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
struct device_node *dn)
{
struct device *dev = priv->dev->ds->dev;
struct bcm_sf2_port_status *port_st;
struct device_node *port;
unsigned int port_num;
struct property *prop;
int err;
priv->moca_port = -1;
for_each_available_child_of_node(dn, port) {
if (of_property_read_u32(port, "reg", &port_num))
continue;
if (port_num >= DSA_MAX_PORTS) {
dev_err(dev, "Invalid port number %d\n", port_num);
continue;
}
port_st = &priv->port_sts[port_num];
/* Internal PHYs get assigned a specific 'phy-mode' property
* value: "internal" to help flag them before MDIO probing
* has completed, since they might be turned off at that
* time
*/
err = of_get_phy_mode(port, &port_st->mode);
if (err)
continue;
if (port_st->mode == PHY_INTERFACE_MODE_INTERNAL)
priv->int_phy_mask |= 1 << port_num;
if (port_st->mode == PHY_INTERFACE_MODE_MOCA)
priv->moca_port = port_num;
if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
priv->brcm_tag_mask |= 1 << port_num;
/* Ensure that port 5 is not picked up as a DSA CPU port
* flavour but a regular port instead. We should be using
* devlink to be able to set the port flavour.
*/
if (port_num == 5 && priv->type == BCM7278_DEVICE_ID) {
prop = of_find_property(port, "ethernet", NULL);
if (prop)
of_remove_property(port, prop);
}
}
}
static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct device_node *dn, *child;
struct phy_device *phydev;
struct property *prop;
static int index;
int err, reg;
/* Find our integrated MDIO bus node */
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
priv->master_mii_bus = of_mdio_find_bus(dn);
if (!priv->master_mii_bus) {
of_node_put(dn);
return -EPROBE_DEFER;
}
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
priv->slave_mii_bus = mdiobus_alloc();
if (!priv->slave_mii_bus) {
of_node_put(dn);
return -ENOMEM;
}
priv->slave_mii_bus->priv = priv;
priv->slave_mii_bus->name = "sf2 slave mii";
priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
index++);
priv->slave_mii_bus->dev.of_node = dn;
/* Include the pseudo-PHY address to divert reads towards our
* workaround. This is only required for 7445D0, since 7445E0
* disconnects the internal switch pseudo-PHY such that we can use the
* regular SWITCH_MDIO master controller instead.
*
* Here we flag the pseudo PHY as needing special treatment and would
* otherwise make all other PHY read/writes go to the master MDIO bus
* controller that comes with this switch backed by the "mdio-unimac"
* driver.
*/
if (of_machine_is_compatible("brcm,bcm7445d0"))
priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0);
else
priv->indir_phy_mask = 0;
ds->phys_mii_mask = priv->indir_phy_mask;
ds->slave_mii_bus = priv->slave_mii_bus;
priv->slave_mii_bus->parent = ds->dev->parent;
priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
/* We need to make sure that of_phy_connect() will not work by
* removing the 'phandle' and 'linux,phandle' properties and
* unregister the existing PHY device that was already registered.
*/
for_each_available_child_of_node(dn, child) {
if (of_property_read_u32(child, "reg", &reg) ||
reg >= PHY_MAX_ADDR)
continue;
if (!(priv->indir_phy_mask & BIT(reg)))
continue;
prop = of_find_property(child, "phandle", NULL);
if (prop)
of_remove_property(child, prop);
prop = of_find_property(child, "linux,phandle", NULL);
if (prop)
of_remove_property(child, prop);
phydev = of_phy_find_device(child);
if (phydev)
phy_device_remove(phydev);
}
err = mdiobus_register(priv->slave_mii_bus);
if (err && dn) {
mdiobus_free(priv->slave_mii_bus);
of_node_put(dn);
}
return err;
}
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
mdiobus_unregister(priv->slave_mii_bus);
mdiobus_free(priv->slave_mii_bus);
of_node_put(priv->master_mii_dn);
}
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
/* The BCM7xxx PHY driver expects to find the integrated PHY revision
* in bits 15:8 and the patch level in bits 7:0 which is exactly what
* the REG_PHY_REVISION register layout is.
*/
if (priv->int_phy_mask & BIT(port))
return priv->hw_params.gphy_rev;
else
return PHY_BRCM_AUTO_PWRDWN_ENABLE |
PHY_BRCM_DIS_TXCRXC_NOENRGY |
PHY_BRCM_IDDQ_SUSPEND;
}
static void bcm_sf2_sw_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
unsigned long *interfaces = config->supported_interfaces;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
if (priv->int_phy_mask & BIT(port)) {
__set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
} else if (priv->moca_port == port) {
__set_bit(PHY_INTERFACE_MODE_MOCA, interfaces);
} else {
__set_bit(PHY_INTERFACE_MODE_MII, interfaces);
__set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
__set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
phy_interface_set_rgmii(interfaces);
}
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000;
}
static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 id_mode_dis = 0, port_mode;
u32 reg_rgmii_ctrl;
u32 reg;
if (port == core_readl(priv, CORE_IMP0_PRT_ID))
return;
switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII:
id_mode_dis = 1;
fallthrough;
case PHY_INTERFACE_MODE_RGMII_TXID:
port_mode = EXT_GPHY;
break;
case PHY_INTERFACE_MODE_MII:
port_mode = EXT_EPHY;
break;
case PHY_INTERFACE_MODE_REVMII:
port_mode = EXT_REVMII;
break;
default:
/* Nothing required for all other PHYs: internal and MoCA */
return;
}
reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
/* Clear the id_mode_dis bit and the existing port mode; let
* RGMII_MODE_EN be set by mac_link_{up,down}
*/
reg = reg_readl(priv, reg_rgmii_ctrl);
reg &= ~ID_MODE_DIS;
reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
reg |= port_mode;
if (id_mode_dis)
reg |= ID_MODE_DIS;
reg_writel(priv, reg, reg_rgmii_ctrl);
}
static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
phy_interface_t interface, bool link)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg_rgmii_ctrl;
u32 reg;
if (!phy_interface_mode_is_rgmii(interface) &&
interface != PHY_INTERFACE_MODE_MII &&
interface != PHY_INTERFACE_MODE_REVMII)
return;
reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
/* If the link is down, just disable the interface to conserve power */
reg = reg_readl(priv, reg_rgmii_ctrl);
if (link)
reg |= RGMII_MODE_EN;
else
reg &= ~RGMII_MODE_EN;
reg_writel(priv, reg, reg_rgmii_ctrl);
}
static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg, offset;
if (priv->wol_ports_mask & BIT(port))
return;
offset = bcm_sf2_port_override_offset(priv, port);
reg = core_readl(priv, offset);
reg &= ~LINK_STS;
core_writel(priv, reg, offset);
bcm_sf2_sw_mac_link_set(ds, port, interface, false);
}
static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_eee *p = &priv->dev->ports[port].eee;
u32 reg_rgmii_ctrl = 0;
u32 reg, offset;
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
offset = bcm_sf2_port_override_offset(priv, port);
if (phy_interface_mode_is_rgmii(interface) ||
interface == PHY_INTERFACE_MODE_MII ||
interface == PHY_INTERFACE_MODE_REVMII) {
reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
reg = reg_readl(priv, reg_rgmii_ctrl);
reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
if (tx_pause)
reg |= TX_PAUSE_EN;
if (rx_pause)
reg |= RX_PAUSE_EN;
reg_writel(priv, reg, reg_rgmii_ctrl);
}
reg = LINK_STS;
if (port == 8) {
if (priv->type == BCM4908_DEVICE_ID)
reg |= GMII_SPEED_UP_2G;
reg |= MII_SW_OR;
} else {
reg |= SW_OVERRIDE;
}
switch (speed) {
case SPEED_1000:
reg |= SPDSTS_1000 << SPEED_SHIFT;
break;
case SPEED_100:
reg |= SPDSTS_100 << SPEED_SHIFT;
break;
}
if (duplex == DUPLEX_FULL)
reg |= DUPLX_MODE;
if (tx_pause)
reg |= TXFLOW_CNTL;
if (rx_pause)
reg |= RXFLOW_CNTL;
core_writel(priv, reg, offset);
if (mode == MLO_AN_PHY && phydev)
p->eee_enabled = b53_eee_init(ds, port, phydev);
}
static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
struct phylink_link_state *status)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
status->link = false;
/* MoCA port is special as we do not get link status from CORE_LNKSTS,
* which means that we need to force the link at the port override
* level to get the data to flow. We use what the interrupt handler
* determined earlier.
*
* For the other ports, we just force the link status, since this is
* a fixed PHY device.
*/
if (port == priv->moca_port) {
status->link = priv->port_sts[port].link;
/* For MoCA interfaces, also force a link down notification
* since some versions of the user-space daemon (mocad) use
* cmd->autoneg to force the link, which messes up the PHY
* state machine and makes it go into the PHY_FORCING state instead.
*/
if (!status->link)
netif_carrier_off(dsa_to_port(ds, port)->slave);
status->duplex = DUPLEX_FULL;
} else {
status->link = true;
}
}
static void bcm_sf2_enable_acb(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg;
/* Enable ACB globally */
reg = acb_readl(priv, ACB_CONTROL);
reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
acb_writel(priv, reg, ACB_CONTROL);
reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
reg |= ACB_EN | ACB_ALGORITHM;
acb_writel(priv, reg, ACB_CONTROL);
}
static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int port;
bcm_sf2_intr_disable(priv);
/* Disable all ports physically present including the IMP
* port, the other ones have already been disabled during
* bcm_sf2_sw_setup
*/
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
bcm_sf2_port_disable(ds, port);
}
if (!priv->wol_ports_mask)
clk_disable_unprepare(priv->clk);
return 0;
}
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret;
if (!priv->wol_ports_mask)
clk_prepare_enable(priv->clk);
ret = bcm_sf2_sw_rst(priv);
if (ret) {
pr_err("%s: failed to software reset switch\n", __func__);
return ret;
}
bcm_sf2_crossbar_setup(priv);
ret = bcm_sf2_cfp_resume(ds);
if (ret)
return ret;
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, true);
ds->ops->setup(ds);
return 0;
}
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol = { };
/* Get the parent device WoL settings */
if (p->ethtool_ops->get_wol)
p->ethtool_ops->get_wol(p, &pwol);
/* Advertise the parent device supported settings */
wol->supported = pwol.supported;
memset(&wol->sopass, 0, sizeof(wol->sopass));
if (pwol.wolopts & WAKE_MAGICSECURE)
memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
if (priv->wol_ports_mask & (1 << port))
wol->wolopts = pwol.wolopts;
else
wol->wolopts = 0;
}
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
struct ethtool_wolinfo pwol = { };
if (p->ethtool_ops->get_wol)
p->ethtool_ops->get_wol(p, &pwol);
if (wol->wolopts & ~pwol.supported)
return -EINVAL;
if (wol->wolopts)
priv->wol_ports_mask |= (1 << port);
else
priv->wol_ports_mask &= ~(1 << port);
/* If we have at least one port enabled, make sure the CPU port
* is also enabled. If the CPU port is the last one enabled, we disable
* it since this configuration does not make sense.
*/
if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
priv->wol_ports_mask |= (1 << cpu_port);
else
priv->wol_ports_mask &= ~(1 << cpu_port);
return p->ethtool_ops->set_wol(p, wol);
}
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int port;
/* Enable all valid ports and disable those unused */
for (port = 0; port < priv->hw_params.num_ports; port++) {
/* IMP port receives special treatment */
if (dsa_is_user_port(ds, port))
bcm_sf2_port_setup(ds, port, NULL);
else if (dsa_is_cpu_port(ds, port))
bcm_sf2_imp_setup(ds, port);
else
bcm_sf2_port_disable(ds, port);
}
b53_configure_vlan(ds);
bcm_sf2_enable_acb(ds);
return b53_setup_devlink_resources(ds);
}
static void bcm_sf2_sw_teardown(struct dsa_switch *ds)
{
dsa_devlink_resources_unregister(ds);
}
/* The SWITCH_CORE register space is managed by b53 but operates on a page +
* register basis so we need to translate that into an address that the
* bus-glue understands.
*/
#define SF2_PAGE_REG_MKADDR(page, reg) ((page) << 10 | (reg) << 2)
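/* For instance (values purely illustrative), a b53 access to page 0x02,
* register 0x04 is translated to the memory-mapped SWITCH_CORE offset
* (0x02 << 10) | (0x04 << 2) = 0x810 before being handed to core_readl() or
* core_writel().
*/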
static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
u8 *val)
{
struct bcm_sf2_priv *priv = dev->priv;
*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
return 0;
}
static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
u16 *val)
{
struct bcm_sf2_priv *priv = dev->priv;
*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
return 0;
}
static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
u32 *val)
{
struct bcm_sf2_priv *priv = dev->priv;
*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
return 0;
}
static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
u64 *val)
{
struct bcm_sf2_priv *priv = dev->priv;
*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));
return 0;
}
static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
u8 value)
{
struct bcm_sf2_priv *priv = dev->priv;
core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
return 0;
}
static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
u16 value)
{
struct bcm_sf2_priv *priv = dev->priv;
core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
return 0;
}
static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
u32 value)
{
struct bcm_sf2_priv *priv = dev->priv;
core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
return 0;
}
static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
u64 value)
{
struct bcm_sf2_priv *priv = dev->priv;
core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
return 0;
}
static const struct b53_io_ops bcm_sf2_io_ops = {
.read8 = bcm_sf2_core_read8,
.read16 = bcm_sf2_core_read16,
.read32 = bcm_sf2_core_read32,
.read48 = bcm_sf2_core_read64,
.read64 = bcm_sf2_core_read64,
.write8 = bcm_sf2_core_write8,
.write16 = bcm_sf2_core_write16,
.write32 = bcm_sf2_core_write32,
.write48 = bcm_sf2_core_write64,
.write64 = bcm_sf2_core_write64,
};
static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data)
{
int cnt = b53_get_sset_count(ds, port, stringset);
b53_get_strings(ds, port, stringset, data);
bcm_sf2_cfp_get_strings(ds, port, stringset,
data + cnt * ETH_GSTRING_LEN);
}
static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
int cnt = b53_get_sset_count(ds, port, ETH_SS_STATS);
b53_get_ethtool_stats(ds, port, data);
bcm_sf2_cfp_get_ethtool_stats(ds, port, data + cnt);
}
static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
int sset)
{
int cnt = b53_get_sset_count(ds, port, sset);
if (cnt < 0)
return cnt;
cnt += bcm_sf2_cfp_get_sset_count(ds, port, sset);
return cnt;
}
static const struct dsa_switch_ops bcm_sf2_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = bcm_sf2_sw_setup,
.teardown = bcm_sf2_sw_teardown,
.get_strings = bcm_sf2_sw_get_strings,
.get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
.get_sset_count = bcm_sf2_sw_get_sset_count,
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
.phylink_get_caps = bcm_sf2_sw_get_caps,
.phylink_mac_config = bcm_sf2_sw_mac_config,
.phylink_mac_link_down = bcm_sf2_sw_mac_link_down,
.phylink_mac_link_up = bcm_sf2_sw_mac_link_up,
.phylink_fixed_state = bcm_sf2_sw_fixed_state,
.suspend = bcm_sf2_sw_suspend,
.resume = bcm_sf2_sw_resume,
.get_wol = bcm_sf2_sw_get_wol,
.set_wol = bcm_sf2_sw_set_wol,
.port_enable = bcm_sf2_port_setup,
.port_disable = bcm_sf2_port_disable,
.get_mac_eee = b53_get_mac_eee,
.set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
.port_bridge_flags = b53_br_flags,
.port_stp_state_set = b53_br_set_stp_state,
.port_fast_age = b53_br_fast_age,
.port_vlan_filtering = b53_vlan_filtering,
.port_vlan_add = b53_vlan_add,
.port_vlan_del = b53_vlan_del,
.port_fdb_dump = b53_fdb_dump,
.port_fdb_add = b53_fdb_add,
.port_fdb_del = b53_fdb_del,
.get_rxnfc = bcm_sf2_get_rxnfc,
.set_rxnfc = bcm_sf2_set_rxnfc,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
.port_mdb_add = b53_mdb_add,
.port_mdb_del = b53_mdb_del,
};
struct bcm_sf2_of_data {
u32 type;
const u16 *reg_offsets;
unsigned int core_reg_align;
unsigned int num_cfp_rules;
unsigned int num_crossbar_int_ports;
};
static const u16 bcm_sf2_4908_reg_offsets[] = {
[REG_SWITCH_CNTRL] = 0x00,
[REG_SWITCH_STATUS] = 0x04,
[REG_DIR_DATA_WRITE] = 0x08,
[REG_DIR_DATA_READ] = 0x0c,
[REG_SWITCH_REVISION] = 0x10,
[REG_PHY_REVISION] = 0x14,
[REG_SPHY_CNTRL] = 0x24,
[REG_CROSSBAR] = 0xc8,
[REG_RGMII_11_CNTRL] = 0x014c,
[REG_LED_0_CNTRL] = 0x40,
[REG_LED_1_CNTRL] = 0x4c,
[REG_LED_2_CNTRL] = 0x58,
[REG_LED_3_CNTRL] = 0x64,
[REG_LED_4_CNTRL] = 0x88,
[REG_LED_5_CNTRL] = 0xa0,
[REG_LED_AGGREGATE_CTRL] = 0xb8,
};
static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
.type = BCM4908_DEVICE_ID,
.core_reg_align = 0,
.reg_offsets = bcm_sf2_4908_reg_offsets,
.num_cfp_rules = 256,
.num_crossbar_int_ports = 2,
};
/* Register offsets for the SWITCH_REG_* block */
static const u16 bcm_sf2_7445_reg_offsets[] = {
[REG_SWITCH_CNTRL] = 0x00,
[REG_SWITCH_STATUS] = 0x04,
[REG_DIR_DATA_WRITE] = 0x08,
[REG_DIR_DATA_READ] = 0x0C,
[REG_SWITCH_REVISION] = 0x18,
[REG_PHY_REVISION] = 0x1C,
[REG_SPHY_CNTRL] = 0x2C,
[REG_RGMII_0_CNTRL] = 0x34,
[REG_RGMII_1_CNTRL] = 0x40,
[REG_RGMII_2_CNTRL] = 0x4c,
[REG_LED_0_CNTRL] = 0x90,
[REG_LED_1_CNTRL] = 0x94,
[REG_LED_2_CNTRL] = 0x98,
};
static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
.type = BCM7445_DEVICE_ID,
.core_reg_align = 0,
.reg_offsets = bcm_sf2_7445_reg_offsets,
.num_cfp_rules = 256,
};
static const u16 bcm_sf2_7278_reg_offsets[] = {
[REG_SWITCH_CNTRL] = 0x00,
[REG_SWITCH_STATUS] = 0x04,
[REG_DIR_DATA_WRITE] = 0x08,
[REG_DIR_DATA_READ] = 0x0c,
[REG_SWITCH_REVISION] = 0x10,
[REG_PHY_REVISION] = 0x14,
[REG_SPHY_CNTRL] = 0x24,
[REG_RGMII_0_CNTRL] = 0xe0,
[REG_RGMII_1_CNTRL] = 0xec,
[REG_RGMII_2_CNTRL] = 0xf8,
[REG_LED_0_CNTRL] = 0x40,
[REG_LED_1_CNTRL] = 0x4c,
[REG_LED_2_CNTRL] = 0x58,
};
static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
.type = BCM7278_DEVICE_ID,
.core_reg_align = 1,
.reg_offsets = bcm_sf2_7278_reg_offsets,
.num_cfp_rules = 128,
};
static const struct of_device_id bcm_sf2_of_match[] = {
{ .compatible = "brcm,bcm4908-switch",
.data = &bcm_sf2_4908_data
},
{ .compatible = "brcm,bcm7445-switch-v4.0",
.data = &bcm_sf2_7445_data
},
{ .compatible = "brcm,bcm7278-switch-v4.0",
.data = &bcm_sf2_7278_data
},
{ .compatible = "brcm,bcm7278-switch-v4.8",
.data = &bcm_sf2_7278_data
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
struct device_node *dn = pdev->dev.of_node;
const struct of_device_id *of_id = NULL;
const struct bcm_sf2_of_data *data;
struct b53_platform_data *pdata;
struct dsa_switch_ops *ops;
struct device_node *ports;
struct bcm_sf2_priv *priv;
struct b53_device *dev;
struct dsa_switch *ds;
void __iomem **base;
unsigned int i;
u32 reg, rev;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
if (!ops)
return -ENOMEM;
dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
if (!dev)
return -ENOMEM;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
of_id = of_match_node(bcm_sf2_of_match, dn);
if (!of_id || !of_id->data)
return -EINVAL;
data = of_id->data;
/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
priv->type = data->type;
priv->reg_offsets = data->reg_offsets;
priv->core_reg_align = data->core_reg_align;
priv->num_cfp_rules = data->num_cfp_rules;
priv->num_crossbar_int_ports = data->num_crossbar_int_ports;
priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev,
"switch");
if (IS_ERR(priv->rcdev))
return PTR_ERR(priv->rcdev);
/* Auto-detection using standard registers will not work, so
* provide an indication of what kind of device we are for
* b53_common to work with
*/
pdata->chip_id = priv->type;
dev->pdata = pdata;
priv->dev = dev;
ds = dev->ds;
ds->ops = &bcm_sf2_ops;
/* Advertise the 8 egress queues */
ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;
dev_set_drvdata(&pdev->dev, priv);
spin_lock_init(&priv->indir_lock);
mutex_init(&priv->cfp.lock);
INIT_LIST_HEAD(&priv->cfp.rules_list);
/* CFP rule #0 cannot be used for specific classifications, flag it as
* permanently used
*/
set_bit(0, priv->cfp.used);
set_bit(0, priv->cfp.unique);
/* Balance of_node_put() done by of_find_node_by_name() */
of_node_get(dn);
ports = of_find_node_by_name(dn, "ports");
if (ports) {
bcm_sf2_identify_ports(priv, ports);
of_node_put(ports);
}
priv->irq0 = irq_of_parse_and_map(dn, 0);
priv->irq1 = irq_of_parse_and_map(dn, 1);
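	/* Note: the loop below assumes priv->core is the first of
	 * BCM_SF2_REGS_NUM consecutive void __iomem * members in struct
	 * bcm_sf2_priv, laid out in the same order as the BCM_SF2_REGS_NAME
	 * resource names (an inference from this code, not verified against
	 * the header here).
	 */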
base = &priv->core;
for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
*base = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(*base)) {
pr_err("unable to find register: %s\n", reg_names[i]);
return PTR_ERR(*base);
}
base++;
}
priv->clk = devm_clk_get_optional(&pdev->dev, "sw_switch");
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
if (IS_ERR(priv->clk_mdiv)) {
ret = PTR_ERR(priv->clk_mdiv);
goto out_clk;
}
ret = clk_prepare_enable(priv->clk_mdiv);
if (ret)
goto out_clk;
ret = bcm_sf2_sw_rst(priv);
if (ret) {
pr_err("unable to software reset switch: %d\n", ret);
goto out_clk_mdiv;
}
bcm_sf2_crossbar_setup(priv);
bcm_sf2_gphy_enable_set(priv->dev->ds, true);
ret = bcm_sf2_mdio_register(ds);
if (ret) {
pr_err("failed to register MDIO bus\n");
goto out_clk_mdiv;
}
bcm_sf2_gphy_enable_set(priv->dev->ds, false);
ret = bcm_sf2_cfp_rst(priv);
if (ret) {
pr_err("failed to reset CFP\n");
goto out_mdio;
}
/* Disable all interrupts and request them */
bcm_sf2_intr_disable(priv);
ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
"switch_0", ds);
if (ret < 0) {
pr_err("failed to request switch_0 IRQ\n");
goto out_mdio;
}
ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
"switch_1", ds);
if (ret < 0) {
pr_err("failed to request switch_1 IRQ\n");
goto out_mdio;
}
/* Reset the MIB counters */
reg = core_readl(priv, CORE_GMNCFGCFG);
reg |= RST_MIB_CNT;
core_writel(priv, reg, CORE_GMNCFGCFG);
reg &= ~RST_MIB_CNT;
core_writel(priv, reg, CORE_GMNCFGCFG);
/* Get the maximum number of ports for this switch */
priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
if (priv->hw_params.num_ports > DSA_MAX_PORTS)
priv->hw_params.num_ports = DSA_MAX_PORTS;
/* Assume a single GPHY setup if we can't read that property */
if (of_property_read_u32(dn, "brcm,num-gphy",
&priv->hw_params.num_gphy))
priv->hw_params.num_gphy = 1;
rev = reg_readl(priv, REG_SWITCH_REVISION);
priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
SWITCH_TOP_REV_MASK;
priv->hw_params.core_rev = (rev & SF2_REV_MASK);
rev = reg_readl(priv, REG_PHY_REVISION);
priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
ret = b53_switch_register(dev);
if (ret)
goto out_mdio;
dev_info(&pdev->dev,
"Starfighter 2 top: %x.%02x, core: %x.%02x, IRQs: %d, %d\n",
priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
priv->irq0, priv->irq1);
return 0;
out_mdio:
bcm_sf2_mdio_unregister(priv);
out_clk_mdiv:
clk_disable_unprepare(priv->clk_mdiv);
out_clk:
clk_disable_unprepare(priv->clk);
return ret;
}
static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return 0;
priv->wol_ports_mask = 0;
/* Disable interrupts */
bcm_sf2_intr_disable(priv);
dsa_unregister_switch(priv->dev->ds);
bcm_sf2_cfp_exit(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
clk_disable_unprepare(priv->clk_mdiv);
clk_disable_unprepare(priv->clk);
if (priv->type == BCM7278_DEVICE_ID)
reset_control_assert(priv->rcdev);
return 0;
}
static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return;
/* For a kernel about to be kexec'd we want to keep the GPHY on for a
* successful MDIO bus scan to occur. If we did turn off the GPHY
* before (e.g: port_disable), this will also power it back on.
*
* Do not rely on kexec_in_progress, just power the PHY on.
*/
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(priv->dev->ds, true);
dsa_switch_shutdown(priv->dev->ds);
platform_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
return dsa_switch_suspend(priv->dev->ds);
}
static int bcm_sf2_resume(struct device *dev)
{
struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
return dsa_switch_resume(priv->dev->ds);
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
bcm_sf2_suspend, bcm_sf2_resume);
static struct platform_driver bcm_sf2_driver = {
.probe = bcm_sf2_sw_probe,
.remove = bcm_sf2_sw_remove,
.shutdown = bcm_sf2_sw_shutdown,
.driver = {
.name = "brcm-sf2",
.of_match_table = bcm_sf2_of_match,
.pm = &bcm_sf2_pm_ops,
},
};
module_platform_driver(bcm_sf2_driver);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");
| linux-master | drivers/net/dsa/bcm_sf2.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <net/dsa.h>
#include "mt7530.h"
static const struct of_device_id mt7988_of_match[] = {
{ .compatible = "mediatek,mt7988-switch", .data = &mt753x_table[ID_MT7988], },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mt7988_of_match);
static int
mt7988_probe(struct platform_device *pdev)
{
static struct regmap_config *sw_regmap_config;
struct mt7530_priv *priv;
void __iomem *base_addr;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->bus = NULL;
priv->dev = &pdev->dev;
ret = mt7530_probe_common(priv);
if (ret)
return ret;
priv->rstc = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(priv->rstc)) {
dev_err(&pdev->dev, "Couldn't get our reset line\n");
return PTR_ERR(priv->rstc);
}
base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_addr)) {
dev_err(&pdev->dev, "cannot request I/O memory space\n");
return -ENXIO;
}
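	/* Describe the MMIO register space for regmap: 32-bit registers at a
	 * 4-byte stride, addressed with 16-bit offsets up to MT7530_CREV
	 * (a restatement of the values filled in below).
	 */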
sw_regmap_config = devm_kzalloc(&pdev->dev, sizeof(*sw_regmap_config), GFP_KERNEL);
if (!sw_regmap_config)
return -ENOMEM;
sw_regmap_config->name = "switch";
sw_regmap_config->reg_bits = 16;
sw_regmap_config->val_bits = 32;
sw_regmap_config->reg_stride = 4;
sw_regmap_config->max_register = MT7530_CREV;
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base_addr, sw_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
return dsa_register_switch(priv->ds);
}
static int
mt7988_remove(struct platform_device *pdev)
{
struct mt7530_priv *priv = platform_get_drvdata(pdev);
if (priv)
mt7530_remove_common(priv);
return 0;
}
static void mt7988_shutdown(struct platform_device *pdev)
{
struct mt7530_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return;
dsa_switch_shutdown(priv->ds);
dev_set_drvdata(&pdev->dev, NULL);
}
static struct platform_driver mt7988_platform_driver = {
.probe = mt7988_probe,
.remove = mt7988_remove,
.shutdown = mt7988_shutdown,
.driver = {
.name = "mt7530-mmio",
.of_match_table = mt7988_of_match,
},
};
module_platform_driver(mt7988_platform_driver);
MODULE_AUTHOR("Daniel Golle <[email protected]>");
MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch (MMIO)");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/mt7530-mmio.c |
// SPDX-License-Identifier: GPL-2.0
/* DSA driver for:
* Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
* Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
* Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
* Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
*
* These switches have a built-in 8051 CPU and can download and execute a
* firmware in this CPU. They can also be configured to use an external CPU
* handling the switch in a memory-mapped manner by connecting to that external
* CPU's memory bus.
*
 * Copyright (C) 2018 Linus Walleij <[email protected]>
* Includes portions of code from the firmware uploader by:
* Copyright (C) 2009 Gabor Juhos <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/bitops.h>
#include <linux/if_bridge.h>
#include <linux/etherdevice.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/random.h>
#include <net/dsa.h>
#include "vitesse-vsc73xx.h"
#define VSC73XX_BLOCK_MAC 0x1 /* Subblocks 0-4, 6 (CPU port) */
#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */
#define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */
#define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */
#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */
#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
#define CPU_PORT 6 /* CPU port */
/* MAC Block registers */
#define VSC73XX_MAC_CFG 0x00
#define VSC73XX_MACHDXGAP 0x02
#define VSC73XX_FCCONF 0x04
#define VSC73XX_FCMACHI 0x08
#define VSC73XX_FCMACLO 0x0c
#define VSC73XX_MAXLEN 0x10
#define VSC73XX_ADVPORTM 0x19
#define VSC73XX_TXUPDCFG 0x24
#define VSC73XX_TXQ_SELECT_CFG 0x28
#define VSC73XX_RXOCT 0x50
#define VSC73XX_TXOCT 0x51
#define VSC73XX_C_RX0 0x52
#define VSC73XX_C_RX1 0x53
#define VSC73XX_C_RX2 0x54
#define VSC73XX_C_TX0 0x55
#define VSC73XX_C_TX1 0x56
#define VSC73XX_C_TX2 0x57
#define VSC73XX_C_CFG 0x58
#define VSC73XX_CAT_DROP 0x6e
#define VSC73XX_CAT_PR_MISC_L2 0x6f
#define VSC73XX_CAT_PR_USR_PRIO 0x75
#define VSC73XX_Q_MISC_CONF 0xdf
/* MAC_CFG register bits */
#define VSC73XX_MAC_CFG_WEXC_DIS BIT(31)
#define VSC73XX_MAC_CFG_PORT_RST BIT(29)
#define VSC73XX_MAC_CFG_TX_EN BIT(28)
#define VSC73XX_MAC_CFG_SEED_LOAD BIT(27)
#define VSC73XX_MAC_CFG_SEED_MASK GENMASK(26, 19)
#define VSC73XX_MAC_CFG_SEED_OFFSET 19
#define VSC73XX_MAC_CFG_FDX BIT(18)
#define VSC73XX_MAC_CFG_GIGA_MODE BIT(17)
#define VSC73XX_MAC_CFG_RX_EN BIT(16)
#define VSC73XX_MAC_CFG_VLAN_DBLAWR BIT(15)
#define VSC73XX_MAC_CFG_VLAN_AWR BIT(14)
#define VSC73XX_MAC_CFG_100_BASE_T BIT(13) /* Not in manual */
#define VSC73XX_MAC_CFG_TX_IPG_MASK GENMASK(10, 6)
#define VSC73XX_MAC_CFG_TX_IPG_OFFSET 6
#define VSC73XX_MAC_CFG_TX_IPG_1000M (6 << VSC73XX_MAC_CFG_TX_IPG_OFFSET)
#define VSC73XX_MAC_CFG_TX_IPG_100_10M (17 << VSC73XX_MAC_CFG_TX_IPG_OFFSET)
#define VSC73XX_MAC_CFG_MAC_RX_RST BIT(5)
#define VSC73XX_MAC_CFG_MAC_TX_RST BIT(4)
#define VSC73XX_MAC_CFG_CLK_SEL_MASK GENMASK(2, 0)
#define VSC73XX_MAC_CFG_CLK_SEL_OFFSET 0
#define VSC73XX_MAC_CFG_CLK_SEL_1000M 1
#define VSC73XX_MAC_CFG_CLK_SEL_100M 2
#define VSC73XX_MAC_CFG_CLK_SEL_10M 3
#define VSC73XX_MAC_CFG_CLK_SEL_EXT 4
#define VSC73XX_MAC_CFG_1000M_F_PHY (VSC73XX_MAC_CFG_FDX | \
VSC73XX_MAC_CFG_GIGA_MODE | \
VSC73XX_MAC_CFG_TX_IPG_1000M | \
VSC73XX_MAC_CFG_CLK_SEL_EXT)
#define VSC73XX_MAC_CFG_100_10M_F_PHY (VSC73XX_MAC_CFG_FDX | \
VSC73XX_MAC_CFG_TX_IPG_100_10M | \
VSC73XX_MAC_CFG_CLK_SEL_EXT)
#define VSC73XX_MAC_CFG_100_10M_H_PHY (VSC73XX_MAC_CFG_TX_IPG_100_10M | \
VSC73XX_MAC_CFG_CLK_SEL_EXT)
#define VSC73XX_MAC_CFG_1000M_F_RGMII (VSC73XX_MAC_CFG_FDX | \
VSC73XX_MAC_CFG_GIGA_MODE | \
VSC73XX_MAC_CFG_TX_IPG_1000M | \
VSC73XX_MAC_CFG_CLK_SEL_1000M)
#define VSC73XX_MAC_CFG_RESET (VSC73XX_MAC_CFG_PORT_RST | \
VSC73XX_MAC_CFG_MAC_RX_RST | \
VSC73XX_MAC_CFG_MAC_TX_RST)
/* Flow control register bits */
#define VSC73XX_FCCONF_ZERO_PAUSE_EN BIT(17)
#define VSC73XX_FCCONF_FLOW_CTRL_OBEY BIT(16)
#define VSC73XX_FCCONF_PAUSE_VAL_MASK GENMASK(15, 0)
/* ADVPORTM advanced port setup register bits */
#define VSC73XX_ADVPORTM_IFG_PPM BIT(7)
#define VSC73XX_ADVPORTM_EXC_COL_CONT BIT(6)
#define VSC73XX_ADVPORTM_EXT_PORT BIT(5)
#define VSC73XX_ADVPORTM_INV_GTX BIT(4)
#define VSC73XX_ADVPORTM_ENA_GTX BIT(3)
#define VSC73XX_ADVPORTM_DDR_MODE BIT(2)
#define VSC73XX_ADVPORTM_IO_LOOPBACK BIT(1)
#define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0)
/* CAT_DROP categorizer frame dropping register bits */
#define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA BIT(6)
#define VSC73XX_CAT_DROP_FWD_CTRL_ENA BIT(4)
#define VSC73XX_CAT_DROP_FWD_PAUSE_ENA BIT(3)
#define VSC73XX_CAT_DROP_UNTAGGED_ENA BIT(2)
#define VSC73XX_CAT_DROP_TAGGED_ENA BIT(1)
#define VSC73XX_CAT_DROP_NULL_MAC_ENA BIT(0)
#define VSC73XX_Q_MISC_CONF_EXTENT_MEM BIT(31)
#define VSC73XX_Q_MISC_CONF_EARLY_TX_MASK GENMASK(4, 1)
#define VSC73XX_Q_MISC_CONF_EARLY_TX_512 (1 << 1)
#define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE BIT(0)
/* Frame analyzer block 2 registers */
#define VSC73XX_STORMLIMIT 0x02
#define VSC73XX_ADVLEARN 0x03
#define VSC73XX_IFLODMSK 0x04
#define VSC73XX_VLANMASK 0x05
#define VSC73XX_MACHDATA 0x06
#define VSC73XX_MACLDATA 0x07
#define VSC73XX_ANMOVED 0x08
#define VSC73XX_ANAGEFIL 0x09
#define VSC73XX_ANEVENTS 0x0a
#define VSC73XX_ANCNTMASK 0x0b
#define VSC73XX_ANCNTVAL 0x0c
#define VSC73XX_LEARNMASK 0x0d
#define VSC73XX_UFLODMASK 0x0e
#define VSC73XX_MFLODMASK 0x0f
#define VSC73XX_RECVMASK 0x10
#define VSC73XX_AGGRCTRL 0x20
#define VSC73XX_AGGRMSKS 0x30 /* Until 0x3f */
#define VSC73XX_DSTMASKS 0x40 /* Until 0x7f */
#define VSC73XX_SRCMASKS 0x80 /* Until 0x87 */
#define VSC73XX_CAPENAB 0xa0
#define VSC73XX_MACACCESS 0xb0
#define VSC73XX_IPMCACCESS 0xb1
#define VSC73XX_MACTINDX 0xc0
#define VSC73XX_VLANACCESS 0xd0
#define VSC73XX_VLANTIDX 0xe0
#define VSC73XX_AGENCTRL 0xf0
#define VSC73XX_CAPRST 0xff
#define VSC73XX_MACACCESS_CPU_COPY BIT(14)
#define VSC73XX_MACACCESS_FWD_KILL BIT(13)
#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12)
#define VSC73XX_MACACCESS_AGED_FLAG BIT(11)
#define VSC73XX_MACACCESS_VALID BIT(10)
#define VSC73XX_MACACCESS_LOCKED BIT(9)
#define VSC73XX_MACACCESS_DEST_IDX_MASK GENMASK(8, 3)
#define VSC73XX_MACACCESS_CMD_MASK GENMASK(2, 0)
#define VSC73XX_MACACCESS_CMD_IDLE 0
#define VSC73XX_MACACCESS_CMD_LEARN 1
#define VSC73XX_MACACCESS_CMD_FORGET 2
#define VSC73XX_MACACCESS_CMD_AGE_TABLE 3
#define VSC73XX_MACACCESS_CMD_FLUSH_TABLE 4
#define VSC73XX_MACACCESS_CMD_CLEAR_TABLE 5
#define VSC73XX_MACACCESS_CMD_READ_ENTRY 6
#define VSC73XX_MACACCESS_CMD_WRITE_ENTRY 7
#define VSC73XX_VLANACCESS_LEARN_DISABLED BIT(30)
#define VSC73XX_VLANACCESS_VLAN_MIRROR BIT(29)
#define VSC73XX_VLANACCESS_VLAN_SRC_CHECK BIT(28)
#define VSC73XX_VLANACCESS_VLAN_PORT_MASK GENMASK(9, 2)
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(2, 0)
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE 0
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY 1
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY 2
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3
/* MII block 3 registers */
#define VSC73XX_MII_STAT 0x0
#define VSC73XX_MII_CMD 0x1
#define VSC73XX_MII_DATA 0x2
/* Arbiter block 5 registers */
#define VSC73XX_ARBEMPTY 0x0c
#define VSC73XX_ARBDISC 0x0e
#define VSC73XX_SBACKWDROP 0x12
#define VSC73XX_DBACKWDROP 0x13
#define VSC73XX_ARBBURSTPROB 0x15
/* System block 7 registers */
#define VSC73XX_ICPU_SIPAD 0x01
#define VSC73XX_GMIIDELAY 0x05
#define VSC73XX_ICPU_CTRL 0x10
#define VSC73XX_ICPU_ADDR 0x11
#define VSC73XX_ICPU_SRAM 0x12
#define VSC73XX_HWSEM 0x13
#define VSC73XX_GLORESET 0x14
#define VSC73XX_ICPU_MBOX_VAL 0x15
#define VSC73XX_ICPU_MBOX_SET 0x16
#define VSC73XX_ICPU_MBOX_CLR 0x17
#define VSC73XX_CHIPID 0x18
#define VSC73XX_GPIO 0x34
#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE 0
#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS 1
#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS 2
#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS 3
#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE (0 << 4)
#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS (1 << 4)
#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS (2 << 4)
#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS (3 << 4)
#define VSC73XX_ICPU_CTRL_WATCHDOG_RST BIT(31)
#define VSC73XX_ICPU_CTRL_CLK_DIV_MASK GENMASK(12, 8)
#define VSC73XX_ICPU_CTRL_SRST_HOLD BIT(7)
#define VSC73XX_ICPU_CTRL_ICPU_PI_EN BIT(6)
#define VSC73XX_ICPU_CTRL_BOOT_EN BIT(3)
#define VSC73XX_ICPU_CTRL_EXT_ACC_EN BIT(2)
#define VSC73XX_ICPU_CTRL_CLK_EN BIT(1)
#define VSC73XX_ICPU_CTRL_SRST BIT(0)
#define VSC73XX_CHIPID_ID_SHIFT 12
#define VSC73XX_CHIPID_ID_MASK 0xffff
#define VSC73XX_CHIPID_REV_SHIFT 28
#define VSC73XX_CHIPID_REV_MASK 0xf
#define VSC73XX_CHIPID_ID_7385 0x7385
#define VSC73XX_CHIPID_ID_7388 0x7388
#define VSC73XX_CHIPID_ID_7395 0x7395
#define VSC73XX_CHIPID_ID_7398 0x7398
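/* Example decode (illustration only): a CHIPID register value of 0x17385000
 * yields id = (0x17385000 >> 12) & 0xffff = 0x7385 and
 * rev = (0x17385000 >> 28) & 0xf = 1, matching how vsc73xx_detect() below
 * extracts the fields.
 */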
#define VSC73XX_GLORESET_STROBE BIT(4)
#define VSC73XX_GLORESET_ICPU_LOCK BIT(3)
#define VSC73XX_GLORESET_MEM_LOCK BIT(2)
#define VSC73XX_GLORESET_PHY_RESET BIT(1)
#define VSC73XX_GLORESET_MASTER_RESET BIT(0)
#define VSC7385_CLOCK_DELAY ((3 << 4) | 3)
#define VSC7385_CLOCK_DELAY_MASK ((3 << 4) | 3)
#define VSC73XX_ICPU_CTRL_STOP (VSC73XX_ICPU_CTRL_SRST_HOLD | \
VSC73XX_ICPU_CTRL_BOOT_EN | \
VSC73XX_ICPU_CTRL_EXT_ACC_EN)
#define VSC73XX_ICPU_CTRL_START (VSC73XX_ICPU_CTRL_CLK_DIV | \
VSC73XX_ICPU_CTRL_BOOT_EN | \
VSC73XX_ICPU_CTRL_CLK_EN | \
VSC73XX_ICPU_CTRL_SRST)
#define IS_7385(a) ((a)->chipid == VSC73XX_CHIPID_ID_7385)
#define IS_7388(a) ((a)->chipid == VSC73XX_CHIPID_ID_7388)
#define IS_7395(a) ((a)->chipid == VSC73XX_CHIPID_ID_7395)
#define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398)
#define IS_739X(a) (IS_7395(a) || IS_7398(a))
struct vsc73xx_counter {
u8 counter;
const char *name;
};
/* Counters are named according to the MIB standards where applicable.
* Some counters are custom, non-standard. The standard counters are
* named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex
* 30A Counters.
*/
static const struct vsc73xx_counter vsc73xx_rx_counters[] = {
{ 0, "RxEtherStatsPkts" },
{ 1, "RxBroadcast+MulticastPkts" }, /* non-standard counter */
{ 2, "RxTotalErrorPackets" }, /* non-standard counter */
{ 3, "RxEtherStatsBroadcastPkts" },
{ 4, "RxEtherStatsMulticastPkts" },
{ 5, "RxEtherStatsPkts64Octets" },
{ 6, "RxEtherStatsPkts65to127Octets" },
{ 7, "RxEtherStatsPkts128to255Octets" },
{ 8, "RxEtherStatsPkts256to511Octets" },
{ 9, "RxEtherStatsPkts512to1023Octets" },
{ 10, "RxEtherStatsPkts1024to1518Octets" },
{ 11, "RxJumboFrames" }, /* non-standard counter */
{ 12, "RxaPauseMACControlFramesTransmitted" },
{ 13, "RxFIFODrops" }, /* non-standard counter */
{ 14, "RxBackwardDrops" }, /* non-standard counter */
{ 15, "RxClassifierDrops" }, /* non-standard counter */
{ 16, "RxEtherStatsCRCAlignErrors" },
{ 17, "RxEtherStatsUndersizePkts" },
{ 18, "RxEtherStatsOversizePkts" },
{ 19, "RxEtherStatsFragments" },
{ 20, "RxEtherStatsJabbers" },
{ 21, "RxaMACControlFramesReceived" },
/* 22-24 are undefined */
{ 25, "RxaFramesReceivedOK" },
{ 26, "RxQoSClass0" }, /* non-standard counter */
{ 27, "RxQoSClass1" }, /* non-standard counter */
{ 28, "RxQoSClass2" }, /* non-standard counter */
{ 29, "RxQoSClass3" }, /* non-standard counter */
};
static const struct vsc73xx_counter vsc73xx_tx_counters[] = {
{ 0, "TxEtherStatsPkts" },
{ 1, "TxBroadcast+MulticastPkts" }, /* non-standard counter */
{ 2, "TxTotalErrorPackets" }, /* non-standard counter */
{ 3, "TxEtherStatsBroadcastPkts" },
{ 4, "TxEtherStatsMulticastPkts" },
{ 5, "TxEtherStatsPkts64Octets" },
{ 6, "TxEtherStatsPkts65to127Octets" },
{ 7, "TxEtherStatsPkts128to255Octets" },
{ 8, "TxEtherStatsPkts256to511Octets" },
{ 9, "TxEtherStatsPkts512to1023Octets" },
{ 10, "TxEtherStatsPkts1024to1518Octets" },
{ 11, "TxJumboFrames" }, /* non-standard counter */
{ 12, "TxaPauseMACControlFramesTransmitted" },
{ 13, "TxFIFODrops" }, /* non-standard counter */
{ 14, "TxDrops" }, /* non-standard counter */
{ 15, "TxEtherStatsCollisions" },
{ 16, "TxEtherStatsCRCAlignErrors" },
{ 17, "TxEtherStatsUndersizePkts" },
{ 18, "TxEtherStatsOversizePkts" },
{ 19, "TxEtherStatsFragments" },
{ 20, "TxEtherStatsJabbers" },
/* 21-24 are undefined */
{ 25, "TxaFramesReceivedOK" },
{ 26, "TxQoSClass0" }, /* non-standard counter */
{ 27, "TxQoSClass1" }, /* non-standard counter */
{ 28, "TxQoSClass2" }, /* non-standard counter */
{ 29, "TxQoSClass3" }, /* non-standard counter */
};
int vsc73xx_is_addr_valid(u8 block, u8 subblock)
{
switch (block) {
case VSC73XX_BLOCK_MAC:
switch (subblock) {
case 0 ... 4:
case 6:
return 1;
}
break;
case VSC73XX_BLOCK_ANALYZER:
case VSC73XX_BLOCK_SYSTEM:
switch (subblock) {
case 0:
return 1;
}
break;
case VSC73XX_BLOCK_MII:
case VSC73XX_BLOCK_CAPTURE:
case VSC73XX_BLOCK_ARBITER:
switch (subblock) {
case 0 ... 1:
return 1;
}
break;
}
return 0;
}
EXPORT_SYMBOL(vsc73xx_is_addr_valid);
static int vsc73xx_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
u32 *val)
{
return vsc->ops->read(vsc, block, subblock, reg, val);
}
static int vsc73xx_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
u32 val)
{
return vsc->ops->write(vsc, block, subblock, reg, val);
}
static int vsc73xx_update_bits(struct vsc73xx *vsc, u8 block, u8 subblock,
u8 reg, u32 mask, u32 val)
{
u32 tmp, orig;
int ret;
/* Same read-modify-write algorithm as e.g. regmap */
ret = vsc73xx_read(vsc, block, subblock, reg, &orig);
if (ret)
return ret;
tmp = orig & ~mask;
tmp |= val & mask;
return vsc73xx_write(vsc, block, subblock, reg, tmp);
}
static int vsc73xx_detect(struct vsc73xx *vsc)
{
bool icpu_si_boot_en;
bool icpu_pi_en;
u32 val;
u32 rev;
int ret;
u32 id;
ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
VSC73XX_ICPU_MBOX_VAL, &val);
if (ret) {
dev_err(vsc->dev, "unable to read mailbox (%d)\n", ret);
return ret;
}
if (val == 0xffffffff) {
dev_info(vsc->dev, "chip seems dead.\n");
return -EAGAIN;
}
ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
VSC73XX_CHIPID, &val);
if (ret) {
dev_err(vsc->dev, "unable to read chip id (%d)\n", ret);
return ret;
}
id = (val >> VSC73XX_CHIPID_ID_SHIFT) &
VSC73XX_CHIPID_ID_MASK;
switch (id) {
case VSC73XX_CHIPID_ID_7385:
case VSC73XX_CHIPID_ID_7388:
case VSC73XX_CHIPID_ID_7395:
case VSC73XX_CHIPID_ID_7398:
break;
default:
dev_err(vsc->dev, "unsupported chip, id=%04x\n", id);
return -ENODEV;
}
vsc->chipid = id;
rev = (val >> VSC73XX_CHIPID_REV_SHIFT) &
VSC73XX_CHIPID_REV_MASK;
dev_info(vsc->dev, "VSC%04X (rev: %d) switch found\n", id, rev);
ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
VSC73XX_ICPU_CTRL, &val);
if (ret) {
dev_err(vsc->dev, "unable to read iCPU control\n");
return ret;
}
/* The iCPU can always be used but can boot in different ways.
* If it is initially disabled and has no external memory,
* we are in control and can do whatever we like, else we
* are probably in trouble (we need some way to communicate
* with the running firmware) so we bail out for now.
*/
icpu_pi_en = !!(val & VSC73XX_ICPU_CTRL_ICPU_PI_EN);
icpu_si_boot_en = !!(val & VSC73XX_ICPU_CTRL_BOOT_EN);
if (icpu_si_boot_en && icpu_pi_en) {
dev_err(vsc->dev,
"iCPU enabled boots from SI, has external memory\n");
dev_err(vsc->dev, "no idea how to deal with this\n");
return -ENODEV;
}
if (icpu_si_boot_en && !icpu_pi_en) {
dev_err(vsc->dev,
"iCPU enabled boots from PI/SI, no external memory\n");
return -EAGAIN;
}
if (!icpu_si_boot_en && icpu_pi_en) {
dev_err(vsc->dev,
"iCPU enabled, boots from PI external memory\n");
dev_err(vsc->dev, "no idea how to deal with this\n");
return -ENODEV;
}
	/* !icpu_si_boot_en && !icpu_pi_en */
dev_info(vsc->dev, "iCPU disabled, no external memory\n");
return 0;
}
static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
struct vsc73xx *vsc = ds->priv;
u32 cmd;
u32 val;
int ret;
/* Setting bit 26 means "read" */
cmd = BIT(26) | (phy << 21) | (regnum << 16);
ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
if (ret)
return ret;
msleep(2);
ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
if (ret)
return ret;
if (val & BIT(16)) {
dev_err(vsc->dev, "reading reg %02x from phy%d failed\n",
regnum, phy);
return -EIO;
}
val &= 0xFFFFU;
dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n",
regnum, phy, val);
return val;
}
static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
u16 val)
{
struct vsc73xx *vsc = ds->priv;
u32 cmd;
int ret;
/* It was found through tedious experiments that this router
 * chip really hates to have its PHYs reset. They
* never recover if that happens: autonegotiation stops
* working after a reset. Just filter out this command.
* (Resetting the whole chip is OK.)
*/
if (regnum == 0 && (val & BIT(15))) {
dev_info(vsc->dev, "reset PHY - disallowed\n");
return 0;
}
cmd = (phy << 21) | (regnum << 16);
ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
if (ret)
return ret;
dev_dbg(vsc->dev, "write %04x to reg %02x in phy%d\n",
val, regnum, phy);
return 0;
}
static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
{
/* The switch internally uses a 8 byte header with length,
* source port, tag, LPA and priority. This is supposedly
* only accessible when operating the switch using the internal
* CPU or with an external CPU mapping the device in, but not
* when operating the switch over SPI and putting frames in/out
* on port 6 (the CPU port). So far we must assume that we
* cannot access the tag. (See "Internal frame header" section
* 3.9.1 in the manual.)
*/
return DSA_TAG_PROTO_NONE;
}
static int vsc73xx_setup(struct dsa_switch *ds)
{
struct vsc73xx *vsc = ds->priv;
int i;
dev_info(vsc->dev, "set up the switch\n");
/* Issue RESET */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
VSC73XX_GLORESET_MASTER_RESET);
usleep_range(125, 200);
/* Initialize memory, initialize RAM bank 0..15 except 6 and 7
* This sequence appears in the
* VSC7385 SparX-G5 datasheet section 6.6.1
* VSC7395 SparX-G5e datasheet section 6.6.1
* "initialization sequence".
* No explanation is given to the 0x1010400 magic number.
*/
for (i = 0; i <= 15; i++) {
if (i != 6 && i != 7) {
vsc73xx_write(vsc, VSC73XX_BLOCK_MEMINIT,
2,
0, 0x1010400 + i);
mdelay(1);
}
}
mdelay(30);
/* Clear MAC table */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
VSC73XX_MACACCESS,
VSC73XX_MACACCESS_CMD_CLEAR_TABLE);
/* Clear VLAN table */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
VSC73XX_VLANACCESS,
VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE);
msleep(40);
/* Use 20KiB buffers on all ports on VSC7395
* The VSC7385 has 16KiB buffers and that is the
* default if we don't set this up explicitly.
* Port "31" is "all ports".
*/
if (IS_739X(vsc))
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, 0x1f,
VSC73XX_Q_MISC_CONF,
VSC73XX_Q_MISC_CONF_EXTENT_MEM);
/* Put all ports into reset until enabled */
for (i = 0; i < 7; i++) {
if (i == 5)
continue;
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, 4,
VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
}
/* MII delay, set both GTX and RX delay to 2 ns */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS |
VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS);
/* Enable reception of frames on all ports */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK,
0x5f);
/* IP multicast flood mask (table 144) */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK,
0xff);
mdelay(50);
/* Release reset from the internal PHYs */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
VSC73XX_GLORESET_PHY_RESET);
udelay(4);
return 0;
}
static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
{
u32 val;
/* MAC configure, first reset the port and then write defaults */
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
port,
VSC73XX_MAC_CFG,
VSC73XX_MAC_CFG_RESET);
	/* Bring up the port in 1 Gbit mode by default; this will be
	 * adjusted after auto-negotiation on the PHY-facing
	 * ports.
	 */
if (port == CPU_PORT)
val = VSC73XX_MAC_CFG_1000M_F_RGMII;
else
val = VSC73XX_MAC_CFG_1000M_F_PHY;
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
port,
VSC73XX_MAC_CFG,
val |
VSC73XX_MAC_CFG_TX_EN |
VSC73XX_MAC_CFG_RX_EN);
/* Flow control for the CPU port:
* Use a zero delay pause frame when pause condition is left
* Obey pause control frames
*/
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
port,
VSC73XX_FCCONF,
VSC73XX_FCCONF_ZERO_PAUSE_EN |
VSC73XX_FCCONF_FLOW_CTRL_OBEY);
/* Issue pause control frames on PHY facing ports.
* Allow early initiation of MAC transmission if the amount
* of egress data is below 512 bytes on CPU port.
* FIXME: enable 20KiB buffers?
*/
if (port == CPU_PORT)
val = VSC73XX_Q_MISC_CONF_EARLY_TX_512;
else
val = VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE;
val |= VSC73XX_Q_MISC_CONF_EXTENT_MEM;
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
port,
VSC73XX_Q_MISC_CONF,
val);
/* Flow control MAC: a MAC address used in flow control frames */
val = (vsc->addr[5] << 16) | (vsc->addr[4] << 8) | (vsc->addr[3]);
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
port,
VSC73XX_FCMACHI,
val);
val = (vsc->addr[2] << 16) | (vsc->addr[1] << 8) | (vsc->addr[0]);
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
port,
VSC73XX_FCMACLO,
val);
	/* Tell the categorizer to forward pause frames, not control
	 * frames. Do not drop anything.
*/
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
port,
VSC73XX_CAT_DROP,
VSC73XX_CAT_DROP_FWD_PAUSE_ENA);
/* Clear all counters */
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
port, VSC73XX_C_RX0, 0);
}
static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
int port, struct phy_device *phydev,
u32 initval)
{
u32 val = initval;
u8 seed;
/* Reset this port FIXME: break out subroutine */
val |= VSC73XX_MAC_CFG_RESET;
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
/* Seed the port randomness with randomness */
get_random_bytes(&seed, 1);
val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
val |= VSC73XX_MAC_CFG_SEED_LOAD;
val |= VSC73XX_MAC_CFG_WEXC_DIS;
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
/* Flow control for the PHY facing ports:
* Use a zero delay pause frame when pause condition is left
* Obey pause control frames
* When generating pause frames, use 0xff as pause value
*/
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_FCCONF,
VSC73XX_FCCONF_ZERO_PAUSE_EN |
VSC73XX_FCCONF_FLOW_CTRL_OBEY |
0xff);
/* Disallow backward dropping of frames from this port */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
VSC73XX_SBACKWDROP, BIT(port), 0);
/* Enable TX, RX, deassert reset, stop loading seed */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
VSC73XX_MAC_CFG,
VSC73XX_MAC_CFG_RESET | VSC73XX_MAC_CFG_SEED_LOAD |
VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN,
VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN);
}
static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct vsc73xx *vsc = ds->priv;
u32 val;
/* Special handling of the CPU-facing port */
if (port == CPU_PORT) {
/* Other ports are already initialized but not this one */
vsc73xx_init_port(vsc, CPU_PORT);
/* Select the external port for this interface (EXT_PORT)
* Enable the GMII GTX external clock
* Use double data rate (DDR mode)
*/
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
CPU_PORT,
VSC73XX_ADVPORTM,
VSC73XX_ADVPORTM_EXT_PORT |
VSC73XX_ADVPORTM_ENA_GTX |
VSC73XX_ADVPORTM_DDR_MODE);
}
	/* This is the MAC configuration that always needs to happen
* after a PHY or the CPU port comes up or down.
*/
if (!phydev->link) {
int maxloop = 10;
dev_dbg(vsc->dev, "port %d: went down\n",
port);
/* Disable RX on this port */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
VSC73XX_MAC_CFG,
VSC73XX_MAC_CFG_RX_EN, 0);
/* Discard packets */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
VSC73XX_ARBDISC, BIT(port), BIT(port));
/* Wait until queue is empty */
vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
VSC73XX_ARBEMPTY, &val);
while (!(val & BIT(port))) {
msleep(1);
vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
VSC73XX_ARBEMPTY, &val);
if (--maxloop == 0) {
dev_err(vsc->dev,
"timeout waiting for block arbiter\n");
/* Continue anyway */
break;
}
}
/* Put this port into reset */
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
VSC73XX_MAC_CFG_RESET);
/* Accept packets again */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
VSC73XX_ARBDISC, BIT(port), 0);
/* Allow backward dropping of frames from this port */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
VSC73XX_SBACKWDROP, BIT(port), BIT(port));
/* Receive mask (disable forwarding) */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
VSC73XX_RECVMASK, BIT(port), 0);
return;
}
/* Figure out what speed was negotiated */
if (phydev->speed == SPEED_1000) {
dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n",
port);
/* Set up default for internal port or external RGMII */
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
val = VSC73XX_MAC_CFG_1000M_F_RGMII;
else
val = VSC73XX_MAC_CFG_1000M_F_PHY;
vsc73xx_adjust_enable_port(vsc, port, phydev, val);
} else if (phydev->speed == SPEED_100) {
if (phydev->duplex == DUPLEX_FULL) {
val = VSC73XX_MAC_CFG_100_10M_F_PHY;
dev_dbg(vsc->dev,
"port %d: 100 Mbit full duplex mode\n",
port);
} else {
val = VSC73XX_MAC_CFG_100_10M_H_PHY;
dev_dbg(vsc->dev,
"port %d: 100 Mbit half duplex mode\n",
port);
}
vsc73xx_adjust_enable_port(vsc, port, phydev, val);
} else if (phydev->speed == SPEED_10) {
if (phydev->duplex == DUPLEX_FULL) {
val = VSC73XX_MAC_CFG_100_10M_F_PHY;
dev_dbg(vsc->dev,
"port %d: 10 Mbit full duplex mode\n",
port);
} else {
val = VSC73XX_MAC_CFG_100_10M_H_PHY;
dev_dbg(vsc->dev,
"port %d: 10 Mbit half duplex mode\n",
port);
}
vsc73xx_adjust_enable_port(vsc, port, phydev, val);
} else {
dev_err(vsc->dev,
"could not adjust link: unknown speed\n");
}
	/* Enable port (forwarding) in the receive mask */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
VSC73XX_RECVMASK, BIT(port), BIT(port));
}
static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct vsc73xx *vsc = ds->priv;
dev_info(vsc->dev, "enable port %d\n", port);
vsc73xx_init_port(vsc, port);
return 0;
}
static void vsc73xx_port_disable(struct dsa_switch *ds, int port)
{
struct vsc73xx *vsc = ds->priv;
/* Just put the port into reset */
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
}
static const struct vsc73xx_counter *
vsc73xx_find_counter(struct vsc73xx *vsc,
u8 counter,
bool tx)
{
const struct vsc73xx_counter *cnts;
int num_cnts;
int i;
if (tx) {
cnts = vsc73xx_tx_counters;
num_cnts = ARRAY_SIZE(vsc73xx_tx_counters);
} else {
cnts = vsc73xx_rx_counters;
num_cnts = ARRAY_SIZE(vsc73xx_rx_counters);
}
for (i = 0; i < num_cnts; i++) {
const struct vsc73xx_counter *cnt;
cnt = &cnts[i];
if (cnt->counter == counter)
return cnt;
}
return NULL;
}
static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
{
const struct vsc73xx_counter *cnt;
struct vsc73xx *vsc = ds->priv;
u8 indices[6];
int i, j;
u32 val;
int ret;
if (stringset != ETH_SS_STATS)
return;
ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port,
VSC73XX_C_CFG, &val);
if (ret)
return;
indices[0] = (val & 0x1f); /* RX counter 0 */
indices[1] = ((val >> 5) & 0x1f); /* RX counter 1 */
indices[2] = ((val >> 10) & 0x1f); /* RX counter 2 */
indices[3] = ((val >> 16) & 0x1f); /* TX counter 0 */
indices[4] = ((val >> 21) & 0x1f); /* TX counter 1 */
indices[5] = ((val >> 26) & 0x1f); /* TX counter 2 */
	/* The first counter is the RX octets */
j = 0;
strncpy(data + j * ETH_GSTRING_LEN,
"RxEtherStatsOctets", ETH_GSTRING_LEN);
j++;
/* Each port supports recording 3 RX counters and 3 TX counters,
* figure out what counters we use in this set-up and return the
* names of them. The hardware default counters will be number of
* packets on RX/TX, combined broadcast+multicast packets RX/TX and
* total error packets RX/TX.
*/
for (i = 0; i < 3; i++) {
cnt = vsc73xx_find_counter(vsc, indices[i], false);
if (cnt)
strncpy(data + j * ETH_GSTRING_LEN,
cnt->name, ETH_GSTRING_LEN);
j++;
}
/* TX stats begins with the number of TX octets */
strncpy(data + j * ETH_GSTRING_LEN,
"TxEtherStatsOctets", ETH_GSTRING_LEN);
j++;
for (i = 3; i < 6; i++) {
cnt = vsc73xx_find_counter(vsc, indices[i], true);
if (cnt)
strncpy(data + j * ETH_GSTRING_LEN,
cnt->name, ETH_GSTRING_LEN);
j++;
}
}
static int vsc73xx_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
/* We only support SS_STATS */
if (sset != ETH_SS_STATS)
return 0;
/* RX and TX packets, then 3 RX counters, 3 TX counters */
return 8;
}
static void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct vsc73xx *vsc = ds->priv;
u8 regs[] = {
VSC73XX_RXOCT,
VSC73XX_C_RX0,
VSC73XX_C_RX1,
VSC73XX_C_RX2,
VSC73XX_TXOCT,
VSC73XX_C_TX0,
VSC73XX_C_TX1,
VSC73XX_C_TX2,
};
u32 val;
int ret;
int i;
for (i = 0; i < ARRAY_SIZE(regs); i++) {
ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port,
regs[i], &val);
if (ret) {
dev_err(vsc->dev, "error reading counter %d\n", i);
return;
}
data[i] = val;
}
}
static int vsc73xx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct vsc73xx *vsc = ds->priv;
return vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
VSC73XX_MAXLEN, new_mtu + ETH_HLEN + ETH_FCS_LEN);
}
/* According to application note "VSC7398 Jumbo Frames", setting
* up the frame size to 9.6 KB does not affect the performance on standard
* frames. It is clear from the application note that
* "9.6 kilobytes" == 9600 bytes.
*/
static int vsc73xx_get_max_mtu(struct dsa_switch *ds, int port)
{
return 9600 - ETH_HLEN - ETH_FCS_LEN;
}
static const struct dsa_switch_ops vsc73xx_ds_ops = {
.get_tag_protocol = vsc73xx_get_tag_protocol,
.setup = vsc73xx_setup,
.phy_read = vsc73xx_phy_read,
.phy_write = vsc73xx_phy_write,
.adjust_link = vsc73xx_adjust_link,
.get_strings = vsc73xx_get_strings,
.get_ethtool_stats = vsc73xx_get_ethtool_stats,
.get_sset_count = vsc73xx_get_sset_count,
.port_enable = vsc73xx_port_enable,
.port_disable = vsc73xx_port_disable,
.port_change_mtu = vsc73xx_change_mtu,
.port_max_mtu = vsc73xx_get_max_mtu,
};
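/* The single GPIO register appears to hold the four pin values in bits 0..3
 * and their directions in bits 4..7 (bit set = output); this is inferred from
 * the accessors below rather than from a datasheet reference in this file.
 */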
static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct vsc73xx *vsc = gpiochip_get_data(chip);
u32 val;
int ret;
ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
VSC73XX_GPIO, &val);
if (ret)
return ret;
return !!(val & BIT(offset));
}
static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
int val)
{
struct vsc73xx *vsc = gpiochip_get_data(chip);
u32 tmp = val ? BIT(offset) : 0;
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
VSC73XX_GPIO, BIT(offset), tmp);
}
static int vsc73xx_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int val)
{
struct vsc73xx *vsc = gpiochip_get_data(chip);
u32 tmp = val ? BIT(offset) : 0;
return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
VSC73XX_GPIO, BIT(offset + 4) | BIT(offset),
BIT(offset + 4) | tmp);
}
static int vsc73xx_gpio_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
struct vsc73xx *vsc = gpiochip_get_data(chip);
return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
VSC73XX_GPIO, BIT(offset + 4),
0);
}
static int vsc73xx_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
struct vsc73xx *vsc = gpiochip_get_data(chip);
u32 val;
int ret;
ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
VSC73XX_GPIO, &val);
if (ret)
return ret;
return !(val & BIT(offset + 4));
}
static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
{
int ret;
vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
vsc->chipid);
vsc->gc.ngpio = 4;
vsc->gc.owner = THIS_MODULE;
vsc->gc.parent = vsc->dev;
vsc->gc.base = -1;
vsc->gc.get = vsc73xx_gpio_get;
vsc->gc.set = vsc73xx_gpio_set;
vsc->gc.direction_input = vsc73xx_gpio_direction_input;
vsc->gc.direction_output = vsc73xx_gpio_direction_output;
vsc->gc.get_direction = vsc73xx_gpio_get_direction;
vsc->gc.can_sleep = true;
ret = devm_gpiochip_add_data(vsc->dev, &vsc->gc, vsc);
if (ret) {
dev_err(vsc->dev, "unable to register GPIO chip\n");
return ret;
}
return 0;
}
int vsc73xx_probe(struct vsc73xx *vsc)
{
struct device *dev = vsc->dev;
int ret;
/* Release reset, if any */
vsc->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(vsc->reset)) {
dev_err(dev, "failed to get RESET GPIO\n");
return PTR_ERR(vsc->reset);
}
if (vsc->reset)
/* Wait 20ms according to datasheet table 245 */
msleep(20);
ret = vsc73xx_detect(vsc);
if (ret == -EAGAIN) {
dev_err(vsc->dev,
"Chip seems to be out of control. Assert reset and try again.\n");
gpiod_set_value_cansleep(vsc->reset, 1);
/* Reset pulse should be 20ns minimum, according to datasheet
* table 245, so 10us should be fine
*/
usleep_range(10, 100);
gpiod_set_value_cansleep(vsc->reset, 0);
/* Wait 20ms according to datasheet table 245 */
msleep(20);
ret = vsc73xx_detect(vsc);
}
if (ret) {
dev_err(dev, "no chip found (%d)\n", ret);
return -ENODEV;
}
eth_random_addr(vsc->addr);
dev_info(vsc->dev,
"MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n",
vsc->addr[0], vsc->addr[1], vsc->addr[2],
vsc->addr[3], vsc->addr[4], vsc->addr[5]);
/* The VSC7395 switch chips have 5+1 ports which means 5
* ordinary ports and a sixth CPU port facing the processor
* with an RGMII interface. These ports are numbered 0..4
* and 6, so they leave a "hole" in the port map for port 5,
* which is invalid.
*
* The VSC7398 has 8 ports, port 7 is again the CPU port.
*
	 * We allocate 8 ports and avoid access to the nonexistent
* ports.
*/
vsc->ds = devm_kzalloc(dev, sizeof(*vsc->ds), GFP_KERNEL);
if (!vsc->ds)
return -ENOMEM;
vsc->ds->dev = dev;
vsc->ds->num_ports = 8;
vsc->ds->priv = vsc;
vsc->ds->ops = &vsc73xx_ds_ops;
ret = dsa_register_switch(vsc->ds);
if (ret) {
dev_err(dev, "unable to register switch (%d)\n", ret);
return ret;
}
ret = vsc73xx_gpio_probe(vsc);
if (ret) {
dsa_unregister_switch(vsc->ds);
return ret;
}
return 0;
}
EXPORT_SYMBOL(vsc73xx_probe);
void vsc73xx_remove(struct vsc73xx *vsc)
{
dsa_unregister_switch(vsc->ds);
gpiod_set_value(vsc->reset, 1);
}
EXPORT_SYMBOL(vsc73xx_remove);
void vsc73xx_shutdown(struct vsc73xx *vsc)
{
dsa_switch_shutdown(vsc->ds);
}
EXPORT_SYMBOL(vsc73xx_shutdown);
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/vitesse-vsc73xx-core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Pengutronix, Juergen Borleis <[email protected]>
*
* Partially based on a patch from
* Copyright (c) 2014 Stefan Roese <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/of.h>
#include "lan9303.h"
/* Generate phy-addr and -reg from the input address */
#define PHY_ADDR(x) ((((x) >> 6) + 0x10) & 0x1f)
#define PHY_REG(x) (((x) >> 1) & 0x1f)
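/* Worked example (illustration only): byte offset 0x50 maps to PHY address
 * ((0x50 >> 6) + 0x10) & 0x1f = 0x11 and PHY register (0x50 >> 1) & 0x1f =
 * 0x08.
 */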
struct lan9303_mdio {
struct mdio_device *device;
struct lan9303 chip;
};
static void lan9303_mdio_real_write(struct mdio_device *mdio, int reg, u16 val)
{
mdio->bus->write(mdio->bus, PHY_ADDR(reg), PHY_REG(reg), val);
}
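/* A 32-bit switch register is transferred as two consecutive 16-bit MDIO
 * accesses under one bus lock: the low half at the mapped offset and the
 * high half at offset + 2, as done by the read/write helpers below.
 */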
static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
{
struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
reg <<= 2; /* reg num to offset */
mutex_lock(&sw_dev->device->bus->mdio_lock);
lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
mutex_unlock(&sw_dev->device->bus->mdio_lock);
return 0;
}
static u16 lan9303_mdio_real_read(struct mdio_device *mdio, int reg)
{
return mdio->bus->read(mdio->bus, PHY_ADDR(reg), PHY_REG(reg));
}
static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
{
struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
reg <<= 2; /* reg num to offset */
mutex_lock(&sw_dev->device->bus->mdio_lock);
*val = lan9303_mdio_real_read(sw_dev->device, reg);
*val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
mutex_unlock(&sw_dev->device->bus->mdio_lock);
return 0;
}
static int lan9303_mdio_phy_write(struct lan9303 *chip, int phy, int reg,
u16 val)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
return mdiobus_write_nested(sw_dev->device->bus, phy, reg, val);
}
static int lan9303_mdio_phy_read(struct lan9303 *chip, int phy, int reg)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
return mdiobus_read_nested(sw_dev->device->bus, phy, reg);
}
static const struct lan9303_phy_ops lan9303_mdio_phy_ops = {
.phy_read = lan9303_mdio_phy_read,
.phy_write = lan9303_mdio_phy_write,
};
static const struct regmap_config lan9303_mdio_regmap_config = {
.reg_bits = 8,
.val_bits = 32,
.reg_stride = 1,
.can_multi_write = true,
.max_register = 0x0ff, /* address bits 0..1 are not used */
.reg_format_endian = REGMAP_ENDIAN_LITTLE,
.volatile_table = &lan9303_register_set,
.wr_table = &lan9303_register_set,
.rd_table = &lan9303_register_set,
.reg_read = lan9303_mdio_read,
.reg_write = lan9303_mdio_write,
.cache_type = REGCACHE_NONE,
};
static int lan9303_mdio_probe(struct mdio_device *mdiodev)
{
struct lan9303_mdio *sw_dev;
int ret;
sw_dev = devm_kzalloc(&mdiodev->dev, sizeof(struct lan9303_mdio),
GFP_KERNEL);
if (!sw_dev)
return -ENOMEM;
sw_dev->chip.regmap = devm_regmap_init(&mdiodev->dev, NULL, sw_dev,
&lan9303_mdio_regmap_config);
if (IS_ERR(sw_dev->chip.regmap)) {
ret = PTR_ERR(sw_dev->chip.regmap);
dev_err(&mdiodev->dev, "regmap init failed: %d\n", ret);
return ret;
}
/* link forward and backward */
sw_dev->device = mdiodev;
dev_set_drvdata(&mdiodev->dev, sw_dev);
sw_dev->chip.dev = &mdiodev->dev;
sw_dev->chip.ops = &lan9303_mdio_phy_ops;
ret = lan9303_probe(&sw_dev->chip, mdiodev->dev.of_node);
if (ret != 0)
return ret;
dev_info(&mdiodev->dev, "LAN9303 MDIO driver loaded successfully\n");
return 0;
}
static void lan9303_mdio_remove(struct mdio_device *mdiodev)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(&mdiodev->dev);
if (!sw_dev)
return;
lan9303_remove(&sw_dev->chip);
}
static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(&mdiodev->dev);
if (!sw_dev)
return;
lan9303_shutdown(&sw_dev->chip);
dev_set_drvdata(&mdiodev->dev, NULL);
}
/*-------------------------------------------------------------------------*/
static const struct of_device_id lan9303_mdio_of_match[] = {
{ .compatible = "smsc,lan9303-mdio" },
{ .compatible = "microchip,lan9354-mdio" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lan9303_mdio_of_match);
static struct mdio_driver lan9303_mdio_driver = {
.mdiodrv.driver = {
.name = "LAN9303_MDIO",
.of_match_table = lan9303_mdio_of_match,
},
.probe = lan9303_mdio_probe,
.remove = lan9303_mdio_remove,
.shutdown = lan9303_mdio_shutdown,
};
mdio_module_driver(lan9303_mdio_driver);
MODULE_AUTHOR("Stefan Roese <[email protected]>, Juergen Borleis <[email protected]>");
MODULE_DESCRIPTION("Driver for SMSC/Microchip LAN9303 three port ethernet switch in MDIO managed mode");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/lan9303_mdio.c |
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* DSA driver for:
* Hirschmann Hellcreek TSN switch.
*
* Copyright (C) 2019,2020 Hochschule Offenburg
* Copyright (C) 2019,2020 Linutronix GmbH
* Authors: Kamil Alkhouri <[email protected]>
* Kurt Kanzenbach <[email protected]>
*/
#include <linux/ptp_classify.h>
#include "hellcreek.h"
#include "hellcreek_hwtstamp.h"
#include "hellcreek_ptp.h"
int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *info)
{
struct hellcreek *hellcreek = ds->priv;
info->phc_index = hellcreek->ptp_clock ?
ptp_clock_index(hellcreek->ptp_clock) : -1;
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
	/* TX timestamping is always enabled */
info->tx_types = BIT(HWTSTAMP_TX_ON);
/* L2 & L4 PTPv2 event rx messages are timestamped */
info->rx_filters = BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
/* Enabling/disabling TX and RX HW timestamping for different PTP messages is
* not available in the switch. Thus, this function only serves as a check if
* the user requested what is actually available or not
*/
static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
struct hwtstamp_config *config)
{
struct hellcreek_port_hwtstamp *ps =
&hellcreek->ports[port].port_hwtstamp;
bool tx_tstamp_enable = false;
bool rx_tstamp_enable = false;
/* Interaction with the timestamp hardware is prevented here. It is
* enabled when this config function ends successfully
*/
clear_bit_unlock(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);
switch (config->tx_type) {
case HWTSTAMP_TX_ON:
tx_tstamp_enable = true;
break;
/* TX HW timestamping can't be disabled on the switch */
case HWTSTAMP_TX_OFF:
config->tx_type = HWTSTAMP_TX_ON;
break;
default:
return -ERANGE;
}
switch (config->rx_filter) {
/* RX HW timestamping can't be disabled on the switch */
case HWTSTAMP_FILTER_NONE:
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
rx_tstamp_enable = true;
break;
/* RX HW timestamping can't be enabled for all messages on the switch */
case HWTSTAMP_FILTER_ALL:
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
if (!tx_tstamp_enable)
return -ERANGE;
if (!rx_tstamp_enable)
return -ERANGE;
/* If this point is reached, then the requested hwtstamp config is
* compatible with the hwtstamp offered by the switch. Therefore,
* enable the interaction with the HW timestamping
*/
set_bit(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);
return 0;
}
int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
struct hwtstamp_config config;
int err;
ps = &hellcreek->ports[port].port_hwtstamp;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
err = hellcreek_set_hwtstamp_config(hellcreek, port, &config);
if (err)
return err;
/* Save the chosen configuration to be returned later */
memcpy(&ps->tstamp_config, &config, sizeof(config));
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
struct hwtstamp_config *config;
ps = &hellcreek->ports[port].port_hwtstamp;
config = &ps->tstamp_config;
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}
/* Returns a pointer to the PTP header if the caller should time stamp, or NULL
* if the caller should not.
*/
static struct ptp_header *hellcreek_should_tstamp(struct hellcreek *hellcreek,
int port, struct sk_buff *skb,
unsigned int type)
{
struct hellcreek_port_hwtstamp *ps =
&hellcreek->ports[port].port_hwtstamp;
struct ptp_header *hdr;
hdr = ptp_parse_header(skb, type);
if (!hdr)
return NULL;
if (!test_bit(HELLCREEK_HWTSTAMP_ENABLED, &ps->state))
return NULL;
return hdr;
}
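/* The switch delivers the nanoseconds part of the RX timestamp inline in the
 * reserved field of the PTP header (see the "inline timestamping" setup in
 * hellcreek_hwtstamp_setup()); the helpers below extract that value and clear
 * the field again before the frame is passed up.
 */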
static u64 hellcreek_get_reserved_field(const struct ptp_header *hdr)
{
return be32_to_cpu(hdr->reserved2);
}
static void hellcreek_clear_reserved_field(struct ptp_header *hdr)
{
hdr->reserved2 = 0;
}
static int hellcreek_ptp_hwtstamp_available(struct hellcreek *hellcreek,
unsigned int ts_reg)
{
u16 status;
status = hellcreek_ptp_read(hellcreek, ts_reg);
if (status & PR_TS_STATUS_TS_LOST)
dev_err(hellcreek->dev,
"Tx time stamp lost! This should never happen!\n");
/* If hwtstamp is not available, this means the previous hwtstamp was
* successfully read, and the one we need is not yet available
*/
return (status & PR_TS_STATUS_TS_AVAIL) ? 1 : 0;
}
/* Get nanoseconds timestamp from timestamping unit */
static u64 hellcreek_ptp_hwtstamp_read(struct hellcreek *hellcreek,
unsigned int ts_reg)
{
u16 nsl, nsh;
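	/* The data register is read back-to-back; only the last two reads
	 * (high and low 16 bits of the nanoseconds value) are used and the
	 * earlier reads are deliberately discarded. The exact word sequence
	 * returned by the hardware is assumed here, not documented in this
	 * file.
	 */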
nsh = hellcreek_ptp_read(hellcreek, ts_reg);
nsh = hellcreek_ptp_read(hellcreek, ts_reg);
nsh = hellcreek_ptp_read(hellcreek, ts_reg);
nsh = hellcreek_ptp_read(hellcreek, ts_reg);
nsl = hellcreek_ptp_read(hellcreek, ts_reg);
return (u64)nsl | ((u64)nsh << 16);
}
static int hellcreek_txtstamp_work(struct hellcreek *hellcreek,
struct hellcreek_port_hwtstamp *ps, int port)
{
struct skb_shared_hwtstamps shhwtstamps;
unsigned int status_reg, data_reg;
struct sk_buff *tmp_skb;
int ts_status;
u64 ns = 0;
if (!ps->tx_skb)
return 0;
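	/* Only user ports 2 and 3 have TX timestamping registers wired up in
	 * the mapping below; this is inferred from the register names used
	 * here, not from a datasheet reference in this file.
	 */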
switch (port) {
case 2:
status_reg = PR_TS_TX_P1_STATUS_C;
data_reg = PR_TS_TX_P1_DATA_C;
break;
case 3:
status_reg = PR_TS_TX_P2_STATUS_C;
data_reg = PR_TS_TX_P2_DATA_C;
break;
default:
dev_err(hellcreek->dev, "Wrong port for timestamping!\n");
return 0;
}
ts_status = hellcreek_ptp_hwtstamp_available(hellcreek, status_reg);
/* Not available yet? */
if (ts_status == 0) {
/* Check whether the operation of reading the tx timestamp has
* exceeded its allowed period
*/
if (time_is_before_jiffies(ps->tx_tstamp_start +
TX_TSTAMP_TIMEOUT)) {
dev_err(hellcreek->dev,
"Timeout while waiting for Tx timestamp!\n");
goto free_and_clear_skb;
}
		/* The timestamp should become available shortly; restart the
		 * worker so it can be polled again at high priority
		 */
return 1;
}
mutex_lock(&hellcreek->ptp_lock);
ns = hellcreek_ptp_hwtstamp_read(hellcreek, data_reg);
ns += hellcreek_ptp_gettime_seconds(hellcreek, ns);
mutex_unlock(&hellcreek->ptp_lock);
/* Now we have the timestamp in nanoseconds, store it in the correct
* structure in order to send it to the user
*/
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
tmp_skb = ps->tx_skb;
ps->tx_skb = NULL;
/* skb_complete_tx_timestamp() frees up the client to make another
* timestampable transmit. We have to be ready for it by clearing the
* ps->tx_skb "flag" beforehand
*/
clear_bit_unlock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
/* Deliver a clone of the original outgoing tx_skb with tx hwtstamp */
skb_complete_tx_timestamp(tmp_skb, &shhwtstamps);
return 0;
free_and_clear_skb:
dev_kfree_skb_any(ps->tx_skb);
ps->tx_skb = NULL;
clear_bit_unlock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
return 0;
}
static void hellcreek_get_rxts(struct hellcreek *hellcreek,
struct hellcreek_port_hwtstamp *ps,
struct sk_buff *skb, struct sk_buff_head *rxq,
int port)
{
struct skb_shared_hwtstamps *shwt;
struct sk_buff_head received;
unsigned long flags;
/* Construct Rx timestamps for all received PTP packets. */
__skb_queue_head_init(&received);
spin_lock_irqsave(&rxq->lock, flags);
skb_queue_splice_tail_init(rxq, &received);
spin_unlock_irqrestore(&rxq->lock, flags);
for (; skb; skb = __skb_dequeue(&received)) {
struct ptp_header *hdr;
unsigned int type;
u64 ns;
/* Get nanoseconds from ptp packet */
type = SKB_PTP_TYPE(skb);
hdr = ptp_parse_header(skb, type);
ns = hellcreek_get_reserved_field(hdr);
hellcreek_clear_reserved_field(hdr);
/* Add seconds part */
mutex_lock(&hellcreek->ptp_lock);
ns += hellcreek_ptp_gettime_seconds(hellcreek, ns);
mutex_unlock(&hellcreek->ptp_lock);
/* Save time stamp */
shwt = skb_hwtstamps(skb);
memset(shwt, 0, sizeof(*shwt));
shwt->hwtstamp = ns_to_ktime(ns);
netif_rx(skb);
}
}
static void hellcreek_rxtstamp_work(struct hellcreek *hellcreek,
struct hellcreek_port_hwtstamp *ps,
int port)
{
struct sk_buff *skb;
skb = skb_dequeue(&ps->rx_queue);
if (skb)
hellcreek_get_rxts(hellcreek, ps, skb, &ps->rx_queue, port);
}
long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp)
{
struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
struct dsa_switch *ds = hellcreek->ds;
int i, restart = 0;
for (i = 0; i < ds->num_ports; i++) {
struct hellcreek_port_hwtstamp *ps;
if (!dsa_is_user_port(ds, i))
continue;
ps = &hellcreek->ports[i].port_hwtstamp;
if (test_bit(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state))
restart |= hellcreek_txtstamp_work(hellcreek, ps, i);
hellcreek_rxtstamp_work(hellcreek, ps, i);
}
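/* do_aux_work() semantics: a positive return value makes the PTP core re-run
* this worker after that many jiffies, a negative value stops it until
* ptp_schedule_worker() is called again.
*/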
return restart ? 1 : -1;
}
void hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
struct ptp_header *hdr;
struct sk_buff *clone;
unsigned int type;
ps = &hellcreek->ports[port].port_hwtstamp;
type = ptp_classify_raw(skb);
if (type == PTP_CLASS_NONE)
return;
/* Make sure the message is a PTP message that needs to be timestamped
* and the interaction with the HW timestamping is enabled. If not, stop
* here
*/
hdr = hellcreek_should_tstamp(hellcreek, port, skb, type);
if (!hdr)
return;
clone = skb_clone_sk(skb);
if (!clone)
return;
if (test_and_set_bit_lock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS,
&ps->state)) {
kfree_skb(clone);
return;
}
ps->tx_skb = clone;
/* Store the number of ticks that have elapsed since system start-up
* until this moment
*/
ps->tx_tstamp_start = jiffies;
ptp_schedule_worker(hellcreek->ptp_clock, 0);
}
bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
struct ptp_header *hdr;
ps = &hellcreek->ports[port].port_hwtstamp;
/* This check only fails if the user did not initialize hardware
* timestamping beforehand.
*/
if (ps->tstamp_config.rx_filter != HWTSTAMP_FILTER_PTP_V2_EVENT)
return false;
/* Make sure the message is a PTP message that needs to be timestamped
* and the interaction with the HW timestamping is enabled. If not, stop
* here
*/
hdr = hellcreek_should_tstamp(hellcreek, port, skb, type);
if (!hdr)
return false;
SKB_PTP_TYPE(skb) = type;
skb_queue_tail(&ps->rx_queue, skb);
ptp_schedule_worker(hellcreek->ptp_clock, 0);
return true;
}
static void hellcreek_hwtstamp_port_setup(struct hellcreek *hellcreek, int port)
{
struct hellcreek_port_hwtstamp *ps =
&hellcreek->ports[port].port_hwtstamp;
skb_queue_head_init(&ps->rx_queue);
}
int hellcreek_hwtstamp_setup(struct hellcreek *hellcreek)
{
struct dsa_switch *ds = hellcreek->ds;
int i;
/* Initialize timestamping ports. */
for (i = 0; i < ds->num_ports; ++i) {
if (!dsa_is_user_port(ds, i))
continue;
hellcreek_hwtstamp_port_setup(hellcreek, i);
}
/* Select the synchronized clock as the source timekeeper for the
* timestamps and enable inline timestamping.
*/
hellcreek_ptp_write(hellcreek, PR_SETTINGS_C_TS_SRC_TK_MASK |
PR_SETTINGS_C_RES3TS,
PR_SETTINGS_C);
return 0;
}
void hellcreek_hwtstamp_free(struct hellcreek *hellcreek)
{
/* Nothing to do */
}
| linux-master | drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c |
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* DSA driver for:
* Hirschmann Hellcreek TSN switch.
*
* Copyright (C) 2019,2020 Hochschule Offenburg
* Copyright (C) 2019,2020 Linutronix GmbH
* Authors: Kamil Alkhouri <[email protected]>
* Kurt Kanzenbach <[email protected]>
*/
#include <linux/of.h>
#include <linux/ptp_clock_kernel.h>
#include "hellcreek.h"
#include "hellcreek_ptp.h"
#include "hellcreek_hwtstamp.h"
u16 hellcreek_ptp_read(struct hellcreek *hellcreek, unsigned int offset)
{
return readw(hellcreek->ptp_base + offset);
}
void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data,
unsigned int offset)
{
writew(data, hellcreek->ptp_base + offset);
}
/* Get nanoseconds from PTP clock */
static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek)
{
u16 nsl, nsh;
/* Take a snapshot */
hellcreek_ptp_write(hellcreek, PR_COMMAND_C_SS, PR_COMMAND_C);
/* The time of the day is saved as 96 bits. However, due to hardware
* limitations the seconds are not or only partly kept in the PTP
* core. Currently only three bits for the seconds are available. That's
* why only the nanoseconds are used and the seconds are tracked in
* software. Anyway due to internal locking all five registers should be
* read.
*/
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
nsl = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
return (u64)nsl | ((u64)nsh << 16);
}
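/* Read the current time and track full seconds in software: whenever the
* nanoseconds value returned by the hardware is smaller than the previously
* read one, the nanoseconds counter has wrapped and the software seconds
* counter is incremented.
*/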
static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek)
{
u64 ns;
ns = hellcreek_ptp_clock_read(hellcreek);
if (ns < hellcreek->last_ts)
hellcreek->seconds++;
hellcreek->last_ts = ns;
ns += hellcreek->seconds * NSEC_PER_SEC;
return ns;
}
/* Retrieve the seconds part in nanoseconds for a packet timestamped with @ns.
* There has to be a check whether an overflow occurred between the packet
* arrival and now. If so use the correct seconds (-1) for calculating the
* packet arrival time.
*/
u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns)
{
u64 s;
__hellcreek_ptp_gettime(hellcreek);
if (hellcreek->last_ts > ns)
s = hellcreek->seconds * NSEC_PER_SEC;
else
s = (hellcreek->seconds - 1) * NSEC_PER_SEC;
return s;
}
static int hellcreek_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
{
struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
u64 ns;
mutex_lock(&hellcreek->ptp_lock);
ns = __hellcreek_ptp_gettime(hellcreek);
mutex_unlock(&hellcreek->ptp_lock);
*ts = ns_to_timespec64(ns);
return 0;
}
static int hellcreek_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
u16 secl, nsh, nsl;
secl = ts->tv_sec & 0xffff;
nsh = ((u32)ts->tv_nsec & 0xffff0000) >> 16;
nsl = ts->tv_nsec & 0xffff;
mutex_lock(&hellcreek->ptp_lock);
/* Update overflow data structure */
hellcreek->seconds = ts->tv_sec;
hellcreek->last_ts = ts->tv_nsec;
/* Set time in clock */
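/* The write sequence programs five 16 bit words. The first two are written as
* zero and only the lower 16 bits of the seconds plus the nanoseconds are
* programmed; the full seconds value is tracked in software anyway.
*/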
hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_WRITE_C);
hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_WRITE_C);
hellcreek_ptp_write(hellcreek, secl, PR_CLOCK_WRITE_C);
hellcreek_ptp_write(hellcreek, nsh, PR_CLOCK_WRITE_C);
hellcreek_ptp_write(hellcreek, nsl, PR_CLOCK_WRITE_C);
mutex_unlock(&hellcreek->ptp_lock);
return 0;
}
static int hellcreek_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
u16 negative = 0, addendh, addendl;
u32 addend;
u64 adj;
if (scaled_ppm < 0) {
negative = 1;
scaled_ppm = -scaled_ppm;
}
/* IP-Core adjusts the nominal frequency by adding or subtracting 1 ns
* from the 8 ns (period of the oscillator) every time the accumulator
* register overflows. The value stored in the addend register is added
* to the accumulator register every 8 ns.
*
* addend value = (2^30 * accumulator_overflow_rate) /
* oscillator_frequency
* where:
*
* oscillator_frequency = 125 MHz
* accumulator_overflow_rate = 125 MHz * scaled_ppm * 2^-16 * 10^-6 * 8
*/
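/* The formula above reduces to addend = scaled_ppm * 2^11 / 15625, which is
* what the shift and division below implement. For example, a correction of
* 1 ppm (scaled_ppm = 65536) yields addend = (65536 << 11) / 15625 = 8589.
*/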
adj = scaled_ppm;
adj <<= 11;
addend = (u32)div_u64(adj, 15625);
addendh = (addend & 0xffff0000) >> 16;
addendl = addend & 0xffff;
negative = (negative << 15) & 0x8000;
mutex_lock(&hellcreek->ptp_lock);
/* Set drift register */
hellcreek_ptp_write(hellcreek, negative, PR_CLOCK_DRIFT_C);
hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_DRIFT_C);
hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_DRIFT_C);
hellcreek_ptp_write(hellcreek, addendh, PR_CLOCK_DRIFT_C);
hellcreek_ptp_write(hellcreek, addendl, PR_CLOCK_DRIFT_C);
mutex_unlock(&hellcreek->ptp_lock);
return 0;
}
static int hellcreek_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
u16 negative = 0, counth, countl;
u32 count_val;
/* If the offset is larger than the IP-Core slow offset resources, don't
* consider slow adjustment. Rather, add the offset directly to the
* current time.
*/
if (abs(delta) > MAX_SLOW_OFFSET_ADJ) {
struct timespec64 now, then = ns_to_timespec64(delta);
hellcreek_ptp_gettime(ptp, &now);
now = timespec64_add(now, then);
hellcreek_ptp_settime(ptp, &now);
return 0;
}
if (delta < 0) {
negative = 1;
delta = -delta;
}
/* 'count_val' does not exceed the maximum register size (2^30) */
count_val = div_s64(delta, MAX_NS_PER_STEP);
counth = (count_val & 0xffff0000) >> 16;
countl = count_val & 0xffff;
negative = (negative << 15) & 0x8000;
mutex_lock(&hellcreek->ptp_lock);
/* Set offset write register */
hellcreek_ptp_write(hellcreek, negative, PR_CLOCK_OFFSET_C);
hellcreek_ptp_write(hellcreek, MAX_NS_PER_STEP, PR_CLOCK_OFFSET_C);
hellcreek_ptp_write(hellcreek, MIN_CLK_CYCLES_BETWEEN_STEPS,
PR_CLOCK_OFFSET_C);
hellcreek_ptp_write(hellcreek, countl, PR_CLOCK_OFFSET_C);
hellcreek_ptp_write(hellcreek, counth, PR_CLOCK_OFFSET_C);
mutex_unlock(&hellcreek->ptp_lock);
return 0;
}
static int hellcreek_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
return -EOPNOTSUPP;
}
static void hellcreek_ptp_overflow_check(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
struct hellcreek *hellcreek;
hellcreek = dw_overflow_to_hellcreek(dw);
mutex_lock(&hellcreek->ptp_lock);
__hellcreek_ptp_gettime(hellcreek);
mutex_unlock(&hellcreek->ptp_lock);
schedule_delayed_work(&hellcreek->overflow_work,
HELLCREEK_OVERFLOW_PERIOD);
}
static enum led_brightness hellcreek_get_brightness(struct hellcreek *hellcreek,
int led)
{
return (hellcreek->status_out & led) ? 1 : 0;
}
static void hellcreek_set_brightness(struct hellcreek *hellcreek, int led,
enum led_brightness b)
{
mutex_lock(&hellcreek->ptp_lock);
if (b)
hellcreek->status_out |= led;
else
hellcreek->status_out &= ~led;
hellcreek_ptp_write(hellcreek, hellcreek->status_out, STATUS_OUT);
mutex_unlock(&hellcreek->ptp_lock);
}
static void hellcreek_led_sync_good_set(struct led_classdev *ldev,
enum led_brightness b)
{
struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_sync_good);
hellcreek_set_brightness(hellcreek, STATUS_OUT_SYNC_GOOD, b);
}
static enum led_brightness hellcreek_led_sync_good_get(struct led_classdev *ldev)
{
struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_sync_good);
return hellcreek_get_brightness(hellcreek, STATUS_OUT_SYNC_GOOD);
}
static void hellcreek_led_is_gm_set(struct led_classdev *ldev,
enum led_brightness b)
{
struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_is_gm);
hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, b);
}
static enum led_brightness hellcreek_led_is_gm_get(struct led_classdev *ldev)
{
struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_is_gm);
return hellcreek_get_brightness(hellcreek, STATUS_OUT_IS_GM);
}
/* There are two available LEDs, internally called sync_good and is_gm. However, the
* user might want to use a different label and specify the default state. Take
* those properties from device tree.
*/
static int hellcreek_led_setup(struct hellcreek *hellcreek)
{
struct device_node *leds, *led = NULL;
enum led_default_state state;
const char *label;
int ret = -EINVAL;
of_node_get(hellcreek->dev->of_node);
leds = of_find_node_by_name(hellcreek->dev->of_node, "leds");
if (!leds) {
dev_err(hellcreek->dev, "No LEDs specified in device tree!\n");
return ret;
}
hellcreek->status_out = 0;
led = of_get_next_available_child(leds, led);
if (!led) {
dev_err(hellcreek->dev, "First LED not specified!\n");
goto out;
}
ret = of_property_read_string(led, "label", &label);
hellcreek->led_sync_good.name = ret ? "sync_good" : label;
state = led_init_default_state_get(of_fwnode_handle(led));
switch (state) {
case LEDS_DEFSTATE_ON:
hellcreek->led_sync_good.brightness = 1;
break;
case LEDS_DEFSTATE_KEEP:
hellcreek->led_sync_good.brightness =
hellcreek_get_brightness(hellcreek, STATUS_OUT_SYNC_GOOD);
break;
default:
hellcreek->led_sync_good.brightness = 0;
}
hellcreek->led_sync_good.max_brightness = 1;
hellcreek->led_sync_good.brightness_set = hellcreek_led_sync_good_set;
hellcreek->led_sync_good.brightness_get = hellcreek_led_sync_good_get;
led = of_get_next_available_child(leds, led);
if (!led) {
dev_err(hellcreek->dev, "Second LED not specified!\n");
ret = -EINVAL;
goto out;
}
ret = of_property_read_string(led, "label", &label);
hellcreek->led_is_gm.name = ret ? "is_gm" : label;
state = led_init_default_state_get(of_fwnode_handle(led));
switch (state) {
case LEDS_DEFSTATE_ON:
hellcreek->led_is_gm.brightness = 1;
break;
case LEDS_DEFSTATE_KEEP:
hellcreek->led_is_gm.brightness =
hellcreek_get_brightness(hellcreek, STATUS_OUT_IS_GM);
break;
default:
hellcreek->led_is_gm.brightness = 0;
}
hellcreek->led_is_gm.max_brightness = 1;
hellcreek->led_is_gm.brightness_set = hellcreek_led_is_gm_set;
hellcreek->led_is_gm.brightness_get = hellcreek_led_is_gm_get;
/* Set initial state */
if (hellcreek->led_sync_good.brightness == 1)
hellcreek_set_brightness(hellcreek, STATUS_OUT_SYNC_GOOD, 1);
if (hellcreek->led_is_gm.brightness == 1)
hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, 1);
/* Register both leds */
led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
ret = 0;
out:
of_node_put(leds);
return ret;
}
int hellcreek_ptp_setup(struct hellcreek *hellcreek)
{
u16 status;
int ret;
/* Set up the overflow work */
INIT_DELAYED_WORK(&hellcreek->overflow_work,
hellcreek_ptp_overflow_check);
/* Setup PTP clock */
hellcreek->ptp_clock_info.owner = THIS_MODULE;
snprintf(hellcreek->ptp_clock_info.name,
sizeof(hellcreek->ptp_clock_info.name),
"%s", dev_name(hellcreek->dev));
/* IP-Core can add up to 0.5 ns per 8 ns cycle, which means
* accumulator_overflow_rate shall not exceed 62.5 MHz (which adjusts
* the nominal frequency by 6.25%)
*/
hellcreek->ptp_clock_info.max_adj = 62500000;
hellcreek->ptp_clock_info.n_alarm = 0;
hellcreek->ptp_clock_info.n_pins = 0;
hellcreek->ptp_clock_info.n_ext_ts = 0;
hellcreek->ptp_clock_info.n_per_out = 0;
hellcreek->ptp_clock_info.pps = 0;
hellcreek->ptp_clock_info.adjfine = hellcreek_ptp_adjfine;
hellcreek->ptp_clock_info.adjtime = hellcreek_ptp_adjtime;
hellcreek->ptp_clock_info.gettime64 = hellcreek_ptp_gettime;
hellcreek->ptp_clock_info.settime64 = hellcreek_ptp_settime;
hellcreek->ptp_clock_info.enable = hellcreek_ptp_enable;
hellcreek->ptp_clock_info.do_aux_work = hellcreek_hwtstamp_work;
hellcreek->ptp_clock = ptp_clock_register(&hellcreek->ptp_clock_info,
hellcreek->dev);
if (IS_ERR(hellcreek->ptp_clock))
return PTR_ERR(hellcreek->ptp_clock);
/* Enable the offset correction process, if no offset correction is
* already taking place
*/
status = hellcreek_ptp_read(hellcreek, PR_CLOCK_STATUS_C);
if (!(status & PR_CLOCK_STATUS_C_OFS_ACT))
hellcreek_ptp_write(hellcreek,
status | PR_CLOCK_STATUS_C_ENA_OFS,
PR_CLOCK_STATUS_C);
/* Enable the drift correction process */
hellcreek_ptp_write(hellcreek, status | PR_CLOCK_STATUS_C_ENA_DRIFT,
PR_CLOCK_STATUS_C);
/* LED setup */
ret = hellcreek_led_setup(hellcreek);
if (ret) {
if (hellcreek->ptp_clock)
ptp_clock_unregister(hellcreek->ptp_clock);
return ret;
}
schedule_delayed_work(&hellcreek->overflow_work,
HELLCREEK_OVERFLOW_PERIOD);
return 0;
}
void hellcreek_ptp_free(struct hellcreek *hellcreek)
{
led_classdev_unregister(&hellcreek->led_is_gm);
led_classdev_unregister(&hellcreek->led_sync_good);
cancel_delayed_work_sync(&hellcreek->overflow_work);
if (hellcreek->ptp_clock)
ptp_clock_unregister(hellcreek->ptp_clock);
hellcreek->ptp_clock = NULL;
}
| linux-master | drivers/net/dsa/hirschmann/hellcreek_ptp.c |
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* DSA driver for:
* Hirschmann Hellcreek TSN switch.
*
* Copyright (C) 2019-2021 Linutronix GmbH
* Author Kurt Kanzenbach <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <net/dsa.h>
#include "hellcreek.h"
#include "hellcreek_ptp.h"
#include "hellcreek_hwtstamp.h"
static const struct hellcreek_counter hellcreek_counter[] = {
{ 0x00, "RxFiltered", },
{ 0x01, "RxOctets1k", },
{ 0x02, "RxVTAG", },
{ 0x03, "RxL2BAD", },
{ 0x04, "RxOverloadDrop", },
{ 0x05, "RxUC", },
{ 0x06, "RxMC", },
{ 0x07, "RxBC", },
{ 0x08, "RxRS<64", },
{ 0x09, "RxRS64", },
{ 0x0a, "RxRS65_127", },
{ 0x0b, "RxRS128_255", },
{ 0x0c, "RxRS256_511", },
{ 0x0d, "RxRS512_1023", },
{ 0x0e, "RxRS1024_1518", },
{ 0x0f, "RxRS>1518", },
{ 0x10, "TxTailDropQueue0", },
{ 0x11, "TxTailDropQueue1", },
{ 0x12, "TxTailDropQueue2", },
{ 0x13, "TxTailDropQueue3", },
{ 0x14, "TxTailDropQueue4", },
{ 0x15, "TxTailDropQueue5", },
{ 0x16, "TxTailDropQueue6", },
{ 0x17, "TxTailDropQueue7", },
{ 0x18, "RxTrafficClass0", },
{ 0x19, "RxTrafficClass1", },
{ 0x1a, "RxTrafficClass2", },
{ 0x1b, "RxTrafficClass3", },
{ 0x1c, "RxTrafficClass4", },
{ 0x1d, "RxTrafficClass5", },
{ 0x1e, "RxTrafficClass6", },
{ 0x1f, "RxTrafficClass7", },
{ 0x21, "TxOctets1k", },
{ 0x22, "TxVTAG", },
{ 0x23, "TxL2BAD", },
{ 0x25, "TxUC", },
{ 0x26, "TxMC", },
{ 0x27, "TxBC", },
{ 0x28, "TxTS<64", },
{ 0x29, "TxTS64", },
{ 0x2a, "TxTS65_127", },
{ 0x2b, "TxTS128_255", },
{ 0x2c, "TxTS256_511", },
{ 0x2d, "TxTS512_1023", },
{ 0x2e, "TxTS1024_1518", },
{ 0x2f, "TxTS>1518", },
{ 0x30, "TxTrafficClassOverrun0", },
{ 0x31, "TxTrafficClassOverrun1", },
{ 0x32, "TxTrafficClassOverrun2", },
{ 0x33, "TxTrafficClassOverrun3", },
{ 0x34, "TxTrafficClassOverrun4", },
{ 0x35, "TxTrafficClassOverrun5", },
{ 0x36, "TxTrafficClassOverrun6", },
{ 0x37, "TxTrafficClassOverrun7", },
{ 0x38, "TxTrafficClass0", },
{ 0x39, "TxTrafficClass1", },
{ 0x3a, "TxTrafficClass2", },
{ 0x3b, "TxTrafficClass3", },
{ 0x3c, "TxTrafficClass4", },
{ 0x3d, "TxTrafficClass5", },
{ 0x3e, "TxTrafficClass6", },
{ 0x3f, "TxTrafficClass7", },
};
static u16 hellcreek_read(struct hellcreek *hellcreek, unsigned int offset)
{
return readw(hellcreek->base + offset);
}
static u16 hellcreek_read_ctrl(struct hellcreek *hellcreek)
{
return readw(hellcreek->base + HR_CTRL_C);
}
static u16 hellcreek_read_stat(struct hellcreek *hellcreek)
{
return readw(hellcreek->base + HR_SWSTAT);
}
static void hellcreek_write(struct hellcreek *hellcreek, u16 data,
unsigned int offset)
{
writew(data, hellcreek->base + offset);
}
static void hellcreek_select_port(struct hellcreek *hellcreek, int port)
{
u16 val = port << HR_PSEL_PTWSEL_SHIFT;
hellcreek_write(hellcreek, val, HR_PSEL);
}
static void hellcreek_select_prio(struct hellcreek *hellcreek, int prio)
{
u16 val = prio << HR_PSEL_PRTCWSEL_SHIFT;
hellcreek_write(hellcreek, val, HR_PSEL);
}
static void hellcreek_select_port_prio(struct hellcreek *hellcreek, int port,
int prio)
{
u16 val = port << HR_PSEL_PTWSEL_SHIFT;
val |= prio << HR_PSEL_PRTCWSEL_SHIFT;
hellcreek_write(hellcreek, val, HR_PSEL);
}
static void hellcreek_select_counter(struct hellcreek *hellcreek, int counter)
{
u16 val = counter << HR_CSEL_SHIFT;
hellcreek_write(hellcreek, val, HR_CSEL);
/* Data sheet states to wait at least 20 internal clock cycles */
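/* Assuming the 125 MHz clock mentioned in hellcreek_ptp.c, 20 cycles
* correspond to 160 ns, so a delay of 200 ns is sufficient.
*/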
ndelay(200);
}
static void hellcreek_select_vlan(struct hellcreek *hellcreek, int vid,
bool pvid)
{
u16 val = 0;
/* Set pvid bit first */
if (pvid)
val |= HR_VIDCFG_PVID;
hellcreek_write(hellcreek, val, HR_VIDCFG);
/* Set vlan */
val |= vid << HR_VIDCFG_VID_SHIFT;
hellcreek_write(hellcreek, val, HR_VIDCFG);
}
static void hellcreek_select_tgd(struct hellcreek *hellcreek, int port)
{
u16 val = port << TR_TGDSEL_TDGSEL_SHIFT;
hellcreek_write(hellcreek, val, TR_TGDSEL);
}
static int hellcreek_wait_until_ready(struct hellcreek *hellcreek)
{
u16 val;
/* Wait up to 1ms, although 3 us should be enough */
return readx_poll_timeout(hellcreek_read_ctrl, hellcreek,
val, val & HR_CTRL_C_READY,
3, 1000);
}
static int hellcreek_wait_until_transitioned(struct hellcreek *hellcreek)
{
u16 val;
return readx_poll_timeout_atomic(hellcreek_read_ctrl, hellcreek,
val, !(val & HR_CTRL_C_TRANSITION),
1, 1000);
}
static int hellcreek_wait_fdb_ready(struct hellcreek *hellcreek)
{
u16 val;
return readx_poll_timeout_atomic(hellcreek_read_stat, hellcreek,
val, !(val & HR_SWSTAT_BUSY),
1, 1000);
}
static int hellcreek_detect(struct hellcreek *hellcreek)
{
u16 id, rel_low, rel_high, date_low, date_high, tgd_ver;
u8 tgd_maj, tgd_min;
u32 rel, date;
id = hellcreek_read(hellcreek, HR_MODID_C);
rel_low = hellcreek_read(hellcreek, HR_REL_L_C);
rel_high = hellcreek_read(hellcreek, HR_REL_H_C);
date_low = hellcreek_read(hellcreek, HR_BLD_L_C);
date_high = hellcreek_read(hellcreek, HR_BLD_H_C);
tgd_ver = hellcreek_read(hellcreek, TR_TGDVER);
if (id != hellcreek->pdata->module_id)
return -ENODEV;
rel = rel_low | (rel_high << 16);
date = date_low | (date_high << 16);
tgd_maj = (tgd_ver & TR_TGDVER_REV_MAJ_MASK) >> TR_TGDVER_REV_MAJ_SHIFT;
tgd_min = (tgd_ver & TR_TGDVER_REV_MIN_MASK) >> TR_TGDVER_REV_MIN_SHIFT;
dev_info(hellcreek->dev, "Module ID=%02x Release=%04x Date=%04x TGD Version=%02x.%02x\n",
id, rel, date, tgd_maj, tgd_min);
return 0;
}
static void hellcreek_feature_detect(struct hellcreek *hellcreek)
{
u16 features;
features = hellcreek_read(hellcreek, HR_FEABITS0);
/* Only detect the size of the FDB table. The size and current
* utilization can be queried via devlink.
*/
hellcreek->fdb_entries = ((features & HR_FEABITS0_FDBBINS_MASK) >>
HR_FEABITS0_FDBBINS_SHIFT) * 32;
}
static enum dsa_tag_protocol hellcreek_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_HELLCREEK;
}
static int hellcreek_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port *hellcreek_port;
u16 val;
hellcreek_port = &hellcreek->ports[port];
dev_dbg(hellcreek->dev, "Enable port %d\n", port);
mutex_lock(&hellcreek->reg_lock);
hellcreek_select_port(hellcreek, port);
val = hellcreek_port->ptcfg;
val |= HR_PTCFG_ADMIN_EN;
hellcreek_write(hellcreek, val, HR_PTCFG);
hellcreek_port->ptcfg = val;
mutex_unlock(&hellcreek->reg_lock);
return 0;
}
static void hellcreek_port_disable(struct dsa_switch *ds, int port)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port *hellcreek_port;
u16 val;
hellcreek_port = &hellcreek->ports[port];
dev_dbg(hellcreek->dev, "Disable port %d\n", port);
mutex_lock(&hellcreek->reg_lock);
hellcreek_select_port(hellcreek, port);
val = hellcreek_port->ptcfg;
val &= ~HR_PTCFG_ADMIN_EN;
hellcreek_write(hellcreek, val, HR_PTCFG);
hellcreek_port->ptcfg = val;
mutex_unlock(&hellcreek->reg_lock);
}
static void hellcreek_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data)
{
int i;
for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
const struct hellcreek_counter *counter = &hellcreek_counter[i];
strscpy(data + i * ETH_GSTRING_LEN,
counter->name, ETH_GSTRING_LEN);
}
}
static int hellcreek_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
if (sset != ETH_SS_STATS)
return 0;
return ARRAY_SIZE(hellcreek_counter);
}
static void hellcreek_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port *hellcreek_port;
int i;
hellcreek_port = &hellcreek->ports[port];
for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
const struct hellcreek_counter *counter = &hellcreek_counter[i];
u8 offset = counter->offset + port * 64;
u16 high, low;
u64 value;
mutex_lock(&hellcreek->reg_lock);
hellcreek_select_counter(hellcreek, offset);
/* The registers are locked internally by selecting the
* counter. So low and high can be read without reading high
* again.
*/
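/* The hardware counters are only 32 bit wide and appear to be
* clear-on-read; they are therefore accumulated into 64 bit software
* counters.
*/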
high = hellcreek_read(hellcreek, HR_CRDH);
low = hellcreek_read(hellcreek, HR_CRDL);
value = ((u64)high << 16) | low;
hellcreek_port->counter_values[i] += value;
data[i] = hellcreek_port->counter_values[i];
mutex_unlock(&hellcreek->reg_lock);
}
}
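/* Each user port gets a "private" VLAN at the top of the VID range to enforce
* port separation in non-bridged operation, e.g. with VLAN_N_VID = 4096 port 2
* maps to VID 4095 and port 3 to VID 4094.
*/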
static u16 hellcreek_private_vid(int port)
{
return VLAN_N_VID - port + 1;
}
static int hellcreek_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
int i;
dev_dbg(hellcreek->dev, "VLAN prepare for port %d\n", port);
/* Restriction: Make sure that nobody uses the "private" VLANs. These
* VLANs are internally used by the driver to ensure port
* separation. Thus, they cannot be used by someone else.
*/
for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
const u16 restricted_vid = hellcreek_private_vid(i);
if (!dsa_is_user_port(ds, i))
continue;
if (vlan->vid == restricted_vid) {
NL_SET_ERR_MSG_MOD(extack, "VID restricted by driver");
return -EBUSY;
}
}
return 0;
}
static void hellcreek_select_vlan_params(struct hellcreek *hellcreek, int port,
int *shift, int *mask)
{
switch (port) {
case 0:
*shift = HR_VIDMBRCFG_P0MBR_SHIFT;
*mask = HR_VIDMBRCFG_P0MBR_MASK;
break;
case 1:
*shift = HR_VIDMBRCFG_P1MBR_SHIFT;
*mask = HR_VIDMBRCFG_P1MBR_MASK;
break;
case 2:
*shift = HR_VIDMBRCFG_P2MBR_SHIFT;
*mask = HR_VIDMBRCFG_P2MBR_MASK;
break;
case 3:
*shift = HR_VIDMBRCFG_P3MBR_SHIFT;
*mask = HR_VIDMBRCFG_P3MBR_MASK;
break;
default:
*shift = *mask = 0;
dev_err(hellcreek->dev, "Unknown port %d selected!\n", port);
}
}
static void hellcreek_apply_vlan(struct hellcreek *hellcreek, int port, u16 vid,
bool pvid, bool untagged)
{
int shift, mask;
u16 val;
dev_dbg(hellcreek->dev, "Apply VLAN: port=%d vid=%u pvid=%d untagged=%d",
port, vid, pvid, untagged);
mutex_lock(&hellcreek->reg_lock);
hellcreek_select_port(hellcreek, port);
hellcreek_select_vlan(hellcreek, vid, pvid);
/* Setup port vlan membership */
hellcreek_select_vlan_params(hellcreek, port, &shift, &mask);
val = hellcreek->vidmbrcfg[vid];
val &= ~mask;
if (untagged)
val |= HELLCREEK_VLAN_UNTAGGED_MEMBER << shift;
else
val |= HELLCREEK_VLAN_TAGGED_MEMBER << shift;
hellcreek_write(hellcreek, val, HR_VIDMBRCFG);
hellcreek->vidmbrcfg[vid] = val;
mutex_unlock(&hellcreek->reg_lock);
}
static void hellcreek_unapply_vlan(struct hellcreek *hellcreek, int port,
u16 vid)
{
int shift, mask;
u16 val;
dev_dbg(hellcreek->dev, "Unapply VLAN: port=%d vid=%u\n", port, vid);
mutex_lock(&hellcreek->reg_lock);
hellcreek_select_vlan(hellcreek, vid, false);
/* Setup port vlan membership */
hellcreek_select_vlan_params(hellcreek, port, &shift, &mask);
val = hellcreek->vidmbrcfg[vid];
val &= ~mask;
val |= HELLCREEK_VLAN_NO_MEMBER << shift;
hellcreek_write(hellcreek, val, HR_VIDMBRCFG);
hellcreek->vidmbrcfg[vid] = val;
mutex_unlock(&hellcreek->reg_lock);
}
static int hellcreek_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct hellcreek *hellcreek = ds->priv;
int err;
err = hellcreek_vlan_prepare(ds, port, vlan, extack);
if (err)
return err;
dev_dbg(hellcreek->dev, "Add VLAN %d on port %d, %s, %s\n",
vlan->vid, port, untagged ? "untagged" : "tagged",
pvid ? "PVID" : "no PVID");
hellcreek_apply_vlan(hellcreek, port, vlan->vid, pvid, untagged);
return 0;
}
static int hellcreek_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct hellcreek *hellcreek = ds->priv;
dev_dbg(hellcreek->dev, "Remove VLAN %d on port %d\n", vlan->vid, port);
hellcreek_unapply_vlan(hellcreek, port, vlan->vid);
return 0;
}
static void hellcreek_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port *hellcreek_port;
const char *new_state;
u16 val;
mutex_lock(&hellcreek->reg_lock);
hellcreek_port = &hellcreek->ports[port];
val = hellcreek_port->ptcfg;
switch (state) {
case BR_STATE_DISABLED:
new_state = "DISABLED";
val |= HR_PTCFG_BLOCKED;
val &= ~HR_PTCFG_LEARNING_EN;
break;
case BR_STATE_BLOCKING:
new_state = "BLOCKING";
val |= HR_PTCFG_BLOCKED;
val &= ~HR_PTCFG_LEARNING_EN;
break;
case BR_STATE_LISTENING:
new_state = "LISTENING";
val |= HR_PTCFG_BLOCKED;
val &= ~HR_PTCFG_LEARNING_EN;
break;
case BR_STATE_LEARNING:
new_state = "LEARNING";
val |= HR_PTCFG_BLOCKED;
val |= HR_PTCFG_LEARNING_EN;
break;
case BR_STATE_FORWARDING:
new_state = "FORWARDING";
val &= ~HR_PTCFG_BLOCKED;
val |= HR_PTCFG_LEARNING_EN;
break;
default:
new_state = "UNKNOWN";
}
hellcreek_select_port(hellcreek, port);
hellcreek_write(hellcreek, val, HR_PTCFG);
hellcreek_port->ptcfg = val;
mutex_unlock(&hellcreek->reg_lock);
dev_dbg(hellcreek->dev, "Configured STP state for port %d: %s\n",
port, new_state);
}
static void hellcreek_setup_ingressflt(struct hellcreek *hellcreek, int port,
bool enable)
{
struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
u16 ptcfg;
mutex_lock(&hellcreek->reg_lock);
ptcfg = hellcreek_port->ptcfg;
if (enable)
ptcfg |= HR_PTCFG_INGRESSFLT;
else
ptcfg &= ~HR_PTCFG_INGRESSFLT;
hellcreek_select_port(hellcreek, port);
hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
hellcreek_port->ptcfg = ptcfg;
mutex_unlock(&hellcreek->reg_lock);
}
static void hellcreek_setup_vlan_awareness(struct hellcreek *hellcreek,
bool enable)
{
u16 swcfg;
mutex_lock(&hellcreek->reg_lock);
swcfg = hellcreek->swcfg;
if (enable)
swcfg |= HR_SWCFG_VLAN_UNAWARE;
else
swcfg &= ~HR_SWCFG_VLAN_UNAWARE;
hellcreek_write(hellcreek, swcfg, HR_SWCFG);
mutex_unlock(&hellcreek->reg_lock);
}
/* Default setup for DSA: VLAN <X>: CPU and Port <X> egress untagged. */
static void hellcreek_setup_vlan_membership(struct dsa_switch *ds, int port,
bool enabled)
{
const u16 vid = hellcreek_private_vid(port);
int upstream = dsa_upstream_port(ds, port);
struct hellcreek *hellcreek = ds->priv;
/* Apply vid to port as egress untagged and port vlan id */
if (enabled)
hellcreek_apply_vlan(hellcreek, port, vid, true, true);
else
hellcreek_unapply_vlan(hellcreek, port, vid);
/* Apply vid to cpu port as well */
if (enabled)
hellcreek_apply_vlan(hellcreek, upstream, vid, false, true);
else
hellcreek_unapply_vlan(hellcreek, upstream, vid);
}
static void hellcreek_port_set_ucast_flood(struct hellcreek *hellcreek,
int port, bool enable)
{
struct hellcreek_port *hellcreek_port;
u16 val;
hellcreek_port = &hellcreek->ports[port];
dev_dbg(hellcreek->dev, "%s unicast flooding on port %d\n",
enable ? "Enable" : "Disable", port);
mutex_lock(&hellcreek->reg_lock);
hellcreek_select_port(hellcreek, port);
val = hellcreek_port->ptcfg;
if (enable)
val &= ~HR_PTCFG_UUC_FLT;
else
val |= HR_PTCFG_UUC_FLT;
hellcreek_write(hellcreek, val, HR_PTCFG);
hellcreek_port->ptcfg = val;
mutex_unlock(&hellcreek->reg_lock);
}
static void hellcreek_port_set_mcast_flood(struct hellcreek *hellcreek,
int port, bool enable)
{
struct hellcreek_port *hellcreek_port;
u16 val;
hellcreek_port = &hellcreek->ports[port];
dev_dbg(hellcreek->dev, "%s multicast flooding on port %d\n",
enable ? "Enable" : "Disable", port);
mutex_lock(&hellcreek->reg_lock);
hellcreek_select_port(hellcreek, port);
val = hellcreek_port->ptcfg;
if (enable)
val &= ~HR_PTCFG_UMC_FLT;
else
val |= HR_PTCFG_UMC_FLT;
hellcreek_write(hellcreek, val, HR_PTCFG);
hellcreek_port->ptcfg = val;
mutex_unlock(&hellcreek->reg_lock);
}
static int hellcreek_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD))
return -EINVAL;
return 0;
}
static int hellcreek_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
if (flags.mask & BR_FLOOD)
hellcreek_port_set_ucast_flood(hellcreek, port,
!!(flags.val & BR_FLOOD));
if (flags.mask & BR_MCAST_FLOOD)
hellcreek_port_set_mcast_flood(hellcreek, port,
!!(flags.val & BR_MCAST_FLOOD));
return 0;
}
static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
dev_dbg(hellcreek->dev, "Port %d joins a bridge\n", port);
/* When joining a vlan_filtering bridge, keep the switch VLAN aware */
if (!ds->vlan_filtering)
hellcreek_setup_vlan_awareness(hellcreek, false);
/* Drop private vlans */
hellcreek_setup_vlan_membership(ds, port, false);
return 0;
}
static void hellcreek_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct hellcreek *hellcreek = ds->priv;
dev_dbg(hellcreek->dev, "Port %d leaves a bridge\n", port);
/* Enable VLAN awareness */
hellcreek_setup_vlan_awareness(hellcreek, true);
/* Enable private vlans */
hellcreek_setup_vlan_membership(ds, port, true);
}
static int __hellcreek_fdb_add(struct hellcreek *hellcreek,
const struct hellcreek_fdb_entry *entry)
{
u16 meta = 0;
dev_dbg(hellcreek->dev, "Add static FDB entry: MAC=%pM, MASK=0x%02x, "
"OBT=%d, PASS_BLOCKED=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac,
entry->portmask, entry->is_obt, entry->pass_blocked,
entry->reprio_en, entry->reprio_tc);
/* Add mac address */
hellcreek_write(hellcreek, entry->mac[1] | (entry->mac[0] << 8), HR_FDBWDH);
hellcreek_write(hellcreek, entry->mac[3] | (entry->mac[2] << 8), HR_FDBWDM);
hellcreek_write(hellcreek, entry->mac[5] | (entry->mac[4] << 8), HR_FDBWDL);
/* Meta data */
meta |= entry->portmask << HR_FDBWRM0_PORTMASK_SHIFT;
if (entry->is_obt)
meta |= HR_FDBWRM0_OBT;
if (entry->pass_blocked)
meta |= HR_FDBWRM0_PASS_BLOCKED;
if (entry->reprio_en) {
meta |= HR_FDBWRM0_REPRIO_EN;
meta |= entry->reprio_tc << HR_FDBWRM0_REPRIO_TC_SHIFT;
}
hellcreek_write(hellcreek, meta, HR_FDBWRM0);
/* Commit */
hellcreek_write(hellcreek, 0x00, HR_FDBWRCMD);
/* Wait until done */
return hellcreek_wait_fdb_ready(hellcreek);
}
static int __hellcreek_fdb_del(struct hellcreek *hellcreek,
const struct hellcreek_fdb_entry *entry)
{
dev_dbg(hellcreek->dev, "Delete FDB entry: MAC=%pM!\n", entry->mac);
/* Delete by matching idx */
hellcreek_write(hellcreek, entry->idx | HR_FDBWRCMD_FDBDEL, HR_FDBWRCMD);
/* Wait until done */
return hellcreek_wait_fdb_ready(hellcreek);
}
static void hellcreek_populate_fdb_entry(struct hellcreek *hellcreek,
struct hellcreek_fdb_entry *entry,
size_t idx)
{
unsigned char addr[ETH_ALEN];
u16 meta, mac;
/* Read values */
meta = hellcreek_read(hellcreek, HR_FDBMDRD);
mac = hellcreek_read(hellcreek, HR_FDBRDL);
addr[5] = mac & 0xff;
addr[4] = (mac & 0xff00) >> 8;
mac = hellcreek_read(hellcreek, HR_FDBRDM);
addr[3] = mac & 0xff;
addr[2] = (mac & 0xff00) >> 8;
mac = hellcreek_read(hellcreek, HR_FDBRDH);
addr[1] = mac & 0xff;
addr[0] = (mac & 0xff00) >> 8;
/* Populate @entry */
memcpy(entry->mac, addr, sizeof(addr));
entry->idx = idx;
entry->portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >>
HR_FDBMDRD_PORTMASK_SHIFT;
entry->age = (meta & HR_FDBMDRD_AGE_MASK) >>
HR_FDBMDRD_AGE_SHIFT;
entry->is_obt = !!(meta & HR_FDBMDRD_OBT);
entry->pass_blocked = !!(meta & HR_FDBMDRD_PASS_BLOCKED);
entry->is_static = !!(meta & HR_FDBMDRD_STATIC);
entry->reprio_tc = (meta & HR_FDBMDRD_REPRIO_TC_MASK) >>
HR_FDBMDRD_REPRIO_TC_SHIFT;
entry->reprio_en = !!(meta & HR_FDBMDRD_REPRIO_EN);
}
/* Retrieve the index of a FDB entry by mac address. Currently we search through
* the complete table in hardware. If that's too slow, we might have to cache
* the complete FDB table in software.
*/
static int hellcreek_fdb_get(struct hellcreek *hellcreek,
const unsigned char *dest,
struct hellcreek_fdb_entry *entry)
{
size_t i;
/* Set read pointer to zero: The read of HR_FDBMAX (read-only register)
* should reset the internal pointer. But, that doesn't work. The vendor
* suggested a subsequent write as workaround. Same for HR_FDBRDH below.
*/
hellcreek_read(hellcreek, HR_FDBMAX);
hellcreek_write(hellcreek, 0x00, HR_FDBMAX);
/* We have to read the complete table, because the switch/driver might
* enter new entries anywhere.
*/
for (i = 0; i < hellcreek->fdb_entries; ++i) {
struct hellcreek_fdb_entry tmp = { 0 };
/* Read entry */
hellcreek_populate_fdb_entry(hellcreek, &tmp, i);
/* Force next entry */
hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
if (memcmp(tmp.mac, dest, ETH_ALEN))
continue;
/* Match found */
memcpy(entry, &tmp, sizeof(*entry));
return 0;
}
return -ENOENT;
}
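/* Adding a port to an already known MAC address is done by deleting the
* existing entry and re-adding it with the updated port mask.
*/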
static int hellcreek_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct hellcreek_fdb_entry entry = { 0 };
struct hellcreek *hellcreek = ds->priv;
int ret;
dev_dbg(hellcreek->dev, "Add FDB entry for MAC=%pM\n", addr);
mutex_lock(&hellcreek->reg_lock);
ret = hellcreek_fdb_get(hellcreek, addr, &entry);
if (ret) {
/* Not found */
memcpy(entry.mac, addr, sizeof(entry.mac));
entry.portmask = BIT(port);
ret = __hellcreek_fdb_add(hellcreek, &entry);
if (ret) {
dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
goto out;
}
} else {
/* Found */
ret = __hellcreek_fdb_del(hellcreek, &entry);
if (ret) {
dev_err(hellcreek->dev, "Failed to delete FDB entry!\n");
goto out;
}
entry.portmask |= BIT(port);
ret = __hellcreek_fdb_add(hellcreek, &entry);
if (ret) {
dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
goto out;
}
}
out:
mutex_unlock(&hellcreek->reg_lock);
return ret;
}
static int hellcreek_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct hellcreek_fdb_entry entry = { 0 };
struct hellcreek *hellcreek = ds->priv;
int ret;
dev_dbg(hellcreek->dev, "Delete FDB entry for MAC=%pM\n", addr);
mutex_lock(&hellcreek->reg_lock);
ret = hellcreek_fdb_get(hellcreek, addr, &entry);
if (ret) {
/* Not found */
dev_err(hellcreek->dev, "FDB entry for deletion not found!\n");
} else {
/* Found */
ret = __hellcreek_fdb_del(hellcreek, &entry);
if (ret) {
dev_err(hellcreek->dev, "Failed to delete FDB entry!\n");
goto out;
}
entry.portmask &= ~BIT(port);
if (entry.portmask != 0x00) {
ret = __hellcreek_fdb_add(hellcreek, &entry);
if (ret) {
dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
goto out;
}
}
}
out:
mutex_unlock(&hellcreek->reg_lock);
return ret;
}
static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct hellcreek *hellcreek = ds->priv;
u16 entries;
int ret = 0;
size_t i;
mutex_lock(&hellcreek->reg_lock);
/* Set read pointer to zero: The read of HR_FDBMAX (read-only register)
* should reset the internal pointer. But, that doesn't work. The vendor
* suggested a subsequent write as workaround. Same for HR_FDBRDH below.
*/
entries = hellcreek_read(hellcreek, HR_FDBMAX);
hellcreek_write(hellcreek, 0x00, HR_FDBMAX);
dev_dbg(hellcreek->dev, "FDB dump for port %d, entries=%d!\n", port, entries);
/* Read table */
for (i = 0; i < hellcreek->fdb_entries; ++i) {
struct hellcreek_fdb_entry entry = { 0 };
/* Read entry */
hellcreek_populate_fdb_entry(hellcreek, &entry, i);
/* Force next entry */
hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
/* Check valid */
if (is_zero_ether_addr(entry.mac))
continue;
/* Check port mask */
if (!(entry.portmask & BIT(port)))
continue;
ret = cb(entry.mac, 0, entry.is_static, data);
if (ret)
break;
}
mutex_unlock(&hellcreek->reg_lock);
return ret;
}
static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
dev_dbg(hellcreek->dev, "%s VLAN filtering on port %d\n",
vlan_filtering ? "Enable" : "Disable", port);
/* Configure the port to drop packets with unknown VIDs */
hellcreek_setup_ingressflt(hellcreek, port, vlan_filtering);
/* Enable VLAN awareness on the switch. This is safe due to
* ds->vlan_filtering_is_global.
*/
hellcreek_setup_vlan_awareness(hellcreek, vlan_filtering);
return 0;
}
static int hellcreek_enable_ip_core(struct hellcreek *hellcreek)
{
int ret;
u16 val;
mutex_lock(&hellcreek->reg_lock);
val = hellcreek_read(hellcreek, HR_CTRL_C);
val |= HR_CTRL_C_ENABLE;
hellcreek_write(hellcreek, val, HR_CTRL_C);
ret = hellcreek_wait_until_transitioned(hellcreek);
mutex_unlock(&hellcreek->reg_lock);
return ret;
}
static void hellcreek_setup_cpu_and_tunnel_port(struct hellcreek *hellcreek)
{
struct hellcreek_port *tunnel_port = &hellcreek->ports[TUNNEL_PORT];
struct hellcreek_port *cpu_port = &hellcreek->ports[CPU_PORT];
u16 ptcfg = 0;
ptcfg |= HR_PTCFG_LEARNING_EN | HR_PTCFG_ADMIN_EN;
mutex_lock(&hellcreek->reg_lock);
hellcreek_select_port(hellcreek, CPU_PORT);
hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
hellcreek_select_port(hellcreek, TUNNEL_PORT);
hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
cpu_port->ptcfg = ptcfg;
tunnel_port->ptcfg = ptcfg;
mutex_unlock(&hellcreek->reg_lock);
}
static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek)
{
int i;
/* The switch has multiple egress queues per port. The queue is selected
* via the PCP field in the VLAN header. The switch internally deals
* with traffic classes instead of PCP values and this mapping is
* configurable.
*
* The default mapping is (PCP - TC):
* 7 - 7
* 6 - 6
* 5 - 5
* 4 - 4
* 3 - 3
* 2 - 1
* 1 - 0
* 0 - 2
*
* The default should be an identity mapping.
*/
for (i = 0; i < 8; ++i) {
mutex_lock(&hellcreek->reg_lock);
hellcreek_select_prio(hellcreek, i);
hellcreek_write(hellcreek,
i << HR_PRTCCFG_PCP_TC_MAP_SHIFT,
HR_PRTCCFG);
mutex_unlock(&hellcreek->reg_lock);
}
}
static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
{
static struct hellcreek_fdb_entry l2_ptp = {
/* MAC: 01-1B-19-00-00-00 */
.mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 },
.portmask = 0x03, /* Management ports */
.age = 0,
.is_obt = 0,
.pass_blocked = 0,
.is_static = 1,
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
static struct hellcreek_fdb_entry udp4_ptp = {
/* MAC: 01-00-5E-00-01-81 */
.mac = { 0x01, 0x00, 0x5e, 0x00, 0x01, 0x81 },
.portmask = 0x03, /* Management ports */
.age = 0,
.is_obt = 0,
.pass_blocked = 0,
.is_static = 1,
.reprio_tc = 6,
.reprio_en = 1,
};
static struct hellcreek_fdb_entry udp6_ptp = {
/* MAC: 33-33-00-00-01-81 */
.mac = { 0x33, 0x33, 0x00, 0x00, 0x01, 0x81 },
.portmask = 0x03, /* Management ports */
.age = 0,
.is_obt = 0,
.pass_blocked = 0,
.is_static = 1,
.reprio_tc = 6,
.reprio_en = 1,
};
static struct hellcreek_fdb_entry l2_p2p = {
/* MAC: 01-80-C2-00-00-0E */
.mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e },
.portmask = 0x03, /* Management ports */
.age = 0,
.is_obt = 0,
.pass_blocked = 1,
.is_static = 1,
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
static struct hellcreek_fdb_entry udp4_p2p = {
/* MAC: 01-00-5E-00-00-6B */
.mac = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x6b },
.portmask = 0x03, /* Management ports */
.age = 0,
.is_obt = 0,
.pass_blocked = 1,
.is_static = 1,
.reprio_tc = 6,
.reprio_en = 1,
};
static struct hellcreek_fdb_entry udp6_p2p = {
/* MAC: 33-33-00-00-00-6B */
.mac = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x6b },
.portmask = 0x03, /* Management ports */
.age = 0,
.is_obt = 0,
.pass_blocked = 1,
.is_static = 1,
.reprio_tc = 6,
.reprio_en = 1,
};
static struct hellcreek_fdb_entry stp = {
/* MAC: 01-80-C2-00-00-00 */
.mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 },
.portmask = 0x03, /* Management ports */
.age = 0,
.is_obt = 0,
.pass_blocked = 1,
.is_static = 1,
.reprio_tc = 6,
.reprio_en = 1,
};
int ret;
mutex_lock(&hellcreek->reg_lock);
ret = __hellcreek_fdb_add(hellcreek, &l2_ptp);
if (ret)
goto out;
ret = __hellcreek_fdb_add(hellcreek, &udp4_ptp);
if (ret)
goto out;
ret = __hellcreek_fdb_add(hellcreek, &udp6_ptp);
if (ret)
goto out;
ret = __hellcreek_fdb_add(hellcreek, &l2_p2p);
if (ret)
goto out;
ret = __hellcreek_fdb_add(hellcreek, &udp4_p2p);
if (ret)
goto out;
ret = __hellcreek_fdb_add(hellcreek, &udp6_p2p);
if (ret)
goto out;
ret = __hellcreek_fdb_add(hellcreek, &stp);
out:
mutex_unlock(&hellcreek->reg_lock);
return ret;
}
static int hellcreek_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
return devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
hellcreek->pdata->name);
}
static u64 hellcreek_devlink_vlan_table_get(void *priv)
{
struct hellcreek *hellcreek = priv;
u64 count = 0;
int i;
mutex_lock(&hellcreek->reg_lock);
for (i = 0; i < VLAN_N_VID; ++i)
if (hellcreek->vidmbrcfg[i])
count++;
mutex_unlock(&hellcreek->reg_lock);
return count;
}
static u64 hellcreek_devlink_fdb_table_get(void *priv)
{
struct hellcreek *hellcreek = priv;
u64 count = 0;
/* Reading this register has side effects. Synchronize against the other
* FDB operations.
*/
mutex_lock(&hellcreek->reg_lock);
count = hellcreek_read(hellcreek, HR_FDBMAX);
mutex_unlock(&hellcreek->reg_lock);
return count;
}
static int hellcreek_setup_devlink_resources(struct dsa_switch *ds)
{
struct devlink_resource_size_params size_vlan_params;
struct devlink_resource_size_params size_fdb_params;
struct hellcreek *hellcreek = ds->priv;
int err;
devlink_resource_size_params_init(&size_vlan_params, VLAN_N_VID,
VLAN_N_VID,
1, DEVLINK_RESOURCE_UNIT_ENTRY);
devlink_resource_size_params_init(&size_fdb_params,
hellcreek->fdb_entries,
hellcreek->fdb_entries,
1, DEVLINK_RESOURCE_UNIT_ENTRY);
err = dsa_devlink_resource_register(ds, "VLAN", VLAN_N_VID,
HELLCREEK_DEVLINK_PARAM_ID_VLAN_TABLE,
DEVLINK_RESOURCE_ID_PARENT_TOP,
&size_vlan_params);
if (err)
goto out;
err = dsa_devlink_resource_register(ds, "FDB", hellcreek->fdb_entries,
HELLCREEK_DEVLINK_PARAM_ID_FDB_TABLE,
DEVLINK_RESOURCE_ID_PARENT_TOP,
&size_fdb_params);
if (err)
goto out;
dsa_devlink_resource_occ_get_register(ds,
HELLCREEK_DEVLINK_PARAM_ID_VLAN_TABLE,
hellcreek_devlink_vlan_table_get,
hellcreek);
dsa_devlink_resource_occ_get_register(ds,
HELLCREEK_DEVLINK_PARAM_ID_FDB_TABLE,
hellcreek_devlink_fdb_table_get,
hellcreek);
return 0;
out:
dsa_devlink_resources_unregister(ds);
return err;
}
static int hellcreek_devlink_region_vlan_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
u8 **data)
{
struct hellcreek_devlink_vlan_entry *table, *entry;
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
struct hellcreek *hellcreek = ds->priv;
int i;
table = kcalloc(VLAN_N_VID, sizeof(*entry), GFP_KERNEL);
if (!table)
return -ENOMEM;
entry = table;
mutex_lock(&hellcreek->reg_lock);
for (i = 0; i < VLAN_N_VID; ++i, ++entry) {
entry->member = hellcreek->vidmbrcfg[i];
entry->vid = i;
}
mutex_unlock(&hellcreek->reg_lock);
*data = (u8 *)table;
return 0;
}
static int hellcreek_devlink_region_fdb_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
u8 **data)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
struct hellcreek_fdb_entry *table, *entry;
struct hellcreek *hellcreek = ds->priv;
size_t i;
table = kcalloc(hellcreek->fdb_entries, sizeof(*entry), GFP_KERNEL);
if (!table)
return -ENOMEM;
entry = table;
mutex_lock(&hellcreek->reg_lock);
/* Start table read */
hellcreek_read(hellcreek, HR_FDBMAX);
hellcreek_write(hellcreek, 0x00, HR_FDBMAX);
for (i = 0; i < hellcreek->fdb_entries; ++i, ++entry) {
/* Read current entry */
hellcreek_populate_fdb_entry(hellcreek, entry, i);
/* Advance read pointer */
hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
}
mutex_unlock(&hellcreek->reg_lock);
*data = (u8 *)table;
return 0;
}
static struct devlink_region_ops hellcreek_region_vlan_ops = {
.name = "vlan",
.snapshot = hellcreek_devlink_region_vlan_snapshot,
.destructor = kfree,
};
static struct devlink_region_ops hellcreek_region_fdb_ops = {
.name = "fdb",
.snapshot = hellcreek_devlink_region_fdb_snapshot,
.destructor = kfree,
};
static int hellcreek_setup_devlink_regions(struct dsa_switch *ds)
{
struct hellcreek *hellcreek = ds->priv;
struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
int ret;
/* VLAN table */
size = VLAN_N_VID * sizeof(struct hellcreek_devlink_vlan_entry);
ops = &hellcreek_region_vlan_ops;
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region))
return PTR_ERR(region);
hellcreek->vlan_region = region;
/* FDB table */
size = hellcreek->fdb_entries * sizeof(struct hellcreek_fdb_entry);
ops = &hellcreek_region_fdb_ops;
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region)) {
ret = PTR_ERR(region);
goto err_fdb;
}
hellcreek->fdb_region = region;
return 0;
err_fdb:
dsa_devlink_region_destroy(hellcreek->vlan_region);
return ret;
}
static void hellcreek_teardown_devlink_regions(struct dsa_switch *ds)
{
struct hellcreek *hellcreek = ds->priv;
dsa_devlink_region_destroy(hellcreek->fdb_region);
dsa_devlink_region_destroy(hellcreek->vlan_region);
}
static int hellcreek_setup(struct dsa_switch *ds)
{
struct hellcreek *hellcreek = ds->priv;
u16 swcfg = 0;
int ret, i;
dev_dbg(hellcreek->dev, "Set up the switch\n");
/* Let's go */
ret = hellcreek_enable_ip_core(hellcreek);
if (ret) {
dev_err(hellcreek->dev, "Failed to enable IP core!\n");
return ret;
}
/* Enable CPU/Tunnel ports */
hellcreek_setup_cpu_and_tunnel_port(hellcreek);
/* Switch config: Keep defaults, enable FDB aging and learning and tag
* each frame from/to cpu port for DSA tagging. Also enable the length
* aware shaping mode. This eliminates the need for Qbv guard bands.
*/
swcfg |= HR_SWCFG_FDBAGE_EN |
HR_SWCFG_FDBLRN_EN |
HR_SWCFG_ALWAYS_OBT |
(HR_SWCFG_LAS_ON << HR_SWCFG_LAS_MODE_SHIFT);
hellcreek->swcfg = swcfg;
hellcreek_write(hellcreek, swcfg, HR_SWCFG);
/* Initial vlan membership to reflect port separation */
for (i = 0; i < ds->num_ports; ++i) {
if (!dsa_is_user_port(ds, i))
continue;
hellcreek_setup_vlan_membership(ds, i, true);
}
/* Configure PCP <-> TC mapping */
hellcreek_setup_tc_identity_mapping(hellcreek);
/* The VLAN awareness is a global switch setting. Therefore, mixed vlan
* filtering setups are not supported.
*/
ds->vlan_filtering_is_global = true;
ds->needs_standalone_vlan_filtering = true;
/* Intercept _all_ PTP multicast traffic */
ret = hellcreek_setup_fdb(hellcreek);
if (ret) {
dev_err(hellcreek->dev,
"Failed to insert static PTP FDB entries\n");
return ret;
}
/* Register devlink resources with DSA */
ret = hellcreek_setup_devlink_resources(ds);
if (ret) {
dev_err(hellcreek->dev,
"Failed to setup devlink resources!\n");
return ret;
}
ret = hellcreek_setup_devlink_regions(ds);
if (ret) {
dev_err(hellcreek->dev,
"Failed to setup devlink regions!\n");
goto err_regions;
}
return 0;
err_regions:
dsa_devlink_resources_unregister(ds);
return ret;
}
static void hellcreek_teardown(struct dsa_switch *ds)
{
hellcreek_teardown_devlink_regions(ds);
dsa_devlink_resources_unregister(ds);
}
static void hellcreek_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct hellcreek *hellcreek = ds->priv;
__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);
/* Include GMII - the hardware does not support this interface
* mode, but it's the default interface mode for phylib, so we
* need it for compatibility with existing DT.
*/
__set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
/* The MAC settings are a hardware configuration option and cannot be
* changed at run time or by strapping. Therefore the attached PHYs
* should be programmed to only advertise settings which are supported
* by the hardware.
*/
if (hellcreek->pdata->is_100_mbits)
config->mac_capabilities = MAC_100FD;
else
config->mac_capabilities = MAC_1000FD;
}
static int
hellcreek_port_prechangeupper(struct dsa_switch *ds, int port,
struct netdev_notifier_changeupper_info *info)
{
struct hellcreek *hellcreek = ds->priv;
bool used = true;
int ret = -EBUSY;
u16 vid;
int i;
dev_dbg(hellcreek->dev, "Pre change upper for port %d\n", port);
/*
* Deny VLAN devices on top of lan ports with the same VLAN ids, because
* it breaks the port separation due to the private VLANs. Example:
*
* lan0.100 *and* lan1.100 cannot be used in parallel. However, lan0.99
* and lan1.100 work.
*/
if (!is_vlan_dev(info->upper_dev))
return 0;
vid = vlan_dev_vlan_id(info->upper_dev);
/* For all ports, check bitmaps */
mutex_lock(&hellcreek->vlan_lock);
for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
if (!dsa_is_user_port(ds, i))
continue;
if (port == i)
continue;
used = used && test_bit(vid, hellcreek->ports[i].vlan_dev_bitmap);
}
if (used)
goto out;
/* Update bitmap */
set_bit(vid, hellcreek->ports[port].vlan_dev_bitmap);
ret = 0;
out:
mutex_unlock(&hellcreek->vlan_lock);
return ret;
}
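/* Program the per traffic class max SDU. The queueMaxSDU value from the taprio
* schedule is adjusted by adding VLAN_ETH_HLEN and subtracting ETH_FCS_LEN
* before it is written to the hardware register.
*/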
static void hellcreek_setup_maxsdu(struct hellcreek *hellcreek, int port,
const struct tc_taprio_qopt_offload *schedule)
{
int tc;
for (tc = 0; tc < 8; ++tc) {
u32 max_sdu = schedule->max_sdu[tc] + VLAN_ETH_HLEN - ETH_FCS_LEN;
u16 val;
if (!schedule->max_sdu[tc])
continue;
dev_dbg(hellcreek->dev, "Configure max-sdu %u for tc %d on port %d\n",
max_sdu, tc, port);
hellcreek_select_port_prio(hellcreek, port, tc);
val = (max_sdu & HR_PTPRTCCFG_MAXSDU_MASK) << HR_PTPRTCCFG_MAXSDU_SHIFT;
hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
}
}
static void hellcreek_reset_maxsdu(struct hellcreek *hellcreek, int port)
{
int tc;
for (tc = 0; tc < 8; ++tc) {
u16 val;
hellcreek_select_port_prio(hellcreek, port, tc);
val = (HELLCREEK_DEFAULT_MAX_SDU & HR_PTPRTCCFG_MAXSDU_MASK)
<< HR_PTPRTCCFG_MAXSDU_SHIFT;
hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
}
}
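/* Write the gate control list to the hardware. The gate states are programmed
* as the XOR between the current entry and the next one, i.e. as a toggle
* mask; the last entry toggles back to the initial gate states and is flagged
* with TR_GCLDAT_GCLWRLAST.
*/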
static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
const struct tc_taprio_qopt_offload *schedule)
{
const struct tc_taprio_sched_entry *cur, *initial, *next;
size_t i;
cur = initial = &schedule->entries[0];
next = cur + 1;
for (i = 1; i <= schedule->num_entries; ++i) {
u16 data;
u8 gates;
if (i == schedule->num_entries)
gates = initial->gate_mask ^
cur->gate_mask;
else
gates = next->gate_mask ^
cur->gate_mask;
data = gates;
if (i == schedule->num_entries)
data |= TR_GCLDAT_GCLWRLAST;
/* Gates states */
hellcreek_write(hellcreek, data, TR_GCLDAT);
/* Time interval */
hellcreek_write(hellcreek,
cur->interval & 0x0000ffff,
TR_GCLTIL);
hellcreek_write(hellcreek,
(cur->interval & 0xffff0000) >> 16,
TR_GCLTIH);
/* Commit entry */
data = ((i - 1) << TR_GCLCMD_GCLWRADR_SHIFT) |
(initial->gate_mask <<
TR_GCLCMD_INIT_GATE_STATES_SHIFT);
hellcreek_write(hellcreek, data, TR_GCLCMD);
cur++;
next++;
}
}
static void hellcreek_set_cycle_time(struct hellcreek *hellcreek,
const struct tc_taprio_qopt_offload *schedule)
{
u32 cycle_time = schedule->cycle_time;
hellcreek_write(hellcreek, cycle_time & 0x0000ffff, TR_CTWRL);
hellcreek_write(hellcreek, (cycle_time & 0xffff0000) >> 16, TR_CTWRH);
}
static void hellcreek_switch_schedule(struct hellcreek *hellcreek,
ktime_t start_time)
{
struct timespec64 ts = ktime_to_timespec64(start_time);
/* Start schedule at this point of time */
hellcreek_write(hellcreek, ts.tv_nsec & 0x0000ffff, TR_ESTWRL);
hellcreek_write(hellcreek, (ts.tv_nsec & 0xffff0000) >> 16, TR_ESTWRH);
/* Arm timer, set seconds and switch schedule */
hellcreek_write(hellcreek, TR_ESTCMD_ESTARM | TR_ESTCMD_ESTSWCFG |
((ts.tv_sec & TR_ESTCMD_ESTSEC_MASK) <<
TR_ESTCMD_ESTSEC_SHIFT), TR_ESTCMD);
}
static bool hellcreek_schedule_startable(struct hellcreek *hellcreek, int port)
{
struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
s64 base_time_ns, current_ns;
/* The switch allows a schedule to be started only within eight seconds
* in the future. Therefore, check the current PTP time to decide
* whether the schedule is startable or not.
*/
/* Use the "cached" time. That should be alright, as it's updated quite
* frequently in the PTP code.
*/
mutex_lock(&hellcreek->ptp_lock);
current_ns = hellcreek->seconds * NSEC_PER_SEC + hellcreek->last_ts;
mutex_unlock(&hellcreek->ptp_lock);
/* Calculate difference to admin base time */
base_time_ns = ktime_to_ns(hellcreek_port->current_schedule->base_time);
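/* The comparison below uses a four second threshold, i.e. half of the stated
* eight second window, presumably to keep a safety margin.
*/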
return base_time_ns - current_ns < (s64)4 * NSEC_PER_SEC;
}
static void hellcreek_start_schedule(struct hellcreek *hellcreek, int port)
{
struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
ktime_t base_time, current_time;
s64 current_ns;
u32 cycle_time;
/* First select port */
hellcreek_select_tgd(hellcreek, port);
/* Forward base time into the future if needed */
mutex_lock(&hellcreek->ptp_lock);
current_ns = hellcreek->seconds * NSEC_PER_SEC + hellcreek->last_ts;
mutex_unlock(&hellcreek->ptp_lock);
current_time = ns_to_ktime(current_ns);
base_time = hellcreek_port->current_schedule->base_time;
cycle_time = hellcreek_port->current_schedule->cycle_time;
if (ktime_compare(current_time, base_time) > 0) {
s64 n;
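/* n is the number of whole cycles that have elapsed since the admin base
* time; advancing by (n + 1) cycles places the new base time within one
* cycle time ahead of now.
*/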
n = div64_s64(ktime_sub_ns(current_time, base_time),
cycle_time);
base_time = ktime_add_ns(base_time, (n + 1) * cycle_time);
}
/* Set admin base time and switch schedule */
hellcreek_switch_schedule(hellcreek, base_time);
taprio_offload_free(hellcreek_port->current_schedule);
hellcreek_port->current_schedule = NULL;
dev_dbg(hellcreek->dev, "Armed EST timer for port %d\n",
hellcreek_port->port);
}
static void hellcreek_check_schedule(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
struct hellcreek_port *hellcreek_port;
struct hellcreek *hellcreek;
bool startable;
hellcreek_port = dw_to_hellcreek_port(dw);
hellcreek = hellcreek_port->hellcreek;
mutex_lock(&hellcreek->reg_lock);
/* Check starting time */
startable = hellcreek_schedule_startable(hellcreek,
hellcreek_port->port);
if (startable) {
hellcreek_start_schedule(hellcreek, hellcreek_port->port);
mutex_unlock(&hellcreek->reg_lock);
return;
}
mutex_unlock(&hellcreek->reg_lock);
/* Reschedule */
schedule_delayed_work(&hellcreek_port->schedule_work,
HELLCREEK_SCHEDULE_PERIOD);
}
static int hellcreek_port_set_schedule(struct dsa_switch *ds, int port,
struct tc_taprio_qopt_offload *taprio)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port *hellcreek_port;
bool startable;
u16 ctrl;
hellcreek_port = &hellcreek->ports[port];
dev_dbg(hellcreek->dev, "Configure traffic schedule on port %d\n",
port);
/* First cancel delayed work */
cancel_delayed_work_sync(&hellcreek_port->schedule_work);
mutex_lock(&hellcreek->reg_lock);
if (hellcreek_port->current_schedule) {
taprio_offload_free(hellcreek_port->current_schedule);
hellcreek_port->current_schedule = NULL;
}
hellcreek_port->current_schedule = taprio_offload_get(taprio);
/* Configure max sdu */
hellcreek_setup_maxsdu(hellcreek, port, hellcreek_port->current_schedule);
	/* Select tgd */
hellcreek_select_tgd(hellcreek, port);
/* Enable gating and keep defaults */
ctrl = (0xff << TR_TGDCTRL_ADMINGATESTATES_SHIFT) | TR_TGDCTRL_GATE_EN;
hellcreek_write(hellcreek, ctrl, TR_TGDCTRL);
/* Cancel pending schedule */
hellcreek_write(hellcreek, 0x00, TR_ESTCMD);
/* Setup a new schedule */
hellcreek_setup_gcl(hellcreek, port, hellcreek_port->current_schedule);
/* Configure cycle time */
hellcreek_set_cycle_time(hellcreek, hellcreek_port->current_schedule);
/* Check starting time */
startable = hellcreek_schedule_startable(hellcreek, port);
if (startable) {
hellcreek_start_schedule(hellcreek, port);
mutex_unlock(&hellcreek->reg_lock);
return 0;
}
mutex_unlock(&hellcreek->reg_lock);
/* Schedule periodic schedule check */
schedule_delayed_work(&hellcreek_port->schedule_work,
HELLCREEK_SCHEDULE_PERIOD);
return 0;
}
static int hellcreek_port_del_schedule(struct dsa_switch *ds, int port)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port *hellcreek_port;
hellcreek_port = &hellcreek->ports[port];
dev_dbg(hellcreek->dev, "Remove traffic schedule on port %d\n", port);
/* First cancel delayed work */
cancel_delayed_work_sync(&hellcreek_port->schedule_work);
mutex_lock(&hellcreek->reg_lock);
if (hellcreek_port->current_schedule) {
taprio_offload_free(hellcreek_port->current_schedule);
hellcreek_port->current_schedule = NULL;
}
/* Reset max sdu */
hellcreek_reset_maxsdu(hellcreek, port);
/* Select tgd */
hellcreek_select_tgd(hellcreek, port);
/* Disable gating and return to regular switching flow */
hellcreek_write(hellcreek, 0xff << TR_TGDCTRL_ADMINGATESTATES_SHIFT,
TR_TGDCTRL);
mutex_unlock(&hellcreek->reg_lock);
return 0;
}
static bool hellcreek_validate_schedule(struct hellcreek *hellcreek,
struct tc_taprio_qopt_offload *schedule)
{
size_t i;
/* Does this hellcreek version support Qbv in hardware? */
if (!hellcreek->pdata->qbv_support)
return false;
/* cycle time can only be 32bit */
if (schedule->cycle_time > (u32)-1)
return false;
/* cycle time extension is not supported */
if (schedule->cycle_time_extension)
return false;
/* Only set command is supported */
for (i = 0; i < schedule->num_entries; ++i)
if (schedule->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
return false;
return true;
}
static int hellcreek_tc_query_caps(struct tc_query_caps_base *base)
{
switch (base->type) {
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
caps->supports_queue_max_sdu = true;
return 0;
}
default:
return -EOPNOTSUPP;
}
}
static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data)
{
struct hellcreek *hellcreek = ds->priv;
switch (type) {
case TC_QUERY_CAPS:
return hellcreek_tc_query_caps(type_data);
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_qopt_offload *taprio = type_data;
switch (taprio->cmd) {
case TAPRIO_CMD_REPLACE:
if (!hellcreek_validate_schedule(hellcreek, taprio))
return -EOPNOTSUPP;
return hellcreek_port_set_schedule(ds, port, taprio);
case TAPRIO_CMD_DESTROY:
return hellcreek_port_del_schedule(ds, port);
default:
return -EOPNOTSUPP;
}
}
default:
return -EOPNOTSUPP;
}
}
static const struct dsa_switch_ops hellcreek_ds_ops = {
.devlink_info_get = hellcreek_devlink_info_get,
.get_ethtool_stats = hellcreek_get_ethtool_stats,
.get_sset_count = hellcreek_get_sset_count,
.get_strings = hellcreek_get_strings,
.get_tag_protocol = hellcreek_get_tag_protocol,
.get_ts_info = hellcreek_get_ts_info,
.phylink_get_caps = hellcreek_phylink_get_caps,
.port_bridge_flags = hellcreek_bridge_flags,
.port_bridge_join = hellcreek_port_bridge_join,
.port_bridge_leave = hellcreek_port_bridge_leave,
.port_disable = hellcreek_port_disable,
.port_enable = hellcreek_port_enable,
.port_fdb_add = hellcreek_fdb_add,
.port_fdb_del = hellcreek_fdb_del,
.port_fdb_dump = hellcreek_fdb_dump,
.port_hwtstamp_set = hellcreek_port_hwtstamp_set,
.port_hwtstamp_get = hellcreek_port_hwtstamp_get,
.port_pre_bridge_flags = hellcreek_pre_bridge_flags,
.port_prechangeupper = hellcreek_port_prechangeupper,
.port_rxtstamp = hellcreek_port_rxtstamp,
.port_setup_tc = hellcreek_port_setup_tc,
.port_stp_state_set = hellcreek_port_stp_state_set,
.port_txtstamp = hellcreek_port_txtstamp,
.port_vlan_add = hellcreek_vlan_add,
.port_vlan_del = hellcreek_vlan_del,
.port_vlan_filtering = hellcreek_vlan_filtering,
.setup = hellcreek_setup,
.teardown = hellcreek_teardown,
};
static int hellcreek_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hellcreek *hellcreek;
struct resource *res;
int ret, i;
hellcreek = devm_kzalloc(dev, sizeof(*hellcreek), GFP_KERNEL);
if (!hellcreek)
return -ENOMEM;
hellcreek->vidmbrcfg = devm_kcalloc(dev, VLAN_N_VID,
sizeof(*hellcreek->vidmbrcfg),
GFP_KERNEL);
if (!hellcreek->vidmbrcfg)
return -ENOMEM;
hellcreek->pdata = of_device_get_match_data(dev);
hellcreek->ports = devm_kcalloc(dev, hellcreek->pdata->num_ports,
sizeof(*hellcreek->ports),
GFP_KERNEL);
if (!hellcreek->ports)
return -ENOMEM;
for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
struct hellcreek_port *port = &hellcreek->ports[i];
port->counter_values =
devm_kcalloc(dev,
ARRAY_SIZE(hellcreek_counter),
sizeof(*port->counter_values),
GFP_KERNEL);
if (!port->counter_values)
return -ENOMEM;
port->vlan_dev_bitmap = devm_bitmap_zalloc(dev, VLAN_N_VID,
GFP_KERNEL);
if (!port->vlan_dev_bitmap)
return -ENOMEM;
port->hellcreek = hellcreek;
port->port = i;
INIT_DELAYED_WORK(&port->schedule_work,
hellcreek_check_schedule);
}
mutex_init(&hellcreek->reg_lock);
mutex_init(&hellcreek->vlan_lock);
mutex_init(&hellcreek->ptp_lock);
hellcreek->dev = dev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsn");
if (!res) {
dev_err(dev, "No memory region provided!\n");
return -ENODEV;
}
hellcreek->base = devm_ioremap_resource(dev, res);
if (IS_ERR(hellcreek->base))
return PTR_ERR(hellcreek->base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ptp");
if (!res) {
dev_err(dev, "No PTP memory region provided!\n");
return -ENODEV;
}
hellcreek->ptp_base = devm_ioremap_resource(dev, res);
if (IS_ERR(hellcreek->ptp_base))
return PTR_ERR(hellcreek->ptp_base);
ret = hellcreek_detect(hellcreek);
if (ret) {
dev_err(dev, "No (known) chip found!\n");
return ret;
}
ret = hellcreek_wait_until_ready(hellcreek);
if (ret) {
dev_err(dev, "Switch didn't become ready!\n");
return ret;
}
hellcreek_feature_detect(hellcreek);
hellcreek->ds = devm_kzalloc(dev, sizeof(*hellcreek->ds), GFP_KERNEL);
if (!hellcreek->ds)
return -ENOMEM;
hellcreek->ds->dev = dev;
hellcreek->ds->priv = hellcreek;
hellcreek->ds->ops = &hellcreek_ds_ops;
hellcreek->ds->num_ports = hellcreek->pdata->num_ports;
hellcreek->ds->num_tx_queues = HELLCREEK_NUM_EGRESS_QUEUES;
ret = dsa_register_switch(hellcreek->ds);
if (ret) {
dev_err_probe(dev, ret, "Unable to register switch\n");
return ret;
}
ret = hellcreek_ptp_setup(hellcreek);
if (ret) {
dev_err(dev, "Failed to setup PTP!\n");
goto err_ptp_setup;
}
ret = hellcreek_hwtstamp_setup(hellcreek);
if (ret) {
dev_err(dev, "Failed to setup hardware timestamping!\n");
goto err_tstamp_setup;
}
platform_set_drvdata(pdev, hellcreek);
return 0;
err_tstamp_setup:
hellcreek_ptp_free(hellcreek);
err_ptp_setup:
dsa_unregister_switch(hellcreek->ds);
return ret;
}
static int hellcreek_remove(struct platform_device *pdev)
{
struct hellcreek *hellcreek = platform_get_drvdata(pdev);
if (!hellcreek)
return 0;
hellcreek_hwtstamp_free(hellcreek);
hellcreek_ptp_free(hellcreek);
dsa_unregister_switch(hellcreek->ds);
return 0;
}
static void hellcreek_shutdown(struct platform_device *pdev)
{
struct hellcreek *hellcreek = platform_get_drvdata(pdev);
if (!hellcreek)
return;
dsa_switch_shutdown(hellcreek->ds);
platform_set_drvdata(pdev, NULL);
}
static const struct hellcreek_platform_data de1soc_r1_pdata = {
.name = "r4c30",
.num_ports = 4,
.is_100_mbits = 1,
.qbv_support = 1,
.qbv_on_cpu_port = 1,
.qbu_support = 0,
.module_id = 0x4c30,
};
static const struct of_device_id hellcreek_of_match[] = {
{
.compatible = "hirschmann,hellcreek-de1soc-r1",
.data = &de1soc_r1_pdata,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, hellcreek_of_match);
static struct platform_driver hellcreek_driver = {
.probe = hellcreek_probe,
.remove = hellcreek_remove,
.shutdown = hellcreek_shutdown,
.driver = {
.name = "hellcreek",
.of_match_table = hellcreek_of_match,
},
};
module_platform_driver(hellcreek_driver);
MODULE_AUTHOR("Kurt Kanzenbach <[email protected]>");
MODULE_DESCRIPTION("Hirschmann Hellcreek driver");
MODULE_LICENSE("Dual MIT/GPL");
| linux-master | drivers/net/dsa/hirschmann/hellcreek.c |
// SPDX-License-Identifier: GPL-2.0
/* Realtek SMI subdriver for the Realtek RTL8365MB-VC ethernet switch.
*
* Copyright (C) 2021 Alvin Šipraga <[email protected]>
* Copyright (C) 2021 Michael Rasmussen <[email protected]>
*
* The RTL8365MB-VC is a 4+1 port 10/100/1000M switch controller. It includes 4
* integrated PHYs for the user facing ports, and an extension interface which
* can be connected to the CPU - or another PHY - via either MII, RMII, or
* RGMII. The switch is configured via the Realtek Simple Management Interface
* (SMI), which uses the MDIO/MDC lines.
*
* Below is a simplified block diagram of the chip and its relevant interfaces.
*
* .-----------------------------------.
* | |
* UTP <---------------> Giga PHY <-> PCS <-> P0 GMAC |
* UTP <---------------> Giga PHY <-> PCS <-> P1 GMAC |
* UTP <---------------> Giga PHY <-> PCS <-> P2 GMAC |
* UTP <---------------> Giga PHY <-> PCS <-> P3 GMAC |
* | |
* CPU/PHY <-MII/RMII/RGMII---> Extension <---> Extension |
* | interface 1 GMAC 1 |
* | |
* SMI driver/ <-MDC/SCL---> Management ~~~~~~~~~~~~~~ |
* EEPROM <-MDIO/SDA--> interface ~REALTEK ~~~~~ |
* | ~RTL8365MB ~~~ |
* | ~GXXXC TAIWAN~ |
* GPIO <--------------> Reset ~~~~~~~~~~~~~~ |
* | |
* Interrupt <----------> Link UP/DOWN events |
* controller | |
* '-----------------------------------'
*
* The driver uses DSA to integrate the 4 user and 1 extension ports into the
* kernel. Netdevices are created for the user ports, as are PHY devices for
* their integrated PHYs. The device tree firmware should also specify the link
* partner of the extension port - either via a fixed-link or other phy-handle.
* See the device tree bindings for more detailed information. Note that the
* driver has only been tested with a fixed-link, but in principle it should not
* matter.
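 *
 * As an illustrative sketch only (refer to the bindings for the authoritative
 * syntax), the link partner of the extension port may be described with a
 * fixed-link node such as:
 *
 *     fixed-link {
 *             speed = <1000>;
 *             full-duplex;
 *     };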
*
* NOTE: Currently, only the RGMII interface is implemented in this driver.
*
* The interrupt line is asserted on link UP/DOWN events. The driver creates a
* custom irqchip to handle this interrupt and demultiplex the events by reading
* the status registers via SMI. Interrupts are then propagated to the relevant
* PHY device.
*
* The EEPROM contains initial register values which the chip will read over I2C
* upon hardware reset. It is also possible to omit the EEPROM. In both cases,
* the driver will manually reprogram some registers using jam tables to reach
* an initial state defined by the vendor driver.
*
* This Linux driver is written based on an OS-agnostic vendor driver from
* Realtek. The reference GPL-licensed sources can be found in the OpenWrt
* source tree under the name rtl8367c. The vendor driver claims to support a
* number of similar switch controllers from Realtek, but the only hardware we
* have is the RTL8365MB-VC. Moreover, there does not seem to be any chip under
* the name RTL8367C. Although one wishes that the 'C' stood for some kind of
* common hardware revision, there exist examples of chips with the suffix -VC
* which are explicitly not supported by the rtl8367c driver and which instead
* require the rtl8367d vendor driver. With all this uncertainty, the driver has
* been modestly named rtl8365mb. Future implementors may wish to rename things
* accordingly.
*
* In the same family of chips, some carry up to 8 user ports and up to 2
* extension ports. Where possible this driver tries to make things generic, but
* more work must be done to support these configurations. According to
* documentation from Realtek, the family should include the following chips:
*
* - RTL8363NB
* - RTL8363NB-VB
* - RTL8363SC
* - RTL8363SC-VB
* - RTL8364NB
* - RTL8364NB-VB
* - RTL8365MB-VC
* - RTL8366SC
* - RTL8367RB-VB
* - RTL8367SB
* - RTL8367S
* - RTL8370MB
* - RTL8310SR
*
* Some of the register logic for these additional chips has been skipped over
* while implementing this driver. It is therefore not possible to assume that
* things will work out-of-the-box for other chips, and a careful review of the
* vendor driver may be needed to expand support. The RTL8365MB-VC seems to be
* one of the simpler chips.
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/mutex.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include "realtek.h"
/* Family-specific data and limits */
#define RTL8365MB_PHYADDRMAX 7
#define RTL8365MB_NUM_PHYREGS 32
#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
#define RTL8365MB_MAX_NUM_PORTS 11
#define RTL8365MB_MAX_NUM_EXTINTS 3
#define RTL8365MB_LEARN_LIMIT_MAX 2112
/* Chip identification registers */
#define RTL8365MB_CHIP_ID_REG 0x1300
#define RTL8365MB_CHIP_VER_REG 0x1301
#define RTL8365MB_MAGIC_REG 0x13C2
#define RTL8365MB_MAGIC_VALUE 0x0249
/* Chip reset register */
#define RTL8365MB_CHIP_RESET_REG 0x1322
#define RTL8365MB_CHIP_RESET_SW_MASK 0x0002
#define RTL8365MB_CHIP_RESET_HW_MASK 0x0001
/* Interrupt polarity register */
#define RTL8365MB_INTR_POLARITY_REG 0x1100
#define RTL8365MB_INTR_POLARITY_MASK 0x0001
#define RTL8365MB_INTR_POLARITY_HIGH 0
#define RTL8365MB_INTR_POLARITY_LOW 1
/* Interrupt control/status register - enable/check specific interrupt types */
#define RTL8365MB_INTR_CTRL_REG 0x1101
#define RTL8365MB_INTR_STATUS_REG 0x1102
#define RTL8365MB_INTR_SLIENT_START_2_MASK 0x1000
#define RTL8365MB_INTR_SLIENT_START_MASK 0x0800
#define RTL8365MB_INTR_ACL_ACTION_MASK 0x0200
#define RTL8365MB_INTR_CABLE_DIAG_FIN_MASK 0x0100
#define RTL8365MB_INTR_INTERRUPT_8051_MASK 0x0080
#define RTL8365MB_INTR_LOOP_DETECTION_MASK 0x0040
#define RTL8365MB_INTR_GREEN_TIMER_MASK 0x0020
#define RTL8365MB_INTR_SPECIAL_CONGEST_MASK 0x0010
#define RTL8365MB_INTR_SPEED_CHANGE_MASK 0x0008
#define RTL8365MB_INTR_LEARN_OVER_MASK 0x0004
#define RTL8365MB_INTR_METER_EXCEEDED_MASK 0x0002
#define RTL8365MB_INTR_LINK_CHANGE_MASK 0x0001
#define RTL8365MB_INTR_ALL_MASK \
(RTL8365MB_INTR_SLIENT_START_2_MASK | \
RTL8365MB_INTR_SLIENT_START_MASK | \
RTL8365MB_INTR_ACL_ACTION_MASK | \
RTL8365MB_INTR_CABLE_DIAG_FIN_MASK | \
RTL8365MB_INTR_INTERRUPT_8051_MASK | \
RTL8365MB_INTR_LOOP_DETECTION_MASK | \
RTL8365MB_INTR_GREEN_TIMER_MASK | \
RTL8365MB_INTR_SPECIAL_CONGEST_MASK | \
RTL8365MB_INTR_SPEED_CHANGE_MASK | \
RTL8365MB_INTR_LEARN_OVER_MASK | \
RTL8365MB_INTR_METER_EXCEEDED_MASK | \
RTL8365MB_INTR_LINK_CHANGE_MASK)
/* Per-port interrupt type status registers */
#define RTL8365MB_PORT_LINKDOWN_IND_REG 0x1106
#define RTL8365MB_PORT_LINKDOWN_IND_MASK 0x07FF
#define RTL8365MB_PORT_LINKUP_IND_REG 0x1107
#define RTL8365MB_PORT_LINKUP_IND_MASK 0x07FF
/* PHY indirect access registers */
#define RTL8365MB_INDIRECT_ACCESS_CTRL_REG 0x1F00
#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK 0x0002
#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ 0
#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE 1
#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK 0x0001
#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE 1
#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0)
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(7, 5)
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8)
#define RTL8365MB_PHY_BASE 0x2000
#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03
#define RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG 0x1F04
/* PHY OCP address prefix register */
#define RTL8365MB_GPHY_OCP_MSB_0_REG 0x1D15
#define RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK 0x0FC0
#define RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK 0xFC00
/* The PHY OCP addresses of PHY registers 0~31 start here */
#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
/* External interface port mode values - used in DIGITAL_INTERFACE_SELECT */
#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
#define RTL8365MB_EXT_PORT_MODE_RGMII 1
#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
#define RTL8365MB_EXT_PORT_MODE_MII_PHY 3
#define RTL8365MB_EXT_PORT_MODE_TMII_MAC 4
#define RTL8365MB_EXT_PORT_MODE_TMII_PHY 5
#define RTL8365MB_EXT_PORT_MODE_GMII 6
#define RTL8365MB_EXT_PORT_MODE_RMII_MAC 7
#define RTL8365MB_EXT_PORT_MODE_RMII_PHY 8
#define RTL8365MB_EXT_PORT_MODE_SGMII 9
#define RTL8365MB_EXT_PORT_MODE_HSGMII 10
#define RTL8365MB_EXT_PORT_MODE_1000X_100FX 11
#define RTL8365MB_EXT_PORT_MODE_1000X 12
#define RTL8365MB_EXT_PORT_MODE_100FX 13
/* External interface mode configuration registers 0~1 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT1 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3 /* EXT2 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extint) \
((_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
(_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 : \
0x0)
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extint) \
		(0xF << (((_extint) % 2) * 4))
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extint) \
(((_extint) % 2) * 4)
/* External interface RGMII TX/RX delay configuration registers 0~2 */
#define RTL8365MB_EXT_RGMXF_REG0 0x1306 /* EXT0 */
#define RTL8365MB_EXT_RGMXF_REG1 0x1307 /* EXT1 */
#define RTL8365MB_EXT_RGMXF_REG2 0x13C5 /* EXT2 */
#define RTL8365MB_EXT_RGMXF_REG(_extint) \
((_extint) == 0 ? RTL8365MB_EXT_RGMXF_REG0 : \
(_extint) == 1 ? RTL8365MB_EXT_RGMXF_REG1 : \
(_extint) == 2 ? RTL8365MB_EXT_RGMXF_REG2 : \
0x0)
#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007
#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008
/* External interface port speed values - used in DIGITAL_INTERFACE_FORCE */
#define RTL8365MB_PORT_SPEED_10M 0
#define RTL8365MB_PORT_SPEED_100M 1
#define RTL8365MB_PORT_SPEED_1000M 2
/* External interface force configuration registers 0~2 */
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310 /* EXT0 */
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311 /* EXT1 */
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4 /* EXT2 */
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extint) \
((_extint) == 0 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 : \
(_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 : \
(_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 : \
0x0)
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK 0x0020
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK 0x0010
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK 0x0004
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK 0x0003
/* CPU port mask register - controls which ports are treated as CPU ports */
#define RTL8365MB_CPU_PORT_MASK_REG 0x1219
#define RTL8365MB_CPU_PORT_MASK_MASK 0x07FF
/* CPU control register */
#define RTL8365MB_CPU_CTRL_REG 0x121A
#define RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK 0x0400
#define RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK 0x0200
#define RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK 0x0080
#define RTL8365MB_CPU_CTRL_TAG_POSITION_MASK 0x0040
#define RTL8365MB_CPU_CTRL_TRAP_PORT_MASK 0x0038
#define RTL8365MB_CPU_CTRL_INSERTMODE_MASK 0x0006
#define RTL8365MB_CPU_CTRL_EN_MASK 0x0001
/* Maximum packet length register */
#define RTL8365MB_CFG0_MAX_LEN_REG 0x088C
#define RTL8365MB_CFG0_MAX_LEN_MASK 0x3FFF
#define RTL8365MB_CFG0_MAX_LEN_MAX 0x3FFF
/* Port learning limit registers */
#define RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE 0x0A20
#define RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(_physport) \
(RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE + (_physport))
/* Port isolation (forwarding mask) registers */
#define RTL8365MB_PORT_ISOLATION_REG_BASE 0x08A2
#define RTL8365MB_PORT_ISOLATION_REG(_physport) \
(RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport))
#define RTL8365MB_PORT_ISOLATION_MASK 0x07FF
/* MSTP port state registers - indexed by tree instance */
#define RTL8365MB_MSTI_CTRL_BASE 0x0A00
#define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \
(RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3))
#define RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(_physport) ((_physport) << 1)
#define RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(_physport) \
(0x3 << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET((_physport)))
/* MIB counter value registers */
#define RTL8365MB_MIB_COUNTER_BASE 0x1000
#define RTL8365MB_MIB_COUNTER_REG(_x) (RTL8365MB_MIB_COUNTER_BASE + (_x))
/* MIB counter address register */
#define RTL8365MB_MIB_ADDRESS_REG 0x1004
#define RTL8365MB_MIB_ADDRESS_PORT_OFFSET 0x007C
#define RTL8365MB_MIB_ADDRESS(_p, _x) \
(((RTL8365MB_MIB_ADDRESS_PORT_OFFSET) * (_p) + (_x)) >> 2)
#define RTL8365MB_MIB_CTRL0_REG 0x1005
#define RTL8365MB_MIB_CTRL0_RESET_MASK 0x0002
#define RTL8365MB_MIB_CTRL0_BUSY_MASK 0x0001
/* The DSA callback .get_stats64 runs in atomic context, so we are not allowed
* to block. On the other hand, accessing MIB counters absolutely requires us to
* block. The solution is thus to schedule work which polls the MIB counters
* asynchronously and updates some private data, which the callback can then
* fetch atomically. Three seconds should be a good enough polling interval.
*/
#define RTL8365MB_STATS_INTERVAL_JIFFIES (3 * HZ)
enum rtl8365mb_mib_counter_index {
RTL8365MB_MIB_ifInOctets,
RTL8365MB_MIB_dot3StatsFCSErrors,
RTL8365MB_MIB_dot3StatsSymbolErrors,
RTL8365MB_MIB_dot3InPauseFrames,
RTL8365MB_MIB_dot3ControlInUnknownOpcodes,
RTL8365MB_MIB_etherStatsFragments,
RTL8365MB_MIB_etherStatsJabbers,
RTL8365MB_MIB_ifInUcastPkts,
RTL8365MB_MIB_etherStatsDropEvents,
RTL8365MB_MIB_ifInMulticastPkts,
RTL8365MB_MIB_ifInBroadcastPkts,
RTL8365MB_MIB_inMldChecksumError,
RTL8365MB_MIB_inIgmpChecksumError,
RTL8365MB_MIB_inMldSpecificQuery,
RTL8365MB_MIB_inMldGeneralQuery,
RTL8365MB_MIB_inIgmpSpecificQuery,
RTL8365MB_MIB_inIgmpGeneralQuery,
RTL8365MB_MIB_inMldLeaves,
RTL8365MB_MIB_inIgmpLeaves,
RTL8365MB_MIB_etherStatsOctets,
RTL8365MB_MIB_etherStatsUnderSizePkts,
RTL8365MB_MIB_etherOversizeStats,
RTL8365MB_MIB_etherStatsPkts64Octets,
RTL8365MB_MIB_etherStatsPkts65to127Octets,
RTL8365MB_MIB_etherStatsPkts128to255Octets,
RTL8365MB_MIB_etherStatsPkts256to511Octets,
RTL8365MB_MIB_etherStatsPkts512to1023Octets,
RTL8365MB_MIB_etherStatsPkts1024to1518Octets,
RTL8365MB_MIB_ifOutOctets,
RTL8365MB_MIB_dot3StatsSingleCollisionFrames,
RTL8365MB_MIB_dot3StatsMultipleCollisionFrames,
RTL8365MB_MIB_dot3StatsDeferredTransmissions,
RTL8365MB_MIB_dot3StatsLateCollisions,
RTL8365MB_MIB_etherStatsCollisions,
RTL8365MB_MIB_dot3StatsExcessiveCollisions,
RTL8365MB_MIB_dot3OutPauseFrames,
RTL8365MB_MIB_ifOutDiscards,
RTL8365MB_MIB_dot1dTpPortInDiscards,
RTL8365MB_MIB_ifOutUcastPkts,
RTL8365MB_MIB_ifOutMulticastPkts,
RTL8365MB_MIB_ifOutBroadcastPkts,
RTL8365MB_MIB_outOampduPkts,
RTL8365MB_MIB_inOampduPkts,
RTL8365MB_MIB_inIgmpJoinsSuccess,
RTL8365MB_MIB_inIgmpJoinsFail,
RTL8365MB_MIB_inMldJoinsSuccess,
RTL8365MB_MIB_inMldJoinsFail,
RTL8365MB_MIB_inReportSuppressionDrop,
RTL8365MB_MIB_inLeaveSuppressionDrop,
RTL8365MB_MIB_outIgmpReports,
RTL8365MB_MIB_outIgmpLeaves,
RTL8365MB_MIB_outIgmpGeneralQuery,
RTL8365MB_MIB_outIgmpSpecificQuery,
RTL8365MB_MIB_outMldReports,
RTL8365MB_MIB_outMldLeaves,
RTL8365MB_MIB_outMldGeneralQuery,
RTL8365MB_MIB_outMldSpecificQuery,
RTL8365MB_MIB_inKnownMulticastPkts,
RTL8365MB_MIB_END,
};
struct rtl8365mb_mib_counter {
u32 offset;
u32 length;
const char *name;
};
#define RTL8365MB_MAKE_MIB_COUNTER(_offset, _length, _name) \
[RTL8365MB_MIB_ ## _name] = { _offset, _length, #_name }
static struct rtl8365mb_mib_counter rtl8365mb_mib_counters[] = {
RTL8365MB_MAKE_MIB_COUNTER(0, 4, ifInOctets),
RTL8365MB_MAKE_MIB_COUNTER(4, 2, dot3StatsFCSErrors),
RTL8365MB_MAKE_MIB_COUNTER(6, 2, dot3StatsSymbolErrors),
RTL8365MB_MAKE_MIB_COUNTER(8, 2, dot3InPauseFrames),
RTL8365MB_MAKE_MIB_COUNTER(10, 2, dot3ControlInUnknownOpcodes),
RTL8365MB_MAKE_MIB_COUNTER(12, 2, etherStatsFragments),
RTL8365MB_MAKE_MIB_COUNTER(14, 2, etherStatsJabbers),
RTL8365MB_MAKE_MIB_COUNTER(16, 2, ifInUcastPkts),
RTL8365MB_MAKE_MIB_COUNTER(18, 2, etherStatsDropEvents),
RTL8365MB_MAKE_MIB_COUNTER(20, 2, ifInMulticastPkts),
RTL8365MB_MAKE_MIB_COUNTER(22, 2, ifInBroadcastPkts),
RTL8365MB_MAKE_MIB_COUNTER(24, 2, inMldChecksumError),
RTL8365MB_MAKE_MIB_COUNTER(26, 2, inIgmpChecksumError),
RTL8365MB_MAKE_MIB_COUNTER(28, 2, inMldSpecificQuery),
RTL8365MB_MAKE_MIB_COUNTER(30, 2, inMldGeneralQuery),
RTL8365MB_MAKE_MIB_COUNTER(32, 2, inIgmpSpecificQuery),
RTL8365MB_MAKE_MIB_COUNTER(34, 2, inIgmpGeneralQuery),
RTL8365MB_MAKE_MIB_COUNTER(36, 2, inMldLeaves),
RTL8365MB_MAKE_MIB_COUNTER(38, 2, inIgmpLeaves),
RTL8365MB_MAKE_MIB_COUNTER(40, 4, etherStatsOctets),
RTL8365MB_MAKE_MIB_COUNTER(44, 2, etherStatsUnderSizePkts),
RTL8365MB_MAKE_MIB_COUNTER(46, 2, etherOversizeStats),
RTL8365MB_MAKE_MIB_COUNTER(48, 2, etherStatsPkts64Octets),
RTL8365MB_MAKE_MIB_COUNTER(50, 2, etherStatsPkts65to127Octets),
RTL8365MB_MAKE_MIB_COUNTER(52, 2, etherStatsPkts128to255Octets),
RTL8365MB_MAKE_MIB_COUNTER(54, 2, etherStatsPkts256to511Octets),
RTL8365MB_MAKE_MIB_COUNTER(56, 2, etherStatsPkts512to1023Octets),
RTL8365MB_MAKE_MIB_COUNTER(58, 2, etherStatsPkts1024to1518Octets),
RTL8365MB_MAKE_MIB_COUNTER(60, 4, ifOutOctets),
RTL8365MB_MAKE_MIB_COUNTER(64, 2, dot3StatsSingleCollisionFrames),
RTL8365MB_MAKE_MIB_COUNTER(66, 2, dot3StatsMultipleCollisionFrames),
RTL8365MB_MAKE_MIB_COUNTER(68, 2, dot3StatsDeferredTransmissions),
RTL8365MB_MAKE_MIB_COUNTER(70, 2, dot3StatsLateCollisions),
RTL8365MB_MAKE_MIB_COUNTER(72, 2, etherStatsCollisions),
RTL8365MB_MAKE_MIB_COUNTER(74, 2, dot3StatsExcessiveCollisions),
RTL8365MB_MAKE_MIB_COUNTER(76, 2, dot3OutPauseFrames),
RTL8365MB_MAKE_MIB_COUNTER(78, 2, ifOutDiscards),
RTL8365MB_MAKE_MIB_COUNTER(80, 2, dot1dTpPortInDiscards),
RTL8365MB_MAKE_MIB_COUNTER(82, 2, ifOutUcastPkts),
RTL8365MB_MAKE_MIB_COUNTER(84, 2, ifOutMulticastPkts),
RTL8365MB_MAKE_MIB_COUNTER(86, 2, ifOutBroadcastPkts),
RTL8365MB_MAKE_MIB_COUNTER(88, 2, outOampduPkts),
RTL8365MB_MAKE_MIB_COUNTER(90, 2, inOampduPkts),
RTL8365MB_MAKE_MIB_COUNTER(92, 4, inIgmpJoinsSuccess),
RTL8365MB_MAKE_MIB_COUNTER(96, 2, inIgmpJoinsFail),
RTL8365MB_MAKE_MIB_COUNTER(98, 2, inMldJoinsSuccess),
RTL8365MB_MAKE_MIB_COUNTER(100, 2, inMldJoinsFail),
RTL8365MB_MAKE_MIB_COUNTER(102, 2, inReportSuppressionDrop),
RTL8365MB_MAKE_MIB_COUNTER(104, 2, inLeaveSuppressionDrop),
RTL8365MB_MAKE_MIB_COUNTER(106, 2, outIgmpReports),
RTL8365MB_MAKE_MIB_COUNTER(108, 2, outIgmpLeaves),
RTL8365MB_MAKE_MIB_COUNTER(110, 2, outIgmpGeneralQuery),
RTL8365MB_MAKE_MIB_COUNTER(112, 2, outIgmpSpecificQuery),
RTL8365MB_MAKE_MIB_COUNTER(114, 2, outMldReports),
RTL8365MB_MAKE_MIB_COUNTER(116, 2, outMldLeaves),
RTL8365MB_MAKE_MIB_COUNTER(118, 2, outMldGeneralQuery),
RTL8365MB_MAKE_MIB_COUNTER(120, 2, outMldSpecificQuery),
RTL8365MB_MAKE_MIB_COUNTER(122, 2, inKnownMulticastPkts),
};
static_assert(ARRAY_SIZE(rtl8365mb_mib_counters) == RTL8365MB_MIB_END);
struct rtl8365mb_jam_tbl_entry {
u16 reg;
u16 val;
};
/* Lifted from the vendor driver sources */
static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_8365mb_vc[] = {
{ 0x13EB, 0x15BB }, { 0x1303, 0x06D6 }, { 0x1304, 0x0700 },
{ 0x13E2, 0x003F }, { 0x13F9, 0x0090 }, { 0x121E, 0x03CA },
{ 0x1233, 0x0352 }, { 0x1237, 0x00A0 }, { 0x123A, 0x0030 },
{ 0x1239, 0x0084 }, { 0x0301, 0x1000 }, { 0x1349, 0x001F },
{ 0x18E0, 0x4004 }, { 0x122B, 0x241C }, { 0x1305, 0xC000 },
{ 0x13F0, 0x0000 },
};
static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = {
{ 0x1200, 0x7FCB }, { 0x0884, 0x0003 }, { 0x06EB, 0x0001 },
	{ 0x03FA, 0x0007 }, { 0x08C8, 0x00C0 }, { 0x0A30, 0x020E },
{ 0x0800, 0x0000 }, { 0x0802, 0x0000 }, { 0x09DA, 0x0013 },
{ 0x1D32, 0x0002 },
};
enum rtl8365mb_phy_interface_mode {
RTL8365MB_PHY_INTERFACE_MODE_INVAL = 0,
RTL8365MB_PHY_INTERFACE_MODE_INTERNAL = BIT(0),
RTL8365MB_PHY_INTERFACE_MODE_MII = BIT(1),
RTL8365MB_PHY_INTERFACE_MODE_TMII = BIT(2),
RTL8365MB_PHY_INTERFACE_MODE_RMII = BIT(3),
RTL8365MB_PHY_INTERFACE_MODE_RGMII = BIT(4),
RTL8365MB_PHY_INTERFACE_MODE_SGMII = BIT(5),
RTL8365MB_PHY_INTERFACE_MODE_HSGMII = BIT(6),
};
/**
* struct rtl8365mb_extint - external interface info
* @port: the port with an external interface
* @id: the external interface ID, which is either 0, 1, or 2
* @supported_interfaces: a bitmask of supported PHY interface modes
*
* Represents a mapping: port -> { id, supported_interfaces }. To be embedded
* in &struct rtl8365mb_chip_info for every port with an external interface.
*/
struct rtl8365mb_extint {
int port;
int id;
unsigned int supported_interfaces;
};
/**
* struct rtl8365mb_chip_info - static chip-specific info
* @name: human-readable chip name
* @chip_id: chip identifier
* @chip_ver: chip silicon revision
* @extints: available external interfaces
* @jam_table: chip-specific initialization jam table
* @jam_size: size of the chip's jam table
*
* These data are specific to a given chip in the family of switches supported
* by this driver. When adding support for another chip in the family, a new
* chip info should be added to the rtl8365mb_chip_infos array.
*/
struct rtl8365mb_chip_info {
const char *name;
u32 chip_id;
u32 chip_ver;
const struct rtl8365mb_extint extints[RTL8365MB_MAX_NUM_EXTINTS];
const struct rtl8365mb_jam_tbl_entry *jam_table;
size_t jam_size;
};
/* Chip info for each supported switch in the family */
#define PHY_INTF(_mode) (RTL8365MB_PHY_INTERFACE_MODE_ ## _mode)
static const struct rtl8365mb_chip_info rtl8365mb_chip_infos[] = {
{
.name = "RTL8365MB-VC",
.chip_id = 0x6367,
.chip_ver = 0x0040,
.extints = {
{ 6, 1, PHY_INTF(MII) | PHY_INTF(TMII) |
PHY_INTF(RMII) | PHY_INTF(RGMII) },
},
.jam_table = rtl8365mb_init_jam_8365mb_vc,
.jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
},
{
.name = "RTL8367S",
.chip_id = 0x6367,
.chip_ver = 0x00A0,
.extints = {
{ 6, 1, PHY_INTF(SGMII) | PHY_INTF(HSGMII) },
{ 7, 2, PHY_INTF(MII) | PHY_INTF(TMII) |
PHY_INTF(RMII) | PHY_INTF(RGMII) },
},
.jam_table = rtl8365mb_init_jam_8365mb_vc,
.jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
},
{
.name = "RTL8367RB-VB",
.chip_id = 0x6367,
.chip_ver = 0x0020,
.extints = {
{ 6, 1, PHY_INTF(MII) | PHY_INTF(TMII) |
PHY_INTF(RMII) | PHY_INTF(RGMII) },
{ 7, 2, PHY_INTF(MII) | PHY_INTF(TMII) |
PHY_INTF(RMII) | PHY_INTF(RGMII) },
},
.jam_table = rtl8365mb_init_jam_8365mb_vc,
.jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
},
};
enum rtl8365mb_stp_state {
RTL8365MB_STP_STATE_DISABLED = 0,
RTL8365MB_STP_STATE_BLOCKING = 1,
RTL8365MB_STP_STATE_LEARNING = 2,
RTL8365MB_STP_STATE_FORWARDING = 3,
};
enum rtl8365mb_cpu_insert {
RTL8365MB_CPU_INSERT_TO_ALL = 0,
RTL8365MB_CPU_INSERT_TO_TRAPPING = 1,
RTL8365MB_CPU_INSERT_TO_NONE = 2,
};
enum rtl8365mb_cpu_position {
RTL8365MB_CPU_POS_AFTER_SA = 0,
RTL8365MB_CPU_POS_BEFORE_CRC = 1,
};
enum rtl8365mb_cpu_format {
RTL8365MB_CPU_FORMAT_8BYTES = 0,
RTL8365MB_CPU_FORMAT_4BYTES = 1,
};
enum rtl8365mb_cpu_rxlen {
RTL8365MB_CPU_RXLEN_72BYTES = 0,
RTL8365MB_CPU_RXLEN_64BYTES = 1,
};
/**
* struct rtl8365mb_cpu - CPU port configuration
* @enable: enable/disable hardware insertion of CPU tag in switch->CPU frames
 * @mask: port mask of ports that should parse CPU tags
* @trap_port: forward trapped frames to this port
* @insert: CPU tag insertion mode in switch->CPU frames
* @position: position of CPU tag in frame
* @rx_length: minimum CPU RX length
* @format: CPU tag format
*
* Represents the CPU tagging and CPU port configuration of the switch. These
* settings are configurable at runtime.
*/
struct rtl8365mb_cpu {
bool enable;
u32 mask;
u32 trap_port;
enum rtl8365mb_cpu_insert insert;
enum rtl8365mb_cpu_position position;
enum rtl8365mb_cpu_rxlen rx_length;
enum rtl8365mb_cpu_format format;
};
/**
* struct rtl8365mb_port - private per-port data
* @priv: pointer to parent realtek_priv data
* @index: DSA port index, same as dsa_port::index
* @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic
* access via rtl8365mb_get_stats64
* @stats_lock: protect the stats structure during read/update
* @mib_work: delayed work for polling MIB counters
*/
struct rtl8365mb_port {
struct realtek_priv *priv;
unsigned int index;
struct rtnl_link_stats64 stats;
spinlock_t stats_lock;
struct delayed_work mib_work;
};
/**
* struct rtl8365mb - driver private data
* @priv: pointer to parent realtek_priv data
* @irq: registered IRQ or zero
* @chip_info: chip-specific info about the attached switch
* @cpu: CPU tagging and CPU port configuration for this chip
* @mib_lock: prevent concurrent reads of MIB counters
* @ports: per-port data
*
* Private data for this driver.
*/
struct rtl8365mb {
struct realtek_priv *priv;
int irq;
const struct rtl8365mb_chip_info *chip_info;
struct rtl8365mb_cpu cpu;
struct mutex mib_lock;
struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS];
};
static int rtl8365mb_phy_poll_busy(struct realtek_priv *priv)
{
u32 val;
return regmap_read_poll_timeout(priv->map_nolock,
RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
val, !val, 10, 100);
}
static int rtl8365mb_phy_ocp_prepare(struct realtek_priv *priv, int phy,
u32 ocp_addr)
{
u32 val;
int ret;
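	/* The 16 bit OCP address is split across two registers: bits 15:10
	 * form the prefix written to RTL8365MB_GPHY_OCP_MSB_0_REG, while bits
	 * 9:6 and 5:1 are packed into the indirect access address register
	 * together with the PHY number (bit 0 is not encoded).
	 */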
/* Set OCP prefix */
val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
ret = regmap_update_bits(
priv->map_nolock, RTL8365MB_GPHY_OCP_MSB_0_REG,
RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
if (ret)
return ret;
/* Set PHY register address */
val = RTL8365MB_PHY_BASE;
val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK, phy);
val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK,
ocp_addr >> 1);
val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
ocp_addr >> 6);
ret = regmap_write(priv->map_nolock,
RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG, val);
if (ret)
return ret;
return 0;
}
static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
u32 ocp_addr, u16 *data)
{
u32 val;
int ret;
mutex_lock(&priv->map_lock);
ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
goto out;
ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
if (ret)
goto out;
/* Execute read operation */
val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
val);
if (ret)
goto out;
ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
goto out;
/* Get PHY register data */
ret = regmap_read(priv->map_nolock,
RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG, &val);
if (ret)
goto out;
*data = val & 0xFFFF;
out:
mutex_unlock(&priv->map_lock);
return ret;
}
static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
u32 ocp_addr, u16 data)
{
u32 val;
int ret;
mutex_lock(&priv->map_lock);
ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
goto out;
ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
if (ret)
goto out;
/* Set PHY register data */
ret = regmap_write(priv->map_nolock,
RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG, data);
if (ret)
goto out;
/* Execute write operation */
val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
val);
if (ret)
goto out;
ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
goto out;
out:
mutex_unlock(&priv->map_lock);
	return ret;
}
static int rtl8365mb_phy_read(struct realtek_priv *priv, int phy, int regnum)
{
u32 ocp_addr;
u16 val;
int ret;
if (phy > RTL8365MB_PHYADDRMAX)
return -EINVAL;
if (regnum > RTL8365MB_PHYREGMAX)
return -EINVAL;
ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
ret = rtl8365mb_phy_ocp_read(priv, phy, ocp_addr, &val);
if (ret) {
dev_err(priv->dev,
"failed to read PHY%d reg %02x @ %04x, ret %d\n", phy,
regnum, ocp_addr, ret);
return ret;
}
dev_dbg(priv->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
phy, regnum, ocp_addr, val);
return val;
}
static int rtl8365mb_phy_write(struct realtek_priv *priv, int phy, int regnum,
u16 val)
{
u32 ocp_addr;
int ret;
if (phy > RTL8365MB_PHYADDRMAX)
return -EINVAL;
if (regnum > RTL8365MB_PHYREGMAX)
return -EINVAL;
ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
ret = rtl8365mb_phy_ocp_write(priv, phy, ocp_addr, val);
if (ret) {
dev_err(priv->dev,
"failed to write PHY%d reg %02x @ %04x, ret %d\n", phy,
regnum, ocp_addr, ret);
return ret;
}
dev_dbg(priv->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
phy, regnum, ocp_addr, val);
return 0;
}
static int rtl8365mb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
return rtl8365mb_phy_read(ds->priv, phy, regnum);
}
static int rtl8365mb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
u16 val)
{
return rtl8365mb_phy_write(ds->priv, phy, regnum, val);
}
static const struct rtl8365mb_extint *
rtl8365mb_get_port_extint(struct realtek_priv *priv, int port)
{
struct rtl8365mb *mb = priv->chip_data;
int i;
for (i = 0; i < RTL8365MB_MAX_NUM_EXTINTS; i++) {
const struct rtl8365mb_extint *extint =
&mb->chip_info->extints[i];
if (!extint->supported_interfaces)
continue;
if (extint->port == port)
return extint;
}
return NULL;
}
static enum dsa_tag_protocol
rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
struct realtek_priv *priv = ds->priv;
struct rtl8365mb_cpu *cpu;
struct rtl8365mb *mb;
mb = priv->chip_data;
cpu = &mb->cpu;
if (cpu->position == RTL8365MB_CPU_POS_BEFORE_CRC)
return DSA_TAG_PROTO_RTL8_4T;
return DSA_TAG_PROTO_RTL8_4;
}
static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
phy_interface_t interface)
{
const struct rtl8365mb_extint *extint =
rtl8365mb_get_port_extint(priv, port);
struct device_node *dn;
struct dsa_port *dp;
int tx_delay = 0;
int rx_delay = 0;
u32 val;
int ret;
if (!extint)
return -ENODEV;
dp = dsa_to_port(priv->ds, port);
dn = dp->dn;
/* Set the RGMII TX/RX delay
*
* The Realtek vendor driver indicates the following possible
* configuration settings:
*
* TX delay:
* 0 = no delay, 1 = 2 ns delay
* RX delay:
* 0 = no delay, 7 = maximum delay
* Each step is approximately 0.3 ns, so the maximum delay is about
* 2.1 ns.
*
* The vendor driver also states that this must be configured *before*
* forcing the external interface into a particular mode, which is done
* in the rtl8365mb_phylink_mac_link_{up,down} functions.
*
* Only configure an RGMII TX (resp. RX) delay if the
* tx-internal-delay-ps (resp. rx-internal-delay-ps) OF property is
* specified. We ignore the detail of the RGMII interface mode
* (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only
* property.
*/
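	/* Illustrative example: tx-internal-delay-ps = <2000> yields
	 * tx_delay = 1 (2 ns), and rx-internal-delay-ps = <900> yields
	 * rx_delay = 3 (roughly 0.9 ns).
	 */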
if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) {
val = val / 1000; /* convert to ns */
if (val == 0 || val == 2)
tx_delay = val / 2;
else
dev_warn(priv->dev,
"RGMII TX delay must be 0 or 2 ns\n");
}
if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
val = DIV_ROUND_CLOSEST(val, 300); /* convert to 0.3 ns step */
if (val <= 7)
rx_delay = val;
else
dev_warn(priv->dev,
"RGMII RX delay must be 0 to 2.1 ns\n");
}
ret = regmap_update_bits(
priv->map, RTL8365MB_EXT_RGMXF_REG(extint->id),
RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
FIELD_PREP(RTL8365MB_EXT_RGMXF_RXDELAY_MASK, rx_delay));
if (ret)
return ret;
ret = regmap_update_bits(
priv->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(extint->id),
RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(extint->id),
RTL8365MB_EXT_PORT_MODE_RGMII
<< RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
extint->id));
if (ret)
return ret;
return 0;
}
static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
bool link, int speed, int duplex,
bool tx_pause, bool rx_pause)
{
const struct rtl8365mb_extint *extint =
rtl8365mb_get_port_extint(priv, port);
u32 r_tx_pause;
u32 r_rx_pause;
u32 r_duplex;
u32 r_speed;
u32 r_link;
int val;
int ret;
if (!extint)
return -ENODEV;
if (link) {
/* Force the link up with the desired configuration */
r_link = 1;
r_rx_pause = rx_pause ? 1 : 0;
r_tx_pause = tx_pause ? 1 : 0;
if (speed == SPEED_1000) {
r_speed = RTL8365MB_PORT_SPEED_1000M;
} else if (speed == SPEED_100) {
r_speed = RTL8365MB_PORT_SPEED_100M;
} else if (speed == SPEED_10) {
r_speed = RTL8365MB_PORT_SPEED_10M;
} else {
dev_err(priv->dev, "unsupported port speed %s\n",
phy_speed_to_str(speed));
return -EINVAL;
}
if (duplex == DUPLEX_FULL) {
r_duplex = 1;
} else if (duplex == DUPLEX_HALF) {
r_duplex = 0;
} else {
dev_err(priv->dev, "unsupported duplex %s\n",
phy_duplex_to_str(duplex));
return -EINVAL;
}
} else {
/* Force the link down and reset any programmed configuration */
r_link = 0;
r_tx_pause = 0;
r_rx_pause = 0;
r_speed = 0;
r_duplex = 0;
}
val = FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK, 1) |
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK,
r_tx_pause) |
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK,
r_rx_pause) |
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK, r_link) |
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK,
r_duplex) |
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
ret = regmap_write(priv->map,
RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(extint->id),
val);
if (ret)
return ret;
return 0;
}
static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
const struct rtl8365mb_extint *extint =
rtl8365mb_get_port_extint(ds->priv, port);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD;
if (!extint) {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
/* GMII is the default interface mode for phylib, so
* we have to support it for ports with integrated PHY.
*/
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
return;
}
/* Populate according to the modes supported by _this driver_,
* not necessarily the modes supported by the hardware, some of
* which remain unimplemented.
*/
if (extint->supported_interfaces & RTL8365MB_PHY_INTERFACE_MODE_RGMII)
phy_interface_set_rgmii(config->supported_interfaces);
}
static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
struct realtek_priv *priv = ds->priv;
int ret;
if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
dev_err(priv->dev,
"port %d supports only conventional PHY or fixed-link\n",
port);
return;
}
if (phy_interface_mode_is_rgmii(state->interface)) {
ret = rtl8365mb_ext_config_rgmii(priv, port, state->interface);
if (ret)
dev_err(priv->dev,
"failed to configure RGMII mode on port %d: %d\n",
port, ret);
return;
}
/* TODO: Implement MII and RMII modes, which the RTL8365MB-VC also
* supports
*/
}
static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
int ret;
mb = priv->chip_data;
p = &mb->ports[port];
cancel_delayed_work_sync(&p->mib_work);
if (phy_interface_mode_is_rgmii(interface)) {
ret = rtl8365mb_ext_config_forcemode(priv, port, false, 0, 0,
false, false);
if (ret)
dev_err(priv->dev,
"failed to reset forced mode on port %d: %d\n",
port, ret);
return;
}
}
static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev, int speed,
int duplex, bool tx_pause,
bool rx_pause)
{
struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
int ret;
mb = priv->chip_data;
p = &mb->ports[port];
schedule_delayed_work(&p->mib_work, 0);
if (phy_interface_mode_is_rgmii(interface)) {
ret = rtl8365mb_ext_config_forcemode(priv, port, true, speed,
duplex, tx_pause,
rx_pause);
if (ret)
dev_err(priv->dev,
"failed to force mode on port %d: %d\n", port,
ret);
return;
}
}
static int rtl8365mb_port_change_mtu(struct dsa_switch *ds, int port,
int new_mtu)
{
struct realtek_priv *priv = ds->priv;
int frame_size;
/* When a new MTU is set, DSA always sets the CPU port's MTU to the
* largest MTU of the slave ports. Because the switch only has a global
	 * RX length register, handling only the CPU port here is enough.
*/
if (!dsa_is_cpu_port(ds, port))
return 0;
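	/* The wire frame size accounts for the VLAN-tagged Ethernet header and
	 * the FCS, e.g. an MTU of 1500 corresponds to a frame size of
	 * 1500 + 18 + 4 = 1522 bytes.
	 */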
frame_size = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
dev_dbg(priv->dev, "changing mtu to %d (frame size: %d)\n",
new_mtu, frame_size);
return regmap_update_bits(priv->map, RTL8365MB_CFG0_MAX_LEN_REG,
RTL8365MB_CFG0_MAX_LEN_MASK,
FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK,
frame_size));
}
static int rtl8365mb_port_max_mtu(struct dsa_switch *ds, int port)
{
return RTL8365MB_CFG0_MAX_LEN_MAX - VLAN_ETH_HLEN - ETH_FCS_LEN;
}
static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
struct realtek_priv *priv = ds->priv;
enum rtl8365mb_stp_state val;
int msti = 0;
switch (state) {
case BR_STATE_DISABLED:
val = RTL8365MB_STP_STATE_DISABLED;
break;
case BR_STATE_BLOCKING:
case BR_STATE_LISTENING:
val = RTL8365MB_STP_STATE_BLOCKING;
break;
case BR_STATE_LEARNING:
val = RTL8365MB_STP_STATE_LEARNING;
break;
case BR_STATE_FORWARDING:
val = RTL8365MB_STP_STATE_FORWARDING;
break;
default:
dev_err(priv->dev, "invalid STP state: %u\n", state);
return;
}
regmap_update_bits(priv->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port),
val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port));
}
static int rtl8365mb_port_set_learning(struct realtek_priv *priv, int port,
bool enable)
{
/* Enable/disable learning by limiting the number of L2 addresses the
* port can learn. Realtek documentation states that a limit of zero
* disables learning. When enabling learning, set it to the chip's
* maximum.
*/
return regmap_write(priv->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
enable ? RTL8365MB_LEARN_LIMIT_MAX : 0);
}
static int rtl8365mb_port_set_isolation(struct realtek_priv *priv, int port,
u32 mask)
{
return regmap_write(priv->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
}
static int rtl8365mb_mib_counter_read(struct realtek_priv *priv, int port,
u32 offset, u32 length, u64 *mibvalue)
{
u64 tmpvalue = 0;
u32 val;
int ret;
int i;
/* The MIB address is an SRAM address. We request a particular address
* and then poll the control register before reading the value from some
* counter registers.
*/
ret = regmap_write(priv->map, RTL8365MB_MIB_ADDRESS_REG,
RTL8365MB_MIB_ADDRESS(port, offset));
if (ret)
return ret;
/* Poll for completion */
ret = regmap_read_poll_timeout(priv->map, RTL8365MB_MIB_CTRL0_REG, val,
!(val & RTL8365MB_MIB_CTRL0_BUSY_MASK),
10, 100);
if (ret)
return ret;
/* Presumably this indicates a MIB counter read failure */
if (val & RTL8365MB_MIB_CTRL0_RESET_MASK)
return -EIO;
/* There are four MIB counter registers each holding a 16 bit word of a
* MIB counter. Depending on the offset, we should read from the upper
* two or lower two registers. In case the MIB counter is 4 words, we
* read from all four registers.
*/
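	/* For example, a 4-word counter such as ifInOctets (offset 0) is read
	 * from counter registers 3 down to 0, a 2-word counter at an offset
	 * with remainder 2 (e.g. dot3StatsSymbolErrors at offset 6) from
	 * registers 3 and 2, and the other 2-word counters from registers 1
	 * and 0.
	 */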
if (length == 4)
offset = 3;
else
offset = (offset + 1) % 4;
/* Read the MIB counter 16 bits at a time */
for (i = 0; i < length; i++) {
ret = regmap_read(priv->map,
RTL8365MB_MIB_COUNTER_REG(offset - i), &val);
if (ret)
return ret;
tmpvalue = ((tmpvalue) << 16) | (val & 0xFFFF);
}
/* Only commit the result if no error occurred */
*mibvalue = tmpvalue;
return 0;
}
static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
struct realtek_priv *priv = ds->priv;
struct rtl8365mb *mb;
int ret;
int i;
mb = priv->chip_data;
mutex_lock(&mb->mib_lock);
for (i = 0; i < RTL8365MB_MIB_END; i++) {
struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
mib->length, &data[i]);
if (ret) {
dev_err(priv->dev,
"failed to read port %d counters: %d\n", port,
ret);
break;
}
}
mutex_unlock(&mb->mib_lock);
}
static void rtl8365mb_get_strings(struct dsa_switch *ds, int port, u32 stringset, u8 *data)
{
int i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < RTL8365MB_MIB_END; i++) {
struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
strncpy(data + i * ETH_GSTRING_LEN, mib->name, ETH_GSTRING_LEN);
}
}
static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
return RTL8365MB_MIB_END;
}
static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_phy_stats *phy_stats)
{
struct realtek_priv *priv = ds->priv;
struct rtl8365mb_mib_counter *mib;
struct rtl8365mb *mb;
mb = priv->chip_data;
mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors];
mutex_lock(&mb->mib_lock);
rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
&phy_stats->SymbolErrorDuringCarrier);
mutex_unlock(&mb->mib_lock);
}
static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_mac_stats *mac_stats)
{
u64 cnt[RTL8365MB_MIB_END] = {
[RTL8365MB_MIB_ifOutOctets] = 1,
[RTL8365MB_MIB_ifOutUcastPkts] = 1,
[RTL8365MB_MIB_ifOutMulticastPkts] = 1,
[RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
[RTL8365MB_MIB_dot3OutPauseFrames] = 1,
[RTL8365MB_MIB_ifOutDiscards] = 1,
[RTL8365MB_MIB_ifInOctets] = 1,
[RTL8365MB_MIB_ifInUcastPkts] = 1,
[RTL8365MB_MIB_ifInMulticastPkts] = 1,
[RTL8365MB_MIB_ifInBroadcastPkts] = 1,
[RTL8365MB_MIB_dot3InPauseFrames] = 1,
[RTL8365MB_MIB_dot3StatsSingleCollisionFrames] = 1,
[RTL8365MB_MIB_dot3StatsMultipleCollisionFrames] = 1,
[RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
[RTL8365MB_MIB_dot3StatsDeferredTransmissions] = 1,
[RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
[RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1,
};
struct realtek_priv *priv = ds->priv;
struct rtl8365mb *mb;
int ret;
int i;
mb = priv->chip_data;
mutex_lock(&mb->mib_lock);
for (i = 0; i < RTL8365MB_MIB_END; i++) {
struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
/* Only fetch required MIB counters (marked = 1 above) */
if (!cnt[i])
continue;
ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
mib->length, &cnt[i]);
if (ret)
break;
}
mutex_unlock(&mb->mib_lock);
/* The RTL8365MB-VC exposes MIB objects, which we have to translate into
* IEEE 802.3 Managed Objects. This is not always completely faithful,
	 * but we try our best. See RFC 3635 for a detailed treatment of the
* subject.
*/
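	/* The 18 byte per-frame adjustment below corresponds to the 14 byte
	 * Ethernet header plus the 4 byte FCS, which the IEEE octet counters
	 * are defined to exclude.
	 */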
mac_stats->FramesTransmittedOK = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
cnt[RTL8365MB_MIB_ifOutBroadcastPkts] +
cnt[RTL8365MB_MIB_dot3OutPauseFrames] -
cnt[RTL8365MB_MIB_ifOutDiscards];
mac_stats->SingleCollisionFrames =
cnt[RTL8365MB_MIB_dot3StatsSingleCollisionFrames];
mac_stats->MultipleCollisionFrames =
cnt[RTL8365MB_MIB_dot3StatsMultipleCollisionFrames];
mac_stats->FramesReceivedOK = cnt[RTL8365MB_MIB_ifInUcastPkts] +
cnt[RTL8365MB_MIB_ifInMulticastPkts] +
cnt[RTL8365MB_MIB_ifInBroadcastPkts] +
cnt[RTL8365MB_MIB_dot3InPauseFrames];
mac_stats->FrameCheckSequenceErrors =
cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
mac_stats->OctetsTransmittedOK = cnt[RTL8365MB_MIB_ifOutOctets] -
18 * mac_stats->FramesTransmittedOK;
mac_stats->FramesWithDeferredXmissions =
cnt[RTL8365MB_MIB_dot3StatsDeferredTransmissions];
mac_stats->LateCollisions = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
mac_stats->FramesAbortedDueToXSColls =
cnt[RTL8365MB_MIB_dot3StatsExcessiveCollisions];
mac_stats->OctetsReceivedOK = cnt[RTL8365MB_MIB_ifInOctets] -
18 * mac_stats->FramesReceivedOK;
mac_stats->MulticastFramesXmittedOK =
cnt[RTL8365MB_MIB_ifOutMulticastPkts];
mac_stats->BroadcastFramesXmittedOK =
cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
mac_stats->MulticastFramesReceivedOK =
cnt[RTL8365MB_MIB_ifInMulticastPkts];
mac_stats->BroadcastFramesReceivedOK =
cnt[RTL8365MB_MIB_ifInBroadcastPkts];
}
static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
struct realtek_priv *priv = ds->priv;
struct rtl8365mb_mib_counter *mib;
struct rtl8365mb *mb;
mb = priv->chip_data;
mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes];
mutex_lock(&mb->mib_lock);
rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
&ctrl_stats->UnsupportedOpcodesReceived);
mutex_unlock(&mb->mib_lock);
}
static void rtl8365mb_stats_update(struct realtek_priv *priv, int port)
{
u64 cnt[RTL8365MB_MIB_END] = {
[RTL8365MB_MIB_ifOutOctets] = 1,
[RTL8365MB_MIB_ifOutUcastPkts] = 1,
[RTL8365MB_MIB_ifOutMulticastPkts] = 1,
[RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
[RTL8365MB_MIB_ifOutDiscards] = 1,
[RTL8365MB_MIB_ifInOctets] = 1,
[RTL8365MB_MIB_ifInUcastPkts] = 1,
[RTL8365MB_MIB_ifInMulticastPkts] = 1,
[RTL8365MB_MIB_ifInBroadcastPkts] = 1,
[RTL8365MB_MIB_etherStatsDropEvents] = 1,
[RTL8365MB_MIB_etherStatsCollisions] = 1,
[RTL8365MB_MIB_etherStatsFragments] = 1,
[RTL8365MB_MIB_etherStatsJabbers] = 1,
[RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
[RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
};
struct rtl8365mb *mb = priv->chip_data;
struct rtnl_link_stats64 *stats;
int ret;
int i;
stats = &mb->ports[port].stats;
mutex_lock(&mb->mib_lock);
for (i = 0; i < RTL8365MB_MIB_END; i++) {
struct rtl8365mb_mib_counter *c = &rtl8365mb_mib_counters[i];
/* Only fetch required MIB counters (marked = 1 above) */
if (!cnt[i])
continue;
ret = rtl8365mb_mib_counter_read(priv, port, c->offset,
c->length, &cnt[i]);
if (ret)
break;
}
mutex_unlock(&mb->mib_lock);
/* Don't update statistics if there was an error reading the counters */
if (ret)
return;
spin_lock(&mb->ports[port].stats_lock);
stats->rx_packets = cnt[RTL8365MB_MIB_ifInUcastPkts] +
cnt[RTL8365MB_MIB_ifInMulticastPkts] +
cnt[RTL8365MB_MIB_ifInBroadcastPkts] -
cnt[RTL8365MB_MIB_ifOutDiscards];
stats->tx_packets = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
/* if{In,Out}Octets includes FCS - remove it */
stats->rx_bytes = cnt[RTL8365MB_MIB_ifInOctets] - 4 * stats->rx_packets;
stats->tx_bytes =
cnt[RTL8365MB_MIB_ifOutOctets] - 4 * stats->tx_packets;
stats->rx_dropped = cnt[RTL8365MB_MIB_etherStatsDropEvents];
stats->tx_dropped = cnt[RTL8365MB_MIB_ifOutDiscards];
stats->multicast = cnt[RTL8365MB_MIB_ifInMulticastPkts];
stats->collisions = cnt[RTL8365MB_MIB_etherStatsCollisions];
stats->rx_length_errors = cnt[RTL8365MB_MIB_etherStatsFragments] +
cnt[RTL8365MB_MIB_etherStatsJabbers];
stats->rx_crc_errors = cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors;
stats->tx_aborted_errors = cnt[RTL8365MB_MIB_ifOutDiscards];
stats->tx_window_errors = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
stats->tx_errors = stats->tx_aborted_errors + stats->tx_window_errors;
spin_unlock(&mb->ports[port].stats_lock);
}
static void rtl8365mb_stats_poll(struct work_struct *work)
{
struct rtl8365mb_port *p = container_of(to_delayed_work(work),
struct rtl8365mb_port,
mib_work);
struct realtek_priv *priv = p->priv;
rtl8365mb_stats_update(priv, p->index);
schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES);
}
static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s)
{
struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
mb = priv->chip_data;
p = &mb->ports[port];
spin_lock(&p->stats_lock);
memcpy(s, &p->stats, sizeof(*s));
spin_unlock(&p->stats_lock);
}
static void rtl8365mb_stats_setup(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
int i;
/* Per-chip global mutex to protect MIB counter access, since doing
* so requires accessing a series of registers in a particular order.
*/
mutex_init(&mb->mib_lock);
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
if (dsa_is_unused_port(priv->ds, i))
continue;
/* Per-port spinlock to protect the stats64 data */
spin_lock_init(&p->stats_lock);
/* This work polls the MIB counters and keeps the stats64 data
* up-to-date.
*/
INIT_DELAYED_WORK(&p->mib_work, rtl8365mb_stats_poll);
}
}
static void rtl8365mb_stats_teardown(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
int i;
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
if (dsa_is_unused_port(priv->ds, i))
continue;
cancel_delayed_work_sync(&p->mib_work);
}
}
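/* Read a latched interrupt status register and write the observed value
* back, which acknowledges (clears) the bits that were set.
*/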
static int rtl8365mb_get_and_clear_status_reg(struct realtek_priv *priv, u32 reg,
u32 *val)
{
int ret;
ret = regmap_read(priv->map, reg, val);
if (ret)
return ret;
return regmap_write(priv->map, reg, *val);
}
static irqreturn_t rtl8365mb_irq(int irq, void *data)
{
struct realtek_priv *priv = data;
unsigned long line_changes = 0;
u32 stat;
int line;
int ret;
ret = rtl8365mb_get_and_clear_status_reg(priv, RTL8365MB_INTR_STATUS_REG,
&stat);
if (ret)
goto out_error;
if (stat & RTL8365MB_INTR_LINK_CHANGE_MASK) {
u32 linkdown_ind;
u32 linkup_ind;
u32 val;
ret = rtl8365mb_get_and_clear_status_reg(
priv, RTL8365MB_PORT_LINKUP_IND_REG, &val);
if (ret)
goto out_error;
linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val);
ret = rtl8365mb_get_and_clear_status_reg(
priv, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
if (ret)
goto out_error;
linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val);
line_changes = linkup_ind | linkdown_ind;
}
if (!line_changes)
goto out_none;
for_each_set_bit(line, &line_changes, priv->num_ports) {
int child_irq = irq_find_mapping(priv->irqdomain, line);
handle_nested_irq(child_irq);
}
return IRQ_HANDLED;
out_error:
dev_err(priv->dev, "failed to read interrupt status: %d\n", ret);
out_none:
return IRQ_NONE;
}
static struct irq_chip rtl8365mb_irq_chip = {
.name = "rtl8365mb",
/* The hardware doesn't support masking IRQs on a per-port basis */
};
static int rtl8365mb_irq_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(irq, domain->host_data);
irq_set_chip_and_handler(irq, &rtl8365mb_irq_chip, handle_simple_irq);
irq_set_nested_thread(irq, 1);
irq_set_noprobe(irq);
return 0;
}
static void rtl8365mb_irq_unmap(struct irq_domain *d, unsigned int irq)
{
irq_set_nested_thread(irq, 0);
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
}
static const struct irq_domain_ops rtl8365mb_irqdomain_ops = {
.map = rtl8365mb_irq_map,
.unmap = rtl8365mb_irq_unmap,
.xlate = irq_domain_xlate_onecell,
};
static int rtl8365mb_set_irq_enable(struct realtek_priv *priv, bool enable)
{
return regmap_update_bits(priv->map, RTL8365MB_INTR_CTRL_REG,
RTL8365MB_INTR_LINK_CHANGE_MASK,
FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK,
enable ? 1 : 0));
}
static int rtl8365mb_irq_enable(struct realtek_priv *priv)
{
return rtl8365mb_set_irq_enable(priv, true);
}
static int rtl8365mb_irq_disable(struct realtek_priv *priv)
{
return rtl8365mb_set_irq_enable(priv, false);
}
static int rtl8365mb_irq_setup(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
struct device_node *intc;
u32 irq_trig;
int virq;
int irq;
u32 val;
int ret;
int i;
intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
if (!intc) {
dev_err(priv->dev, "missing child interrupt-controller node\n");
return -EINVAL;
}
/* rtl8365mb IRQs cascade off this one */
irq = of_irq_get(intc, 0);
if (irq <= 0) {
if (irq != -EPROBE_DEFER)
dev_err(priv->dev, "failed to get parent irq: %d\n",
irq);
ret = irq ? irq : -EINVAL;
goto out_put_node;
}
priv->irqdomain = irq_domain_add_linear(intc, priv->num_ports,
&rtl8365mb_irqdomain_ops, priv);
if (!priv->irqdomain) {
dev_err(priv->dev, "failed to add irq domain\n");
ret = -ENOMEM;
goto out_put_node;
}
for (i = 0; i < priv->num_ports; i++) {
virq = irq_create_mapping(priv->irqdomain, i);
if (!virq) {
dev_err(priv->dev,
"failed to create irq domain mapping\n");
ret = -EINVAL;
goto out_remove_irqdomain;
}
irq_set_parent(virq, irq);
}
/* Configure chip interrupt signal polarity */
irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
val = RTL8365MB_INTR_POLARITY_HIGH;
break;
case IRQF_TRIGGER_FALLING:
case IRQF_TRIGGER_LOW:
val = RTL8365MB_INTR_POLARITY_LOW;
break;
default:
dev_err(priv->dev, "unsupported irq trigger type %u\n",
irq_trig);
ret = -EINVAL;
goto out_remove_irqdomain;
}
ret = regmap_update_bits(priv->map, RTL8365MB_INTR_POLARITY_REG,
RTL8365MB_INTR_POLARITY_MASK,
FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val));
if (ret)
goto out_remove_irqdomain;
/* Disable the interrupt in case the chip has it enabled on reset */
ret = rtl8365mb_irq_disable(priv);
if (ret)
goto out_remove_irqdomain;
/* Clear the interrupt status register */
ret = regmap_write(priv->map, RTL8365MB_INTR_STATUS_REG,
RTL8365MB_INTR_ALL_MASK);
if (ret)
goto out_remove_irqdomain;
ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT,
"rtl8365mb", priv);
if (ret) {
dev_err(priv->dev, "failed to request irq: %d\n", ret);
goto out_remove_irqdomain;
}
/* Store the irq so that we know to free it during teardown */
mb->irq = irq;
ret = rtl8365mb_irq_enable(priv);
if (ret)
goto out_free_irq;
of_node_put(intc);
return 0;
out_free_irq:
free_irq(mb->irq, priv);
mb->irq = 0;
out_remove_irqdomain:
for (i = 0; i < priv->num_ports; i++) {
virq = irq_find_mapping(priv->irqdomain, i);
irq_dispose_mapping(virq);
}
irq_domain_remove(priv->irqdomain);
priv->irqdomain = NULL;
out_put_node:
of_node_put(intc);
return ret;
}
static void rtl8365mb_irq_teardown(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
int virq;
int i;
if (mb->irq) {
free_irq(mb->irq, priv);
mb->irq = 0;
}
if (priv->irqdomain) {
for (i = 0; i < priv->num_ports; i++) {
virq = irq_find_mapping(priv->irqdomain, i);
irq_dispose_mapping(virq);
}
irq_domain_remove(priv->irqdomain);
priv->irqdomain = NULL;
}
}
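/* Push the current CPU port configuration (port mask plus CPU tag
* parameters such as format, position and trap port) to the switch.
*/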
static int rtl8365mb_cpu_config(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
struct rtl8365mb_cpu *cpu = &mb->cpu;
u32 val;
int ret;
ret = regmap_update_bits(priv->map, RTL8365MB_CPU_PORT_MASK_REG,
RTL8365MB_CPU_PORT_MASK_MASK,
FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK,
cpu->mask));
if (ret)
return ret;
val = FIELD_PREP(RTL8365MB_CPU_CTRL_EN_MASK, cpu->enable ? 1 : 0) |
FIELD_PREP(RTL8365MB_CPU_CTRL_INSERTMODE_MASK, cpu->insert) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port & 0x7) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK,
cpu->trap_port >> 3 & 0x1);
ret = regmap_write(priv->map, RTL8365MB_CPU_CTRL_REG, val);
if (ret)
return ret;
return 0;
}
static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
struct realtek_priv *priv = ds->priv;
struct rtl8365mb_cpu *cpu;
struct rtl8365mb *mb;
mb = priv->chip_data;
cpu = &mb->cpu;
switch (proto) {
case DSA_TAG_PROTO_RTL8_4:
cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
cpu->position = RTL8365MB_CPU_POS_AFTER_SA;
break;
case DSA_TAG_PROTO_RTL8_4T:
cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
cpu->position = RTL8365MB_CPU_POS_BEFORE_CRC;
break;
/* The switch also supports a 4-byte format, similar to rtl4a but with
* the same 0x04 8-bit version and probably 8-bit port source/dest.
* There is no public doc about it. Not supported yet and it will probably
* never be.
*/
default:
return -EPROTONOSUPPORT;
}
return rtl8365mb_cpu_config(priv);
}
static int rtl8365mb_switch_init(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
const struct rtl8365mb_chip_info *ci;
int ret;
int i;
ci = mb->chip_info;
/* Do any chip-specific init jam before getting to the common stuff */
if (ci->jam_table) {
for (i = 0; i < ci->jam_size; i++) {
ret = regmap_write(priv->map, ci->jam_table[i].reg,
ci->jam_table[i].val);
if (ret)
return ret;
}
}
/* Common init jam */
for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) {
ret = regmap_write(priv->map, rtl8365mb_init_jam_common[i].reg,
rtl8365mb_init_jam_common[i].val);
if (ret)
return ret;
}
return 0;
}
static int rtl8365mb_reset_chip(struct realtek_priv *priv)
{
u32 val;
priv->write_reg_noack(priv, RTL8365MB_CHIP_RESET_REG,
FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK, 1));
/* Realtek documentation says the chip needs 1 second to reset. Sleep
* for 100 ms before accessing any registers to prevent ACK timeouts.
*/
msleep(100);
return regmap_read_poll_timeout(priv->map, RTL8365MB_CHIP_RESET_REG, val,
!(val & RTL8365MB_CHIP_RESET_HW_MASK),
20000, 1e6);
}
static int rtl8365mb_setup(struct dsa_switch *ds)
{
struct realtek_priv *priv = ds->priv;
struct rtl8365mb_cpu *cpu;
struct dsa_port *cpu_dp;
struct rtl8365mb *mb;
int ret;
int i;
mb = priv->chip_data;
cpu = &mb->cpu;
ret = rtl8365mb_reset_chip(priv);
if (ret) {
dev_err(priv->dev, "failed to reset chip: %d\n", ret);
goto out_error;
}
/* Configure switch to vendor-defined initial state */
ret = rtl8365mb_switch_init(priv);
if (ret) {
dev_err(priv->dev, "failed to initialize switch: %d\n", ret);
goto out_error;
}
/* Set up cascading IRQs */
ret = rtl8365mb_irq_setup(priv);
if (ret == -EPROBE_DEFER)
return ret;
else if (ret)
dev_info(priv->dev, "no interrupt support\n");
/* Configure CPU tagging */
dsa_switch_for_each_cpu_port(cpu_dp, priv->ds) {
cpu->mask |= BIT(cpu_dp->index);
if (cpu->trap_port == RTL8365MB_MAX_NUM_PORTS)
cpu->trap_port = cpu_dp->index;
}
cpu->enable = cpu->mask > 0;
ret = rtl8365mb_cpu_config(priv);
if (ret)
goto out_teardown_irq;
/* Configure ports */
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
if (dsa_is_unused_port(priv->ds, i))
continue;
/* Forward only to the CPU */
ret = rtl8365mb_port_set_isolation(priv, i, cpu->mask);
if (ret)
goto out_teardown_irq;
/* Disable learning */
ret = rtl8365mb_port_set_learning(priv, i, false);
if (ret)
goto out_teardown_irq;
/* Set the initial STP state of all ports to DISABLED, otherwise
* ports will still forward frames to the CPU despite being
* administratively down by default.
*/
rtl8365mb_port_stp_state_set(priv->ds, i, BR_STATE_DISABLED);
/* Set up per-port private data */
p->priv = priv;
p->index = i;
}
ret = rtl8365mb_port_change_mtu(ds, cpu->trap_port, ETH_DATA_LEN);
if (ret)
goto out_teardown_irq;
if (priv->setup_interface) {
ret = priv->setup_interface(ds);
if (ret) {
dev_err(priv->dev, "could not set up MDIO bus\n");
goto out_teardown_irq;
}
}
/* Start statistics counter polling */
rtl8365mb_stats_setup(priv);
return 0;
out_teardown_irq:
rtl8365mb_irq_teardown(priv);
out_error:
return ret;
}
static void rtl8365mb_teardown(struct dsa_switch *ds)
{
struct realtek_priv *priv = ds->priv;
rtl8365mb_stats_teardown(priv);
rtl8365mb_irq_teardown(priv);
}
static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
{
int ret;
/* For some reason we have to write a magic value to an arbitrary
* register whenever accessing the chip ID/version registers.
*/
ret = regmap_write(map, RTL8365MB_MAGIC_REG, RTL8365MB_MAGIC_VALUE);
if (ret)
return ret;
ret = regmap_read(map, RTL8365MB_CHIP_ID_REG, id);
if (ret)
return ret;
ret = regmap_read(map, RTL8365MB_CHIP_VER_REG, ver);
if (ret)
return ret;
/* Reset magic register */
ret = regmap_write(map, RTL8365MB_MAGIC_REG, 0);
if (ret)
return ret;
return 0;
}
static int rtl8365mb_detect(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
u32 chip_id;
u32 chip_ver;
int ret;
int i;
ret = rtl8365mb_get_chip_id_and_ver(priv->map, &chip_id, &chip_ver);
if (ret) {
dev_err(priv->dev, "failed to read chip id and version: %d\n",
ret);
return ret;
}
for (i = 0; i < ARRAY_SIZE(rtl8365mb_chip_infos); i++) {
const struct rtl8365mb_chip_info *ci = &rtl8365mb_chip_infos[i];
if (ci->chip_id == chip_id && ci->chip_ver == chip_ver) {
mb->chip_info = ci;
break;
}
}
if (!mb->chip_info) {
dev_err(priv->dev,
"unrecognized switch (id=0x%04x, ver=0x%04x)", chip_id,
chip_ver);
return -ENODEV;
}
dev_info(priv->dev, "found an %s switch\n", mb->chip_info->name);
priv->num_ports = RTL8365MB_MAX_NUM_PORTS;
mb->priv = priv;
mb->cpu.trap_port = RTL8365MB_MAX_NUM_PORTS;
mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
return 0;
}
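/* Two dsa_switch_ops variants are provided depending on the management
* interface; the MDIO variant additionally exposes .phy_read/.phy_write.
*/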
static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
.change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
.teardown = rtl8365mb_teardown,
.phylink_get_caps = rtl8365mb_phylink_get_caps,
.phylink_mac_config = rtl8365mb_phylink_mac_config,
.phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
.phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
.port_stp_state_set = rtl8365mb_port_stp_state_set,
.get_strings = rtl8365mb_get_strings,
.get_ethtool_stats = rtl8365mb_get_ethtool_stats,
.get_sset_count = rtl8365mb_get_sset_count,
.get_eth_phy_stats = rtl8365mb_get_phy_stats,
.get_eth_mac_stats = rtl8365mb_get_mac_stats,
.get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
.get_stats64 = rtl8365mb_get_stats64,
.port_change_mtu = rtl8365mb_port_change_mtu,
.port_max_mtu = rtl8365mb_port_max_mtu,
};
static const struct dsa_switch_ops rtl8365mb_switch_ops_mdio = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
.change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
.teardown = rtl8365mb_teardown,
.phylink_get_caps = rtl8365mb_phylink_get_caps,
.phylink_mac_config = rtl8365mb_phylink_mac_config,
.phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
.phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
.phy_read = rtl8365mb_dsa_phy_read,
.phy_write = rtl8365mb_dsa_phy_write,
.port_stp_state_set = rtl8365mb_port_stp_state_set,
.get_strings = rtl8365mb_get_strings,
.get_ethtool_stats = rtl8365mb_get_ethtool_stats,
.get_sset_count = rtl8365mb_get_sset_count,
.get_eth_phy_stats = rtl8365mb_get_phy_stats,
.get_eth_mac_stats = rtl8365mb_get_mac_stats,
.get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
.get_stats64 = rtl8365mb_get_stats64,
.port_change_mtu = rtl8365mb_port_change_mtu,
.port_max_mtu = rtl8365mb_port_max_mtu,
};
static const struct realtek_ops rtl8365mb_ops = {
.detect = rtl8365mb_detect,
.phy_read = rtl8365mb_phy_read,
.phy_write = rtl8365mb_phy_write,
};
const struct realtek_variant rtl8365mb_variant = {
.ds_ops_smi = &rtl8365mb_switch_ops_smi,
.ds_ops_mdio = &rtl8365mb_switch_ops_mdio,
.ops = &rtl8365mb_ops,
.clk_delay = 10,
.cmd_read = 0xb9,
.cmd_write = 0xb8,
.chip_data_sz = sizeof(struct rtl8365mb),
};
EXPORT_SYMBOL_GPL(rtl8365mb_variant);
MODULE_AUTHOR("Alvin Šipraga <[email protected]>");
MODULE_DESCRIPTION("Driver for RTL8365MB-VC ethernet switch");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/realtek/rtl8365mb.c |
// SPDX-License-Identifier: GPL-2.0+
/* Realtek Simple Management Interface (SMI) driver
* It can be discussed how "simple" this interface is.
*
* The SMI protocol piggy-backs on the MDIO MDC and MDIO signal levels,
* but the protocol is not MDIO at all. Instead it is a Realtek
* peculiarity that needs the lines to be bit-banged in a special way
* to communicate with the switch.
*
* ASICs we intend to support with this driver:
*
* RTL8366 - The original version, apparently
* RTL8369 - Similar enough to have the same datasheet as RTL8366
* RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
* different register layout from the other two
* RTL8366S - Is this "RTL8366 super"?
* RTL8367 - Has an OpenWRT driver as well
* RTL8368S - Seems to be an alternative name for RTL8366RB
* RTL8370 - Also uses SMI
*
* Copyright (C) 2017 Linus Walleij <[email protected]>
* Copyright (C) 2010 Antti Seppälä <[email protected]>
* Copyright (C) 2010 Roman Yeryomin <[email protected]>
* Copyright (C) 2011 Colin Leitner <[email protected]>
* Copyright (C) 2009-2010 Gabor Juhos <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/bitops.h>
#include <linux/if_bridge.h>
#include "realtek.h"
#define REALTEK_SMI_ACK_RETRY_COUNT 5
static inline void realtek_smi_clk_delay(struct realtek_priv *priv)
{
ndelay(priv->clk_delay);
}
static void realtek_smi_start(struct realtek_priv *priv)
{
/* Set GPIO pins to output mode, with initial state:
* SCK = 0, SDA = 1
*/
gpiod_direction_output(priv->mdc, 0);
gpiod_direction_output(priv->mdio, 1);
realtek_smi_clk_delay(priv);
/* CLK 1: 0 -> 1, 1 -> 0 */
gpiod_set_value(priv->mdc, 1);
realtek_smi_clk_delay(priv);
gpiod_set_value(priv->mdc, 0);
realtek_smi_clk_delay(priv);
/* CLK 2: */
gpiod_set_value(priv->mdc, 1);
realtek_smi_clk_delay(priv);
gpiod_set_value(priv->mdio, 0);
realtek_smi_clk_delay(priv);
gpiod_set_value(priv->mdc, 0);
realtek_smi_clk_delay(priv);
gpiod_set_value(priv->mdio, 1);
}
static void realtek_smi_stop(struct realtek_priv *priv)
{
realtek_smi_clk_delay(priv);
gpiod_set_value(priv->mdio, 0);
gpiod_set_value(priv->mdc, 1);
realtek_smi_clk_delay(priv);
gpiod_set_value(priv->mdio, 1);
realtek_smi_clk_delay(priv);
gpiod_set_value(priv->mdc, 1);
realtek_smi_clk_delay(priv);
gpiod_set_value(priv->mdc, 0);
realtek_smi_clk_delay(priv);
gpiod_set_value(priv->mdc, 1);
/* Add an extra clock pulse ("click") */
realtek_smi_clk_delay(priv);
gpiod_set_value(priv->mdc, 0);
realtek_smi_clk_delay(priv);
gpiod_set_value(priv->mdc, 1);
/* Set GPIO pins to input mode */
gpiod_direction_input(priv->mdio);
gpiod_direction_input(priv->mdc);
}
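/* Clock out 'len' bits of 'data' on the bit-banged bus, MSB first */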
static void realtek_smi_write_bits(struct realtek_priv *priv, u32 data, u32 len)
{
for (; len > 0; len--) {
realtek_smi_clk_delay(priv);
/* Prepare data */
gpiod_set_value(priv->mdio, !!(data & (1 << (len - 1))));
realtek_smi_clk_delay(priv);
/* Clocking */
gpiod_set_value(priv->mdc, 1);
realtek_smi_clk_delay(priv);
gpiod_set_value(priv->mdc, 0);
}
}
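/* Clock in 'len' bits from the bus into *data, MSB first, then turn the
* data line back into an output driven low.
*/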
static void realtek_smi_read_bits(struct realtek_priv *priv, u32 len, u32 *data)
{
gpiod_direction_input(priv->mdio);
for (*data = 0; len > 0; len--) {
u32 u;
realtek_smi_clk_delay(priv);
/* Clocking */
gpiod_set_value(priv->mdc, 1);
realtek_smi_clk_delay(priv);
u = !!gpiod_get_value(priv->mdio);
gpiod_set_value(priv->mdc, 0);
*data |= (u << (len - 1));
}
gpiod_direction_output(priv->mdio, 0);
}
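/* After each written byte the switch should answer with a 0 ACK bit;
* retry a bounded number of times before declaring a timeout.
*/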
static int realtek_smi_wait_for_ack(struct realtek_priv *priv)
{
int retry_cnt;
retry_cnt = 0;
do {
u32 ack;
realtek_smi_read_bits(priv, 1, &ack);
if (ack == 0)
break;
if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
dev_err(priv->dev, "ACK timeout\n");
return -ETIMEDOUT;
}
} while (1);
return 0;
}
static int realtek_smi_write_byte(struct realtek_priv *priv, u8 data)
{
realtek_smi_write_bits(priv, data, 8);
return realtek_smi_wait_for_ack(priv);
}
static int realtek_smi_write_byte_noack(struct realtek_priv *priv, u8 data)
{
realtek_smi_write_bits(priv, data, 8);
return 0;
}
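/* The two read-byte helpers differ only in the ACK bit sent back after
* the data: 0 after the first (low) byte, 1 after the final (high) byte,
* presumably to terminate the read in an I2C-like fashion.
*/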
static int realtek_smi_read_byte0(struct realtek_priv *priv, u8 *data)
{
u32 t;
/* Read data */
realtek_smi_read_bits(priv, 8, &t);
*data = (t & 0xff);
/* Send an ACK */
realtek_smi_write_bits(priv, 0x00, 1);
return 0;
}
static int realtek_smi_read_byte1(struct realtek_priv *priv, u8 *data)
{
u32 t;
/* Read data */
realtek_smi_read_bits(priv, 8, &t);
*data = (t & 0xff);
/* Send an ACK */
realtek_smi_write_bits(priv, 0x01, 1);
return 0;
}
static int realtek_smi_read_reg(struct realtek_priv *priv, u32 addr, u32 *data)
{
unsigned long flags;
u8 lo = 0;
u8 hi = 0;
int ret;
spin_lock_irqsave(&priv->lock, flags);
realtek_smi_start(priv);
/* Send READ command */
ret = realtek_smi_write_byte(priv, priv->cmd_read);
if (ret)
goto out;
/* Set ADDR[7:0] */
ret = realtek_smi_write_byte(priv, addr & 0xff);
if (ret)
goto out;
/* Set ADDR[15:8] */
ret = realtek_smi_write_byte(priv, addr >> 8);
if (ret)
goto out;
/* Read DATA[7:0] */
realtek_smi_read_byte0(priv, &lo);
/* Read DATA[15:8] */
realtek_smi_read_byte1(priv, &hi);
*data = ((u32)lo) | (((u32)hi) << 8);
ret = 0;
out:
realtek_smi_stop(priv);
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
static int realtek_smi_write_reg(struct realtek_priv *priv,
u32 addr, u32 data, bool ack)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&priv->lock, flags);
realtek_smi_start(priv);
/* Send WRITE command */
ret = realtek_smi_write_byte(priv, priv->cmd_write);
if (ret)
goto out;
/* Set ADDR[7:0] */
ret = realtek_smi_write_byte(priv, addr & 0xff);
if (ret)
goto out;
/* Set ADDR[15:8] */
ret = realtek_smi_write_byte(priv, addr >> 8);
if (ret)
goto out;
/* Write DATA[7:0] */
ret = realtek_smi_write_byte(priv, data & 0xff);
if (ret)
goto out;
/* Write DATA[15:8] */
if (ack)
ret = realtek_smi_write_byte(priv, data >> 8);
else
ret = realtek_smi_write_byte_noack(priv, data >> 8);
if (ret)
goto out;
ret = 0;
out:
realtek_smi_stop(priv);
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
/* There is a single case where we need to use this accessor, and that
* is when issuing a soft reset. Since the device resets as soon as we
* write that bit, no ACK will come back, for natural reasons.
*/
static int realtek_smi_write_reg_noack(void *ctx, u32 reg, u32 val)
{
return realtek_smi_write_reg(ctx, reg, val, false);
}
/* Regmap accessors */
static int realtek_smi_write(void *ctx, u32 reg, u32 val)
{
struct realtek_priv *priv = ctx;
return realtek_smi_write_reg(priv, reg, val, true);
}
static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
{
struct realtek_priv *priv = ctx;
return realtek_smi_read_reg(priv, reg, val);
}
static void realtek_smi_lock(void *ctx)
{
struct realtek_priv *priv = ctx;
mutex_lock(&priv->map_lock);
}
static void realtek_smi_unlock(void *ctx)
{
struct realtek_priv *priv = ctx;
mutex_unlock(&priv->map_lock);
}
static const struct regmap_config realtek_smi_regmap_config = {
.reg_bits = 10, /* A4..A0 R4..R0 */
.val_bits = 16,
.reg_stride = 1,
/* PHY regs are at 0x8000 */
.max_register = 0xffff,
.reg_format_endian = REGMAP_ENDIAN_BIG,
.reg_read = realtek_smi_read,
.reg_write = realtek_smi_write,
.cache_type = REGCACHE_NONE,
.lock = realtek_smi_lock,
.unlock = realtek_smi_unlock,
};
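/* Same configuration but with regmap's own locking disabled, apparently
* intended for register sequences where the caller already holds map_lock.
*/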
static const struct regmap_config realtek_smi_nolock_regmap_config = {
.reg_bits = 10, /* A4..A0 R4..R0 */
.val_bits = 16,
.reg_stride = 1,
/* PHY regs are at 0x8000 */
.max_register = 0xffff,
.reg_format_endian = REGMAP_ENDIAN_BIG,
.reg_read = realtek_smi_read,
.reg_write = realtek_smi_write,
.cache_type = REGCACHE_NONE,
.disable_locking = true,
};
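/* MDIO bus accessors for the switch-internal PHYs, forwarded to the
* variant-specific phy_read/phy_write operations.
*/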
static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
struct realtek_priv *priv = bus->priv;
return priv->ops->phy_read(priv, addr, regnum);
}
static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
u16 val)
{
struct realtek_priv *priv = bus->priv;
return priv->ops->phy_write(priv, addr, regnum, val);
}
static int realtek_smi_setup_mdio(struct dsa_switch *ds)
{
struct realtek_priv *priv = ds->priv;
struct device_node *mdio_np;
int ret;
mdio_np = of_get_compatible_child(priv->dev->of_node, "realtek,smi-mdio");
if (!mdio_np) {
dev_err(priv->dev, "no MDIO bus node\n");
return -ENODEV;
}
priv->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
if (!priv->slave_mii_bus) {
ret = -ENOMEM;
goto err_put_node;
}
priv->slave_mii_bus->priv = priv;
priv->slave_mii_bus->name = "SMI slave MII";
priv->slave_mii_bus->read = realtek_smi_mdio_read;
priv->slave_mii_bus->write = realtek_smi_mdio_write;
snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
ds->index);
priv->slave_mii_bus->dev.of_node = mdio_np;
priv->slave_mii_bus->parent = priv->dev;
ds->slave_mii_bus = priv->slave_mii_bus;
ret = devm_of_mdiobus_register(priv->dev, priv->slave_mii_bus, mdio_np);
if (ret) {
dev_err(priv->dev, "unable to register MDIO bus %s\n",
priv->slave_mii_bus->id);
goto err_put_node;
}
return 0;
err_put_node:
of_node_put(mdio_np);
return ret;
}
static int realtek_smi_probe(struct platform_device *pdev)
{
const struct realtek_variant *var;
struct device *dev = &pdev->dev;
struct realtek_priv *priv;
struct regmap_config rc;
struct device_node *np;
int ret;
var = of_device_get_match_data(dev);
np = dev->of_node;
priv = devm_kzalloc(dev, sizeof(*priv) + var->chip_data_sz, GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->chip_data = (void *)priv + sizeof(*priv);
mutex_init(&priv->map_lock);
rc = realtek_smi_regmap_config;
rc.lock_arg = priv;
priv->map = devm_regmap_init(dev, NULL, priv, &rc);
if (IS_ERR(priv->map)) {
ret = PTR_ERR(priv->map);
dev_err(dev, "regmap init failed: %d\n", ret);
return ret;
}
rc = realtek_smi_nolock_regmap_config;
priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
if (IS_ERR(priv->map_nolock)) {
ret = PTR_ERR(priv->map_nolock);
dev_err(dev, "regmap init failed: %d\n", ret);
return ret;
}
/* Link forward and backward */
priv->dev = dev;
priv->clk_delay = var->clk_delay;
priv->cmd_read = var->cmd_read;
priv->cmd_write = var->cmd_write;
priv->ops = var->ops;
priv->setup_interface = realtek_smi_setup_mdio;
priv->write_reg_noack = realtek_smi_write_reg_noack;
dev_set_drvdata(dev, priv);
spin_lock_init(&priv->lock);
/* TODO: if power is software controlled, set up any regulators here */
priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(priv->reset)) {
dev_err(dev, "failed to get RESET GPIO\n");
return PTR_ERR(priv->reset);
}
if (priv->reset) {
gpiod_set_value(priv->reset, 1);
dev_dbg(dev, "asserted RESET\n");
msleep(REALTEK_HW_STOP_DELAY);
gpiod_set_value(priv->reset, 0);
msleep(REALTEK_HW_START_DELAY);
dev_dbg(dev, "deasserted RESET\n");
}
/* Fetch MDIO pins */
priv->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
if (IS_ERR(priv->mdc))
return PTR_ERR(priv->mdc);
priv->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
if (IS_ERR(priv->mdio))
return PTR_ERR(priv->mdio);
priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
ret = priv->ops->detect(priv);
if (ret) {
dev_err(dev, "unable to detect switch\n");
return ret;
}
priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
priv->ds->dev = dev;
priv->ds->num_ports = priv->num_ports;
priv->ds->priv = priv;
priv->ds->ops = var->ds_ops_smi;
ret = dsa_register_switch(priv->ds);
if (ret) {
dev_err_probe(dev, ret, "unable to register switch\n");
return ret;
}
return 0;
}
static int realtek_smi_remove(struct platform_device *pdev)
{
struct realtek_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return 0;
dsa_unregister_switch(priv->ds);
if (priv->slave_mii_bus)
of_node_put(priv->slave_mii_bus->dev.of_node);
/* leave the device reset asserted */
if (priv->reset)
gpiod_set_value(priv->reset, 1);
return 0;
}
static void realtek_smi_shutdown(struct platform_device *pdev)
{
struct realtek_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return;
dsa_switch_shutdown(priv->ds);
platform_set_drvdata(pdev, NULL);
}
static const struct of_device_id realtek_smi_of_match[] = {
#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
{
.compatible = "realtek,rtl8366rb",
.data = &rtl8366rb_variant,
},
#endif
#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
{
.compatible = "realtek,rtl8365mb",
.data = &rtl8365mb_variant,
},
#endif
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
static struct platform_driver realtek_smi_driver = {
.driver = {
.name = "realtek-smi",
.of_match_table = realtek_smi_of_match,
},
.probe = realtek_smi_probe,
.remove = realtek_smi_remove,
.shutdown = realtek_smi_shutdown,
};
module_platform_driver(realtek_smi_driver);
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via SMI interface");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/realtek/realtek-smi.c |
// SPDX-License-Identifier: GPL-2.0
/* Realtek SMI subdriver for the Realtek RTL8366RB ethernet switch
*
* This is a sparsely documented chip; the only viable documentation seems
* to be a patched-up code drop from the vendor that appears in various
* GPL source trees.
*
* Copyright (C) 2017 Linus Walleij <[email protected]>
* Copyright (C) 2009-2010 Gabor Juhos <[email protected]>
* Copyright (C) 2010 Antti Seppälä <[email protected]>
* Copyright (C) 2010 Roman Yeryomin <[email protected]>
* Copyright (C) 2011 Colin Leitner <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
#include "realtek.h"
#define RTL8366RB_PORT_NUM_CPU 5
#define RTL8366RB_NUM_PORTS 6
#define RTL8366RB_PHY_NO_MAX 4
#define RTL8366RB_PHY_ADDR_MAX 31
/* Switch Global Configuration register */
#define RTL8366RB_SGCR 0x0000
#define RTL8366RB_SGCR_EN_BC_STORM_CTRL BIT(0)
#define RTL8366RB_SGCR_MAX_LENGTH(a) ((a) << 4)
#define RTL8366RB_SGCR_MAX_LENGTH_MASK RTL8366RB_SGCR_MAX_LENGTH(0x3)
#define RTL8366RB_SGCR_MAX_LENGTH_1522 RTL8366RB_SGCR_MAX_LENGTH(0x0)
#define RTL8366RB_SGCR_MAX_LENGTH_1536 RTL8366RB_SGCR_MAX_LENGTH(0x1)
#define RTL8366RB_SGCR_MAX_LENGTH_1552 RTL8366RB_SGCR_MAX_LENGTH(0x2)
#define RTL8366RB_SGCR_MAX_LENGTH_16000 RTL8366RB_SGCR_MAX_LENGTH(0x3)
#define RTL8366RB_SGCR_EN_VLAN BIT(13)
#define RTL8366RB_SGCR_EN_VLAN_4KTB BIT(14)
/* Port Enable Control register */
#define RTL8366RB_PECR 0x0001
/* Switch per-port learning disablement register */
#define RTL8366RB_PORT_LEARNDIS_CTRL 0x0002
/* Security control, actually aging register */
#define RTL8366RB_SECURITY_CTRL 0x0003
#define RTL8366RB_SSCR2 0x0004
#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0)
/* Port Mode Control registers */
#define RTL8366RB_PMC0 0x0005
#define RTL8366RB_PMC0_SPI BIT(0)
#define RTL8366RB_PMC0_EN_AUTOLOAD BIT(1)
#define RTL8366RB_PMC0_PROBE BIT(2)
#define RTL8366RB_PMC0_DIS_BISR BIT(3)
#define RTL8366RB_PMC0_ADCTEST BIT(4)
#define RTL8366RB_PMC0_SRAM_DIAG BIT(5)
#define RTL8366RB_PMC0_EN_SCAN BIT(6)
#define RTL8366RB_PMC0_P4_IOMODE_SHIFT 7
#define RTL8366RB_PMC0_P4_IOMODE_MASK GENMASK(9, 7)
#define RTL8366RB_PMC0_P5_IOMODE_SHIFT 10
#define RTL8366RB_PMC0_P5_IOMODE_MASK GENMASK(12, 10)
#define RTL8366RB_PMC0_SDSMODE_SHIFT 13
#define RTL8366RB_PMC0_SDSMODE_MASK GENMASK(15, 13)
#define RTL8366RB_PMC1 0x0006
/* Port Mirror Control Register */
#define RTL8366RB_PMCR 0x0007
#define RTL8366RB_PMCR_SOURCE_PORT(a) (a)
#define RTL8366RB_PMCR_SOURCE_PORT_MASK 0x000f
#define RTL8366RB_PMCR_MONITOR_PORT(a) ((a) << 4)
#define RTL8366RB_PMCR_MONITOR_PORT_MASK 0x00f0
#define RTL8366RB_PMCR_MIRROR_RX BIT(8)
#define RTL8366RB_PMCR_MIRROR_TX BIT(9)
#define RTL8366RB_PMCR_MIRROR_SPC BIT(10)
#define RTL8366RB_PMCR_MIRROR_ISO BIT(11)
/* bits 0..7 = port 0, bits 8..15 = port 1 */
#define RTL8366RB_PAACR0 0x0010
/* bits 0..7 = port 2, bits 8..15 = port 3 */
#define RTL8366RB_PAACR1 0x0011
/* bits 0..7 = port 4, bits 8..15 = port 5 */
#define RTL8366RB_PAACR2 0x0012
#define RTL8366RB_PAACR_SPEED_10M 0
#define RTL8366RB_PAACR_SPEED_100M 1
#define RTL8366RB_PAACR_SPEED_1000M 2
#define RTL8366RB_PAACR_FULL_DUPLEX BIT(2)
#define RTL8366RB_PAACR_LINK_UP BIT(4)
#define RTL8366RB_PAACR_TX_PAUSE BIT(5)
#define RTL8366RB_PAACR_RX_PAUSE BIT(6)
#define RTL8366RB_PAACR_AN BIT(7)
#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \
RTL8366RB_PAACR_FULL_DUPLEX | \
RTL8366RB_PAACR_LINK_UP | \
RTL8366RB_PAACR_TX_PAUSE | \
RTL8366RB_PAACR_RX_PAUSE)
/* bits 0..7 = port 0, bits 8..15 = port 1 */
#define RTL8366RB_PSTAT0 0x0014
/* bits 0..7 = port 2, bits 8..15 = port 3 */
#define RTL8366RB_PSTAT1 0x0015
/* bits 0..7 = port 4, bits 8..15 = port 5 */
#define RTL8366RB_PSTAT2 0x0016
#define RTL8366RB_POWER_SAVING_REG 0x0021
/* Spanning tree status (STP) control, two bits per port per FID */
#define RTL8366RB_STP_STATE_BASE 0x0050 /* 0x0050..0x0057 */
#define RTL8366RB_STP_STATE_DISABLED 0x0
#define RTL8366RB_STP_STATE_BLOCKING 0x1
#define RTL8366RB_STP_STATE_LEARNING 0x2
#define RTL8366RB_STP_STATE_FORWARDING 0x3
#define RTL8366RB_STP_MASK GENMASK(1, 0)
#define RTL8366RB_STP_STATE(port, state) \
((state) << ((port) * 2))
#define RTL8366RB_STP_STATE_MASK(port) \
RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK)
/* CPU port control reg */
#define RTL8368RB_CPU_CTRL_REG 0x0061
#define RTL8368RB_CPU_PORTS_MSK 0x00FF
/* Disables inserting custom tag length/type 0x8899 */
#define RTL8368RB_CPU_NO_TAG BIT(15)
#define RTL8366RB_SMAR0 0x0070 /* bits 0..15 */
#define RTL8366RB_SMAR1 0x0071 /* bits 16..31 */
#define RTL8366RB_SMAR2 0x0072 /* bits 32..47 */
#define RTL8366RB_RESET_CTRL_REG 0x0100
#define RTL8366RB_CHIP_CTRL_RESET_HW BIT(0)
#define RTL8366RB_CHIP_CTRL_RESET_SW BIT(1)
#define RTL8366RB_CHIP_ID_REG 0x0509
#define RTL8366RB_CHIP_ID_8366 0x5937
#define RTL8366RB_CHIP_VERSION_CTRL_REG 0x050A
#define RTL8366RB_CHIP_VERSION_MASK 0xf
/* PHY registers control */
#define RTL8366RB_PHY_ACCESS_CTRL_REG 0x8000
#define RTL8366RB_PHY_CTRL_READ BIT(0)
#define RTL8366RB_PHY_CTRL_WRITE 0
#define RTL8366RB_PHY_ACCESS_BUSY_REG 0x8001
#define RTL8366RB_PHY_INT_BUSY BIT(0)
#define RTL8366RB_PHY_EXT_BUSY BIT(4)
#define RTL8366RB_PHY_ACCESS_DATA_REG 0x8002
#define RTL8366RB_PHY_EXT_CTRL_REG 0x8010
#define RTL8366RB_PHY_EXT_WRDATA_REG 0x8011
#define RTL8366RB_PHY_EXT_RDDATA_REG 0x8012
#define RTL8366RB_PHY_REG_MASK 0x1f
#define RTL8366RB_PHY_PAGE_OFFSET 5
#define RTL8366RB_PHY_PAGE_MASK (0xf << 5)
#define RTL8366RB_PHY_NO_OFFSET 9
#define RTL8366RB_PHY_NO_MASK (0x1f << 9)
/* VLAN Ingress Control Register 1, one bit per port.
* bit 0 .. 5 will make the switch drop ingress frames without a
* VID, such as untagged or priority-tagged frames, for the
* respective port.
* bit 6 .. 11 will make the switch drop ingress frames carrying
* a C-tag with VID != 0 for the respective port.
*/
#define RTL8366RB_VLAN_INGRESS_CTRL1_REG 0x037E
#define RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) (BIT((port)) | BIT((port) + 6))
/* VLAN Ingress Control Register 2, one bit per port.
* bit0 .. bit5 will make the switch drop all ingress frames with
* a VLAN classification that does not include the port in its
* member set.
*/
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
/* LED control registers */
#define RTL8366RB_LED_BLINKRATE_REG 0x0430
#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
#define RTL8366RB_LED_CTRL_REG 0x0431
#define RTL8366RB_LED_OFF 0x0
#define RTL8366RB_LED_DUP_COL 0x1
#define RTL8366RB_LED_LINK_ACT 0x2
#define RTL8366RB_LED_SPD1000 0x3
#define RTL8366RB_LED_SPD100 0x4
#define RTL8366RB_LED_SPD10 0x5
#define RTL8366RB_LED_SPD1000_ACT 0x6
#define RTL8366RB_LED_SPD100_ACT 0x7
#define RTL8366RB_LED_SPD10_ACT 0x8
#define RTL8366RB_LED_SPD100_10_ACT 0x9
#define RTL8366RB_LED_FIBER 0xa
#define RTL8366RB_LED_AN_FAULT 0xb
#define RTL8366RB_LED_LINK_RX 0xc
#define RTL8366RB_LED_LINK_TX 0xd
#define RTL8366RB_LED_MASTER 0xe
#define RTL8366RB_LED_FORCE 0xf
#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
#define RTL8366RB_LED_1_OFFSET 6
#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
#define RTL8366RB_LED_3_OFFSET 6
#define RTL8366RB_MIB_COUNT 33
#define RTL8366RB_GLOBAL_MIB_COUNT 1
#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
#define RTL8366RB_MIB_COUNTER_BASE 0x1000
#define RTL8366RB_MIB_CTRL_REG 0x13F0
#define RTL8366RB_MIB_CTRL_USER_MASK 0x0FFC
#define RTL8366RB_MIB_CTRL_BUSY_MASK BIT(0)
#define RTL8366RB_MIB_CTRL_RESET_MASK BIT(1)
#define RTL8366RB_MIB_CTRL_PORT_RESET(_p) BIT(2 + (_p))
#define RTL8366RB_MIB_CTRL_GLOBAL_RESET BIT(11)
#define RTL8366RB_PORT_VLAN_CTRL_BASE 0x0063
#define RTL8366RB_PORT_VLAN_CTRL_REG(_p) \
(RTL8366RB_PORT_VLAN_CTRL_BASE + (_p) / 4)
#define RTL8366RB_PORT_VLAN_CTRL_MASK 0xf
#define RTL8366RB_PORT_VLAN_CTRL_SHIFT(_p) (4 * ((_p) % 4))
#define RTL8366RB_VLAN_TABLE_READ_BASE 0x018C
#define RTL8366RB_VLAN_TABLE_WRITE_BASE 0x0185
#define RTL8366RB_TABLE_ACCESS_CTRL_REG 0x0180
#define RTL8366RB_TABLE_VLAN_READ_CTRL 0x0E01
#define RTL8366RB_TABLE_VLAN_WRITE_CTRL 0x0F01
#define RTL8366RB_VLAN_MC_BASE(_x) (0x0020 + (_x) * 3)
#define RTL8366RB_PORT_LINK_STATUS_BASE 0x0014
#define RTL8366RB_PORT_STATUS_SPEED_MASK 0x0003
#define RTL8366RB_PORT_STATUS_DUPLEX_MASK 0x0004
#define RTL8366RB_PORT_STATUS_LINK_MASK 0x0010
#define RTL8366RB_PORT_STATUS_TXPAUSE_MASK 0x0020
#define RTL8366RB_PORT_STATUS_RXPAUSE_MASK 0x0040
#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
#define RTL8366RB_NUM_VLANS 16
#define RTL8366RB_NUM_LEDGROUPS 4
#define RTL8366RB_NUM_VIDS 4096
#define RTL8366RB_PRIORITYMAX 7
#define RTL8366RB_NUM_FIDS 8
#define RTL8366RB_FIDMAX 7
#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */
#define RTL8366RB_PORT_2 BIT(1) /* In userspace port 1 */
#define RTL8366RB_PORT_3 BIT(2) /* In userspace port 2 */
#define RTL8366RB_PORT_4 BIT(3) /* In userspace port 3 */
#define RTL8366RB_PORT_5 BIT(4) /* In userspace port 4 */
#define RTL8366RB_PORT_CPU BIT(5) /* CPU port */
#define RTL8366RB_PORT_ALL (RTL8366RB_PORT_1 | \
RTL8366RB_PORT_2 | \
RTL8366RB_PORT_3 | \
RTL8366RB_PORT_4 | \
RTL8366RB_PORT_5 | \
RTL8366RB_PORT_CPU)
#define RTL8366RB_PORT_ALL_BUT_CPU (RTL8366RB_PORT_1 | \
RTL8366RB_PORT_2 | \
RTL8366RB_PORT_3 | \
RTL8366RB_PORT_4 | \
RTL8366RB_PORT_5)
#define RTL8366RB_PORT_ALL_EXTERNAL (RTL8366RB_PORT_1 | \
RTL8366RB_PORT_2 | \
RTL8366RB_PORT_3 | \
RTL8366RB_PORT_4)
#define RTL8366RB_PORT_ALL_INTERNAL RTL8366RB_PORT_CPU
/* First configuration word per member config, VID and prio */
#define RTL8366RB_VLAN_VID_MASK 0xfff
#define RTL8366RB_VLAN_PRIORITY_SHIFT 12
#define RTL8366RB_VLAN_PRIORITY_MASK 0x7
/* Second configuration word per member config, member and untagged */
#define RTL8366RB_VLAN_UNTAG_SHIFT 8
#define RTL8366RB_VLAN_UNTAG_MASK 0xff
#define RTL8366RB_VLAN_MEMBER_MASK 0xff
/* Third config word per member config, STAG currently unused */
#define RTL8366RB_VLAN_STAG_MBR_MASK 0xff
#define RTL8366RB_VLAN_STAG_MBR_SHIFT 8
#define RTL8366RB_VLAN_STAG_IDX_MASK 0x7
#define RTL8366RB_VLAN_STAG_IDX_SHIFT 5
#define RTL8366RB_VLAN_FID_MASK 0x7
/* Port ingress bandwidth control */
#define RTL8366RB_IB_BASE 0x0200
#define RTL8366RB_IB_REG(pnum) (RTL8366RB_IB_BASE + (pnum))
#define RTL8366RB_IB_BDTH_MASK 0x3fff
#define RTL8366RB_IB_PREIFG BIT(14)
/* Port egress bandwidth control */
#define RTL8366RB_EB_BASE 0x02d1
#define RTL8366RB_EB_REG(pnum) (RTL8366RB_EB_BASE + (pnum))
#define RTL8366RB_EB_BDTH_MASK 0x3fff
#define RTL8366RB_EB_PREIFG_REG 0x02f8
#define RTL8366RB_EB_PREIFG BIT(9)
#define RTL8366RB_BDTH_SW_MAX 1048512 /* 1048576? */
#define RTL8366RB_BDTH_UNIT 64
#define RTL8366RB_BDTH_REG_DEFAULT 16383
/* QOS */
#define RTL8366RB_QOS BIT(15)
/* Include/Exclude Preamble and IFG (20 bytes). 0:Exclude, 1:Include. */
#define RTL8366RB_QOS_DEFAULT_PREIFG 1
/* Interrupt handling */
#define RTL8366RB_INTERRUPT_CONTROL_REG 0x0440
#define RTL8366RB_INTERRUPT_POLARITY BIT(0)
#define RTL8366RB_P4_RGMII_LED BIT(2)
#define RTL8366RB_INTERRUPT_MASK_REG 0x0441
#define RTL8366RB_INTERRUPT_LINK_CHGALL GENMASK(11, 0)
#define RTL8366RB_INTERRUPT_ACLEXCEED BIT(8)
#define RTL8366RB_INTERRUPT_STORMEXCEED BIT(9)
#define RTL8366RB_INTERRUPT_P4_FIBER BIT(12)
#define RTL8366RB_INTERRUPT_P4_UTP BIT(13)
#define RTL8366RB_INTERRUPT_VALID (RTL8366RB_INTERRUPT_LINK_CHGALL | \
RTL8366RB_INTERRUPT_ACLEXCEED | \
RTL8366RB_INTERRUPT_STORMEXCEED | \
RTL8366RB_INTERRUPT_P4_FIBER | \
RTL8366RB_INTERRUPT_P4_UTP)
#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442
#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */
/* Port isolation registers */
#define RTL8366RB_PORT_ISO_BASE 0x0F08
#define RTL8366RB_PORT_ISO(pnum) (RTL8366RB_PORT_ISO_BASE + (pnum))
#define RTL8366RB_PORT_ISO_EN BIT(0)
#define RTL8366RB_PORT_ISO_PORTS_MASK GENMASK(7, 1)
#define RTL8366RB_PORT_ISO_PORTS(pmask) ((pmask) << 1)
/* bits 0..5 enable force when cleared */
#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11
#define RTL8366RB_OAM_PARSER_REG 0x0F14
#define RTL8366RB_OAM_MULTIPLEXER_REG 0x0F15
#define RTL8366RB_GREEN_FEATURE_REG 0x0F51
#define RTL8366RB_GREEN_FEATURE_MSK 0x0007
#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
/**
* struct rtl8366rb - RTL8366RB-specific data
* @max_mtu: per-port max MTU setting
* @pvid_enabled: if PVID is set for respective port
*/
struct rtl8366rb {
unsigned int max_mtu[RTL8366RB_NUM_PORTS];
bool pvid_enabled[RTL8366RB_NUM_PORTS];
};
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 0, 4, "IfInOctets" },
{ 0, 4, 4, "EtherStatsOctets" },
{ 0, 8, 2, "EtherStatsUnderSizePkts" },
{ 0, 10, 2, "EtherFragments" },
{ 0, 12, 2, "EtherStatsPkts64Octets" },
{ 0, 14, 2, "EtherStatsPkts65to127Octets" },
{ 0, 16, 2, "EtherStatsPkts128to255Octets" },
{ 0, 18, 2, "EtherStatsPkts256to511Octets" },
{ 0, 20, 2, "EtherStatsPkts512to1023Octets" },
{ 0, 22, 2, "EtherStatsPkts1024to1518Octets" },
{ 0, 24, 2, "EtherOversizeStats" },
{ 0, 26, 2, "EtherStatsJabbers" },
{ 0, 28, 2, "IfInUcastPkts" },
{ 0, 30, 2, "EtherStatsMulticastPkts" },
{ 0, 32, 2, "EtherStatsBroadcastPkts" },
{ 0, 34, 2, "EtherStatsDropEvents" },
{ 0, 36, 2, "Dot3StatsFCSErrors" },
{ 0, 38, 2, "Dot3StatsSymbolErrors" },
{ 0, 40, 2, "Dot3InPauseFrames" },
{ 0, 42, 2, "Dot3ControlInUnknownOpcodes" },
{ 0, 44, 4, "IfOutOctets" },
{ 0, 48, 2, "Dot3StatsSingleCollisionFrames" },
{ 0, 50, 2, "Dot3StatMultipleCollisionFrames" },
{ 0, 52, 2, "Dot3sDeferredTransmissions" },
{ 0, 54, 2, "Dot3StatsLateCollisions" },
{ 0, 56, 2, "EtherStatsCollisions" },
{ 0, 58, 2, "Dot3StatsExcessiveCollisions" },
{ 0, 60, 2, "Dot3OutPauseFrames" },
{ 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" },
{ 0, 64, 2, "Dot1dTpPortInDiscards" },
{ 0, 66, 2, "IfOutUcastPkts" },
{ 0, 68, 2, "IfOutMulticastPkts" },
{ 0, 70, 2, "IfOutBroadcastPkts" },
};
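/* Read one MIB counter (16 bits at a time, mib->length words) for a port
* from the MIB counter address space.
*/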
static int rtl8366rb_get_mib_counter(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
u64 *mibvalue)
{
u32 addr, val;
int ret;
int i;
addr = RTL8366RB_MIB_COUNTER_BASE +
RTL8366RB_MIB_COUNTER_PORT_OFFSET * (port) +
mib->offset;
/* Write the counter address first; the ASIC will then prepare the
* 64-bit counter and wait for it to be retrieved.
*/
ret = regmap_write(priv->map, addr, 0); /* Write whatever */
if (ret)
return ret;
/* Read MIB control register */
ret = regmap_read(priv->map, RTL8366RB_MIB_CTRL_REG, &val);
if (ret)
return -EIO;
if (val & RTL8366RB_MIB_CTRL_BUSY_MASK)
return -EBUSY;
if (val & RTL8366RB_MIB_CTRL_RESET_MASK)
return -EIO;
/* Read each individual MIB 16 bits at the time */
*mibvalue = 0;
for (i = mib->length; i > 0; i--) {
ret = regmap_read(priv->map, addr + (i - 1), &val);
if (ret)
return ret;
*mibvalue = (*mibvalue << 16) | (val & 0xFFFF);
}
return 0;
}
static u32 rtl8366rb_get_irqmask(struct irq_data *d)
{
int line = irqd_to_hwirq(d);
u32 val;
/* For line interrupts we combine link down in bits
* 6..11 with link up in bits 0..5 into one interrupt.
*/
if (line < 12)
val = BIT(line) | BIT(line + 6);
else
val = BIT(line);
return val;
}
static void rtl8366rb_mask_irq(struct irq_data *d)
{
struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
int ret;
ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
rtl8366rb_get_irqmask(d), 0);
if (ret)
dev_err(priv->dev, "could not mask IRQ\n");
}
static void rtl8366rb_unmask_irq(struct irq_data *d)
{
struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
int ret;
ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
rtl8366rb_get_irqmask(d),
rtl8366rb_get_irqmask(d));
if (ret)
dev_err(priv->dev, "could not unmask IRQ\n");
}
static irqreturn_t rtl8366rb_irq(int irq, void *data)
{
struct realtek_priv *priv = data;
u32 stat;
int ret;
/* This clears the IRQ status register */
ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
&stat);
if (ret) {
dev_err(priv->dev, "can't read interrupt status\n");
return IRQ_NONE;
}
stat &= RTL8366RB_INTERRUPT_VALID;
if (!stat)
return IRQ_NONE;
while (stat) {
int line = __ffs(stat);
int child_irq;
stat &= ~BIT(line);
/* For line interrupts we combine link down in bits
* 6..11 with link up in bits 0..5 into one interrupt.
*/
if (line < 12 && line > 5)
line -= 5;
child_irq = irq_find_mapping(priv->irqdomain, line);
handle_nested_irq(child_irq);
}
return IRQ_HANDLED;
}
static struct irq_chip rtl8366rb_irq_chip = {
.name = "RTL8366RB",
.irq_mask = rtl8366rb_mask_irq,
.irq_unmask = rtl8366rb_unmask_irq,
};
static int rtl8366rb_irq_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(irq, domain->host_data);
irq_set_chip_and_handler(irq, &rtl8366rb_irq_chip, handle_simple_irq);
irq_set_nested_thread(irq, 1);
irq_set_noprobe(irq);
return 0;
}
static void rtl8366rb_irq_unmap(struct irq_domain *d, unsigned int irq)
{
irq_set_nested_thread(irq, 0);
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
}
static const struct irq_domain_ops rtl8366rb_irqdomain_ops = {
.map = rtl8366rb_irq_map,
.unmap = rtl8366rb_irq_unmap,
.xlate = irq_domain_xlate_onecell,
};
static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
{
struct device_node *intc;
unsigned long irq_trig;
int irq;
int ret;
u32 val;
int i;
intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
if (!intc) {
dev_err(priv->dev, "missing child interrupt-controller node\n");
return -EINVAL;
}
/* RTL8366RB IRQs cascade off this one */
irq = of_irq_get(intc, 0);
if (irq <= 0) {
dev_err(priv->dev, "failed to get parent IRQ\n");
ret = irq ? irq : -EINVAL;
goto out_put_node;
}
/* This clears the IRQ status register */
ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
&val);
if (ret) {
dev_err(priv->dev, "can't read interrupt status\n");
goto out_put_node;
}
/* Fetch IRQ edge information from the descriptor */
irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
dev_info(priv->dev, "active high/rising IRQ\n");
val = 0;
break;
case IRQF_TRIGGER_FALLING:
case IRQF_TRIGGER_LOW:
dev_info(priv->dev, "active low/falling IRQ\n");
val = RTL8366RB_INTERRUPT_POLARITY;
break;
default:
dev_err(priv->dev, "unsupported IRQ trigger type %lu\n",
irq_trig);
ret = -EINVAL;
goto out_put_node;
}
ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_INTERRUPT_POLARITY,
val);
if (ret) {
dev_err(priv->dev, "could not configure IRQ polarity\n");
goto out_put_node;
}
ret = devm_request_threaded_irq(priv->dev, irq, NULL,
rtl8366rb_irq, IRQF_ONESHOT,
"RTL8366RB", priv);
if (ret) {
dev_err(priv->dev, "unable to request irq: %d\n", ret);
goto out_put_node;
}
priv->irqdomain = irq_domain_add_linear(intc,
RTL8366RB_NUM_INTERRUPT,
&rtl8366rb_irqdomain_ops,
priv);
if (!priv->irqdomain) {
dev_err(priv->dev, "failed to create IRQ domain\n");
ret = -EINVAL;
goto out_put_node;
}
for (i = 0; i < priv->num_ports; i++)
irq_set_parent(irq_create_mapping(priv->irqdomain, i), irq);
out_put_node:
of_node_put(intc);
return ret;
}
static int rtl8366rb_set_addr(struct realtek_priv *priv)
{
u8 addr[ETH_ALEN];
u16 val;
int ret;
eth_random_addr(addr);
dev_info(priv->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
val = addr[0] << 8 | addr[1];
ret = regmap_write(priv->map, RTL8366RB_SMAR0, val);
if (ret)
return ret;
val = addr[2] << 8 | addr[3];
ret = regmap_write(priv->map, RTL8366RB_SMAR1, val);
if (ret)
return ret;
val = addr[4] << 8 | addr[5];
ret = regmap_write(priv->map, RTL8366RB_SMAR2, val);
if (ret)
return ret;
return 0;
}
/* Found in a vendor driver */
/* Struct for handling the jam tables' entries */
struct rtl8366rb_jam_tbl_entry {
u16 reg;
u16 val;
};
/* For the "version 0" early silicon, appear in most source releases */
static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_0[] = {
{0x000B, 0x0001}, {0x03A6, 0x0100}, {0x03A7, 0x0001}, {0x02D1, 0x3FFF},
{0x02D2, 0x3FFF}, {0x02D3, 0x3FFF}, {0x02D4, 0x3FFF}, {0x02D5, 0x3FFF},
{0x02D6, 0x3FFF}, {0x02D7, 0x3FFF}, {0x02D8, 0x3FFF}, {0x022B, 0x0688},
{0x022C, 0x0FAC}, {0x03D0, 0x4688}, {0x03D1, 0x01F5}, {0x0000, 0x0830},
{0x02F9, 0x0200}, {0x02F7, 0x7FFF}, {0x02F8, 0x03FF}, {0x0080, 0x03E8},
{0x0081, 0x00CE}, {0x0082, 0x00DA}, {0x0083, 0x0230}, {0xBE0F, 0x2000},
{0x0231, 0x422A}, {0x0232, 0x422A}, {0x0233, 0x422A}, {0x0234, 0x422A},
{0x0235, 0x422A}, {0x0236, 0x422A}, {0x0237, 0x422A}, {0x0238, 0x422A},
{0x0239, 0x422A}, {0x023A, 0x422A}, {0x023B, 0x422A}, {0x023C, 0x422A},
{0x023D, 0x422A}, {0x023E, 0x422A}, {0x023F, 0x422A}, {0x0240, 0x422A},
{0x0241, 0x422A}, {0x0242, 0x422A}, {0x0243, 0x422A}, {0x0244, 0x422A},
{0x0245, 0x422A}, {0x0246, 0x422A}, {0x0247, 0x422A}, {0x0248, 0x422A},
{0x0249, 0x0146}, {0x024A, 0x0146}, {0x024B, 0x0146}, {0xBE03, 0xC961},
{0x024D, 0x0146}, {0x024E, 0x0146}, {0x024F, 0x0146}, {0x0250, 0x0146},
{0xBE64, 0x0226}, {0x0252, 0x0146}, {0x0253, 0x0146}, {0x024C, 0x0146},
{0x0251, 0x0146}, {0x0254, 0x0146}, {0xBE62, 0x3FD0}, {0x0084, 0x0320},
{0x0255, 0x0146}, {0x0256, 0x0146}, {0x0257, 0x0146}, {0x0258, 0x0146},
{0x0259, 0x0146}, {0x025A, 0x0146}, {0x025B, 0x0146}, {0x025C, 0x0146},
{0x025D, 0x0146}, {0x025E, 0x0146}, {0x025F, 0x0146}, {0x0260, 0x0146},
{0x0261, 0xA23F}, {0x0262, 0x0294}, {0x0263, 0xA23F}, {0x0264, 0x0294},
{0x0265, 0xA23F}, {0x0266, 0x0294}, {0x0267, 0xA23F}, {0x0268, 0x0294},
{0x0269, 0xA23F}, {0x026A, 0x0294}, {0x026B, 0xA23F}, {0x026C, 0x0294},
{0x026D, 0xA23F}, {0x026E, 0x0294}, {0x026F, 0xA23F}, {0x0270, 0x0294},
{0x02F5, 0x0048}, {0xBE09, 0x0E00}, {0xBE1E, 0x0FA0}, {0xBE14, 0x8448},
{0xBE15, 0x1007}, {0xBE4A, 0xA284}, {0xC454, 0x3F0B}, {0xC474, 0x3F0B},
{0xBE48, 0x3672}, {0xBE4B, 0x17A7}, {0xBE4C, 0x0B15}, {0xBE52, 0x0EDD},
{0xBE49, 0x8C00}, {0xBE5B, 0x785C}, {0xBE5C, 0x785C}, {0xBE5D, 0x785C},
{0xBE61, 0x368A}, {0xBE63, 0x9B84}, {0xC456, 0xCC13}, {0xC476, 0xCC13},
{0xBE65, 0x307D}, {0xBE6D, 0x0005}, {0xBE6E, 0xE120}, {0xBE2E, 0x7BAF},
};
/* This v1 init sequence is from Belkin F5D8235 U-Boot release */
static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_1[] = {
{0x0000, 0x0830}, {0x0001, 0x8000}, {0x0400, 0x8130}, {0xBE78, 0x3C3C},
{0x0431, 0x5432}, {0xBE37, 0x0CE4}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0},
{0xC44C, 0x1585}, {0xC44C, 0x1185}, {0xC44C, 0x1585}, {0xC46C, 0x1585},
{0xC46C, 0x1185}, {0xC46C, 0x1585}, {0xC451, 0x2135}, {0xC471, 0x2135},
{0xBE10, 0x8140}, {0xBE15, 0x0007}, {0xBE6E, 0xE120}, {0xBE69, 0xD20F},
{0xBE6B, 0x0320}, {0xBE24, 0xB000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF20},
{0xBE21, 0x0140}, {0xBE20, 0x00BB}, {0xBE24, 0xB800}, {0xBE24, 0x0000},
{0xBE24, 0x7000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF60}, {0xBE21, 0x0140},
{0xBE20, 0x0077}, {0xBE24, 0x7800}, {0xBE24, 0x0000}, {0xBE2E, 0x7B7A},
{0xBE36, 0x0CE4}, {0x02F5, 0x0048}, {0xBE77, 0x2940}, {0x000A, 0x83E0},
{0xBE79, 0x3C3C}, {0xBE00, 0x1340},
};
/* This v2 init sequence is from Belkin F5D8235 U-Boot release */
static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_2[] = {
{0x0450, 0x0000}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0431, 0x5432},
{0xC44F, 0x6250}, {0xC46F, 0x6250}, {0xC456, 0x0C14}, {0xC476, 0x0C14},
{0xC44C, 0x1C85}, {0xC44C, 0x1885}, {0xC44C, 0x1C85}, {0xC46C, 0x1C85},
{0xC46C, 0x1885}, {0xC46C, 0x1C85}, {0xC44C, 0x0885}, {0xC44C, 0x0881},
{0xC44C, 0x0885}, {0xC46C, 0x0885}, {0xC46C, 0x0881}, {0xC46C, 0x0885},
{0xBE2E, 0x7BA7}, {0xBE36, 0x1000}, {0xBE37, 0x1000}, {0x8000, 0x0001},
{0xBE69, 0xD50F}, {0x8000, 0x0000}, {0xBE69, 0xD50F}, {0xBE6E, 0x0320},
{0xBE77, 0x2940}, {0xBE78, 0x3C3C}, {0xBE79, 0x3C3C}, {0xBE6E, 0xE120},
{0x8000, 0x0001}, {0xBE15, 0x1007}, {0x8000, 0x0000}, {0xBE15, 0x1007},
{0xBE14, 0x0448}, {0xBE1E, 0x00A0}, {0xBE10, 0x8160}, {0xBE10, 0x8140},
{0xBE00, 0x1340}, {0x0F51, 0x0010},
};
/* Appears in a DDWRT code dump */
static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_3[] = {
{0x0000, 0x0830}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0431, 0x5432},
{0x0F51, 0x0017}, {0x02F5, 0x0048}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0},
{0xC456, 0x0C14}, {0xC476, 0x0C14}, {0xC454, 0x3F8B}, {0xC474, 0x3F8B},
{0xC450, 0x2071}, {0xC470, 0x2071}, {0xC451, 0x226B}, {0xC471, 0x226B},
{0xC452, 0xA293}, {0xC472, 0xA293}, {0xC44C, 0x1585}, {0xC44C, 0x1185},
{0xC44C, 0x1585}, {0xC46C, 0x1585}, {0xC46C, 0x1185}, {0xC46C, 0x1585},
{0xC44C, 0x0185}, {0xC44C, 0x0181}, {0xC44C, 0x0185}, {0xC46C, 0x0185},
{0xC46C, 0x0181}, {0xC46C, 0x0185}, {0xBE24, 0xB000}, {0xBE23, 0xFF51},
{0xBE22, 0xDF20}, {0xBE21, 0x0140}, {0xBE20, 0x00BB}, {0xBE24, 0xB800},
{0xBE24, 0x0000}, {0xBE24, 0x7000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF60},
{0xBE21, 0x0140}, {0xBE20, 0x0077}, {0xBE24, 0x7800}, {0xBE24, 0x0000},
{0xBE2E, 0x7BA7}, {0xBE36, 0x1000}, {0xBE37, 0x1000}, {0x8000, 0x0001},
{0xBE69, 0xD50F}, {0x8000, 0x0000}, {0xBE69, 0xD50F}, {0xBE6B, 0x0320},
{0xBE77, 0x2800}, {0xBE78, 0x3C3C}, {0xBE79, 0x3C3C}, {0xBE6E, 0xE120},
{0x8000, 0x0001}, {0xBE10, 0x8140}, {0x8000, 0x0000}, {0xBE10, 0x8140},
{0xBE15, 0x1007}, {0xBE14, 0x0448}, {0xBE1E, 0x00A0}, {0xBE10, 0x8160},
{0xBE10, 0x8140}, {0xBE00, 0x1340}, {0x0450, 0x0000}, {0x0401, 0x0000},
};
/* Belkin F5D8235 v1, "belkin,f5d8235-v1" */
static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_f5d8235[] = {
{0x0242, 0x02BF}, {0x0245, 0x02BF}, {0x0248, 0x02BF}, {0x024B, 0x02BF},
{0x024E, 0x02BF}, {0x0251, 0x02BF}, {0x0254, 0x0A3F}, {0x0256, 0x0A3F},
{0x0258, 0x0A3F}, {0x025A, 0x0A3F}, {0x025C, 0x0A3F}, {0x025E, 0x0A3F},
{0x0263, 0x007C}, {0x0100, 0x0004}, {0xBE5B, 0x3500}, {0x800E, 0x200F},
{0xBE1D, 0x0F00}, {0x8001, 0x5011}, {0x800A, 0xA2F4}, {0x800B, 0x17A3},
{0xBE4B, 0x17A3}, {0xBE41, 0x5011}, {0xBE17, 0x2100}, {0x8000, 0x8304},
{0xBE40, 0x8304}, {0xBE4A, 0xA2F4}, {0x800C, 0xA8D5}, {0x8014, 0x5500},
{0x8015, 0x0004}, {0xBE4C, 0xA8D5}, {0xBE59, 0x0008}, {0xBE09, 0x0E00},
{0xBE36, 0x1036}, {0xBE37, 0x1036}, {0x800D, 0x00FF}, {0xBE4D, 0x00FF},
};
/* DGN3500, "netgear,dgn3500", "netgear,dgn3500b" */
static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_dgn3500[] = {
{0x0000, 0x0830}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0F51, 0x0017},
{0x02F5, 0x0048}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0}, {0x0450, 0x0000},
{0x0401, 0x0000}, {0x0431, 0x0960},
};
/* This jam table activates "green ethernet", i.e. low power mode. It is
* claimed to detect the cable length and not use more power than
* necessary, and the ports should enter power saving mode 10 seconds
* after a cable is disconnected. Seems to always be the same.
*/
static const struct rtl8366rb_jam_tbl_entry rtl8366rb_green_jam[] = {
{0xBE78, 0x323C}, {0xBE77, 0x5000}, {0xBE2E, 0x7BA7},
{0xBE59, 0x3459}, {0xBE5A, 0x745A}, {0xBE5B, 0x785C},
{0xBE5C, 0x785C}, {0xBE6E, 0xE120}, {0xBE79, 0x323C},
};
/* Function that jams the tables in the proper registers */
static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
int jam_size, struct realtek_priv *priv,
bool write_dbg)
{
u32 val;
int ret;
int i;
for (i = 0; i < jam_size; i++) {
if ((jam_table[i].reg & 0xBE00) == 0xBE00) {
ret = regmap_read(priv->map,
RTL8366RB_PHY_ACCESS_BUSY_REG,
&val);
if (ret)
return ret;
if (!(val & RTL8366RB_PHY_INT_BUSY)) {
ret = regmap_write(priv->map,
RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_WRITE);
if (ret)
return ret;
}
}
if (write_dbg)
dev_dbg(priv->dev, "jam %04x into register %04x\n",
jam_table[i].val,
jam_table[i].reg);
ret = regmap_write(priv->map,
jam_table[i].reg,
jam_table[i].val);
if (ret)
return ret;
}
return 0;
}
static int rtl8366rb_setup(struct dsa_switch *ds)
{
struct realtek_priv *priv = ds->priv;
const struct rtl8366rb_jam_tbl_entry *jam_table;
struct rtl8366rb *rb;
u32 chip_ver = 0;
u32 chip_id = 0;
int jam_size;
u32 val;
int ret;
int i;
rb = priv->chip_data;
ret = regmap_read(priv->map, RTL8366RB_CHIP_ID_REG, &chip_id);
if (ret) {
dev_err(priv->dev, "unable to read chip id\n");
return ret;
}
switch (chip_id) {
case RTL8366RB_CHIP_ID_8366:
break;
default:
dev_err(priv->dev, "unknown chip id (%04x)\n", chip_id);
return -ENODEV;
}
ret = regmap_read(priv->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
&chip_ver);
if (ret) {
dev_err(priv->dev, "unable to read chip version\n");
return ret;
}
dev_info(priv->dev, "RTL%04x ver %u chip found\n",
chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);
/* Do the init dance using the right jam table */
switch (chip_ver) {
case 0:
jam_table = rtl8366rb_init_jam_ver_0;
jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_0);
break;
case 1:
jam_table = rtl8366rb_init_jam_ver_1;
jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_1);
break;
case 2:
jam_table = rtl8366rb_init_jam_ver_2;
jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_2);
break;
default:
jam_table = rtl8366rb_init_jam_ver_3;
jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_3);
break;
}
/* Special jam tables for special routers
* TODO: are these necessary? Maintainers, please test
* without them, using just the off-the-shelf tables.
*/
if (of_machine_is_compatible("belkin,f5d8235-v1")) {
jam_table = rtl8366rb_init_jam_f5d8235;
jam_size = ARRAY_SIZE(rtl8366rb_init_jam_f5d8235);
}
if (of_machine_is_compatible("netgear,dgn3500") ||
of_machine_is_compatible("netgear,dgn3500b")) {
jam_table = rtl8366rb_init_jam_dgn3500;
jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
}
ret = rtl8366rb_jam_table(jam_table, jam_size, priv, true);
if (ret)
return ret;
/* Isolate all user ports so they can only send packets to themselves and the CPU port */
for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
RTL8366RB_PORT_ISO_EN);
if (ret)
return ret;
}
/* CPU port can send packets to all ports */
ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
RTL8366RB_PORT_ISO_EN);
if (ret)
return ret;
/* Set up the "green ethernet" feature */
ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
ARRAY_SIZE(rtl8366rb_green_jam), priv, false);
if (ret)
return ret;
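/* The green feature register takes a chip version dependent magic
 * value: 0x0007 on version 1 and 0x0003 otherwise. The individual
 * bits appear to be undocumented.
 */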
ret = regmap_write(priv->map,
RTL8366RB_GREEN_FEATURE_REG,
(chip_ver == 1) ? 0x0007 : 0x0003);
if (ret)
return ret;
/* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */
ret = regmap_write(priv->map, 0x0c, 0x240);
if (ret)
return ret;
ret = regmap_write(priv->map, 0x0d, 0x240);
if (ret)
return ret;
/* Set some random MAC address */
ret = rtl8366rb_set_addr(priv);
if (ret)
return ret;
/* Enable CPU port with custom DSA tag 8899.
*
* If you set RTL8368RB_CPU_NO_TAG (bit 15) in this register
* the custom tag is turned off.
*/
ret = regmap_update_bits(priv->map, RTL8368RB_CPU_CTRL_REG,
0xFFFF,
BIT(priv->cpu_port));
if (ret)
return ret;
/* Make sure we default-enable the fixed CPU port */
ret = regmap_update_bits(priv->map, RTL8366RB_PECR,
BIT(priv->cpu_port),
0);
if (ret)
return ret;
/* Set maximum packet length to 1536 bytes */
ret = regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_MAX_LENGTH_MASK,
RTL8366RB_SGCR_MAX_LENGTH_1536);
if (ret)
return ret;
for (i = 0; i < RTL8366RB_NUM_PORTS; i++)
/* layer 2 size, see rtl8366rb_change_mtu() */
rb->max_mtu[i] = 1532;
/* Disable learning for all ports */
ret = regmap_write(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
RTL8366RB_PORT_ALL);
if (ret)
return ret;
/* Enable auto ageing for all ports */
ret = regmap_write(priv->map, RTL8366RB_SECURITY_CTRL, 0);
if (ret)
return ret;
/* Port 4 setup: this enables Port 4, usually the WAN port.
 * The common PHY IO mode is apparently mode 0, which is not what
 * the port is initialized to. There is no explanation of the
 * IO modes in the Realtek source code; if your WAN port is
 * connected to something exotic such as fiber, then this might
 * be worth experimenting with.
 */
ret = regmap_update_bits(priv->map, RTL8366RB_PMC0,
RTL8366RB_PMC0_P4_IOMODE_MASK,
0 << RTL8366RB_PMC0_P4_IOMODE_SHIFT);
if (ret)
return ret;
/* Accept all packets by default, we enable filtering on-demand */
ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
0);
if (ret)
return ret;
ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
0);
if (ret)
return ret;
/* Don't drop packets whose DA has not been learned */
ret = regmap_update_bits(priv->map, RTL8366RB_SSCR2,
RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
if (ret)
return ret;
/* Set blinking, TODO: make this configurable */
ret = regmap_update_bits(priv->map, RTL8366RB_LED_BLINKRATE_REG,
RTL8366RB_LED_BLINKRATE_MASK,
RTL8366RB_LED_BLINKRATE_56MS);
if (ret)
return ret;
/* Set up LED activity:
 * Each port has 4 LEDs; we configure all ports to the same
 * behaviour (no individual config) but we can set up each
 * LED separately.
 */
if (priv->leds_disabled) {
/* Turn everything off */
regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x0FFF, 0);
regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x0FFF, 0);
regmap_update_bits(priv->map,
RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_P4_RGMII_LED,
0);
val = RTL8366RB_LED_OFF;
} else {
/* TODO: make this configurable per LED */
val = RTL8366RB_LED_FORCE;
}
for (i = 0; i < 4; i++) {
ret = regmap_update_bits(priv->map,
RTL8366RB_LED_CTRL_REG,
0xf << (i * 4),
val << (i * 4));
if (ret)
return ret;
}
ret = rtl8366_reset_vlan(priv);
if (ret)
return ret;
ret = rtl8366rb_setup_cascaded_irq(priv);
if (ret)
dev_info(priv->dev, "no interrupt support\n");
if (priv->setup_interface) {
ret = priv->setup_interface(ds);
if (ret) {
dev_err(priv->dev, "could not set up MDIO bus\n");
return -ENODEV;
}
}
return 0;
}
static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
{
/* This switch uses the 4 byte protocol A Realtek DSA tag */
return DSA_TAG_PROTO_RTL4_A;
}
static void rtl8366rb_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
unsigned long *interfaces = config->supported_interfaces;
struct realtek_priv *priv = ds->priv;
if (port == priv->cpu_port) {
__set_bit(PHY_INTERFACE_MODE_MII, interfaces);
__set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
/* REVMII only supports 100M FD */
__set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
/* RGMII only supports 1G FD */
phy_interface_set_rgmii(interfaces);
config->mac_capabilities = MAC_1000 | MAC_100 |
MAC_SYM_PAUSE;
} else {
/* RSGMII port, but we don't have that, and we don't
* specify in DT, so phylib uses the default of GMII
*/
__set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
config->mac_capabilities = MAC_1000 | MAC_100 | MAC_10 |
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
}
}
static void
rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface, struct phy_device *phydev,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
struct realtek_priv *priv = ds->priv;
int ret;
if (port != priv->cpu_port)
return;
dev_dbg(priv->dev, "MAC link up on CPU port (%d)\n", port);
/* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
ret = regmap_update_bits(priv->map, RTL8366RB_MAC_FORCE_CTRL_REG,
BIT(port), BIT(port));
if (ret) {
dev_err(priv->dev, "failed to force 1Gbit on CPU port\n");
return;
}
ret = regmap_update_bits(priv->map, RTL8366RB_PAACR2,
0xFF00U,
RTL8366RB_PAACR_CPU_PORT << 8);
if (ret) {
dev_err(priv->dev, "failed to set PAACR on CPU port\n");
return;
}
/* Enable the CPU port */
ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
0);
if (ret) {
dev_err(priv->dev, "failed to enable the CPU port\n");
return;
}
}
static void
rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
struct realtek_priv *priv = ds->priv;
int ret;
if (port != priv->cpu_port)
return;
dev_dbg(priv->dev, "MAC link down on CPU port (%d)\n", port);
/* Disable the CPU port */
ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
BIT(port));
if (ret) {
dev_err(priv->dev, "failed to disable the CPU port\n");
return;
}
}
static void rb8366rb_set_port_led(struct realtek_priv *priv,
int port, bool enable)
{
u16 val = enable ? 0x3f : 0;
int ret;
if (priv->leds_disabled)
return;
switch (port) {
case 0:
ret = regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x3F, val);
break;
case 1:
ret = regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x3F << RTL8366RB_LED_1_OFFSET,
val << RTL8366RB_LED_1_OFFSET);
break;
case 2:
ret = regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x3F, val);
break;
case 3:
ret = regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x3F << RTL8366RB_LED_3_OFFSET,
val << RTL8366RB_LED_3_OFFSET);
break;
case 4:
ret = regmap_update_bits(priv->map,
RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_P4_RGMII_LED,
enable ? RTL8366RB_P4_RGMII_LED : 0);
break;
default:
dev_err(priv->dev, "no LED for port %d\n", port);
return;
}
if (ret)
dev_err(priv->dev, "error updating LED on port %d\n", port);
}
static int
rtl8366rb_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct realtek_priv *priv = ds->priv;
int ret;
dev_dbg(priv->dev, "enable port %d\n", port);
ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
0);
if (ret)
return ret;
rb8366rb_set_port_led(priv, port, true);
return 0;
}
static void
rtl8366rb_port_disable(struct dsa_switch *ds, int port)
{
struct realtek_priv *priv = ds->priv;
int ret;
dev_dbg(priv->dev, "disable port %d\n", port);
ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
BIT(port));
if (ret)
return;
rb8366rb_set_port_led(priv, port, false);
}
static int
rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
struct realtek_priv *priv = ds->priv;
unsigned int port_bitmap = 0;
int ret, i;
/* Loop over all ports other than the current one */
for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
/* Current port handled last */
if (i == port)
continue;
/* Not on this bridge */
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Join this port to each other port on the bridge */
ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(port)),
RTL8366RB_PORT_ISO_PORTS(BIT(port)));
if (ret)
dev_err(priv->dev, "failed to join port %d\n", port);
port_bitmap |= BIT(i);
}
/* Set the bits for the ports we can access */
return regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
RTL8366RB_PORT_ISO_PORTS(port_bitmap),
RTL8366RB_PORT_ISO_PORTS(port_bitmap));
}
static void
rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct realtek_priv *priv = ds->priv;
unsigned int port_bitmap = 0;
int ret, i;
/* Loop over all ports other than this one */
for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
/* Current port handled last */
if (i == port)
continue;
/* Not on this bridge */
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Remove this port from any other port on the bridge */
ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
if (ret)
dev_err(priv->dev, "failed to leave port %d\n", port);
port_bitmap |= BIT(i);
}
/* Clear the bits for the ports we can no longer access, but leave our own port bit alone */
regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
}
/**
* rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
* @priv: SMI state container
* @port: the port to drop untagged and C-tagged frames on
* @drop: whether to drop or pass untagged and C-tagged frames
*
* Return: zero for success, a negative number on error.
*/
static int rtl8366rb_drop_untagged(struct realtek_priv *priv, int port, bool drop)
{
return regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
}
static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
struct realtek_priv *priv = ds->priv;
struct rtl8366rb *rb;
int ret;
rb = priv->chip_data;
dev_dbg(priv->dev, "port %d: %s VLAN filtering\n", port,
vlan_filtering ? "enable" : "disable");
/* If the port is not in the member set, the frame will be dropped */
ret = regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
BIT(port), vlan_filtering ? BIT(port) : 0);
if (ret)
return ret;
/* If VLAN filtering is enabled and PVID is also enabled, we must
* not drop any untagged or C-tagged frames. If we turn off VLAN
* filtering on a port, we need to accept any frames.
*/
if (vlan_filtering)
ret = rtl8366rb_drop_untagged(priv, port, !rb->pvid_enabled[port]);
else
ret = rtl8366rb_drop_untagged(priv, port, false);
return ret;
}
static int
rtl8366rb_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
/* We support enabling/disabling learning */
if (flags.mask & ~(BR_LEARNING))
return -EINVAL;
return 0;
}
static int
rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct realtek_priv *priv = ds->priv;
int ret;
if (flags.mask & BR_LEARNING) {
ret = regmap_update_bits(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
BIT(port),
(flags.val & BR_LEARNING) ? 0 : BIT(port));
if (ret)
return ret;
}
return 0;
}
static void
rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
struct realtek_priv *priv = ds->priv;
u32 val;
int i;
switch (state) {
case BR_STATE_DISABLED:
val = RTL8366RB_STP_STATE_DISABLED;
break;
case BR_STATE_BLOCKING:
case BR_STATE_LISTENING:
val = RTL8366RB_STP_STATE_BLOCKING;
break;
case BR_STATE_LEARNING:
val = RTL8366RB_STP_STATE_LEARNING;
break;
case BR_STATE_FORWARDING:
val = RTL8366RB_STP_STATE_FORWARDING;
break;
default:
dev_err(priv->dev, "unknown bridge state requested\n");
return;
}
/* Set the same status for the port on all the FIDs */
for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
regmap_update_bits(priv->map, RTL8366RB_STP_STATE_BASE + i,
RTL8366RB_STP_STATE_MASK(port),
RTL8366RB_STP_STATE(port, val));
}
}
static void
rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
{
struct realtek_priv *priv = ds->priv;
/* This will age out any learned L2 entries */
regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
BIT(port), BIT(port));
/* Restore the normal state of things */
regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
BIT(port), 0);
}
static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct realtek_priv *priv = ds->priv;
struct rtl8366rb *rb;
unsigned int max_mtu;
u32 len;
int i;
/* Cache the per-port MTU setting */
rb = priv->chip_data;
rb->max_mtu[port] = new_mtu;
/* Roof out the MTU for the entire switch to the greatest
* common denominator: the biggest set for any one port will
* be the biggest MTU for the switch.
*
* The first setting, 1522 bytes, is the max IP packet of 1500 bytes,
* plus the ethernet header of 18 bytes (giving 1518 bytes), plus the
* CPU tag of 4 bytes. This function should consider the parameter an
* SDU, so the MTU passed for this setting is 1518 bytes. The same
* logic of subtracting the DSA tag of 4 bytes applies to the other
* settings.
*/
max_mtu = 1518;
for (i = 0; i < RTL8366RB_NUM_PORTS; i++) {
if (rb->max_mtu[i] > max_mtu)
max_mtu = rb->max_mtu[i];
}
if (max_mtu <= 1518)
len = RTL8366RB_SGCR_MAX_LENGTH_1522;
else if (max_mtu > 1518 && max_mtu <= 1532)
len = RTL8366RB_SGCR_MAX_LENGTH_1536;
else if (max_mtu > 1532 && max_mtu <= 1548)
len = RTL8366RB_SGCR_MAX_LENGTH_1552;
else
len = RTL8366RB_SGCR_MAX_LENGTH_16000;
return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_MAX_LENGTH_MASK,
len);
}
static int rtl8366rb_max_mtu(struct dsa_switch *ds, int port)
{
/* The max MTU is 16000 bytes, so we subtract the CPU tag
* and the max presented to the system is 15996 bytes.
*/
return 15996;
}
static int rtl8366rb_get_vlan_4k(struct realtek_priv *priv, u32 vid,
struct rtl8366_vlan_4k *vlan4k)
{
u32 data[3];
int ret;
int i;
memset(vlan4k, '\0', sizeof(struct rtl8366_vlan_4k));
if (vid >= RTL8366RB_NUM_VIDS)
return -EINVAL;
/* write VID */
ret = regmap_write(priv->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
vid & RTL8366RB_VLAN_VID_MASK);
if (ret)
return ret;
/* write table access control word */
ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
RTL8366RB_TABLE_VLAN_READ_CTRL);
if (ret)
return ret;
for (i = 0; i < 3; i++) {
ret = regmap_read(priv->map,
RTL8366RB_VLAN_TABLE_READ_BASE + i,
&data[i]);
if (ret)
return ret;
}
vlan4k->vid = vid;
vlan4k->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
RTL8366RB_VLAN_UNTAG_MASK;
vlan4k->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
vlan4k->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
return 0;
}
static int rtl8366rb_set_vlan_4k(struct realtek_priv *priv,
const struct rtl8366_vlan_4k *vlan4k)
{
u32 data[3];
int ret;
int i;
if (vlan4k->vid >= RTL8366RB_NUM_VIDS ||
vlan4k->member > RTL8366RB_VLAN_MEMBER_MASK ||
vlan4k->untag > RTL8366RB_VLAN_UNTAG_MASK ||
vlan4k->fid > RTL8366RB_FIDMAX)
return -EINVAL;
data[0] = vlan4k->vid & RTL8366RB_VLAN_VID_MASK;
data[1] = (vlan4k->member & RTL8366RB_VLAN_MEMBER_MASK) |
((vlan4k->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
RTL8366RB_VLAN_UNTAG_SHIFT);
data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
for (i = 0; i < 3; i++) {
ret = regmap_write(priv->map,
RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
data[i]);
if (ret)
return ret;
}
/* write table access control word */
ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
RTL8366RB_TABLE_VLAN_WRITE_CTRL);
return ret;
}
static int rtl8366rb_get_vlan_mc(struct realtek_priv *priv, u32 index,
struct rtl8366_vlan_mc *vlanmc)
{
u32 data[3];
int ret;
int i;
memset(vlanmc, '\0', sizeof(struct rtl8366_vlan_mc));
if (index >= RTL8366RB_NUM_VLANS)
return -EINVAL;
for (i = 0; i < 3; i++) {
ret = regmap_read(priv->map,
RTL8366RB_VLAN_MC_BASE(index) + i,
&data[i]);
if (ret)
return ret;
}
vlanmc->vid = data[0] & RTL8366RB_VLAN_VID_MASK;
vlanmc->priority = (data[0] >> RTL8366RB_VLAN_PRIORITY_SHIFT) &
RTL8366RB_VLAN_PRIORITY_MASK;
vlanmc->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
RTL8366RB_VLAN_UNTAG_MASK;
vlanmc->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
vlanmc->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
return 0;
}
static int rtl8366rb_set_vlan_mc(struct realtek_priv *priv, u32 index,
const struct rtl8366_vlan_mc *vlanmc)
{
u32 data[3];
int ret;
int i;
if (index >= RTL8366RB_NUM_VLANS ||
vlanmc->vid >= RTL8366RB_NUM_VIDS ||
vlanmc->priority > RTL8366RB_PRIORITYMAX ||
vlanmc->member > RTL8366RB_VLAN_MEMBER_MASK ||
vlanmc->untag > RTL8366RB_VLAN_UNTAG_MASK ||
vlanmc->fid > RTL8366RB_FIDMAX)
return -EINVAL;
data[0] = (vlanmc->vid & RTL8366RB_VLAN_VID_MASK) |
((vlanmc->priority & RTL8366RB_VLAN_PRIORITY_MASK) <<
RTL8366RB_VLAN_PRIORITY_SHIFT);
data[1] = (vlanmc->member & RTL8366RB_VLAN_MEMBER_MASK) |
((vlanmc->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
RTL8366RB_VLAN_UNTAG_SHIFT);
data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
for (i = 0; i < 3; i++) {
ret = regmap_write(priv->map,
RTL8366RB_VLAN_MC_BASE(index) + i,
data[i]);
if (ret)
return ret;
}
return 0;
}
static int rtl8366rb_get_mc_index(struct realtek_priv *priv, int port, int *val)
{
u32 data;
int ret;
if (port >= priv->num_ports)
return -EINVAL;
ret = regmap_read(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
&data);
if (ret)
return ret;
*val = (data >> RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)) &
RTL8366RB_PORT_VLAN_CTRL_MASK;
return 0;
}
static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index)
{
struct rtl8366rb *rb;
bool pvid_enabled;
int ret;
rb = priv->chip_data;
pvid_enabled = !!index;
if (port >= priv->num_ports || index >= RTL8366RB_NUM_VLANS)
return -EINVAL;
ret = regmap_update_bits(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
RTL8366RB_PORT_VLAN_CTRL_MASK <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
(index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
if (ret)
return ret;
rb->pvid_enabled[port] = pvid_enabled;
/* If VLAN filtering is enabled and PVID is also enabled, we must
* not drop any untagged or C-tagged frames. Make sure to update the
* filtering setting.
*/
if (dsa_port_is_vlan_filtering(dsa_to_port(priv->ds, port)))
ret = rtl8366rb_drop_untagged(priv, port, !pvid_enabled);
return ret;
}
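/* With the 4K VLAN table enabled any VID in the 4K range is valid;
 * otherwise only VIDs that fit in the 16 member configs are accepted.
 */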
static bool rtl8366rb_is_vlan_valid(struct realtek_priv *priv, unsigned int vlan)
{
unsigned int max = RTL8366RB_NUM_VLANS - 1;
if (priv->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
if (vlan > max)
return false;
return true;
}
static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
{
dev_dbg(priv->dev, "%s VLAN\n", enable ? "enable" : "disable");
return regmap_update_bits(priv->map,
RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
enable ? RTL8366RB_SGCR_EN_VLAN : 0);
}
static int rtl8366rb_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
dev_dbg(priv->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_EN_VLAN_4KTB,
enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
}
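/* PHY registers are accessed indirectly through the switch register
 * space: arm the PHY access control register for a read or write, then
 * address the PHY register at 0x8000 | (1 << (phy + offset)) | regnum.
 * Read data comes back through the PHY access data register.
 */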
static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
{
u32 val;
u32 reg;
int ret;
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
mutex_lock(&priv->map_lock);
ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_READ);
if (ret)
goto out;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
ret = regmap_write(priv->map_nolock, reg, 0);
if (ret) {
dev_err(priv->dev,
"failed to write PHY%d reg %04x @ %04x, ret %d\n",
phy, regnum, reg, ret);
goto out;
}
ret = regmap_read(priv->map_nolock, RTL8366RB_PHY_ACCESS_DATA_REG,
&val);
if (ret)
goto out;
ret = val;
dev_dbg(priv->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
phy, regnum, reg, val);
out:
mutex_unlock(&priv->map_lock);
return ret;
}
static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
u16 val)
{
u32 reg;
int ret;
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
mutex_lock(&priv->map_lock);
ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_WRITE);
if (ret)
goto out;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
dev_dbg(priv->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
phy, regnum, reg, val);
ret = regmap_write(priv->map_nolock, reg, val);
if (ret)
goto out;
out:
mutex_unlock(&priv->map_lock);
return ret;
}
static int rtl8366rb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
return rtl8366rb_phy_read(ds->priv, phy, regnum);
}
static int rtl8366rb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
u16 val)
{
return rtl8366rb_phy_write(ds->priv, phy, regnum, val);
}
static int rtl8366rb_reset_chip(struct realtek_priv *priv)
{
int timeout = 10;
u32 val;
int ret;
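/* Use the no-ack write here: presumably the chip starts resetting
 * before it can acknowledge the transaction, which is why the regular
 * regmap write path is bypassed (an assumption, not documented).
 */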
priv->write_reg_noack(priv, RTL8366RB_RESET_CTRL_REG,
RTL8366RB_CHIP_CTRL_RESET_HW);
do {
usleep_range(20000, 25000);
ret = regmap_read(priv->map, RTL8366RB_RESET_CTRL_REG, &val);
if (ret)
return ret;
if (!(val & RTL8366RB_CHIP_CTRL_RESET_HW))
break;
} while (--timeout);
if (!timeout) {
dev_err(priv->dev, "timeout waiting for the switch to reset\n");
return -EIO;
}
return 0;
}
static int rtl8366rb_detect(struct realtek_priv *priv)
{
struct device *dev = priv->dev;
int ret;
u32 val;
/* Detect device */
ret = regmap_read(priv->map, 0x5c, &val);
if (ret) {
dev_err(dev, "can't get chip ID (%d)\n", ret);
return ret;
}
switch (val) {
case 0x6027:
dev_info(dev, "found an RTL8366S switch\n");
dev_err(dev, "this switch is not yet supported, submit patches!\n");
return -ENODEV;
case 0x5937:
dev_info(dev, "found an RTL8366RB switch\n");
priv->cpu_port = RTL8366RB_PORT_NUM_CPU;
priv->num_ports = RTL8366RB_NUM_PORTS;
priv->num_vlan_mc = RTL8366RB_NUM_VLANS;
priv->mib_counters = rtl8366rb_mib_counters;
priv->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
break;
default:
dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n",
val);
break;
}
ret = rtl8366rb_reset_chip(priv);
if (ret)
return ret;
return 0;
}
static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
.phylink_get_caps = rtl8366rb_phylink_get_caps,
.phylink_mac_link_up = rtl8366rb_mac_link_up,
.phylink_mac_link_down = rtl8366rb_mac_link_down,
.get_strings = rtl8366_get_strings,
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
.port_bridge_join = rtl8366rb_port_bridge_join,
.port_bridge_leave = rtl8366rb_port_bridge_leave,
.port_vlan_filtering = rtl8366rb_vlan_filtering,
.port_vlan_add = rtl8366_vlan_add,
.port_vlan_del = rtl8366_vlan_del,
.port_enable = rtl8366rb_port_enable,
.port_disable = rtl8366rb_port_disable,
.port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
.port_bridge_flags = rtl8366rb_port_bridge_flags,
.port_stp_state_set = rtl8366rb_port_stp_state_set,
.port_fast_age = rtl8366rb_port_fast_age,
.port_change_mtu = rtl8366rb_change_mtu,
.port_max_mtu = rtl8366rb_max_mtu,
};
static const struct dsa_switch_ops rtl8366rb_switch_ops_mdio = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
.phy_read = rtl8366rb_dsa_phy_read,
.phy_write = rtl8366rb_dsa_phy_write,
.phylink_get_caps = rtl8366rb_phylink_get_caps,
.phylink_mac_link_up = rtl8366rb_mac_link_up,
.phylink_mac_link_down = rtl8366rb_mac_link_down,
.get_strings = rtl8366_get_strings,
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
.port_bridge_join = rtl8366rb_port_bridge_join,
.port_bridge_leave = rtl8366rb_port_bridge_leave,
.port_vlan_filtering = rtl8366rb_vlan_filtering,
.port_vlan_add = rtl8366_vlan_add,
.port_vlan_del = rtl8366_vlan_del,
.port_enable = rtl8366rb_port_enable,
.port_disable = rtl8366rb_port_disable,
.port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
.port_bridge_flags = rtl8366rb_port_bridge_flags,
.port_stp_state_set = rtl8366rb_port_stp_state_set,
.port_fast_age = rtl8366rb_port_fast_age,
.port_change_mtu = rtl8366rb_change_mtu,
.port_max_mtu = rtl8366rb_max_mtu,
};
static const struct realtek_ops rtl8366rb_ops = {
.detect = rtl8366rb_detect,
.get_vlan_mc = rtl8366rb_get_vlan_mc,
.set_vlan_mc = rtl8366rb_set_vlan_mc,
.get_vlan_4k = rtl8366rb_get_vlan_4k,
.set_vlan_4k = rtl8366rb_set_vlan_4k,
.get_mc_index = rtl8366rb_get_mc_index,
.set_mc_index = rtl8366rb_set_mc_index,
.get_mib_counter = rtl8366rb_get_mib_counter,
.is_vlan_valid = rtl8366rb_is_vlan_valid,
.enable_vlan = rtl8366rb_enable_vlan,
.enable_vlan4k = rtl8366rb_enable_vlan4k,
.phy_read = rtl8366rb_phy_read,
.phy_write = rtl8366rb_phy_write,
};
const struct realtek_variant rtl8366rb_variant = {
.ds_ops_smi = &rtl8366rb_switch_ops_smi,
.ds_ops_mdio = &rtl8366rb_switch_ops_mdio,
.ops = &rtl8366rb_ops,
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
.chip_data_sz = sizeof(struct rtl8366rb),
};
EXPORT_SYMBOL_GPL(rtl8366rb_variant);
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("Driver for RTL8366RB ethernet switch");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/realtek/rtl8366rb.c |
// SPDX-License-Identifier: GPL-2.0
/* Realtek SMI library helpers for the RTL8366x variants
* RTL8366RB and RTL8366S
*
* Copyright (C) 2017 Linus Walleij <[email protected]>
* Copyright (C) 2009-2010 Gabor Juhos <[email protected]>
* Copyright (C) 2010 Antti Seppälä <[email protected]>
* Copyright (C) 2010 Roman Yeryomin <[email protected]>
* Copyright (C) 2011 Colin Leitner <[email protected]>
*/
#include <linux/if_bridge.h>
#include <net/dsa.h>
#include "realtek.h"
int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used)
{
int ret;
int i;
*used = 0;
for (i = 0; i < priv->num_ports; i++) {
int index = 0;
ret = priv->ops->get_mc_index(priv, i, &index);
if (ret)
return ret;
if (mc_index == index) {
*used = 1;
break;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
/**
* rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
* @priv: the Realtek SMI device instance
* @vid: the VLAN ID to look up or allocate
* @vlanmc: the provided member config which will be filled in with a valid
* entry if successful
* Return: index of the member config or negative error number
*/
static int rtl8366_obtain_mc(struct realtek_priv *priv, int vid,
struct rtl8366_vlan_mc *vlanmc)
{
struct rtl8366_vlan_4k vlan4k;
int ret;
int i;
/* Try to find an existing member config entry for this VID */
for (i = 0; i < priv->num_vlan_mc; i++) {
ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
if (ret) {
dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
if (vid == vlanmc->vid)
return i;
}
/* We have no MC entry for this VID, try to find an empty one */
for (i = 0; i < priv->num_vlan_mc; i++) {
ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
if (ret) {
dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
if (vlanmc->vid == 0 && vlanmc->member == 0) {
/* Update the entry from the 4K table */
ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret) {
dev_err(priv->dev, "error looking for 4K VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
vlanmc->vid = vid;
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
if (ret) {
dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
dev_dbg(priv->dev, "created new MC at index %d for VID %d\n",
i, vid);
return i;
}
}
/* MC table is full, try to find an unused entry and replace it */
for (i = 0; i < priv->num_vlan_mc; i++) {
int used;
ret = rtl8366_mc_is_used(priv, i, &used);
if (ret)
return ret;
if (!used) {
/* Update the entry from the 4K table */
ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret)
return ret;
vlanmc->vid = vid;
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
if (ret) {
dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
dev_dbg(priv->dev, "recycled MC at index %i for VID %d\n",
i, vid);
return i;
}
}
dev_err(priv->dev, "all VLAN member configurations are in use\n");
return -ENOSPC;
}
int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
u32 untag, u32 fid)
{
struct rtl8366_vlan_mc vlanmc;
struct rtl8366_vlan_4k vlan4k;
int mc;
int ret;
if (!priv->ops->is_vlan_valid(priv, vid))
return -EINVAL;
dev_dbg(priv->dev,
"setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, member, untag);
/* Update the 4K table */
ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret)
return ret;
vlan4k.member |= member;
vlan4k.untag |= untag;
vlan4k.fid = fid;
ret = priv->ops->set_vlan_4k(priv, &vlan4k);
if (ret)
return ret;
dev_dbg(priv->dev,
"resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, vlan4k.member, vlan4k.untag);
/* Find or allocate a member config for this VID */
ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
if (ret < 0)
return ret;
mc = ret;
/* Update the MC entry */
vlanmc.member |= member;
vlanmc.untag |= untag;
vlanmc.fid = fid;
/* Commit updates to the MC entry */
ret = priv->ops->set_vlan_mc(priv, mc, &vlanmc);
if (ret)
dev_err(priv->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
mc, vid);
else
dev_dbg(priv->dev,
"resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
vid, vlanmc.member, vlanmc.untag);
return ret;
}
EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid)
{
struct rtl8366_vlan_mc vlanmc;
int mc;
int ret;
if (!priv->ops->is_vlan_valid(priv, vid))
return -EINVAL;
/* Find or allocate a member config for this VID */
ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
if (ret < 0)
return ret;
mc = ret;
ret = priv->ops->set_mc_index(priv, port, mc);
if (ret) {
dev_err(priv->dev, "set PVID: failed to set MC index %d for port %d\n",
mc, port);
return ret;
}
dev_dbg(priv->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
port, vid, mc);
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
int ret;
/* To enable 4k VLAN, ordinary VLAN must be enabled first,
* but if we disable 4k VLAN it is fine to leave ordinary
* VLAN enabled.
*/
if (enable) {
/* Make sure VLAN is ON */
ret = priv->ops->enable_vlan(priv, true);
if (ret)
return ret;
priv->vlan_enabled = true;
}
ret = priv->ops->enable_vlan4k(priv, enable);
if (ret)
return ret;
priv->vlan4k_enabled = enable;
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
{
int ret;
ret = priv->ops->enable_vlan(priv, enable);
if (ret)
return ret;
priv->vlan_enabled = enable;
/* If we turn VLAN off, make sure that we turn off
* 4k VLAN as well, if that happened to be on.
*/
if (!enable) {
priv->vlan4k_enabled = false;
ret = priv->ops->enable_vlan4k(priv, false);
}
return ret;
}
EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
int rtl8366_reset_vlan(struct realtek_priv *priv)
{
struct rtl8366_vlan_mc vlanmc;
int ret;
int i;
rtl8366_enable_vlan(priv, false);
rtl8366_enable_vlan4k(priv, false);
/* Clear the 16 VLAN member configurations */
vlanmc.vid = 0;
vlanmc.priority = 0;
vlanmc.member = 0;
vlanmc.untag = 0;
vlanmc.fid = 0;
for (i = 0; i < priv->num_vlan_mc; i++) {
ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
struct realtek_priv *priv = ds->priv;
u32 member = 0;
u32 untag = 0;
int ret;
if (!priv->ops->is_vlan_valid(priv, vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack, "VLAN ID not valid");
return -EINVAL;
}
/* Enable VLAN in the hardware
* FIXME: what's with this 4k business?
* Just rtl8366_enable_vlan() seems inconclusive.
*/
ret = rtl8366_enable_vlan4k(priv, true);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to enable VLAN 4K");
return ret;
}
dev_dbg(priv->dev, "add VLAN %d on port %d, %s, %s\n",
vlan->vid, port, untagged ? "untagged" : "tagged",
pvid ? "PVID" : "no PVID");
member |= BIT(port);
if (untagged)
untag |= BIT(port);
ret = rtl8366_set_vlan(priv, vlan->vid, member, untag, 0);
if (ret) {
dev_err(priv->dev, "failed to set up VLAN %04x", vlan->vid);
return ret;
}
if (!pvid)
return 0;
ret = rtl8366_set_pvid(priv, port, vlan->vid);
if (ret) {
dev_err(priv->dev, "failed to set PVID on port %d to VLAN %04x",
port, vlan->vid);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct realtek_priv *priv = ds->priv;
int ret, i;
dev_dbg(priv->dev, "del VLAN %d on port %d\n", vlan->vid, port);
for (i = 0; i < priv->num_vlan_mc; i++) {
struct rtl8366_vlan_mc vlanmc;
ret = priv->ops->get_vlan_mc(priv, i, &vlanmc);
if (ret)
return ret;
if (vlan->vid == vlanmc.vid) {
/* Remove this port from the VLAN */
vlanmc.member &= ~BIT(port);
vlanmc.untag &= ~BIT(port);
/*
* If no ports are members of this VLAN
* anymore then clear the whole member
* config so it can be reused.
*/
if (!vlanmc.member) {
vlanmc.vid = 0;
vlanmc.priority = 0;
vlanmc.fid = 0;
}
ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
if (ret) {
dev_err(priv->dev,
"failed to remove VLAN %04x\n",
vlan->vid);
return ret;
}
break;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
{
struct realtek_priv *priv = ds->priv;
struct rtl8366_mib_counter *mib;
int i;
if (port >= priv->num_ports)
return;
for (i = 0; i < priv->num_mib_counters; i++) {
mib = &priv->mib_counters[i];
strncpy(data + i * ETH_GSTRING_LEN,
mib->name, ETH_GSTRING_LEN);
}
}
EXPORT_SYMBOL_GPL(rtl8366_get_strings);
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct realtek_priv *priv = ds->priv;
/* We only support SS_STATS */
if (sset != ETH_SS_STATS)
return 0;
if (port >= priv->num_ports)
return -EINVAL;
return priv->num_mib_counters;
}
EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
struct realtek_priv *priv = ds->priv;
int i;
int ret;
if (port >= priv->num_ports)
return;
for (i = 0; i < priv->num_mib_counters; i++) {
struct rtl8366_mib_counter *mib;
u64 mibvalue = 0;
mib = &priv->mib_counters[i];
ret = priv->ops->get_mib_counter(priv, port, mib, &mibvalue);
if (ret) {
dev_err(priv->dev, "error reading MIB counter %s\n",
mib->name);
}
data[i] = mibvalue;
}
}
EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats);
| linux-master | drivers/net/dsa/realtek/rtl8366-core.c |
// SPDX-License-Identifier: GPL-2.0+
/* Realtek MDIO interface driver
*
* ASICs we intend to support with this driver:
*
* RTL8366 - The original version, apparently
* RTL8369 - Similar enough to have the same datasheet as RTL8366
* RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
* different register layout from the other two
* RTL8366S - Is this "RTL8366 super"?
* RTL8367 - Has an OpenWRT driver as well
* RTL8368S - Seems to be an alternative name for RTL8366RB
* RTL8370 - Also uses SMI
*
* Copyright (C) 2017 Linus Walleij <[email protected]>
* Copyright (C) 2010 Antti Seppälä <[email protected]>
* Copyright (C) 2010 Roman Yeryomin <[email protected]>
* Copyright (C) 2011 Colin Leitner <[email protected]>
* Copyright (C) 2009-2010 Gabor Juhos <[email protected]>
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/overflow.h>
#include <linux/regmap.h>
#include "realtek.h"
/* Read/write via mdiobus */
#define REALTEK_MDIO_CTRL0_REG 31
#define REALTEK_MDIO_START_REG 29
#define REALTEK_MDIO_CTRL1_REG 21
#define REALTEK_MDIO_ADDRESS_REG 23
#define REALTEK_MDIO_DATA_WRITE_REG 24
#define REALTEK_MDIO_DATA_READ_REG 25
#define REALTEK_MDIO_START_OP 0xFFFF
#define REALTEK_MDIO_ADDR_OP 0x000E
#define REALTEK_MDIO_READ_OP 0x0001
#define REALTEK_MDIO_WRITE_OP 0x0003
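/* Switch registers are accessed indirectly over MDIO: select the
 * "address" operation, write the register number, then either write
 * the data and issue a write operation, or issue a read operation and
 * read the result back from the data-read register.
 */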
static int realtek_mdio_write(void *ctx, u32 reg, u32 val)
{
struct realtek_priv *priv = ctx;
struct mii_bus *bus = priv->bus;
int ret;
mutex_lock(&bus->mdio_lock);
ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
if (ret)
goto out_unlock;
ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
if (ret)
goto out_unlock;
ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_DATA_WRITE_REG, val);
if (ret)
goto out_unlock;
ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_WRITE_OP);
out_unlock:
mutex_unlock(&bus->mdio_lock);
return ret;
}
static int realtek_mdio_read(void *ctx, u32 reg, u32 *val)
{
struct realtek_priv *priv = ctx;
struct mii_bus *bus = priv->bus;
int ret;
mutex_lock(&bus->mdio_lock);
ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
if (ret)
goto out_unlock;
ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
if (ret)
goto out_unlock;
ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_READ_OP);
if (ret)
goto out_unlock;
ret = bus->read(bus, priv->mdio_addr, REALTEK_MDIO_DATA_READ_REG);
if (ret >= 0) {
*val = ret;
ret = 0;
}
out_unlock:
mutex_unlock(&bus->mdio_lock);
return ret;
}
static void realtek_mdio_lock(void *ctx)
{
struct realtek_priv *priv = ctx;
mutex_lock(&priv->map_lock);
}
static void realtek_mdio_unlock(void *ctx)
{
struct realtek_priv *priv = ctx;
mutex_unlock(&priv->map_lock);
}
static const struct regmap_config realtek_mdio_regmap_config = {
.reg_bits = 10, /* A4..A0 R4..R0 */
.val_bits = 16,
.reg_stride = 1,
/* PHY regs are at 0x8000 */
.max_register = 0xffff,
.reg_format_endian = REGMAP_ENDIAN_BIG,
.reg_read = realtek_mdio_read,
.reg_write = realtek_mdio_write,
.cache_type = REGCACHE_NONE,
.lock = realtek_mdio_lock,
.unlock = realtek_mdio_unlock,
};
static const struct regmap_config realtek_mdio_nolock_regmap_config = {
.reg_bits = 10, /* A4..A0 R4..R0 */
.val_bits = 16,
.reg_stride = 1,
/* PHY regs are at 0x8000 */
.max_register = 0xffff,
.reg_format_endian = REGMAP_ENDIAN_BIG,
.reg_read = realtek_mdio_read,
.reg_write = realtek_mdio_write,
.cache_type = REGCACHE_NONE,
.disable_locking = true,
};
static int realtek_mdio_probe(struct mdio_device *mdiodev)
{
struct realtek_priv *priv;
struct device *dev = &mdiodev->dev;
const struct realtek_variant *var;
struct regmap_config rc;
struct device_node *np;
int ret;
var = of_device_get_match_data(dev);
if (!var)
return -EINVAL;
priv = devm_kzalloc(&mdiodev->dev,
size_add(sizeof(*priv), var->chip_data_sz),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
mutex_init(&priv->map_lock);
rc = realtek_mdio_regmap_config;
rc.lock_arg = priv;
priv->map = devm_regmap_init(dev, NULL, priv, &rc);
if (IS_ERR(priv->map)) {
ret = PTR_ERR(priv->map);
dev_err(dev, "regmap init failed: %d\n", ret);
return ret;
}
rc = realtek_mdio_nolock_regmap_config;
priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
if (IS_ERR(priv->map_nolock)) {
ret = PTR_ERR(priv->map_nolock);
dev_err(dev, "regmap init failed: %d\n", ret);
return ret;
}
priv->mdio_addr = mdiodev->addr;
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
priv->chip_data = (void *)priv + sizeof(*priv);
priv->clk_delay = var->clk_delay;
priv->cmd_read = var->cmd_read;
priv->cmd_write = var->cmd_write;
priv->ops = var->ops;
priv->write_reg_noack = realtek_mdio_write;
np = dev->of_node;
dev_set_drvdata(dev, priv);
/* TODO: if power is software controlled, set up any regulators here */
priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(priv->reset)) {
dev_err(dev, "failed to get RESET GPIO\n");
return PTR_ERR(priv->reset);
}
if (priv->reset) {
gpiod_set_value(priv->reset, 1);
dev_dbg(dev, "asserted RESET\n");
msleep(REALTEK_HW_STOP_DELAY);
gpiod_set_value(priv->reset, 0);
msleep(REALTEK_HW_START_DELAY);
dev_dbg(dev, "deasserted RESET\n");
}
ret = priv->ops->detect(priv);
if (ret) {
dev_err(dev, "unable to detect switch\n");
return ret;
}
priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
priv->ds->dev = dev;
priv->ds->num_ports = priv->num_ports;
priv->ds->priv = priv;
priv->ds->ops = var->ds_ops_mdio;
ret = dsa_register_switch(priv->ds);
if (ret) {
dev_err(priv->dev, "unable to register switch ret = %d\n", ret);
return ret;
}
return 0;
}
static void realtek_mdio_remove(struct mdio_device *mdiodev)
{
struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
dsa_unregister_switch(priv->ds);
/* leave the device reset asserted */
if (priv->reset)
gpiod_set_value(priv->reset, 1);
}
static void realtek_mdio_shutdown(struct mdio_device *mdiodev)
{
struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
dsa_switch_shutdown(priv->ds);
dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id realtek_mdio_of_match[] = {
#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
{ .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
#endif
#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
{ .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
#endif
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, realtek_mdio_of_match);
static struct mdio_driver realtek_mdio_driver = {
.mdiodrv.driver = {
.name = "realtek-mdio",
.of_match_table = realtek_mdio_of_match,
},
.probe = realtek_mdio_probe,
.remove = realtek_mdio_remove,
.shutdown = realtek_mdio_shutdown,
};
mdio_module_driver(realtek_mdio_driver);
MODULE_AUTHOR("Luiz Angelo Daros de Luca <[email protected]>");
MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via MDIO interface");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/realtek/realtek-mdio.c |
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Copyright 2021-2022 Innovative Advantage Inc.
*/
#include <linux/mfd/ocelot.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <soc/mscc/ocelot.h>
#include <soc/mscc/vsc7514_regs.h>
#include "felix.h"
#define VSC7514_NUM_PORTS 11
#define OCELOT_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
OCELOT_PORT_MODE_QSGMII)
static const u32 vsc7512_port_modes[VSC7514_NUM_PORTS] = {
OCELOT_PORT_MODE_INTERNAL,
OCELOT_PORT_MODE_INTERNAL,
OCELOT_PORT_MODE_INTERNAL,
OCELOT_PORT_MODE_INTERNAL,
OCELOT_PORT_MODE_SERDES,
OCELOT_PORT_MODE_SERDES,
OCELOT_PORT_MODE_SERDES,
OCELOT_PORT_MODE_SERDES,
OCELOT_PORT_MODE_SERDES,
OCELOT_PORT_MODE_SGMII,
OCELOT_PORT_MODE_SERDES,
};
static const struct ocelot_ops ocelot_ext_ops = {
.reset = ocelot_reset,
.wm_enc = ocelot_wm_enc,
.wm_dec = ocelot_wm_dec,
.wm_stat = ocelot_wm_stat,
.port_to_netdev = felix_port_to_netdev,
.netdev_to_port = felix_netdev_to_port,
};
static const char * const vsc7512_resource_names[TARGET_MAX] = {
[SYS] = "sys",
[REW] = "rew",
[S0] = "s0",
[S1] = "s1",
[S2] = "s2",
[QS] = "qs",
[QSYS] = "qsys",
[ANA] = "ana",
};
static const struct felix_info vsc7512_info = {
.resource_names = vsc7512_resource_names,
.regfields = vsc7514_regfields,
.map = vsc7514_regmap,
.ops = &ocelot_ext_ops,
.vcap = vsc7514_vcap_props,
.num_mact_rows = 1024,
.num_ports = VSC7514_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
.port_modes = vsc7512_port_modes,
.phylink_mac_config = ocelot_phylink_mac_config,
.configure_serdes = ocelot_port_configure_serdes,
};
static int ocelot_ext_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dsa_switch *ds;
struct ocelot *ocelot;
struct felix *felix;
int err;
felix = kzalloc(sizeof(*felix), GFP_KERNEL);
if (!felix)
return -ENOMEM;
dev_set_drvdata(dev, felix);
ocelot = &felix->ocelot;
ocelot->dev = dev;
ocelot->num_flooding_pgids = 1;
felix->info = &vsc7512_info;
ds = kzalloc(sizeof(*ds), GFP_KERNEL);
if (!ds) {
err = -ENOMEM;
dev_err_probe(dev, err, "Failed to allocate DSA switch\n");
goto err_free_felix;
}
ds->dev = dev;
ds->num_ports = felix->info->num_ports;
ds->num_tx_queues = felix->info->num_tx_queues;
ds->ops = &felix_switch_ops;
ds->priv = ocelot;
felix->ds = ds;
felix->tag_proto = DSA_TAG_PROTO_OCELOT;
err = dsa_register_switch(ds);
if (err) {
dev_err_probe(dev, err, "Failed to register DSA switch\n");
goto err_free_ds;
}
return 0;
err_free_ds:
kfree(ds);
err_free_felix:
kfree(felix);
return err;
}
static int ocelot_ext_remove(struct platform_device *pdev)
{
struct felix *felix = dev_get_drvdata(&pdev->dev);
if (!felix)
return 0;
dsa_unregister_switch(felix->ds);
kfree(felix->ds);
kfree(felix);
return 0;
}
static void ocelot_ext_shutdown(struct platform_device *pdev)
{
struct felix *felix = dev_get_drvdata(&pdev->dev);
if (!felix)
return;
dsa_switch_shutdown(felix->ds);
dev_set_drvdata(&pdev->dev, NULL);
}
static const struct of_device_id ocelot_ext_switch_of_match[] = {
{ .compatible = "mscc,vsc7512-switch" },
{ },
};
MODULE_DEVICE_TABLE(of, ocelot_ext_switch_of_match);
static struct platform_driver ocelot_ext_switch_driver = {
.driver = {
.name = "ocelot-ext-switch",
.of_match_table = ocelot_ext_switch_of_match,
},
.probe = ocelot_ext_probe,
.remove = ocelot_ext_remove,
.shutdown = ocelot_ext_shutdown,
};
module_platform_driver(ocelot_ext_switch_driver);
MODULE_DESCRIPTION("External Ocelot Switch driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(MFD_OCELOT);
| linux-master | drivers/net/dsa/ocelot/ocelot_ext.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019-2021 NXP
*
* This is an umbrella module for all network switches that are
* register-compatible with Ocelot and that perform I/O to their host CPU
* through an NPI (Node Processor Interface) Ethernet port.
*/
#include <uapi/linux/if_bridge.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/8021q.h>
#include <linux/dsa/ocelot.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"
/* Translate the DSA database API into the ocelot switch library API,
* which uses VID 0 for all ports that aren't part of a bridge,
* and expects the bridge_dev to be NULL in that case.
*/
static struct net_device *felix_classify_db(struct dsa_db db)
{
switch (db.type) {
case DSA_DB_PORT:
case DSA_DB_LAG:
return NULL;
case DSA_DB_BRIDGE:
return db.bridge.dev;
default:
return ERR_PTR(-EOPNOTSUPP);
}
}
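/* Resolve the CPU port used by this DSA master: for a LAG master this
 * is the LAG ID, otherwise the index of the master's CPU port.
 */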
static int felix_cpu_port_for_master(struct dsa_switch *ds,
struct net_device *master)
{
struct ocelot *ocelot = ds->priv;
struct dsa_port *cpu_dp;
int lag;
if (netif_is_lag_master(master)) {
mutex_lock(&ocelot->fwd_domain_lock);
lag = ocelot_bond_get_id(ocelot, master);
mutex_unlock(&ocelot->fwd_domain_lock);
return lag;
}
cpu_dp = master->dsa_ptr;
return cpu_dp->index;
}
/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
* the tagger can perform RX source port identification.
*/
static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
int upstream, u16 vid)
{
struct ocelot_vcap_filter *outer_tagging_rule;
struct ocelot *ocelot = ds->priv;
unsigned long cookie;
int key_length, err;
key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
GFP_KERNEL);
if (!outer_tagging_rule)
return -ENOMEM;
cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
outer_tagging_rule->prio = 1;
outer_tagging_rule->id.cookie = cookie;
outer_tagging_rule->id.tc_offload = false;
outer_tagging_rule->block_id = VCAP_ES0;
outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
outer_tagging_rule->lookup = 0;
outer_tagging_rule->ingress_port.value = port;
outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
outer_tagging_rule->egress_port.value = upstream;
outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
outer_tagging_rule->action.tag_a_vid_sel = 1;
outer_tagging_rule->action.vid_a_val = vid;
err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
if (err)
kfree(outer_tagging_rule);
return err;
}
static int felix_tag_8021q_vlan_del_rx(struct dsa_switch *ds, int port,
int upstream, u16 vid)
{
struct ocelot_vcap_filter *outer_tagging_rule;
struct ocelot_vcap_block *block_vcap_es0;
struct ocelot *ocelot = ds->priv;
unsigned long cookie;
block_vcap_es0 = &ocelot->block[VCAP_ES0];
cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
cookie, false);
if (!outer_tagging_rule)
return -ENOENT;
return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
}
/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2
* rules for steering those tagged packets towards the correct destination port
*/
static int felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port,
u16 vid)
{
struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
unsigned long cpu_ports = dsa_cpu_ports(ds);
struct ocelot *ocelot = ds->priv;
unsigned long cookie;
int err;
untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!untagging_rule)
return -ENOMEM;
redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!redirect_rule) {
kfree(untagging_rule);
return -ENOMEM;
}
cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
untagging_rule->ingress_port_mask = cpu_ports;
untagging_rule->vlan.vid.value = vid;
untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
untagging_rule->prio = 1;
untagging_rule->id.cookie = cookie;
untagging_rule->id.tc_offload = false;
untagging_rule->block_id = VCAP_IS1;
untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
untagging_rule->lookup = 0;
untagging_rule->action.vlan_pop_cnt_ena = true;
untagging_rule->action.vlan_pop_cnt = 1;
untagging_rule->action.pag_override_mask = 0xff;
untagging_rule->action.pag_val = port;
err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
if (err) {
kfree(untagging_rule);
kfree(redirect_rule);
return err;
}
cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
redirect_rule->ingress_port_mask = cpu_ports;
redirect_rule->pag = port;
redirect_rule->prio = 1;
redirect_rule->id.cookie = cookie;
redirect_rule->id.tc_offload = false;
redirect_rule->block_id = VCAP_IS2;
redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
redirect_rule->lookup = 0;
redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
redirect_rule->action.port_mask = BIT(port);
err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
if (err) {
ocelot_vcap_filter_del(ocelot, untagging_rule);
kfree(redirect_rule);
return err;
}
return 0;
}
static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid)
{
struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
struct ocelot_vcap_block *block_vcap_is1;
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot *ocelot = ds->priv;
unsigned long cookie;
int err;
block_vcap_is1 = &ocelot->block[VCAP_IS1];
block_vcap_is2 = &ocelot->block[VCAP_IS2];
cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
cookie, false);
if (!untagging_rule)
return -ENOENT;
err = ocelot_vcap_filter_del(ocelot, untagging_rule);
if (err)
return err;
cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
cookie, false);
if (!redirect_rule)
return -ENOENT;
return ocelot_vcap_filter_del(ocelot, redirect_rule);
}
static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
u16 flags)
{
struct dsa_port *cpu_dp;
int err;
/* tag_8021q.c assumes we are implementing this via port VLAN
* membership, which we aren't. So we don't need to add any VCAP filter
* for the CPU port.
*/
if (!dsa_is_user_port(ds, port))
return 0;
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
if (err)
return err;
}
err = felix_tag_8021q_vlan_add_tx(ds, port, vid);
if (err)
goto add_tx_failed;
return 0;
add_tx_failed:
dsa_switch_for_each_cpu_port(cpu_dp, ds)
felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
return err;
}
static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
struct dsa_port *cpu_dp;
int err;
if (!dsa_is_user_port(ds, port))
return 0;
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
err = felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
if (err)
return err;
}
err = felix_tag_8021q_vlan_del_tx(ds, port, vid);
if (err)
goto del_tx_failed;
return 0;
del_tx_failed:
dsa_switch_for_each_cpu_port(cpu_dp, ds)
felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
return err;
}
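/* Resolve the CPU port that should receive a trapped packet: use the
 * CPU port of the first port in the trap's ingress mask, assuming all
 * ports in the mask are served by the same CPU port.
 */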
static int felix_trap_get_cpu_port(struct dsa_switch *ds,
const struct ocelot_vcap_filter *trap)
{
struct dsa_port *dp;
int first_port;
if (WARN_ON(!trap->ingress_port_mask))
return -1;
first_port = __ffs(trap->ingress_port_mask);
dp = dsa_to_port(ds, first_port);
return dp->cpu_dp->index;
}
/* On switches with no extraction IRQ wired, trapped packets need to be
* replicated over Ethernet as well, otherwise we'd get no notification of
* their arrival when using the ocelot-8021q tagging protocol.
*/
static int felix_update_trapping_destinations(struct dsa_switch *ds,
bool using_tag_8021q)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
enum ocelot_mask_mode mask_mode;
unsigned long port_mask;
bool cpu_copy_ena;
int err;
if (!felix->info->quirk_no_xtr_irq)
return 0;
/* We are sure that "cpu" was found, otherwise
* dsa_tree_setup_default_cpu() would have failed earlier.
*/
block_vcap_is2 = &ocelot->block[VCAP_IS2];
/* Make sure all traps are set up for that destination */
list_for_each_entry(trap, &block_vcap_is2->rules, list) {
if (!trap->is_trap)
continue;
/* Figure out the current trapping destination */
if (using_tag_8021q) {
/* Redirect to the tag_8021q CPU port. If timestamps
* are necessary, also copy trapped packets to the CPU
* port module.
*/
mask_mode = OCELOT_MASK_MODE_REDIRECT;
port_mask = BIT(felix_trap_get_cpu_port(ds, trap));
cpu_copy_ena = !!trap->take_ts;
} else {
/* Trap packets only to the CPU port module, which is
* redirected to the NPI port (the DSA CPU port)
*/
mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
port_mask = 0;
cpu_copy_ena = true;
}
if (trap->action.mask_mode == mask_mode &&
trap->action.port_mask == port_mask &&
trap->action.cpu_copy_ena == cpu_copy_ena)
continue;
trap->action.mask_mode = mask_mode;
trap->action.port_mask = port_mask;
trap->action.cpu_copy_ena = cpu_copy_ena;
err = ocelot_vcap_filter_replace(ocelot, trap);
if (err)
return err;
}
return 0;
}
/* The CPU port module is connected to the Node Processor Interface (NPI). This
* is the mode through which frames can be injected from and extracted to an
* external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
* running Linux, and this forms a DSA setup together with the enetc or fman
* DSA master.
*/
static void felix_npi_port_init(struct ocelot *ocelot, int port)
{
ocelot->npi = port;
ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
QSYS_EXT_CPU_CFG);
/* NPI port Injection/Extraction configuration */
ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
ocelot->npi_xtr_prefix);
ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
ocelot->npi_inj_prefix);
/* Disable transmission of pause frames */
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
}
static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
{
/* Restore hardware defaults */
int unused_port = ocelot->num_phys_ports + 2;
ocelot->npi = -1;
ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
QSYS_EXT_CPU_CFG);
ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
OCELOT_TAG_PREFIX_DISABLED);
ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
OCELOT_TAG_PREFIX_DISABLED);
/* Enable transmission of pause frames */
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
}
static int felix_tag_npi_setup(struct dsa_switch *ds)
{
struct dsa_port *dp, *first_cpu_dp = NULL;
struct ocelot *ocelot = ds->priv;
dsa_switch_for_each_user_port(dp, ds) {
if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) {
dev_err(ds->dev, "Multiple NPI ports not supported\n");
return -EINVAL;
}
first_cpu_dp = dp->cpu_dp;
}
if (!first_cpu_dp)
return -EINVAL;
felix_npi_port_init(ocelot, first_cpu_dp->index);
return 0;
}
static void felix_tag_npi_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
felix_npi_port_deinit(ocelot, ocelot->npi);
}
static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
return BIT(ocelot->num_phys_ports);
}
static int felix_tag_npi_change_master(struct dsa_switch *ds, int port,
struct net_device *master,
struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
struct ocelot *ocelot = ds->priv;
if (netif_is_lag_master(master)) {
NL_SET_ERR_MSG_MOD(extack,
"LAG DSA master only supported using ocelot-8021q");
return -EOPNOTSUPP;
}
/* Changing the NPI port breaks user ports still assigned to the old
* one, so only allow it while they're down, and don't allow them to
* come back up until they're all changed to the new one.
*/
dsa_switch_for_each_user_port(other_dp, ds) {
struct net_device *slave = other_dp->slave;
if (other_dp != dp && (slave->flags & IFF_UP) &&
dsa_port_to_master(other_dp) != master) {
NL_SET_ERR_MSG_MOD(extack,
"Cannot change while old master still has users");
return -EOPNOTSUPP;
}
}
felix_npi_port_deinit(ocelot, ocelot->npi);
felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master));
return 0;
}
static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
.setup = felix_tag_npi_setup,
.teardown = felix_tag_npi_teardown,
.get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask,
.change_master = felix_tag_npi_change_master,
};
/* Alternatively to using the NPI functionality, that same hardware MAC
* connected internally to the enetc or fman DSA master can be configured to
* use the software-defined tag_8021q frame format. As far as the hardware is
* concerned, it thinks it is a "dumb switch" - the queues of the CPU port
* module are now disconnected from it, but can still be accessed through
* register-based MMIO.
*/
static int felix_tag_8021q_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct dsa_port *dp;
int err;
err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
if (err)
return err;
dsa_switch_for_each_cpu_port(dp, ds)
ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index);
dsa_switch_for_each_user_port(dp, ds)
ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index,
dp->cpu_dp->index);
dsa_switch_for_each_available_port(dp, ds)
/* This overwrites ocelot_init():
* Do not forward BPDU frames to the CPU port module,
* for 2 reasons:
* - When these packets are injected from the tag_8021q
* CPU port, we want them to go out, not loop back
* into the system.
* - STP traffic ingressing on a user port should go to
* the tag_8021q CPU port, not to the hardware CPU
* port module.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
/* The ownership of the CPU port module's queues might have just been
* transferred to the tag_8021q tagger from the NPI-based tagger.
* So there might still be all sorts of crap in the queues. On the
* other hand, the MMIO-based matching of PTP frames is very brittle,
* so we need to be careful that there are no extra frames to be
* dequeued over MMIO, since we would never know to discard them.
*/
ocelot_drain_cpu_queue(ocelot, 0);
return 0;
}
static void felix_tag_8021q_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct dsa_port *dp;
dsa_switch_for_each_available_port(dp, ds)
/* Restore the logic from ocelot_init:
* do not forward BPDU frames to the front ports.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
ANA_PORT_CPU_FWD_BPDU_CFG,
dp->index);
dsa_switch_for_each_user_port(dp, ds)
ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index);
dsa_switch_for_each_cpu_port(dp, ds)
ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);
dsa_tag_8021q_unregister(ds);
}
static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
{
return dsa_cpu_ports(ds);
}
static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port,
struct net_device *master,
struct netlink_ext_ack *extack)
{
int cpu = felix_cpu_port_for_master(ds, master);
struct ocelot *ocelot = ds->priv;
ocelot_port_unassign_dsa_8021q_cpu(ocelot, port);
ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu);
return felix_update_trapping_destinations(ds, true);
}
static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = {
.setup = felix_tag_8021q_setup,
.teardown = felix_tag_8021q_teardown,
.get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask,
.change_master = felix_tag_8021q_change_master,
};
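/* Enable (or disable) flooding of unknown unicast, unknown multicast and
 * broadcast traffic towards the host ports given in the mask, by updating the
 * PGID_UC, PGID_MC, PGID_MCIPV4, PGID_MCIPV6 and PGID_BC port groups.
 */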
static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
bool uc, bool mc, bool bc)
{
struct ocelot *ocelot = ds->priv;
unsigned long val;
val = uc ? mask : 0;
ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC);
val = mc ? mask : 0;
ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC);
ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4);
ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6);
val = bc ? mask : 0;
ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_BC);
}
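/* Withdraw host flooding from the CPU ports of the old tagging protocol (if
 * any) and re-apply the ports' current flood settings towards the CPU ports
 * of the new one.
 */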
static void
felix_migrate_host_flood(struct dsa_switch *ds,
const struct felix_tag_proto_ops *proto_ops,
const struct felix_tag_proto_ops *old_proto_ops)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
unsigned long mask;
if (old_proto_ops) {
mask = old_proto_ops->get_host_fwd_mask(ds);
felix_set_host_flood(ds, mask, false, false, false);
}
mask = proto_ops->get_host_fwd_mask(ds);
felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
!!felix->host_flood_mc_mask, true);
}
static int felix_migrate_mdbs(struct dsa_switch *ds,
const struct felix_tag_proto_ops *proto_ops,
const struct felix_tag_proto_ops *old_proto_ops)
{
struct ocelot *ocelot = ds->priv;
unsigned long from, to;
if (!old_proto_ops)
return 0;
from = old_proto_ops->get_host_fwd_mask(ds);
to = proto_ops->get_host_fwd_mask(ds);
return ocelot_migrate_mdbs(ocelot, from, to);
}
/* Configure the shared hardware resources for a transition between
* @old_proto_ops and @proto_ops.
* Manual migration is needed because as far as DSA is concerned, no change of
* the CPU port is taking place here, just of the tagging protocol.
*/
static int
felix_tag_proto_setup_shared(struct dsa_switch *ds,
const struct felix_tag_proto_ops *proto_ops,
const struct felix_tag_proto_ops *old_proto_ops)
{
bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops);
int err;
err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops);
if (err)
return err;
felix_update_trapping_destinations(ds, using_tag_8021q);
felix_migrate_host_flood(ds, proto_ops, old_proto_ops);
return 0;
}
/* This always leaves the switch in a consistent state, because although the
* tag_8021q setup can fail, the NPI setup can't. So either the change is made,
* or the restoration is guaranteed to work.
*/
static int felix_change_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
const struct felix_tag_proto_ops *old_proto_ops, *proto_ops;
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
int err;
switch (proto) {
case DSA_TAG_PROTO_SEVILLE:
case DSA_TAG_PROTO_OCELOT:
proto_ops = &felix_tag_npi_proto_ops;
break;
case DSA_TAG_PROTO_OCELOT_8021Q:
proto_ops = &felix_tag_8021q_proto_ops;
break;
default:
return -EPROTONOSUPPORT;
}
old_proto_ops = felix->tag_proto_ops;
if (proto_ops == old_proto_ops)
return 0;
err = proto_ops->setup(ds);
if (err)
goto setup_failed;
err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops);
if (err)
goto setup_shared_failed;
if (old_proto_ops)
old_proto_ops->teardown(ds);
felix->tag_proto_ops = proto_ops;
felix->tag_proto = proto;
return 0;
setup_shared_failed:
proto_ops->teardown(ds);
setup_failed:
return err;
}
static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
return felix->tag_proto;
}
static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
bool uc, bool mc)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
unsigned long mask;
if (uc)
felix->host_flood_uc_mask |= BIT(port);
else
felix->host_flood_uc_mask &= ~BIT(port);
if (mc)
felix->host_flood_mc_mask |= BIT(port);
else
felix->host_flood_mc_mask &= ~BIT(port);
mask = felix->tag_proto_ops->get_host_fwd_mask(ds);
felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
!!felix->host_flood_mc_mask, true);
}
static int felix_port_change_master(struct dsa_switch *ds, int port,
struct net_device *master,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
return felix->tag_proto_ops->change_master(ds, port, master, extack);
}
static int felix_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
struct ocelot *ocelot = ds->priv;
ocelot_set_ageing_time(ocelot, ageing_time);
return 0;
}
static void felix_port_fast_age(struct dsa_switch *ds, int port)
{
struct ocelot *ocelot = ds->priv;
int err;
err = ocelot_mact_flush(ocelot, port);
if (err)
dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n",
port, ERR_PTR(err));
}
static int felix_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct ocelot *ocelot = ds->priv;
return ocelot_fdb_dump(ocelot, port, cb, data);
}
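/* FDB entries requested on a CPU port are installed towards the CPU port
 * group (PGID_CPU). Standalone host addresses (not associated with a bridge)
 * are skipped if an identical entry already exists in another database.
 */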
static int felix_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct net_device *bridge_dev = felix_classify_db(db);
struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
if (IS_ERR(bridge_dev))
return PTR_ERR(bridge_dev);
if (dsa_port_is_cpu(dp) && !bridge_dev &&
dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
return 0;
if (dsa_port_is_cpu(dp))
port = PGID_CPU;
return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
}
static int felix_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct net_device *bridge_dev = felix_classify_db(db);
struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
if (IS_ERR(bridge_dev))
return PTR_ERR(bridge_dev);
if (dsa_port_is_cpu(dp) && !bridge_dev &&
dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
return 0;
if (dsa_port_is_cpu(dp))
port = PGID_CPU;
return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
}
static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
if (IS_ERR(bridge_dev))
return PTR_ERR(bridge_dev);
return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev);
}
static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
if (IS_ERR(bridge_dev))
return PTR_ERR(bridge_dev);
return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev);
}
static int felix_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
if (IS_ERR(bridge_dev))
return PTR_ERR(bridge_dev);
if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
dsa_mdb_present_in_other_db(ds, port, mdb, db))
return 0;
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev);
}
static int felix_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
if (IS_ERR(bridge_dev))
return PTR_ERR(bridge_dev);
if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
dsa_mdb_present_in_other_db(ds, port, mdb, db))
return 0;
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev);
}
static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
struct ocelot *ocelot = ds->priv;
return ocelot_bridge_stp_state_set(ocelot, port, state);
}
static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags val,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
return ocelot_port_pre_bridge_flags(ocelot, port, val);
}
static int felix_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags val,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
ocelot_port_bridge_flags(ocelot, port, val);
return 0;
}
static int felix_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge, bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num,
extack);
}
static void felix_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct ocelot *ocelot = ds->priv;
ocelot_port_bridge_leave(ocelot, port, bridge.dev);
}
static int felix_lag_join(struct dsa_switch *ds, int port,
struct dsa_lag lag,
struct netdev_lag_upper_info *info,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
int err;
err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack);
if (err)
return err;
/* Update the logical LAG port that serves as tag_8021q CPU port */
if (!dsa_is_cpu_port(ds, port))
return 0;
return felix_port_change_master(ds, port, lag.dev, extack);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
struct dsa_lag lag)
{
struct ocelot *ocelot = ds->priv;
ocelot_port_lag_leave(ocelot, port, lag.dev);
/* Update the logical LAG port that serves as tag_8021q CPU port */
if (!dsa_is_cpu_port(ds, port))
return 0;
return felix_port_change_master(ds, port, lag.dev, NULL);
}
static int felix_lag_change(struct dsa_switch *ds, int port)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);
return 0;
}
static int felix_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
u16 flags = vlan->flags;
/* Ocelot switches copy frames as-is to the CPU, so the flags:
* egress-untagged or not, pvid or not, make no difference. This
* behavior is already better than what DSA just tries to approximate
* when it installs the VLAN with the same flags on the CPU port.
* Just accept any configuration, and don't let ocelot deny installing
* multiple native VLANs on the NPI port, because the switch doesn't
* look at the port tag settings towards the NPI interface anyway.
*/
if (port == ocelot->npi)
return 0;
return ocelot_vlan_prepare(ocelot, port, vlan->vid,
flags & BRIDGE_VLAN_INFO_PVID,
flags & BRIDGE_VLAN_INFO_UNTAGGED,
extack);
}
static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
}
static int felix_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
u16 flags = vlan->flags;
int err;
err = felix_vlan_prepare(ds, port, vlan, extack);
if (err)
return err;
return ocelot_vlan_add(ocelot, port, vlan->vid,
flags & BRIDGE_VLAN_INFO_PVID,
flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
static int felix_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ocelot *ocelot = ds->priv;
return ocelot_vlan_del(ocelot, port, vlan->vid);
}
static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct ocelot *ocelot = ds->priv;
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD |
MAC_2500FD;
__set_bit(ocelot->ports[port]->phy_mode,
config->supported_interfaces);
}
static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
if (felix->info->phylink_mac_config)
felix->info->phylink_mac_config(ocelot, port, mode, state);
}
static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
int port,
phy_interface_t iface)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
struct phylink_pcs *pcs = NULL;
if (felix->pcs && felix->pcs[port])
pcs = felix->pcs[port];
return pcs;
}
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int link_an_mode,
phy_interface_t interface)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix;
felix = ocelot_to_felix(ocelot);
ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
felix->info->quirks);
}
static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int link_an_mode,
phy_interface_t interface,
struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
interface, speed, duplex, tx_pause, rx_pause,
felix->info->quirks);
if (felix->info->port_sched_speed_set)
felix->info->port_sched_speed_set(ocelot, port, speed);
}
static int felix_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
if (!dsa_port_is_user(dp))
return 0;
if (ocelot->npi >= 0) {
struct net_device *master = dsa_port_to_master(dp);
if (felix_cpu_port_for_master(ds, master) != ocelot->npi) {
dev_err(ds->dev, "Multiple masters are not allowed\n");
return -EINVAL;
}
}
return 0;
}
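/* Enable QoS classification based on the PCP and DEI bits of the VLAN tag,
 * and program a 1:1 mapping: PCP to QoS class, DEI to drop precedence.
 */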
static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
int i;
ocelot_rmw_gix(ocelot,
ANA_PORT_QOS_CFG_QOS_PCP_ENA,
ANA_PORT_QOS_CFG_QOS_PCP_ENA,
ANA_PORT_QOS_CFG,
port);
for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
ocelot_rmw_ix(ocelot,
(ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
ANA_PORT_PCP_DEI_MAP,
port, i);
}
}
static void felix_get_stats64(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *stats)
{
struct ocelot *ocelot = ds->priv;
ocelot_port_get_stats64(ocelot, port, stats);
}
static void felix_get_pause_stats(struct dsa_switch *ds, int port,
struct ethtool_pause_stats *pause_stats)
{
struct ocelot *ocelot = ds->priv;
ocelot_port_get_pause_stats(ocelot, port, pause_stats);
}
static void felix_get_rmon_stats(struct dsa_switch *ds, int port,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges)
{
struct ocelot *ocelot = ds->priv;
ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges);
}
static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
struct ocelot *ocelot = ds->priv;
ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats);
}
static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_mac_stats *mac_stats)
{
struct ocelot *ocelot = ds->priv;
ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats);
}
static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_phy_stats *phy_stats)
{
struct ocelot *ocelot = ds->priv;
ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
}
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
struct ocelot *ocelot = ds->priv;
return ocelot_get_strings(ocelot, port, stringset, data);
}
static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
struct ocelot *ocelot = ds->priv;
ocelot_get_ethtool_stats(ocelot, port, data);
}
static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct ocelot *ocelot = ds->priv;
return ocelot_get_sset_count(ocelot, port, sset);
}
static int felix_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *info)
{
struct ocelot *ocelot = ds->priv;
return ocelot_get_ts_info(ocelot, port, info);
}
static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
[PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL,
[PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
[PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
[PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
[PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX,
[PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
};
static int felix_validate_phy_mode(struct felix *felix, int port,
phy_interface_t phy_mode)
{
u32 modes = felix->info->port_modes[port];
if (felix_phy_match_table[phy_mode] & modes)
return 0;
return -EOPNOTSUPP;
}
static int felix_parse_ports_node(struct felix *felix,
struct device_node *ports_node,
phy_interface_t *port_phy_modes)
{
struct device *dev = felix->ocelot.dev;
struct device_node *child;
for_each_available_child_of_node(ports_node, child) {
phy_interface_t phy_mode;
u32 port;
int err;
/* Get switch port number from DT */
if (of_property_read_u32(child, "reg", &port) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
of_node_put(child);
return -ENODEV;
}
/* Get PHY mode from DT */
err = of_get_phy_mode(child, &phy_mode);
if (err) {
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
port);
of_node_put(child);
return -ENODEV;
}
err = felix_validate_phy_mode(felix, port, phy_mode);
if (err < 0) {
dev_info(dev, "Unsupported PHY mode %s on port %d\n",
phy_modes(phy_mode), port);
/* Leave port_phy_modes[port] = 0, which is also
* PHY_INTERFACE_MODE_NA. This will perform a
* best-effort to bring up as many ports as possible.
*/
continue;
}
port_phy_modes[port] = phy_mode;
}
return 0;
}
static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
{
struct device *dev = felix->ocelot.dev;
struct device_node *switch_node;
struct device_node *ports_node;
int err;
switch_node = dev->of_node;
ports_node = of_get_child_by_name(switch_node, "ports");
if (!ports_node)
ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
if (!ports_node) {
dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
return -ENODEV;
}
err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
of_node_put(ports_node);
return err;
}
static struct regmap *felix_request_regmap_by_name(struct felix *felix,
const char *resource_name)
{
struct ocelot *ocelot = &felix->ocelot;
struct resource res;
int i;
/* In an MFD configuration, regmaps are registered directly to the
* parent device before the child devices are probed, so there is no
* need to initialize a new one.
*/
if (!felix->info->resources)
return dev_get_regmap(ocelot->dev->parent, resource_name);
for (i = 0; i < felix->info->num_resources; i++) {
if (strcmp(resource_name, felix->info->resources[i].name))
continue;
memcpy(&res, &felix->info->resources[i], sizeof(res));
res.start += felix->switch_base;
res.end += felix->switch_base;
return ocelot_regmap_init(ocelot, &res);
}
return ERR_PTR(-ENOENT);
}
static struct regmap *felix_request_regmap(struct felix *felix,
enum ocelot_target target)
{
const char *resource_name = felix->info->resource_names[target];
/* If the driver didn't provide a resource name for the target,
* the resource is optional.
*/
if (!resource_name)
return NULL;
return felix_request_regmap_by_name(felix, resource_name);
}
static struct regmap *felix_request_port_regmap(struct felix *felix, int port)
{
char resource_name[32];
sprintf(resource_name, "port%d", port);
return felix_request_regmap_by_name(felix, resource_name);
}
static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
struct ocelot *ocelot = &felix->ocelot;
phy_interface_t *port_phy_modes;
struct regmap *target;
int port, i, err;
ocelot->num_phys_ports = num_phys_ports;
ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
sizeof(struct ocelot_port *), GFP_KERNEL);
if (!ocelot->ports)
return -ENOMEM;
ocelot->map = felix->info->map;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
ocelot->vcap_pol.base = felix->info->vcap_pol_base;
ocelot->vcap_pol.max = felix->info->vcap_pol_max;
ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
ocelot->ops = felix->info->ops;
ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
ocelot->devlink = felix->ds->devlink;
port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
GFP_KERNEL);
if (!port_phy_modes)
return -ENOMEM;
err = felix_parse_dt(felix, port_phy_modes);
if (err) {
kfree(port_phy_modes);
return err;
}
for (i = 0; i < TARGET_MAX; i++) {
target = felix_request_regmap(felix, i);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
"Failed to map device memory space: %pe\n",
target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
ocelot->targets[i] = target;
}
err = ocelot_regfields_init(ocelot, felix->info->regfields);
if (err) {
dev_err(ocelot->dev, "failed to init reg fields map\n");
kfree(port_phy_modes);
return err;
}
for (port = 0; port < num_phys_ports; port++) {
struct ocelot_port *ocelot_port;
ocelot_port = devm_kzalloc(ocelot->dev,
sizeof(struct ocelot_port),
GFP_KERNEL);
if (!ocelot_port) {
dev_err(ocelot->dev,
"failed to allocate port memory\n");
kfree(port_phy_modes);
return -ENOMEM;
}
target = felix_request_port_regmap(felix, port);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
"Failed to map memory space for port %d: %pe\n",
port, target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
ocelot_port->phy_mode = port_phy_modes[port];
ocelot_port->ocelot = ocelot;
ocelot_port->target = target;
ocelot_port->index = port;
ocelot->ports[port] = ocelot_port;
}
kfree(port_phy_modes);
if (felix->info->mdio_bus_alloc) {
err = felix->info->mdio_bus_alloc(ocelot);
if (err < 0)
return err;
}
return 0;
}
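/* A frame requesting a two-step TX timestamp could not be injected; remove
 * its clone from the port's tx_skbs list so that a future timestamp FIFO
 * entry is not matched against it.
 */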
static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
struct sk_buff *skb)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
struct sk_buff *skb_match = NULL, *skb_tmp;
unsigned long flags;
if (!clone)
return;
spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);
skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
if (skb != clone)
continue;
__skb_unlink(skb, &ocelot_port->tx_skbs);
skb_match = skb;
break;
}
spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);
WARN_ONCE(!skb_match,
"Could not find skb clone in TX timestamping list\n");
}
#define work_to_xmit_work(w) \
container_of((w), struct felix_deferred_xmit_work, work)
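/* Deferred transmit handler for the ocelot-8021q tagger: frames which need a
 * rewriter operation (PTP timestamping) cannot carry it over Ethernet, so
 * they are injected here into the CPU port module over register-based MMIO,
 * busy-waiting for an injection group to become available.
 */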
static void felix_port_deferred_xmit(struct kthread_work *work)
{
struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
struct dsa_switch *ds = xmit_work->dp->ds;
struct sk_buff *skb = xmit_work->skb;
u32 rew_op = ocelot_ptp_rew_op(skb);
struct ocelot *ocelot = ds->priv;
int port = xmit_work->dp->index;
int retries = 10;
do {
if (ocelot_can_inject(ocelot, 0))
break;
cpu_relax();
} while (--retries);
if (!retries) {
dev_err(ocelot->dev, "port %d failed to inject skb\n",
port);
ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
kfree_skb(skb);
return;
}
ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
consume_skb(skb);
kfree(xmit_work);
}
static int felix_connect_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
struct ocelot_8021q_tagger_data *tagger_data;
switch (proto) {
case DSA_TAG_PROTO_OCELOT_8021Q:
tagger_data = ocelot_8021q_tagger_data(ds);
tagger_data->xmit_work_fn = felix_port_deferred_xmit;
return 0;
case DSA_TAG_PROTO_OCELOT:
case DSA_TAG_PROTO_SEVILLE:
return 0;
default:
return -EPROTONOSUPPORT;
}
}
static int felix_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
struct dsa_port *dp;
int err;
err = felix_init_structs(felix, ds->num_ports);
if (err)
return err;
if (ocelot->targets[HSIO])
ocelot_pll5_init(ocelot);
err = ocelot_init(ocelot);
if (err)
goto out_mdiobus_free;
if (ocelot->ptp) {
err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
if (err) {
dev_err(ocelot->dev,
"Timestamp initialization failed\n");
ocelot->ptp = 0;
}
}
dsa_switch_for_each_available_port(dp, ds) {
ocelot_init_port(ocelot, dp->index);
if (felix->info->configure_serdes)
felix->info->configure_serdes(ocelot, dp->index,
dp->dn);
/* Set the default QoS Classification based on PCP and DEI
* bits of vlan tag.
*/
felix_port_qos_map_init(ocelot, dp->index);
}
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_deinit_ports;
/* The initial tag protocol is NPI which won't fail during initial
* setup, there's no real point in checking for errors.
*/
felix_change_tag_protocol(ds, felix->tag_proto);
ds->mtu_enforcement_ingress = true;
ds->assisted_learning_on_cpu_port = true;
ds->fdb_isolation = true;
ds->max_num_bridges = ds->num_ports;
return 0;
out_deinit_ports:
dsa_switch_for_each_available_port(dp, ds)
ocelot_deinit_port(ocelot, dp->index);
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
out_mdiobus_free:
if (felix->info->mdio_bus_free)
felix->info->mdio_bus_free(ocelot);
return err;
}
static void felix_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
struct dsa_port *dp;
rtnl_lock();
if (felix->tag_proto_ops)
felix->tag_proto_ops->teardown(ds);
rtnl_unlock();
dsa_switch_for_each_available_port(dp, ds)
ocelot_deinit_port(ocelot, dp->index);
ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
if (felix->info->mdio_bus_free)
felix->info->mdio_bus_free(ocelot);
}
static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct ocelot *ocelot = ds->priv;
return ocelot_hwstamp_get(ocelot, port, ifr);
}
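/* ocelot_hwstamp_set() may install or remove PTP traps, so make sure their
 * trapping destination matches the tagging protocol currently in use.
 */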
static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
bool using_tag_8021q;
int err;
err = ocelot_hwstamp_set(ocelot, port, ifr);
if (err)
return err;
using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
return felix_update_trapping_destinations(ds, using_tag_8021q);
}
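/* With the ocelot-8021q protocol and the "no XTR IRQ" quirk, PTP frames are
 * also trapped to the CPU port module. Poll it over MMIO, keep only frames
 * which classify as PTP event messages and inject them into the stack; the
 * rest were already delivered over Ethernet and are dropped as duplicates.
 */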
static bool felix_check_xtr_pkt(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
int err = 0, grp = 0;
if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
return false;
if (!felix->info->quirk_no_xtr_irq)
return false;
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
unsigned int type;
err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
if (err)
goto out;
/* We trap to the CPU port module all PTP frames, but
* felix_rxtstamp() only gets called for event frames.
* So we need to avoid sending duplicate general
* message frames by running a second BPF classifier
* here and dropping those.
*/
__skb_push(skb, ETH_HLEN);
type = ptp_classify_raw(skb);
__skb_pull(skb, ETH_HLEN);
if (type == PTP_CLASS_NONE) {
kfree_skb(skb);
continue;
}
netif_rx(skb);
}
out:
if (err < 0) {
dev_err_ratelimited(ocelot->dev,
"Error during packet extraction: %pe\n",
ERR_PTR(err));
ocelot_drain_cpu_queue(ocelot, 0);
}
return true;
}
static bool felix_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
{
u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo;
struct skb_shared_hwtstamps *shhwtstamps;
struct ocelot *ocelot = ds->priv;
struct timespec64 ts;
u32 tstamp_hi;
u64 tstamp;
switch (type & PTP_CLASS_PMASK) {
case PTP_CLASS_L2:
if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L2))
return false;
break;
case PTP_CLASS_IPV4:
case PTP_CLASS_IPV6:
if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L4))
return false;
break;
}
/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
* for RX timestamping. Then free it, and poll for its copy through
* MMIO in the CPU port module, and inject that into the stack from
* ocelot_xtr_poll().
*/
if (felix_check_xtr_pkt(ocelot)) {
kfree_skb(skb);
return true;
}
ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
tstamp_hi = tstamp >> 32;
if ((tstamp & 0xffffffff) < tstamp_lo)
tstamp_hi--;
tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;
shhwtstamps = skb_hwtstamps(skb);
memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamps->hwtstamp = tstamp;
return false;
}
static void felix_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb)
{
struct ocelot *ocelot = ds->priv;
struct sk_buff *clone = NULL;
if (!ocelot->ptp)
return;
if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
dev_err_ratelimited(ds->dev,
"port %d delivering skb without TX timestamp\n",
port);
return;
}
if (clone)
OCELOT_SKB_CB(skb)->clone = clone;
}
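/* The tc-taprio guard bands depend on the maximum frame length, so they must
 * be recomputed when the MTU changes on a port with an active schedule.
 */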
static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct ocelot *ocelot = ds->priv;
struct ocelot_port *ocelot_port = ocelot->ports[port];
ocelot_port_set_maxlen(ocelot, port, new_mtu);
mutex_lock(&ocelot->fwd_domain_lock);
if (ocelot_port->taprio && ocelot->ops->tas_guard_bands_update)
ocelot->ops->tas_guard_bands_update(ocelot, port);
mutex_unlock(&ocelot->fwd_domain_lock);
return 0;
}
static int felix_get_max_mtu(struct dsa_switch *ds, int port)
{
struct ocelot *ocelot = ds->priv;
return ocelot_get_max_mtu(ocelot, port);
}
static int felix_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
bool using_tag_8021q;
int err;
err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
if (err)
return err;
using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
return felix_update_trapping_destinations(ds, using_tag_8021q);
}
static int felix_cls_flower_del(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
struct ocelot *ocelot = ds->priv;
return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
}
static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
struct ocelot *ocelot = ds->priv;
return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
}
static int felix_port_policer_add(struct dsa_switch *ds, int port,
struct dsa_mall_policer_tc_entry *policer)
{
struct ocelot *ocelot = ds->priv;
struct ocelot_policer pol = {
.rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
.burst = policer->burst,
};
return ocelot_port_policer_add(ocelot, port, &pol);
}
static void felix_port_policer_del(struct dsa_switch *ds, int port)
{
struct ocelot *ocelot = ds->priv;
ocelot_port_policer_del(ocelot, port);
}
static int felix_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port,
ingress, extack);
}
static void felix_port_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
struct ocelot *ocelot = ds->priv;
ocelot_port_mirror_del(ocelot, port, mirror->ingress);
}
static int felix_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
if (felix->info->port_setup_tc)
return felix->info->port_setup_tc(ds, port, type, type_data);
else
return -EOPNOTSUPP;
}
static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
}
static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
threshold_type, extack);
}
static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
p_threshold);
}
static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 pool_index,
u32 threshold, struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
threshold, extack);
}
static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
pool_type, p_pool_index,
p_threshold);
}
static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
pool_type, pool_index, threshold,
extack);
}
static int felix_sb_occ_snapshot(struct dsa_switch *ds,
unsigned int sb_index)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_occ_snapshot(ocelot, sb_index);
}
static int felix_sb_occ_max_clear(struct dsa_switch *ds,
unsigned int sb_index)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_occ_max_clear(ocelot, sb_index);
}
static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 pool_index,
u32 *p_cur, u32 *p_max)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
p_cur, p_max);
}
static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
pool_type, p_cur, p_max);
}
static int felix_mrp_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_mrp *mrp)
{
struct ocelot *ocelot = ds->priv;
return ocelot_mrp_add(ocelot, port, mrp);
}
static int felix_mrp_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_mrp *mrp)
{
struct ocelot *ocelot = ds->priv;
return ocelot_mrp_del(ocelot, port, mrp);
}
static int
felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot *ocelot = ds->priv;
return ocelot_mrp_add_ring_role(ocelot, port, mrp);
}
static int
felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot *ocelot = ds->priv;
return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}
static int felix_port_get_default_prio(struct dsa_switch *ds, int port)
{
struct ocelot *ocelot = ds->priv;
return ocelot_port_get_default_prio(ocelot, port);
}
static int felix_port_set_default_prio(struct dsa_switch *ds, int port,
u8 prio)
{
struct ocelot *ocelot = ds->priv;
return ocelot_port_set_default_prio(ocelot, port, prio);
}
static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
{
struct ocelot *ocelot = ds->priv;
return ocelot_port_get_dscp_prio(ocelot, port, dscp);
}
static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
u8 prio)
{
struct ocelot *ocelot = ds->priv;
return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio);
}
static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
u8 prio)
{
struct ocelot *ocelot = ds->priv;
return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
}
static int felix_get_mm(struct dsa_switch *ds, int port,
struct ethtool_mm_state *state)
{
struct ocelot *ocelot = ds->priv;
return ocelot_port_get_mm(ocelot, port, state);
}
static int felix_set_mm(struct dsa_switch *ds, int port,
struct ethtool_mm_cfg *cfg,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
return ocelot_port_set_mm(ocelot, port, cfg, extack);
}
static void felix_get_mm_stats(struct dsa_switch *ds, int port,
struct ethtool_mm_stats *stats)
{
struct ocelot *ocelot = ds->priv;
ocelot_port_get_mm_stats(ocelot, port, stats);
}
const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.change_tag_protocol = felix_change_tag_protocol,
.connect_tag_protocol = felix_connect_tag_protocol,
.setup = felix_setup,
.teardown = felix_teardown,
.set_ageing_time = felix_set_ageing_time,
.get_mm = felix_get_mm,
.set_mm = felix_set_mm,
.get_mm_stats = felix_get_mm_stats,
.get_stats64 = felix_get_stats64,
.get_pause_stats = felix_get_pause_stats,
.get_rmon_stats = felix_get_rmon_stats,
.get_eth_ctrl_stats = felix_get_eth_ctrl_stats,
.get_eth_mac_stats = felix_get_eth_mac_stats,
.get_eth_phy_stats = felix_get_eth_phy_stats,
.get_strings = felix_get_strings,
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
.phylink_get_caps = felix_phylink_get_caps,
.phylink_mac_config = felix_phylink_mac_config,
.phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
.port_enable = felix_port_enable,
.port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
.port_fdb_del = felix_fdb_del,
.lag_fdb_add = felix_lag_fdb_add,
.lag_fdb_del = felix_lag_fdb_del,
.port_mdb_add = felix_mdb_add,
.port_mdb_del = felix_mdb_del,
.port_pre_bridge_flags = felix_pre_bridge_flags,
.port_bridge_flags = felix_bridge_flags,
.port_bridge_join = felix_bridge_join,
.port_bridge_leave = felix_bridge_leave,
.port_lag_join = felix_lag_join,
.port_lag_leave = felix_lag_leave,
.port_lag_change = felix_lag_change,
.port_stp_state_set = felix_bridge_stp_state_set,
.port_vlan_filtering = felix_vlan_filtering,
.port_vlan_add = felix_vlan_add,
.port_vlan_del = felix_vlan_del,
.port_hwtstamp_get = felix_hwtstamp_get,
.port_hwtstamp_set = felix_hwtstamp_set,
.port_rxtstamp = felix_rxtstamp,
.port_txtstamp = felix_txtstamp,
.port_change_mtu = felix_change_mtu,
.port_max_mtu = felix_get_max_mtu,
.port_policer_add = felix_port_policer_add,
.port_policer_del = felix_port_policer_del,
.port_mirror_add = felix_port_mirror_add,
.port_mirror_del = felix_port_mirror_del,
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
.port_setup_tc = felix_port_setup_tc,
.devlink_sb_pool_get = felix_sb_pool_get,
.devlink_sb_pool_set = felix_sb_pool_set,
.devlink_sb_port_pool_get = felix_sb_port_pool_get,
.devlink_sb_port_pool_set = felix_sb_port_pool_set,
.devlink_sb_tc_pool_bind_get = felix_sb_tc_pool_bind_get,
.devlink_sb_tc_pool_bind_set = felix_sb_tc_pool_bind_set,
.devlink_sb_occ_snapshot = felix_sb_occ_snapshot,
.devlink_sb_occ_max_clear = felix_sb_occ_max_clear,
.devlink_sb_occ_port_pool_get = felix_sb_occ_port_pool_get,
.devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
.port_mrp_add = felix_mrp_add,
.port_mrp_del = felix_mrp_del,
.port_mrp_add_ring_role = felix_mrp_add_ring_role,
.port_mrp_del_ring_role = felix_mrp_del_ring_role,
.tag_8021q_vlan_add = felix_tag_8021q_vlan_add,
.tag_8021q_vlan_del = felix_tag_8021q_vlan_del,
.port_get_default_prio = felix_port_get_default_prio,
.port_set_default_prio = felix_port_set_default_prio,
.port_get_dscp_prio = felix_port_get_dscp_prio,
.port_add_dscp_prio = felix_port_add_dscp_prio,
.port_del_dscp_prio = felix_port_del_dscp_prio,
.port_set_host_flood = felix_port_set_host_flood,
.port_change_master = felix_port_change_master,
};
EXPORT_SYMBOL_GPL(felix_switch_ops);
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
struct felix *felix = ocelot_to_felix(ocelot);
struct dsa_switch *ds = felix->ds;
if (!dsa_is_user_port(ds, port))
return NULL;
return dsa_to_port(ds, port)->slave;
}
EXPORT_SYMBOL_GPL(felix_port_to_netdev);
int felix_netdev_to_port(struct net_device *dev)
{
struct dsa_port *dp;
dp = dsa_port_from_netdev(dev);
if (IS_ERR(dp))
return -EINVAL;
return dp->index;
}
EXPORT_SYMBOL_GPL(felix_netdev_to_port);
MODULE_DESCRIPTION("Felix DSA library");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/ocelot/felix.c |
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Copyright 2017 Microsemi Corporation
* Copyright 2018-2019 NXP
*/
#include <linux/fsl/enetc_mdio.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
#include <net/tc_act/tc_gate.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/ocelot.h>
#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/time.h>
#include "felix.h"
#define VSC9959_NUM_PORTS 6
#define VSC9959_TAS_GCL_ENTRY_MAX 63
#define VSC9959_TAS_MIN_GATE_LEN_NS 33
#define VSC9959_VCAP_POLICER_BASE 63
#define VSC9959_VCAP_POLICER_MAX 383
#define VSC9959_SWITCH_PCI_BAR 4
#define VSC9959_IMDIO_PCI_BAR 0
#define VSC9959_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
OCELOT_PORT_MODE_QSGMII | \
OCELOT_PORT_MODE_1000BASEX | \
OCELOT_PORT_MODE_2500BASEX | \
OCELOT_PORT_MODE_USXGMII)
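/* Ports 0-3 are external SerDes ports; ports 4 and 5 are internal ports which
 * face the enetc DSA master on the NXP LS1028A.
 */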
static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = {
VSC9959_PORT_MODE_SERDES,
VSC9959_PORT_MODE_SERDES,
VSC9959_PORT_MODE_SERDES,
VSC9959_PORT_MODE_SERDES,
OCELOT_PORT_MODE_INTERNAL,
OCELOT_PORT_MODE_INTERNAL,
};
static const u32 vsc9959_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x0089a0),
REG(ANA_VLANMASK, 0x0089a4),
REG_RESERVED(ANA_PORT_B_DOMAIN),
REG(ANA_ANAGEFIL, 0x0089ac),
REG(ANA_ANEVENTS, 0x0089b0),
REG(ANA_STORMLIMIT_BURST, 0x0089b4),
REG(ANA_STORMLIMIT_CFG, 0x0089b8),
REG(ANA_ISOLATED_PORTS, 0x0089c8),
REG(ANA_COMMUNITY_PORTS, 0x0089cc),
REG(ANA_AUTOAGE, 0x0089d0),
REG(ANA_MACTOPTIONS, 0x0089d4),
REG(ANA_LEARNDISC, 0x0089d8),
REG(ANA_AGENCTRL, 0x0089dc),
REG(ANA_MIRRORPORTS, 0x0089e0),
REG(ANA_EMIRRORPORTS, 0x0089e4),
REG(ANA_FLOODING, 0x0089e8),
REG(ANA_FLOODING_IPMC, 0x008a08),
REG(ANA_SFLOW_CFG, 0x008a0c),
REG(ANA_PORT_MODE, 0x008a28),
REG(ANA_CUT_THRU_CFG, 0x008a48),
REG(ANA_PGID_PGID, 0x008400),
REG(ANA_TABLES_ANMOVED, 0x007f1c),
REG(ANA_TABLES_MACHDATA, 0x007f20),
REG(ANA_TABLES_MACLDATA, 0x007f24),
REG(ANA_TABLES_STREAMDATA, 0x007f28),
REG(ANA_TABLES_MACACCESS, 0x007f2c),
REG(ANA_TABLES_MACTINDX, 0x007f30),
REG(ANA_TABLES_VLANACCESS, 0x007f34),
REG(ANA_TABLES_VLANTIDX, 0x007f38),
REG(ANA_TABLES_ISDXACCESS, 0x007f3c),
REG(ANA_TABLES_ISDXTIDX, 0x007f40),
REG(ANA_TABLES_ENTRYLIM, 0x007f00),
REG(ANA_TABLES_PTP_ID_HIGH, 0x007f44),
REG(ANA_TABLES_PTP_ID_LOW, 0x007f48),
REG(ANA_TABLES_STREAMACCESS, 0x007f4c),
REG(ANA_TABLES_STREAMTIDX, 0x007f50),
REG(ANA_TABLES_SEQ_HISTORY, 0x007f54),
REG(ANA_TABLES_SEQ_MASK, 0x007f58),
REG(ANA_TABLES_SFID_MASK, 0x007f5c),
REG(ANA_TABLES_SFIDACCESS, 0x007f60),
REG(ANA_TABLES_SFIDTIDX, 0x007f64),
REG(ANA_MSTI_STATE, 0x008600),
REG(ANA_OAM_UPM_LM_CNT, 0x008000),
REG(ANA_SG_ACCESS_CTRL, 0x008a64),
REG(ANA_SG_CONFIG_REG_1, 0x007fb0),
REG(ANA_SG_CONFIG_REG_2, 0x007fb4),
REG(ANA_SG_CONFIG_REG_3, 0x007fb8),
REG(ANA_SG_CONFIG_REG_4, 0x007fbc),
REG(ANA_SG_CONFIG_REG_5, 0x007fc0),
REG(ANA_SG_GCL_GS_CONFIG, 0x007f80),
REG(ANA_SG_GCL_TI_CONFIG, 0x007f90),
REG(ANA_SG_STATUS_REG_1, 0x008980),
REG(ANA_SG_STATUS_REG_2, 0x008984),
REG(ANA_SG_STATUS_REG_3, 0x008988),
REG(ANA_PORT_VLAN_CFG, 0x007800),
REG(ANA_PORT_DROP_CFG, 0x007804),
REG(ANA_PORT_QOS_CFG, 0x007808),
REG(ANA_PORT_VCAP_CFG, 0x00780c),
REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007810),
REG(ANA_PORT_VCAP_S2_CFG, 0x00781c),
REG(ANA_PORT_PCP_DEI_MAP, 0x007820),
REG(ANA_PORT_CPU_FWD_CFG, 0x007860),
REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007864),
REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007868),
REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00786c),
REG(ANA_PORT_PORT_CFG, 0x007870),
REG(ANA_PORT_POL_CFG, 0x007874),
REG(ANA_PORT_PTP_CFG, 0x007878),
REG(ANA_PORT_PTP_DLY1_CFG, 0x00787c),
REG(ANA_PORT_PTP_DLY2_CFG, 0x007880),
REG(ANA_PORT_SFID_CFG, 0x007884),
REG(ANA_PFC_PFC_CFG, 0x008800),
REG_RESERVED(ANA_PFC_PFC_TIMER),
REG_RESERVED(ANA_IPT_OAM_MEP_CFG),
REG_RESERVED(ANA_IPT_IPT),
REG_RESERVED(ANA_PPT_PPT),
REG_RESERVED(ANA_FID_MAP_FID_MAP),
REG(ANA_AGGR_CFG, 0x008a68),
REG(ANA_CPUQ_CFG, 0x008a6c),
REG_RESERVED(ANA_CPUQ_CFG2),
REG(ANA_CPUQ_8021_CFG, 0x008a74),
REG(ANA_DSCP_CFG, 0x008ab4),
REG(ANA_DSCP_REWR_CFG, 0x008bb4),
REG(ANA_VCAP_RNG_TYPE_CFG, 0x008bf4),
REG(ANA_VCAP_RNG_VAL_CFG, 0x008c14),
REG_RESERVED(ANA_VRAP_CFG),
REG_RESERVED(ANA_VRAP_HDR_DATA),
REG_RESERVED(ANA_VRAP_HDR_MASK),
REG(ANA_DISCARD_CFG, 0x008c40),
REG(ANA_FID_CFG, 0x008c44),
REG(ANA_POL_PIR_CFG, 0x004000),
REG(ANA_POL_CIR_CFG, 0x004004),
REG(ANA_POL_MODE_CFG, 0x004008),
REG(ANA_POL_PIR_STATE, 0x00400c),
REG(ANA_POL_CIR_STATE, 0x004010),
REG_RESERVED(ANA_POL_STATE),
REG(ANA_POL_FLOWC, 0x008c48),
REG(ANA_POL_HYST, 0x008cb4),
REG_RESERVED(ANA_POL_MISC_CFG),
};
static const u32 vsc9959_qs_regmap[] = {
REG(QS_XTR_GRP_CFG, 0x000000),
REG(QS_XTR_RD, 0x000008),
REG(QS_XTR_FRM_PRUNING, 0x000010),
REG(QS_XTR_FLUSH, 0x000018),
REG(QS_XTR_DATA_PRESENT, 0x00001c),
REG(QS_XTR_CFG, 0x000020),
REG(QS_INJ_GRP_CFG, 0x000024),
REG(QS_INJ_WR, 0x00002c),
REG(QS_INJ_CTRL, 0x000034),
REG(QS_INJ_STATUS, 0x00003c),
REG(QS_INJ_ERR, 0x000040),
REG_RESERVED(QS_INH_DBG),
};
static const u32 vsc9959_vcap_regmap[] = {
/* VCAP_CORE_CFG */
REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
REG(VCAP_CORE_MV_CFG, 0x000004),
/* VCAP_CORE_CACHE */
REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
REG(VCAP_CACHE_MASK_DAT, 0x000108),
REG(VCAP_CACHE_ACTION_DAT, 0x000208),
REG(VCAP_CACHE_CNT_DAT, 0x000308),
REG(VCAP_CACHE_TG_DAT, 0x000388),
/* VCAP_CONST */
REG(VCAP_CONST_VCAP_VER, 0x000398),
REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
REG(VCAP_CONST_CORE_CNT, 0x0003b8),
REG(VCAP_CONST_IF_CNT, 0x0003bc),
};
static const u32 vsc9959_qsys_regmap[] = {
REG(QSYS_PORT_MODE, 0x00f460),
REG(QSYS_SWITCH_PORT_MODE, 0x00f480),
REG(QSYS_STAT_CNT_CFG, 0x00f49c),
REG(QSYS_EEE_CFG, 0x00f4a0),
REG(QSYS_EEE_THRES, 0x00f4b8),
REG(QSYS_IGR_NO_SHARING, 0x00f4bc),
REG(QSYS_EGR_NO_SHARING, 0x00f4c0),
REG(QSYS_SW_STATUS, 0x00f4c4),
REG(QSYS_EXT_CPU_CFG, 0x00f4e0),
REG_RESERVED(QSYS_PAD_CFG),
REG(QSYS_CPU_GROUP_MAP, 0x00f4e8),
REG_RESERVED(QSYS_QMAP),
REG_RESERVED(QSYS_ISDX_SGRP),
REG_RESERVED(QSYS_TIMED_FRAME_ENTRY),
REG(QSYS_TFRM_MISC, 0x00f50c),
REG(QSYS_TFRM_PORT_DLY, 0x00f510),
REG(QSYS_TFRM_TIMER_CFG_1, 0x00f514),
REG(QSYS_TFRM_TIMER_CFG_2, 0x00f518),
REG(QSYS_TFRM_TIMER_CFG_3, 0x00f51c),
REG(QSYS_TFRM_TIMER_CFG_4, 0x00f520),
REG(QSYS_TFRM_TIMER_CFG_5, 0x00f524),
REG(QSYS_TFRM_TIMER_CFG_6, 0x00f528),
REG(QSYS_TFRM_TIMER_CFG_7, 0x00f52c),
REG(QSYS_TFRM_TIMER_CFG_8, 0x00f530),
REG(QSYS_RED_PROFILE, 0x00f534),
REG(QSYS_RES_QOS_MODE, 0x00f574),
REG(QSYS_RES_CFG, 0x00c000),
REG(QSYS_RES_STAT, 0x00c004),
REG(QSYS_EGR_DROP_MODE, 0x00f578),
REG(QSYS_EQ_CTRL, 0x00f57c),
REG_RESERVED(QSYS_EVENTS_CORE),
REG(QSYS_QMAXSDU_CFG_0, 0x00f584),
REG(QSYS_QMAXSDU_CFG_1, 0x00f5a0),
REG(QSYS_QMAXSDU_CFG_2, 0x00f5bc),
REG(QSYS_QMAXSDU_CFG_3, 0x00f5d8),
REG(QSYS_QMAXSDU_CFG_4, 0x00f5f4),
REG(QSYS_QMAXSDU_CFG_5, 0x00f610),
REG(QSYS_QMAXSDU_CFG_6, 0x00f62c),
REG(QSYS_QMAXSDU_CFG_7, 0x00f648),
REG(QSYS_PREEMPTION_CFG, 0x00f664),
REG(QSYS_CIR_CFG, 0x000000),
REG(QSYS_EIR_CFG, 0x000004),
REG(QSYS_SE_CFG, 0x000008),
REG(QSYS_SE_DWRR_CFG, 0x00000c),
REG_RESERVED(QSYS_SE_CONNECT),
REG(QSYS_SE_DLB_SENSE, 0x000040),
REG(QSYS_CIR_STATE, 0x000044),
REG(QSYS_EIR_STATE, 0x000048),
REG_RESERVED(QSYS_SE_STATE),
REG(QSYS_HSCH_MISC_CFG, 0x00f67c),
REG(QSYS_TAG_CONFIG, 0x00f680),
REG(QSYS_TAS_PARAM_CFG_CTRL, 0x00f698),
REG(QSYS_PORT_MAX_SDU, 0x00f69c),
REG(QSYS_PARAM_CFG_REG_1, 0x00f440),
REG(QSYS_PARAM_CFG_REG_2, 0x00f444),
REG(QSYS_PARAM_CFG_REG_3, 0x00f448),
REG(QSYS_PARAM_CFG_REG_4, 0x00f44c),
REG(QSYS_PARAM_CFG_REG_5, 0x00f450),
REG(QSYS_GCL_CFG_REG_1, 0x00f454),
REG(QSYS_GCL_CFG_REG_2, 0x00f458),
REG(QSYS_PARAM_STATUS_REG_1, 0x00f400),
REG(QSYS_PARAM_STATUS_REG_2, 0x00f404),
REG(QSYS_PARAM_STATUS_REG_3, 0x00f408),
REG(QSYS_PARAM_STATUS_REG_4, 0x00f40c),
REG(QSYS_PARAM_STATUS_REG_5, 0x00f410),
REG(QSYS_PARAM_STATUS_REG_6, 0x00f414),
REG(QSYS_PARAM_STATUS_REG_7, 0x00f418),
REG(QSYS_PARAM_STATUS_REG_8, 0x00f41c),
REG(QSYS_PARAM_STATUS_REG_9, 0x00f420),
REG(QSYS_GCL_STATUS_REG_1, 0x00f424),
REG(QSYS_GCL_STATUS_REG_2, 0x00f428),
};
static const u32 vsc9959_rew_regmap[] = {
REG(REW_PORT_VLAN_CFG, 0x000000),
REG(REW_TAG_CFG, 0x000004),
REG(REW_PORT_CFG, 0x000008),
REG(REW_DSCP_CFG, 0x00000c),
REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
REG(REW_PTP_CFG, 0x000050),
REG(REW_PTP_DLY1_CFG, 0x000054),
REG(REW_RED_TAG_CFG, 0x000058),
REG(REW_DSCP_REMAP_DP1_CFG, 0x000410),
REG(REW_DSCP_REMAP_CFG, 0x000510),
REG_RESERVED(REW_STAT_CFG),
REG_RESERVED(REW_REW_STICKY),
REG_RESERVED(REW_PPT),
};
static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
REG(SYS_COUNT_RX_256_511, 0x000030),
REG(SYS_COUNT_RX_512_1023, 0x000034),
REG(SYS_COUNT_RX_1024_1526, 0x000038),
REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
REG(SYS_COUNT_RX_PAUSE, 0x000040),
REG(SYS_COUNT_RX_CONTROL, 0x000044),
REG(SYS_COUNT_RX_LONGS, 0x000048),
REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_RX_ASSEMBLY_ERRS, 0x0000b0),
REG(SYS_COUNT_RX_SMD_ERRS, 0x0000b4),
REG(SYS_COUNT_RX_ASSEMBLY_OK, 0x0000b8),
REG(SYS_COUNT_RX_MERGE_FRAGMENTS, 0x0000bc),
REG(SYS_COUNT_RX_PMAC_OCTETS, 0x0000c0),
REG(SYS_COUNT_RX_PMAC_UNICAST, 0x0000c4),
REG(SYS_COUNT_RX_PMAC_MULTICAST, 0x0000c8),
REG(SYS_COUNT_RX_PMAC_BROADCAST, 0x0000cc),
REG(SYS_COUNT_RX_PMAC_SHORTS, 0x0000d0),
REG(SYS_COUNT_RX_PMAC_FRAGMENTS, 0x0000d4),
REG(SYS_COUNT_RX_PMAC_JABBERS, 0x0000d8),
REG(SYS_COUNT_RX_PMAC_CRC_ALIGN_ERRS, 0x0000dc),
REG(SYS_COUNT_RX_PMAC_SYM_ERRS, 0x0000e0),
REG(SYS_COUNT_RX_PMAC_64, 0x0000e4),
REG(SYS_COUNT_RX_PMAC_65_127, 0x0000e8),
REG(SYS_COUNT_RX_PMAC_128_255, 0x0000ec),
REG(SYS_COUNT_RX_PMAC_256_511, 0x0000f0),
REG(SYS_COUNT_RX_PMAC_512_1023, 0x0000f4),
REG(SYS_COUNT_RX_PMAC_1024_1526, 0x0000f8),
REG(SYS_COUNT_RX_PMAC_1527_MAX, 0x0000fc),
REG(SYS_COUNT_RX_PMAC_PAUSE, 0x000100),
REG(SYS_COUNT_RX_PMAC_CONTROL, 0x000104),
REG(SYS_COUNT_RX_PMAC_LONGS, 0x000108),
REG(SYS_COUNT_TX_OCTETS, 0x000200),
REG(SYS_COUNT_TX_UNICAST, 0x000204),
REG(SYS_COUNT_TX_MULTICAST, 0x000208),
REG(SYS_COUNT_TX_BROADCAST, 0x00020c),
REG(SYS_COUNT_TX_COLLISION, 0x000210),
REG(SYS_COUNT_TX_DROPS, 0x000214),
REG(SYS_COUNT_TX_PAUSE, 0x000218),
REG(SYS_COUNT_TX_64, 0x00021c),
REG(SYS_COUNT_TX_65_127, 0x000220),
REG(SYS_COUNT_TX_128_255, 0x000224),
REG(SYS_COUNT_TX_256_511, 0x000228),
REG(SYS_COUNT_TX_512_1023, 0x00022c),
REG(SYS_COUNT_TX_1024_1526, 0x000230),
REG(SYS_COUNT_TX_1527_MAX, 0x000234),
REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000238),
REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00023c),
REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000240),
REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000244),
REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000248),
REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00024c),
REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000250),
REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000254),
REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000258),
REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00025c),
REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000260),
REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000264),
REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000268),
REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c),
REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
REG(SYS_COUNT_TX_AGED, 0x000278),
REG(SYS_COUNT_TX_MM_HOLD, 0x00027c),
REG(SYS_COUNT_TX_MERGE_FRAGMENTS, 0x000280),
REG(SYS_COUNT_TX_PMAC_OCTETS, 0x000284),
REG(SYS_COUNT_TX_PMAC_UNICAST, 0x000288),
REG(SYS_COUNT_TX_PMAC_MULTICAST, 0x00028c),
REG(SYS_COUNT_TX_PMAC_BROADCAST, 0x000290),
REG(SYS_COUNT_TX_PMAC_PAUSE, 0x000294),
REG(SYS_COUNT_TX_PMAC_64, 0x000298),
REG(SYS_COUNT_TX_PMAC_65_127, 0x00029c),
REG(SYS_COUNT_TX_PMAC_128_255, 0x0002a0),
REG(SYS_COUNT_TX_PMAC_256_511, 0x0002a4),
REG(SYS_COUNT_TX_PMAC_512_1023, 0x0002a8),
REG(SYS_COUNT_TX_PMAC_1024_1526, 0x0002ac),
REG(SYS_COUNT_TX_PMAC_1527_MAX, 0x0002b0),
REG(SYS_COUNT_DROP_LOCAL, 0x000400),
REG(SYS_COUNT_DROP_TAIL, 0x000404),
REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00040c),
REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000410),
REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000414),
REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000418),
REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00041c),
REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000420),
REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000424),
REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000428),
REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00042c),
REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000430),
REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000434),
REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000438),
REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c),
REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440),
REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444),
REG(SYS_COUNT_SF_MATCHING_FRAMES, 0x000800),
REG(SYS_COUNT_SF_NOT_PASSING_FRAMES, 0x000804),
REG(SYS_COUNT_SF_NOT_PASSING_SDU, 0x000808),
REG(SYS_COUNT_SF_RED_FRAMES, 0x00080c),
REG(SYS_RESET_CFG, 0x000e00),
REG(SYS_SR_ETYPE_CFG, 0x000e04),
REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
REG(SYS_PORT_MODE, 0x000e0c),
REG(SYS_FRONT_PORT_MODE, 0x000e2c),
REG(SYS_FRM_AGING, 0x000e44),
REG(SYS_STAT_CFG, 0x000e48),
REG(SYS_SW_STATUS, 0x000e4c),
REG_RESERVED(SYS_MISC_CFG),
REG(SYS_REW_MAC_HIGH_CFG, 0x000e6c),
REG(SYS_REW_MAC_LOW_CFG, 0x000e84),
REG(SYS_TIMESTAMP_OFFSET, 0x000e9c),
REG(SYS_PAUSE_CFG, 0x000ea0),
REG(SYS_PAUSE_TOT_CFG, 0x000ebc),
REG(SYS_ATOP, 0x000ec0),
REG(SYS_ATOP_TOT_CFG, 0x000edc),
REG(SYS_MAC_FC_CFG, 0x000ee0),
REG(SYS_MMGT, 0x000ef8),
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
REG(SYS_PTP_STATUS, 0x000f14),
REG(SYS_PTP_TXSTAMP, 0x000f18),
REG(SYS_PTP_NXT, 0x000f1c),
REG(SYS_PTP_CFG, 0x000f20),
REG(SYS_RAM_INIT, 0x000f24),
REG_RESERVED(SYS_CM_ADDR),
REG_RESERVED(SYS_CM_DATA_WR),
REG_RESERVED(SYS_CM_DATA_RD),
REG_RESERVED(SYS_CM_OP),
REG_RESERVED(SYS_CM_DATA),
};
static const u32 vsc9959_ptp_regmap[] = {
REG(PTP_PIN_CFG, 0x000000),
REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
REG(PTP_PIN_TOD_NSEC, 0x00000c),
REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
REG(PTP_CFG_MISC, 0x0000a0),
REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
};
static const u32 vsc9959_gcb_regmap[] = {
REG(GCB_SOFT_RST, 0x000004),
};
static const u32 vsc9959_dev_gmii_regmap[] = {
REG(DEV_CLOCK_CFG, 0x0),
REG(DEV_PORT_MISC, 0x4),
REG(DEV_EVENTS, 0x8),
REG(DEV_EEE_CFG, 0xc),
REG(DEV_RX_PATH_DELAY, 0x10),
REG(DEV_TX_PATH_DELAY, 0x14),
REG(DEV_PTP_PREDICT_CFG, 0x18),
REG(DEV_MAC_ENA_CFG, 0x1c),
REG(DEV_MAC_MODE_CFG, 0x20),
REG(DEV_MAC_MAXLEN_CFG, 0x24),
REG(DEV_MAC_TAGS_CFG, 0x28),
REG(DEV_MAC_ADV_CHK_CFG, 0x2c),
REG(DEV_MAC_IFG_CFG, 0x30),
REG(DEV_MAC_HDX_CFG, 0x34),
REG(DEV_MAC_DBG_CFG, 0x38),
REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
REG(DEV_MAC_STICKY, 0x44),
REG(DEV_MM_ENABLE_CONFIG, 0x48),
REG(DEV_MM_VERIF_CONFIG, 0x4c),
REG(DEV_MM_STATUS, 0x50),
REG_RESERVED(PCS1G_CFG),
REG_RESERVED(PCS1G_MODE_CFG),
REG_RESERVED(PCS1G_SD_CFG),
REG_RESERVED(PCS1G_ANEG_CFG),
REG_RESERVED(PCS1G_ANEG_NP_CFG),
REG_RESERVED(PCS1G_LB_CFG),
REG_RESERVED(PCS1G_DBG_CFG),
REG_RESERVED(PCS1G_CDET_CFG),
REG_RESERVED(PCS1G_ANEG_STATUS),
REG_RESERVED(PCS1G_ANEG_NP_STATUS),
REG_RESERVED(PCS1G_LINK_STATUS),
REG_RESERVED(PCS1G_LINK_DOWN_CNT),
REG_RESERVED(PCS1G_STICKY),
REG_RESERVED(PCS1G_DEBUG_STATUS),
REG_RESERVED(PCS1G_LPI_CFG),
REG_RESERVED(PCS1G_LPI_WAKE_ERROR_CNT),
REG_RESERVED(PCS1G_LPI_STATUS),
REG_RESERVED(PCS1G_TSTPAT_MODE_CFG),
REG_RESERVED(PCS1G_TSTPAT_STATUS),
REG_RESERVED(DEV_PCS_FX100_CFG),
REG_RESERVED(DEV_PCS_FX100_STATUS),
};
static const u32 *vsc9959_regmap[TARGET_MAX] = {
[ANA] = vsc9959_ana_regmap,
[QS] = vsc9959_qs_regmap,
[QSYS] = vsc9959_qsys_regmap,
[REW] = vsc9959_rew_regmap,
[SYS] = vsc9959_sys_regmap,
[S0] = vsc9959_vcap_regmap,
[S1] = vsc9959_vcap_regmap,
[S2] = vsc9959_vcap_regmap,
[PTP] = vsc9959_ptp_regmap,
[GCB] = vsc9959_gcb_regmap,
[DEV_GMII] = vsc9959_dev_gmii_regmap,
};
/* Addresses are relative to the PCI device's base address */
static const struct resource vsc9959_resources[] = {
DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
static const char * const vsc9959_resource_names[TARGET_MAX] = {
[SYS] = "sys",
[REW] = "rew",
[S0] = "s0",
[S1] = "s1",
[S2] = "s2",
[GCB] = "devcpu_gcb",
[QS] = "qs",
[PTP] = "ptp",
[QSYS] = "qsys",
[ANA] = "ana",
};
/* Internal MDIO bus of port MAC 0, through which the SerDes acting as an
 * SGMII/QSGMII MAC PCS can be found.
*/
static const struct resource vsc9959_imdio_res =
DEFINE_RES_MEM_NAMED(0x8030, 0x10, "imdio");
static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
[ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 5),
[ANA_ANEVENTS_FLOOD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 30, 30),
[ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 26, 26),
[ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 24, 24),
[ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 23, 23),
[ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 22, 22),
[ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 21, 21),
[ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 20, 20),
[ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 19, 19),
[ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
[ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 17, 17),
[ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 15, 15),
[ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 14, 14),
[ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 13, 13),
[ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 12, 12),
[ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
[ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
[ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 9, 9),
[ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 8, 8),
[ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 7, 7),
[ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
[ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
[ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 4, 4),
[ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 3, 3),
[ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 2, 2),
[ANA_ANEVENTS_SEQ_GEN_ERR_0] = REG_FIELD(ANA_ANEVENTS, 1, 1),
[ANA_ANEVENTS_SEQ_GEN_ERR_1] = REG_FIELD(ANA_ANEVENTS, 0, 0),
[ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 16, 16),
[ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 11, 12),
[ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10),
[SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 0, 0),
[GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0),
/* Replicated per number of ports (7), register size 4 per port */
[QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 7, 4),
[QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 7, 4),
[QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 7, 4),
[QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 7, 4),
[QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 7, 4),
[QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 7, 4),
[SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 7, 4),
[SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 7, 4),
[SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 7, 4),
[SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 7, 4),
[SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 7, 4),
[SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 7, 4),
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
};
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
[VCAP_ES0_EGR_PORT] = { 0, 3},
[VCAP_ES0_IGR_PORT] = { 3, 3},
[VCAP_ES0_RSV] = { 6, 2},
[VCAP_ES0_L2_MC] = { 8, 1},
[VCAP_ES0_L2_BC] = { 9, 1},
[VCAP_ES0_VID] = { 10, 12},
[VCAP_ES0_DP] = { 22, 1},
[VCAP_ES0_PCP] = { 23, 3},
};
static const struct vcap_field vsc9959_vcap_es0_actions[] = {
[VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
[VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
[VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
[VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
[VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
[VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
[VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
[VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
[VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
[VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
[VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
[VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
[VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
[VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
[VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
[VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
[VCAP_ES0_ACT_RSV] = { 49, 23},
[VCAP_ES0_ACT_HIT_STICKY] = { 72, 1},
};
static const struct vcap_field vsc9959_vcap_is1_keys[] = {
[VCAP_IS1_HK_TYPE] = { 0, 1},
[VCAP_IS1_HK_LOOKUP] = { 1, 2},
[VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 7},
[VCAP_IS1_HK_RSV] = { 10, 9},
[VCAP_IS1_HK_OAM_Y1731] = { 19, 1},
[VCAP_IS1_HK_L2_MC] = { 20, 1},
[VCAP_IS1_HK_L2_BC] = { 21, 1},
[VCAP_IS1_HK_IP_MC] = { 22, 1},
[VCAP_IS1_HK_VLAN_TAGGED] = { 23, 1},
[VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 24, 1},
[VCAP_IS1_HK_TPID] = { 25, 1},
[VCAP_IS1_HK_VID] = { 26, 12},
[VCAP_IS1_HK_DEI] = { 38, 1},
[VCAP_IS1_HK_PCP] = { 39, 3},
/* Specific Fields for IS1 Half Key S1_NORMAL */
[VCAP_IS1_HK_L2_SMAC] = { 42, 48},
[VCAP_IS1_HK_ETYPE_LEN] = { 90, 1},
[VCAP_IS1_HK_ETYPE] = { 91, 16},
[VCAP_IS1_HK_IP_SNAP] = {107, 1},
[VCAP_IS1_HK_IP4] = {108, 1},
/* Layer-3 Information */
[VCAP_IS1_HK_L3_FRAGMENT] = {109, 1},
[VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {110, 1},
[VCAP_IS1_HK_L3_OPTIONS] = {111, 1},
[VCAP_IS1_HK_L3_DSCP] = {112, 6},
[VCAP_IS1_HK_L3_IP4_SIP] = {118, 32},
/* Layer-4 Information */
[VCAP_IS1_HK_TCP_UDP] = {150, 1},
[VCAP_IS1_HK_TCP] = {151, 1},
[VCAP_IS1_HK_L4_SPORT] = {152, 16},
[VCAP_IS1_HK_L4_RNG] = {168, 8},
/* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
[VCAP_IS1_HK_IP4_INNER_TPID] = { 42, 1},
[VCAP_IS1_HK_IP4_INNER_VID] = { 43, 12},
[VCAP_IS1_HK_IP4_INNER_DEI] = { 55, 1},
[VCAP_IS1_HK_IP4_INNER_PCP] = { 56, 3},
[VCAP_IS1_HK_IP4_IP4] = { 59, 1},
[VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 60, 1},
[VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 61, 1},
[VCAP_IS1_HK_IP4_L3_OPTIONS] = { 62, 1},
[VCAP_IS1_HK_IP4_L3_DSCP] = { 63, 6},
[VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 69, 32},
[VCAP_IS1_HK_IP4_L3_IP4_SIP] = {101, 32},
[VCAP_IS1_HK_IP4_L3_PROTO] = {133, 8},
[VCAP_IS1_HK_IP4_TCP_UDP] = {141, 1},
[VCAP_IS1_HK_IP4_TCP] = {142, 1},
[VCAP_IS1_HK_IP4_L4_RNG] = {143, 8},
[VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {151, 32},
};
static const struct vcap_field vsc9959_vcap_is1_actions[] = {
[VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
[VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
[VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
[VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
[VCAP_IS1_ACT_DP_ENA] = { 11, 1},
[VCAP_IS1_ACT_DP_VAL] = { 12, 1},
[VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
[VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
[VCAP_IS1_ACT_RSV] = { 29, 9},
/* The fields below are incorrectly shifted by 2 in the manual */
[VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1},
[VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12},
[VCAP_IS1_ACT_FID_SEL] = { 51, 2},
[VCAP_IS1_ACT_FID_VAL] = { 53, 13},
[VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1},
[VCAP_IS1_ACT_PCP_VAL] = { 67, 3},
[VCAP_IS1_ACT_DEI_VAL] = { 70, 1},
[VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1},
[VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2},
[VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4},
[VCAP_IS1_ACT_HIT_STICKY] = { 78, 1},
};
static struct vcap_field vsc9959_vcap_is2_keys[] = {
/* Common: 41 bits */
[VCAP_IS2_TYPE] = { 0, 4},
[VCAP_IS2_HK_FIRST] = { 4, 1},
[VCAP_IS2_HK_PAG] = { 5, 8},
[VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 7},
[VCAP_IS2_HK_RSV2] = { 20, 1},
[VCAP_IS2_HK_HOST_MATCH] = { 21, 1},
[VCAP_IS2_HK_L2_MC] = { 22, 1},
[VCAP_IS2_HK_L2_BC] = { 23, 1},
[VCAP_IS2_HK_VLAN_TAGGED] = { 24, 1},
[VCAP_IS2_HK_VID] = { 25, 12},
[VCAP_IS2_HK_DEI] = { 37, 1},
[VCAP_IS2_HK_PCP] = { 38, 3},
/* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
[VCAP_IS2_HK_L2_DMAC] = { 41, 48},
[VCAP_IS2_HK_L2_SMAC] = { 89, 48},
/* MAC_ETYPE (TYPE=000) */
[VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {137, 16},
[VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {153, 16},
[VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {169, 8},
[VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {177, 3},
/* MAC_LLC (TYPE=001) */
[VCAP_IS2_HK_MAC_LLC_L2_LLC] = {137, 40},
/* MAC_SNAP (TYPE=010) */
[VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {137, 40},
/* MAC_ARP (TYPE=011) */
[VCAP_IS2_HK_MAC_ARP_SMAC] = { 41, 48},
[VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 89, 1},
[VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 90, 1},
[VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 91, 1},
[VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 92, 1},
[VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 93, 1},
[VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 94, 1},
[VCAP_IS2_HK_MAC_ARP_OPCODE] = { 95, 2},
[VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = { 97, 32},
[VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {129, 32},
[VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {161, 1},
/* IP4_TCP_UDP / IP4_OTHER common */
[VCAP_IS2_HK_IP4] = { 41, 1},
[VCAP_IS2_HK_L3_FRAGMENT] = { 42, 1},
[VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 43, 1},
[VCAP_IS2_HK_L3_OPTIONS] = { 44, 1},
[VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 45, 1},
[VCAP_IS2_HK_L3_TOS] = { 46, 8},
[VCAP_IS2_HK_L3_IP4_DIP] = { 54, 32},
[VCAP_IS2_HK_L3_IP4_SIP] = { 86, 32},
[VCAP_IS2_HK_DIP_EQ_SIP] = {118, 1},
/* IP4_TCP_UDP (TYPE=100) */
[VCAP_IS2_HK_TCP] = {119, 1},
[VCAP_IS2_HK_L4_DPORT] = {120, 16},
[VCAP_IS2_HK_L4_SPORT] = {136, 16},
[VCAP_IS2_HK_L4_RNG] = {152, 8},
[VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {160, 1},
[VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {161, 1},
[VCAP_IS2_HK_L4_FIN] = {162, 1},
[VCAP_IS2_HK_L4_SYN] = {163, 1},
[VCAP_IS2_HK_L4_RST] = {164, 1},
[VCAP_IS2_HK_L4_PSH] = {165, 1},
[VCAP_IS2_HK_L4_ACK] = {166, 1},
[VCAP_IS2_HK_L4_URG] = {167, 1},
[VCAP_IS2_HK_L4_1588_DOM] = {168, 8},
[VCAP_IS2_HK_L4_1588_VER] = {176, 4},
/* IP4_OTHER (TYPE=101) */
[VCAP_IS2_HK_IP4_L3_PROTO] = {119, 8},
[VCAP_IS2_HK_L3_PAYLOAD] = {127, 56},
/* IP6_STD (TYPE=110) */
[VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 41, 1},
[VCAP_IS2_HK_L3_IP6_SIP] = { 42, 128},
[VCAP_IS2_HK_IP6_L3_PROTO] = {170, 8},
/* OAM (TYPE=111) */
[VCAP_IS2_HK_OAM_MEL_FLAGS] = {137, 7},
[VCAP_IS2_HK_OAM_VER] = {144, 5},
[VCAP_IS2_HK_OAM_OPCODE] = {149, 8},
[VCAP_IS2_HK_OAM_FLAGS] = {157, 8},
[VCAP_IS2_HK_OAM_MEPID] = {165, 16},
[VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {181, 1},
[VCAP_IS2_HK_OAM_IS_Y1731] = {182, 1},
};
static struct vcap_field vsc9959_vcap_is2_actions[] = {
[VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
[VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
[VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
[VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
[VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
[VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
[VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
[VCAP_IS2_ACT_POLICE_IDX] = { 10, 9},
[VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1},
[VCAP_IS2_ACT_PORT_MASK] = { 20, 6},
[VCAP_IS2_ACT_REW_OP] = { 26, 9},
[VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 35, 1},
[VCAP_IS2_ACT_RSV] = { 36, 2},
[VCAP_IS2_ACT_ACL_ID] = { 38, 6},
[VCAP_IS2_ACT_HIT_CNT] = { 44, 32},
};
static struct vcap_props vsc9959_vcap_props[] = {
[VCAP_ES0] = {
.action_type_width = 0,
.action_table = {
[ES0_ACTION_TYPE_NORMAL] = {
.width = 72, /* HIT_STICKY not included */
.count = 1,
},
},
.target = S0,
.keys = vsc9959_vcap_es0_keys,
.actions = vsc9959_vcap_es0_actions,
},
[VCAP_IS1] = {
.action_type_width = 0,
.action_table = {
[IS1_ACTION_TYPE_NORMAL] = {
.width = 78, /* HIT_STICKY not included */
.count = 4,
},
},
.target = S1,
.keys = vsc9959_vcap_is1_keys,
.actions = vsc9959_vcap_is1_actions,
},
[VCAP_IS2] = {
.action_type_width = 1,
.action_table = {
[IS2_ACTION_TYPE_NORMAL] = {
.width = 44,
.count = 2
},
[IS2_ACTION_TYPE_SMAC_SIP] = {
.width = 6,
.count = 4
},
},
.target = S2,
.keys = vsc9959_vcap_is2_keys,
.actions = vsc9959_vcap_is2_actions,
},
};
static const struct ptp_clock_info vsc9959_ptp_caps = {
.owner = THIS_MODULE,
.name = "felix ptp",
.max_adj = 0x7fffffff,
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = OCELOT_PTP_PINS_NUM,
.n_pins = OCELOT_PTP_PINS_NUM,
.pps = 0,
.gettime64 = ocelot_ptp_gettime64,
.settime64 = ocelot_ptp_settime64,
.adjtime = ocelot_ptp_adjtime,
.adjfine = ocelot_ptp_adjfine,
.verify = ocelot_ptp_verify,
.enable = ocelot_ptp_enable,
};
#define VSC9959_INIT_TIMEOUT 50000
#define VSC9959_GCB_RST_SLEEP 100
#define VSC9959_SYS_RAMINIT_SLEEP 80
static int vsc9959_gcb_soft_rst_status(struct ocelot *ocelot)
{
int val;
ocelot_field_read(ocelot, GCB_SOFT_RST_SWC_RST, &val);
return val;
}
static int vsc9959_sys_ram_init_status(struct ocelot *ocelot)
{
return ocelot_read(ocelot, SYS_RAM_INIT);
}
/* CORE_ENA is in SYS:SYSTEM:RESET_CFG
* RAM_INIT is in SYS:RAM_CTRL:RAM_INIT
*/
static int vsc9959_reset(struct ocelot *ocelot)
{
int val, err;
/* soft-reset the switch core */
ocelot_field_write(ocelot, GCB_SOFT_RST_SWC_RST, 1);
err = readx_poll_timeout(vsc9959_gcb_soft_rst_status, ocelot, val, !val,
VSC9959_GCB_RST_SLEEP, VSC9959_INIT_TIMEOUT);
if (err) {
dev_err(ocelot->dev, "timeout: switch core reset\n");
return err;
}
/* initialize switch mem ~40us */
ocelot_write(ocelot, SYS_RAM_INIT_RAM_INIT, SYS_RAM_INIT);
err = readx_poll_timeout(vsc9959_sys_ram_init_status, ocelot, val, !val,
VSC9959_SYS_RAMINIT_SLEEP,
VSC9959_INIT_TIMEOUT);
if (err) {
dev_err(ocelot->dev, "timeout: switch sram init\n");
return err;
}
/* enable switch core */
ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);
return 0;
}
/* Watermark encode
* Bit 8: Unit; 0:1, 1:16
* Bit 7-0: Value to be multiplied with unit
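 *
 * e.g. a watermark of 288 is encoded as BIT(8) | (288 / 16) = 0x112 and
 * decoded back as 18 * 16 = 288; values below 256 are stored verbatim.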
*/
static u16 vsc9959_wm_enc(u16 value)
{
WARN_ON(value >= 16 * BIT(8));
if (value >= BIT(8))
return BIT(8) | (value / 16);
return value;
}
static u16 vsc9959_wm_dec(u16 wm)
{
WARN_ON(wm & ~GENMASK(8, 0));
if (wm & BIT(8))
return (wm & GENMASK(7, 0)) * 16;
return wm;
}
static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
{
*inuse = (val & GENMASK(23, 12)) >> 12;
*maxuse = val & GENMASK(11, 0);
}
static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
{
struct pci_dev *pdev = to_pci_dev(ocelot->dev);
struct felix *felix = ocelot_to_felix(ocelot);
struct enetc_mdio_priv *mdio_priv;
struct device *dev = ocelot->dev;
resource_size_t imdio_base;
void __iomem *imdio_regs;
struct resource res;
struct enetc_hw *hw;
struct mii_bus *bus;
int port;
int rc;
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
sizeof(struct phylink_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
return -ENOMEM;
}
imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
memcpy(&res, &vsc9959_imdio_res, sizeof(res));
res.start += imdio_base;
res.end += imdio_base;
imdio_regs = devm_ioremap_resource(dev, &res);
if (IS_ERR(imdio_regs))
return PTR_ERR(imdio_regs);
hw = enetc_hw_alloc(dev, imdio_regs);
if (IS_ERR(hw)) {
dev_err(dev, "failed to allocate ENETC HW structure\n");
return PTR_ERR(hw);
}
bus = mdiobus_alloc_size(sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
bus->name = "VSC9959 internal MDIO bus";
bus->read = enetc_mdio_read_c22;
bus->write = enetc_mdio_write_c22;
bus->read_c45 = enetc_mdio_read_c45;
bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = hw;
/* This gets added to imdio_regs, which already maps addresses
* starting with the proper offset.
*/
mdio_priv->mdio_base = 0;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
/* Needed in order to initialize the bus mutex lock */
rc = mdiobus_register(bus);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
mdiobus_free(bus);
return rc;
}
felix->imdio = bus;
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct phylink_pcs *phylink_pcs;
if (dsa_is_unused_port(felix->ds, port))
continue;
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;
phylink_pcs = lynx_pcs_create_mdiodev(felix->imdio, port);
if (IS_ERR(phylink_pcs))
continue;
felix->pcs[port] = phylink_pcs;
dev_info(dev, "Found PCS at internal MDIO address %d\n", port);
}
return 0;
}
static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct phylink_pcs *phylink_pcs = felix->pcs[port];
if (phylink_pcs)
lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
mdiobus_free(felix->imdio);
}
/* The switch considers any frame (regardless of size) as eligible for
* transmission if the traffic class gate is open for at least 33 ns.
* Overruns are prevented by cropping an interval at the end of the gate time
 * slot for which egress scheduling is blocked, but we still need to keep 33 ns
* available for one packet to be transmitted, otherwise the port tc will hang.
* This function returns the size of a gate interval that remains available for
* setting the guard band, after reserving the space for one egress frame.
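 *
 * For example, a 10000 ns open interval leaves (10000 - 33) * 1000 ps of
 * budget for the guard band, while an interval shorter than 33 ns leaves
 * nothing.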
*/
static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
{
/* Gate always open */
if (gate_len_ns == U64_MAX)
return U64_MAX;
if (gate_len_ns < VSC9959_TAS_MIN_GATE_LEN_NS)
return 0;
return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
}
/* Extract shortest continuous gate open intervals in ns for each traffic class
* of a cyclic tc-taprio schedule. If a gate is always open, the duration is
* considered U64_MAX. If the gate is always closed, it is considered 0.
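 *
 * For example, a cyclic schedule of {open 30 us, closed 20 us, open 50 us,
 * closed 10 us} for a given tc yields a minimum open interval of 30 us.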
*/
static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
u64 min_gate_len[OCELOT_NUM_TC])
{
struct tc_taprio_sched_entry *entry;
u64 gate_len[OCELOT_NUM_TC];
u8 gates_ever_opened = 0;
int tc, i, n;
/* Initialize arrays */
for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
min_gate_len[tc] = U64_MAX;
gate_len[tc] = 0;
}
/* If we don't have taprio, consider all gates as permanently open */
if (!taprio)
return;
n = taprio->num_entries;
/* Walk through the gate list twice to determine the length
* of consecutively open gates for a traffic class, including
* open gates that wrap around. We are just interested in the
* minimum window size, and this doesn't change what the
* minimum is (if the gate never closes, min_gate_len will
* remain U64_MAX).
*/
for (i = 0; i < 2 * n; i++) {
entry = &taprio->entries[i % n];
for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
if (entry->gate_mask & BIT(tc)) {
gate_len[tc] += entry->interval;
gates_ever_opened |= BIT(tc);
} else {
/* Gate closes now, record a potential new
* minimum and reinitialize length
*/
if (min_gate_len[tc] > gate_len[tc] &&
gate_len[tc])
min_gate_len[tc] = gate_len[tc];
gate_len[tc] = 0;
}
}
}
/* min_gate_len[tc] actually tracks minimum *open* gate time, so for
* permanently closed gates, min_gate_len[tc] will still be U64_MAX.
* Therefore they are currently indistinguishable from permanently
* open gates. Overwrite the gate len with 0 when we know they're
* actually permanently closed, i.e. after the loop above.
*/
for (tc = 0; tc < OCELOT_NUM_TC; tc++)
if (!(gates_ever_opened & BIT(tc)))
min_gate_len[tc] = 0;
}
/* ocelot_write_rix is a macro that concatenates QSYS_QMAXSDU_CFG_* with _RSZ,
* so we need to spell out the register access to each traffic class in helper
* functions, to simplify callers
*/
static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc,
u32 max_sdu)
{
switch (tc) {
case 0:
ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
port);
break;
case 1:
ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
port);
break;
case 2:
ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
port);
break;
case 3:
ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
port);
break;
case 4:
ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
port);
break;
case 5:
ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
port);
break;
case 6:
ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
port);
break;
case 7:
ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
port);
break;
}
}
static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc)
{
switch (tc) {
case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port);
case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port);
case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port);
case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port);
case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port);
case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port);
case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port);
case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port);
default:
return 0;
}
}
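/* Convert the per-tc L2 payload limit requested through tc-taprio (max_sdu)
 * into a full frame size limit, by adding the Ethernet header, two VLAN tags
 * and the FCS. A value of 0 means "no limit requested".
 */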
static u32 vsc9959_tas_tc_max_sdu(struct tc_taprio_qopt_offload *taprio, int tc)
{
if (!taprio || !taprio->max_sdu[tc])
return 0;
return taprio->max_sdu[tc] + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
}
/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
* switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU
* values (the default value is 1518). Also, for traffic class windows smaller
* than one MTU sized frame, update QSYS_QMAXSDU_CFG to enable oversized frame
* dropping, such that these won't hang the port, as they will never be sent.
*/
static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_mm_state *mm = &ocelot->mm[port];
struct tc_taprio_qopt_offload *taprio;
u64 min_gate_len[OCELOT_NUM_TC];
u32 val, maxlen, add_frag_size;
u64 needed_min_frag_time_ps;
int speed, picos_per_byte;
u64 needed_bit_time_ps;
u8 tas_speed;
int tc;
lockdep_assert_held(&ocelot->fwd_domain_lock);
taprio = ocelot_port->taprio;
val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
tas_speed = QSYS_TAG_CONFIG_LINK_SPEED_X(val);
switch (tas_speed) {
case OCELOT_SPEED_10:
speed = SPEED_10;
break;
case OCELOT_SPEED_100:
speed = SPEED_100;
break;
case OCELOT_SPEED_1000:
speed = SPEED_1000;
break;
case OCELOT_SPEED_2500:
speed = SPEED_2500;
break;
default:
return;
}
picos_per_byte = (USEC_PER_SEC * 8) / speed;
val = ocelot_port_readl(ocelot_port, DEV_MAC_MAXLEN_CFG);
/* MAXLEN_CFG accounts automatically for VLAN. We need to include it
* manually in the bit time calculation, plus the preamble and SFD.
*/
maxlen = val + 2 * VLAN_HLEN;
/* Consider the standard Ethernet overhead of 8 octets preamble+SFD,
* 4 octets FCS, 12 octets IFG.
*/
needed_bit_time_ps = (u64)(maxlen + 24) * picos_per_byte;
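/* For example, at 1 Gbps (8000 ps per byte) and the default MAXLEN_CFG of
 * 1518, maxlen is 1526 octets and the needed bit time is
 * (1526 + 24) * 8000 ps = 12.4 us.
 */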
/* Preemptible TCs don't need to pass a full MTU; the port will
* automatically emit a HOLD request when a preemptible TC gate closes
*/
val = ocelot_read_rix(ocelot, QSYS_PREEMPTION_CFG, port);
add_frag_size = QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(val);
needed_min_frag_time_ps = picos_per_byte *
(u64)(24 + 2 * ethtool_mm_frag_size_add_to_min(add_frag_size));
dev_dbg(ocelot->dev,
"port %d: max frame size %d needs %llu ps, %llu ps for mPackets at speed %d\n",
port, maxlen, needed_bit_time_ps, needed_min_frag_time_ps,
speed);
vsc9959_tas_min_gate_lengths(taprio, min_gate_len);
for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
u32 requested_max_sdu = vsc9959_tas_tc_max_sdu(taprio, tc);
u64 remaining_gate_len_ps;
u32 max_sdu;
remaining_gate_len_ps =
vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]);
if ((mm->active_preemptible_tcs & BIT(tc)) ?
remaining_gate_len_ps > needed_min_frag_time_ps :
remaining_gate_len_ps > needed_bit_time_ps) {
/* Setting QMAXSDU_CFG to 0 disables oversized frame
* dropping.
*/
max_sdu = requested_max_sdu;
dev_dbg(ocelot->dev,
"port %d tc %d min gate len %llu"
", sending all frames\n",
port, tc, min_gate_len[tc]);
} else {
/* If traffic class doesn't support a full MTU sized
* frame, make sure to enable oversize frame dropping
* for frames larger than the smallest that would fit.
*
* However, the exact same register, QSYS_QMAXSDU_CFG_*,
* controls not only oversized frame dropping, but also
* per-tc static guard band lengths, so it reduces the
* useful gate interval length. Therefore, be careful
* to calculate a guard band (and therefore max_sdu)
* that still leaves 33 ns available in the time slot.
*/
max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
/* A TC gate may be completely closed, which is a
* special case where all packets are oversized.
* Any limit smaller than 64 octets accomplishes this
*/
if (!max_sdu)
max_sdu = 1;
/* Take L1 overhead into account, but just don't allow
* max_sdu to go negative or to 0. Here we use 20
* because QSYS_MAXSDU_CFG_* already counts the 4 FCS
* octets as part of packet size.
*/
if (max_sdu > 20)
max_sdu -= 20;
if (requested_max_sdu && requested_max_sdu < max_sdu)
max_sdu = requested_max_sdu;
dev_info(ocelot->dev,
"port %d tc %d min gate length %llu"
" ns not enough for max frame size %d at %d"
" Mbps, dropping frames over %d"
" octets including FCS\n",
port, tc, min_gate_len[tc], maxlen, speed,
max_sdu);
}
vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu);
}
ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
ocelot->ops->cut_through_fwd(ocelot);
}
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
u32 speed)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 tas_speed;
switch (speed) {
case SPEED_10:
tas_speed = OCELOT_SPEED_10;
break;
case SPEED_100:
tas_speed = OCELOT_SPEED_100;
break;
case SPEED_1000:
tas_speed = OCELOT_SPEED_1000;
break;
case SPEED_2500:
tas_speed = OCELOT_SPEED_2500;
break;
default:
tas_speed = OCELOT_SPEED_1000;
break;
}
mutex_lock(&ocelot->fwd_domain_lock);
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
QSYS_TAG_CONFIG_LINK_SPEED_M,
QSYS_TAG_CONFIG, port);
if (ocelot_port->taprio)
vsc9959_tas_guard_bands_update(ocelot, port);
mutex_unlock(&ocelot->fwd_domain_lock);
}
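/* Advance the given base time into the future by a whole number of cycle
 * times, so that the hardware is never programmed with a base time in the
 * past. For example, with a base time of 1 s, a cycle time of 200 ms and a
 * current PTP time of 1.75 s, the new base time becomes 1.8 s.
 */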
static void vsc9959_new_base_time(struct ocelot *ocelot, ktime_t base_time,
u64 cycle_time,
struct timespec64 *new_base_ts)
{
struct timespec64 ts;
ktime_t new_base_time;
ktime_t current_time;
ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
current_time = timespec64_to_ktime(ts);
new_base_time = base_time;
if (base_time < current_time) {
u64 nr_of_cycles = current_time - base_time;
do_div(nr_of_cycles, cycle_time);
new_base_time += cycle_time * (nr_of_cycles + 1);
}
*new_base_ts = ktime_to_timespec64(new_base_time);
}
static u32 vsc9959_tas_read_cfg_status(struct ocelot *ocelot)
{
return ocelot_read(ocelot, QSYS_TAS_PARAM_CFG_CTRL);
}
static void vsc9959_tas_gcl_set(struct ocelot *ocelot, const u32 gcl_ix,
struct tc_taprio_sched_entry *entry)
{
ocelot_write(ocelot,
QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(gcl_ix) |
QSYS_GCL_CFG_REG_1_GATE_STATE(entry->gate_mask),
QSYS_GCL_CFG_REG_1);
ocelot_write(ocelot, entry->interval, QSYS_GCL_CFG_REG_2);
}
static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
struct tc_taprio_qopt_offload *taprio)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct timespec64 base_ts;
int ret, i;
u32 val;
mutex_lock(&ocelot->fwd_domain_lock);
if (taprio->cmd == TAPRIO_CMD_DESTROY) {
ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
QSYS_TAG_CONFIG, port);
taprio_offload_free(ocelot_port->taprio);
ocelot_port->taprio = NULL;
vsc9959_tas_guard_bands_update(ocelot, port);
mutex_unlock(&ocelot->fwd_domain_lock);
return 0;
} else if (taprio->cmd != TAPRIO_CMD_REPLACE) {
ret = -EOPNOTSUPP;
goto err_unlock;
}
ret = ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
if (ret)
goto err_unlock;
if (taprio->cycle_time > NSEC_PER_SEC ||
taprio->cycle_time_extension >= NSEC_PER_SEC) {
ret = -EINVAL;
goto err_reset_tc;
}
if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX) {
ret = -ERANGE;
goto err_reset_tc;
}
/* Enable guard band. The switch will schedule frames without taking
* their length into account. Thus we'll always need to enable the
* guard band which reserves the time of a maximum sized frame at the
* end of the time window.
*
* Although the ALWAYS_GUARD_BAND_SCH_Q bit is global for all ports, we
* need to set PORT_NUM, because subsequent writes to PARAM_CFG_REG_n
* operate on the port number.
*/
ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port) |
QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M |
QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
QSYS_TAS_PARAM_CFG_CTRL);
/* Hardware errata - the admin config could not be overwritten while a
 * config change is still pending; the TAS module would need a reset
*/
val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
ret = -EBUSY;
goto err_reset_tc;
}
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_ENABLE |
QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF) |
QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(0xFF),
QSYS_TAG_CONFIG_ENABLE |
QSYS_TAG_CONFIG_INIT_GATE_STATE_M |
QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M,
QSYS_TAG_CONFIG, port);
vsc9959_new_base_time(ocelot, taprio->base_time,
taprio->cycle_time, &base_ts);
ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec), QSYS_PARAM_CFG_REG_2);
val = upper_32_bits(base_ts.tv_sec);
ocelot_write(ocelot,
QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val) |
QSYS_PARAM_CFG_REG_3_LIST_LENGTH(taprio->num_entries),
QSYS_PARAM_CFG_REG_3);
ocelot_write(ocelot, taprio->cycle_time, QSYS_PARAM_CFG_REG_4);
ocelot_write(ocelot, taprio->cycle_time_extension, QSYS_PARAM_CFG_REG_5);
for (i = 0; i < taprio->num_entries; i++)
vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL);
ret = readx_poll_timeout(vsc9959_tas_read_cfg_status, ocelot, val,
!(val & QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE),
10, 100000);
if (ret)
goto err_reset_tc;
ocelot_port->taprio = taprio_offload_get(taprio);
vsc9959_tas_guard_bands_update(ocelot, port);
mutex_unlock(&ocelot->fwd_domain_lock);
return 0;
err_reset_tc:
taprio->mqprio.qopt.num_tc = 0;
ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
err_unlock:
mutex_unlock(&ocelot->fwd_domain_lock);
return ret;
}
static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
{
struct tc_taprio_qopt_offload *taprio;
struct ocelot_port *ocelot_port;
struct timespec64 base_ts;
int port;
u32 val;
mutex_lock(&ocelot->fwd_domain_lock);
for (port = 0; port < ocelot->num_phys_ports; port++) {
ocelot_port = ocelot->ports[port];
taprio = ocelot_port->taprio;
if (!taprio)
continue;
ocelot_rmw(ocelot,
QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M,
QSYS_TAS_PARAM_CFG_CTRL);
/* Disable time-aware shaper */
ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
QSYS_TAG_CONFIG, port);
vsc9959_new_base_time(ocelot, taprio->base_time,
taprio->cycle_time, &base_ts);
ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec),
QSYS_PARAM_CFG_REG_2);
val = upper_32_bits(base_ts.tv_sec);
ocelot_rmw(ocelot,
QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val),
QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
QSYS_PARAM_CFG_REG_3);
ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL);
/* Re-enable time-aware shaper */
ocelot_rmw_rix(ocelot, QSYS_TAG_CONFIG_ENABLE,
QSYS_TAG_CONFIG_ENABLE,
QSYS_TAG_CONFIG, port);
}
mutex_unlock(&ocelot->fwd_domain_lock);
}
static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
struct tc_cbs_qopt_offload *cbs_qopt)
{
struct ocelot *ocelot = ds->priv;
int port_ix = port * 8 + cbs_qopt->queue;
u32 rate, burst;
if (cbs_qopt->queue >= ds->num_tx_queues)
return -EINVAL;
if (!cbs_qopt->enable) {
ocelot_write_gix(ocelot, QSYS_CIR_CFG_CIR_RATE(0) |
QSYS_CIR_CFG_CIR_BURST(0),
QSYS_CIR_CFG, port_ix);
ocelot_rmw_gix(ocelot, 0, QSYS_SE_CFG_SE_AVB_ENA,
QSYS_SE_CFG, port_ix);
return 0;
}
/* Rate unit is 100 kbps */
rate = DIV_ROUND_UP(cbs_qopt->idleslope, 100);
/* Avoid using zero rate */
rate = clamp_t(u32, rate, 1, GENMASK(14, 0));
/* Burst unit is 4kB */
burst = DIV_ROUND_UP(cbs_qopt->hicredit, 4096);
/* Avoid using zero burst size */
burst = clamp_t(u32, burst, 1, GENMASK(5, 0));
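/* For example, an idleslope of 10000 (kbit/s, as passed by tc-cbs) and a
 * hicredit of 6000 bytes program rate = 100 (units of 100 kbps) and
 * burst = 2 (units of 4 KB).
 */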
ocelot_write_gix(ocelot,
QSYS_CIR_CFG_CIR_RATE(rate) |
QSYS_CIR_CFG_CIR_BURST(burst),
QSYS_CIR_CFG,
port_ix);
ocelot_rmw_gix(ocelot,
QSYS_SE_CFG_SE_FRM_MODE(0) |
QSYS_SE_CFG_SE_AVB_ENA,
QSYS_SE_CFG_SE_AVB_ENA |
QSYS_SE_CFG_SE_FRM_MODE_M,
QSYS_SE_CFG,
port_ix);
return 0;
}
static int vsc9959_qos_query_caps(struct tc_query_caps_base *base)
{
switch (base->type) {
case TC_SETUP_QDISC_MQPRIO: {
struct tc_mqprio_caps *caps = base->caps;
caps->validate_queue_counts = true;
return 0;
}
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
caps->supports_queue_max_sdu = true;
return 0;
}
default:
return -EOPNOTSUPP;
}
}
static int vsc9959_qos_port_mqprio(struct ocelot *ocelot, int port,
struct tc_mqprio_qopt_offload *mqprio)
{
int ret;
mutex_lock(&ocelot->fwd_domain_lock);
ret = ocelot_port_mqprio(ocelot, port, mqprio);
mutex_unlock(&ocelot->fwd_domain_lock);
return ret;
}
static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
{
struct ocelot *ocelot = ds->priv;
switch (type) {
case TC_QUERY_CAPS:
return vsc9959_qos_query_caps(type_data);
case TC_SETUP_QDISC_TAPRIO:
return vsc9959_qos_port_tas_set(ocelot, port, type_data);
case TC_SETUP_QDISC_MQPRIO:
return vsc9959_qos_port_mqprio(ocelot, port, type_data);
case TC_SETUP_QDISC_CBS:
return vsc9959_qos_port_cbs_set(ds, port, type_data);
default:
return -EOPNOTSUPP;
}
}
#define VSC9959_PSFP_SFID_MAX 175
#define VSC9959_PSFP_GATE_ID_MAX 183
#define VSC9959_PSFP_POLICER_BASE 63
#define VSC9959_PSFP_POLICER_MAX 383
#define VSC9959_PSFP_GATE_LIST_NUM 4
#define VSC9959_PSFP_GATE_CYCLETIME_MIN 5000
struct felix_stream {
struct list_head list;
unsigned long id;
bool dummy;
int ports;
int port;
u8 dmac[ETH_ALEN];
u16 vid;
s8 prio;
u8 sfid_valid;
u8 ssid_valid;
u32 sfid;
u32 ssid;
};
struct felix_stream_filter_counters {
u64 match;
u64 not_pass_gate;
u64 not_pass_sdu;
u64 red;
};
struct felix_stream_filter {
struct felix_stream_filter_counters stats;
struct list_head list;
refcount_t refcount;
u32 index;
u8 enable;
int portmask;
u8 sg_valid;
u32 sgid;
u8 fm_valid;
u32 fmid;
u8 prio_valid;
u8 prio;
u32 maxsdu;
};
struct felix_stream_gate {
u32 index;
u8 enable;
u8 ipv_valid;
u8 init_ipv;
u64 basetime;
u64 cycletime;
u64 cycletime_ext;
u32 num_entries;
struct action_gate_entry entries[];
};
struct felix_stream_gate_entry {
struct list_head list;
refcount_t refcount;
u32 index;
};
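/* Extract the stream identification (destination MAC, VLAN ID and optional
 * PCP) from a tc-flower match. Matches on anything else are rejected.
 */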
static int vsc9959_stream_identify(struct flow_cls_offload *f,
struct felix_stream *stream)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
if (dissector->used_keys &
~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS)))
return -EOPNOTSUPP;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
flow_rule_match_eth_addrs(rule, &match);
ether_addr_copy(stream->dmac, match.key->dst);
if (!is_zero_ether_addr(match.mask->src))
return -EOPNOTSUPP;
} else {
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
if (match.mask->vlan_priority)
stream->prio = match.key->vlan_priority;
else
stream->prio = -1;
if (!match.mask->vlan_id)
return -EOPNOTSUPP;
stream->vid = match.key->vlan_id;
} else {
return -EOPNOTSUPP;
}
stream->id = f->cookie;
return 0;
}
static int vsc9959_mact_stream_set(struct ocelot *ocelot,
struct felix_stream *stream,
struct netlink_ext_ack *extack)
{
enum macaccess_entry_type type;
int ret, sfid, ssid;
u32 vid, dst_idx;
u8 mac[ETH_ALEN];
ether_addr_copy(mac, stream->dmac);
vid = stream->vid;
/* Stream identification doesn't support adding a stream whose MAC entry
 * has not already been learned into the MAC table.
*/
ret = ocelot_mact_lookup(ocelot, &dst_idx, mac, vid, &type);
if (ret) {
if (extack)
NL_SET_ERR_MSG_MOD(extack, "Stream is not learned in MAC table");
return -EOPNOTSUPP;
}
if ((stream->sfid_valid || stream->ssid_valid) &&
type == ENTRYTYPE_NORMAL)
type = ENTRYTYPE_LOCKED;
sfid = stream->sfid_valid ? stream->sfid : -1;
ssid = stream->ssid_valid ? stream->ssid : -1;
ret = ocelot_mact_learn_streamdata(ocelot, dst_idx, mac, vid, type,
sfid, ssid);
return ret;
}
static struct felix_stream *
vsc9959_stream_table_lookup(struct list_head *stream_list,
struct felix_stream *stream)
{
struct felix_stream *tmp;
list_for_each_entry(tmp, stream_list, list)
if (ether_addr_equal(tmp->dmac, stream->dmac) &&
tmp->vid == stream->vid)
return tmp;
return NULL;
}
static int vsc9959_stream_table_add(struct ocelot *ocelot,
struct list_head *stream_list,
struct felix_stream *stream,
struct netlink_ext_ack *extack)
{
struct felix_stream *stream_entry;
int ret;
stream_entry = kmemdup(stream, sizeof(*stream_entry), GFP_KERNEL);
if (!stream_entry)
return -ENOMEM;
if (!stream->dummy) {
ret = vsc9959_mact_stream_set(ocelot, stream_entry, extack);
if (ret) {
kfree(stream_entry);
return ret;
}
}
list_add_tail(&stream_entry->list, stream_list);
return 0;
}
static struct felix_stream *
vsc9959_stream_table_get(struct list_head *stream_list, unsigned long id)
{
struct felix_stream *tmp;
list_for_each_entry(tmp, stream_list, list)
if (tmp->id == id)
return tmp;
return NULL;
}
static void vsc9959_stream_table_del(struct ocelot *ocelot,
struct felix_stream *stream)
{
if (!stream->dummy)
vsc9959_mact_stream_set(ocelot, stream, NULL);
list_del(&stream->list);
kfree(stream);
}
static u32 vsc9959_sfi_access_status(struct ocelot *ocelot)
{
return ocelot_read(ocelot, ANA_TABLES_SFIDACCESS);
}
static int vsc9959_psfp_sfi_set(struct ocelot *ocelot,
struct felix_stream_filter *sfi)
{
u32 val;
if (sfi->index > VSC9959_PSFP_SFID_MAX)
return -EINVAL;
if (!sfi->enable) {
ocelot_write(ocelot, ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
ANA_TABLES_SFIDTIDX);
val = ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE);
ocelot_write(ocelot, val, ANA_TABLES_SFIDACCESS);
return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
(!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
10, 100000);
}
if (sfi->sgid > VSC9959_PSFP_GATE_ID_MAX ||
sfi->fmid > VSC9959_PSFP_POLICER_MAX)
return -EINVAL;
ocelot_write(ocelot,
(sfi->sg_valid ? ANA_TABLES_SFIDTIDX_SGID_VALID : 0) |
ANA_TABLES_SFIDTIDX_SGID(sfi->sgid) |
(sfi->fm_valid ? ANA_TABLES_SFIDTIDX_POL_ENA : 0) |
ANA_TABLES_SFIDTIDX_POL_IDX(sfi->fmid) |
ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
ANA_TABLES_SFIDTIDX);
ocelot_write(ocelot,
(sfi->prio_valid ? ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA : 0) |
ANA_TABLES_SFIDACCESS_IGR_PRIO(sfi->prio) |
ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(sfi->maxsdu) |
ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
ANA_TABLES_SFIDACCESS);
return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
(!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
10, 100000);
}
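/* Program the set of ingress ports on which the given SFID is allowed to
 * match, and enable source port matching for it.
 */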
static int vsc9959_psfp_sfidmask_set(struct ocelot *ocelot, u32 sfid, int ports)
{
u32 val;
ocelot_rmw(ocelot,
ANA_TABLES_SFIDTIDX_SFID_INDEX(sfid),
ANA_TABLES_SFIDTIDX_SFID_INDEX_M,
ANA_TABLES_SFIDTIDX);
ocelot_write(ocelot,
ANA_TABLES_SFID_MASK_IGR_PORT_MASK(ports) |
ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA,
ANA_TABLES_SFID_MASK);
ocelot_rmw(ocelot,
ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M,
ANA_TABLES_SFIDACCESS);
return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
(!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
10, 100000);
}
static int vsc9959_psfp_sfi_list_add(struct ocelot *ocelot,
struct felix_stream_filter *sfi,
struct list_head *pos)
{
struct felix_stream_filter *sfi_entry;
int ret;
sfi_entry = kmemdup(sfi, sizeof(*sfi_entry), GFP_KERNEL);
if (!sfi_entry)
return -ENOMEM;
refcount_set(&sfi_entry->refcount, 1);
ret = vsc9959_psfp_sfi_set(ocelot, sfi_entry);
if (ret) {
kfree(sfi_entry);
return ret;
}
vsc9959_psfp_sfidmask_set(ocelot, sfi->index, sfi->portmask);
list_add(&sfi_entry->list, pos);
return 0;
}
static int vsc9959_psfp_sfi_table_add(struct ocelot *ocelot,
struct felix_stream_filter *sfi)
{
struct list_head *pos, *q, *last;
struct felix_stream_filter *tmp;
struct ocelot_psfp_list *psfp;
u32 insert = 0;
psfp = &ocelot->psfp;
last = &psfp->sfi_list;
list_for_each_safe(pos, q, &psfp->sfi_list) {
tmp = list_entry(pos, struct felix_stream_filter, list);
if (sfi->sg_valid == tmp->sg_valid &&
sfi->fm_valid == tmp->fm_valid &&
sfi->portmask == tmp->portmask &&
tmp->sgid == sfi->sgid &&
tmp->fmid == sfi->fmid) {
sfi->index = tmp->index;
refcount_inc(&tmp->refcount);
return 0;
}
/* Find the first free index, keeping the list sorted by index. */
if (tmp->index == insert) {
last = pos;
insert++;
}
}
sfi->index = insert;
return vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
}
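/* Add two stream filters at consecutive, currently unused indices, keeping
 * the SFI list sorted by index. Used when the same stream is filtered on two
 * ports and each port needs its own filter entry.
 */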
static int vsc9959_psfp_sfi_table_add2(struct ocelot *ocelot,
struct felix_stream_filter *sfi,
struct felix_stream_filter *sfi2)
{
struct felix_stream_filter *tmp;
struct list_head *pos, *q, *last;
struct ocelot_psfp_list *psfp;
u32 insert = 0;
int ret;
psfp = &ocelot->psfp;
last = &psfp->sfi_list;
list_for_each_safe(pos, q, &psfp->sfi_list) {
tmp = list_entry(pos, struct felix_stream_filter, list);
/* Stop at the first gap of two consecutive free indices. */
if (tmp->index >= insert + 2)
break;
insert = tmp->index + 1;
last = pos;
}
sfi->index = insert;
ret = vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
if (ret)
return ret;
sfi2->index = insert + 1;
return vsc9959_psfp_sfi_list_add(ocelot, sfi2, last->next);
}
static struct felix_stream_filter *
vsc9959_psfp_sfi_table_get(struct list_head *sfi_list, u32 index)
{
struct felix_stream_filter *tmp;
list_for_each_entry(tmp, sfi_list, list)
if (tmp->index == index)
return tmp;
return NULL;
}
static void vsc9959_psfp_sfi_table_del(struct ocelot *ocelot, u32 index)
{
struct felix_stream_filter *tmp, *n;
struct ocelot_psfp_list *psfp;
u8 z;
psfp = &ocelot->psfp;
list_for_each_entry_safe(tmp, n, &psfp->sfi_list, list)
if (tmp->index == index) {
z = refcount_dec_and_test(&tmp->refcount);
if (z) {
tmp->enable = 0;
vsc9959_psfp_sfi_set(ocelot, tmp);
list_del(&tmp->list);
kfree(tmp);
}
break;
}
}
static void vsc9959_psfp_parse_gate(const struct flow_action_entry *entry,
struct felix_stream_gate *sgi)
{
sgi->index = entry->hw_index;
sgi->ipv_valid = (entry->gate.prio < 0) ? 0 : 1;
sgi->init_ipv = (sgi->ipv_valid) ? entry->gate.prio : 0;
sgi->basetime = entry->gate.basetime;
sgi->cycletime = entry->gate.cycletime;
sgi->num_entries = entry->gate.num_entries;
sgi->enable = 1;
memcpy(sgi->entries, entry->gate.entries,
entry->gate.num_entries * sizeof(struct action_gate_entry));
}
static u32 vsc9959_sgi_cfg_status(struct ocelot *ocelot)
{
return ocelot_read(ocelot, ANA_SG_ACCESS_CTRL);
}
static int vsc9959_psfp_sgi_set(struct ocelot *ocelot,
struct felix_stream_gate *sgi)
{
struct action_gate_entry *e;
struct timespec64 base_ts;
u32 interval_sum = 0;
u32 val;
int i;
if (sgi->index > VSC9959_PSFP_GATE_ID_MAX)
return -EINVAL;
ocelot_write(ocelot, ANA_SG_ACCESS_CTRL_SGID(sgi->index),
ANA_SG_ACCESS_CTRL);
if (!sgi->enable) {
ocelot_rmw(ocelot, ANA_SG_CONFIG_REG_3_INIT_GATE_STATE,
ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
ANA_SG_CONFIG_REG_3_GATE_ENABLE,
ANA_SG_CONFIG_REG_3);
return 0;
}
if (sgi->cycletime < VSC9959_PSFP_GATE_CYCLETIME_MIN ||
sgi->cycletime > NSEC_PER_SEC)
return -EINVAL;
if (sgi->num_entries > VSC9959_PSFP_GATE_LIST_NUM)
return -EINVAL;
vsc9959_new_base_time(ocelot, sgi->basetime, sgi->cycletime, &base_ts);
ocelot_write(ocelot, base_ts.tv_nsec, ANA_SG_CONFIG_REG_1);
val = lower_32_bits(base_ts.tv_sec);
ocelot_write(ocelot, val, ANA_SG_CONFIG_REG_2);
val = upper_32_bits(base_ts.tv_sec);
ocelot_write(ocelot,
(sgi->ipv_valid ? ANA_SG_CONFIG_REG_3_IPV_VALID : 0) |
ANA_SG_CONFIG_REG_3_INIT_IPV(sgi->init_ipv) |
ANA_SG_CONFIG_REG_3_GATE_ENABLE |
ANA_SG_CONFIG_REG_3_LIST_LENGTH(sgi->num_entries) |
ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(val),
ANA_SG_CONFIG_REG_3);
ocelot_write(ocelot, sgi->cycletime, ANA_SG_CONFIG_REG_4);
e = sgi->entries;
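/* Each GCL entry is programmed with its gate state and the running sum of
 * entry intervals, i.e. the absolute offset within the cycle at which the
 * entry ends.
 */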
for (i = 0; i < sgi->num_entries; i++) {
u32 ips = (e[i].ipv < 0) ? 0 : (e[i].ipv + 8);
ocelot_write_rix(ocelot, ANA_SG_GCL_GS_CONFIG_IPS(ips) |
(e[i].gate_state ?
ANA_SG_GCL_GS_CONFIG_GATE_STATE : 0),
ANA_SG_GCL_GS_CONFIG, i);
interval_sum += e[i].interval;
ocelot_write_rix(ocelot, interval_sum, ANA_SG_GCL_TI_CONFIG, i);
}
ocelot_rmw(ocelot, ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
ANA_SG_ACCESS_CTRL);
return readx_poll_timeout(vsc9959_sgi_cfg_status, ocelot, val,
(!(ANA_SG_ACCESS_CTRL_CONFIG_CHANGE & val)),
10, 100000);
}
static int vsc9959_psfp_sgi_table_add(struct ocelot *ocelot,
struct felix_stream_gate *sgi)
{
struct felix_stream_gate_entry *tmp;
struct ocelot_psfp_list *psfp;
int ret;
psfp = &ocelot->psfp;
list_for_each_entry(tmp, &psfp->sgi_list, list)
if (tmp->index == sgi->index) {
refcount_inc(&tmp->refcount);
return 0;
}
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
ret = vsc9959_psfp_sgi_set(ocelot, sgi);
if (ret) {
kfree(tmp);
return ret;
}
tmp->index = sgi->index;
refcount_set(&tmp->refcount, 1);
list_add_tail(&tmp->list, &psfp->sgi_list);
return 0;
}
static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
u32 index)
{
struct felix_stream_gate_entry *tmp, *n;
struct felix_stream_gate sgi = {0};
struct ocelot_psfp_list *psfp;
u8 z;
psfp = &ocelot->psfp;
list_for_each_entry_safe(tmp, n, &psfp->sgi_list, list)
if (tmp->index == index) {
z = refcount_dec_and_test(&tmp->refcount);
if (z) {
sgi.index = index;
sgi.enable = 0;
vsc9959_psfp_sgi_set(ocelot, &sgi);
list_del(&tmp->list);
kfree(tmp);
}
break;
}
}
static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
struct flow_cls_offload *f)
{
struct netlink_ext_ack *extack = f->common.extack;
struct felix_stream_filter old_sfi, *sfi_entry;
struct felix_stream_filter sfi = {0};
const struct flow_action_entry *a;
struct felix_stream *stream_entry;
struct felix_stream stream = {0};
struct felix_stream_gate *sgi;
struct ocelot_psfp_list *psfp;
struct ocelot_policer pol;
int ret, i, size;
u64 rate, burst;
u32 index;
psfp = &ocelot->psfp;
ret = vsc9959_stream_identify(f, &stream);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Only can match on VID, PCP, and dest MAC");
return ret;
}
mutex_lock(&psfp->lock);
flow_action_for_each(i, a, &f->rule->action) {
switch (a->id) {
case FLOW_ACTION_GATE:
size = struct_size(sgi, entries, a->gate.num_entries);
sgi = kzalloc(size, GFP_KERNEL);
if (!sgi) {
ret = -ENOMEM;
goto err;
}
vsc9959_psfp_parse_gate(a, sgi);
ret = vsc9959_psfp_sgi_table_add(ocelot, sgi);
if (ret) {
kfree(sgi);
goto err;
}
sfi.sg_valid = 1;
sfi.sgid = sgi->index;
kfree(sgi);
break;
case FLOW_ACTION_POLICE:
index = a->hw_index + VSC9959_PSFP_POLICER_BASE;
if (index > VSC9959_PSFP_POLICER_MAX) {
ret = -EINVAL;
goto err;
}
rate = a->police.rate_bytes_ps;
burst = rate * PSCHED_NS2TICKS(a->police.burst);
pol = (struct ocelot_policer) {
.burst = div_u64(burst, PSCHED_TICKS_PER_SEC),
.rate = div_u64(rate, 1000) * 8,
};
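/* The offloaded burst is a time quantity; multiplying it by the rate and
 * dividing by PSCHED_TICKS_PER_SEC yields a burst size in bytes, while the
 * rate itself is converted from bytes/s to kbit/s for the ocelot policer.
 */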
ret = ocelot_vcap_policer_add(ocelot, index, &pol);
if (ret)
goto err;
sfi.fm_valid = 1;
sfi.fmid = index;
sfi.maxsdu = a->police.mtu;
break;
default:
mutex_unlock(&psfp->lock);
return -EOPNOTSUPP;
}
}
stream.ports = BIT(port);
stream.port = port;
sfi.portmask = stream.ports;
sfi.prio_valid = (stream.prio < 0 ? 0 : 1);
sfi.prio = (sfi.prio_valid ? stream.prio : 0);
sfi.enable = 1;
/* Check whether the stream has already been added. */
stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &stream);
if (stream_entry) {
if (stream_entry->ports & BIT(port)) {
NL_SET_ERR_MSG_MOD(extack,
"The stream is added on this port");
ret = -EEXIST;
goto err;
}
if (stream_entry->ports != BIT(stream_entry->port)) {
NL_SET_ERR_MSG_MOD(extack,
"The stream is added on two ports");
ret = -EEXIST;
goto err;
}
stream_entry->ports |= BIT(port);
stream.ports = stream_entry->ports;
sfi_entry = vsc9959_psfp_sfi_table_get(&psfp->sfi_list,
stream_entry->sfid);
memcpy(&old_sfi, sfi_entry, sizeof(old_sfi));
vsc9959_psfp_sfi_table_del(ocelot, stream_entry->sfid);
old_sfi.portmask = stream_entry->ports;
sfi.portmask = stream.ports;
if (stream_entry->port > port) {
ret = vsc9959_psfp_sfi_table_add2(ocelot, &sfi,
&old_sfi);
stream_entry->dummy = true;
} else {
ret = vsc9959_psfp_sfi_table_add2(ocelot, &old_sfi,
&sfi);
stream.dummy = true;
}
if (ret)
goto err;
stream_entry->sfid = old_sfi.index;
} else {
ret = vsc9959_psfp_sfi_table_add(ocelot, &sfi);
if (ret)
goto err;
}
stream.sfid = sfi.index;
stream.sfid_valid = 1;
ret = vsc9959_stream_table_add(ocelot, &psfp->stream_list,
&stream, extack);
if (ret) {
vsc9959_psfp_sfi_table_del(ocelot, stream.sfid);
goto err;
}
mutex_unlock(&psfp->lock);
return 0;
err:
if (sfi.sg_valid)
vsc9959_psfp_sgi_table_del(ocelot, sfi.sgid);
if (sfi.fm_valid)
ocelot_vcap_policer_del(ocelot, sfi.fmid);
mutex_unlock(&psfp->lock);
return ret;
}
static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
struct flow_cls_offload *f)
{
struct felix_stream *stream, tmp, *stream_entry;
struct ocelot_psfp_list *psfp = &ocelot->psfp;
struct felix_stream_filter *sfi;
mutex_lock(&psfp->lock);
stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
if (!stream) {
mutex_unlock(&psfp->lock);
return -ENOMEM;
}
sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
if (!sfi) {
mutex_unlock(&psfp->lock);
return -ENOMEM;
}
if (sfi->sg_valid)
vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid);
if (sfi->fm_valid)
ocelot_vcap_policer_del(ocelot, sfi->fmid);
vsc9959_psfp_sfi_table_del(ocelot, stream->sfid);
memcpy(&tmp, stream, sizeof(tmp));
stream->sfid_valid = 0;
vsc9959_stream_table_del(ocelot, stream);
stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &tmp);
if (stream_entry) {
stream_entry->ports = BIT(stream_entry->port);
if (stream_entry->dummy) {
stream_entry->dummy = false;
vsc9959_mact_stream_set(ocelot, stream_entry, NULL);
}
vsc9959_psfp_sfidmask_set(ocelot, stream_entry->sfid,
stream_entry->ports);
}
mutex_unlock(&psfp->lock);
return 0;
}
static void vsc9959_update_sfid_stats(struct ocelot *ocelot,
struct felix_stream_filter *sfi)
{
struct felix_stream_filter_counters *s = &sfi->stats;
u32 match, not_pass_gate, not_pass_sdu, red;
u32 sfid = sfi->index;
lockdep_assert_held(&ocelot->stat_view_lock);
ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(sfid),
SYS_STAT_CFG_STAT_VIEW_M,
SYS_STAT_CFG);
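/* With STAT_VIEW now selecting this SFID, the SYS_COUNT_SF_* reads below
 * return the counters of this particular stream filter.
 */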
match = ocelot_read(ocelot, SYS_COUNT_SF_MATCHING_FRAMES);
not_pass_gate = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_FRAMES);
not_pass_sdu = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_SDU);
red = ocelot_read(ocelot, SYS_COUNT_SF_RED_FRAMES);
/* Clear the PSFP counter. */
ocelot_write(ocelot,
SYS_STAT_CFG_STAT_VIEW(sfid) |
SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
SYS_STAT_CFG);
s->match += match;
s->not_pass_gate += not_pass_gate;
s->not_pass_sdu += not_pass_sdu;
s->red += red;
}
/* Caller must hold &ocelot->stat_view_lock */
static void vsc9959_update_stats(struct ocelot *ocelot)
{
struct ocelot_psfp_list *psfp = &ocelot->psfp;
struct felix_stream_filter *sfi;
mutex_lock(&psfp->lock);
list_for_each_entry(sfi, &psfp->sfi_list, list)
vsc9959_update_sfid_stats(ocelot, sfi);
mutex_unlock(&psfp->lock);
}
static int vsc9959_psfp_stats_get(struct ocelot *ocelot,
struct flow_cls_offload *f,
struct flow_stats *stats)
{
struct ocelot_psfp_list *psfp = &ocelot->psfp;
struct felix_stream_filter_counters *s;
struct felix_stream_filter *sfi;
struct felix_stream *stream;
stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
if (!stream)
return -ENOMEM;
sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
if (!sfi)
return -EINVAL;
mutex_lock(&ocelot->stat_view_lock);
vsc9959_update_sfid_stats(ocelot, sfi);
s = &sfi->stats;
stats->pkts = s->match;
stats->drops = s->not_pass_gate + s->not_pass_sdu + s->red;
memset(s, 0, sizeof(*s));
mutex_unlock(&ocelot->stat_view_lock);
return 0;
}
static void vsc9959_psfp_init(struct ocelot *ocelot)
{
struct ocelot_psfp_list *psfp = &ocelot->psfp;
INIT_LIST_HEAD(&psfp->stream_list);
INIT_LIST_HEAD(&psfp->sfi_list);
INIT_LIST_HEAD(&psfp->sgi_list);
mutex_init(&psfp->lock);
}
/* When using cut-through forwarding and the egress port runs at a higher data
* rate than the ingress port, the packet currently under transmission would
* suffer an underrun since it would be transmitted faster than it is received.
* The Felix switch implementation of cut-through forwarding does not check in
* hardware whether this condition is satisfied or not, so we must restrict the
* list of ports that have cut-through forwarding enabled on egress to only be
* the ports operating at the lowest link speed within their respective
* forwarding domain.
*/
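/* Illustrative walk-through with made-up link speeds (not taken from any
 * datasheet): say port 2 runs at 2500 Mbps and its forwarding domain
 * contains port 0 at 1000 Mbps. In the function below, min_speed for
 * port 2 becomes 1000, which differs from port 2's own speed, so val stays
 * 0 and cut-through is disabled for it (store-and-forward). Port 0, being
 * the slowest member of its own domain, keeps val non-zero for the traffic
 * classes without oversized dropping or preemption and therefore remains
 * in cut-through mode.
 */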
static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
struct dsa_switch *ds = felix->ds;
int tc, port, other_port;
lockdep_assert_held(&ocelot->fwd_domain_lock);
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_mm_state *mm = &ocelot->mm[port];
int min_speed = ocelot_port->speed;
unsigned long mask = 0;
u32 tmp, val = 0;
/* Disable cut-through on ports that are down */
if (ocelot_port->speed <= 0)
goto set;
if (dsa_is_cpu_port(ds, port)) {
/* Ocelot switches forward from the NPI port towards
* any port, regardless of whether that port is in the
* NPI port's forwarding domain.
*/
mask = dsa_user_ports(ds);
} else {
mask = ocelot_get_bridge_fwd_mask(ocelot, port);
mask &= ~BIT(port);
if (ocelot->npi >= 0)
mask |= BIT(ocelot->npi);
else
mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
port);
}
/* Calculate the minimum link speed, among the ports that are
* up, of this source port's forwarding domain.
*/
for_each_set_bit(other_port, &mask, ocelot->num_phys_ports) {
struct ocelot_port *other_ocelot_port;
other_ocelot_port = ocelot->ports[other_port];
if (other_ocelot_port->speed <= 0)
continue;
if (min_speed > other_ocelot_port->speed)
min_speed = other_ocelot_port->speed;
}
/* Enable cut-through forwarding for all traffic classes that
* don't have oversized dropping enabled, since this check is
* bypassed in cut-through mode. Also exclude preemptible
* traffic classes, since these would hang the port for some
* reason, if sent as cut-through.
*/
if (ocelot_port->speed == min_speed) {
val = GENMASK(7, 0) & ~mm->active_preemptible_tcs;
for (tc = 0; tc < OCELOT_NUM_TC; tc++)
if (vsc9959_port_qmaxsdu_get(ocelot, port, tc))
val &= ~BIT(tc);
}
set:
tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
if (tmp == val)
continue;
dev_dbg(ocelot->dev,
"port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n",
port, mask, ocelot_port->speed, min_speed,
val ? "enabling" : "disabling", val);
ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
}
}
static const struct ocelot_ops vsc9959_ops = {
.reset = vsc9959_reset,
.wm_enc = vsc9959_wm_enc,
.wm_dec = vsc9959_wm_dec,
.wm_stat = vsc9959_wm_stat,
.port_to_netdev = felix_port_to_netdev,
.netdev_to_port = felix_netdev_to_port,
.psfp_init = vsc9959_psfp_init,
.psfp_filter_add = vsc9959_psfp_filter_add,
.psfp_filter_del = vsc9959_psfp_filter_del,
.psfp_stats_get = vsc9959_psfp_stats_get,
.cut_through_fwd = vsc9959_cut_through_fwd,
.tas_clock_adjust = vsc9959_tas_clock_adjust,
.update_stats = vsc9959_update_stats,
.tas_guard_bands_update = vsc9959_tas_guard_bands_update,
};
static const struct felix_info felix_info_vsc9959 = {
.resources = vsc9959_resources,
.num_resources = ARRAY_SIZE(vsc9959_resources),
.resource_names = vsc9959_resource_names,
.regfields = vsc9959_regfields,
.map = vsc9959_regmap,
.ops = &vsc9959_ops,
.vcap = vsc9959_vcap_props,
.vcap_pol_base = VSC9959_VCAP_POLICER_BASE,
.vcap_pol_max = VSC9959_VCAP_POLICER_MAX,
.vcap_pol_base2 = 0,
.vcap_pol_max2 = 0,
.num_mact_rows = 2048,
.num_ports = VSC9959_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
.quirks = FELIX_MAC_QUIRKS,
.quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
.mdio_bus_free = vsc9959_mdio_bus_free,
.port_modes = vsc9959_port_modes,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
};
/* The INTB interrupt is shared between PTP TX timestamp availability
* notification and MAC Merge status change on each port.
*/
static irqreturn_t felix_irq_handler(int irq, void *data)
{
struct ocelot *ocelot = (struct ocelot *)data;
ocelot_get_txtstamp(ocelot);
ocelot_mm_irq(ocelot);
return IRQ_HANDLED;
}
static int felix_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct dsa_switch *ds;
struct ocelot *ocelot;
struct felix *felix;
int err;
if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
dev_info(&pdev->dev, "device is disabled, skipping\n");
return -ENODEV;
}
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "device enable failed\n");
goto err_pci_enable;
}
felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
if (!felix) {
err = -ENOMEM;
dev_err(&pdev->dev, "Failed to allocate driver memory\n");
goto err_alloc_felix;
}
pci_set_drvdata(pdev, felix);
ocelot = &felix->ocelot;
ocelot->dev = &pdev->dev;
ocelot->num_flooding_pgids = OCELOT_NUM_TC;
felix->info = &felix_info_vsc9959;
felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
pci_set_master(pdev);
err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL,
&felix_irq_handler, IRQF_ONESHOT,
"felix-intb", ocelot);
if (err) {
dev_err(&pdev->dev, "Failed to request irq\n");
goto err_alloc_irq;
}
ocelot->ptp = 1;
ocelot->mm_supported = true;
ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
if (!ds) {
err = -ENOMEM;
dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
goto err_alloc_ds;
}
ds->dev = &pdev->dev;
ds->num_ports = felix->info->num_ports;
ds->num_tx_queues = felix->info->num_tx_queues;
ds->ops = &felix_switch_ops;
ds->priv = ocelot;
felix->ds = ds;
felix->tag_proto = DSA_TAG_PROTO_OCELOT;
err = dsa_register_switch(ds);
if (err) {
dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
goto err_register_ds;
}
return 0;
err_register_ds:
kfree(ds);
err_alloc_ds:
err_alloc_irq:
kfree(felix);
err_alloc_felix:
pci_disable_device(pdev);
err_pci_enable:
return err;
}
static void felix_pci_remove(struct pci_dev *pdev)
{
struct felix *felix = pci_get_drvdata(pdev);
if (!felix)
return;
dsa_unregister_switch(felix->ds);
kfree(felix->ds);
kfree(felix);
pci_disable_device(pdev);
}
static void felix_pci_shutdown(struct pci_dev *pdev)
{
struct felix *felix = pci_get_drvdata(pdev);
if (!felix)
return;
dsa_switch_shutdown(felix->ds);
pci_set_drvdata(pdev, NULL);
}
static struct pci_device_id felix_ids[] = {
{
/* NXP LS1028A */
PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0xEEF0),
},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, felix_ids);
static struct pci_driver felix_vsc9959_pci_driver = {
.name = "mscc_felix",
.id_table = felix_ids,
.probe = felix_pci_probe,
.remove = felix_pci_remove,
.shutdown = felix_pci_shutdown,
};
module_pci_driver(felix_vsc9959_pci_driver);
MODULE_DESCRIPTION("Felix Switch driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/ocelot/felix_vsc9959.c |
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Distributed Switch Architecture VSC9953 driver
* Copyright (C) 2020, Maxim Kochetkov <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/types.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/mdio/mdio-mscc-miim.h>
#include <linux/mod_devicetable.h>
#include <linux/of_mdio.h>
#include <linux/pcs-lynx.h>
#include <linux/dsa/ocelot.h>
#include <linux/iopoll.h>
#include "felix.h"
#define VSC9953_NUM_PORTS 10
#define VSC9953_VCAP_POLICER_BASE 11
#define VSC9953_VCAP_POLICER_MAX 31
#define VSC9953_VCAP_POLICER_BASE2 120
#define VSC9953_VCAP_POLICER_MAX2 161
#define VSC9953_PORT_MODE_SERDES (OCELOT_PORT_MODE_1000BASEX | \
OCELOT_PORT_MODE_SGMII | \
OCELOT_PORT_MODE_QSGMII)
static const u32 vsc9953_port_modes[VSC9953_NUM_PORTS] = {
VSC9953_PORT_MODE_SERDES,
VSC9953_PORT_MODE_SERDES,
VSC9953_PORT_MODE_SERDES,
VSC9953_PORT_MODE_SERDES,
VSC9953_PORT_MODE_SERDES,
VSC9953_PORT_MODE_SERDES,
VSC9953_PORT_MODE_SERDES,
VSC9953_PORT_MODE_SERDES,
OCELOT_PORT_MODE_INTERNAL,
OCELOT_PORT_MODE_INTERNAL,
};
static const u32 vsc9953_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x00b500),
REG(ANA_VLANMASK, 0x00b504),
REG_RESERVED(ANA_PORT_B_DOMAIN),
REG(ANA_ANAGEFIL, 0x00b50c),
REG(ANA_ANEVENTS, 0x00b510),
REG(ANA_STORMLIMIT_BURST, 0x00b514),
REG(ANA_STORMLIMIT_CFG, 0x00b518),
REG(ANA_ISOLATED_PORTS, 0x00b528),
REG(ANA_COMMUNITY_PORTS, 0x00b52c),
REG(ANA_AUTOAGE, 0x00b530),
REG(ANA_MACTOPTIONS, 0x00b534),
REG(ANA_LEARNDISC, 0x00b538),
REG(ANA_AGENCTRL, 0x00b53c),
REG(ANA_MIRRORPORTS, 0x00b540),
REG(ANA_EMIRRORPORTS, 0x00b544),
REG(ANA_FLOODING, 0x00b548),
REG(ANA_FLOODING_IPMC, 0x00b54c),
REG(ANA_SFLOW_CFG, 0x00b550),
REG(ANA_PORT_MODE, 0x00b57c),
REG_RESERVED(ANA_CUT_THRU_CFG),
REG(ANA_PGID_PGID, 0x00b600),
REG(ANA_TABLES_ANMOVED, 0x00b4ac),
REG(ANA_TABLES_MACHDATA, 0x00b4b0),
REG(ANA_TABLES_MACLDATA, 0x00b4b4),
REG_RESERVED(ANA_TABLES_STREAMDATA),
REG(ANA_TABLES_MACACCESS, 0x00b4b8),
REG(ANA_TABLES_MACTINDX, 0x00b4bc),
REG(ANA_TABLES_VLANACCESS, 0x00b4c0),
REG(ANA_TABLES_VLANTIDX, 0x00b4c4),
REG_RESERVED(ANA_TABLES_ISDXACCESS),
REG_RESERVED(ANA_TABLES_ISDXTIDX),
REG(ANA_TABLES_ENTRYLIM, 0x00b480),
REG_RESERVED(ANA_TABLES_PTP_ID_HIGH),
REG_RESERVED(ANA_TABLES_PTP_ID_LOW),
REG_RESERVED(ANA_TABLES_STREAMACCESS),
REG_RESERVED(ANA_TABLES_STREAMTIDX),
REG_RESERVED(ANA_TABLES_SEQ_HISTORY),
REG_RESERVED(ANA_TABLES_SEQ_MASK),
REG_RESERVED(ANA_TABLES_SFID_MASK),
REG_RESERVED(ANA_TABLES_SFIDACCESS),
REG_RESERVED(ANA_TABLES_SFIDTIDX),
REG_RESERVED(ANA_MSTI_STATE),
REG_RESERVED(ANA_OAM_UPM_LM_CNT),
REG_RESERVED(ANA_SG_ACCESS_CTRL),
REG_RESERVED(ANA_SG_CONFIG_REG_1),
REG_RESERVED(ANA_SG_CONFIG_REG_2),
REG_RESERVED(ANA_SG_CONFIG_REG_3),
REG_RESERVED(ANA_SG_CONFIG_REG_4),
REG_RESERVED(ANA_SG_CONFIG_REG_5),
REG_RESERVED(ANA_SG_GCL_GS_CONFIG),
REG_RESERVED(ANA_SG_GCL_TI_CONFIG),
REG_RESERVED(ANA_SG_STATUS_REG_1),
REG_RESERVED(ANA_SG_STATUS_REG_2),
REG_RESERVED(ANA_SG_STATUS_REG_3),
REG(ANA_PORT_VLAN_CFG, 0x000000),
REG(ANA_PORT_DROP_CFG, 0x000004),
REG(ANA_PORT_QOS_CFG, 0x000008),
REG(ANA_PORT_VCAP_CFG, 0x00000c),
REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x000010),
REG(ANA_PORT_VCAP_S2_CFG, 0x00001c),
REG(ANA_PORT_PCP_DEI_MAP, 0x000020),
REG(ANA_PORT_CPU_FWD_CFG, 0x000060),
REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x000064),
REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x000068),
REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00006c),
REG(ANA_PORT_PORT_CFG, 0x000070),
REG(ANA_PORT_POL_CFG, 0x000074),
REG_RESERVED(ANA_PORT_PTP_CFG),
REG_RESERVED(ANA_PORT_PTP_DLY1_CFG),
REG_RESERVED(ANA_PORT_PTP_DLY2_CFG),
REG_RESERVED(ANA_PORT_SFID_CFG),
REG(ANA_PFC_PFC_CFG, 0x00c000),
REG_RESERVED(ANA_PFC_PFC_TIMER),
REG_RESERVED(ANA_IPT_OAM_MEP_CFG),
REG_RESERVED(ANA_IPT_IPT),
REG_RESERVED(ANA_PPT_PPT),
REG_RESERVED(ANA_FID_MAP_FID_MAP),
REG(ANA_AGGR_CFG, 0x00c600),
REG(ANA_CPUQ_CFG, 0x00c604),
REG_RESERVED(ANA_CPUQ_CFG2),
REG(ANA_CPUQ_8021_CFG, 0x00c60c),
REG(ANA_DSCP_CFG, 0x00c64c),
REG(ANA_DSCP_REWR_CFG, 0x00c74c),
REG(ANA_VCAP_RNG_TYPE_CFG, 0x00c78c),
REG(ANA_VCAP_RNG_VAL_CFG, 0x00c7ac),
REG_RESERVED(ANA_VRAP_CFG),
REG_RESERVED(ANA_VRAP_HDR_DATA),
REG_RESERVED(ANA_VRAP_HDR_MASK),
REG(ANA_DISCARD_CFG, 0x00c7d8),
REG(ANA_FID_CFG, 0x00c7dc),
REG(ANA_POL_PIR_CFG, 0x00a000),
REG(ANA_POL_CIR_CFG, 0x00a004),
REG(ANA_POL_MODE_CFG, 0x00a008),
REG(ANA_POL_PIR_STATE, 0x00a00c),
REG(ANA_POL_CIR_STATE, 0x00a010),
REG_RESERVED(ANA_POL_STATE),
REG(ANA_POL_FLOWC, 0x00c280),
REG(ANA_POL_HYST, 0x00c2ec),
REG_RESERVED(ANA_POL_MISC_CFG),
};
static const u32 vsc9953_qs_regmap[] = {
REG(QS_XTR_GRP_CFG, 0x000000),
REG(QS_XTR_RD, 0x000008),
REG(QS_XTR_FRM_PRUNING, 0x000010),
REG(QS_XTR_FLUSH, 0x000018),
REG(QS_XTR_DATA_PRESENT, 0x00001c),
REG(QS_XTR_CFG, 0x000020),
REG(QS_INJ_GRP_CFG, 0x000024),
REG(QS_INJ_WR, 0x00002c),
REG(QS_INJ_CTRL, 0x000034),
REG(QS_INJ_STATUS, 0x00003c),
REG(QS_INJ_ERR, 0x000040),
REG_RESERVED(QS_INH_DBG),
};
static const u32 vsc9953_vcap_regmap[] = {
/* VCAP_CORE_CFG */
REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
REG(VCAP_CORE_MV_CFG, 0x000004),
/* VCAP_CORE_CACHE */
REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
REG(VCAP_CACHE_MASK_DAT, 0x000108),
REG(VCAP_CACHE_ACTION_DAT, 0x000208),
REG(VCAP_CACHE_CNT_DAT, 0x000308),
REG(VCAP_CACHE_TG_DAT, 0x000388),
/* VCAP_CONST */
REG(VCAP_CONST_VCAP_VER, 0x000398),
REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
REG_RESERVED(VCAP_CONST_CORE_CNT),
REG_RESERVED(VCAP_CONST_IF_CNT),
};
static const u32 vsc9953_qsys_regmap[] = {
REG(QSYS_PORT_MODE, 0x003600),
REG(QSYS_SWITCH_PORT_MODE, 0x003630),
REG(QSYS_STAT_CNT_CFG, 0x00365c),
REG(QSYS_EEE_CFG, 0x003660),
REG(QSYS_EEE_THRES, 0x003688),
REG(QSYS_IGR_NO_SHARING, 0x00368c),
REG(QSYS_EGR_NO_SHARING, 0x003690),
REG(QSYS_SW_STATUS, 0x003694),
REG(QSYS_EXT_CPU_CFG, 0x0036c0),
REG_RESERVED(QSYS_PAD_CFG),
REG(QSYS_CPU_GROUP_MAP, 0x0036c8),
REG_RESERVED(QSYS_QMAP),
REG_RESERVED(QSYS_ISDX_SGRP),
REG_RESERVED(QSYS_TIMED_FRAME_ENTRY),
REG_RESERVED(QSYS_TFRM_MISC),
REG_RESERVED(QSYS_TFRM_PORT_DLY),
REG_RESERVED(QSYS_TFRM_TIMER_CFG_1),
REG_RESERVED(QSYS_TFRM_TIMER_CFG_2),
REG_RESERVED(QSYS_TFRM_TIMER_CFG_3),
REG_RESERVED(QSYS_TFRM_TIMER_CFG_4),
REG_RESERVED(QSYS_TFRM_TIMER_CFG_5),
REG_RESERVED(QSYS_TFRM_TIMER_CFG_6),
REG_RESERVED(QSYS_TFRM_TIMER_CFG_7),
REG_RESERVED(QSYS_TFRM_TIMER_CFG_8),
REG(QSYS_RED_PROFILE, 0x003724),
REG(QSYS_RES_QOS_MODE, 0x003764),
REG(QSYS_RES_CFG, 0x004000),
REG(QSYS_RES_STAT, 0x004004),
REG(QSYS_EGR_DROP_MODE, 0x003768),
REG(QSYS_EQ_CTRL, 0x00376c),
REG_RESERVED(QSYS_EVENTS_CORE),
REG_RESERVED(QSYS_QMAXSDU_CFG_0),
REG_RESERVED(QSYS_QMAXSDU_CFG_1),
REG_RESERVED(QSYS_QMAXSDU_CFG_2),
REG_RESERVED(QSYS_QMAXSDU_CFG_3),
REG_RESERVED(QSYS_QMAXSDU_CFG_4),
REG_RESERVED(QSYS_QMAXSDU_CFG_5),
REG_RESERVED(QSYS_QMAXSDU_CFG_6),
REG_RESERVED(QSYS_QMAXSDU_CFG_7),
REG_RESERVED(QSYS_PREEMPTION_CFG),
REG(QSYS_CIR_CFG, 0x000000),
REG_RESERVED(QSYS_EIR_CFG),
REG(QSYS_SE_CFG, 0x000008),
REG(QSYS_SE_DWRR_CFG, 0x00000c),
REG_RESERVED(QSYS_SE_CONNECT),
REG_RESERVED(QSYS_SE_DLB_SENSE),
REG(QSYS_CIR_STATE, 0x000044),
REG_RESERVED(QSYS_EIR_STATE),
REG_RESERVED(QSYS_SE_STATE),
REG(QSYS_HSCH_MISC_CFG, 0x003774),
REG_RESERVED(QSYS_TAG_CONFIG),
REG_RESERVED(QSYS_TAS_PARAM_CFG_CTRL),
REG_RESERVED(QSYS_PORT_MAX_SDU),
REG_RESERVED(QSYS_PARAM_CFG_REG_1),
REG_RESERVED(QSYS_PARAM_CFG_REG_2),
REG_RESERVED(QSYS_PARAM_CFG_REG_3),
REG_RESERVED(QSYS_PARAM_CFG_REG_4),
REG_RESERVED(QSYS_PARAM_CFG_REG_5),
REG_RESERVED(QSYS_GCL_CFG_REG_1),
REG_RESERVED(QSYS_GCL_CFG_REG_2),
REG_RESERVED(QSYS_PARAM_STATUS_REG_1),
REG_RESERVED(QSYS_PARAM_STATUS_REG_2),
REG_RESERVED(QSYS_PARAM_STATUS_REG_3),
REG_RESERVED(QSYS_PARAM_STATUS_REG_4),
REG_RESERVED(QSYS_PARAM_STATUS_REG_5),
REG_RESERVED(QSYS_PARAM_STATUS_REG_6),
REG_RESERVED(QSYS_PARAM_STATUS_REG_7),
REG_RESERVED(QSYS_PARAM_STATUS_REG_8),
REG_RESERVED(QSYS_PARAM_STATUS_REG_9),
REG_RESERVED(QSYS_GCL_STATUS_REG_1),
REG_RESERVED(QSYS_GCL_STATUS_REG_2),
};
static const u32 vsc9953_rew_regmap[] = {
REG(REW_PORT_VLAN_CFG, 0x000000),
REG(REW_TAG_CFG, 0x000004),
REG(REW_PORT_CFG, 0x000008),
REG(REW_DSCP_CFG, 0x00000c),
REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
REG_RESERVED(REW_PTP_CFG),
REG_RESERVED(REW_PTP_DLY1_CFG),
REG_RESERVED(REW_RED_TAG_CFG),
REG(REW_DSCP_REMAP_DP1_CFG, 0x000610),
REG(REW_DSCP_REMAP_CFG, 0x000710),
REG_RESERVED(REW_STAT_CFG),
REG_RESERVED(REW_REW_STICKY),
REG_RESERVED(REW_PPT),
};
static const u32 vsc9953_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
REG(SYS_COUNT_RX_256_511, 0x000030),
REG(SYS_COUNT_RX_512_1023, 0x000034),
REG(SYS_COUNT_RX_1024_1526, 0x000038),
REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
REG(SYS_COUNT_RX_PAUSE, 0x000040),
REG(SYS_COUNT_RX_CONTROL, 0x000044),
REG(SYS_COUNT_RX_LONGS, 0x000048),
REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
REG(SYS_COUNT_TX_UNICAST, 0x000104),
REG(SYS_COUNT_TX_MULTICAST, 0x000108),
REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
REG(SYS_COUNT_TX_COLLISION, 0x000110),
REG(SYS_COUNT_TX_DROPS, 0x000114),
REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
REG(SYS_COUNT_TX_128_255, 0x000124),
REG(SYS_COUNT_TX_256_511, 0x000128),
REG(SYS_COUNT_TX_512_1023, 0x00012c),
REG(SYS_COUNT_TX_1024_1526, 0x000130),
REG(SYS_COUNT_TX_1527_MAX, 0x000134),
REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
REG(SYS_COUNT_TX_AGED, 0x000178),
REG(SYS_COUNT_DROP_LOCAL, 0x000200),
REG(SYS_COUNT_DROP_TAIL, 0x000204),
REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000318),
REG_RESERVED(SYS_SR_ETYPE_CFG),
REG(SYS_VLAN_ETYPE_CFG, 0x000320),
REG(SYS_PORT_MODE, 0x000324),
REG(SYS_FRONT_PORT_MODE, 0x000354),
REG(SYS_FRM_AGING, 0x00037c),
REG(SYS_STAT_CFG, 0x000380),
REG_RESERVED(SYS_SW_STATUS),
REG_RESERVED(SYS_MISC_CFG),
REG_RESERVED(SYS_REW_MAC_HIGH_CFG),
REG_RESERVED(SYS_REW_MAC_LOW_CFG),
REG_RESERVED(SYS_TIMESTAMP_OFFSET),
REG(SYS_PAUSE_CFG, 0x00044c),
REG(SYS_PAUSE_TOT_CFG, 0x000478),
REG(SYS_ATOP, 0x00047c),
REG(SYS_ATOP_TOT_CFG, 0x0004a8),
REG(SYS_MAC_FC_CFG, 0x0004ac),
REG(SYS_MMGT, 0x0004d4),
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
REG_RESERVED(SYS_PTP_STATUS),
REG_RESERVED(SYS_PTP_TXSTAMP),
REG_RESERVED(SYS_PTP_NXT),
REG_RESERVED(SYS_PTP_CFG),
REG_RESERVED(SYS_RAM_INIT),
REG_RESERVED(SYS_CM_ADDR),
REG_RESERVED(SYS_CM_DATA_WR),
REG_RESERVED(SYS_CM_DATA_RD),
REG_RESERVED(SYS_CM_OP),
REG_RESERVED(SYS_CM_DATA),
};
static const u32 vsc9953_gcb_regmap[] = {
REG(GCB_SOFT_RST, 0x000008),
REG(GCB_MIIM_MII_STATUS, 0x0000ac),
REG(GCB_MIIM_MII_CMD, 0x0000b4),
REG(GCB_MIIM_MII_DATA, 0x0000b8),
};
static const u32 vsc9953_dev_gmii_regmap[] = {
REG(DEV_CLOCK_CFG, 0x0),
REG(DEV_PORT_MISC, 0x4),
REG_RESERVED(DEV_EVENTS),
REG(DEV_EEE_CFG, 0xc),
REG_RESERVED(DEV_RX_PATH_DELAY),
REG_RESERVED(DEV_TX_PATH_DELAY),
REG_RESERVED(DEV_PTP_PREDICT_CFG),
REG(DEV_MAC_ENA_CFG, 0x10),
REG(DEV_MAC_MODE_CFG, 0x14),
REG(DEV_MAC_MAXLEN_CFG, 0x18),
REG(DEV_MAC_TAGS_CFG, 0x1c),
REG(DEV_MAC_ADV_CHK_CFG, 0x20),
REG(DEV_MAC_IFG_CFG, 0x24),
REG(DEV_MAC_HDX_CFG, 0x28),
REG_RESERVED(DEV_MAC_DBG_CFG),
REG(DEV_MAC_FC_MAC_LOW_CFG, 0x30),
REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x34),
REG(DEV_MAC_STICKY, 0x38),
REG_RESERVED(PCS1G_CFG),
REG_RESERVED(PCS1G_MODE_CFG),
REG_RESERVED(PCS1G_SD_CFG),
REG_RESERVED(PCS1G_ANEG_CFG),
REG_RESERVED(PCS1G_ANEG_NP_CFG),
REG_RESERVED(PCS1G_LB_CFG),
REG_RESERVED(PCS1G_DBG_CFG),
REG_RESERVED(PCS1G_CDET_CFG),
REG_RESERVED(PCS1G_ANEG_STATUS),
REG_RESERVED(PCS1G_ANEG_NP_STATUS),
REG_RESERVED(PCS1G_LINK_STATUS),
REG_RESERVED(PCS1G_LINK_DOWN_CNT),
REG_RESERVED(PCS1G_STICKY),
REG_RESERVED(PCS1G_DEBUG_STATUS),
REG_RESERVED(PCS1G_LPI_CFG),
REG_RESERVED(PCS1G_LPI_WAKE_ERROR_CNT),
REG_RESERVED(PCS1G_LPI_STATUS),
REG_RESERVED(PCS1G_TSTPAT_MODE_CFG),
REG_RESERVED(PCS1G_TSTPAT_STATUS),
REG_RESERVED(DEV_PCS_FX100_CFG),
REG_RESERVED(DEV_PCS_FX100_STATUS),
};
static const u32 *vsc9953_regmap[TARGET_MAX] = {
[ANA] = vsc9953_ana_regmap,
[QS] = vsc9953_qs_regmap,
[QSYS] = vsc9953_qsys_regmap,
[REW] = vsc9953_rew_regmap,
[SYS] = vsc9953_sys_regmap,
[S0] = vsc9953_vcap_regmap,
[S1] = vsc9953_vcap_regmap,
[S2] = vsc9953_vcap_regmap,
[GCB] = vsc9953_gcb_regmap,
[DEV_GMII] = vsc9953_dev_gmii_regmap,
};
/* Addresses are relative to the device's base address */
static const struct resource vsc9953_resources[] = {
DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
DEFINE_RES_MEM_NAMED(0x0160000, 0x0010000, "port6"),
DEFINE_RES_MEM_NAMED(0x0170000, 0x0010000, "port7"),
DEFINE_RES_MEM_NAMED(0x0180000, 0x0010000, "port8"),
DEFINE_RES_MEM_NAMED(0x0190000, 0x0010000, "port9"),
DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
static const char * const vsc9953_resource_names[TARGET_MAX] = {
[SYS] = "sys",
[REW] = "rew",
[S0] = "s0",
[S1] = "s1",
[S2] = "s2",
[GCB] = "devcpu_gcb",
[QS] = "qs",
[PTP] = "ptp",
[QSYS] = "qsys",
[ANA] = "ana",
};
static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 10, 10),
[ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 9),
[ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
[ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
[ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
[ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
[ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
[ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
[ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
[ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
[ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
[ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
[ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
[ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
[ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
[ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
[ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
[ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
[ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
[ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
[ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
[ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
[ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
[ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
[ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
[ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 16, 16),
[ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 11, 12),
[ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10),
[SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 7, 7),
[SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 6, 6),
[SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 5, 5),
[GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0),
[GCB_MIIM_MII_STATUS_PENDING] = REG_FIELD(GCB_MIIM_MII_STATUS, 2, 2),
[GCB_MIIM_MII_STATUS_BUSY] = REG_FIELD(GCB_MIIM_MII_STATUS, 3, 3),
/* Replicated per number of ports (11), register size 4 per port */
[QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 13, 13, 11, 4),
[QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 11, 4),
[QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 11, 4),
[QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 11, 4),
[QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 11, 4),
[SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 4, 5, 11, 4),
[SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 2, 3, 11, 4),
[SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 11, 4),
[SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 11, 20, 11, 4),
[SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 10, 11, 4),
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
};
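/* A sketch of how the replicated fields above are addressed, assuming the
 * generic regmap_field semantics rather than anything VSC9953-specific:
 * a field declared with REG_FIELD_ID(reg, lsb, msb, 11, 4) is instantiated
 * once per id, so regmap_fields_write(field, port, val) targets
 * reg + port * 4, e.g. the SYS_PAUSE_CFG instance of port 3 sits 12 bytes
 * after the base register.
 */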
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
[VCAP_ES0_EGR_PORT] = { 0, 4},
[VCAP_ES0_IGR_PORT] = { 4, 4},
[VCAP_ES0_RSV] = { 8, 2},
[VCAP_ES0_L2_MC] = { 10, 1},
[VCAP_ES0_L2_BC] = { 11, 1},
[VCAP_ES0_VID] = { 12, 12},
[VCAP_ES0_DP] = { 24, 1},
[VCAP_ES0_PCP] = { 25, 3},
};
static const struct vcap_field vsc9953_vcap_es0_actions[] = {
[VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
[VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
[VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
[VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
[VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
[VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
[VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
[VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
[VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
[VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
[VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
[VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
[VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
[VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
[VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
[VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
[VCAP_ES0_ACT_RSV] = { 49, 24},
[VCAP_ES0_ACT_HIT_STICKY] = { 73, 1},
};
static const struct vcap_field vsc9953_vcap_is1_keys[] = {
[VCAP_IS1_HK_TYPE] = { 0, 1},
[VCAP_IS1_HK_LOOKUP] = { 1, 2},
[VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 11},
[VCAP_IS1_HK_RSV] = { 14, 10},
/* VCAP_IS1_HK_OAM_Y1731 not supported */
[VCAP_IS1_HK_L2_MC] = { 24, 1},
[VCAP_IS1_HK_L2_BC] = { 25, 1},
[VCAP_IS1_HK_IP_MC] = { 26, 1},
[VCAP_IS1_HK_VLAN_TAGGED] = { 27, 1},
[VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 28, 1},
[VCAP_IS1_HK_TPID] = { 29, 1},
[VCAP_IS1_HK_VID] = { 30, 12},
[VCAP_IS1_HK_DEI] = { 42, 1},
[VCAP_IS1_HK_PCP] = { 43, 3},
/* Specific Fields for IS1 Half Key S1_NORMAL */
[VCAP_IS1_HK_L2_SMAC] = { 46, 48},
[VCAP_IS1_HK_ETYPE_LEN] = { 94, 1},
[VCAP_IS1_HK_ETYPE] = { 95, 16},
[VCAP_IS1_HK_IP_SNAP] = {111, 1},
[VCAP_IS1_HK_IP4] = {112, 1},
/* Layer-3 Information */
[VCAP_IS1_HK_L3_FRAGMENT] = {113, 1},
[VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {114, 1},
[VCAP_IS1_HK_L3_OPTIONS] = {115, 1},
[VCAP_IS1_HK_L3_DSCP] = {116, 6},
[VCAP_IS1_HK_L3_IP4_SIP] = {122, 32},
/* Layer-4 Information */
[VCAP_IS1_HK_TCP_UDP] = {154, 1},
[VCAP_IS1_HK_TCP] = {155, 1},
[VCAP_IS1_HK_L4_SPORT] = {156, 16},
[VCAP_IS1_HK_L4_RNG] = {172, 8},
/* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
[VCAP_IS1_HK_IP4_INNER_TPID] = { 46, 1},
[VCAP_IS1_HK_IP4_INNER_VID] = { 47, 12},
[VCAP_IS1_HK_IP4_INNER_DEI] = { 59, 1},
[VCAP_IS1_HK_IP4_INNER_PCP] = { 60, 3},
[VCAP_IS1_HK_IP4_IP4] = { 63, 1},
[VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 64, 1},
[VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 65, 1},
[VCAP_IS1_HK_IP4_L3_OPTIONS] = { 66, 1},
[VCAP_IS1_HK_IP4_L3_DSCP] = { 67, 6},
[VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 73, 32},
[VCAP_IS1_HK_IP4_L3_IP4_SIP] = {105, 32},
[VCAP_IS1_HK_IP4_L3_PROTO] = {137, 8},
[VCAP_IS1_HK_IP4_TCP_UDP] = {145, 1},
[VCAP_IS1_HK_IP4_TCP] = {146, 1},
[VCAP_IS1_HK_IP4_L4_RNG] = {147, 8},
[VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {155, 32},
};
static const struct vcap_field vsc9953_vcap_is1_actions[] = {
[VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
[VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
[VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
[VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
[VCAP_IS1_ACT_DP_ENA] = { 11, 1},
[VCAP_IS1_ACT_DP_VAL] = { 12, 1},
[VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
[VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
[VCAP_IS1_ACT_RSV] = { 29, 11},
[VCAP_IS1_ACT_VID_REPLACE_ENA] = { 40, 1},
[VCAP_IS1_ACT_VID_ADD_VAL] = { 41, 12},
[VCAP_IS1_ACT_FID_SEL] = { 53, 2},
[VCAP_IS1_ACT_FID_VAL] = { 55, 13},
[VCAP_IS1_ACT_PCP_DEI_ENA] = { 68, 1},
[VCAP_IS1_ACT_PCP_VAL] = { 69, 3},
[VCAP_IS1_ACT_DEI_VAL] = { 72, 1},
[VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 73, 1},
[VCAP_IS1_ACT_VLAN_POP_CNT] = { 74, 2},
[VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 76, 4},
[VCAP_IS1_ACT_HIT_STICKY] = { 80, 1},
};
static struct vcap_field vsc9953_vcap_is2_keys[] = {
/* Common: 41 bits */
[VCAP_IS2_TYPE] = { 0, 4},
[VCAP_IS2_HK_FIRST] = { 4, 1},
[VCAP_IS2_HK_PAG] = { 5, 8},
[VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 11},
[VCAP_IS2_HK_RSV2] = { 24, 1},
[VCAP_IS2_HK_HOST_MATCH] = { 25, 1},
[VCAP_IS2_HK_L2_MC] = { 26, 1},
[VCAP_IS2_HK_L2_BC] = { 27, 1},
[VCAP_IS2_HK_VLAN_TAGGED] = { 28, 1},
[VCAP_IS2_HK_VID] = { 29, 12},
[VCAP_IS2_HK_DEI] = { 41, 1},
[VCAP_IS2_HK_PCP] = { 42, 3},
/* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
[VCAP_IS2_HK_L2_DMAC] = { 45, 48},
[VCAP_IS2_HK_L2_SMAC] = { 93, 48},
/* MAC_ETYPE (TYPE=000) */
[VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {141, 16},
[VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {157, 16},
[VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {173, 8},
[VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {181, 3},
/* MAC_LLC (TYPE=001) */
[VCAP_IS2_HK_MAC_LLC_L2_LLC] = {141, 40},
/* MAC_SNAP (TYPE=010) */
[VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {141, 40},
/* MAC_ARP (TYPE=011) */
[VCAP_IS2_HK_MAC_ARP_SMAC] = { 45, 48},
[VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 93, 1},
[VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 94, 1},
[VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 95, 1},
[VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 96, 1},
[VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 97, 1},
[VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 98, 1},
[VCAP_IS2_HK_MAC_ARP_OPCODE] = { 99, 2},
[VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = {101, 32},
[VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {133, 32},
[VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {165, 1},
/* IP4_TCP_UDP / IP4_OTHER common */
[VCAP_IS2_HK_IP4] = { 45, 1},
[VCAP_IS2_HK_L3_FRAGMENT] = { 46, 1},
[VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 47, 1},
[VCAP_IS2_HK_L3_OPTIONS] = { 48, 1},
[VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 49, 1},
[VCAP_IS2_HK_L3_TOS] = { 50, 8},
[VCAP_IS2_HK_L3_IP4_DIP] = { 58, 32},
[VCAP_IS2_HK_L3_IP4_SIP] = { 90, 32},
[VCAP_IS2_HK_DIP_EQ_SIP] = {122, 1},
/* IP4_TCP_UDP (TYPE=100) */
[VCAP_IS2_HK_TCP] = {123, 1},
[VCAP_IS2_HK_L4_DPORT] = {124, 16},
[VCAP_IS2_HK_L4_SPORT] = {140, 16},
[VCAP_IS2_HK_L4_RNG] = {156, 8},
[VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {164, 1},
[VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {165, 1},
[VCAP_IS2_HK_L4_FIN] = {166, 1},
[VCAP_IS2_HK_L4_SYN] = {167, 1},
[VCAP_IS2_HK_L4_RST] = {168, 1},
[VCAP_IS2_HK_L4_PSH] = {169, 1},
[VCAP_IS2_HK_L4_ACK] = {170, 1},
[VCAP_IS2_HK_L4_URG] = {171, 1},
/* IP4_OTHER (TYPE=101) */
[VCAP_IS2_HK_IP4_L3_PROTO] = {123, 8},
[VCAP_IS2_HK_L3_PAYLOAD] = {131, 56},
/* IP6_STD (TYPE=110) */
[VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 45, 1},
[VCAP_IS2_HK_L3_IP6_SIP] = { 46, 128},
[VCAP_IS2_HK_IP6_L3_PROTO] = {174, 8},
};
static struct vcap_field vsc9953_vcap_is2_actions[] = {
[VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
[VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
[VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
[VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
[VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
[VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
[VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
[VCAP_IS2_ACT_POLICE_IDX] = { 10, 8},
[VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 21, 1},
[VCAP_IS2_ACT_PORT_MASK] = { 22, 10},
[VCAP_IS2_ACT_ACL_ID] = { 44, 6},
[VCAP_IS2_ACT_HIT_CNT] = { 50, 32},
};
static struct vcap_props vsc9953_vcap_props[] = {
[VCAP_ES0] = {
.action_type_width = 0,
.action_table = {
[ES0_ACTION_TYPE_NORMAL] = {
.width = 73, /* HIT_STICKY not included */
.count = 1,
},
},
.target = S0,
.keys = vsc9953_vcap_es0_keys,
.actions = vsc9953_vcap_es0_actions,
},
[VCAP_IS1] = {
.action_type_width = 0,
.action_table = {
[IS1_ACTION_TYPE_NORMAL] = {
.width = 80, /* HIT_STICKY not included */
.count = 4,
},
},
.target = S1,
.keys = vsc9953_vcap_is1_keys,
.actions = vsc9953_vcap_is1_actions,
},
[VCAP_IS2] = {
.action_type_width = 1,
.action_table = {
[IS2_ACTION_TYPE_NORMAL] = {
.width = 50, /* HIT_CNT not included */
.count = 2
},
[IS2_ACTION_TYPE_SMAC_SIP] = {
.width = 6,
.count = 4
},
},
.target = S2,
.keys = vsc9953_vcap_is2_keys,
.actions = vsc9953_vcap_is2_actions,
},
};
#define VSC9953_INIT_TIMEOUT 50000
#define VSC9953_GCB_RST_SLEEP 100
#define VSC9953_SYS_RAMINIT_SLEEP 80
static int vsc9953_gcb_soft_rst_status(struct ocelot *ocelot)
{
int val;
ocelot_field_read(ocelot, GCB_SOFT_RST_SWC_RST, &val);
return val;
}
static int vsc9953_sys_ram_init_status(struct ocelot *ocelot)
{
int val;
ocelot_field_read(ocelot, SYS_RESET_CFG_MEM_INIT, &val);
return val;
}
/* CORE_ENA is in SYS:SYSTEM:RESET_CFG
* MEM_INIT is in SYS:SYSTEM:RESET_CFG
* MEM_ENA is in SYS:SYSTEM:RESET_CFG
*/
static int vsc9953_reset(struct ocelot *ocelot)
{
int val, err;
/* soft-reset the switch core */
ocelot_field_write(ocelot, GCB_SOFT_RST_SWC_RST, 1);
err = readx_poll_timeout(vsc9953_gcb_soft_rst_status, ocelot, val, !val,
VSC9953_GCB_RST_SLEEP, VSC9953_INIT_TIMEOUT);
if (err) {
dev_err(ocelot->dev, "timeout: switch core reset\n");
return err;
}
/* initialize switch mem ~40us */
ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1);
ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_INIT, 1);
err = readx_poll_timeout(vsc9953_sys_ram_init_status, ocelot, val, !val,
VSC9953_SYS_RAMINIT_SLEEP,
VSC9953_INIT_TIMEOUT);
if (err) {
dev_err(ocelot->dev, "timeout: switch sram init\n");
return err;
}
/* enable switch core */
ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);
return 0;
}
/* Watermark encode
* Bit 9: Unit; 0:1, 1:16
* Bit 8-0: Value to be multiplied with unit
*/
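/* Worked example with an arbitrary value rather than a real watermark:
 * vsc9953_wm_enc(600) sees 600 >= BIT(9) and returns BIT(9) | (600 / 16) =
 * 0x200 | 37 = 0x225; vsc9953_wm_dec(0x225) then yields 37 * 16 = 592,
 * i.e. values above 511 are rounded down to a multiple of 16 by the
 * 16-unit mode.
 */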
static u16 vsc9953_wm_enc(u16 value)
{
WARN_ON(value >= 16 * BIT(9));
if (value >= BIT(9))
return BIT(9) | (value / 16);
return value;
}
static u16 vsc9953_wm_dec(u16 wm)
{
WARN_ON(wm & ~GENMASK(9, 0));
if (wm & BIT(9))
return (wm & GENMASK(8, 0)) * 16;
return wm;
}
static void vsc9953_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
{
*inuse = (val & GENMASK(25, 13)) >> 13;
*maxuse = val & GENMASK(12, 0);
}
static const struct ocelot_ops vsc9953_ops = {
.reset = vsc9953_reset,
.wm_enc = vsc9953_wm_enc,
.wm_dec = vsc9953_wm_dec,
.wm_stat = vsc9953_wm_stat,
.port_to_netdev = felix_port_to_netdev,
.netdev_to_port = felix_netdev_to_port,
};
static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
struct device *dev = ocelot->dev;
struct mii_bus *bus;
int port;
int rc;
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
sizeof(struct phylink_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
return -ENOMEM;
}
rc = mscc_miim_setup(dev, &bus, "VSC9953 internal MDIO bus",
ocelot->targets[GCB],
ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK],
true);
if (rc) {
dev_err(dev, "failed to setup MDIO bus\n");
return rc;
}
/* Needed in order to initialize the bus mutex lock */
rc = devm_of_mdiobus_register(dev, bus, NULL);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
return rc;
}
felix->imdio = bus;
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct phylink_pcs *phylink_pcs;
int addr = port + 4;
if (dsa_is_unused_port(felix->ds, port))
continue;
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;
phylink_pcs = lynx_pcs_create_mdiodev(felix->imdio, addr);
if (IS_ERR(phylink_pcs))
continue;
felix->pcs[port] = phylink_pcs;
dev_info(dev, "Found PCS at internal MDIO address %d\n", addr);
}
return 0;
}
static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct phylink_pcs *phylink_pcs = felix->pcs[port];
if (phylink_pcs)
lynx_pcs_destroy(phylink_pcs);
}
/* mdiobus_unregister and mdiobus_free handled by devres */
}
static const struct felix_info seville_info_vsc9953 = {
.resources = vsc9953_resources,
.num_resources = ARRAY_SIZE(vsc9953_resources),
.resource_names = vsc9953_resource_names,
.regfields = vsc9953_regfields,
.map = vsc9953_regmap,
.ops = &vsc9953_ops,
.vcap = vsc9953_vcap_props,
.vcap_pol_base = VSC9953_VCAP_POLICER_BASE,
.vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
.vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
.vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
.quirks = FELIX_MAC_QUIRKS,
.num_mact_rows = 2048,
.num_ports = VSC9953_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
.port_modes = vsc9953_port_modes,
};
static int seville_probe(struct platform_device *pdev)
{
struct dsa_switch *ds;
struct ocelot *ocelot;
struct resource *res;
struct felix *felix;
int err;
felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
if (!felix) {
err = -ENOMEM;
dev_err(&pdev->dev, "Failed to allocate driver memory\n");
goto err_alloc_felix;
}
platform_set_drvdata(pdev, felix);
ocelot = &felix->ocelot;
ocelot->dev = &pdev->dev;
ocelot->num_flooding_pgids = 1;
felix->info = &seville_info_vsc9953;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
err = -EINVAL;
dev_err(&pdev->dev, "Invalid resource\n");
goto err_alloc_felix;
}
felix->switch_base = res->start;
ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
if (!ds) {
err = -ENOMEM;
dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
goto err_alloc_ds;
}
ds->dev = &pdev->dev;
ds->num_ports = felix->info->num_ports;
ds->ops = &felix_switch_ops;
ds->priv = ocelot;
felix->ds = ds;
felix->tag_proto = DSA_TAG_PROTO_SEVILLE;
err = dsa_register_switch(ds);
if (err) {
dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
goto err_register_ds;
}
return 0;
err_register_ds:
kfree(ds);
err_alloc_ds:
err_alloc_felix:
kfree(felix);
return err;
}
static int seville_remove(struct platform_device *pdev)
{
struct felix *felix = platform_get_drvdata(pdev);
if (!felix)
return 0;
dsa_unregister_switch(felix->ds);
kfree(felix->ds);
kfree(felix);
return 0;
}
static void seville_shutdown(struct platform_device *pdev)
{
struct felix *felix = platform_get_drvdata(pdev);
if (!felix)
return;
dsa_switch_shutdown(felix->ds);
platform_set_drvdata(pdev, NULL);
}
static const struct of_device_id seville_of_match[] = {
{ .compatible = "mscc,vsc9953-switch" },
{ },
};
MODULE_DEVICE_TABLE(of, seville_of_match);
static struct platform_driver seville_vsc9953_driver = {
.probe = seville_probe,
.remove = seville_remove,
.shutdown = seville_shutdown,
.driver = {
.name = "mscc_seville",
.of_match_table = seville_of_match,
},
};
module_platform_driver(seville_vsc9953_driver);
MODULE_DESCRIPTION("Seville Switch driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/ocelot/seville_vsc9953.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 NovaTech LLC
* George McCollister <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/of.h>
#include "xrs700x.h"
#include "xrs700x_reg.h"
#define XRS_MDIO_IBA0 0x10
#define XRS_MDIO_IBA1 0x11
#define XRS_MDIO_IBD 0x14
#define XRS_IB_READ 0x0
#define XRS_IB_WRITE 0x1
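/* Indirect access to the 32-bit switch register space over MDIO, as
 * implemented by the regmap callbacks below: the upper 16 address bits go
 * into IBA1, the lower address bits plus the read/write opcode (bit 0) go
 * into IBA0, and the 16-bit data word is transferred through IBD.
 */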
static int xrs700x_mdio_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
struct mdio_device *mdiodev = context;
struct device *dev = &mdiodev->dev;
u16 uval;
int ret;
uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
}
uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_READ);
ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
}
ret = mdiodev_read(mdiodev, XRS_MDIO_IBD);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_read returned %d\n", ret);
return ret;
}
*val = (unsigned int)ret;
return 0;
}
static int xrs700x_mdio_reg_write(void *context, unsigned int reg,
unsigned int val)
{
struct mdio_device *mdiodev = context;
struct device *dev = &mdiodev->dev;
u16 uval;
int ret;
ret = mdiodev_write(mdiodev, XRS_MDIO_IBD, (u16)val);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
}
uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
}
uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_WRITE);
ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
}
return 0;
}
static const struct regmap_config xrs700x_mdio_regmap_config = {
.val_bits = 16,
.reg_stride = 2,
.reg_bits = 32,
.pad_bits = 0,
.write_flag_mask = 0,
.read_flag_mask = 0,
.reg_read = xrs700x_mdio_reg_read,
.reg_write = xrs700x_mdio_reg_write,
.max_register = XRS_VLAN(VLAN_N_VID - 1),
.cache_type = REGCACHE_NONE,
.reg_format_endian = REGMAP_ENDIAN_BIG,
.val_format_endian = REGMAP_ENDIAN_BIG
};
static int xrs700x_mdio_probe(struct mdio_device *mdiodev)
{
struct xrs700x *priv;
int ret;
priv = xrs700x_switch_alloc(&mdiodev->dev, mdiodev);
if (!priv)
return -ENOMEM;
priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, mdiodev,
&xrs700x_mdio_regmap_config);
if (IS_ERR(priv->regmap)) {
ret = PTR_ERR(priv->regmap);
dev_err(&mdiodev->dev, "Failed to initialize regmap: %d\n", ret);
return ret;
}
dev_set_drvdata(&mdiodev->dev, priv);
ret = xrs700x_switch_register(priv);
/* Main DSA driver may not be started yet. */
if (ret)
return ret;
return 0;
}
static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
{
struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
xrs700x_switch_remove(priv);
}
static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev)
{
struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
xrs700x_switch_shutdown(priv);
dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id __maybe_unused xrs700x_mdio_dt_ids[] = {
{ .compatible = "arrow,xrs7003e", .data = &xrs7003e_info },
{ .compatible = "arrow,xrs7003f", .data = &xrs7003f_info },
{ .compatible = "arrow,xrs7004e", .data = &xrs7004e_info },
{ .compatible = "arrow,xrs7004f", .data = &xrs7004f_info },
{},
};
MODULE_DEVICE_TABLE(of, xrs700x_mdio_dt_ids);
static struct mdio_driver xrs700x_mdio_driver = {
.mdiodrv.driver = {
.name = "xrs700x-mdio",
.of_match_table = of_match_ptr(xrs700x_mdio_dt_ids),
},
.probe = xrs700x_mdio_probe,
.remove = xrs700x_mdio_remove,
.shutdown = xrs700x_mdio_shutdown,
};
mdio_module_driver(xrs700x_mdio_driver);
MODULE_AUTHOR("George McCollister <[email protected]>");
MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA MDIO driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/xrs700x/xrs700x_mdio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 NovaTech LLC
* George McCollister <[email protected]>
*/
#include <net/dsa.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of.h>
#include <linux/netdev_features.h>
#include <linux/if_hsr.h>
#include "xrs700x.h"
#include "xrs700x_reg.h"
#define XRS700X_MIB_INTERVAL msecs_to_jiffies(3000)
#define XRS7000X_SUPPORTED_HSR_FEATURES \
(NETIF_F_HW_HSR_TAG_INS | NETIF_F_HW_HSR_TAG_RM | \
NETIF_F_HW_HSR_FWD | NETIF_F_HW_HSR_DUP)
#define XRS7003E_ID 0x100
#define XRS7003F_ID 0x101
#define XRS7004E_ID 0x200
#define XRS7004F_ID 0x201
const struct xrs700x_info xrs7003e_info = {XRS7003E_ID, "XRS7003E", 3};
EXPORT_SYMBOL(xrs7003e_info);
const struct xrs700x_info xrs7003f_info = {XRS7003F_ID, "XRS7003F", 3};
EXPORT_SYMBOL(xrs7003f_info);
const struct xrs700x_info xrs7004e_info = {XRS7004E_ID, "XRS7004E", 4};
EXPORT_SYMBOL(xrs7004e_info);
const struct xrs700x_info xrs7004f_info = {XRS7004F_ID, "XRS7004F", 4};
EXPORT_SYMBOL(xrs7004f_info);
struct xrs700x_regfield {
struct reg_field rf;
struct regmap_field **rmf;
};
struct xrs700x_mib {
unsigned int offset;
const char *name;
int stats64_offset;
};
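/* stats64_offset is the byte offset into struct rtnl_link_stats64 that a
 * counter feeds in xrs700x_read_port_counters(); -1 (XRS700X_MIB_ETHTOOL_ONLY)
 * marks counters exported only through ethtool -S.
 */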
#define XRS700X_MIB_ETHTOOL_ONLY(o, n) {o, n, -1}
#define XRS700X_MIB(o, n, m) {o, n, offsetof(struct rtnl_link_stats64, m)}
static const struct xrs700x_mib xrs700x_mibs[] = {
XRS700X_MIB(XRS_RX_GOOD_OCTETS_L, "rx_good_octets", rx_bytes),
XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_BAD_OCTETS_L, "rx_bad_octets"),
XRS700X_MIB(XRS_RX_UNICAST_L, "rx_unicast", rx_packets),
XRS700X_MIB(XRS_RX_BROADCAST_L, "rx_broadcast", rx_packets),
XRS700X_MIB(XRS_RX_MULTICAST_L, "rx_multicast", multicast),
XRS700X_MIB(XRS_RX_UNDERSIZE_L, "rx_undersize", rx_length_errors),
XRS700X_MIB(XRS_RX_FRAGMENTS_L, "rx_fragments", rx_length_errors),
XRS700X_MIB(XRS_RX_OVERSIZE_L, "rx_oversize", rx_length_errors),
XRS700X_MIB(XRS_RX_JABBER_L, "rx_jabber", rx_length_errors),
XRS700X_MIB(XRS_RX_ERR_L, "rx_err", rx_errors),
XRS700X_MIB(XRS_RX_CRC_L, "rx_crc", rx_crc_errors),
XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_64_L, "rx_64"),
XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_65_127_L, "rx_65_127"),
XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_128_255_L, "rx_128_255"),
XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_256_511_L, "rx_256_511"),
XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_512_1023_L, "rx_512_1023"),
XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_1024_1536_L, "rx_1024_1536"),
XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_HSR_PRP_L, "rx_hsr_prp"),
XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_WRONGLAN_L, "rx_wronglan"),
XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_DUPLICATE_L, "rx_duplicate"),
XRS700X_MIB(XRS_TX_OCTETS_L, "tx_octets", tx_bytes),
XRS700X_MIB(XRS_TX_UNICAST_L, "tx_unicast", tx_packets),
XRS700X_MIB(XRS_TX_BROADCAST_L, "tx_broadcast", tx_packets),
XRS700X_MIB(XRS_TX_MULTICAST_L, "tx_multicast", tx_packets),
XRS700X_MIB_ETHTOOL_ONLY(XRS_TX_HSR_PRP_L, "tx_hsr_prp"),
XRS700X_MIB(XRS_PRIQ_DROP_L, "priq_drop", tx_dropped),
XRS700X_MIB(XRS_EARLY_DROP_L, "early_drop", tx_dropped),
};
static const u8 eth_hsrsup_addr[ETH_ALEN] = {
0x01, 0x15, 0x4e, 0x00, 0x01, 0x00};
static void xrs700x_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
int i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
strscpy(data, xrs700x_mibs[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
}
static int xrs700x_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
return ARRAY_SIZE(xrs700x_mibs);
}
static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
{
struct xrs700x_port *p = &priv->ports[port];
struct rtnl_link_stats64 stats;
unsigned long flags;
int i;
memset(&stats, 0, sizeof(stats));
mutex_lock(&p->mib_mutex);
/* Capture counter values */
regmap_write(priv->regmap, XRS_CNT_CTRL(port), 1);
for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
unsigned int high = 0, low = 0, reg;
reg = xrs700x_mibs[i].offset + XRS_PORT_OFFSET * port;
regmap_read(priv->regmap, reg, &low);
regmap_read(priv->regmap, reg + 2, &high);
p->mib_data[i] += (high << 16) | low;
if (xrs700x_mibs[i].stats64_offset >= 0) {
u8 *s = (u8 *)&stats + xrs700x_mibs[i].stats64_offset;
*(u64 *)s += p->mib_data[i];
}
}
/* multicast must be added to rx_packets (which already includes
* unicast and broadcast)
*/
stats.rx_packets += stats.multicast;
flags = u64_stats_update_begin_irqsave(&p->syncp);
p->stats64 = stats;
u64_stats_update_end_irqrestore(&p->syncp, flags);
mutex_unlock(&p->mib_mutex);
}
static void xrs700x_mib_work(struct work_struct *work)
{
struct xrs700x *priv = container_of(work, struct xrs700x,
mib_work.work);
int i;
for (i = 0; i < priv->ds->num_ports; i++)
xrs700x_read_port_counters(priv, i);
schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
}
static void xrs700x_get_ethtool_stats(struct dsa_switch *ds, int port,
u64 *data)
{
struct xrs700x *priv = ds->priv;
struct xrs700x_port *p = &priv->ports[port];
xrs700x_read_port_counters(priv, port);
mutex_lock(&p->mib_mutex);
memcpy(data, p->mib_data, sizeof(*data) * ARRAY_SIZE(xrs700x_mibs));
mutex_unlock(&p->mib_mutex);
}
static void xrs700x_get_stats64(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s)
{
struct xrs700x *priv = ds->priv;
struct xrs700x_port *p = &priv->ports[port];
unsigned int start;
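/* Snapshot the stats consistently against the writer in
 * xrs700x_read_port_counters(): retry the copy if the sequence count
 * changed while we were reading.
 */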
do {
start = u64_stats_fetch_begin(&p->syncp);
*s = p->stats64;
} while (u64_stats_fetch_retry(&p->syncp, start));
}
static int xrs700x_setup_regmap_range(struct xrs700x *priv)
{
struct xrs700x_regfield regfields[] = {
{
.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 0, 1,
priv->ds->num_ports,
XRS_PORT_OFFSET),
.rmf = &priv->ps_forward
},
{
.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 2, 3,
priv->ds->num_ports,
XRS_PORT_OFFSET),
.rmf = &priv->ps_management
},
{
.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 4, 9,
priv->ds->num_ports,
XRS_PORT_OFFSET),
.rmf = &priv->ps_sel_speed
},
{
.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 10, 11,
priv->ds->num_ports,
XRS_PORT_OFFSET),
.rmf = &priv->ps_cur_speed
}
};
int i = 0;
for (; i < ARRAY_SIZE(regfields); i++) {
*regfields[i].rmf = devm_regmap_field_alloc(priv->dev,
priv->regmap,
regfields[i].rf);
if (IS_ERR(*regfields[i].rmf))
return PTR_ERR(*regfields[i].rmf);
}
return 0;
}
static enum dsa_tag_protocol xrs700x_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol m)
{
return DSA_TAG_PROTO_XRS700X;
}
static int xrs700x_reset(struct dsa_switch *ds)
{
struct xrs700x *priv = ds->priv;
unsigned int val;
int ret;
ret = regmap_write(priv->regmap, XRS_GENERAL, XRS_GENERAL_RESET);
if (ret)
goto error;
ret = regmap_read_poll_timeout(priv->regmap, XRS_GENERAL,
val, !(val & XRS_GENERAL_RESET),
10, 1000);
error:
if (ret) {
dev_err_ratelimited(priv->dev, "error resetting switch: %d\n",
ret);
}
return ret;
}
static void xrs700x_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
struct xrs700x *priv = ds->priv;
unsigned int bpdus = 1;
unsigned int val;
switch (state) {
case BR_STATE_DISABLED:
bpdus = 0;
fallthrough;
case BR_STATE_BLOCKING:
case BR_STATE_LISTENING:
val = XRS_PORT_DISABLED;
break;
case BR_STATE_LEARNING:
val = XRS_PORT_LEARNING;
break;
case BR_STATE_FORWARDING:
val = XRS_PORT_FORWARDING;
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
return;
}
regmap_fields_write(priv->ps_forward, port, val);
/* Enable/disable inbound policy added by xrs700x_port_add_bpdu_ipf()
* which allows BPDU forwarding to the CPU port when the front facing
* port is in disabled/learning state.
*/
regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 1, bpdus);
dev_dbg_ratelimited(priv->dev, "%s - port: %d, state: %u, val: 0x%x\n",
__func__, port, state, val);
}
/* Add an inbound policy filter which matches the BPDU destination MAC
* and forwards to the CPU port. Leave the policy disabled, it will be
* enabled as needed.
*/
static int xrs700x_port_add_bpdu_ipf(struct dsa_switch *ds, int port)
{
struct xrs700x *priv = ds->priv;
unsigned int val = 0;
int i = 0;
int ret;
/* Compare all 48 bits of the destination MAC address. */
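/* The compare length appears to sit above the per-policy enable bit
 * (bit 0, toggled from xrs700x_port_stp_state_set()), hence the << 2
 * shift; this is inferred from how the register is used here rather than
 * from a datasheet.
 */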
ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 48 << 2);
if (ret)
return ret;
/* match BPDU destination 01:80:c2:00:00:00 */
for (i = 0; i < sizeof(eth_stp_addr); i += 2) {
ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 0) + i,
eth_stp_addr[i] |
(eth_stp_addr[i + 1] << 8));
if (ret)
return ret;
}
/* Mirror BPDU to CPU port */
for (i = 0; i < ds->num_ports; i++) {
if (dsa_is_cpu_port(ds, i))
val |= BIT(i);
}
ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 0), val);
if (ret)
return ret;
ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 0), 0);
if (ret)
return ret;
return 0;
}
/* Add an inbound policy filter which matches the HSR/PRP supervision MAC
* range and forwards to the CPU port without discarding duplicates.
* This is required to correctly populate the HSR/PRP node_table.
* Leave the policy disabled, it will be enabled as needed.
*/
static int xrs700x_port_add_hsrsup_ipf(struct dsa_switch *ds, int port,
int fwdport)
{
struct xrs700x *priv = ds->priv;
unsigned int val = 0;
int i = 0;
int ret;
/* Compare 40 bits of the destination MAC address. */
ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 40 << 2);
if (ret)
return ret;
/* match HSR/PRP supervision destination 01:15:4e:00:01:XX */
for (i = 0; i < sizeof(eth_hsrsup_addr); i += 2) {
ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 1) + i,
eth_hsrsup_addr[i] |
(eth_hsrsup_addr[i + 1] << 8));
if (ret)
return ret;
}
/* Mirror HSR/PRP supervision to CPU port */
for (i = 0; i < ds->num_ports; i++) {
if (dsa_is_cpu_port(ds, i))
val |= BIT(i);
}
ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 1), val);
if (ret)
return ret;
if (fwdport >= 0)
val |= BIT(fwdport);
	/* Allow must be set to prevent duplicate discard */
ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 1), val);
if (ret)
return ret;
return 0;
}
static int xrs700x_port_setup(struct dsa_switch *ds, int port)
{
bool cpu_port = dsa_is_cpu_port(ds, port);
struct xrs700x *priv = ds->priv;
unsigned int val = 0;
int ret, i;
xrs700x_port_stp_state_set(ds, port, BR_STATE_DISABLED);
/* Disable forwarding to non-CPU ports */
for (i = 0; i < ds->num_ports; i++) {
if (!dsa_is_cpu_port(ds, i))
val |= BIT(i);
}
/* 1 = Disable forwarding to the port */
ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
if (ret)
return ret;
val = cpu_port ? XRS_PORT_MODE_MANAGEMENT : XRS_PORT_MODE_NORMAL;
ret = regmap_fields_write(priv->ps_management, port, val);
if (ret)
return ret;
if (!cpu_port) {
ret = xrs700x_port_add_bpdu_ipf(ds, port);
if (ret)
return ret;
}
return 0;
}
static int xrs700x_setup(struct dsa_switch *ds)
{
struct xrs700x *priv = ds->priv;
int ret, i;
ret = xrs700x_reset(ds);
if (ret)
return ret;
for (i = 0; i < ds->num_ports; i++) {
ret = xrs700x_port_setup(ds, i);
if (ret)
return ret;
}
schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
return 0;
}
static void xrs700x_teardown(struct dsa_switch *ds)
{
struct xrs700x *priv = ds->priv;
cancel_delayed_work_sync(&priv->mib_work);
}
static void xrs700x_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
switch (port) {
case 0:
__set_bit(PHY_INTERFACE_MODE_RMII,
config->supported_interfaces);
config->mac_capabilities = MAC_10FD | MAC_100FD;
break;
case 1:
case 2:
case 3:
phy_interface_set_rgmii(config->supported_interfaces);
config->mac_capabilities = MAC_10FD | MAC_100FD | MAC_1000FD;
break;
default:
dev_err(ds->dev, "Unsupported port: %i\n", port);
break;
}
}
static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode, phy_interface_t interface,
struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct xrs700x *priv = ds->priv;
unsigned int val;
switch (speed) {
case SPEED_1000:
val = XRS_PORT_SPEED_1000;
break;
case SPEED_100:
val = XRS_PORT_SPEED_100;
break;
case SPEED_10:
val = XRS_PORT_SPEED_10;
break;
default:
return;
}
regmap_fields_write(priv->ps_sel_speed, port, val);
dev_dbg_ratelimited(priv->dev, "%s: port: %d mode: %u speed: %u\n",
__func__, port, mode, speed);
}
static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
struct dsa_bridge bridge, bool join)
{
unsigned int i, cpu_mask = 0, mask = 0;
struct xrs700x *priv = ds->priv;
int ret;
for (i = 0; i < ds->num_ports; i++) {
if (dsa_is_cpu_port(ds, i))
continue;
cpu_mask |= BIT(i);
if (dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
mask |= BIT(i);
}
for (i = 0; i < ds->num_ports; i++) {
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* 1 = Disable forwarding to the port */
ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(i), mask);
if (ret)
return ret;
}
if (!join) {
ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port),
cpu_mask);
if (ret)
return ret;
}
return 0;
}
static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge, bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
return xrs700x_bridge_common(ds, port, bridge, true);
}
static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
xrs700x_bridge_common(ds, port, bridge, false);
}
static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
struct net_device *hsr)
{
unsigned int val = XRS_HSR_CFG_HSR_PRP;
struct dsa_port *partner = NULL, *dp;
struct xrs700x *priv = ds->priv;
struct net_device *slave;
int ret, i, hsr_pair[2];
enum hsr_version ver;
bool fwd = false;
ret = hsr_get_version(hsr, &ver);
if (ret)
return ret;
/* Only ports 1 and 2 can be HSR/PRP redundant ports. */
if (port != 1 && port != 2)
return -EOPNOTSUPP;
if (ver == HSR_V1)
val |= XRS_HSR_CFG_HSR;
else if (ver == PRP_V1)
val |= XRS_HSR_CFG_PRP;
else
return -EOPNOTSUPP;
dsa_hsr_foreach_port(dp, ds, hsr) {
if (dp->index != port) {
partner = dp;
break;
}
}
/* We can't enable redundancy on the switch until both
* redundant ports have signed up.
*/
if (!partner)
return 0;
regmap_fields_write(priv->ps_forward, partner->index,
XRS_PORT_DISABLED);
regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);
regmap_write(priv->regmap, XRS_HSR_CFG(partner->index),
val | XRS_HSR_CFG_LANID_A);
regmap_write(priv->regmap, XRS_HSR_CFG(port),
val | XRS_HSR_CFG_LANID_B);
/* Clear bits for both redundant ports (HSR only) and the CPU port to
* enable forwarding.
*/
val = GENMASK(ds->num_ports - 1, 0);
if (ver == HSR_V1) {
val &= ~BIT(partner->index);
val &= ~BIT(port);
fwd = true;
}
val &= ~BIT(dsa_upstream_port(ds, port));
regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
regmap_fields_write(priv->ps_forward, partner->index,
XRS_PORT_FORWARDING);
regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);
/* Enable inbound policy which allows HSR/PRP supervision forwarding
* to the CPU port without discarding duplicates. Continue to
* forward to redundant ports when in HSR mode while discarding
* duplicates.
*/
ret = xrs700x_port_add_hsrsup_ipf(ds, partner->index, fwd ? port : -1);
if (ret)
return ret;
ret = xrs700x_port_add_hsrsup_ipf(ds, port, fwd ? partner->index : -1);
if (ret)
return ret;
regmap_update_bits(priv->regmap,
XRS_ETH_ADDR_CFG(partner->index, 1), 1, 1);
regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 1);
hsr_pair[0] = port;
hsr_pair[1] = partner->index;
for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
slave = dsa_to_port(ds, hsr_pair[i])->slave;
slave->features |= XRS7000X_SUPPORTED_HSR_FEATURES;
}
return 0;
}
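/* Worked example (illustrative only, not part of the driver; assumes a
 * 4-port XRS7004 whose CPU/upstream port is port 0): with HSR on ports
 * 1 and 2, val starts at GENMASK(3, 0) = 0xf, HSR_V1 clears bits 1 and
 * 2, and clearing the upstream port leaves 0x8. Since 1 means "disable
 * forwarding to the port" (see xrs700x_port_setup()), forwarding from
 * the ring ports is then blocked only towards port 3.
 */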
static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
struct net_device *hsr)
{
struct dsa_port *partner = NULL, *dp;
struct xrs700x *priv = ds->priv;
struct net_device *slave;
int i, hsr_pair[2];
unsigned int val;
dsa_hsr_foreach_port(dp, ds, hsr) {
if (dp->index != port) {
partner = dp;
break;
}
}
if (!partner)
return 0;
regmap_fields_write(priv->ps_forward, partner->index,
XRS_PORT_DISABLED);
regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);
regmap_write(priv->regmap, XRS_HSR_CFG(partner->index), 0);
regmap_write(priv->regmap, XRS_HSR_CFG(port), 0);
/* Clear bit for the CPU port to enable forwarding. */
val = GENMASK(ds->num_ports - 1, 0);
val &= ~BIT(dsa_upstream_port(ds, port));
regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
regmap_fields_write(priv->ps_forward, partner->index,
XRS_PORT_FORWARDING);
regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);
/* Disable inbound policy added by xrs700x_port_add_hsrsup_ipf()
* which allows HSR/PRP supervision forwarding to the CPU port without
* discarding duplicates.
*/
regmap_update_bits(priv->regmap,
XRS_ETH_ADDR_CFG(partner->index, 1), 1, 0);
regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 0);
hsr_pair[0] = port;
hsr_pair[1] = partner->index;
for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
slave = dsa_to_port(ds, hsr_pair[i])->slave;
slave->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES;
}
return 0;
}
static const struct dsa_switch_ops xrs700x_ops = {
.get_tag_protocol = xrs700x_get_tag_protocol,
.setup = xrs700x_setup,
.teardown = xrs700x_teardown,
.port_stp_state_set = xrs700x_port_stp_state_set,
.phylink_get_caps = xrs700x_phylink_get_caps,
.phylink_mac_link_up = xrs700x_mac_link_up,
.get_strings = xrs700x_get_strings,
.get_sset_count = xrs700x_get_sset_count,
.get_ethtool_stats = xrs700x_get_ethtool_stats,
.get_stats64 = xrs700x_get_stats64,
.port_bridge_join = xrs700x_bridge_join,
.port_bridge_leave = xrs700x_bridge_leave,
.port_hsr_join = xrs700x_hsr_join,
.port_hsr_leave = xrs700x_hsr_leave,
};
static int xrs700x_detect(struct xrs700x *priv)
{
const struct xrs700x_info *info;
unsigned int id;
int ret;
ret = regmap_read(priv->regmap, XRS_DEV_ID0, &id);
if (ret) {
dev_err(priv->dev, "error %d while reading switch id.\n",
ret);
return ret;
}
info = of_device_get_match_data(priv->dev);
if (!info)
return -EINVAL;
if (info->id == id) {
priv->ds->num_ports = info->num_ports;
dev_info(priv->dev, "%s detected.\n", info->name);
return 0;
}
dev_err(priv->dev, "expected switch id 0x%x but found 0x%x.\n",
info->id, id);
return -ENODEV;
}
struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv)
{
struct dsa_switch *ds;
struct xrs700x *priv;
ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
if (!ds)
return NULL;
ds->dev = base;
priv = devm_kzalloc(base, sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work);
ds->ops = &xrs700x_ops;
ds->priv = priv;
priv->dev = base;
priv->ds = ds;
priv->priv = devpriv;
return priv;
}
EXPORT_SYMBOL(xrs700x_switch_alloc);
static int xrs700x_alloc_port_mib(struct xrs700x *priv, int port)
{
struct xrs700x_port *p = &priv->ports[port];
p->mib_data = devm_kcalloc(priv->dev, ARRAY_SIZE(xrs700x_mibs),
sizeof(*p->mib_data), GFP_KERNEL);
if (!p->mib_data)
return -ENOMEM;
mutex_init(&p->mib_mutex);
u64_stats_init(&p->syncp);
return 0;
}
int xrs700x_switch_register(struct xrs700x *priv)
{
int ret;
int i;
ret = xrs700x_detect(priv);
if (ret)
return ret;
ret = xrs700x_setup_regmap_range(priv);
if (ret)
return ret;
priv->ports = devm_kcalloc(priv->dev, priv->ds->num_ports,
sizeof(*priv->ports), GFP_KERNEL);
if (!priv->ports)
return -ENOMEM;
for (i = 0; i < priv->ds->num_ports; i++) {
ret = xrs700x_alloc_port_mib(priv, i);
if (ret)
return ret;
}
return dsa_register_switch(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_register);
void xrs700x_switch_remove(struct xrs700x *priv)
{
dsa_unregister_switch(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_remove);
void xrs700x_switch_shutdown(struct xrs700x *priv)
{
dsa_switch_shutdown(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_shutdown);
MODULE_AUTHOR("George McCollister <[email protected]>");
MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/xrs700x/xrs700x.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 NovaTech LLC
* George McCollister <[email protected]>
*/
#include <linux/bits.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include "xrs700x.h"
#include "xrs700x_reg.h"
struct xrs700x_i2c_cmd {
__be32 reg;
__be16 val;
} __packed;
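/* Descriptive note: on the wire this is a 6-byte command, a 4-byte
 * big-endian register address followed by a 2-byte big-endian value.
 * The write path below sends the whole structure; the read path sends
 * only the address and then reads the 2-byte value back.
 */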
static int xrs700x_i2c_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
struct xrs700x_i2c_cmd cmd;
int ret;
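	/* The low bit of the register address distinguishes this read
	 * command from a write; the write path below leaves it clear.
	 */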
cmd.reg = cpu_to_be32(reg | 1);
ret = i2c_master_send(i2c, (char *)&cmd.reg, sizeof(cmd.reg));
if (ret < 0) {
dev_err(dev, "xrs i2c_master_send returned %d\n", ret);
return ret;
}
ret = i2c_master_recv(i2c, (char *)&cmd.val, sizeof(cmd.val));
if (ret < 0) {
dev_err(dev, "xrs i2c_master_recv returned %d\n", ret);
return ret;
}
*val = be16_to_cpu(cmd.val);
return 0;
}
static int xrs700x_i2c_reg_write(void *context, unsigned int reg,
unsigned int val)
{
struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
struct xrs700x_i2c_cmd cmd;
int ret;
cmd.reg = cpu_to_be32(reg);
cmd.val = cpu_to_be16(val);
ret = i2c_master_send(i2c, (char *)&cmd, sizeof(cmd));
if (ret < 0) {
dev_err(dev, "xrs i2c_master_send returned %d\n", ret);
return ret;
}
return 0;
}
static const struct regmap_config xrs700x_i2c_regmap_config = {
.val_bits = 16,
.reg_stride = 2,
.reg_bits = 32,
.pad_bits = 0,
.write_flag_mask = 0,
.read_flag_mask = 0,
.reg_read = xrs700x_i2c_reg_read,
.reg_write = xrs700x_i2c_reg_write,
.max_register = 0,
.cache_type = REGCACHE_NONE,
.reg_format_endian = REGMAP_ENDIAN_BIG,
.val_format_endian = REGMAP_ENDIAN_BIG
};
static int xrs700x_i2c_probe(struct i2c_client *i2c)
{
struct xrs700x *priv;
int ret;
priv = xrs700x_switch_alloc(&i2c->dev, i2c);
if (!priv)
return -ENOMEM;
priv->regmap = devm_regmap_init(&i2c->dev, NULL, &i2c->dev,
&xrs700x_i2c_regmap_config);
if (IS_ERR(priv->regmap)) {
ret = PTR_ERR(priv->regmap);
dev_err(&i2c->dev, "Failed to initialize regmap: %d\n", ret);
return ret;
}
i2c_set_clientdata(i2c, priv);
ret = xrs700x_switch_register(priv);
/* Main DSA driver may not be started yet. */
if (ret)
return ret;
return 0;
}
static void xrs700x_i2c_remove(struct i2c_client *i2c)
{
struct xrs700x *priv = i2c_get_clientdata(i2c);
if (!priv)
return;
xrs700x_switch_remove(priv);
}
static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
{
struct xrs700x *priv = i2c_get_clientdata(i2c);
if (!priv)
return;
xrs700x_switch_shutdown(priv);
i2c_set_clientdata(i2c, NULL);
}
static const struct i2c_device_id xrs700x_i2c_id[] = {
{ "xrs700x-switch", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, xrs700x_i2c_id);
static const struct of_device_id __maybe_unused xrs700x_i2c_dt_ids[] = {
{ .compatible = "arrow,xrs7003e", .data = &xrs7003e_info },
{ .compatible = "arrow,xrs7003f", .data = &xrs7003f_info },
{ .compatible = "arrow,xrs7004e", .data = &xrs7004e_info },
{ .compatible = "arrow,xrs7004f", .data = &xrs7004f_info },
{},
};
MODULE_DEVICE_TABLE(of, xrs700x_i2c_dt_ids);
static struct i2c_driver xrs700x_i2c_driver = {
.driver = {
.name = "xrs700x-i2c",
.of_match_table = of_match_ptr(xrs700x_i2c_dt_ids),
},
.probe = xrs700x_i2c_probe,
.remove = xrs700x_i2c_remove,
.shutdown = xrs700x_i2c_shutdown,
.id_table = xrs700x_i2c_id,
};
module_i2c_driver(xrs700x_i2c_driver);
MODULE_AUTHOR("George McCollister <[email protected]>");
MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA I2C driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/xrs700x/xrs700x_i2c.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6xxx System Management Interface (SMI) support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2019 Vivien Didelot <[email protected]>
*/
#include "chip.h"
#include "smi.h"
/* The switch ADDR[4:1] configuration pins define the chip SMI device address
* (ADDR[0] is always zero, thus only even SMI addresses can be strapped).
*
* When ADDR is all zero, the chip uses Single-chip Addressing Mode, assuming it
* is the only device connected to the SMI master. In this mode it responds to
* all 32 possible SMI addresses, and thus maps directly the internal devices.
*
* When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing
* multiple devices to share the SMI interface. In this mode it responds to only
* 2 registers, used to indirectly access the internal SMI devices.
*
* Some chips use a different scheme: Only the ADDR4 pin is used for
* configuration, and the device responds to 16 of the 32 SMI
* addresses, allowing two to coexist on the same SMI interface.
*/
static int mv88e6xxx_smi_direct_read(struct mv88e6xxx_chip *chip,
int dev, int reg, u16 *data)
{
int ret;
ret = mdiobus_read_nested(chip->bus, dev, reg);
if (ret < 0)
return ret;
*data = ret & 0xffff;
return 0;
}
static int mv88e6xxx_smi_direct_write(struct mv88e6xxx_chip *chip,
int dev, int reg, u16 data)
{
int ret;
ret = mdiobus_write_nested(chip->bus, dev, reg, data);
if (ret < 0)
return ret;
return 0;
}
static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
int dev, int reg, int bit, int val)
{
const unsigned long timeout = jiffies + msecs_to_jiffies(50);
u16 data;
int err;
int i;
/* Even if the initial poll takes longer than 50ms, always do
* at least one more attempt.
*/
for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
err = mv88e6xxx_smi_direct_read(chip, dev, reg, &data);
if (err)
return err;
if (!!(data & BIT(bit)) == !!val)
return 0;
if (i < 2)
cpu_relax();
else
usleep_range(1000, 2000);
}
return -ETIMEDOUT;
}
static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_direct_ops = {
.read = mv88e6xxx_smi_direct_read,
.write = mv88e6xxx_smi_direct_write,
};
static int mv88e6xxx_smi_dual_direct_read(struct mv88e6xxx_chip *chip,
int dev, int reg, u16 *data)
{
return mv88e6xxx_smi_direct_read(chip, chip->sw_addr + dev, reg, data);
}
static int mv88e6xxx_smi_dual_direct_write(struct mv88e6xxx_chip *chip,
int dev, int reg, u16 data)
{
return mv88e6xxx_smi_direct_write(chip, chip->sw_addr + dev, reg, data);
}
static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_dual_direct_ops = {
.read = mv88e6xxx_smi_dual_direct_read,
.write = mv88e6xxx_smi_dual_direct_write,
};
/* Offset 0x00: SMI Command Register
* Offset 0x01: SMI Data Register
*/
static int mv88e6xxx_smi_indirect_read(struct mv88e6xxx_chip *chip,
int dev, int reg, u16 *data)
{
int err;
err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
MV88E6XXX_SMI_CMD,
MV88E6XXX_SMI_CMD_BUSY |
MV88E6XXX_SMI_CMD_MODE_22 |
MV88E6XXX_SMI_CMD_OP_22_READ |
(dev << 5) | reg);
if (err)
return err;
err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
MV88E6XXX_SMI_CMD, 15, 0);
if (err)
return err;
return mv88e6xxx_smi_direct_read(chip, chip->sw_addr,
MV88E6XXX_SMI_DATA, data);
}
static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
int dev, int reg, u16 data)
{
int err;
err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
MV88E6XXX_SMI_DATA, data);
if (err)
return err;
err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
MV88E6XXX_SMI_CMD,
MV88E6XXX_SMI_CMD_BUSY |
MV88E6XXX_SMI_CMD_MODE_22 |
MV88E6XXX_SMI_CMD_OP_22_WRITE |
(dev << 5) | reg);
if (err)
return err;
return mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
MV88E6XXX_SMI_CMD, 15, 0);
}
static int mv88e6xxx_smi_indirect_init(struct mv88e6xxx_chip *chip)
{
/* Ensure that the chip starts out in the ready state. As both
* reads and writes always ensure this on return, they can
* safely depend on the chip not being busy on entry.
*/
return mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
MV88E6XXX_SMI_CMD, 15, 0);
}
static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_indirect_ops = {
.read = mv88e6xxx_smi_indirect_read,
.write = mv88e6xxx_smi_indirect_write,
.init = mv88e6xxx_smi_indirect_init,
};
int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
struct mii_bus *bus, int sw_addr)
{
if (chip->info->dual_chip)
chip->smi_ops = &mv88e6xxx_smi_dual_direct_ops;
else if (sw_addr == 0)
chip->smi_ops = &mv88e6xxx_smi_direct_ops;
else if (chip->info->multi_chip)
chip->smi_ops = &mv88e6xxx_smi_indirect_ops;
else
return -EINVAL;
chip->bus = bus;
chip->sw_addr = sw_addr;
if (chip->smi_ops->init)
return chip->smi_ops->init(chip);
return 0;
}
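/* Illustrative usage sketch (not part of the driver): once
 * mv88e6xxx_smi_init() has picked the direct, dual-direct or indirect
 * bus ops, all register traffic goes through the mv88e6xxx_smi_read()
 * and mv88e6xxx_smi_write() helpers declared in smi.h, as used by
 * mv88e6xxx_read()/mv88e6xxx_write() in chip.c. The device address
 * (0x1b) and register (0x00) below are example values only.
 */
#if 0
static int example_smi_access(struct mv88e6xxx_chip *chip,
			      struct mii_bus *bus, int sw_addr)
{
	u16 val;
	int err;

	err = mv88e6xxx_smi_init(chip, bus, sw_addr);
	if (err)
		return err;

	return mv88e6xxx_smi_read(chip, 0x1b, 0x00, &val);
}
#endif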
| linux-master | drivers/net/dsa/mv88e6xxx/smi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright 2022 NXP
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
| linux-master | drivers/net/dsa/mv88e6xxx/trace.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* switchdev.c
*
* Authors:
* Hans J. Schultz <[email protected]>
*
*/
#include <net/switchdev.h>
#include "chip.h"
#include "global1.h"
#include "switchdev.h"
struct mv88e6xxx_fid_search_ctx {
u16 fid_search;
u16 vid_found;
};
static int __mv88e6xxx_find_vid(struct mv88e6xxx_chip *chip,
const struct mv88e6xxx_vtu_entry *entry,
void *priv)
{
struct mv88e6xxx_fid_search_ctx *ctx = priv;
if (ctx->fid_search == entry->fid) {
ctx->vid_found = entry->vid;
return 1;
}
return 0;
}
static int mv88e6xxx_find_vid(struct mv88e6xxx_chip *chip, u16 fid, u16 *vid)
{
struct mv88e6xxx_fid_search_ctx ctx;
int err;
ctx.fid_search = fid;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_vtu_walk(chip, __mv88e6xxx_find_vid, &ctx);
mv88e6xxx_reg_unlock(chip);
if (err < 0)
return err;
if (err == 1)
*vid = ctx.vid_found;
else
return -ENOENT;
return 0;
}
int mv88e6xxx_handle_miss_violation(struct mv88e6xxx_chip *chip, int port,
struct mv88e6xxx_atu_entry *entry, u16 fid)
{
struct switchdev_notifier_fdb_info info = {
.addr = entry->mac,
.locked = true,
};
struct net_device *brport;
struct dsa_port *dp;
u16 vid;
int err;
err = mv88e6xxx_find_vid(chip, fid, &vid);
if (err)
return err;
info.vid = vid;
dp = dsa_to_port(chip->ds, port);
rtnl_lock();
brport = dsa_port_to_bridge_port(dp);
if (!brport) {
rtnl_unlock();
return -ENODEV;
}
err = call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
brport, &info.info, NULL);
rtnl_unlock();
return err;
}
| linux-master | drivers/net/dsa/mv88e6xxx/switchdev.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88e6xxx Ethernet switch single-chip support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2016 Andrew Lunn <[email protected]>
*
* Copyright (c) 2016-2017 Savoir-faire Linux Inc.
* Vivien Didelot <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dsa/mv88e6xxx.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/platform_data/mv88e6xxx.h>
#include <linux/netdevice.h>
#include <linux/gpio/consumer.h>
#include <linux/phylink.h>
#include <net/dsa.h>
#include "chip.h"
#include "devlink.h"
#include "global1.h"
#include "global2.h"
#include "hwtstamp.h"
#include "phy.h"
#include "port.h"
#include "ptp.h"
#include "serdes.h"
#include "smi.h"
static void assert_reg_lock(struct mv88e6xxx_chip *chip)
{
if (unlikely(!mutex_is_locked(&chip->reg_lock))) {
dev_err(chip->dev, "Switch registers lock not held!\n");
dump_stack();
}
}
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val)
{
int err;
assert_reg_lock(chip);
err = mv88e6xxx_smi_read(chip, addr, reg, val);
if (err)
return err;
dev_dbg(chip->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
addr, reg, *val);
return 0;
}
int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
{
int err;
assert_reg_lock(chip);
err = mv88e6xxx_smi_write(chip, addr, reg, val);
if (err)
return err;
dev_dbg(chip->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
addr, reg, val);
return 0;
}
int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
u16 mask, u16 val)
{
const unsigned long timeout = jiffies + msecs_to_jiffies(50);
u16 data;
int err;
int i;
/* There's no bus specific operation to wait for a mask. Even
* if the initial poll takes longer than 50ms, always do at
* least one more attempt.
*/
for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
err = mv88e6xxx_read(chip, addr, reg, &data);
if (err)
return err;
if ((data & mask) == val)
return 0;
if (i < 2)
cpu_relax();
else
usleep_range(1000, 2000);
}
err = mv88e6xxx_read(chip, addr, reg, &data);
if (err)
return err;
if ((data & mask) == val)
return 0;
dev_err(chip->dev, "Timeout while waiting for switch\n");
return -ETIMEDOUT;
}
int mv88e6xxx_wait_bit(struct mv88e6xxx_chip *chip, int addr, int reg,
int bit, int val)
{
return mv88e6xxx_wait_mask(chip, addr, reg, BIT(bit),
val ? BIT(bit) : 0x0000);
}
struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
{
struct mv88e6xxx_mdio_bus *mdio_bus;
mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
list);
if (!mdio_bus)
return NULL;
return mdio_bus->bus;
}
static void mv88e6xxx_g1_irq_mask(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
unsigned int n = d->hwirq;
chip->g1_irq.masked |= (1 << n);
}
static void mv88e6xxx_g1_irq_unmask(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
unsigned int n = d->hwirq;
chip->g1_irq.masked &= ~(1 << n);
}
static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
{
unsigned int nhandled = 0;
unsigned int sub_irq;
unsigned int n;
u16 reg;
u16 ctl1;
int err;
mv88e6xxx_reg_lock(chip);
	err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
do {
for (n = 0; n < chip->g1_irq.nirqs; ++n) {
if (reg & (1 << n)) {
sub_irq = irq_find_mapping(chip->g1_irq.domain,
n);
handle_nested_irq(sub_irq);
++nhandled;
}
}
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
if (err)
goto unlock;
		err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
unlock:
mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
} while (reg & ctl1);
out:
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
static irqreturn_t mv88e6xxx_g1_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
return mv88e6xxx_g1_irq_thread_work(chip);
}
static void mv88e6xxx_g1_irq_bus_lock(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
mv88e6xxx_reg_lock(chip);
}
static void mv88e6xxx_g1_irq_bus_sync_unlock(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
u16 mask = GENMASK(chip->g1_irq.nirqs, 0);
u16 reg;
int err;
	err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &reg);
if (err)
goto out;
reg &= ~mask;
reg |= (~chip->g1_irq.masked & mask);
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, reg);
if (err)
goto out;
out:
mv88e6xxx_reg_unlock(chip);
}
static const struct irq_chip mv88e6xxx_g1_irq_chip = {
.name = "mv88e6xxx-g1",
.irq_mask = mv88e6xxx_g1_irq_mask,
.irq_unmask = mv88e6xxx_g1_irq_unmask,
.irq_bus_lock = mv88e6xxx_g1_irq_bus_lock,
.irq_bus_sync_unlock = mv88e6xxx_g1_irq_bus_sync_unlock,
};
static int mv88e6xxx_g1_irq_domain_map(struct irq_domain *d,
unsigned int irq,
irq_hw_number_t hwirq)
{
struct mv88e6xxx_chip *chip = d->host_data;
irq_set_chip_data(irq, d->host_data);
irq_set_chip_and_handler(irq, &chip->g1_irq.chip, handle_level_irq);
irq_set_noprobe(irq);
return 0;
}
static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
.map = mv88e6xxx_g1_irq_domain_map,
.xlate = irq_domain_xlate_twocell,
};
/* To be called with reg_lock held */
static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
{
int irq, virq;
u16 mask;
mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask);
mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
for (irq = 0; irq < chip->g1_irq.nirqs; irq++) {
virq = irq_find_mapping(chip->g1_irq.domain, irq);
irq_dispose_mapping(virq);
}
irq_domain_remove(chip->g1_irq.domain);
}
static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
{
/*
* free_irq must be called without reg_lock taken because the irq
* handler takes this lock, too.
*/
free_irq(chip->irq, chip);
mv88e6xxx_reg_lock(chip);
mv88e6xxx_g1_irq_free_common(chip);
mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
{
int err, irq, virq;
u16 reg, mask;
chip->g1_irq.nirqs = chip->info->g1_irqs;
chip->g1_irq.domain = irq_domain_add_simple(
NULL, chip->g1_irq.nirqs, 0,
&mv88e6xxx_g1_irq_domain_ops, chip);
if (!chip->g1_irq.domain)
return -ENOMEM;
for (irq = 0; irq < chip->g1_irq.nirqs; irq++)
irq_create_mapping(chip->g1_irq.domain, irq);
chip->g1_irq.chip = mv88e6xxx_g1_irq_chip;
chip->g1_irq.masked = ~0;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask);
if (err)
goto out_mapping;
mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
if (err)
goto out_disable;
/* Reading the interrupt status clears (most of) them */
	err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
if (err)
goto out_disable;
return 0;
out_disable:
mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
out_mapping:
for (irq = 0; irq < 16; irq++) {
virq = irq_find_mapping(chip->g1_irq.domain, irq);
irq_dispose_mapping(virq);
}
irq_domain_remove(chip->g1_irq.domain);
return err;
}
static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
{
static struct lock_class_key lock_key;
static struct lock_class_key request_key;
int err;
err = mv88e6xxx_g1_irq_setup_common(chip);
if (err)
return err;
	/* These lock classes tell lockdep that global 1 irqs are in
* a different category than their parent GPIO, so it won't
* report false recursion.
*/
irq_set_lockdep_class(chip->irq, &lock_key, &request_key);
snprintf(chip->irq_name, sizeof(chip->irq_name),
"mv88e6xxx-%s", dev_name(chip->dev));
mv88e6xxx_reg_unlock(chip);
err = request_threaded_irq(chip->irq, NULL,
mv88e6xxx_g1_irq_thread_fn,
IRQF_ONESHOT | IRQF_SHARED,
chip->irq_name, chip);
mv88e6xxx_reg_lock(chip);
if (err)
mv88e6xxx_g1_irq_free_common(chip);
return err;
}
static void mv88e6xxx_irq_poll(struct kthread_work *work)
{
struct mv88e6xxx_chip *chip = container_of(work,
struct mv88e6xxx_chip,
irq_poll_work.work);
mv88e6xxx_g1_irq_thread_work(chip);
kthread_queue_delayed_work(chip->kworker, &chip->irq_poll_work,
msecs_to_jiffies(100));
}
static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
{
int err;
err = mv88e6xxx_g1_irq_setup_common(chip);
if (err)
return err;
kthread_init_delayed_work(&chip->irq_poll_work,
mv88e6xxx_irq_poll);
chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev));
if (IS_ERR(chip->kworker))
return PTR_ERR(chip->kworker);
kthread_queue_delayed_work(chip->kworker, &chip->irq_poll_work,
msecs_to_jiffies(100));
return 0;
}
static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
{
kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
kthread_destroy_worker(chip->kworker);
mv88e6xxx_reg_lock(chip);
mv88e6xxx_g1_irq_free_common(chip);
mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_port_config_interface(struct mv88e6xxx_chip *chip,
int port, phy_interface_t interface)
{
int err;
if (chip->info->ops->port_set_rgmii_delay) {
err = chip->info->ops->port_set_rgmii_delay(chip, port,
interface);
if (err && err != -EOPNOTSUPP)
return err;
}
if (chip->info->ops->port_set_cmode) {
err = chip->info->ops->port_set_cmode(chip, port,
interface);
if (err && err != -EOPNOTSUPP)
return err;
}
return 0;
}
static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
int link, int speed, int duplex, int pause,
phy_interface_t mode)
{
int err;
if (!chip->info->ops->port_set_link)
return 0;
/* Port's MAC control must not be changed unless the link is down */
err = chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
if (err)
return err;
if (chip->info->ops->port_set_speed_duplex) {
err = chip->info->ops->port_set_speed_duplex(chip, port,
speed, duplex);
if (err && err != -EOPNOTSUPP)
goto restore_link;
}
if (chip->info->ops->port_set_pause) {
err = chip->info->ops->port_set_pause(chip, port, pause);
if (err)
goto restore_link;
}
err = mv88e6xxx_port_config_interface(chip, port, mode);
restore_link:
if (chip->info->ops->port_set_link(chip, port, link))
dev_err(chip->dev, "p%d: failed to restore MAC's link\n", port);
return err;
}
static int mv88e6xxx_phy_is_internal(struct mv88e6xxx_chip *chip, int port)
{
return port >= chip->info->internal_phys_offset &&
port < chip->info->num_internal_phys +
chip->info->internal_phys_offset;
}
static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
{
u16 reg;
int err;
/* The 88e6250 family does not have the PHY detect bit. Instead,
* report whether the port is internal.
*/
if (chip->info->family == MV88E6XXX_FAMILY_6250)
return mv88e6xxx_phy_is_internal(chip, port);
	err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err) {
dev_err(chip->dev,
"p%d: %s: failed to read port status\n",
port, __func__);
return err;
}
return !!(reg & MV88E6XXX_PORT_STS_PHY_DETECT);
}
static const u8 mv88e6185_phy_interface_modes[] = {
[MV88E6185_PORT_STS_CMODE_GMII_FD] = PHY_INTERFACE_MODE_GMII,
[MV88E6185_PORT_STS_CMODE_MII_100_FD_PS] = PHY_INTERFACE_MODE_MII,
[MV88E6185_PORT_STS_CMODE_MII_100] = PHY_INTERFACE_MODE_MII,
[MV88E6185_PORT_STS_CMODE_MII_10] = PHY_INTERFACE_MODE_MII,
[MV88E6185_PORT_STS_CMODE_SERDES] = PHY_INTERFACE_MODE_1000BASEX,
[MV88E6185_PORT_STS_CMODE_1000BASE_X] = PHY_INTERFACE_MODE_1000BASEX,
[MV88E6185_PORT_STS_CMODE_PHY] = PHY_INTERFACE_MODE_SGMII,
};
static void mv88e6095_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
u8 cmode = chip->ports[port].cmode;
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
if (mv88e6xxx_phy_is_internal(chip, port)) {
__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
} else {
if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
mv88e6185_phy_interface_modes[cmode])
__set_bit(mv88e6185_phy_interface_modes[cmode],
config->supported_interfaces);
config->mac_capabilities |= MAC_1000FD;
}
}
static void mv88e6185_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
u8 cmode = chip->ports[port].cmode;
if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
mv88e6185_phy_interface_modes[cmode])
__set_bit(mv88e6185_phy_interface_modes[cmode],
config->supported_interfaces);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
MAC_1000FD;
}
static const u8 mv88e6xxx_phy_interface_modes[] = {
[MV88E6XXX_PORT_STS_CMODE_MII_PHY] = PHY_INTERFACE_MODE_REVMII,
[MV88E6XXX_PORT_STS_CMODE_MII] = PHY_INTERFACE_MODE_MII,
[MV88E6XXX_PORT_STS_CMODE_GMII] = PHY_INTERFACE_MODE_GMII,
[MV88E6XXX_PORT_STS_CMODE_RMII_PHY] = PHY_INTERFACE_MODE_REVRMII,
[MV88E6XXX_PORT_STS_CMODE_RMII] = PHY_INTERFACE_MODE_RMII,
[MV88E6XXX_PORT_STS_CMODE_100BASEX] = PHY_INTERFACE_MODE_100BASEX,
[MV88E6XXX_PORT_STS_CMODE_1000BASEX] = PHY_INTERFACE_MODE_1000BASEX,
[MV88E6XXX_PORT_STS_CMODE_SGMII] = PHY_INTERFACE_MODE_SGMII,
/* higher interface modes are not needed here, since ports supporting
* them are writable, and so the supported interfaces are filled in the
	 * corresponding .phylink_get_caps() implementation below
*/
};
static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
{
if (cmode < ARRAY_SIZE(mv88e6xxx_phy_interface_modes) &&
mv88e6xxx_phy_interface_modes[cmode])
__set_bit(mv88e6xxx_phy_interface_modes[cmode], supported);
else if (cmode == MV88E6XXX_PORT_STS_CMODE_RGMII)
phy_interface_set_rgmii(supported);
}
static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
/* Translate the default cmode */
mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
{
u16 reg, val;
int err;
	err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &reg);
if (err)
return err;
/* If PHY_DETECT is zero, then we are not in auto-media mode */
if (!(reg & MV88E6XXX_PORT_STS_PHY_DETECT))
return 0xf;
val = reg & ~MV88E6XXX_PORT_STS_PHY_DETECT;
err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, val);
if (err)
return err;
err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &val);
if (err)
return err;
/* Restore PHY_DETECT value */
err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, reg);
if (err)
return err;
return val & MV88E6XXX_PORT_STS_CMODE_MASK;
}
static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
int err, cmode;
/* Translate the default cmode */
mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
MAC_1000FD;
/* Port 4 supports automedia if the serdes is associated with it. */
if (port == 4) {
err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
if (err < 0)
dev_err(chip->dev, "p%d: failed to read scratch\n",
port);
if (err <= 0)
return;
cmode = mv88e6352_get_port4_serdes_cmode(chip);
if (cmode < 0)
dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
port);
else
mv88e6xxx_translate_cmode(cmode, supported);
}
}
static void mv88e6341_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
/* Translate the default cmode */
mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
/* No ethtool bits for 200Mbps */
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
MAC_1000FD;
/* The C_Mode field is programmable on port 5 */
if (port == 5) {
__set_bit(PHY_INTERFACE_MODE_SGMII, supported);
__set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
__set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
config->mac_capabilities |= MAC_2500FD;
}
}
static void mv88e6390_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
/* Translate the default cmode */
mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
/* No ethtool bits for 200Mbps */
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
MAC_1000FD;
/* The C_Mode field is programmable on ports 9 and 10 */
if (port == 9 || port == 10) {
__set_bit(PHY_INTERFACE_MODE_SGMII, supported);
__set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
__set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
config->mac_capabilities |= MAC_2500FD;
}
}
static void mv88e6390x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
mv88e6390_phylink_get_caps(chip, port, config);
/* For the 6x90X, ports 2-7 can be in automedia mode.
	 * (Note that 6x90 doesn't support RXAUI or XAUI).
*
* Port 2 can also support 1000BASE-X in automedia mode if port 9 is
* configured for 1000BASE-X, SGMII or 2500BASE-X.
* Port 3-4 can also support 1000BASE-X in automedia mode if port 9 is
* configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
*
* Port 5 can also support 1000BASE-X in automedia mode if port 10 is
* configured for 1000BASE-X, SGMII or 2500BASE-X.
* Port 6-7 can also support 1000BASE-X in automedia mode if port 10 is
* configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
*
* For now, be permissive (as the old code was) and allow 1000BASE-X
* on ports 2..7.
*/
if (port >= 2 && port <= 7)
__set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
/* The C_Mode field can also be programmed for 10G speeds */
if (port == 9 || port == 10) {
__set_bit(PHY_INTERFACE_MODE_XAUI, supported);
__set_bit(PHY_INTERFACE_MODE_RXAUI, supported);
config->mac_capabilities |= MAC_10000FD;
}
}
static void mv88e6393x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
bool is_6191x =
chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6191X;
bool is_6361 =
chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6361;
mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
MAC_1000FD;
/* The C_Mode field can be programmed for ports 0, 9 and 10 */
if (port == 0 || port == 9 || port == 10) {
__set_bit(PHY_INTERFACE_MODE_SGMII, supported);
__set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
/* 6191X supports >1G modes only on port 10 */
if (!is_6191x || port == 10) {
__set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
config->mac_capabilities |= MAC_2500FD;
/* 6361 only supports up to 2500BaseX */
if (!is_6361) {
__set_bit(PHY_INTERFACE_MODE_5GBASER, supported);
__set_bit(PHY_INTERFACE_MODE_10GBASER, supported);
__set_bit(PHY_INTERFACE_MODE_USXGMII, supported);
config->mac_capabilities |= MAC_5000FD |
MAC_10000FD;
}
}
}
if (port == 0) {
__set_bit(PHY_INTERFACE_MODE_RMII, supported);
__set_bit(PHY_INTERFACE_MODE_RGMII, supported);
__set_bit(PHY_INTERFACE_MODE_RGMII_ID, supported);
__set_bit(PHY_INTERFACE_MODE_RGMII_RXID, supported);
__set_bit(PHY_INTERFACE_MODE_RGMII_TXID, supported);
}
}
static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct mv88e6xxx_chip *chip = ds->priv;
mv88e6xxx_reg_lock(chip);
chip->info->ops->phylink_get_caps(chip, port, config);
mv88e6xxx_reg_unlock(chip);
if (mv88e6xxx_phy_is_internal(chip, port)) {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
/* Internal ports with no phy-mode need GMII for PHYLIB */
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
}
}
static struct phylink_pcs *mv88e6xxx_mac_select_pcs(struct dsa_switch *ds,
int port,
phy_interface_t interface)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
if (chip->info->ops->pcs_ops)
pcs = chip->info->ops->pcs_ops->pcs_select(chip, port,
interface);
return pcs;
}
static int mv88e6xxx_mac_prepare(struct dsa_switch *ds, int port,
unsigned int mode, phy_interface_t interface)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err = 0;
/* In inband mode, the link may come up at any time while the link
* is not forced down. Force the link down while we reconfigure the
* interface mode.
*/
if (mode == MLO_AN_INBAND &&
chip->ports[port].interface != interface &&
chip->info->ops->port_set_link) {
mv88e6xxx_reg_lock(chip);
err = chip->info->ops->port_set_link(chip, port,
LINK_FORCED_DOWN);
mv88e6xxx_reg_unlock(chip);
}
return err;
}
static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err = 0;
mv88e6xxx_reg_lock(chip);
if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(chip, port)) {
err = mv88e6xxx_port_config_interface(chip, port,
state->interface);
if (err && err != -EOPNOTSUPP)
goto err_unlock;
}
err_unlock:
mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
dev_err(ds->dev, "p%d: failed to configure MAC/PCS\n", port);
}
static int mv88e6xxx_mac_finish(struct dsa_switch *ds, int port,
unsigned int mode, phy_interface_t interface)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err = 0;
/* Undo the forced down state above after completing configuration
* irrespective of its state on entry, which allows the link to come
* up in the in-band case where there is no separate SERDES. Also
* ensure that the link can come up if the PPU is in use and we are
* in PHY mode (we treat the PPU as an effective in-band mechanism.)
*/
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->port_set_link &&
((mode == MLO_AN_INBAND &&
chip->ports[port].interface != interface) ||
(mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port))))
err = chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
mv88e6xxx_reg_unlock(chip);
chip->ports[port].interface = interface;
return err;
}
static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
struct mv88e6xxx_chip *chip = ds->priv;
const struct mv88e6xxx_ops *ops;
int err = 0;
ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
/* Force the link down if we know the port may not be automatically
* updated by the switch or if we are using fixed-link mode.
*/
if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
mode == MLO_AN_FIXED) && ops->port_sync_link)
err = ops->port_sync_link(chip, port, mode, false);
if (!err && ops->port_set_speed_duplex)
err = ops->port_set_speed_duplex(chip, port, SPEED_UNFORCED,
DUPLEX_UNFORCED);
mv88e6xxx_reg_unlock(chip);
if (err)
dev_err(chip->dev,
"p%d: failed to force MAC link down\n", port);
}
static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode, phy_interface_t interface,
struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct mv88e6xxx_chip *chip = ds->priv;
const struct mv88e6xxx_ops *ops;
int err = 0;
ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
/* Configure and force the link up if we know that the port may not
	 * be automatically updated by the switch or if we are using fixed-link
* mode.
*/
if (!mv88e6xxx_port_ppu_updates(chip, port) ||
mode == MLO_AN_FIXED) {
if (ops->port_set_speed_duplex) {
err = ops->port_set_speed_duplex(chip, port,
speed, duplex);
if (err && err != -EOPNOTSUPP)
goto error;
}
if (ops->port_sync_link)
err = ops->port_sync_link(chip, port, mode, true);
}
error:
mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
dev_err(ds->dev,
"p%d: failed to configure MAC link up\n", port);
}
static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
{
if (!chip->info->ops->stats_snapshot)
return -EOPNOTSUPP;
return chip->info->ops->stats_snapshot(chip, port);
}
static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
{ "in_good_octets", 8, 0x00, STATS_TYPE_BANK0, },
{ "in_bad_octets", 4, 0x02, STATS_TYPE_BANK0, },
{ "in_unicast", 4, 0x04, STATS_TYPE_BANK0, },
{ "in_broadcasts", 4, 0x06, STATS_TYPE_BANK0, },
{ "in_multicasts", 4, 0x07, STATS_TYPE_BANK0, },
{ "in_pause", 4, 0x16, STATS_TYPE_BANK0, },
{ "in_undersize", 4, 0x18, STATS_TYPE_BANK0, },
{ "in_fragments", 4, 0x19, STATS_TYPE_BANK0, },
{ "in_oversize", 4, 0x1a, STATS_TYPE_BANK0, },
{ "in_jabber", 4, 0x1b, STATS_TYPE_BANK0, },
{ "in_rx_error", 4, 0x1c, STATS_TYPE_BANK0, },
{ "in_fcs_error", 4, 0x1d, STATS_TYPE_BANK0, },
{ "out_octets", 8, 0x0e, STATS_TYPE_BANK0, },
{ "out_unicast", 4, 0x10, STATS_TYPE_BANK0, },
{ "out_broadcasts", 4, 0x13, STATS_TYPE_BANK0, },
{ "out_multicasts", 4, 0x12, STATS_TYPE_BANK0, },
{ "out_pause", 4, 0x15, STATS_TYPE_BANK0, },
{ "excessive", 4, 0x11, STATS_TYPE_BANK0, },
{ "collisions", 4, 0x1e, STATS_TYPE_BANK0, },
{ "deferred", 4, 0x05, STATS_TYPE_BANK0, },
{ "single", 4, 0x14, STATS_TYPE_BANK0, },
{ "multiple", 4, 0x17, STATS_TYPE_BANK0, },
{ "out_fcs_error", 4, 0x03, STATS_TYPE_BANK0, },
{ "late", 4, 0x1f, STATS_TYPE_BANK0, },
{ "hist_64bytes", 4, 0x08, STATS_TYPE_BANK0, },
{ "hist_65_127bytes", 4, 0x09, STATS_TYPE_BANK0, },
{ "hist_128_255bytes", 4, 0x0a, STATS_TYPE_BANK0, },
{ "hist_256_511bytes", 4, 0x0b, STATS_TYPE_BANK0, },
{ "hist_512_1023bytes", 4, 0x0c, STATS_TYPE_BANK0, },
{ "hist_1024_max_bytes", 4, 0x0d, STATS_TYPE_BANK0, },
{ "sw_in_discards", 4, 0x10, STATS_TYPE_PORT, },
{ "sw_in_filtered", 2, 0x12, STATS_TYPE_PORT, },
{ "sw_out_filtered", 2, 0x13, STATS_TYPE_PORT, },
{ "in_discards", 4, 0x00, STATS_TYPE_BANK1, },
{ "in_filtered", 4, 0x01, STATS_TYPE_BANK1, },
{ "in_accepted", 4, 0x02, STATS_TYPE_BANK1, },
{ "in_bad_accepted", 4, 0x03, STATS_TYPE_BANK1, },
{ "in_good_avb_class_a", 4, 0x04, STATS_TYPE_BANK1, },
{ "in_good_avb_class_b", 4, 0x05, STATS_TYPE_BANK1, },
{ "in_bad_avb_class_a", 4, 0x06, STATS_TYPE_BANK1, },
{ "in_bad_avb_class_b", 4, 0x07, STATS_TYPE_BANK1, },
{ "tcam_counter_0", 4, 0x08, STATS_TYPE_BANK1, },
{ "tcam_counter_1", 4, 0x09, STATS_TYPE_BANK1, },
{ "tcam_counter_2", 4, 0x0a, STATS_TYPE_BANK1, },
{ "tcam_counter_3", 4, 0x0b, STATS_TYPE_BANK1, },
{ "in_da_unknown", 4, 0x0e, STATS_TYPE_BANK1, },
{ "in_management", 4, 0x0f, STATS_TYPE_BANK1, },
{ "out_queue_0", 4, 0x10, STATS_TYPE_BANK1, },
{ "out_queue_1", 4, 0x11, STATS_TYPE_BANK1, },
{ "out_queue_2", 4, 0x12, STATS_TYPE_BANK1, },
{ "out_queue_3", 4, 0x13, STATS_TYPE_BANK1, },
{ "out_queue_4", 4, 0x14, STATS_TYPE_BANK1, },
{ "out_queue_5", 4, 0x15, STATS_TYPE_BANK1, },
{ "out_queue_6", 4, 0x16, STATS_TYPE_BANK1, },
{ "out_queue_7", 4, 0x17, STATS_TYPE_BANK1, },
{ "out_cut_through", 4, 0x18, STATS_TYPE_BANK1, },
{ "out_octets_a", 4, 0x1a, STATS_TYPE_BANK1, },
{ "out_octets_b", 4, 0x1b, STATS_TYPE_BANK1, },
{ "out_management", 4, 0x1f, STATS_TYPE_BANK1, },
};
static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_hw_stat *s,
int port, u16 bank1_select,
u16 histogram)
{
u32 low;
u32 high = 0;
u16 reg = 0;
int err;
u64 value;
switch (s->type) {
case STATS_TYPE_PORT:
		err = mv88e6xxx_port_read(chip, port, s->reg, &reg);
if (err)
return U64_MAX;
low = reg;
if (s->size == 4) {
			err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
if (err)
return U64_MAX;
low |= ((u32)reg) << 16;
}
break;
case STATS_TYPE_BANK1:
reg = bank1_select;
fallthrough;
case STATS_TYPE_BANK0:
reg |= s->reg | histogram;
mv88e6xxx_g1_stats_read(chip, reg, &low);
if (s->size == 8)
mv88e6xxx_g1_stats_read(chip, reg + 1, &high);
break;
default:
return U64_MAX;
}
value = (((u64)high) << 32) | low;
return value;
}
static int mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip,
uint8_t *data, int types)
{
struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
if (stat->type & types) {
memcpy(data + j * ETH_GSTRING_LEN, stat->string,
ETH_GSTRING_LEN);
j++;
}
}
return j;
}
static int mv88e6095_stats_get_strings(struct mv88e6xxx_chip *chip,
uint8_t *data)
{
return mv88e6xxx_stats_get_strings(chip, data,
STATS_TYPE_BANK0 | STATS_TYPE_PORT);
}
static int mv88e6250_stats_get_strings(struct mv88e6xxx_chip *chip,
uint8_t *data)
{
return mv88e6xxx_stats_get_strings(chip, data, STATS_TYPE_BANK0);
}
static int mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip,
uint8_t *data)
{
return mv88e6xxx_stats_get_strings(chip, data,
STATS_TYPE_BANK0 | STATS_TYPE_BANK1);
}
static const uint8_t *mv88e6xxx_atu_vtu_stats_strings[] = {
"atu_member_violation",
"atu_miss_violation",
"atu_full_violation",
"vtu_member_violation",
"vtu_miss_violation",
};
static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings); i++)
strscpy(data + i * ETH_GSTRING_LEN,
mv88e6xxx_atu_vtu_stats_strings[i],
ETH_GSTRING_LEN);
}
static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
int count = 0;
if (stringset != ETH_SS_STATS)
return;
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->stats_get_strings)
count = chip->info->ops->stats_get_strings(chip, data);
if (chip->info->ops->serdes_get_strings) {
data += count * ETH_GSTRING_LEN;
count = chip->info->ops->serdes_get_strings(chip, port, data);
}
data += count * ETH_GSTRING_LEN;
mv88e6xxx_atu_vtu_get_strings(data);
mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_stats_get_sset_count(struct mv88e6xxx_chip *chip,
int types)
{
struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
if (stat->type & types)
j++;
}
return j;
}
static int mv88e6095_stats_get_sset_count(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0 |
STATS_TYPE_PORT);
}
static int mv88e6250_stats_get_sset_count(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0);
}
static int mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0 |
STATS_TYPE_BANK1);
}
static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct mv88e6xxx_chip *chip = ds->priv;
int serdes_count = 0;
int count = 0;
if (sset != ETH_SS_STATS)
return 0;
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->stats_get_sset_count)
count = chip->info->ops->stats_get_sset_count(chip);
if (count < 0)
goto out;
if (chip->info->ops->serdes_get_sset_count)
serdes_count = chip->info->ops->serdes_get_sset_count(chip,
port);
if (serdes_count < 0) {
count = serdes_count;
goto out;
}
count += serdes_count;
count += ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings);
out:
mv88e6xxx_reg_unlock(chip);
return count;
}
static int mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data, int types,
u16 bank1_select, u16 histogram)
{
struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
if (stat->type & types) {
mv88e6xxx_reg_lock(chip);
data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
bank1_select,
histogram);
mv88e6xxx_reg_unlock(chip);
j++;
}
}
return j;
}
static int mv88e6095_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
return mv88e6xxx_stats_get_stats(chip, port, data,
STATS_TYPE_BANK0 | STATS_TYPE_PORT,
0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
}
static int mv88e6250_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
return mv88e6xxx_stats_get_stats(chip, port, data, STATS_TYPE_BANK0,
0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
}
static int mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
return mv88e6xxx_stats_get_stats(chip, port, data,
STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9,
MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
}
static int mv88e6390_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
return mv88e6xxx_stats_get_stats(chip, port, data,
STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10,
0);
}
static void mv88e6xxx_atu_vtu_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
*data++ = chip->ports[port].atu_member_violation;
*data++ = chip->ports[port].atu_miss_violation;
*data++ = chip->ports[port].atu_full_violation;
*data++ = chip->ports[port].vtu_member_violation;
*data++ = chip->ports[port].vtu_miss_violation;
}
static void mv88e6xxx_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
int count = 0;
if (chip->info->ops->stats_get_stats)
count = chip->info->ops->stats_get_stats(chip, port, data);
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->serdes_get_stats) {
data += count;
count = chip->info->ops->serdes_get_stats(chip, port, data);
}
data += count;
mv88e6xxx_atu_vtu_get_stats(chip, port, data);
mv88e6xxx_reg_unlock(chip);
}
static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
int ret;
mv88e6xxx_reg_lock(chip);
ret = mv88e6xxx_stats_snapshot(chip, port);
mv88e6xxx_reg_unlock(chip);
if (ret < 0)
return;
mv88e6xxx_get_stats(chip, port, data);
}
static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
int len;
len = 32 * sizeof(u16);
if (chip->info->ops->serdes_get_regs_len)
len += chip->info->ops->serdes_get_regs_len(chip, port);
return len;
}
static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
struct ethtool_regs *regs, void *_p)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
u16 reg;
u16 *p = _p;
int i;
regs->version = chip->info->prod_num;
memset(p, 0xff, 32 * sizeof(u16));
mv88e6xxx_reg_lock(chip);
for (i = 0; i < 32; i++) {
		err = mv88e6xxx_port_read(chip, port, i, &reg);
if (!err)
p[i] = reg;
}
if (chip->info->ops->serdes_get_regs)
chip->info->ops->serdes_get_regs(chip, port, &p[i]);
mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
/* Nothing to do on the port's MAC */
return 0;
}
static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
/* Nothing to do on the port's MAC */
return 0;
}
/* Mask of the local ports allowed to receive frames from a given fabric port */
static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
{
struct dsa_switch *ds = chip->ds;
struct dsa_switch_tree *dst = ds->dst;
struct dsa_port *dp, *other_dp;
bool found = false;
u16 pvlan;
/* dev is a physical switch */
if (dev <= dst->last_switch) {
list_for_each_entry(dp, &dst->ports, list) {
if (dp->ds->index == dev && dp->index == port) {
/* dp might be a DSA link or a user port, so it
* might or might not have a bridge.
* Use the "found" variable for both cases.
*/
found = true;
break;
}
}
/* dev is a virtual bridge */
} else {
list_for_each_entry(dp, &dst->ports, list) {
unsigned int bridge_num = dsa_port_bridge_num_get(dp);
if (!bridge_num)
continue;
if (bridge_num + dst->last_switch != dev)
continue;
found = true;
break;
}
}
/* Prevent frames from unknown switch or virtual bridge */
if (!found)
return 0;
/* Frames from DSA links and CPU ports can egress any local port */
if (dp->type == DSA_PORT_TYPE_CPU || dp->type == DSA_PORT_TYPE_DSA)
return mv88e6xxx_port_mask(chip);
pvlan = 0;
/* Frames from standalone user ports can only egress on the
* upstream port.
*/
if (!dsa_port_bridge_dev_get(dp))
return BIT(dsa_switch_upstream_port(ds));
/* Frames from bridged user ports can egress any local DSA
* links and CPU ports, as well as any local member of their
* bridge group.
*/
dsa_switch_for_each_port(other_dp, ds)
if (other_dp->type == DSA_PORT_TYPE_CPU ||
other_dp->type == DSA_PORT_TYPE_DSA ||
dsa_port_bridge_same(dp, other_dp))
pvlan |= BIT(other_dp->index);
return pvlan;
}
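/* Worked example (illustrative only): on a single-switch tree whose CPU
 * port is 5, a standalone user port yields BIT(5) = 0x20, while user
 * ports 1 and 2 bridged together each yield BIT(1) | BIT(2) | BIT(5) =
 * 0x26; mv88e6xxx_port_vlan_map() below then clears the port's own bit
 * before programming the mask.
 */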
static int mv88e6xxx_port_vlan_map(struct mv88e6xxx_chip *chip, int port)
{
u16 output_ports = mv88e6xxx_port_vlan(chip, chip->ds->index, port);
/* prevent frames from going back out of the port they came in on */
output_ports &= ~BIT(port);
return mv88e6xxx_port_set_vlan_map(chip, port, output_ports);
}
static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_state(chip, port, state);
mv88e6xxx_reg_unlock(chip);
if (err)
dev_err(ds->dev, "p%d: failed to update state\n", port);
}
static int mv88e6xxx_pri_setup(struct mv88e6xxx_chip *chip)
{
int err;
if (chip->info->ops->ieee_pri_map) {
err = chip->info->ops->ieee_pri_map(chip);
if (err)
return err;
}
if (chip->info->ops->ip_pri_map) {
err = chip->info->ops->ip_pri_map(chip);
if (err)
return err;
}
return 0;
}
static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
{
struct dsa_switch *ds = chip->ds;
int target, port;
int err;
if (!chip->info->global2_addr)
return 0;
/* Initialize the routing port to the 32 possible target devices */
for (target = 0; target < 32; target++) {
port = dsa_routing_port(ds, target);
if (port == ds->num_ports)
port = 0x1f;
err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
if (err)
return err;
}
if (chip->info->ops->set_cascade_port) {
port = MV88E6XXX_CASCADE_PORT_MULTIPLE;
err = chip->info->ops->set_cascade_port(chip, port);
if (err)
return err;
}
err = mv88e6xxx_g1_set_device_number(chip, chip->ds->index);
if (err)
return err;
return 0;
}
static int mv88e6xxx_trunk_setup(struct mv88e6xxx_chip *chip)
{
/* Clear all trunk masks and mapping */
if (chip->info->global2_addr)
return mv88e6xxx_g2_trunk_clear(chip);
return 0;
}
static int mv88e6xxx_rmu_setup(struct mv88e6xxx_chip *chip)
{
if (chip->info->ops->rmu_disable)
return chip->info->ops->rmu_disable(chip);
return 0;
}
static int mv88e6xxx_pot_setup(struct mv88e6xxx_chip *chip)
{
if (chip->info->ops->pot_clear)
return chip->info->ops->pot_clear(chip);
return 0;
}
static int mv88e6xxx_rsvd2cpu_setup(struct mv88e6xxx_chip *chip)
{
if (chip->info->ops->mgmt_rsvd2cpu)
return chip->info->ops->mgmt_rsvd2cpu(chip);
return 0;
}
static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip)
{
int err;
err = mv88e6xxx_g1_atu_flush(chip, 0, true);
if (err)
return err;
/* The chips that have a "learn2all" bit in Global1, ATU
* Control are precisely those whose port registers have a
* Message Port bit in Port Control 1 and hence implement
* ->port_setup_message_port.
*/
if (chip->info->ops->port_setup_message_port) {
err = mv88e6xxx_g1_atu_set_learn2all(chip, true);
if (err)
return err;
}
return mv88e6xxx_g1_atu_set_age_time(chip, 300000);
}
static int mv88e6xxx_irl_setup(struct mv88e6xxx_chip *chip)
{
int port;
int err;
if (!chip->info->ops->irl_init_all)
return 0;
for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
/* Disable ingress rate limiting by resetting all per port
* ingress rate limit resources to their initial state.
*/
err = chip->info->ops->irl_init_all(chip, port);
if (err)
return err;
}
return 0;
}
static int mv88e6xxx_mac_setup(struct mv88e6xxx_chip *chip)
{
if (chip->info->ops->set_switch_mac) {
u8 addr[ETH_ALEN];
eth_random_addr(addr);
return chip->info->ops->set_switch_mac(chip, addr);
}
return 0;
}
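/* Program one cross-chip Port VLAN Table entry, limiting which local
 * ports may be reached by frames sourced from the given (dev, port).
 */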
static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
{
struct dsa_switch_tree *dst = chip->ds->dst;
struct dsa_switch *ds;
struct dsa_port *dp;
u16 pvlan = 0;
if (!mv88e6xxx_has_pvt(chip))
return 0;
/* Skip the local source device, which uses in-chip port VLAN */
if (dev != chip->ds->index) {
pvlan = mv88e6xxx_port_vlan(chip, dev, port);
ds = dsa_switch_find(dst->index, dev);
dp = ds ? dsa_to_port(ds, port) : NULL;
if (dp && dp->lag) {
/* As the PVT is used to limit flooding of
* FORWARD frames, which use the LAG ID as the
* source port, we must translate dev/port to
* the special "LAG device" in the PVT, using
* the LAG ID (one-based) as the port number
* (zero-based).
*/
dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK;
port = dsa_port_lag_id_get(dp) - 1;
}
}
return mv88e6xxx_g2_pvt_write(chip, dev, port, pvlan);
}
static int mv88e6xxx_pvt_setup(struct mv88e6xxx_chip *chip)
{
int dev, port;
int err;
if (!mv88e6xxx_has_pvt(chip))
return 0;
/* Clear 5 Bit Port for usage with Marvell Link Street devices:
* use 4 bits for the Src_Port/Src_Trunk and 5 bits for the Src_Dev.
*/
err = mv88e6xxx_g2_misc_4_bit_port(chip);
if (err)
return err;
for (dev = 0; dev < MV88E6XXX_MAX_PVT_SWITCHES; ++dev) {
for (port = 0; port < MV88E6XXX_MAX_PVT_PORTS; ++port) {
err = mv88e6xxx_pvt_map(chip, dev, port);
if (err)
return err;
}
}
return 0;
}
static int mv88e6xxx_port_fast_age_fid(struct mv88e6xxx_chip *chip, int port,
u16 fid)
{
if (dsa_to_port(chip->ds, port)->lag)
/* Hardware is incapable of fast-aging a LAG through a
* regular ATU move operation. Until we have something
* more fancy in place this is a no-op.
*/
return -EOPNOTSUPP;
return mv88e6xxx_g1_atu_remove(chip, fid, port, false);
}
static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_fast_age_fid(chip, port, 0);
mv88e6xxx_reg_unlock(chip);
if (err)
dev_err(chip->ds->dev, "p%d: failed to flush ATU: %d\n",
port, err);
}
static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
{
if (!mv88e6xxx_max_vid(chip))
return 0;
return mv88e6xxx_g1_vtu_flush(chip);
}
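/* Look up a single VTU entry. Seed the iterator just before the
 * requested VID (wrapping around for VID 0) so that the next VTU
 * GetNext operation returns the requested VID if it is loaded; flag
 * the entry as invalid otherwise.
 */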
static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
struct mv88e6xxx_vtu_entry *entry)
{
int err;
if (!chip->info->ops->vtu_getnext)
return -EOPNOTSUPP;
entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip);
entry->valid = false;
err = chip->info->ops->vtu_getnext(chip, entry);
if (entry->vid != vid)
entry->valid = false;
return err;
}
int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
int (*cb)(struct mv88e6xxx_chip *chip,
const struct mv88e6xxx_vtu_entry *entry,
void *priv),
void *priv)
{
struct mv88e6xxx_vtu_entry entry = {
.vid = mv88e6xxx_max_vid(chip),
.valid = false,
};
int err;
if (!chip->info->ops->vtu_getnext)
return -EOPNOTSUPP;
do {
err = chip->info->ops->vtu_getnext(chip, &entry);
if (err)
return err;
if (!entry.valid)
break;
err = cb(chip, &entry, priv);
if (err)
return err;
} while (entry.vid < mv88e6xxx_max_vid(chip));
return 0;
}
static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
if (!chip->info->ops->vtu_loadpurge)
return -EOPNOTSUPP;
return chip->info->ops->vtu_loadpurge(chip, entry);
}
static int mv88e6xxx_fid_map_vlan(struct mv88e6xxx_chip *chip,
const struct mv88e6xxx_vtu_entry *entry,
void *_fid_bitmap)
{
unsigned long *fid_bitmap = _fid_bitmap;
set_bit(entry->fid, fid_bitmap);
return 0;
}
int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
{
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
/* Every FID has an associated VID, so walking the VTU
* will discover the full set of FIDs in use.
*/
return mv88e6xxx_vtu_walk(chip, mv88e6xxx_fid_map_vlan, fid_bitmap);
}
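/* Allocate an unused FID: collect every FID referenced by the VTU,
 * pick the first free one and flush any stale ATU entries in it.
 */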
static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
{
DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
int err;
err = mv88e6xxx_fid_map(chip, fid_bitmap);
if (err)
return err;
*fid = find_first_zero_bit(fid_bitmap, MV88E6XXX_N_FID);
if (unlikely(*fid >= mv88e6xxx_num_databases(chip)))
return -ENOSPC;
/* Clear the database */
return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
static int mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_stu_entry *entry)
{
if (!chip->info->ops->stu_loadpurge)
return -EOPNOTSUPP;
return chip->info->ops->stu_loadpurge(chip, entry);
}
static int mv88e6xxx_stu_setup(struct mv88e6xxx_chip *chip)
{
struct mv88e6xxx_stu_entry stu = {
.valid = true,
.sid = 0
};
if (!mv88e6xxx_has_stu(chip))
return 0;
/* Make sure that SID 0 is always valid. This is used by VTU
* entries that do not make use of the STU, e.g. when creating
* a VLAN upper on a port that is also part of a VLAN
* filtering bridge.
*/
return mv88e6xxx_stu_loadpurge(chip, &stu);
}
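/* Find the first free STU SID, skipping SID 0 which is reserved for
 * VLANs that are not mapped to any MST.
 */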
static int mv88e6xxx_sid_get(struct mv88e6xxx_chip *chip, u8 *sid)
{
DECLARE_BITMAP(busy, MV88E6XXX_N_SID) = { 0 };
struct mv88e6xxx_mst *mst;
__set_bit(0, busy);
list_for_each_entry(mst, &chip->msts, node)
__set_bit(mst->stu.sid, busy);
*sid = find_first_zero_bit(busy, MV88E6XXX_N_SID);
return (*sid >= mv88e6xxx_max_sid(chip)) ? -ENOSPC : 0;
}
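/* Drop a reference on the MST entry using the given SID; purge it from
 * the STU and free it once the last user is gone.
 */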
static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid)
{
struct mv88e6xxx_mst *mst, *tmp;
int err;
if (!sid)
return 0;
list_for_each_entry_safe(mst, tmp, &chip->msts, node) {
if (mst->stu.sid != sid)
continue;
if (!refcount_dec_and_test(&mst->refcnt))
return 0;
mst->stu.valid = false;
err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
if (err) {
refcount_set(&mst->refcnt, 1);
return err;
}
list_del(&mst->node);
kfree(mst);
return 0;
}
return -ENOENT;
}
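/* Look up (or create) the MST entry for a bridge/MSTI pair, returning
 * its SID. An existing entry just gains a reference; a new one is
 * loaded into the STU with user ports initially blocking.
 */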
static int mv88e6xxx_mst_get(struct mv88e6xxx_chip *chip, struct net_device *br,
u16 msti, u8 *sid)
{
struct mv88e6xxx_mst *mst;
int err, i;
if (!mv88e6xxx_has_stu(chip)) {
err = -EOPNOTSUPP;
goto err;
}
if (!msti) {
*sid = 0;
return 0;
}
list_for_each_entry(mst, &chip->msts, node) {
if (mst->br == br && mst->msti == msti) {
refcount_inc(&mst->refcnt);
*sid = mst->stu.sid;
return 0;
}
}
err = mv88e6xxx_sid_get(chip, sid);
if (err)
goto err;
mst = kzalloc(sizeof(*mst), GFP_KERNEL);
if (!mst) {
err = -ENOMEM;
goto err;
}
INIT_LIST_HEAD(&mst->node);
refcount_set(&mst->refcnt, 1);
mst->br = br;
mst->msti = msti;
mst->stu.valid = true;
mst->stu.sid = *sid;
/* The bridge starts out all ports in the disabled state. But
* a STU state of disabled means to go by the port-global
* state. So we set all user ports' initial state to blocking,
* to match the bridge's behavior.
*/
for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
mst->stu.state[i] = dsa_is_user_port(chip->ds, i) ?
MV88E6XXX_PORT_CTL0_STATE_BLOCKING :
MV88E6XXX_PORT_CTL0_STATE_DISABLED;
err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
if (err)
goto err_free;
list_add_tail(&mst->node, &chip->msts);
return 0;
err_free:
kfree(mst);
err:
return err;
}
static int mv88e6xxx_port_mst_state_set(struct dsa_switch *ds, int port,
const struct switchdev_mst_state *st)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_mst *mst;
u8 state;
int err;
if (!mv88e6xxx_has_stu(chip))
return -EOPNOTSUPP;
switch (st->state) {
case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
case BR_STATE_LISTENING:
state = MV88E6XXX_PORT_CTL0_STATE_BLOCKING;
break;
case BR_STATE_LEARNING:
state = MV88E6XXX_PORT_CTL0_STATE_LEARNING;
break;
case BR_STATE_FORWARDING:
state = MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
break;
default:
return -EINVAL;
}
list_for_each_entry(mst, &chip->msts, node) {
if (mst->br == dsa_port_bridge_dev_get(dp) &&
mst->msti == st->msti) {
if (mst->stu.state[port] == state)
return 0;
mst->stu.state[port] = state;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
mv88e6xxx_reg_unlock(chip);
return err;
}
}
return -ENOENT;
}
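/* Refuse to install a hardware VLAN that is already in use by a user
 * port of another bridge, so that switchdev falls back to a software
 * VLAN instead.
 */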
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid)
{
struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_vtu_entry vlan;
int err;
/* DSA and CPU ports have to be members of multiple vlans */
if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
return 0;
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
return err;
if (!vlan.valid)
return 0;
dsa_switch_for_each_user_port(other_dp, ds) {
struct net_device *other_br;
if (vlan.member[other_dp->index] ==
MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
if (dsa_port_bridge_same(dp, other_dp))
break; /* same bridge, check next VLAN */
other_br = dsa_port_bridge_dev_get(other_dp);
if (!other_br)
continue;
dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
port, vlan.vid, other_dp->index, netdev_name(other_br));
return -EOPNOTSUPP;
}
return 0;
}
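/* Re-program the port's default VID: the bridge PVID on a VLAN-aware
 * bridge port, MV88E6XXX_VID_BRIDGED on a VLAN-unaware bridge port and
 * MV88E6XXX_VID_STANDALONE otherwise. Untagged frames are dropped when
 * a VLAN-aware bridge has no valid PVID for this port.
 */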
static int mv88e6xxx_port_commit_pvid(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_port *dp = dsa_to_port(chip->ds, port);
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct mv88e6xxx_port *p = &chip->ports[port];
u16 pvid = MV88E6XXX_VID_STANDALONE;
bool drop_untagged = false;
int err;
if (br) {
if (br_vlan_enabled(br)) {
pvid = p->bridge_pvid.vid;
drop_untagged = !p->bridge_pvid.valid;
} else {
pvid = MV88E6XXX_VID_BRIDGED;
}
}
err = mv88e6xxx_port_set_pvid(chip, port, pvid);
if (err)
return err;
return mv88e6xxx_port_drop_untagged(chip, port, drop_untagged);
}
static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
u16 mode = vlan_filtering ? MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE :
MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED;
int err;
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
if (err)
goto unlock;
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
goto unlock;
unlock:
mv88e6xxx_reg_unlock(chip);
return err;
}
static int
mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
/* If the requested port doesn't belong to the same bridge as the VLAN
* members, do not support it (yet) and fallback to software VLAN.
*/
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid);
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
const unsigned char *addr, u16 vid,
u8 state)
{
struct mv88e6xxx_atu_entry entry;
struct mv88e6xxx_vtu_entry vlan;
u16 fid;
int err;
/* Ports have two private address databases: one for when the port is
* standalone and one for when the port is under a bridge and the
* 802.1Q mode is disabled. When the port is standalone, DSA wants its
* address database to remain 100% empty, so we never load an ATU entry
* into a standalone port's database. Therefore, translate the null
* VLAN ID into the port's database used for VLAN-unaware bridging.
*/
if (vid == 0) {
fid = MV88E6XXX_FID_BRIDGED;
} else {
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
return err;
/* switchdev expects -EOPNOTSUPP to honor software VLANs */
if (!vlan.valid)
return -EOPNOTSUPP;
fid = vlan.fid;
}
entry.state = 0;
ether_addr_copy(entry.mac, addr);
eth_addr_dec(entry.mac);
err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
if (err)
return err;
/* Initialize a fresh ATU entry if it isn't found */
if (!entry.state || !ether_addr_equal(entry.mac, addr)) {
memset(&entry, 0, sizeof(entry));
ether_addr_copy(entry.mac, addr);
}
/* Purge the ATU entry only if no port is using it anymore */
if (!state) {
entry.portvec &= ~BIT(port);
if (!entry.portvec)
entry.state = 0;
} else {
if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
entry.portvec = BIT(port);
else
entry.portvec |= BIT(port);
entry.state = state;
}
return mv88e6xxx_g1_atu_loadpurge(chip, fid, &entry);
}
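/* Load or purge the ATU entry backing a DA/SA policy and update the
 * port's policy action, keeping the action in place while other rules
 * on this port still use the same mapping.
 */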
static int mv88e6xxx_policy_apply(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_policy *policy)
{
enum mv88e6xxx_policy_mapping mapping = policy->mapping;
enum mv88e6xxx_policy_action action = policy->action;
const u8 *addr = policy->addr;
u16 vid = policy->vid;
u8 state;
int err;
int id;
if (!chip->info->ops->port_set_policy)
return -EOPNOTSUPP;
switch (mapping) {
case MV88E6XXX_POLICY_MAPPING_DA:
case MV88E6XXX_POLICY_MAPPING_SA:
if (action == MV88E6XXX_POLICY_ACTION_NORMAL)
state = 0; /* Dissociate the port and address */
else if (action == MV88E6XXX_POLICY_ACTION_DISCARD &&
is_multicast_ether_addr(addr))
state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_POLICY;
else if (action == MV88E6XXX_POLICY_ACTION_DISCARD &&
is_unicast_ether_addr(addr))
state = MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_POLICY;
else
return -EOPNOTSUPP;
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
state);
if (err)
return err;
break;
default:
return -EOPNOTSUPP;
}
/* Skip the port's policy clearing if the mapping is still in use */
if (action == MV88E6XXX_POLICY_ACTION_NORMAL)
idr_for_each_entry(&chip->policies, policy, id)
if (policy->port == port &&
policy->mapping == mapping &&
policy->action != action)
return 0;
return chip->info->ops->port_set_policy(chip, port, mapping, action);
}
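/* Translate an ethtool ETHER_FLOW discard rule into a DA or SA policy
 * entry, allocate a rule location for it and apply it to the port.
 */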
static int mv88e6xxx_policy_insert(struct mv88e6xxx_chip *chip, int port,
struct ethtool_rx_flow_spec *fs)
{
struct ethhdr *mac_entry = &fs->h_u.ether_spec;
struct ethhdr *mac_mask = &fs->m_u.ether_spec;
enum mv88e6xxx_policy_mapping mapping;
enum mv88e6xxx_policy_action action;
struct mv88e6xxx_policy *policy;
u16 vid = 0;
u8 *addr;
int err;
int id;
if (fs->location != RX_CLS_LOC_ANY)
return -EINVAL;
if (fs->ring_cookie == RX_CLS_FLOW_DISC)
action = MV88E6XXX_POLICY_ACTION_DISCARD;
else
return -EOPNOTSUPP;
switch (fs->flow_type & ~FLOW_EXT) {
case ETHER_FLOW:
if (!is_zero_ether_addr(mac_mask->h_dest) &&
is_zero_ether_addr(mac_mask->h_source)) {
mapping = MV88E6XXX_POLICY_MAPPING_DA;
addr = mac_entry->h_dest;
} else if (is_zero_ether_addr(mac_mask->h_dest) &&
!is_zero_ether_addr(mac_mask->h_source)) {
mapping = MV88E6XXX_POLICY_MAPPING_SA;
addr = mac_entry->h_source;
} else {
/* Cannot support DA and SA mapping in the same rule */
return -EOPNOTSUPP;
}
break;
default:
return -EOPNOTSUPP;
}
if ((fs->flow_type & FLOW_EXT) && fs->m_ext.vlan_tci) {
if (fs->m_ext.vlan_tci != htons(0xffff))
return -EOPNOTSUPP;
vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
}
idr_for_each_entry(&chip->policies, policy, id) {
if (policy->port == port && policy->mapping == mapping &&
policy->action == action && policy->vid == vid &&
ether_addr_equal(policy->addr, addr))
return -EEXIST;
}
policy = devm_kzalloc(chip->dev, sizeof(*policy), GFP_KERNEL);
if (!policy)
return -ENOMEM;
fs->location = 0;
err = idr_alloc_u32(&chip->policies, policy, &fs->location, 0xffffffff,
GFP_KERNEL);
if (err) {
devm_kfree(chip->dev, policy);
return err;
}
memcpy(&policy->fs, fs, sizeof(*fs));
ether_addr_copy(policy->addr, addr);
policy->mapping = mapping;
policy->action = action;
policy->port = port;
policy->vid = vid;
err = mv88e6xxx_policy_apply(chip, port, policy);
if (err) {
idr_remove(&chip->policies, fs->location);
devm_kfree(chip->dev, policy);
return err;
}
return 0;
}
static int mv88e6xxx_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
struct ethtool_rx_flow_spec *fs = &rxnfc->fs;
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_policy *policy;
int err;
int id;
mv88e6xxx_reg_lock(chip);
switch (rxnfc->cmd) {
case ETHTOOL_GRXCLSRLCNT:
rxnfc->data = 0;
rxnfc->data |= RX_CLS_LOC_SPECIAL;
rxnfc->rule_cnt = 0;
idr_for_each_entry(&chip->policies, policy, id)
if (policy->port == port)
rxnfc->rule_cnt++;
err = 0;
break;
case ETHTOOL_GRXCLSRULE:
err = -ENOENT;
policy = idr_find(&chip->policies, fs->location);
if (policy) {
memcpy(fs, &policy->fs, sizeof(*fs));
err = 0;
}
break;
case ETHTOOL_GRXCLSRLALL:
rxnfc->data = 0;
rxnfc->rule_cnt = 0;
idr_for_each_entry(&chip->policies, policy, id)
if (policy->port == port)
rule_locs[rxnfc->rule_cnt++] = id;
err = 0;
break;
default:
err = -EOPNOTSUPP;
break;
}
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *rxnfc)
{
struct ethtool_rx_flow_spec *fs = &rxnfc->fs;
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_policy *policy;
int err;
mv88e6xxx_reg_lock(chip);
switch (rxnfc->cmd) {
case ETHTOOL_SRXCLSRLINS:
err = mv88e6xxx_policy_insert(chip, port, fs);
break;
case ETHTOOL_SRXCLSRLDEL:
err = -ENOENT;
policy = idr_remove(&chip->policies, fs->location);
if (policy) {
policy->action = MV88E6XXX_POLICY_ACTION_NORMAL;
err = mv88e6xxx_policy_apply(chip, port, policy);
devm_kfree(chip->dev, policy);
}
break;
default:
err = -EOPNOTSUPP;
break;
}
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_add_broadcast(struct mv88e6xxx_chip *chip, int port,
u16 vid)
{
u8 state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC;
u8 broadcast[ETH_ALEN];
eth_broadcast_addr(broadcast);
return mv88e6xxx_port_db_load_purge(chip, port, broadcast, vid, state);
}
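/* Load the broadcast address into the given VID's database on every
 * used port, except bridged user ports with broadcast flooding
 * disabled.
 */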
static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid)
{
int port;
int err;
for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
struct dsa_port *dp = dsa_to_port(chip->ds, port);
struct net_device *brport;
if (dsa_is_unused_port(chip->ds, port))
continue;
brport = dsa_port_to_bridge_port(dp);
if (brport && !br_port_flag_is_set(brport, BR_BCAST_FLOOD))
/* Skip bridged user ports where broadcast
* flooding is disabled.
*/
continue;
err = mv88e6xxx_port_add_broadcast(chip, port, vid);
if (err)
return err;
}
return 0;
}
struct mv88e6xxx_port_broadcast_sync_ctx {
int port;
bool flood;
};
static int
mv88e6xxx_port_broadcast_sync_vlan(struct mv88e6xxx_chip *chip,
const struct mv88e6xxx_vtu_entry *vlan,
void *_ctx)
{
struct mv88e6xxx_port_broadcast_sync_ctx *ctx = _ctx;
u8 broadcast[ETH_ALEN];
u8 state;
if (ctx->flood)
state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC;
else
state = MV88E6XXX_G1_ATU_DATA_STATE_MC_UNUSED;
eth_broadcast_addr(broadcast);
return mv88e6xxx_port_db_load_purge(chip, ctx->port, broadcast,
vlan->vid, state);
}
static int mv88e6xxx_port_broadcast_sync(struct mv88e6xxx_chip *chip, int port,
bool flood)
{
struct mv88e6xxx_port_broadcast_sync_ctx ctx = {
.port = port,
.flood = flood,
};
struct mv88e6xxx_vtu_entry vid0 = {
.vid = 0,
};
int err;
/* Update the port's private database... */
err = mv88e6xxx_port_broadcast_sync_vlan(chip, &vid0, &ctx);
if (err)
return err;
/* ...and the database for all VLANs. */
return mv88e6xxx_vtu_walk(chip, mv88e6xxx_port_broadcast_sync_vlan,
&ctx);
}
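/* Make the port a member of the given VID, creating the VTU entry
 * (with a freshly allocated FID and broadcast entries) if it does not
 * exist yet.
 */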
static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
u16 vid, u8 member, bool warn)
{
const u8 non_member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
struct mv88e6xxx_vtu_entry vlan;
int i, err;
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
return err;
if (!vlan.valid) {
memset(&vlan, 0, sizeof(vlan));
if (vid == MV88E6XXX_VID_STANDALONE)
vlan.policy = true;
err = mv88e6xxx_atu_new(chip, &vlan.fid);
if (err)
return err;
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
if (i == port)
vlan.member[i] = member;
else
vlan.member[i] = non_member;
vlan.vid = vid;
vlan.valid = true;
err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
if (err)
return err;
err = mv88e6xxx_broadcast_setup(chip, vlan.vid);
if (err)
return err;
} else if (vlan.member[port] != member) {
vlan.member[port] = member;
err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
if (err)
return err;
} else if (warn) {
dev_info(chip->dev, "p%d: already a member of VLAN %d\n",
port, vid);
}
return 0;
}
static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct mv88e6xxx_port *p = &chip->ports[port];
bool warn;
u8 member;
int err;
if (!vlan->vid)
return 0;
err = mv88e6xxx_port_vlan_prepare(ds, port, vlan);
if (err)
return err;
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED;
else if (untagged)
member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED;
else
member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED;
/* net/dsa/slave.c will call dsa_port_vlan_add() for the affected port
* and then the CPU port. Do not warn for duplicates for the CPU port.
*/
warn = !dsa_is_cpu_port(ds, port) && !dsa_is_dsa_port(ds, port);
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_vlan_join(chip, port, vlan->vid, member, warn);
if (err) {
dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
vlan->vid, untagged ? 'u' : 't');
goto out;
}
if (pvid) {
p->bridge_pvid.vid = vlan->vid;
p->bridge_pvid.valid = true;
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
goto out;
} else if (vlan->vid && p->bridge_pvid.vid == vlan->vid) {
/* The old pvid was reinstalled as a non-pvid VLAN */
p->bridge_pvid.valid = false;
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
goto out;
}
out:
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
int port, u16 vid)
{
struct mv88e6xxx_vtu_entry vlan;
int i, err;
if (!vid)
return 0;
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
return err;
/* If the VLAN doesn't exist in hardware or the port isn't a member,
* tell switchdev that this VLAN is likely handled in software.
*/
if (!vlan.valid ||
vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
return -EOPNOTSUPP;
vlan.member[port] = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
/* keep the VLAN unless all ports are excluded */
vlan.valid = false;
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
if (vlan.member[i] !=
MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
vlan.valid = true;
break;
}
}
err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
if (err)
return err;
if (!vlan.valid) {
err = mv88e6xxx_mst_put(chip, vlan.sid);
if (err)
return err;
}
return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false);
}
static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port *p = &chip->ports[port];
int err = 0;
u16 pvid;
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
/* The ATU removal procedure needs the FID to be mapped in the VTU,
* but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
* switchdev workqueue to ensure that all FDB entries are deleted
* before we remove the VLAN.
*/
dsa_flush_workqueue();
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
if (err)
goto unlock;
err = mv88e6xxx_port_vlan_leave(chip, port, vlan->vid);
if (err)
goto unlock;
if (vlan->vid == pvid) {
p->bridge_pvid.valid = false;
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
goto unlock;
}
unlock:
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_vlan_fast_age(struct dsa_switch *ds, int port, u16 vid)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_vtu_entry vlan;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
goto unlock;
err = mv88e6xxx_port_fast_age_fid(chip, port, vlan.fid);
unlock:
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_vlan_msti_set(struct dsa_switch *ds,
struct dsa_bridge bridge,
const struct switchdev_vlan_msti *msti)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_vtu_entry vlan;
u8 old_sid, new_sid;
int err;
if (!mv88e6xxx_has_stu(chip))
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_vtu_get(chip, msti->vid, &vlan);
if (err)
goto unlock;
if (!vlan.valid) {
err = -EINVAL;
goto unlock;
}
old_sid = vlan.sid;
err = mv88e6xxx_mst_get(chip, bridge.dev, msti->msti, &new_sid);
if (err)
goto unlock;
if (new_sid != old_sid) {
vlan.sid = new_sid;
err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
if (err) {
mv88e6xxx_mst_put(chip, new_sid);
goto unlock;
}
}
err = mv88e6xxx_mst_put(chip, old_sid);
unlock:
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, 0);
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
u16 fid, u16 vid, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct mv88e6xxx_atu_entry addr;
bool is_static;
int err;
addr.state = 0;
eth_broadcast_addr(addr.mac);
do {
err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
if (err)
return err;
if (!addr.state)
break;
if (addr.trunk || (addr.portvec & BIT(port)) == 0)
continue;
if (!is_unicast_ether_addr(addr.mac))
continue;
is_static = (addr.state ==
MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
err = cb(addr.mac, vid, is_static, data);
if (err)
return err;
} while (!is_broadcast_ether_addr(addr.mac));
return err;
}
struct mv88e6xxx_port_db_dump_vlan_ctx {
int port;
dsa_fdb_dump_cb_t *cb;
void *data;
};
static int mv88e6xxx_port_db_dump_vlan(struct mv88e6xxx_chip *chip,
const struct mv88e6xxx_vtu_entry *entry,
void *_data)
{
struct mv88e6xxx_port_db_dump_vlan_ctx *ctx = _data;
return mv88e6xxx_port_db_dump_fid(chip, entry->fid, entry->vid,
ctx->port, ctx->cb, ctx->data);
}
static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct mv88e6xxx_port_db_dump_vlan_ctx ctx = {
.port = port,
.cb = cb,
.data = data,
};
u16 fid;
int err;
/* Dump port's default Filtering Information Database (VLAN ID 0) */
err = mv88e6xxx_port_get_fid(chip, port, &fid);
if (err)
return err;
err = mv88e6xxx_port_db_dump_fid(chip, fid, 0, port, cb, data);
if (err)
return err;
return mv88e6xxx_vtu_walk(chip, mv88e6xxx_port_db_dump_vlan, &ctx);
}
static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_dump(chip, port, cb, data);
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
struct dsa_bridge bridge)
{
struct dsa_switch *ds = chip->ds;
struct dsa_switch_tree *dst = ds->dst;
struct dsa_port *dp;
int err;
list_for_each_entry(dp, &dst->ports, list) {
if (dsa_port_offloads_bridge(dp, &bridge)) {
if (dp->ds == ds) {
/* This is a local bridge group member,
* remap its Port VLAN Map.
*/
err = mv88e6xxx_port_vlan_map(chip, dp->index);
if (err)
return err;
} else {
/* This is an external bridge group member,
* remap its cross-chip Port VLAN Table entry.
*/
err = mv88e6xxx_pvt_map(chip, dp->ds->index,
dp->index);
if (err)
return err;
}
}
}
return 0;
}
/* Treat the software bridge as a virtual single-port switch behind the
* CPU and map in the PVT. First dst->last_switch elements are taken by
* physical switches, so start from beyond that range.
*/
static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
unsigned int bridge_num)
{
u8 dev = bridge_num + ds->dst->last_switch;
struct mv88e6xxx_chip *chip = ds->priv;
return mv88e6xxx_pvt_map(chip, dev, 0);
}
static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_bridge_map(chip, bridge);
if (err)
goto unlock;
err = mv88e6xxx_port_set_map_da(chip, port, true);
if (err)
goto unlock;
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
goto unlock;
if (mv88e6xxx_has_pvt(chip)) {
err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num);
if (err)
goto unlock;
*tx_fwd_offload = true;
}
unlock:
mv88e6xxx_reg_unlock(chip);
return err;
}
static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
if (bridge.tx_fwd_offload &&
mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num))
dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
if (mv88e6xxx_bridge_map(chip, bridge) ||
mv88e6xxx_port_vlan_map(chip, port))
dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
err = mv88e6xxx_port_set_map_da(chip, port, false);
if (err)
dev_err(ds->dev,
"port %d failed to restore map-DA: %pe\n",
port, ERR_PTR(err));
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
dev_err(ds->dev,
"port %d failed to restore standalone pvid: %pe\n",
port, ERR_PTR(err));
mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
int tree_index, int sw_index,
int port, struct dsa_bridge bridge,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
if (tree_index != ds->dst->index)
return 0;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_pvt_map(chip, sw_index, port);
err = err ? : mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num);
mv88e6xxx_reg_unlock(chip);
return err;
}
static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
int tree_index, int sw_index,
int port, struct dsa_bridge bridge)
{
struct mv88e6xxx_chip *chip = ds->priv;
if (tree_index != ds->dst->index)
return;
mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_pvt_map(chip, sw_index, port) ||
mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num))
dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
{
if (chip->info->ops->reset)
return chip->info->ops->reset(chip);
return 0;
}
static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
{
struct gpio_desc *gpiod = chip->reset;
/* If there is a GPIO connected to the reset pin, toggle it */
if (gpiod) {
/* If the switch has just been reset and has not yet finished
 * loading its EEPROM, a reset here may interrupt the I2C
 * transaction mid-byte, causing the first EEPROM read after
 * the reset to come from the wrong location, so the switch
 * boots into the wrong mode and becomes inoperable.
 */
mv88e6xxx_g1_wait_eeprom_done(chip);
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
mv88e6xxx_g1_wait_eeprom_done(chip);
}
}
static int mv88e6xxx_disable_ports(struct mv88e6xxx_chip *chip)
{
int i, err;
/* Set all ports to the Disabled state */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
err = mv88e6xxx_port_set_state(chip, i, BR_STATE_DISABLED);
if (err)
return err;
}
/* Wait for transmit queues to drain,
* i.e. 2ms for a maximum frame to be transmitted at 10 Mbps.
*/
usleep_range(2000, 4000);
return 0;
}
static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
{
int err;
err = mv88e6xxx_disable_ports(chip);
if (err)
return err;
mv88e6xxx_hardware_reset(chip);
return mv88e6xxx_software_reset(chip);
}
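/* Configure how the port parses and emits switch tags: the frame mode,
 * the egress mode and, where supported, the EtherType used for EDSA.
 */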
static int mv88e6xxx_set_port_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode frame,
enum mv88e6xxx_egress_mode egress, u16 etype)
{
int err;
if (!chip->info->ops->port_set_frame_mode)
return -EOPNOTSUPP;
err = mv88e6xxx_port_set_egress_mode(chip, port, egress);
if (err)
return err;
err = chip->info->ops->port_set_frame_mode(chip, port, frame);
if (err)
return err;
if (chip->info->ops->port_set_ether_type)
return chip->info->ops->port_set_ether_type(chip, port, etype);
return 0;
}
static int mv88e6xxx_set_port_mode_normal(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_NORMAL,
MV88E6XXX_EGRESS_MODE_UNMODIFIED,
MV88E6XXX_PORT_ETH_TYPE_DEFAULT);
}
static int mv88e6xxx_set_port_mode_dsa(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_DSA,
MV88E6XXX_EGRESS_MODE_UNMODIFIED,
MV88E6XXX_PORT_ETH_TYPE_DEFAULT);
}
static int mv88e6xxx_set_port_mode_edsa(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_set_port_mode(chip, port,
MV88E6XXX_FRAME_MODE_ETHERTYPE,
MV88E6XXX_EGRESS_MODE_ETHERTYPE,
ETH_P_EDSA);
}
static int mv88e6xxx_setup_port_mode(struct mv88e6xxx_chip *chip, int port)
{
if (dsa_is_dsa_port(chip->ds, port))
return mv88e6xxx_set_port_mode_dsa(chip, port);
if (dsa_is_user_port(chip->ds, port))
return mv88e6xxx_set_port_mode_normal(chip, port);
/* Setup CPU port mode depending on its supported tag format */
if (chip->tag_protocol == DSA_TAG_PROTO_DSA)
return mv88e6xxx_set_port_mode_dsa(chip, port);
if (chip->tag_protocol == DSA_TAG_PROTO_EDSA)
return mv88e6xxx_set_port_mode_edsa(chip, port);
return -EINVAL;
}
static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port)
{
bool message = dsa_is_dsa_port(chip->ds, port);
return mv88e6xxx_port_set_message_port(chip, port, message);
}
static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
{
int err;
if (chip->info->ops->port_set_ucast_flood) {
err = chip->info->ops->port_set_ucast_flood(chip, port, true);
if (err)
return err;
}
if (chip->info->ops->port_set_mcast_flood) {
err = chip->info->ops->port_set_mcast_flood(chip, port, true);
if (err)
return err;
}
return 0;
}
static int mv88e6xxx_set_egress_port(struct mv88e6xxx_chip *chip,
enum mv88e6xxx_egress_direction direction,
int port)
{
int err;
if (!chip->info->ops->set_egress_port)
return -EOPNOTSUPP;
err = chip->info->ops->set_egress_port(chip, direction, port);
if (err)
return err;
if (direction == MV88E6XXX_EGRESS_DIR_INGRESS)
chip->ingress_dest_port = port;
else
chip->egress_dest_port = port;
return 0;
}
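/* Point the port at its upstream (CPU-facing) port and, if this port
 * is itself the upstream port, make it the CPU destination as well as
 * the default ingress and egress destination.
 */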
static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_switch *ds = chip->ds;
int upstream_port;
int err;
upstream_port = dsa_upstream_port(ds, port);
if (chip->info->ops->port_set_upstream_port) {
err = chip->info->ops->port_set_upstream_port(chip, port,
upstream_port);
if (err)
return err;
}
if (port == upstream_port) {
if (chip->info->ops->set_cpu_port) {
err = chip->info->ops->set_cpu_port(chip,
upstream_port);
if (err)
return err;
}
err = mv88e6xxx_set_egress_port(chip,
MV88E6XXX_EGRESS_DIR_INGRESS,
upstream_port);
if (err && err != -EOPNOTSUPP)
return err;
err = mv88e6xxx_set_egress_port(chip,
MV88E6XXX_EGRESS_DIR_EGRESS,
upstream_port);
if (err && err != -EOPNOTSUPP)
return err;
}
return 0;
}
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
struct device_node *phy_handle = NULL;
struct dsa_switch *ds = chip->ds;
struct dsa_port *dp;
int tx_amp;
int err;
u16 reg;
chip->ports[port].chip = chip;
chip->ports[port].port = port;
err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
SPEED_UNFORCED, DUPLEX_UNFORCED,
PAUSE_ON, PHY_INTERFACE_MODE_NA);
if (err)
return err;
/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
* disable Header mode, enable IGMP/MLD snooping, disable VLAN
* tunneling, determine priority by looking at 802.1p and IP
* priority fields (IP prio has precedence), and set STP state
* to Forwarding.
*
* If this is the CPU link, use DSA or EDSA tagging depending
* on which tagging mode was configured.
*
* If this is a link to another switch, use DSA tagging mode.
*
* If this is the upstream port for this switch, enable
* forwarding of unknown unicasts and multicasts.
*/
reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
/* Forward any IPv4 IGMP or IPv6 MLD frames received
* by a USER port to the CPU port to allow snooping.
*/
if (dsa_is_user_port(ds, port))
reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
if (err)
return err;
err = mv88e6xxx_setup_port_mode(chip, port);
if (err)
return err;
err = mv88e6xxx_setup_egress_floods(chip, port);
if (err)
return err;
/* Port Control 2: don't force a good FCS, set the MTU size to
* 10222 bytes, disable 802.1q tags checking, don't discard
* tagged or untagged frames on this port, skip destination
* address lookup on user ports, disable ARP mirroring and don't
* send a copy of all transmitted/received frames on this port
* to the CPU.
*/
err = mv88e6xxx_port_set_map_da(chip, port, !dsa_is_user_port(ds, port));
if (err)
return err;
err = mv88e6xxx_setup_upstream_port(chip, port);
if (err)
return err;
/* On chips that support it, set all downstream DSA ports'
* VLAN policy to TRAP. In combination with loading
* MV88E6XXX_VID_STANDALONE as a policy entry in the VTU, this
* provides a better isolation barrier between standalone
* ports, as the ATU is bypassed on any intermediate switches
* between the incoming port and the CPU.
*/
if (dsa_is_downstream_port(ds, port) &&
chip->info->ops->port_set_policy) {
err = chip->info->ops->port_set_policy(chip, port,
MV88E6XXX_POLICY_MAPPING_VTU,
MV88E6XXX_POLICY_ACTION_TRAP);
if (err)
return err;
}
/* User ports start out in standalone mode and 802.1Q is
* therefore disabled. On DSA ports, all valid VIDs are always
* loaded in the VTU - therefore, enable 802.1Q in order to take
* advantage of VLAN policy on chips that support it.
*/
err = mv88e6xxx_port_set_8021q_mode(chip, port,
dsa_is_user_port(ds, port) ?
MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED :
MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE);
if (err)
return err;
/* Bind MV88E6XXX_VID_STANDALONE to MV88E6XXX_FID_STANDALONE by
* virtue of the fact that mv88e6xxx_atu_new() will pick it as
* the first free FID. This will be used as the private PVID for
* unbridged ports. Shared (DSA and CPU) ports must also be
* members of this VID, in order to trap all frames assigned to
* it to the CPU.
*/
err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_STANDALONE,
MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
false);
if (err)
return err;
/* Associate MV88E6XXX_VID_BRIDGED with MV88E6XXX_FID_BRIDGED in the
* ATU by virtue of the fact that mv88e6xxx_atu_new() will pick it as
* the first free FID after MV88E6XXX_FID_STANDALONE. This will be used
* as the private PVID on ports under a VLAN-unaware bridge.
* Shared (DSA and CPU) ports must also be members of it, to translate
* the VID from the DSA tag into MV88E6XXX_FID_BRIDGED, instead of
* relying on their port default FID.
*/
err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_BRIDGED,
MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
false);
if (err)
return err;
if (chip->info->ops->port_set_jumbo_size) {
err = chip->info->ops->port_set_jumbo_size(chip, port, 10218);
if (err)
return err;
}
/* Port Association Vector: disable automatic address learning
* on all user ports since they start out in standalone
* mode. When joining a bridge, learning will be configured to
* match the bridge port settings. Enable learning on all
* DSA/CPU ports. NOTE: FROM_CPU frames always bypass the
* learning process.
*
* Disable HoldAt1, IntOnAgeOut, LockedPort, IgnoreWrongData,
* and RefreshLocked. I.e. setup standard automatic learning.
*/
if (dsa_is_user_port(ds, port))
reg = 0;
else
reg = 1 << port;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR,
reg);
if (err)
return err;
/* Egress rate control 2: disable egress rate control. */
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_EGRESS_RATE_CTL2,
0x0000);
if (err)
return err;
if (chip->info->ops->port_pause_limit) {
err = chip->info->ops->port_pause_limit(chip, port, 0, 0);
if (err)
return err;
}
if (chip->info->ops->port_disable_learn_limit) {
err = chip->info->ops->port_disable_learn_limit(chip, port);
if (err)
return err;
}
if (chip->info->ops->port_disable_pri_override) {
err = chip->info->ops->port_disable_pri_override(chip, port);
if (err)
return err;
}
if (chip->info->ops->port_tag_remap) {
err = chip->info->ops->port_tag_remap(chip, port);
if (err)
return err;
}
if (chip->info->ops->port_egress_rate_limiting) {
err = chip->info->ops->port_egress_rate_limiting(chip, port);
if (err)
return err;
}
if (chip->info->ops->port_setup_message_port) {
err = chip->info->ops->port_setup_message_port(chip, port);
if (err)
return err;
}
if (chip->info->ops->serdes_set_tx_amplitude) {
dp = dsa_to_port(ds, port);
if (dp)
phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0);
if (phy_handle && !of_property_read_u32(phy_handle,
"tx-p2p-microvolt",
&tx_amp))
err = chip->info->ops->serdes_set_tx_amplitude(chip,
port, tx_amp);
if (phy_handle) {
of_node_put(phy_handle);
if (err)
return err;
}
}
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
*/
err = mv88e6xxx_port_set_fid(chip, port, MV88E6XXX_FID_STANDALONE);
if (err)
return err;
err = mv88e6xxx_port_vlan_map(chip, port);
if (err)
return err;
/* Default VLAN ID and priority: don't set a default VLAN
* ID, and set the default packet priority to zero.
*/
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN, 0);
}
static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
if (chip->info->ops->port_set_jumbo_size)
return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
else if (chip->info->ops->set_max_frame_size)
return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
return ETH_DATA_LEN;
}
static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct mv88e6xxx_chip *chip = ds->priv;
int ret = 0;
/* For families where we don't know how to alter the MTU,
* just accept any value up to ETH_DATA_LEN
*/
if (!chip->info->ops->port_set_jumbo_size &&
!chip->info->ops->set_max_frame_size) {
if (new_mtu > ETH_DATA_LEN)
return -EINVAL;
return 0;
}
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
new_mtu += EDSA_HLEN;
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->port_set_jumbo_size)
ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
else if (chip->info->ops->set_max_frame_size)
ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
mv88e6xxx_reg_unlock(chip);
return ret;
}
static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_atu_set_age_time(chip, ageing_time);
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
{
int err;
/* Initialize the statistics unit */
if (chip->info->ops->stats_set_histogram) {
err = chip->info->ops->stats_set_histogram(chip);
if (err)
return err;
}
return mv88e6xxx_g1_stats_clear(chip);
}
/* Check if the errata has already been applied. */
static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
{
int port;
int err;
u16 val;
for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
err = mv88e6xxx_port_hidden_read(chip, 0xf, port, 0, &val);
if (err) {
dev_err(chip->dev,
"Error reading hidden register: %d\n", err);
return false;
}
if (val != 0x01c0)
return false;
}
return true;
}
/* The 6390 copper ports have an erratum which requires poking magic
* values into undocumented hidden registers and then performing a
* software reset.
*/
static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
{
int port;
int err;
if (mv88e6390_setup_errata_applied(chip))
return 0;
/* Set the ports into blocking mode */
for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED);
if (err)
return err;
}
for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
err = mv88e6xxx_port_hidden_write(chip, 0xf, port, 0, 0x01c0);
if (err)
return err;
}
return mv88e6xxx_software_reset(chip);
}
/* prod_id for switch families which do not have a PHY model number */
static const u16 family_prod_id_table[] = {
[MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
[MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
[MV88E6XXX_FAMILY_6393] = MV88E6XXX_PORT_SWITCH_ID_PROD_6393X,
};
static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
struct mv88e6xxx_chip *chip = mdio_bus->chip;
u16 prod_id;
u16 val;
int err;
if (!chip->info->ops->phy_read)
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
mv88e6xxx_reg_unlock(chip);
/* Some internal PHYs don't have a model number. */
if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
prod_id = family_prod_id_table[chip->info->family];
if (prod_id)
val |= prod_id >> 4;
}
return err ? err : val;
}
static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad,
int reg)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
struct mv88e6xxx_chip *chip = mdio_bus->chip;
u16 val;
int err;
if (!chip->info->ops->phy_read_c45)
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val);
mv88e6xxx_reg_unlock(chip);
return err ? err : val;
}
static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
struct mv88e6xxx_chip *chip = mdio_bus->chip;
int err;
if (!chip->info->ops->phy_write)
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = chip->info->ops->phy_write(chip, bus, phy, reg, val);
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_mdio_write_c45(struct mii_bus *bus, int phy, int devad,
int reg, u16 val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
struct mv88e6xxx_chip *chip = mdio_bus->chip;
int err;
if (!chip->info->ops->phy_write_c45)
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = chip->info->ops->phy_write_c45(chip, bus, phy, devad, reg, val);
mv88e6xxx_reg_unlock(chip);
return err;
}
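/* Register a single MDIO bus, either the internal bus or an external
 * SMI bus described by the given device tree node.
 */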
static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
struct device_node *np,
bool external)
{
static int index;
struct mv88e6xxx_mdio_bus *mdio_bus;
struct mii_bus *bus;
int err;
if (external) {
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true);
mv88e6xxx_reg_unlock(chip);
if (err)
return err;
}
bus = mdiobus_alloc_size(sizeof(*mdio_bus));
if (!bus)
return -ENOMEM;
mdio_bus = bus->priv;
mdio_bus->bus = bus;
mdio_bus->chip = chip;
INIT_LIST_HEAD(&mdio_bus->list);
mdio_bus->external = external;
if (np) {
bus->name = np->full_name;
snprintf(bus->id, MII_BUS_ID_SIZE, "%pOF", np);
} else {
bus->name = "mv88e6xxx SMI";
snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++);
}
bus->read = mv88e6xxx_mdio_read;
bus->write = mv88e6xxx_mdio_write;
bus->read_c45 = mv88e6xxx_mdio_read_c45;
bus->write_c45 = mv88e6xxx_mdio_write_c45;
bus->parent = chip->dev;
bus->phy_mask = ~GENMASK(chip->info->phy_base_addr +
mv88e6xxx_num_ports(chip) - 1,
chip->info->phy_base_addr);
if (!external) {
err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
if (err)
goto out;
}
err = of_mdiobus_register(bus, np);
if (err) {
dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
mv88e6xxx_g2_irq_mdio_free(chip, bus);
goto out;
}
if (external)
list_add_tail(&mdio_bus->list, &chip->mdios);
else
list_add(&mdio_bus->list, &chip->mdios);
return 0;
out:
mdiobus_free(bus);
return err;
}
static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
{
struct mv88e6xxx_mdio_bus *mdio_bus, *p;
struct mii_bus *bus;
list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) {
bus = mdio_bus->bus;
if (!mdio_bus->external)
mv88e6xxx_g2_irq_mdio_free(chip, bus);
mdiobus_unregister(bus);
mdiobus_free(bus);
}
}
static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip)
{
struct device_node *np = chip->dev->of_node;
struct device_node *child;
int err;
/* Always register one mdio bus for the internal/default mdio
* bus. This may be represented in the device tree, but is
* optional.
*/
child = of_get_child_by_name(np, "mdio");
err = mv88e6xxx_mdio_register(chip, child, false);
of_node_put(child);
if (err)
return err;
/* Walk the device tree, and see if there are any other nodes
* which say they are compatible with the external mdio
* bus.
*/
for_each_available_child_of_node(np, child) {
if (of_device_is_compatible(
child, "marvell,mv88e6xxx-mdio-external")) {
err = mv88e6xxx_mdio_register(chip, child, true);
if (err) {
mv88e6xxx_mdios_unregister(chip);
of_node_put(child);
return err;
}
}
}
return 0;
}
static void mv88e6xxx_teardown(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
mv88e6xxx_teardown_devlink_params(ds);
dsa_devlink_resources_unregister(ds);
mv88e6xxx_teardown_devlink_regions_global(ds);
mv88e6xxx_mdios_unregister(chip);
}
static int mv88e6xxx_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
u8 cmode;
int err;
int i;
err = mv88e6xxx_mdios_register(chip);
if (err)
return err;
chip->ds = ds;
ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
/* Since virtual bridges are mapped in the PVT, the number we support
* depends on the physical switch topology. We need to let DSA figure
* that out and therefore we cannot set this at dsa_register_switch()
* time.
*/
if (mv88e6xxx_has_pvt(chip))
ds->max_num_bridges = MV88E6XXX_MAX_PVT_SWITCHES -
ds->dst->last_switch - 1;
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->setup_errata) {
err = chip->info->ops->setup_errata(chip);
if (err)
goto unlock;
}
/* Cache the cmode of each port. */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (chip->info->ops->port_get_cmode) {
err = chip->info->ops->port_get_cmode(chip, i, &cmode);
if (err)
goto unlock;
chip->ports[i].cmode = cmode;
}
}
err = mv88e6xxx_vtu_setup(chip);
if (err)
goto unlock;
/* Must be called after mv88e6xxx_vtu_setup (which flushes the
* VTU, thereby also flushing the STU).
*/
err = mv88e6xxx_stu_setup(chip);
if (err)
goto unlock;
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (dsa_is_unused_port(ds, i))
continue;
/* Prevent the use of an invalid port. */
if (mv88e6xxx_is_invalid_port(chip, i)) {
dev_err(chip->dev, "port %d is invalid\n", i);
err = -EINVAL;
goto unlock;
}
err = mv88e6xxx_setup_port(chip, i);
if (err)
goto unlock;
}
err = mv88e6xxx_irl_setup(chip);
if (err)
goto unlock;
err = mv88e6xxx_mac_setup(chip);
if (err)
goto unlock;
err = mv88e6xxx_phy_setup(chip);
if (err)
goto unlock;
err = mv88e6xxx_pvt_setup(chip);
if (err)
goto unlock;
err = mv88e6xxx_atu_setup(chip);
if (err)
goto unlock;
err = mv88e6xxx_broadcast_setup(chip, 0);
if (err)
goto unlock;
err = mv88e6xxx_pot_setup(chip);
if (err)
goto unlock;
err = mv88e6xxx_rmu_setup(chip);
if (err)
goto unlock;
err = mv88e6xxx_rsvd2cpu_setup(chip);
if (err)
goto unlock;
err = mv88e6xxx_trunk_setup(chip);
if (err)
goto unlock;
err = mv88e6xxx_devmap_setup(chip);
if (err)
goto unlock;
err = mv88e6xxx_pri_setup(chip);
if (err)
goto unlock;
/* Setup PTP Hardware Clock and timestamping */
if (chip->info->ptp_support) {
err = mv88e6xxx_ptp_setup(chip);
if (err)
goto unlock;
err = mv88e6xxx_hwtstamp_setup(chip);
if (err)
goto unlock;
}
err = mv88e6xxx_stats_setup(chip);
if (err)
goto unlock;
unlock:
mv88e6xxx_reg_unlock(chip);
if (err)
goto out_mdios;
/* Have to be called without holding the register lock, since
* they take the devlink lock, and we later take the locks in
* the reverse order when getting/setting parameters or
* resource occupancy.
*/
err = mv88e6xxx_setup_devlink_resources(ds);
if (err)
goto out_mdios;
err = mv88e6xxx_setup_devlink_params(ds);
if (err)
goto out_resources;
err = mv88e6xxx_setup_devlink_regions_global(ds);
if (err)
goto out_params;
return 0;
out_params:
mv88e6xxx_teardown_devlink_params(ds);
out_resources:
dsa_devlink_resources_unregister(ds);
out_mdios:
mv88e6xxx_mdios_unregister(chip);
return err;
}
static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
if (chip->info->ops->pcs_ops &&
chip->info->ops->pcs_ops->pcs_init) {
err = chip->info->ops->pcs_ops->pcs_init(chip, port);
if (err)
return err;
}
return mv88e6xxx_setup_devlink_regions_port(ds, port);
}
static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
mv88e6xxx_teardown_devlink_regions_port(ds, port);
if (chip->info->ops->pcs_ops &&
chip->info->ops->pcs_ops->pcs_teardown)
chip->info->ops->pcs_ops->pcs_teardown(chip, port);
}
static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
return chip->eeprom_len;
}
static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
struct ethtool_eeprom *eeprom, u8 *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
if (!chip->info->ops->get_eeprom)
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = chip->info->ops->get_eeprom(chip, eeprom, data);
mv88e6xxx_reg_unlock(chip);
if (err)
return err;
eeprom->magic = 0xc3ec4951;
return 0;
}
static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
struct ethtool_eeprom *eeprom, u8 *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
if (!chip->info->ops->set_eeprom)
return -EOPNOTSUPP;
if (eeprom->magic != 0xc3ec4951)
return -EINVAL;
mv88e6xxx_reg_lock(chip);
err = chip->info->ops->set_eeprom(chip, eeprom, data);
mv88e6xxx_reg_unlock(chip);
return err;
}
static const struct mv88e6xxx_ops mv88e6085_ops = {
/* MV88E6XXX_FAMILY_6097 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
static const struct mv88e6xxx_ops mv88e6095_ops = {
/* MV88E6XXX_FAMILY_6095 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_ucast_flood = mv88e6185_port_set_forward_unknown,
.port_set_mcast_flood = mv88e6185_port_set_default_forward,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.phylink_get_caps = mv88e6095_phylink_get_caps,
.pcs_ops = &mv88e6185_pcs_ops,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
static const struct mv88e6xxx_ops mv88e6097_ops = {
/* MV88E6XXX_FAMILY_6097 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_get_caps = mv88e6095_phylink_get_caps,
.pcs_ops = &mv88e6185_pcs_ops,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
static const struct mv88e6xxx_ops mv88e6123_ops = {
/* MV88E6XXX_FAMILY_6165 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
static const struct mv88e6xxx_ops mv88e6131_ops = {
/* MV88E6XXX_FAMILY_6185 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6185_port_set_forward_unknown,
.port_set_mcast_flood = mv88e6185_port_set_default_forward,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_set_pause = mv88e6185_port_set_pause,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
.ppu_enable = mv88e6185_g1_ppu_enable,
.set_cascade_port = mv88e6185_g1_set_cascade_port,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6141_ops = {
/* MV88E6XXX_FAMILY_6341 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6341_serdes_get_lane,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.gpio_ops = &mv88e6352_gpio_ops,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_get_caps = mv88e6341_phylink_get_caps,
.pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
/* MV88E6XXX_FAMILY_6165 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
.phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
static const struct mv88e6xxx_ops mv88e6165_ops = {
/* MV88E6XXX_FAMILY_6165 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6165_phy_read,
.phy_write = mv88e6165_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
.phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6171_ops = {
/* MV88E6XXX_FAMILY_6351 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
/* MV88E6XXX_FAMILY_6352 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_get_caps = mv88e6352_phylink_get_caps,
.pcs_ops = &mv88e6352_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
/* MV88E6XXX_FAMILY_6351 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
/* MV88E6XXX_FAMILY_6352 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
.serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_get_caps = mv88e6352_phylink_get_caps,
.pcs_ops = &mv88e6352_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
/* MV88E6XXX_FAMILY_6185 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_ucast_flood = mv88e6185_port_set_forward_unknown,
.port_set_mcast_flood = mv88e6185_port_set_default_forward,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.port_set_pause = mv88e6185_port_set_pause,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
.set_cascade_port = mv88e6185_g1_set_cascade_port,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps,
.pcs_ops = &mv88e6185_pcs_ops,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
static const struct mv88e6xxx_ops mv88e6190_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6390_serdes_get_lane,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_get_caps = mv88e6390_phylink_get_caps,
.pcs_ops = &mv88e6390_pcs_ops,
};
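/* As mv88e6190_ops, but using the 6390X-specific speed/duplex, max-speed-mode,
 * cmode, serdes-lane and phylink-caps callbacks for the X variant.
 */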
static const struct mv88e6xxx_ops mv88e6190x_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6390x_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390x_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_get_caps = mv88e6390x_phylink_get_caps,
.pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6390_serdes_get_lane,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6390_phylink_get_caps,
.pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
/* MV88E6XXX_FAMILY_6352 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
.serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6352_phylink_get_caps,
.pcs_ops = &mv88e6352_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6250_ops = {
/* MV88E6XXX_FAMILY_6250 */
.ieee_pri_map = mv88e6250_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6250_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6250_stats_get_sset_count,
.stats_get_strings = mv88e6250_stats_get_strings,
.stats_get_stats = mv88e6250_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6250_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6250_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6250_ptp_ops,
.phylink_get_caps = mv88e6250_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6390_serdes_get_lane,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6390_ptp_ops,
.phylink_get_caps = mv88e6390_phylink_get_caps,
.pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
/* MV88E6XXX_FAMILY_6320 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6320_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
/* MV88E6XXX_FAMILY_6320 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6320_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6185_phylink_get_caps,
};
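/* Same wiring as mv88e6141_ops, with the AVB and PTP operations additionally
 * hooked up.
 */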
static const struct mv88e6xxx_ops mv88e6341_ops = {
/* MV88E6XXX_FAMILY_6341 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6341_serdes_get_lane,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_get_caps = mv88e6341_phylink_get_caps,
.pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
/* MV88E6XXX_FAMILY_6351 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
/* MV88E6XXX_FAMILY_6351 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {
/* MV88E6XXX_FAMILY_6352 */
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.serdes_get_sset_count = mv88e6352_serdes_get_sset_count,
.serdes_get_strings = mv88e6352_serdes_get_strings,
.serdes_get_stats = mv88e6352_serdes_get_stats,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
.serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.phylink_get_caps = mv88e6352_phylink_get_caps,
.pcs_ops = &mv88e6352_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6390_serdes_get_lane,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6390_ptp_ops,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_get_caps = mv88e6390_phylink_get_caps,
.pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6390x_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390x_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6390_ptp_ops,
.phylink_get_caps = mv88e6390x_phylink_get_caps,
.pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6393x_ops = {
/* MV88E6XXX_FAMILY_6393 */
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read_c22,
.phy_write = mv88e6xxx_g2_smi_phy_write_c22,
.phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
.phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6393x_port_set_speed_duplex,
.port_max_speed_mode = mv88e6393x_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6393x_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6393x_port_set_ether_type,
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6393x_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.port_set_upstream_port = mv88e6393x_port_set_upstream_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
/* .set_cpu_port is missing because this family does not support a global
 * CPU port, only a per-port CPU port, which is set via the
 * .port_set_upstream_port method.
 */
.set_egress_port = mv88e6393x_set_egress_port,
.watchdog_ops = &mv88e6393x_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.stu_getnext = mv88e6390_g1_stu_getnext,
.stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6393x_serdes_get_lane,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
/* TODO: serdes stats */
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6393x_phylink_get_caps,
.pcs_ops = &mv88e6393x_pcs_ops,
};
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
[MV88E6020] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6020,
.family = MV88E6XXX_FAMILY_6250,
.name = "Marvell 88E6020",
.num_databases = 64,
.num_ports = 4,
.num_internal_phys = 2,
.max_vid = 4095,
.port_base_addr = 0x8,
.phy_base_addr = 0x0,
.global1_addr = 0xf,
.global2_addr = 0x7,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 5,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ops = &mv88e6250_ops,
},
[MV88E6071] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6071,
.family = MV88E6XXX_FAMILY_6250,
.name = "Marvell 88E6071",
.num_databases = 64,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
.port_base_addr = 0x08,
.phy_base_addr = 0x00,
.global1_addr = 0x0f,
.global2_addr = 0x07,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 5,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ops = &mv88e6250_ops,
},
[MV88E6085] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6085,
.family = MV88E6XXX_FAMILY_6097,
.name = "Marvell 88E6085",
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 10,
.num_internal_phys = 5,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.ops = &mv88e6085_ops,
},
[MV88E6095] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6095,
.family = MV88E6XXX_FAMILY_6095,
.name = "Marvell 88E6095/88E6095F",
.num_databases = 256,
.num_macs = 8192,
.num_ports = 11,
.num_internal_phys = 0,
.max_vid = 4095,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6095_ops,
},
[MV88E6097] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6097,
.family = MV88E6XXX_FAMILY_6097,
.name = "Marvell 88E6097/88E6097F",
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 11,
.num_internal_phys = 8,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6097_ops,
},
[MV88E6123] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6123,
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6123",
.num_databases = 4096,
.num_macs = 1024,
.num_ports = 3,
.num_internal_phys = 5,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6123_ops,
},
[MV88E6131] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6131,
.family = MV88E6XXX_FAMILY_6185,
.name = "Marvell 88E6131",
.num_databases = 256,
.num_macs = 8192,
.num_ports = 8,
.num_internal_phys = 0,
.max_vid = 4095,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6131_ops,
},
[MV88E6141] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6141",
.num_databases = 4096,
.num_macs = 2048,
.num_ports = 6,
.num_internal_phys = 5,
.num_gpio = 11,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x10,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
.atu_move_port_mask = 0x1f,
.g1_irqs = 9,
.g2_irqs = 10,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6141_ops,
},
[MV88E6161] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6161,
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6161",
.num_databases = 4096,
.num_macs = 1024,
.num_ports = 6,
.num_internal_phys = 5,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
.ops = &mv88e6161_ops,
},
[MV88E6165] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6165,
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6165",
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 6,
.num_internal_phys = 0,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.ptp_support = true,
.ops = &mv88e6165_ops,
},
[MV88E6171] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6171,
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6171",
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6171_ops,
},
[MV88E6172] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6172,
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6172",
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6172_ops,
},
[MV88E6175] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6175,
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6175",
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6175_ops,
},
[MV88E6176] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6176,
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6176",
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6176_ops,
},
[MV88E6185] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6185,
.family = MV88E6XXX_FAMILY_6185,
.name = "Marvell 88E6185",
.num_databases = 256,
.num_macs = 8192,
.num_ports = 10,
.num_internal_phys = 0,
.max_vid = 4095,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6185_ops,
},
[MV88E6190] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6190,
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6190",
.num_databases = 4096,
.num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
.pvt = true,
.multi_chip = true,
.atu_move_port_mask = 0x1f,
.ops = &mv88e6190_ops,
},
[MV88E6190X] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6190X,
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6190X",
.num_databases = 4096,
.num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
.ops = &mv88e6190x_ops,
},
[MV88E6191] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6191,
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6191",
.num_databases = 4096,
.num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
.ptp_support = true,
.ops = &mv88e6191_ops,
},
[MV88E6191X] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6191X,
.family = MV88E6XXX_FAMILY_6393,
.name = "Marvell 88E6191X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 8,
.internal_phys_offset = 1,
.max_vid = 8191,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
.ptp_support = true,
.ops = &mv88e6393x_ops,
},
[MV88E6193X] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6193X,
.family = MV88E6XXX_FAMILY_6393,
.name = "Marvell 88E6193X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 8,
.internal_phys_offset = 1,
.max_vid = 8191,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
.ptp_support = true,
.ops = &mv88e6393x_ops,
},
[MV88E6220] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6220,
.family = MV88E6XXX_FAMILY_6250,
.name = "Marvell 88E6220",
.num_databases = 64,
/* Ports 2-4 are not routed to pins
* => usable ports 0, 1, 5, 6
*/
.num_ports = 7,
.num_internal_phys = 2,
.invalid_port_mask = BIT(2) | BIT(3) | BIT(4),
.max_vid = 4095,
.port_base_addr = 0x08,
.phy_base_addr = 0x00,
.global1_addr = 0x0f,
.global2_addr = 0x07,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
.ops = &mv88e6250_ops,
},
[MV88E6240] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6240,
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6240",
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
.ops = &mv88e6240_ops,
},
[MV88E6250] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6250,
.family = MV88E6XXX_FAMILY_6250,
.name = "Marvell 88E6250",
.num_databases = 64,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
.port_base_addr = 0x08,
.phy_base_addr = 0x00,
.global1_addr = 0x0f,
.global2_addr = 0x07,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
.ops = &mv88e6250_ops,
},
[MV88E6290] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6290,
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6290",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
.ptp_support = true,
.ops = &mv88e6290_ops,
},
[MV88E6320] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6320,
.family = MV88E6XXX_FAMILY_6320,
.name = "Marvell 88E6320",
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
.ops = &mv88e6320_ops,
},
[MV88E6321] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6321,
.family = MV88E6XXX_FAMILY_6320,
.name = "Marvell 88E6321",
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
.ops = &mv88e6321_ops,
},
[MV88E6341] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
.num_databases = 4096,
.num_macs = 2048,
.num_internal_phys = 5,
.num_ports = 6,
.num_gpio = 11,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x10,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
.atu_move_port_mask = 0x1f,
.g1_irqs = 9,
.g2_irqs = 10,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
.ops = &mv88e6341_ops,
},
[MV88E6350] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6350,
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6350",
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6350_ops,
},
[MV88E6351] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6351,
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6351",
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ops = &mv88e6351_ops,
},
[MV88E6352] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6352,
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6352",
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
.ops = &mv88e6352_ops,
},
[MV88E6361] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6361,
.family = MV88E6XXX_FAMILY_6393,
.name = "Marvell 88E6361",
.num_databases = 4096,
.num_macs = 16384,
.num_ports = 11,
/* Ports 1, 2 and 8 are not routed */
.invalid_port_mask = BIT(1) | BIT(2) | BIT(8),
.num_internal_phys = 5,
.internal_phys_offset = 3,
.max_vid = 4095,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
.ptp_support = true,
.ops = &mv88e6393x_ops,
},
[MV88E6390] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6390",
.num_databases = 4096,
.num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_UNDOCUMENTED,
.ptp_support = true,
.ops = &mv88e6390_ops,
},
[MV88E6390X] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6390X,
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6390X",
.num_databases = 4096,
.num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_UNDOCUMENTED,
.ptp_support = true,
.ops = &mv88e6390x_ops,
},
[MV88E6393X] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6393X,
.family = MV88E6XXX_FAMILY_6393,
.name = "Marvell 88E6393X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 8,
.internal_phys_offset = 1,
.max_vid = 8191,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
.ptp_support = true,
.ops = &mv88e6393x_ops,
},
};
static const struct mv88e6xxx_info *mv88e6xxx_lookup_info(unsigned int prod_num)
{
int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_table); ++i)
if (mv88e6xxx_table[i].prod_num == prod_num)
return &mv88e6xxx_table[i];
return NULL;
}
static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip)
{
const struct mv88e6xxx_info *info;
unsigned int prod_num, rev;
u16 id;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_read(chip, 0, MV88E6XXX_PORT_SWITCH_ID, &id);
mv88e6xxx_reg_unlock(chip);
if (err)
return err;
prod_num = id & MV88E6XXX_PORT_SWITCH_ID_PROD_MASK;
rev = id & MV88E6XXX_PORT_SWITCH_ID_REV_MASK;
info = mv88e6xxx_lookup_info(prod_num);
if (!info)
return -ENODEV;
/* Update the compatible info with the probed one */
chip->info = info;
dev_info(chip->dev, "switch 0x%x detected: %s, revision %u\n",
chip->info->prod_num, chip->info->name, rev);
return 0;
}
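/* Worked example (illustrative only; assumes the usual 0xfff0 product and
 * 0x000f revision masks): a switch ID register value of 0x3521 decodes to
 * prod_num 0x3520, matching the 88E6352 entry of mv88e6xxx_table, and
 * revision 1.
 */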
static int mv88e6xxx_single_chip_detect(struct mv88e6xxx_chip *chip,
struct mdio_device *mdiodev)
{
int err;
/* dual_chip takes precedence over single/multi-chip modes */
if (chip->info->dual_chip)
return -EINVAL;
/* If the mdio addr is 16, indicating the first port address of a switch
 * (e.g. mv88e6*41) in single chip addressing mode, the device may be
 * configured in single chip addressing mode. Set up the SMI access for
 * single chip addressing mode and attempt to detect the model of the
 * switch; if this fails, the device is not configured in single chip
 * addressing mode.
 */
if (mdiodev->addr != 16)
return -EINVAL;
err = mv88e6xxx_smi_init(chip, mdiodev->bus, 0);
if (err)
return err;
return mv88e6xxx_detect(chip);
}
static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
{
struct mv88e6xxx_chip *chip;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return NULL;
chip->dev = dev;
mutex_init(&chip->reg_lock);
INIT_LIST_HEAD(&chip->mdios);
idr_init(&chip->policies);
INIT_LIST_HEAD(&chip->msts);
return chip;
}
static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol m)
{
struct mv88e6xxx_chip *chip = ds->priv;
return chip->tag_protocol;
}
static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
struct mv88e6xxx_chip *chip = ds->priv;
enum dsa_tag_protocol old_protocol;
struct dsa_port *cpu_dp;
int err;
switch (proto) {
case DSA_TAG_PROTO_EDSA:
switch (chip->info->edsa_support) {
case MV88E6XXX_EDSA_UNSUPPORTED:
return -EPROTONOSUPPORT;
case MV88E6XXX_EDSA_UNDOCUMENTED:
dev_warn(chip->dev, "Relying on undocumented EDSA tagging behavior\n");
fallthrough;
case MV88E6XXX_EDSA_SUPPORTED:
break;
}
break;
case DSA_TAG_PROTO_DSA:
break;
default:
return -EPROTONOSUPPORT;
}
old_protocol = chip->tag_protocol;
chip->tag_protocol = proto;
mv88e6xxx_reg_lock(chip);
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
err = mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
if (err) {
mv88e6xxx_reg_unlock(chip);
goto unwind;
}
}
mv88e6xxx_reg_unlock(chip);
return 0;
unwind:
chip->tag_protocol = old_protocol;
mv88e6xxx_reg_lock(chip);
dsa_switch_for_each_cpu_port_continue_reverse(cpu_dp, ds)
mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid, 0);
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress,
struct netlink_ext_ack *extack)
{
enum mv88e6xxx_egress_direction direction = ingress ?
MV88E6XXX_EGRESS_DIR_INGRESS :
MV88E6XXX_EGRESS_DIR_EGRESS;
struct mv88e6xxx_chip *chip = ds->priv;
bool other_mirrors = false;
int i;
int err;
mutex_lock(&chip->reg_lock);
if ((ingress ? chip->ingress_dest_port : chip->egress_dest_port) !=
mirror->to_local_port) {
for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
other_mirrors |= ingress ?
chip->ports[i].mirror_ingress :
chip->ports[i].mirror_egress;
/* Can't change egress port when other mirror is active */
if (other_mirrors) {
err = -EBUSY;
goto out;
}
err = mv88e6xxx_set_egress_port(chip, direction,
mirror->to_local_port);
if (err)
goto out;
}
err = mv88e6xxx_port_set_mirror(chip, port, direction, true);
out:
mutex_unlock(&chip->reg_lock);
return err;
}
static void mv88e6xxx_port_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
enum mv88e6xxx_egress_direction direction = mirror->ingress ?
MV88E6XXX_EGRESS_DIR_INGRESS :
MV88E6XXX_EGRESS_DIR_EGRESS;
struct mv88e6xxx_chip *chip = ds->priv;
bool other_mirrors = false;
int i;
mutex_lock(&chip->reg_lock);
if (mv88e6xxx_port_set_mirror(chip, port, direction, false))
dev_err(ds->dev, "p%d: failed to disable mirroring\n", port);
for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
other_mirrors |= mirror->ingress ?
chip->ports[i].mirror_ingress :
chip->ports[i].mirror_egress;
/* Reset egress port when no other mirror is active */
if (!other_mirrors) {
if (mv88e6xxx_set_egress_port(chip, direction,
dsa_upstream_port(ds, port)))
dev_err(ds->dev, "failed to set egress port\n");
}
mutex_unlock(&chip->reg_lock);
}
static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
const struct mv88e6xxx_ops *ops;
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
BR_BCAST_FLOOD | BR_PORT_LOCKED | BR_PORT_MAB))
return -EINVAL;
ops = chip->info->ops;
if ((flags.mask & BR_FLOOD) && !ops->port_set_ucast_flood)
return -EINVAL;
if ((flags.mask & BR_MCAST_FLOOD) && !ops->port_set_mcast_flood)
return -EINVAL;
return 0;
}
static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err = 0;
mv88e6xxx_reg_lock(chip);
if (flags.mask & BR_LEARNING) {
bool learning = !!(flags.val & BR_LEARNING);
u16 pav = learning ? (1 << port) : 0;
err = mv88e6xxx_port_set_assoc_vector(chip, port, pav);
if (err)
goto out;
}
if (flags.mask & BR_FLOOD) {
bool unicast = !!(flags.val & BR_FLOOD);
err = chip->info->ops->port_set_ucast_flood(chip, port,
unicast);
if (err)
goto out;
}
if (flags.mask & BR_MCAST_FLOOD) {
bool multicast = !!(flags.val & BR_MCAST_FLOOD);
err = chip->info->ops->port_set_mcast_flood(chip, port,
multicast);
if (err)
goto out;
}
if (flags.mask & BR_BCAST_FLOOD) {
bool broadcast = !!(flags.val & BR_BCAST_FLOOD);
err = mv88e6xxx_port_broadcast_sync(chip, port, broadcast);
if (err)
goto out;
}
if (flags.mask & BR_PORT_MAB) {
bool mab = !!(flags.val & BR_PORT_MAB);
mv88e6xxx_port_set_mab(chip, port, mab);
}
if (flags.mask & BR_PORT_LOCKED) {
bool locked = !!(flags.val & BR_PORT_LOCKED);
err = mv88e6xxx_port_set_lock(chip, port, locked);
if (err)
goto out;
}
out:
mv88e6xxx_reg_unlock(chip);
return err;
}
static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
struct dsa_lag lag,
struct netdev_lag_upper_info *info,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
int members = 0;
if (!mv88e6xxx_has_lag(chip)) {
NL_SET_ERR_MSG_MOD(extack, "Chip does not support LAG offload");
return false;
}
if (!lag.id)
return false;
dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
if (members > 8) {
NL_SET_ERR_MSG_MOD(extack,
"Cannot offload more than 8 LAG ports");
return false;
}
/* We could potentially relax this to include active
* backup in the future.
*/
if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
NL_SET_ERR_MSG_MOD(extack,
"Can only offload LAG using hash TX type");
return false;
}
/* Ideally we would also validate that the hash type matches
* the hardware. Alas, this is always set to unknown on team
* interfaces.
*/
return true;
}
static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
u16 map = 0;
int id;
/* DSA LAG IDs are one-based, hardware is zero-based */
id = lag.id - 1;
/* Build the map of all ports to distribute flows destined for
* this LAG. This can be either a local user port, or a DSA
* port if the LAG port is on a remote chip.
*/
dsa_lag_foreach_port(dp, ds->dst, &lag)
map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
}
static const u8 mv88e6xxx_lag_mask_table[8][8] = {
/* Row number corresponds to the number of active members in a
* LAG. Each column states which of the eight hash buckets are
* mapped to the column:th port in the LAG.
*
* Example: In a LAG with three active ports, the second port
* ([2][1]) would be selected for traffic mapped to buckets
* 3,4,5 (0x38).
*/
{ 0xff, 0, 0, 0, 0, 0, 0, 0 },
{ 0x0f, 0xf0, 0, 0, 0, 0, 0, 0 },
{ 0x07, 0x38, 0xc0, 0, 0, 0, 0, 0 },
{ 0x03, 0x0c, 0x30, 0xc0, 0, 0, 0, 0 },
{ 0x03, 0x0c, 0x30, 0x40, 0x80, 0, 0, 0 },
{ 0x03, 0x0c, 0x10, 0x20, 0x40, 0x80, 0, 0 },
{ 0x03, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0 },
{ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 },
};
static void mv88e6xxx_lag_set_port_mask(u16 *mask, int port,
int num_tx, int nth)
{
u8 active = 0;
int i;
num_tx = num_tx <= 8 ? num_tx : 8;
if (nth < num_tx)
active = mv88e6xxx_lag_mask_table[num_tx - 1][nth];
for (i = 0; i < 8; i++) {
if (BIT(i) & active)
mask[i] |= BIT(port);
}
}
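/* Illustrative sketch, not part of the upstream driver: the mask table is
 * only meaningful if every row assigns each of the eight hash buckets to
 * exactly one LAG member. The helper below is hypothetical (its name is
 * made up) and deliberately kept out of the build; it only demonstrates
 * how that invariant could be checked.
 */
#if 0
static bool mv88e6xxx_lag_mask_row_is_valid(const u8 row[8])
{
	u8 seen = 0;
	int i;

	for (i = 0; i < 8; i++) {
		/* A bucket must not be mapped to two different members */
		if (seen & row[i])
			return false;
		seen |= row[i];
	}
	/* Every one of the eight buckets must be claimed by some member */
	return seen == 0xff;
}
#endif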
static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
unsigned int id, num_tx;
struct dsa_port *dp;
struct dsa_lag *lag;
int i, err, nth;
u16 mask[8];
u16 ivec;
/* Assume no port is a member of any LAG. */
ivec = BIT(mv88e6xxx_num_ports(chip)) - 1;
/* Disable all masks for ports that _are_ members of a LAG. */
dsa_switch_for_each_port(dp, ds) {
if (!dp->lag)
continue;
ivec &= ~BIT(dp->index);
}
for (i = 0; i < 8; i++)
mask[i] = ivec;
/* Enable the correct subset of masks for all LAG ports that
* are in the Tx set.
*/
dsa_lags_foreach_id(id, ds->dst) {
lag = dsa_lag_by_id(ds->dst, id);
if (!lag)
continue;
num_tx = 0;
dsa_lag_foreach_port(dp, ds->dst, lag) {
if (dp->lag_tx_enabled)
num_tx++;
}
if (!num_tx)
continue;
nth = 0;
dsa_lag_foreach_port(dp, ds->dst, lag) {
if (!dp->lag_tx_enabled)
continue;
if (dp->ds == ds)
mv88e6xxx_lag_set_port_mask(mask, dp->index,
num_tx, nth);
nth++;
}
}
for (i = 0; i < 8; i++) {
err = mv88e6xxx_g2_trunk_mask_write(chip, i, true, mask[i]);
if (err)
return err;
}
return 0;
}
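/* Worked example (illustrative only): on an 11-port switch the initial
 * in-chip vector above is BIT(11) - 1 = 0x7ff, i.e. every port may receive
 * non-LAG traffic. If ports 3 and 4 are LAG members, their bits are
 * cleared, leaving 0x7e7 in all eight trunk masks before the Tx-enabled
 * LAG members are added back per hash bucket.
 */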
static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
struct dsa_lag lag)
{
int err;
err = mv88e6xxx_lag_sync_masks(ds);
if (!err)
err = mv88e6xxx_lag_sync_map(ds, lag);
return err;
}
static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_lag_sync_masks(ds);
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
struct dsa_lag lag,
struct netdev_lag_upper_info *info,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err, id;
if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
/* DSA LAG IDs are one-based */
id = lag.id - 1;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_trunk(chip, port, true, id);
if (err)
goto err_unlock;
err = mv88e6xxx_lag_sync_masks_map(ds, lag);
if (err)
goto err_clear_trunk;
mv88e6xxx_reg_unlock(chip);
return 0;
err_clear_trunk:
mv88e6xxx_port_set_trunk(chip, port, false, 0);
err_unlock:
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_trunk;
mv88e6xxx_reg_lock(chip);
err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
err_trunk = mv88e6xxx_port_set_trunk(chip, port, false, 0);
mv88e6xxx_reg_unlock(chip);
return err_sync ? : err_trunk;
}
static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_lag_sync_masks(ds);
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_lag_sync_masks_map(ds, lag);
if (err)
goto unlock;
err = mv88e6xxx_pvt_map(chip, sw_index, port);
unlock:
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
int port, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_pvt;
mv88e6xxx_reg_lock(chip);
err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
err_pvt = mv88e6xxx_pvt_map(chip, sw_index, port);
mv88e6xxx_reg_unlock(chip);
return err_sync ? : err_pvt;
}
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.change_tag_protocol = mv88e6xxx_change_tag_protocol,
.setup = mv88e6xxx_setup,
.teardown = mv88e6xxx_teardown,
.port_setup = mv88e6xxx_port_setup,
.port_teardown = mv88e6xxx_port_teardown,
.phylink_get_caps = mv88e6xxx_get_caps,
.phylink_mac_select_pcs = mv88e6xxx_mac_select_pcs,
.phylink_mac_prepare = mv88e6xxx_mac_prepare,
.phylink_mac_config = mv88e6xxx_mac_config,
.phylink_mac_finish = mv88e6xxx_mac_finish,
.phylink_mac_link_down = mv88e6xxx_mac_link_down,
.phylink_mac_link_up = mv88e6xxx_mac_link_up,
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_sset_count = mv88e6xxx_get_sset_count,
.port_max_mtu = mv88e6xxx_get_max_mtu,
.port_change_mtu = mv88e6xxx_change_mtu,
.get_mac_eee = mv88e6xxx_get_mac_eee,
.set_mac_eee = mv88e6xxx_set_mac_eee,
.get_eeprom_len = mv88e6xxx_get_eeprom_len,
.get_eeprom = mv88e6xxx_get_eeprom,
.set_eeprom = mv88e6xxx_set_eeprom,
.get_regs_len = mv88e6xxx_get_regs_len,
.get_regs = mv88e6xxx_get_regs,
.get_rxnfc = mv88e6xxx_get_rxnfc,
.set_rxnfc = mv88e6xxx_set_rxnfc,
.set_ageing_time = mv88e6xxx_set_ageing_time,
.port_bridge_join = mv88e6xxx_port_bridge_join,
.port_bridge_leave = mv88e6xxx_port_bridge_leave,
.port_pre_bridge_flags = mv88e6xxx_port_pre_bridge_flags,
.port_bridge_flags = mv88e6xxx_port_bridge_flags,
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
.port_mst_state_set = mv88e6xxx_port_mst_state_set,
.port_fast_age = mv88e6xxx_port_fast_age,
.port_vlan_fast_age = mv88e6xxx_port_vlan_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
.vlan_msti_set = mv88e6xxx_vlan_msti_set,
.port_fdb_add = mv88e6xxx_port_fdb_add,
.port_fdb_del = mv88e6xxx_port_fdb_del,
.port_fdb_dump = mv88e6xxx_port_fdb_dump,
.port_mdb_add = mv88e6xxx_port_mdb_add,
.port_mdb_del = mv88e6xxx_port_mdb_del,
.port_mirror_add = mv88e6xxx_port_mirror_add,
.port_mirror_del = mv88e6xxx_port_mirror_del,
.crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join,
.crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave,
.port_hwtstamp_set = mv88e6xxx_port_hwtstamp_set,
.port_hwtstamp_get = mv88e6xxx_port_hwtstamp_get,
.port_txtstamp = mv88e6xxx_port_txtstamp,
.port_rxtstamp = mv88e6xxx_port_rxtstamp,
.get_ts_info = mv88e6xxx_get_ts_info,
.devlink_param_get = mv88e6xxx_devlink_param_get,
.devlink_param_set = mv88e6xxx_devlink_param_set,
.devlink_info_get = mv88e6xxx_devlink_info_get,
.port_lag_change = mv88e6xxx_port_lag_change,
.port_lag_join = mv88e6xxx_port_lag_join,
.port_lag_leave = mv88e6xxx_port_lag_leave,
.crosschip_lag_change = mv88e6xxx_crosschip_lag_change,
.crosschip_lag_join = mv88e6xxx_crosschip_lag_join,
.crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
{
struct device *dev = chip->dev;
struct dsa_switch *ds;
ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
ds->dev = dev;
ds->num_ports = mv88e6xxx_num_ports(chip);
ds->priv = chip;
ds->ops = &mv88e6xxx_switch_ops;
ds->ageing_time_min = chip->info->age_time_coeff;
ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
/* Some chips support up to 32, but that requires enabling the
* 5-bit port mode, which we do not support. 640k^W16 ought to
* be enough for anyone.
*/
ds->num_lag_ids = mv88e6xxx_has_lag(chip) ? 16 : 0;
dev_set_drvdata(dev, ds);
return dsa_register_switch(ds);
}
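/* Worked example (illustrative only): DSA expresses ageing times in
 * milliseconds, so a chip with age_time_coeff = 15000 advertises a range
 * from 15000 ms (15 s) up to 15000 * U8_MAX = 3825000 ms, roughly 64
 * minutes.
 */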
static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip)
{
dsa_unregister_switch(chip->ds);
}
static const void *pdata_device_get_match_data(struct device *dev)
{
const struct of_device_id *matches = dev->driver->of_match_table;
const struct dsa_mv88e6xxx_pdata *pdata = dev->platform_data;
for (; matches->name[0] || matches->type[0] || matches->compatible[0];
matches++) {
if (!strcmp(pdata->compatible, matches->compatible))
return matches->data;
}
return NULL;
}
/* There is no suspend to RAM support at the DSA level yet; the switch
 * configuration would be lost after a power cycle, so prevent it from
 * being suspended.
 */
static int __maybe_unused mv88e6xxx_suspend(struct device *dev)
{
return -EOPNOTSUPP;
}
static int __maybe_unused mv88e6xxx_resume(struct device *dev)
{
return 0;
}
static SIMPLE_DEV_PM_OPS(mv88e6xxx_pm_ops, mv88e6xxx_suspend, mv88e6xxx_resume);
static int mv88e6xxx_probe(struct mdio_device *mdiodev)
{
struct dsa_mv88e6xxx_pdata *pdata = mdiodev->dev.platform_data;
const struct mv88e6xxx_info *compat_info = NULL;
struct device *dev = &mdiodev->dev;
struct device_node *np = dev->of_node;
struct mv88e6xxx_chip *chip;
int port;
int err;
if (!np && !pdata)
return -EINVAL;
if (np)
compat_info = of_device_get_match_data(dev);
if (pdata) {
compat_info = pdata_device_get_match_data(dev);
if (!pdata->netdev)
return -EINVAL;
for (port = 0; port < DSA_MAX_PORTS; port++) {
if (!(pdata->enabled_ports & (1 << port)))
continue;
if (strcmp(pdata->cd.port_names[port], "cpu"))
continue;
pdata->cd.netdev[port] = &pdata->netdev->dev;
break;
}
}
if (!compat_info)
return -EINVAL;
chip = mv88e6xxx_alloc_chip(dev);
if (!chip) {
err = -ENOMEM;
goto out;
}
chip->info = compat_info;
chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(chip->reset)) {
err = PTR_ERR(chip->reset);
goto out;
}
if (chip->reset)
usleep_range(10000, 20000);
/* Detect if the device is configured in single chip addressing mode,
* otherwise continue with address specific smi init/detection.
*/
err = mv88e6xxx_single_chip_detect(chip, mdiodev);
if (err) {
err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
if (err)
goto out;
err = mv88e6xxx_detect(chip);
if (err)
goto out;
}
if (chip->info->edsa_support == MV88E6XXX_EDSA_SUPPORTED)
chip->tag_protocol = DSA_TAG_PROTO_EDSA;
else
chip->tag_protocol = DSA_TAG_PROTO_DSA;
mv88e6xxx_phy_init(chip);
if (chip->info->ops->get_eeprom) {
if (np)
of_property_read_u32(np, "eeprom-length",
&chip->eeprom_len);
else
chip->eeprom_len = pdata->eeprom_len;
}
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_switch_reset(chip);
mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
if (np) {
chip->irq = of_irq_get(np, 0);
if (chip->irq == -EPROBE_DEFER) {
err = chip->irq;
goto out;
}
}
if (pdata)
chip->irq = pdata->irq;
/* Has to be performed before the MDIO bus is created, because
* the PHYs will link their interrupts to these interrupt
* controllers
*/
mv88e6xxx_reg_lock(chip);
if (chip->irq > 0)
err = mv88e6xxx_g1_irq_setup(chip);
else
err = mv88e6xxx_irq_poll_setup(chip);
mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
if (chip->info->g2_irqs > 0) {
err = mv88e6xxx_g2_irq_setup(chip);
if (err)
goto out_g1_irq;
}
err = mv88e6xxx_g1_atu_prob_irq_setup(chip);
if (err)
goto out_g2_irq;
err = mv88e6xxx_g1_vtu_prob_irq_setup(chip);
if (err)
goto out_g1_atu_prob_irq;
err = mv88e6xxx_register_switch(chip);
if (err)
goto out_g1_vtu_prob_irq;
return 0;
out_g1_vtu_prob_irq:
mv88e6xxx_g1_vtu_prob_irq_free(chip);
out_g1_atu_prob_irq:
mv88e6xxx_g1_atu_prob_irq_free(chip);
out_g2_irq:
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
out_g1_irq:
if (chip->irq > 0)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
out:
if (pdata)
dev_put(pdata->netdev);
return err;
}
static void mv88e6xxx_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
struct mv88e6xxx_chip *chip;
if (!ds)
return;
chip = ds->priv;
if (chip->info->ptp_support) {
mv88e6xxx_hwtstamp_free(chip);
mv88e6xxx_ptp_free(chip);
}
mv88e6xxx_phy_destroy(chip);
mv88e6xxx_unregister_switch(chip);
mv88e6xxx_g1_vtu_prob_irq_free(chip);
mv88e6xxx_g1_atu_prob_irq_free(chip);
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
if (chip->irq > 0)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
}
static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
if (!ds)
return;
dsa_switch_shutdown(ds);
dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id mv88e6xxx_of_match[] = {
{
.compatible = "marvell,mv88e6085",
.data = &mv88e6xxx_table[MV88E6085],
},
{
.compatible = "marvell,mv88e6190",
.data = &mv88e6xxx_table[MV88E6190],
},
{
.compatible = "marvell,mv88e6250",
.data = &mv88e6xxx_table[MV88E6250],
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match);
static struct mdio_driver mv88e6xxx_driver = {
.probe = mv88e6xxx_probe,
.remove = mv88e6xxx_remove,
.shutdown = mv88e6xxx_shutdown,
.mdiodrv.driver = {
.name = "mv88e6085",
.of_match_table = mv88e6xxx_of_match,
.pm = &mv88e6xxx_pm_ops,
},
};
mdio_module_driver(mv88e6xxx_driver);
MODULE_AUTHOR("Lennert Buytenhek <[email protected]>");
MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/mv88e6xxx/chip.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6352 family SERDES PCS support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2017 Andrew Lunn <[email protected]>
*/
#include <linux/phylink.h>
#include "global2.h"
#include "port.h"
#include "serdes.h"
/* Definitions from drivers/net/phy/marvell.c, which would be good to reuse. */
#define MII_M1011_PHY_STATUS 17
#define MII_M1011_IMASK 18
#define MII_M1011_IMASK_LINK_CHANGE BIT(10)
#define MII_M1011_IEVENT 19
#define MII_M1011_IEVENT_LINK_CHANGE BIT(10)
#define MII_MARVELL_PHY_PAGE 22
#define MII_MARVELL_FIBER_PAGE 1
struct marvell_c22_pcs {
struct mdio_device mdio;
struct phylink_pcs phylink_pcs;
unsigned int irq;
char name[64];
bool (*link_check)(struct marvell_c22_pcs *mpcs);
struct mv88e6xxx_port *port;
};
static struct marvell_c22_pcs *pcs_to_marvell_c22_pcs(struct phylink_pcs *pcs)
{
return container_of(pcs, struct marvell_c22_pcs, phylink_pcs);
}
static int marvell_c22_pcs_set_fiber_page(struct marvell_c22_pcs *mpcs)
{
u16 page;
int err;
mutex_lock(&mpcs->mdio.bus->mdio_lock);
err = __mdiodev_read(&mpcs->mdio, MII_MARVELL_PHY_PAGE);
if (err < 0) {
dev_err(mpcs->mdio.dev.parent,
"%s: can't read Serdes page register: %pe\n",
mpcs->name, ERR_PTR(err));
return err;
}
page = err;
err = __mdiodev_write(&mpcs->mdio, MII_MARVELL_PHY_PAGE,
MII_MARVELL_FIBER_PAGE);
if (err) {
dev_err(mpcs->mdio.dev.parent,
"%s: can't set Serdes page register: %pe\n",
mpcs->name, ERR_PTR(err));
return err;
}
return page;
}
static int marvell_c22_pcs_restore_page(struct marvell_c22_pcs *mpcs,
int oldpage, int ret)
{
int err;
if (oldpage >= 0) {
err = __mdiodev_write(&mpcs->mdio, MII_MARVELL_PHY_PAGE,
oldpage);
if (err)
dev_err(mpcs->mdio.dev.parent,
"%s: can't restore Serdes page register: %pe\n",
mpcs->name, ERR_PTR(err));
if (!err || ret < 0)
err = ret;
} else {
err = oldpage;
}
mutex_unlock(&mpcs->mdio.bus->mdio_lock);
return err;
}
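/* Illustrative sketch, not part of the upstream driver: the two helpers
 * above are meant to bracket every access to the fiber page. A hypothetical
 * paged read (the name is made up, and the block is deliberately kept out
 * of the build) would be composed like this, always restoring the
 * previously selected page even when the read itself fails.
 */
#if 0
static int marvell_c22_pcs_paged_read(struct marvell_c22_pcs *mpcs, u8 reg)
{
	int oldpage, val = -EIO;

	oldpage = marvell_c22_pcs_set_fiber_page(mpcs);
	if (oldpage >= 0)
		val = __mdiodev_read(&mpcs->mdio, reg);
	/* Returns the read result, or the page error if one occurred */
	return marvell_c22_pcs_restore_page(mpcs, oldpage, val);
}
#endif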
static irqreturn_t marvell_c22_pcs_handle_irq(int irq, void *dev_id)
{
struct marvell_c22_pcs *mpcs = dev_id;
irqreturn_t status = IRQ_NONE;
int err, oldpage;
oldpage = marvell_c22_pcs_set_fiber_page(mpcs);
if (oldpage < 0)
goto fail;
err = __mdiodev_read(&mpcs->mdio, MII_M1011_IEVENT);
if (err >= 0 && err & MII_M1011_IEVENT_LINK_CHANGE) {
phylink_pcs_change(&mpcs->phylink_pcs, true);
status = IRQ_HANDLED;
}
fail:
marvell_c22_pcs_restore_page(mpcs, oldpage, 0);
return status;
}
static int marvell_c22_pcs_modify(struct marvell_c22_pcs *mpcs, u8 reg,
u16 mask, u16 val)
{
int oldpage, err = 0;
oldpage = marvell_c22_pcs_set_fiber_page(mpcs);
if (oldpage >= 0)
err = __mdiodev_modify(&mpcs->mdio, reg, mask, val);
return marvell_c22_pcs_restore_page(mpcs, oldpage, err);
}
static int marvell_c22_pcs_power(struct marvell_c22_pcs *mpcs,
bool on)
{
u16 val = on ? 0 : BMCR_PDOWN;
return marvell_c22_pcs_modify(mpcs, MII_BMCR, BMCR_PDOWN, val);
}
static int marvell_c22_pcs_control_irq(struct marvell_c22_pcs *mpcs,
bool enable)
{
u16 val = enable ? MII_M1011_IMASK_LINK_CHANGE : 0;
return marvell_c22_pcs_modify(mpcs, MII_M1011_IMASK,
MII_M1011_IMASK_LINK_CHANGE, val);
}
static int marvell_c22_pcs_enable(struct phylink_pcs *pcs)
{
struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
int err;
err = marvell_c22_pcs_power(mpcs, true);
if (err)
return err;
return marvell_c22_pcs_control_irq(mpcs, !!mpcs->irq);
}
static void marvell_c22_pcs_disable(struct phylink_pcs *pcs)
{
struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
marvell_c22_pcs_control_irq(mpcs, false);
marvell_c22_pcs_power(mpcs, false);
}
static void marvell_c22_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
int oldpage, bmsr, lpa, status;
state->link = false;
if (mpcs->link_check && !mpcs->link_check(mpcs))
return;
oldpage = marvell_c22_pcs_set_fiber_page(mpcs);
if (oldpage >= 0) {
bmsr = __mdiodev_read(&mpcs->mdio, MII_BMSR);
lpa = __mdiodev_read(&mpcs->mdio, MII_LPA);
status = __mdiodev_read(&mpcs->mdio, MII_M1011_PHY_STATUS);
}
if (marvell_c22_pcs_restore_page(mpcs, oldpage, 0) >= 0 &&
bmsr >= 0 && lpa >= 0 && status >= 0)
mv88e6xxx_pcs_decode_state(mpcs->mdio.dev.parent, bmsr, lpa,
status, state);
}
static int marvell_c22_pcs_config(struct phylink_pcs *pcs,
unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
int oldpage, adv, err, ret = 0;
u16 bmcr;
adv = phylink_mii_c22_pcs_encode_advertisement(interface, advertising);
if (adv < 0)
return 0;
bmcr = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED ? BMCR_ANENABLE : 0;
oldpage = marvell_c22_pcs_set_fiber_page(mpcs);
if (oldpage < 0)
goto restore;
err = __mdiodev_modify_changed(&mpcs->mdio, MII_ADVERTISE, 0xffff, adv);
ret = err;
if (err < 0)
goto restore;
err = __mdiodev_modify_changed(&mpcs->mdio, MII_BMCR, BMCR_ANENABLE,
bmcr);
if (err < 0) {
ret = err;
goto restore;
}
/* If the ANENABLE bit was changed, the PHY will restart negotiation,
* so we don't need to flag a change to trigger its own restart.
*/
if (err)
ret = 0;
restore:
return marvell_c22_pcs_restore_page(mpcs, oldpage, ret);
}
static void marvell_c22_pcs_an_restart(struct phylink_pcs *pcs)
{
struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
marvell_c22_pcs_modify(mpcs, MII_BMCR, BMCR_ANRESTART, BMCR_ANRESTART);
}
static void marvell_c22_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface, int speed,
int duplex)
{
struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
u16 bmcr;
int err;
if (phylink_autoneg_inband(mode))
return;
bmcr = mii_bmcr_encode_fixed(speed, duplex);
err = marvell_c22_pcs_modify(mpcs, MII_BMCR, BMCR_SPEED100 |
BMCR_FULLDPLX | BMCR_SPEED1000, bmcr);
if (err)
dev_err(mpcs->mdio.dev.parent,
"%s: failed to configure mpcs: %pe\n", mpcs->name,
ERR_PTR(err));
}
static const struct phylink_pcs_ops marvell_c22_pcs_ops = {
.pcs_enable = marvell_c22_pcs_enable,
.pcs_disable = marvell_c22_pcs_disable,
.pcs_get_state = marvell_c22_pcs_get_state,
.pcs_config = marvell_c22_pcs_config,
.pcs_an_restart = marvell_c22_pcs_an_restart,
.pcs_link_up = marvell_c22_pcs_link_up,
};
static struct marvell_c22_pcs *marvell_c22_pcs_alloc(struct device *dev,
struct mii_bus *bus,
unsigned int addr)
{
struct marvell_c22_pcs *mpcs;
mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL);
if (!mpcs)
return NULL;
mpcs->mdio.dev.parent = dev;
mpcs->mdio.bus = bus;
mpcs->mdio.addr = addr;
mpcs->phylink_pcs.ops = &marvell_c22_pcs_ops;
mpcs->phylink_pcs.neg_mode = true;
return mpcs;
}
static int marvell_c22_pcs_setup_irq(struct marvell_c22_pcs *mpcs,
unsigned int irq)
{
int err;
mpcs->phylink_pcs.poll = !irq;
mpcs->irq = irq;
if (irq) {
err = request_threaded_irq(irq, NULL,
marvell_c22_pcs_handle_irq,
IRQF_ONESHOT, mpcs->name, mpcs);
if (err)
return err;
}
return 0;
}
/* mv88e6352 specifics */
static bool mv88e6352_pcs_link_check(struct marvell_c22_pcs *mpcs)
{
struct mv88e6xxx_port *port = mpcs->port;
struct mv88e6xxx_chip *chip = port->chip;
u8 cmode;
/* Port 4 can be in auto-media mode. Check that the port is
* associated with the mpcs.
*/
mv88e6xxx_reg_lock(chip);
chip->info->ops->port_get_cmode(chip, port->port, &cmode);
mv88e6xxx_reg_unlock(chip);
return cmode == MV88E6XXX_PORT_STS_CMODE_100BASEX ||
cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode == MV88E6XXX_PORT_STS_CMODE_SGMII;
}
static int mv88e6352_pcs_init(struct mv88e6xxx_chip *chip, int port)
{
struct marvell_c22_pcs *mpcs;
struct mii_bus *bus;
struct device *dev;
unsigned int irq;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
mv88e6xxx_reg_unlock(chip);
if (err <= 0)
return err;
irq = mv88e6xxx_serdes_irq_mapping(chip, port);
bus = mv88e6xxx_default_mdio_bus(chip);
dev = chip->dev;
mpcs = marvell_c22_pcs_alloc(dev, bus, MV88E6352_ADDR_SERDES);
if (!mpcs)
return -ENOMEM;
snprintf(mpcs->name, sizeof(mpcs->name),
"mv88e6xxx-%s-serdes-%d", dev_name(dev), port);
mpcs->link_check = mv88e6352_pcs_link_check;
mpcs->port = &chip->ports[port];
err = marvell_c22_pcs_setup_irq(mpcs, irq);
if (err) {
kfree(mpcs);
return err;
}
chip->ports[port].pcs_private = &mpcs->phylink_pcs;
return 0;
}
static void mv88e6352_pcs_teardown(struct mv88e6xxx_chip *chip, int port)
{
struct marvell_c22_pcs *mpcs;
struct phylink_pcs *pcs;
pcs = chip->ports[port].pcs_private;
if (!pcs)
return;
mpcs = pcs_to_marvell_c22_pcs(pcs);
if (mpcs->irq)
free_irq(mpcs->irq, mpcs);
kfree(mpcs);
chip->ports[port].pcs_private = NULL;
}
static struct phylink_pcs *mv88e6352_pcs_select(struct mv88e6xxx_chip *chip,
int port,
phy_interface_t interface)
{
return chip->ports[port].pcs_private;
}
const struct mv88e6xxx_pcs_ops mv88e6352_pcs_ops = {
.pcs_init = mv88e6352_pcs_init,
.pcs_teardown = mv88e6352_pcs_teardown,
.pcs_select = mv88e6352_pcs_select,
};
| linux-master | drivers/net/dsa/mv88e6xxx/pcs-6352.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6xxx Switch Global 2 Scratch & Misc Registers support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2017 National Instruments
* Brandon Streiff <[email protected]>
*/
#include "chip.h"
#include "global2.h"
/* Offset 0x1A: Scratch and Misc. Register */
static int mv88e6xxx_g2_scratch_read(struct mv88e6xxx_chip *chip, int reg,
u8 *data)
{
u16 value;
int err;
err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC,
reg << 8);
if (err)
return err;
err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC, &value);
if (err)
return err;
*data = (value & MV88E6XXX_G2_SCRATCH_MISC_DATA_MASK);
return 0;
}
static int mv88e6xxx_g2_scratch_write(struct mv88e6xxx_chip *chip, int reg,
u8 data)
{
u16 value = (reg << 8) | data;
return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC,
MV88E6XXX_G2_SCRATCH_MISC_UPDATE | value);
}
/**
* mv88e6xxx_g2_scratch_get_bit - get a bit
* @chip: chip private data
* @base_reg: base of scratch bits
* @offset: index of bit within the register
* @set: is bit set?
*/
static int mv88e6xxx_g2_scratch_get_bit(struct mv88e6xxx_chip *chip,
int base_reg, unsigned int offset,
int *set)
{
int reg = base_reg + (offset / 8);
u8 mask = (1 << (offset & 0x7));
u8 val;
int err;
err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
if (err)
return err;
*set = !!(mask & val);
return 0;
}
/**
* mv88e6xxx_g2_scratch_set_bit - set (or clear) a bit
* @chip: chip private data
* @base_reg: base of scratch bits
* @offset: index of bit within the register
* @set: should this bit be set?
*
* Helper function for dealing with the direction and data registers.
*/
static int mv88e6xxx_g2_scratch_set_bit(struct mv88e6xxx_chip *chip,
int base_reg, unsigned int offset,
int set)
{
int reg = base_reg + (offset / 8);
u8 mask = (1 << (offset & 0x7));
u8 val;
int err;
err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
if (err)
return err;
if (set)
val |= mask;
else
val &= ~mask;
return mv88e6xxx_g2_scratch_write(chip, reg, val);
}
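/* Worked example (illustrative only): for scratch GPIO bit 10 the helpers
 * above access register base_reg + 10 / 8 = base_reg + 1 and use mask
 * 1 << (10 & 0x7) = BIT(2), i.e. bit 2 of the second 8-bit register of the
 * group.
 */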
/**
* mv88e6352_g2_scratch_gpio_get_data - get data on gpio pin
* @chip: chip private data
* @pin: gpio index
*
* Return: 0 for low, 1 for high, negative error
*/
static int mv88e6352_g2_scratch_gpio_get_data(struct mv88e6xxx_chip *chip,
unsigned int pin)
{
int val = 0;
int err;
err = mv88e6xxx_g2_scratch_get_bit(chip,
MV88E6352_G2_SCRATCH_GPIO_DATA0,
pin, &val);
if (err)
return err;
return val;
}
/**
* mv88e6352_g2_scratch_gpio_set_data - set data on gpio pin
* @chip: chip private data
* @pin: gpio index
* @value: value to set
*/
static int mv88e6352_g2_scratch_gpio_set_data(struct mv88e6xxx_chip *chip,
unsigned int pin, int value)
{
u8 mask = (1 << (pin & 0x7));
int offset = (pin / 8);
int reg;
reg = MV88E6352_G2_SCRATCH_GPIO_DATA0 + offset;
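	/* The data register is not read back here; a cached copy of each
	 * GPIO data byte is kept in chip->gpio_data and written out whole,
	 * so the other pins in the byte keep their previously set values.
	 */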
if (value)
chip->gpio_data[offset] |= mask;
else
chip->gpio_data[offset] &= ~mask;
return mv88e6xxx_g2_scratch_write(chip, reg, chip->gpio_data[offset]);
}
/**
* mv88e6352_g2_scratch_gpio_get_dir - get direction of gpio pin
* @chip: chip private data
* @pin: gpio index
*
* Return: 0 for output, 1 for input (same as GPIOF_DIR_XXX).
*/
static int mv88e6352_g2_scratch_gpio_get_dir(struct mv88e6xxx_chip *chip,
unsigned int pin)
{
int val = 0;
int err;
err = mv88e6xxx_g2_scratch_get_bit(chip,
MV88E6352_G2_SCRATCH_GPIO_DIR0,
pin, &val);
if (err)
return err;
return val;
}
/**
* mv88e6352_g2_scratch_gpio_set_dir - set direction of gpio pin
* @chip: chip private data
* @pin: gpio index
* @input: should the gpio be an input, or an output?
*/
static int mv88e6352_g2_scratch_gpio_set_dir(struct mv88e6xxx_chip *chip,
unsigned int pin, bool input)
{
int value = (input ? MV88E6352_G2_SCRATCH_GPIO_DIR_IN :
MV88E6352_G2_SCRATCH_GPIO_DIR_OUT);
return mv88e6xxx_g2_scratch_set_bit(chip,
MV88E6352_G2_SCRATCH_GPIO_DIR0,
pin, value);
}
/**
* mv88e6352_g2_scratch_gpio_get_pctl - get pin control setting
* @chip: chip private data
* @pin: gpio index
* @func: function number
*
* Note that the function numbers themselves may vary by chipset.
*/
static int mv88e6352_g2_scratch_gpio_get_pctl(struct mv88e6xxx_chip *chip,
unsigned int pin, int *func)
{
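	/* Two pins share each pin control register: even pins use the low
	 * nibble and odd pins the high nibble, with a 3-bit function field
	 * in each.
	 */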
int reg = MV88E6352_G2_SCRATCH_GPIO_PCTL0 + (pin / 2);
int offset = (pin & 0x1) ? 4 : 0;
u8 mask = (0x7 << offset);
int err;
u8 val;
err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
if (err)
return err;
*func = (val & mask) >> offset;
return 0;
}
/**
* mv88e6352_g2_scratch_gpio_set_pctl - set pin control setting
* @chip: chip private data
* @pin: gpio index
* @func: function number
*/
static int mv88e6352_g2_scratch_gpio_set_pctl(struct mv88e6xxx_chip *chip,
unsigned int pin, int func)
{
int reg = MV88E6352_G2_SCRATCH_GPIO_PCTL0 + (pin / 2);
int offset = (pin & 0x1) ? 4 : 0;
u8 mask = (0x7 << offset);
int err;
u8 val;
err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
if (err)
return err;
	/* mask after shifting so the function bits land correctly for odd pins */
	val = (val & ~mask) | ((func << offset) & mask);
return mv88e6xxx_g2_scratch_write(chip, reg, val);
}
const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
.get_data = mv88e6352_g2_scratch_gpio_get_data,
.set_data = mv88e6352_g2_scratch_gpio_set_data,
.get_dir = mv88e6352_g2_scratch_gpio_get_dir,
.set_dir = mv88e6352_g2_scratch_gpio_set_dir,
.get_pctl = mv88e6352_g2_scratch_gpio_get_pctl,
.set_pctl = mv88e6352_g2_scratch_gpio_set_pctl,
};
/**
* mv88e6xxx_g2_scratch_gpio_set_smi - set gpio muxing for external smi
* @chip: chip private data
* @external: set mux for external smi, or free for gpio usage
*
* Some mv88e6xxx models have GPIO pins that may be configured as
* an external SMI interface, or they may be made free for other
* GPIO uses.
*/
int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external)
{
int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
int config_data1 = MV88E6352_G2_SCRATCH_CONFIG_DATA1;
int config_data2 = MV88E6352_G2_SCRATCH_CONFIG_DATA2;
bool no_cpu;
u8 p0_mode;
int err;
u8 val;
err = mv88e6xxx_g2_scratch_read(chip, config_data2, &val);
if (err)
return err;
p0_mode = val & MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK;
if (p0_mode == 0x01 || p0_mode == 0x02)
return -EBUSY;
err = mv88e6xxx_g2_scratch_read(chip, config_data1, &val);
if (err)
return err;
no_cpu = !!(val & MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU);
err = mv88e6xxx_g2_scratch_read(chip, misc_cfg, &val);
if (err)
return err;
/* NO_CPU being 0 inverts the meaning of the bit */
if (!no_cpu)
external = !external;
if (external)
val |= MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
else
val &= ~MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
}
/**
* mv88e6352_g2_scratch_port_has_serdes - indicate if a port can have a serdes
* @chip: chip private data
* @port: port number to check for serdes
*
* Indicates whether the port may have a serdes attached according to the
* pin strapping. Returns negative error number, 0 if the port is not
* configured to have a serdes, and 1 if the port is configured to have a
* serdes attached.
*/
int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
{
u8 config3, p;
int err;
err = mv88e6xxx_g2_scratch_read(chip, MV88E6352_G2_SCRATCH_CONFIG_DATA3,
&config3);
if (err)
return err;
if (config3 & MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL)
p = 5;
else
p = 4;
return port == p;
}
| linux-master | drivers/net/dsa/mv88e6xxx/global2_scratch.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6352 family SERDES PCS support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2017 Andrew Lunn <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/mii.h>
#include "chip.h"
#include "global2.h"
#include "phy.h"
#include "port.h"
#include "serdes.h"
struct mv88e639x_pcs {
struct mdio_device mdio;
struct phylink_pcs sgmii_pcs;
struct phylink_pcs xg_pcs;
bool erratum_3_14;
bool supports_5g;
phy_interface_t interface;
unsigned int irq;
char name[64];
irqreturn_t (*handle_irq)(struct mv88e639x_pcs *mpcs);
};
static int mv88e639x_read(struct mv88e639x_pcs *mpcs, u16 regnum, u16 *val)
{
int err;
err = mdiodev_c45_read(&mpcs->mdio, MDIO_MMD_PHYXS, regnum);
if (err < 0)
return err;
*val = err;
return 0;
}
static int mv88e639x_write(struct mv88e639x_pcs *mpcs, u16 regnum, u16 val)
{
return mdiodev_c45_write(&mpcs->mdio, MDIO_MMD_PHYXS, regnum, val);
}
static int mv88e639x_modify(struct mv88e639x_pcs *mpcs, u16 regnum, u16 mask,
u16 val)
{
return mdiodev_c45_modify(&mpcs->mdio, MDIO_MMD_PHYXS, regnum, mask,
val);
}
static int mv88e639x_modify_changed(struct mv88e639x_pcs *mpcs, u16 regnum,
u16 mask, u16 set)
{
return mdiodev_c45_modify_changed(&mpcs->mdio, MDIO_MMD_PHYXS, regnum,
mask, set);
}
static struct mv88e639x_pcs *
mv88e639x_pcs_alloc(struct device *dev, struct mii_bus *bus, unsigned int addr,
int port)
{
struct mv88e639x_pcs *mpcs;
mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL);
if (!mpcs)
return NULL;
mpcs->mdio.dev.parent = dev;
mpcs->mdio.bus = bus;
mpcs->mdio.addr = addr;
snprintf(mpcs->name, sizeof(mpcs->name),
"mv88e6xxx-%s-serdes-%d", dev_name(dev), port);
return mpcs;
}
static irqreturn_t mv88e639x_pcs_handle_irq(int irq, void *dev_id)
{
struct mv88e639x_pcs *mpcs = dev_id;
irqreturn_t (*handler)(struct mv88e639x_pcs *);
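	/* handle_irq is only populated once the SGMII or 10G PCS has been
	 * enabled, so an interrupt arriving before then is ignored.
	 */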
handler = READ_ONCE(mpcs->handle_irq);
if (!handler)
return IRQ_NONE;
return handler(mpcs);
}
static int mv88e639x_pcs_setup_irq(struct mv88e639x_pcs *mpcs,
struct mv88e6xxx_chip *chip, int port)
{
unsigned int irq;
irq = mv88e6xxx_serdes_irq_mapping(chip, port);
if (!irq) {
/* Use polling mode */
mpcs->sgmii_pcs.poll = true;
mpcs->xg_pcs.poll = true;
return 0;
}
mpcs->irq = irq;
return request_threaded_irq(irq, NULL, mv88e639x_pcs_handle_irq,
IRQF_ONESHOT, mpcs->name, mpcs);
}
static void mv88e639x_pcs_teardown(struct mv88e6xxx_chip *chip, int port)
{
struct mv88e639x_pcs *mpcs = chip->ports[port].pcs_private;
if (!mpcs)
return;
if (mpcs->irq)
free_irq(mpcs->irq, mpcs);
kfree(mpcs);
chip->ports[port].pcs_private = NULL;
}
static struct mv88e639x_pcs *sgmii_pcs_to_mv88e639x_pcs(struct phylink_pcs *pcs)
{
return container_of(pcs, struct mv88e639x_pcs, sgmii_pcs);
}
static irqreturn_t mv88e639x_sgmii_handle_irq(struct mv88e639x_pcs *mpcs)
{
u16 int_status;
int err;
err = mv88e639x_read(mpcs, MV88E6390_SGMII_INT_STATUS, &int_status);
if (err)
return IRQ_NONE;
if (int_status & (MV88E6390_SGMII_INT_LINK_DOWN |
MV88E6390_SGMII_INT_LINK_UP)) {
phylink_pcs_change(&mpcs->sgmii_pcs,
int_status & MV88E6390_SGMII_INT_LINK_UP);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int mv88e639x_sgmii_pcs_control_irq(struct mv88e639x_pcs *mpcs,
bool enable)
{
u16 val = 0;
if (enable)
val |= MV88E6390_SGMII_INT_LINK_DOWN |
MV88E6390_SGMII_INT_LINK_UP;
return mv88e639x_modify(mpcs, MV88E6390_SGMII_INT_ENABLE,
MV88E6390_SGMII_INT_LINK_DOWN |
MV88E6390_SGMII_INT_LINK_UP, val);
}
static int mv88e639x_sgmii_pcs_control_pwr(struct mv88e639x_pcs *mpcs,
bool enable)
{
u16 mask, val;
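	/* Powering the PCS up also clears any leftover reset or loopback
	 * setting in the BMCR; powering it down only sets the power-down bit.
	 */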
if (enable) {
mask = BMCR_RESET | BMCR_LOOPBACK | BMCR_PDOWN;
val = 0;
} else {
mask = val = BMCR_PDOWN;
}
return mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR, mask, val);
}
static int mv88e639x_sgmii_pcs_enable(struct phylink_pcs *pcs)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
/* power enable done in post_config */
mpcs->handle_irq = mv88e639x_sgmii_handle_irq;
return mv88e639x_sgmii_pcs_control_irq(mpcs, !!mpcs->irq);
}
static void mv88e639x_sgmii_pcs_disable(struct phylink_pcs *pcs)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
mv88e639x_sgmii_pcs_control_irq(mpcs, false);
mv88e639x_sgmii_pcs_control_pwr(mpcs, false);
}
static void mv88e639x_sgmii_pcs_pre_config(struct phylink_pcs *pcs,
phy_interface_t interface)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
mv88e639x_sgmii_pcs_control_pwr(mpcs, false);
}
static int mv88e6390_erratum_3_14(struct mv88e639x_pcs *mpcs)
{
const int lanes[] = { MV88E6390_PORT9_LANE0, MV88E6390_PORT9_LANE1,
MV88E6390_PORT9_LANE2, MV88E6390_PORT9_LANE3,
MV88E6390_PORT10_LANE0, MV88E6390_PORT10_LANE1,
MV88E6390_PORT10_LANE2, MV88E6390_PORT10_LANE3 };
int err, i;
/* 88e6190x and 88e6390x errata 3.14:
* After chip reset, SERDES reconfiguration or SERDES core
* Software Reset, the SERDES lanes may not be properly aligned
* resulting in CRC errors
*/
for (i = 0; i < ARRAY_SIZE(lanes); i++) {
err = mdiobus_c45_write(mpcs->mdio.bus, lanes[i],
MDIO_MMD_PHYXS,
0xf054, 0x400C);
if (err)
return err;
err = mdiobus_c45_write(mpcs->mdio.bus, lanes[i],
MDIO_MMD_PHYXS,
0xf054, 0x4000);
if (err)
return err;
}
return 0;
}
static int mv88e639x_sgmii_pcs_post_config(struct phylink_pcs *pcs,
phy_interface_t interface)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
int err;
mv88e639x_sgmii_pcs_control_pwr(mpcs, true);
if (mpcs->erratum_3_14) {
err = mv88e6390_erratum_3_14(mpcs);
if (err)
dev_err(mpcs->mdio.dev.parent,
"failed to apply erratum 3.14: %pe\n",
ERR_PTR(err));
}
return 0;
}
static void mv88e639x_sgmii_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
u16 bmsr, lpa, status;
int err;
err = mv88e639x_read(mpcs, MV88E6390_SGMII_BMSR, &bmsr);
if (err) {
dev_err(mpcs->mdio.dev.parent,
"can't read Serdes PHY %s: %pe\n",
"BMSR", ERR_PTR(err));
state->link = false;
return;
}
err = mv88e639x_read(mpcs, MV88E6390_SGMII_LPA, &lpa);
if (err) {
dev_err(mpcs->mdio.dev.parent,
"can't read Serdes PHY %s: %pe\n",
"LPA", ERR_PTR(err));
state->link = false;
return;
}
err = mv88e639x_read(mpcs, MV88E6390_SGMII_PHY_STATUS, &status);
if (err) {
dev_err(mpcs->mdio.dev.parent,
"can't read Serdes PHY %s: %pe\n",
"status", ERR_PTR(err));
state->link = false;
return;
}
mv88e6xxx_pcs_decode_state(mpcs->mdio.dev.parent, bmsr, lpa, status,
state);
}
static int mv88e639x_sgmii_pcs_config(struct phylink_pcs *pcs,
unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
u16 val, bmcr;
bool changed;
int adv, err;
adv = phylink_mii_c22_pcs_encode_advertisement(interface, advertising);
if (adv < 0)
return 0;
mpcs->interface = interface;
err = mv88e639x_modify_changed(mpcs, MV88E6390_SGMII_ADVERTISE,
0xffff, adv);
if (err < 0)
return err;
changed = err > 0;
err = mv88e639x_read(mpcs, MV88E6390_SGMII_BMCR, &val);
if (err)
return err;
if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
bmcr = val | BMCR_ANENABLE;
else
bmcr = val & ~BMCR_ANENABLE;
/* setting ANENABLE triggers a restart of negotiation */
if (bmcr == val)
return changed;
return mv88e639x_write(mpcs, MV88E6390_SGMII_BMCR, bmcr);
}
static void mv88e639x_sgmii_pcs_an_restart(struct phylink_pcs *pcs)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR,
BMCR_ANRESTART, BMCR_ANRESTART);
}
static void mv88e639x_sgmii_pcs_link_up(struct phylink_pcs *pcs,
unsigned int mode,
phy_interface_t interface,
int speed, int duplex)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
u16 bmcr;
int err;
if (phylink_autoneg_inband(mode))
return;
bmcr = mii_bmcr_encode_fixed(speed, duplex);
err = mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR,
BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX,
bmcr);
if (err)
dev_err(mpcs->mdio.dev.parent,
"can't access Serdes PHY %s: %pe\n",
"BMCR", ERR_PTR(err));
}
static const struct phylink_pcs_ops mv88e639x_sgmii_pcs_ops = {
.pcs_enable = mv88e639x_sgmii_pcs_enable,
.pcs_disable = mv88e639x_sgmii_pcs_disable,
.pcs_pre_config = mv88e639x_sgmii_pcs_pre_config,
.pcs_post_config = mv88e639x_sgmii_pcs_post_config,
.pcs_get_state = mv88e639x_sgmii_pcs_get_state,
.pcs_an_restart = mv88e639x_sgmii_pcs_an_restart,
.pcs_config = mv88e639x_sgmii_pcs_config,
.pcs_link_up = mv88e639x_sgmii_pcs_link_up,
};
static struct mv88e639x_pcs *xg_pcs_to_mv88e639x_pcs(struct phylink_pcs *pcs)
{
return container_of(pcs, struct mv88e639x_pcs, xg_pcs);
}
static int mv88e639x_xg_pcs_enable(struct mv88e639x_pcs *mpcs)
{
return mv88e639x_modify(mpcs, MV88E6390_10G_CTRL1,
MDIO_CTRL1_RESET | MDIO_PCS_CTRL1_LOOPBACK |
MDIO_CTRL1_LPOWER, 0);
}
static void mv88e639x_xg_pcs_disable(struct mv88e639x_pcs *mpcs)
{
mv88e639x_modify(mpcs, MV88E6390_10G_CTRL1, MDIO_CTRL1_LPOWER,
MDIO_CTRL1_LPOWER);
}
static void mv88e639x_xg_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
u16 status;
int err;
state->link = false;
err = mv88e639x_read(mpcs, MV88E6390_10G_STAT1, &status);
if (err) {
dev_err(mpcs->mdio.dev.parent,
"can't read Serdes PHY %s: %pe\n",
"STAT1", ERR_PTR(err));
return;
}
state->link = !!(status & MDIO_STAT1_LSTATUS);
if (state->link) {
switch (state->interface) {
case PHY_INTERFACE_MODE_5GBASER:
state->speed = SPEED_5000;
break;
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_RXAUI:
case PHY_INTERFACE_MODE_XAUI:
state->speed = SPEED_10000;
break;
default:
state->link = false;
return;
}
state->duplex = DUPLEX_FULL;
}
}
static int mv88e639x_xg_pcs_config(struct phylink_pcs *pcs,
unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
return 0;
}
static struct phylink_pcs *
mv88e639x_pcs_select(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
struct mv88e639x_pcs *mpcs;
mpcs = chip->ports[port].pcs_private;
if (!mpcs)
return NULL;
switch (mode) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
return &mpcs->sgmii_pcs;
case PHY_INTERFACE_MODE_5GBASER:
if (!mpcs->supports_5g)
return NULL;
fallthrough;
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_RXAUI:
return &mpcs->xg_pcs;
default:
return NULL;
}
}
/* Marvell 88E6390 Specific support */
static irqreturn_t mv88e6390_xg_handle_irq(struct mv88e639x_pcs *mpcs)
{
u16 int_status;
int err;
err = mv88e639x_read(mpcs, MV88E6390_10G_INT_STATUS, &int_status);
if (err)
return IRQ_NONE;
if (int_status & (MV88E6390_10G_INT_LINK_DOWN |
MV88E6390_10G_INT_LINK_UP)) {
phylink_pcs_change(&mpcs->xg_pcs,
int_status & MV88E6390_10G_INT_LINK_UP);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int mv88e6390_xg_control_irq(struct mv88e639x_pcs *mpcs, bool enable)
{
u16 val = 0;
if (enable)
val = MV88E6390_10G_INT_LINK_DOWN | MV88E6390_10G_INT_LINK_UP;
return mv88e639x_modify(mpcs, MV88E6390_10G_INT_ENABLE,
MV88E6390_10G_INT_LINK_DOWN |
MV88E6390_10G_INT_LINK_UP, val);
}
static int mv88e6390_xg_pcs_enable(struct phylink_pcs *pcs)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
int err;
err = mv88e639x_xg_pcs_enable(mpcs);
if (err)
return err;
mpcs->handle_irq = mv88e6390_xg_handle_irq;
return mv88e6390_xg_control_irq(mpcs, !!mpcs->irq);
}
static void mv88e6390_xg_pcs_disable(struct phylink_pcs *pcs)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
mv88e6390_xg_control_irq(mpcs, false);
mv88e639x_xg_pcs_disable(mpcs);
}
static const struct phylink_pcs_ops mv88e6390_xg_pcs_ops = {
.pcs_enable = mv88e6390_xg_pcs_enable,
.pcs_disable = mv88e6390_xg_pcs_disable,
.pcs_get_state = mv88e639x_xg_pcs_get_state,
.pcs_config = mv88e639x_xg_pcs_config,
};
static int mv88e6390_pcs_enable_checker(struct mv88e639x_pcs *mpcs)
{
return mv88e639x_modify(mpcs, MV88E6390_PG_CONTROL,
MV88E6390_PG_CONTROL_ENABLE_PC,
MV88E6390_PG_CONTROL_ENABLE_PC);
}
static int mv88e6390_pcs_init(struct mv88e6xxx_chip *chip, int port)
{
struct mv88e639x_pcs *mpcs;
struct mii_bus *bus;
struct device *dev;
int lane, err;
lane = mv88e6xxx_serdes_get_lane(chip, port);
if (lane < 0)
return 0;
bus = mv88e6xxx_default_mdio_bus(chip);
dev = chip->dev;
mpcs = mv88e639x_pcs_alloc(dev, bus, lane, port);
if (!mpcs)
return -ENOMEM;
mpcs->sgmii_pcs.ops = &mv88e639x_sgmii_pcs_ops;
mpcs->sgmii_pcs.neg_mode = true;
mpcs->xg_pcs.ops = &mv88e6390_xg_pcs_ops;
mpcs->xg_pcs.neg_mode = true;
if (chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6190X ||
chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6390X)
mpcs->erratum_3_14 = true;
err = mv88e639x_pcs_setup_irq(mpcs, chip, port);
if (err)
goto err_free;
	/* 6390 and 6390x have the checker, 6393x doesn't appear to? */
/* This is to enable gathering the statistics. Maybe this
* should call out to a helper? Or we could do this at init time.
*/
err = mv88e6390_pcs_enable_checker(mpcs);
if (err)
goto err_free;
chip->ports[port].pcs_private = mpcs;
return 0;
err_free:
kfree(mpcs);
return err;
}
const struct mv88e6xxx_pcs_ops mv88e6390_pcs_ops = {
.pcs_init = mv88e6390_pcs_init,
.pcs_teardown = mv88e639x_pcs_teardown,
.pcs_select = mv88e639x_pcs_select,
};
/* Marvell 88E6393X Specific support */
static int mv88e6393x_power_lane(struct mv88e639x_pcs *mpcs, bool enable)
{
u16 val = MV88E6393X_SERDES_CTRL1_TX_PDOWN |
MV88E6393X_SERDES_CTRL1_RX_PDOWN;
return mv88e639x_modify(mpcs, MV88E6393X_SERDES_CTRL1, val,
enable ? 0 : val);
}
/* mv88e6393x family errata 4.6:
* Cannot clear PwrDn bit on SERDES if device is configured CPU_MGD mode or
* P0_mode is configured for [x]MII.
* Workaround: Set SERDES register 4.F002 bit 5=0 and bit 15=1.
*
* It seems that after this workaround the SERDES is automatically powered up
* (the bit is cleared), so power it down.
*/
static int mv88e6393x_erratum_4_6(struct mv88e639x_pcs *mpcs)
{
int err;
err = mv88e639x_modify(mpcs, MV88E6393X_SERDES_POC,
MV88E6393X_SERDES_POC_PDOWN |
MV88E6393X_SERDES_POC_RESET,
MV88E6393X_SERDES_POC_RESET);
if (err)
return err;
err = mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR,
BMCR_PDOWN, BMCR_PDOWN);
if (err)
return err;
err = mv88e639x_sgmii_pcs_control_pwr(mpcs, false);
if (err)
return err;
return mv88e6393x_power_lane(mpcs, false);
}
/* mv88e6393x family errata 4.8:
* When a SERDES port is operating in 1000BASE-X or SGMII mode link may not
* come up after hardware reset or software reset of SERDES core. Workaround
* is to write SERDES register 4.F074.14=1 for only those modes and 0 in all
* other modes.
*/
static int mv88e6393x_erratum_4_8(struct mv88e639x_pcs *mpcs)
{
u16 reg, poc;
int err;
err = mv88e639x_read(mpcs, MV88E6393X_SERDES_POC, &poc);
if (err)
return err;
poc &= MV88E6393X_SERDES_POC_PCS_MASK;
if (poc == MV88E6393X_SERDES_POC_PCS_1000BASEX ||
poc == MV88E6393X_SERDES_POC_PCS_SGMII_PHY ||
poc == MV88E6393X_SERDES_POC_PCS_SGMII_MAC)
reg = MV88E6393X_ERRATA_4_8_BIT;
else
reg = 0;
return mv88e639x_modify(mpcs, MV88E6393X_ERRATA_4_8_REG,
MV88E6393X_ERRATA_4_8_BIT, reg);
}
/* mv88e6393x family errata 5.2:
* For optimal signal integrity the following sequence should be applied to
* SERDES operating in 10G mode. These registers only apply to 10G operation
* and have no effect on other speeds.
*/
static int mv88e6393x_erratum_5_2(struct mv88e639x_pcs *mpcs)
{
static const struct {
u16 dev, reg, val, mask;
} fixes[] = {
{ MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff },
{ MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff },
{ MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff },
{ MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f },
{ MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 },
{ MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff },
{ MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC,
MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET },
};
int err, i;
for (i = 0; i < ARRAY_SIZE(fixes); ++i) {
err = mdiodev_c45_modify(&mpcs->mdio, fixes[i].dev,
fixes[i].reg, fixes[i].mask,
fixes[i].val);
if (err)
return err;
}
return 0;
}
/* Inband AN is broken on Amethyst in 2500base-x mode when set by standard
* mechanism (via cmode).
* We can get around this by configuring the PCS mode to 1000base-x and then
* writing value 0x58 to register 1e.8000. (This must be done while SerDes
* receiver and transmitter are disabled, which is, when this function is
* called.)
 * It seems that when we do this configuration to 2500base-x mode (by changing
* PCS mode to 1000base-x and frequency to 3.125 GHz from 1.25 GHz) and then
* configure to sgmii or 1000base-x, the device thinks that it already has
* SerDes at 1.25 GHz and does not change the 1e.8000 register, leaving SerDes
* at 3.125 GHz.
* To avoid this, change PCS mode back to 2500base-x when disabling SerDes from
* 2500base-x mode.
*/
static int mv88e6393x_fix_2500basex_an(struct mv88e639x_pcs *mpcs, bool on)
{
u16 reg;
int err;
if (on)
reg = MV88E6393X_SERDES_POC_PCS_1000BASEX |
MV88E6393X_SERDES_POC_AN;
else
reg = MV88E6393X_SERDES_POC_PCS_2500BASEX;
reg |= MV88E6393X_SERDES_POC_RESET;
err = mv88e639x_modify(mpcs, MV88E6393X_SERDES_POC,
MV88E6393X_SERDES_POC_PCS_MASK |
MV88E6393X_SERDES_POC_AN |
MV88E6393X_SERDES_POC_RESET, reg);
if (err)
return err;
return mdiodev_c45_write(&mpcs->mdio, MDIO_MMD_VEND1, 0x8000, 0x58);
}
static int mv88e6393x_sgmii_apply_2500basex_an(struct mv88e639x_pcs *mpcs,
phy_interface_t interface,
bool enable)
{
int err;
if (interface != PHY_INTERFACE_MODE_2500BASEX)
return 0;
err = mv88e6393x_fix_2500basex_an(mpcs, enable);
if (err)
dev_err(mpcs->mdio.dev.parent,
"failed to %s 2500basex fix: %pe\n",
enable ? "enable" : "disable", ERR_PTR(err));
return err;
}
static void mv88e6393x_sgmii_pcs_disable(struct phylink_pcs *pcs)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
mv88e639x_sgmii_pcs_disable(pcs);
mv88e6393x_power_lane(mpcs, false);
mv88e6393x_sgmii_apply_2500basex_an(mpcs, mpcs->interface, false);
}
static void mv88e6393x_sgmii_pcs_pre_config(struct phylink_pcs *pcs,
phy_interface_t interface)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
mv88e639x_sgmii_pcs_pre_config(pcs, interface);
mv88e6393x_power_lane(mpcs, false);
mv88e6393x_sgmii_apply_2500basex_an(mpcs, mpcs->interface, false);
}
static int mv88e6393x_sgmii_pcs_post_config(struct phylink_pcs *pcs,
phy_interface_t interface)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
int err;
err = mv88e6393x_erratum_4_8(mpcs);
if (err)
return err;
err = mv88e6393x_sgmii_apply_2500basex_an(mpcs, interface, true);
if (err)
return err;
err = mv88e6393x_power_lane(mpcs, true);
if (err)
return err;
return mv88e639x_sgmii_pcs_post_config(pcs, interface);
}
static const struct phylink_pcs_ops mv88e6393x_sgmii_pcs_ops = {
.pcs_enable = mv88e639x_sgmii_pcs_enable,
.pcs_disable = mv88e6393x_sgmii_pcs_disable,
.pcs_pre_config = mv88e6393x_sgmii_pcs_pre_config,
.pcs_post_config = mv88e6393x_sgmii_pcs_post_config,
.pcs_get_state = mv88e639x_sgmii_pcs_get_state,
.pcs_an_restart = mv88e639x_sgmii_pcs_an_restart,
.pcs_config = mv88e639x_sgmii_pcs_config,
.pcs_link_up = mv88e639x_sgmii_pcs_link_up,
};
static irqreturn_t mv88e6393x_xg_handle_irq(struct mv88e639x_pcs *mpcs)
{
u16 int_status, stat1;
bool link_down;
int err;
err = mv88e639x_read(mpcs, MV88E6393X_10G_INT_STATUS, &int_status);
if (err)
return IRQ_NONE;
if (int_status & MV88E6393X_10G_INT_LINK_CHANGE) {
err = mv88e639x_read(mpcs, MV88E6390_10G_STAT1, &stat1);
if (err)
return IRQ_NONE;
link_down = !(stat1 & MDIO_STAT1_LSTATUS);
phylink_pcs_change(&mpcs->xg_pcs, !link_down);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int mv88e6393x_xg_control_irq(struct mv88e639x_pcs *mpcs, bool enable)
{
u16 val = 0;
if (enable)
val = MV88E6393X_10G_INT_LINK_CHANGE;
return mv88e639x_modify(mpcs, MV88E6393X_10G_INT_ENABLE,
MV88E6393X_10G_INT_LINK_CHANGE, val);
}
static int mv88e6393x_xg_pcs_enable(struct phylink_pcs *pcs)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
mpcs->handle_irq = mv88e6393x_xg_handle_irq;
return mv88e6393x_xg_control_irq(mpcs, !!mpcs->irq);
}
static void mv88e6393x_xg_pcs_disable(struct phylink_pcs *pcs)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
mv88e6393x_xg_control_irq(mpcs, false);
mv88e639x_xg_pcs_disable(mpcs);
mv88e6393x_power_lane(mpcs, false);
}
/* The PCS has to be powered down while CMODE is changed */
static void mv88e6393x_xg_pcs_pre_config(struct phylink_pcs *pcs,
phy_interface_t interface)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
mv88e639x_xg_pcs_disable(mpcs);
mv88e6393x_power_lane(mpcs, false);
}
static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
phy_interface_t interface)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
int err;
if (interface == PHY_INTERFACE_MODE_10GBASER) {
err = mv88e6393x_erratum_5_2(mpcs);
if (err)
return err;
}
err = mv88e6393x_power_lane(mpcs, true);
if (err)
return err;
return mv88e639x_xg_pcs_enable(mpcs);
}
static const struct phylink_pcs_ops mv88e6393x_xg_pcs_ops = {
.pcs_enable = mv88e6393x_xg_pcs_enable,
.pcs_disable = mv88e6393x_xg_pcs_disable,
.pcs_pre_config = mv88e6393x_xg_pcs_pre_config,
.pcs_post_config = mv88e6393x_xg_pcs_post_config,
.pcs_get_state = mv88e639x_xg_pcs_get_state,
.pcs_config = mv88e639x_xg_pcs_config,
};
static int mv88e6393x_pcs_init(struct mv88e6xxx_chip *chip, int port)
{
struct mv88e639x_pcs *mpcs;
struct mii_bus *bus;
struct device *dev;
int lane, err;
lane = mv88e6xxx_serdes_get_lane(chip, port);
if (lane < 0)
return 0;
bus = mv88e6xxx_default_mdio_bus(chip);
dev = chip->dev;
mpcs = mv88e639x_pcs_alloc(dev, bus, lane, port);
if (!mpcs)
return -ENOMEM;
mpcs->sgmii_pcs.ops = &mv88e6393x_sgmii_pcs_ops;
mpcs->sgmii_pcs.neg_mode = true;
mpcs->xg_pcs.ops = &mv88e6393x_xg_pcs_ops;
mpcs->xg_pcs.neg_mode = true;
mpcs->supports_5g = true;
err = mv88e6393x_erratum_4_6(mpcs);
if (err)
goto err_free;
err = mv88e639x_pcs_setup_irq(mpcs, chip, port);
if (err)
goto err_free;
chip->ports[port].pcs_private = mpcs;
return 0;
err_free:
kfree(mpcs);
return err;
}
const struct mv88e6xxx_pcs_ops mv88e6393x_pcs_ops = {
.pcs_init = mv88e6393x_pcs_init,
.pcs_teardown = mv88e639x_pcs_teardown,
.pcs_select = mv88e639x_pcs_select,
};
| linux-master | drivers/net/dsa/mv88e6xxx/pcs-639x.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6xxx Address Translation Unit (ATU) support
*
* Copyright (c) 2008 Marvell Semiconductor
* Copyright (c) 2017 Savoir-faire Linux, Inc.
*/
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include "chip.h"
#include "global1.h"
#include "switchdev.h"
#include "trace.h"
/* Offset 0x01: ATU FID Register */
static int mv88e6xxx_g1_atu_fid_write(struct mv88e6xxx_chip *chip, u16 fid)
{
return mv88e6xxx_g1_write(chip, MV88E6352_G1_ATU_FID, fid & 0xfff);
}
/* Offset 0x0A: ATU Control Register */
int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all)
{
u16 val;
int err;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
if (err)
return err;
if (learn2all)
val |= MV88E6XXX_G1_ATU_CTL_LEARN2ALL;
else
val &= ~MV88E6XXX_G1_ATU_CTL_LEARN2ALL;
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL, val);
}
int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
unsigned int msecs)
{
const unsigned int coeff = chip->info->age_time_coeff;
const unsigned int min = 0x01 * coeff;
const unsigned int max = 0xff * coeff;
u8 age_time;
u16 val;
int err;
if (msecs < min || msecs > max)
return -ERANGE;
/* Round to nearest multiple of coeff */
age_time = (msecs + coeff / 2) / coeff;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
if (err)
return err;
/* AgeTime is 11:4 bits */
val &= ~0xff0;
val |= age_time << 4;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL, val);
if (err)
return err;
dev_dbg(chip->dev, "AgeTime set to 0x%02x (%d ms)\n", age_time,
age_time * coeff);
return 0;
}
int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
{
int err;
u16 val;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
if (err)
return err;
*hash = val & MV88E6161_G1_ATU_CTL_HASH_MASK;
return 0;
}
int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
{
int err;
u16 val;
if (hash & ~MV88E6161_G1_ATU_CTL_HASH_MASK)
return -EINVAL;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
if (err)
return err;
val &= ~MV88E6161_G1_ATU_CTL_HASH_MASK;
val |= hash;
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL, val);
}
/* Offset 0x0B: ATU Operation Register */
static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G1_ATU_OP_BUSY);
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_ATU_OP, bit, 0);
}
static int mv88e6xxx_g1_read_atu_violation(struct mv88e6xxx_chip *chip)
{
int err;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_OP,
MV88E6XXX_G1_ATU_OP_BUSY |
MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
if (err)
return err;
return mv88e6xxx_g1_atu_op_wait(chip);
}
static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
{
u16 val;
int err;
	/* The FID bits are spread across several registers; chips supporting
	 * more databases locate the extra bits differently.
	 */
if (mv88e6xxx_num_databases(chip) > 256) {
err = mv88e6xxx_g1_atu_fid_write(chip, fid);
if (err)
return err;
} else {
if (mv88e6xxx_num_databases(chip) > 64) {
/* ATU DBNum[7:4] are located in ATU Control 15:12 */
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL,
&val);
if (err)
return err;
val = (val & 0x0fff) | ((fid << 8) & 0xf000);
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL,
val);
if (err)
return err;
} else if (mv88e6xxx_num_databases(chip) > 16) {
/* ATU DBNum[5:4] are located in ATU Operation 9:8 */
op |= (fid & 0x30) << 4;
}
/* ATU DBNum[3:0] are located in ATU Operation 3:0 */
op |= fid & 0xf;
}
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_OP,
MV88E6XXX_G1_ATU_OP_BUSY | op);
if (err)
return err;
return mv88e6xxx_g1_atu_op_wait(chip);
}
int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid)
{
return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
}
static int mv88e6xxx_g1_atu_fid_read(struct mv88e6xxx_chip *chip, u16 *fid)
{
u16 val = 0, upper = 0, op = 0;
int err = -EOPNOTSUPP;
if (mv88e6xxx_num_databases(chip) > 256) {
		err = mv88e6xxx_g1_read(chip, MV88E6352_G1_ATU_FID, &val);
		if (err)
			return err;
		val &= 0xfff;
} else {
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &op);
if (err)
return err;
if (mv88e6xxx_num_databases(chip) > 64) {
/* ATU DBNum[7:4] are located in ATU Control 15:12 */
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL,
&upper);
if (err)
return err;
upper = (upper >> 8) & 0x00f0;
} else if (mv88e6xxx_num_databases(chip) > 16) {
/* ATU DBNum[5:4] are located in ATU Operation 9:8 */
upper = (op >> 4) & 0x30;
}
/* ATU DBNum[3:0] are located in ATU Operation 3:0 */
val = (op & 0xf) | upper;
}
*fid = val;
return err;
}
/* Offset 0x0C: ATU Data Register */
static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_atu_entry *entry)
{
u16 val;
int err;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_DATA, &val);
if (err)
return err;
entry->state = val & 0xf;
if (entry->state) {
entry->trunk = !!(val & MV88E6XXX_G1_ATU_DATA_TRUNK);
entry->portvec = (val >> 4) & mv88e6xxx_port_mask(chip);
}
return 0;
}
static int mv88e6xxx_g1_atu_data_write(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_atu_entry *entry)
{
u16 data = entry->state & 0xf;
if (entry->state) {
if (entry->trunk)
data |= MV88E6XXX_G1_ATU_DATA_TRUNK;
data |= (entry->portvec & mv88e6xxx_port_mask(chip)) << 4;
}
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_DATA, data);
}
/* Offset 0x0D: ATU MAC Address Register Bytes 0 & 1
* Offset 0x0E: ATU MAC Address Register Bytes 2 & 3
* Offset 0x0F: ATU MAC Address Register Bytes 4 & 5
*/
static int mv88e6xxx_g1_atu_mac_read(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_atu_entry *entry)
{
u16 val;
int i, err;
for (i = 0; i < 3; i++) {
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC01 + i, &val);
if (err)
return err;
entry->mac[i * 2] = val >> 8;
entry->mac[i * 2 + 1] = val & 0xff;
}
return 0;
}
static int mv88e6xxx_g1_atu_mac_write(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_atu_entry *entry)
{
u16 val;
int i, err;
for (i = 0; i < 3; i++) {
val = (entry->mac[i * 2] << 8) | entry->mac[i * 2 + 1];
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_MAC01 + i, val);
if (err)
return err;
}
return 0;
}
/* Address Translation Unit operations */
int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
struct mv88e6xxx_atu_entry *entry)
{
int err;
err = mv88e6xxx_g1_atu_op_wait(chip);
if (err)
return err;
/* Write the MAC address to iterate from only once */
if (!entry->state) {
err = mv88e6xxx_g1_atu_mac_write(chip, entry);
if (err)
return err;
}
err = mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
if (err)
return err;
err = mv88e6xxx_g1_atu_data_read(chip, entry);
if (err)
return err;
return mv88e6xxx_g1_atu_mac_read(chip, entry);
}
int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
struct mv88e6xxx_atu_entry *entry)
{
int err;
err = mv88e6xxx_g1_atu_op_wait(chip);
if (err)
return err;
err = mv88e6xxx_g1_atu_mac_write(chip, entry);
if (err)
return err;
err = mv88e6xxx_g1_atu_data_write(chip, entry);
if (err)
return err;
return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_LOAD_DB);
}
static int mv88e6xxx_g1_atu_flushmove(struct mv88e6xxx_chip *chip, u16 fid,
struct mv88e6xxx_atu_entry *entry,
bool all)
{
u16 op;
int err;
err = mv88e6xxx_g1_atu_op_wait(chip);
if (err)
return err;
err = mv88e6xxx_g1_atu_data_write(chip, entry);
if (err)
return err;
/* Flush/Move all or non-static entries from all or a given database */
if (all && fid)
op = MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL_DB;
else if (fid)
op = MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
else if (all)
op = MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL;
else
op = MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC;
return mv88e6xxx_g1_atu_op(chip, fid, op);
}
int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all)
{
struct mv88e6xxx_atu_entry entry = {
.state = 0, /* Null EntryState means Flush */
};
return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all);
}
static int mv88e6xxx_g1_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
int from_port, int to_port, bool all)
{
struct mv88e6xxx_atu_entry entry = { 0 };
unsigned long mask;
int shift;
if (!chip->info->atu_move_port_mask)
return -EOPNOTSUPP;
mask = chip->info->atu_move_port_mask;
shift = bitmap_weight(&mask, 16);
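	/* The ATU Data register carries both ports for a move: the source
	 * port in the lower half of the port vector field and the
	 * destination port in the upper half.
	 */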
entry.state = 0xf; /* Full EntryState means Move */
entry.portvec = from_port & mask;
entry.portvec |= (to_port & mask) << shift;
return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all);
}
int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
bool all)
{
int from_port = port;
int to_port = chip->info->atu_move_port_mask;
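	/* Moving entries to the all-ones destination port discards them,
	 * which is how the ATU remove operation is implemented.
	 */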
return mv88e6xxx_g1_atu_move(chip, fid, from_port, to_port, all);
}
static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
struct mv88e6xxx_atu_entry entry;
int err, spid;
u16 val, fid;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_read_atu_violation(chip);
if (err)
goto out_unlock;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &val);
if (err)
goto out_unlock;
err = mv88e6xxx_g1_atu_fid_read(chip, &fid);
if (err)
goto out_unlock;
err = mv88e6xxx_g1_atu_data_read(chip, &entry);
if (err)
goto out_unlock;
err = mv88e6xxx_g1_atu_mac_read(chip, &entry);
if (err)
goto out_unlock;
mv88e6xxx_reg_unlock(chip);
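	/* When a violation is read back, the EntryState field of the ATU
	 * Data register holds the source port ID of the offending frame.
	 */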
spid = entry.state;
if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
trace_mv88e6xxx_atu_member_violation(chip->dev, spid,
entry.portvec, entry.mac,
fid);
chip->ports[spid].atu_member_violation++;
}
if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
trace_mv88e6xxx_atu_miss_violation(chip->dev, spid,
entry.portvec, entry.mac,
fid);
chip->ports[spid].atu_miss_violation++;
if (fid != MV88E6XXX_FID_STANDALONE && chip->ports[spid].mab) {
err = mv88e6xxx_handle_miss_violation(chip, spid,
&entry, fid);
if (err)
goto out;
}
}
if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
trace_mv88e6xxx_atu_full_violation(chip->dev, spid,
entry.portvec, entry.mac,
fid);
chip->ports[spid].atu_full_violation++;
}
return IRQ_HANDLED;
out_unlock:
mv88e6xxx_reg_unlock(chip);
out:
dev_err(chip->dev, "ATU problem: error %d while handling interrupt\n",
err);
return IRQ_HANDLED;
}
int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip)
{
int err;
chip->atu_prob_irq = irq_find_mapping(chip->g1_irq.domain,
MV88E6XXX_G1_STS_IRQ_ATU_PROB);
if (chip->atu_prob_irq < 0)
return chip->atu_prob_irq;
snprintf(chip->atu_prob_irq_name, sizeof(chip->atu_prob_irq_name),
"mv88e6xxx-%s-g1-atu-prob", dev_name(chip->dev));
err = request_threaded_irq(chip->atu_prob_irq, NULL,
mv88e6xxx_g1_atu_prob_irq_thread_fn,
IRQF_ONESHOT, chip->atu_prob_irq_name,
chip);
if (err)
irq_dispose_mapping(chip->atu_prob_irq);
return err;
}
void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip)
{
free_irq(chip->atu_prob_irq, chip);
irq_dispose_mapping(chip->atu_prob_irq);
}
| linux-master | drivers/net/dsa/mv88e6xxx/global1_atu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6xxx VLAN [Spanning Tree] Translation Unit (VTU [STU]) support
*
* Copyright (c) 2008 Marvell Semiconductor
* Copyright (c) 2015 CMC Electronics, Inc.
* Copyright (c) 2017 Savoir-faire Linux, Inc.
*/
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include "chip.h"
#include "global1.h"
#include "trace.h"
/* Offset 0x02: VTU FID Register */
static int mv88e6xxx_g1_vtu_fid_read(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
u16 val;
int err;
err = mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_FID, &val);
if (err)
return err;
entry->fid = val & MV88E6352_G1_VTU_FID_MASK;
entry->policy = !!(val & MV88E6352_G1_VTU_FID_VID_POLICY);
return 0;
}
static int mv88e6xxx_g1_vtu_fid_write(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
u16 val = entry->fid & MV88E6352_G1_VTU_FID_MASK;
if (entry->policy)
val |= MV88E6352_G1_VTU_FID_VID_POLICY;
return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_FID, val);
}
/* Offset 0x03: VTU SID Register */
static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip, u8 *sid)
{
u16 val;
int err;
err = mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID, &val);
if (err)
return err;
*sid = val & MV88E6352_G1_VTU_SID_MASK;
return 0;
}
static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip, u8 sid)
{
u16 val = sid & MV88E6352_G1_VTU_SID_MASK;
return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_SID, val);
}
/* Offset 0x05: VTU Operation Register */
static int mv88e6xxx_g1_vtu_op_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G1_VTU_OP_BUSY);
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_VTU_OP, bit, 0);
}
static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op)
{
int err;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_OP,
MV88E6XXX_G1_VTU_OP_BUSY | op);
if (err)
return err;
return mv88e6xxx_g1_vtu_op_wait(chip);
}
/* Offset 0x06: VTU VID Register */
static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
bool *valid, u16 *vid)
{
u16 val;
int err;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID, &val);
if (err)
return err;
if (vid) {
*vid = val & 0xfff;
if (val & MV88E6390_G1_VTU_VID_PAGE)
*vid |= 0x1000;
}
if (valid)
*valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
return 0;
}
static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
bool valid, u16 vid)
{
u16 val = vid & 0xfff;
if (vid & 0x1000)
val |= MV88E6390_G1_VTU_VID_PAGE;
if (valid)
val |= MV88E6XXX_G1_VTU_VID_VALID;
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_VID, val);
}
/* Offset 0x07: VTU/STU Data Register 1
* Offset 0x08: VTU/STU Data Register 2
* Offset 0x09: VTU/STU Data Register 3
*/
static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
u16 *regs)
{
int i;
/* Read all 3 VTU/STU Data registers */
for (i = 0; i < 3; ++i) {
		u16 *reg = &regs[i];
int err;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1 + i, reg);
if (err)
return err;
}
return 0;
}
static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
u8 *member, u8 *state)
{
u16 regs[3];
int err;
int i;
err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
if (err)
return err;
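	/* Each port occupies a nibble: bits 1:0 hold the MemberTag and
	 * bits 3:2 the PortState, four ports per 16-bit register.
	 */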
/* Extract MemberTag data */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int member_offset = (i % 4) * 4;
unsigned int state_offset = member_offset + 2;
if (member)
member[i] = (regs[i / 4] >> member_offset) & 0x3;
if (state)
state[i] = (regs[i / 4] >> state_offset) & 0x3;
}
return 0;
}
static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
u8 *member, u8 *state)
{
u16 regs[3] = { 0 };
int i;
/* Insert MemberTag and PortState data */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int member_offset = (i % 4) * 4;
unsigned int state_offset = member_offset + 2;
if (member)
regs[i / 4] |= (member[i] & 0x3) << member_offset;
if (state)
regs[i / 4] |= (state[i] & 0x3) << state_offset;
}
/* Write all 3 VTU/STU Data registers */
for (i = 0; i < 3; ++i) {
u16 reg = regs[i];
int err;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_DATA1 + i, reg);
if (err)
return err;
}
return 0;
}
static int mv88e6390_g1_vtu_data_read(struct mv88e6xxx_chip *chip, u8 *data)
{
u16 regs[2];
int i;
/* Read the 2 VTU/STU Data registers */
for (i = 0; i < 2; ++i) {
		u16 *reg = &regs[i];
int err;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1 + i, reg);
if (err)
return err;
}
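	/* Two bits per port, eight ports per 16-bit register */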
/* Extract data */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int offset = (i % 8) * 2;
data[i] = (regs[i / 8] >> offset) & 0x3;
}
return 0;
}
static int mv88e6390_g1_vtu_data_write(struct mv88e6xxx_chip *chip, u8 *data)
{
u16 regs[2] = { 0 };
int i;
/* Insert data */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int offset = (i % 8) * 2;
regs[i / 8] |= (data[i] & 0x3) << offset;
}
/* Write the 2 VTU/STU Data registers */
for (i = 0; i < 2; ++i) {
u16 reg = regs[i];
int err;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_DATA1 + i, reg);
if (err)
return err;
}
return 0;
}
/* VLAN Translation Unit Operations */
int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
int err;
err = mv88e6xxx_g1_vtu_op_wait(chip);
if (err)
return err;
/* To get the next higher active VID, the VTU GetNext operation can be
* started again without setting the VID registers since it already
* contains the last VID.
*
* To save a few hardware accesses and abstract this to the caller,
* write the VID only once, when the entry is given as invalid.
*/
if (!entry->valid) {
err = mv88e6xxx_g1_vtu_vid_write(chip, false, entry->vid);
if (err)
return err;
}
err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_GET_NEXT);
if (err)
return err;
return mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, &entry->vid);
}
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
u16 val;
int err;
err = mv88e6xxx_g1_vtu_getnext(chip, entry);
if (err)
return err;
if (entry->valid) {
err = mv88e6185_g1_vtu_data_read(chip, entry->member, entry->state);
if (err)
return err;
/* VTU DBNum[3:0] are located in VTU Operation 3:0
* VTU DBNum[7:4] ([5:4] for 6250) are located in VTU Operation 11:8 (9:8)
*/
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
if (err)
return err;
entry->fid = val & 0x000f;
entry->fid |= (val & 0x0f00) >> 4;
entry->fid &= mv88e6xxx_num_databases(chip) - 1;
}
return 0;
}
int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
int err;
/* Fetch VLAN MemberTag data from the VTU */
err = mv88e6xxx_g1_vtu_getnext(chip, entry);
if (err)
return err;
if (entry->valid) {
err = mv88e6185_g1_vtu_data_read(chip, entry->member, NULL);
if (err)
return err;
err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
if (err)
return err;
err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
return 0;
}
int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
int err;
/* Fetch VLAN MemberTag data from the VTU */
err = mv88e6xxx_g1_vtu_getnext(chip, entry);
if (err)
return err;
if (entry->valid) {
err = mv88e6390_g1_vtu_data_read(chip, entry->member);
if (err)
return err;
err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
if (err)
return err;
err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
return 0;
}
int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
u16 op = MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE;
int err;
err = mv88e6xxx_g1_vtu_op_wait(chip);
if (err)
return err;
err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
err = mv88e6185_g1_vtu_data_write(chip, entry->member, entry->state);
if (err)
return err;
/* VTU DBNum[3:0] are located in VTU Operation 3:0
* VTU DBNum[7:4] are located in VTU Operation 11:8
*
* For the 6250/6220, the latter are really [5:4] and
* 9:8, but in those cases bits 7:6 of entry->fid are
* 0 since they have num_databases = 64.
*/
op |= entry->fid & 0x000f;
op |= (entry->fid & 0x00f0) << 4;
}
return mv88e6xxx_g1_vtu_op(chip, op);
}
int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
int err;
err = mv88e6xxx_g1_vtu_op_wait(chip);
if (err)
return err;
err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
/* Write MemberTag data */
err = mv88e6185_g1_vtu_data_write(chip, entry->member, NULL);
if (err)
return err;
err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
if (err)
return err;
err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
}
/* Load/Purge VTU entry */
return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
}
int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
int err;
err = mv88e6xxx_g1_vtu_op_wait(chip);
if (err)
return err;
err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
/* Write MemberTag data */
err = mv88e6390_g1_vtu_data_write(chip, entry->member);
if (err)
return err;
err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
if (err)
return err;
err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
}
/* Load/Purge VTU entry */
return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
}
int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
{
int err;
err = mv88e6xxx_g1_vtu_op_wait(chip);
if (err)
return err;
return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
}
/* Spanning Tree Unit Operations */
int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_stu_entry *entry)
{
int err;
err = mv88e6xxx_g1_vtu_op_wait(chip);
if (err)
return err;
/* To get the next higher active SID, the STU GetNext operation can be
* started again without setting the SID registers since it already
* contains the last SID.
*
* To save a few hardware accesses and abstract this to the caller,
* write the SID only once, when the entry is given as invalid.
*/
if (!entry->valid) {
err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
}
err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
if (err)
return err;
err = mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, NULL);
if (err)
return err;
if (entry->valid) {
err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
return 0;
}
int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_stu_entry *entry)
{
int err;
err = mv88e6xxx_g1_stu_getnext(chip, entry);
if (err)
return err;
if (!entry->valid)
return 0;
return mv88e6185_g1_vtu_data_read(chip, NULL, entry->state);
}
int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_stu_entry *entry)
{
int err;
err = mv88e6xxx_g1_stu_getnext(chip, entry);
if (err)
return err;
if (!entry->valid)
return 0;
return mv88e6390_g1_vtu_data_read(chip, entry->state);
}
int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_stu_entry *entry)
{
int err;
err = mv88e6xxx_g1_vtu_op_wait(chip);
if (err)
return err;
err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
if (err)
return err;
err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
if (entry->valid) {
err = mv88e6185_g1_vtu_data_write(chip, NULL, entry->state);
if (err)
return err;
}
/* Load/Purge STU entry */
return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
}
int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_stu_entry *entry)
{
int err;
err = mv88e6xxx_g1_vtu_op_wait(chip);
if (err)
return err;
err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
if (err)
return err;
err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
if (entry->valid) {
err = mv88e6390_g1_vtu_data_write(chip, entry->state);
if (err)
return err;
}
/* Load/Purge STU entry */
return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
}
/* VTU Violation Management */
static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
u16 val, vid;
int spid;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_GET_CLR_VIOLATION);
if (err)
goto out;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
if (err)
goto out;
err = mv88e6xxx_g1_vtu_vid_read(chip, NULL, &vid);
if (err)
goto out;
spid = val & MV88E6XXX_G1_VTU_OP_SPID_MASK;
if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
trace_mv88e6xxx_vtu_member_violation(chip->dev, spid, vid);
chip->ports[spid].vtu_member_violation++;
}
if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) {
trace_mv88e6xxx_vtu_miss_violation(chip->dev, spid, vid);
chip->ports[spid].vtu_miss_violation++;
}
mv88e6xxx_reg_unlock(chip);
return IRQ_HANDLED;
out:
mv88e6xxx_reg_unlock(chip);
dev_err(chip->dev, "VTU problem: error %d while handling interrupt\n",
err);
return IRQ_HANDLED;
}
int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip)
{
int err;
chip->vtu_prob_irq = irq_find_mapping(chip->g1_irq.domain,
MV88E6XXX_G1_STS_IRQ_VTU_PROB);
if (chip->vtu_prob_irq < 0)
return chip->vtu_prob_irq;
snprintf(chip->vtu_prob_irq_name, sizeof(chip->vtu_prob_irq_name),
"mv88e6xxx-%s-g1-vtu-prob", dev_name(chip->dev));
err = request_threaded_irq(chip->vtu_prob_irq, NULL,
mv88e6xxx_g1_vtu_prob_irq_thread_fn,
IRQF_ONESHOT, chip->vtu_prob_irq_name,
chip);
if (err)
irq_dispose_mapping(chip->vtu_prob_irq);
return err;
}
void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip)
{
free_irq(chip->vtu_prob_irq, chip);
irq_dispose_mapping(chip->vtu_prob_irq);
}
| linux-master | drivers/net/dsa/mv88e6xxx/global1_vtu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6xxx SERDES manipulation, via SMI bus
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2017 Andrew Lunn <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/mii.h>
#include "chip.h"
#include "global2.h"
#include "phy.h"
#include "port.h"
#include "serdes.h"
static int mv88e6352_serdes_read(struct mv88e6xxx_chip *chip, int reg,
u16 *val)
{
return mv88e6xxx_phy_page_read(chip, MV88E6352_ADDR_SERDES,
MV88E6352_SERDES_PAGE_FIBER,
reg, val);
}
static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg,
u16 val)
{
return mv88e6xxx_phy_page_write(chip, MV88E6352_ADDR_SERDES,
MV88E6352_SERDES_PAGE_FIBER,
reg, val);
}
static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip,
int lane, int device, int reg, u16 *val)
{
return mv88e6xxx_phy_read_c45(chip, lane, device, reg, val);
}
int mv88e6xxx_pcs_decode_state(struct device *dev, u16 bmsr, u16 lpa,
u16 status, struct phylink_link_state *state)
{
state->link = false;
/* If the BMSR reports that the link had failed, report this to
* phylink.
*/
if (!(bmsr & BMSR_LSTATUS))
return 0;
state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
		/* The Speed and Duplex Resolved register is 1 if AN is enabled
* and complete, or if AN is disabled. So with disabled AN we
* still get here on link up.
*/
state->duplex = status &
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF;
if (status & MV88E6390_SGMII_PHY_STATUS_TX_PAUSE)
state->pause |= MLO_PAUSE_TX;
if (status & MV88E6390_SGMII_PHY_STATUS_RX_PAUSE)
state->pause |= MLO_PAUSE_RX;
switch (status & MV88E6390_SGMII_PHY_STATUS_SPEED_MASK) {
case MV88E6390_SGMII_PHY_STATUS_SPEED_1000:
if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
state->speed = SPEED_2500;
else
state->speed = SPEED_1000;
break;
case MV88E6390_SGMII_PHY_STATUS_SPEED_100:
state->speed = SPEED_100;
break;
case MV88E6390_SGMII_PHY_STATUS_SPEED_10:
state->speed = SPEED_10;
break;
default:
dev_err(dev, "invalid PHY speed\n");
return -EINVAL;
}
} else if (state->link &&
state->interface != PHY_INTERFACE_MODE_SGMII) {
		/* If the Speed and Duplex Resolved register is 0 and link is up, it
* means that AN was enabled, but link partner had it disabled
* and the PHY invoked the Auto-Negotiation Bypass feature and
* linked anyway.
*/
state->duplex = DUPLEX_FULL;
if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
state->speed = SPEED_2500;
else
state->speed = SPEED_1000;
} else {
state->link = false;
}
if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
mii_lpa_mod_linkmode_x(state->lp_advertising, lpa,
ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
else if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
mii_lpa_mod_linkmode_x(state->lp_advertising, lpa,
ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
return 0;
}
struct mv88e6352_serdes_hw_stat {
char string[ETH_GSTRING_LEN];
int sizeof_stat;
int reg;
};
static struct mv88e6352_serdes_hw_stat mv88e6352_serdes_hw_stats[] = {
{ "serdes_fibre_rx_error", 16, 21 },
{ "serdes_PRBS_error", 32, 24 },
};
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
{
int err;
err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
if (err <= 0)
return err;
return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data)
{
struct mv88e6352_serdes_hw_stat *stat;
int err, i;
err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
if (err <= 0)
return err;
for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) {
stat = &mv88e6352_serdes_hw_stats[i];
memcpy(data + i * ETH_GSTRING_LEN, stat->string,
ETH_GSTRING_LEN);
}
return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
static uint64_t mv88e6352_serdes_get_stat(struct mv88e6xxx_chip *chip,
struct mv88e6352_serdes_hw_stat *stat)
{
u64 val = 0;
u16 reg;
int err;
	err = mv88e6352_serdes_read(chip, stat->reg, &reg);
if (err) {
dev_err(chip->dev, "failed to read statistic\n");
return 0;
}
val = reg;
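	/* 32-bit counters span two consecutive registers; the register read
	 * above provides the upper word and the following one the lower word.
	 */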
if (stat->sizeof_stat == 32) {
		err = mv88e6352_serdes_read(chip, stat->reg + 1, &reg);
if (err) {
dev_err(chip->dev, "failed to read statistic\n");
return 0;
}
val = val << 16 | reg;
}
return val;
}
int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
struct mv88e6xxx_port *mv88e6xxx_port = &chip->ports[port];
struct mv88e6352_serdes_hw_stat *stat;
int i, err;
u64 value;
err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
if (err <= 0)
return err;
BUILD_BUG_ON(ARRAY_SIZE(mv88e6352_serdes_hw_stats) >
ARRAY_SIZE(mv88e6xxx_port->serdes_stats));
for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) {
stat = &mv88e6352_serdes_hw_stats[i];
value = mv88e6352_serdes_get_stat(chip, stat);
mv88e6xxx_port->serdes_stats[i] += value;
data[i] = mv88e6xxx_port->serdes_stats[i];
}
return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
{
return irq_find_mapping(chip->g2_irq.domain, MV88E6352_SERDES_IRQ);
}
int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
{
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
mv88e6xxx_reg_unlock(chip);
if (err <= 0)
return err;
return 32 * sizeof(u16);
}
void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
{
u16 *p = _p;
u16 reg;
int err;
int i;
err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
if (err <= 0)
return;
for (i = 0 ; i < 32; i++) {
		err = mv88e6352_serdes_read(chip, i, &reg);
if (!err)
p[i] = reg;
}
}
int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
int lane = -ENODEV;
switch (port) {
case 5:
if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
lane = MV88E6341_PORT5_LANE;
break;
}
return lane;
}
int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
int lane = -ENODEV;
switch (port) {
case 9:
if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
lane = MV88E6390_PORT9_LANE0;
break;
case 10:
if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
lane = MV88E6390_PORT10_LANE0;
break;
}
return lane;
}
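/* On the 88E6390X, lanes 1-3 of ports 9 and 10 may be borrowed by ports 2-4
 * and 5-7 respectively, but only while the borrowing port itself runs
 * 1000BASE-X and the owning port's cmode does not already occupy that lane.
 */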
int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode_port = chip->ports[port].cmode;
u8 cmode_port10 = chip->ports[10].cmode;
u8 cmode_port9 = chip->ports[9].cmode;
int lane = -ENODEV;
switch (port) {
case 2:
if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
lane = MV88E6390_PORT9_LANE1;
break;
case 3:
if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
lane = MV88E6390_PORT9_LANE2;
break;
case 4:
if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
lane = MV88E6390_PORT9_LANE3;
break;
case 5:
if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
lane = MV88E6390_PORT10_LANE1;
break;
case 6:
if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
lane = MV88E6390_PORT10_LANE2;
break;
case 7:
if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
lane = MV88E6390_PORT10_LANE3;
break;
case 9:
if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_XAUI ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
lane = MV88E6390_PORT9_LANE0;
break;
case 10:
if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_XAUI ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
lane = MV88E6390_PORT10_LANE0;
break;
}
return lane;
}
/* Only ports 0, 9 and 10 have SERDES lanes. Return the SERDES lane address
 * a port is using, otherwise -ENODEV.
 */
int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
int lane = -ENODEV;
if (port != 0 && port != 9 && port != 10)
return -EOPNOTSUPP;
if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode == MV88E6393X_PORT_STS_CMODE_5GBASER ||
cmode == MV88E6393X_PORT_STS_CMODE_10GBASER ||
cmode == MV88E6393X_PORT_STS_CMODE_USXGMII)
lane = port;
return lane;
}
struct mv88e6390_serdes_hw_stat {
char string[ETH_GSTRING_LEN];
int reg;
};
static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = {
{ "serdes_rx_pkts", 0xf021 },
{ "serdes_rx_bytes", 0xf024 },
{ "serdes_rx_pkts_error", 0xf027 },
};
int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
{
if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
return 0;
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
}
int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data)
{
struct mv88e6390_serdes_hw_stat *stat;
int i;
if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
return 0;
for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
stat = &mv88e6390_serdes_hw_stats[i];
memcpy(data + i * ETH_GSTRING_LEN, stat->string,
ETH_GSTRING_LEN);
}
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
}
static uint64_t mv88e6390_serdes_get_stat(struct mv88e6xxx_chip *chip, int lane,
struct mv88e6390_serdes_hw_stat *stat)
{
u16 reg[3];
int err, i;
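	/* Each statistic is a 48-bit counter spread over three consecutive
	 * 16-bit registers, least significant word first.
	 */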
for (i = 0; i < 3; i++) {
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
					    stat->reg + i, &reg[i]);
if (err) {
dev_err(chip->dev, "failed to read statistic\n");
return 0;
}
}
return reg[0] | ((u64)reg[1] << 16) | ((u64)reg[2] << 32);
}
int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
struct mv88e6390_serdes_hw_stat *stat;
int lane;
int i;
lane = mv88e6xxx_serdes_get_lane(chip, port);
if (lane < 0)
return 0;
for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
stat = &mv88e6390_serdes_hw_stats[i];
data[i] = mv88e6390_serdes_get_stat(chip, lane, stat);
}
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
}
unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
{
return irq_find_mapping(chip->g2_irq.domain, port);
}
static const u16 mv88e6390_serdes_regs[] = {
/* SERDES common registers */
0xf00a, 0xf00b, 0xf00c,
0xf010, 0xf011, 0xf012, 0xf013,
0xf016, 0xf017, 0xf018,
0xf01b, 0xf01c, 0xf01d, 0xf01e, 0xf01f,
0xf020, 0xf021, 0xf022, 0xf023, 0xf024, 0xf025, 0xf026, 0xf027,
0xf028, 0xf029,
0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
0xf038, 0xf039,
/* SGMII */
0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007,
0x2008,
0x200f,
0xa000, 0xa001, 0xa002, 0xa003,
/* 10Gbase-X */
0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007,
0x1008,
0x100e, 0x100f,
0x1018, 0x1019,
0x9000, 0x9001, 0x9002, 0x9003, 0x9004,
0x9006,
0x9010, 0x9011, 0x9012, 0x9013, 0x9014, 0x9015, 0x9016,
/* 10Gbase-R */
0x1020, 0x1021, 0x1022, 0x1023, 0x1024, 0x1025, 0x1026, 0x1027,
0x1028, 0x1029, 0x102a, 0x102b,
};
int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
{
if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
return 0;
return ARRAY_SIZE(mv88e6390_serdes_regs) * sizeof(u16);
}
void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
{
u16 *p = _p;
int lane;
u16 reg;
int err;
int i;
lane = mv88e6xxx_serdes_get_lane(chip, port);
if (lane < 0)
return;
for (i = 0 ; i < ARRAY_SIZE(mv88e6390_serdes_regs); i++) {
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
					    mv88e6390_serdes_regs[i], &reg);
if (!err)
p[i] = reg;
}
}
static const int mv88e6352_serdes_p2p_to_reg[] = {
/* Index of value in microvolts corresponds to the register value */
14000, 112000, 210000, 308000, 406000, 504000, 602000, 700000,
};
int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
int val)
{
bool found = false;
u16 ctrl, reg;
int err;
int i;
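	/* Translate the requested amplitude in microvolts into its register
	 * index; only exact matches against the table above are accepted.
	 */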
err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
if (err <= 0)
return err;
for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_p2p_to_reg); ++i) {
if (mv88e6352_serdes_p2p_to_reg[i] == val) {
reg = i;
found = true;
break;
}
}
if (!found)
return -EINVAL;
err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_SPEC_CTRL2, &ctrl);
if (err)
return err;
ctrl &= ~MV88E6352_SERDES_OUT_AMP_MASK;
ctrl |= reg;
return mv88e6352_serdes_write(chip, MV88E6352_SERDES_SPEC_CTRL2, ctrl);
}
| linux-master | drivers/net/dsa/mv88e6xxx/serdes.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88e6xxx Ethernet switch PHY and PPU support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2017 Andrew Lunn <[email protected]>
*/
#include <linux/mdio.h>
#include <linux/module.h>
#include "chip.h"
#include "phy.h"
int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
int addr, int reg, u16 *val)
{
return mv88e6xxx_read(chip, addr, reg, val);
}
int mv88e6165_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
int addr, int reg, u16 val)
{
return mv88e6xxx_write(chip, addr, reg, val);
}
int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy, int reg, u16 *val)
{
	int addr = phy; /* PHY device addresses start at 0x0 */
struct mii_bus *bus;
bus = mv88e6xxx_default_mdio_bus(chip);
if (!bus)
return -EOPNOTSUPP;
if (!chip->info->ops->phy_read)
return -EOPNOTSUPP;
return chip->info->ops->phy_read(chip, bus, addr, reg, val);
}
int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val)
{
	int addr = phy; /* PHY device addresses start at 0x0 */
struct mii_bus *bus;
bus = mv88e6xxx_default_mdio_bus(chip);
if (!bus)
return -EOPNOTSUPP;
if (!chip->info->ops->phy_write)
return -EOPNOTSUPP;
return chip->info->ops->phy_write(chip, bus, addr, reg, val);
}
int mv88e6xxx_phy_read_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
int reg, u16 *val)
{
	int addr = phy; /* PHY device addresses start at 0x0 */
struct mii_bus *bus;
bus = mv88e6xxx_default_mdio_bus(chip);
if (!bus)
return -EOPNOTSUPP;
if (!chip->info->ops->phy_read_c45)
return -EOPNOTSUPP;
return chip->info->ops->phy_read_c45(chip, bus, addr, devad, reg, val);
}
int mv88e6xxx_phy_write_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
int reg, u16 val)
{
	int addr = phy; /* PHY device addresses start at 0x0 */
struct mii_bus *bus;
bus = mv88e6xxx_default_mdio_bus(chip);
if (!bus)
return -EOPNOTSUPP;
if (!chip->info->ops->phy_write_c45)
return -EOPNOTSUPP;
return chip->info->ops->phy_write_c45(chip, bus, addr, devad, reg, val);
}
static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
{
return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
}
static void mv88e6xxx_phy_page_put(struct mv88e6xxx_chip *chip, int phy)
{
int err;
/* Restore PHY page Copper 0x0 for access via the registered
* MDIO bus
*/
err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE,
MV88E6XXX_PHY_PAGE_COPPER);
if (unlikely(err)) {
dev_err(chip->dev,
"failed to restore PHY %d page Copper (%d)\n",
phy, err);
}
}
int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
u8 page, int reg, u16 *val)
{
int err;
	/* There is no paging for register 22 */
if (reg == MV88E6XXX_PHY_PAGE)
return -EINVAL;
err = mv88e6xxx_phy_page_get(chip, phy, page);
if (!err) {
err = mv88e6xxx_phy_read(chip, phy, reg, val);
mv88e6xxx_phy_page_put(chip, phy);
}
return err;
}
int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
u8 page, int reg, u16 val)
{
int err;
	/* There is no paging for register 22 */
if (reg == MV88E6XXX_PHY_PAGE)
return -EINVAL;
err = mv88e6xxx_phy_page_get(chip, phy, page);
if (!err) {
err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
if (!err)
err = mv88e6xxx_phy_write(chip, phy, reg, val);
mv88e6xxx_phy_page_put(chip, phy);
}
return err;
}
static int mv88e6xxx_phy_ppu_disable(struct mv88e6xxx_chip *chip)
{
if (!chip->info->ops->ppu_disable)
return 0;
return chip->info->ops->ppu_disable(chip);
}
static int mv88e6xxx_phy_ppu_enable(struct mv88e6xxx_chip *chip)
{
if (!chip->info->ops->ppu_enable)
return 0;
return chip->info->ops->ppu_enable(chip);
}
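/* Re-enabling the PPU is deferred: ppu_access_put() arms a short timer, and
 * the timer schedules a work item so the (sleeping) register access runs in
 * process context rather than in the timer callback.
 */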
static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
{
struct mv88e6xxx_chip *chip;
chip = container_of(ugly, struct mv88e6xxx_chip, ppu_work);
mv88e6xxx_reg_lock(chip);
if (mutex_trylock(&chip->ppu_mutex)) {
if (mv88e6xxx_phy_ppu_enable(chip) == 0)
chip->ppu_disabled = 0;
mutex_unlock(&chip->ppu_mutex);
}
mv88e6xxx_reg_unlock(chip);
}
static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
{
struct mv88e6xxx_chip *chip = from_timer(chip, t, ppu_timer);
schedule_work(&chip->ppu_work);
}
static int mv88e6xxx_phy_ppu_access_get(struct mv88e6xxx_chip *chip)
{
int ret;
mutex_lock(&chip->ppu_mutex);
/* If the PHY polling unit is enabled, disable it so that
* we can access the PHY registers. If it was already
* disabled, cancel the timer that is going to re-enable
* it.
*/
if (!chip->ppu_disabled) {
ret = mv88e6xxx_phy_ppu_disable(chip);
if (ret < 0) {
mutex_unlock(&chip->ppu_mutex);
return ret;
}
chip->ppu_disabled = 1;
} else {
del_timer(&chip->ppu_timer);
ret = 0;
}
return ret;
}
static void mv88e6xxx_phy_ppu_access_put(struct mv88e6xxx_chip *chip)
{
/* Schedule a timer to re-enable the PHY polling unit. */
mod_timer(&chip->ppu_timer, jiffies + msecs_to_jiffies(10));
mutex_unlock(&chip->ppu_mutex);
}
static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
{
mutex_init(&chip->ppu_mutex);
INIT_WORK(&chip->ppu_work, mv88e6xxx_phy_ppu_reenable_work);
timer_setup(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer, 0);
}
static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip)
{
del_timer_sync(&chip->ppu_timer);
}
int mv88e6185_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
int addr, int reg, u16 *val)
{
int err;
err = mv88e6xxx_phy_ppu_access_get(chip);
if (!err) {
err = mv88e6xxx_read(chip, addr, reg, val);
mv88e6xxx_phy_ppu_access_put(chip);
}
return err;
}
int mv88e6185_phy_ppu_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
int addr, int reg, u16 val)
{
int err;
err = mv88e6xxx_phy_ppu_access_get(chip);
if (!err) {
err = mv88e6xxx_write(chip, addr, reg, val);
mv88e6xxx_phy_ppu_access_put(chip);
}
return err;
}
void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip)
{
if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
mv88e6xxx_phy_ppu_state_init(chip);
}
void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip)
{
if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
mv88e6xxx_phy_ppu_state_destroy(chip);
}
int mv88e6xxx_phy_setup(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_phy_ppu_enable(chip);
}
| linux-master | drivers/net/dsa/mv88e6xxx/phy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6185 family SERDES PCS support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2017 Andrew Lunn <[email protected]>
*/
#include <linux/phylink.h>
#include "global2.h"
#include "port.h"
#include "serdes.h"
struct mv88e6185_pcs {
struct phylink_pcs phylink_pcs;
unsigned int irq;
char name[64];
struct mv88e6xxx_chip *chip;
int port;
};
static struct mv88e6185_pcs *pcs_to_mv88e6185_pcs(struct phylink_pcs *pcs)
{
return container_of(pcs, struct mv88e6185_pcs, phylink_pcs);
}
static irqreturn_t mv88e6185_pcs_handle_irq(int irq, void *dev_id)
{
struct mv88e6185_pcs *mpcs = dev_id;
struct mv88e6xxx_chip *chip;
irqreturn_t ret = IRQ_NONE;
bool link_up;
u16 status;
int port;
int err;
chip = mpcs->chip;
port = mpcs->port;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
mv88e6xxx_reg_unlock(chip);
if (!err) {
link_up = !!(status & MV88E6XXX_PORT_STS_LINK);
phylink_pcs_change(&mpcs->phylink_pcs, link_up);
ret = IRQ_HANDLED;
}
return ret;
}
static void mv88e6185_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct mv88e6185_pcs *mpcs = pcs_to_mv88e6185_pcs(pcs);
struct mv88e6xxx_chip *chip = mpcs->chip;
int port = mpcs->port;
u16 status;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
mv88e6xxx_reg_unlock(chip);
if (err)
status = 0;
state->link = !!(status & MV88E6XXX_PORT_STS_LINK);
if (state->link) {
state->duplex = status & MV88E6XXX_PORT_STS_DUPLEX ?
DUPLEX_FULL : DUPLEX_HALF;
switch (status & MV88E6XXX_PORT_STS_SPEED_MASK) {
case MV88E6XXX_PORT_STS_SPEED_1000:
state->speed = SPEED_1000;
break;
case MV88E6XXX_PORT_STS_SPEED_100:
state->speed = SPEED_100;
break;
case MV88E6XXX_PORT_STS_SPEED_10:
state->speed = SPEED_10;
break;
default:
state->link = false;
break;
}
}
}
static int mv88e6185_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
return 0;
}
static void mv88e6185_pcs_an_restart(struct phylink_pcs *pcs)
{
}
static const struct phylink_pcs_ops mv88e6185_phylink_pcs_ops = {
.pcs_get_state = mv88e6185_pcs_get_state,
.pcs_config = mv88e6185_pcs_config,
.pcs_an_restart = mv88e6185_pcs_an_restart,
};
static int mv88e6185_pcs_init(struct mv88e6xxx_chip *chip, int port)
{
struct mv88e6185_pcs *mpcs;
struct device *dev;
unsigned int irq;
int err;
/* There are no configurable serdes lanes on this switch chip, so
* we use the static cmode configuration to determine whether we
* have a PCS or not.
*/
if (chip->ports[port].cmode != MV88E6185_PORT_STS_CMODE_SERDES &&
chip->ports[port].cmode != MV88E6185_PORT_STS_CMODE_1000BASE_X)
return 0;
dev = chip->dev;
mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL);
if (!mpcs)
return -ENOMEM;
mpcs->chip = chip;
mpcs->port = port;
mpcs->phylink_pcs.ops = &mv88e6185_phylink_pcs_ops;
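	/* Use the SERDES interrupt for link change notification when one is
	 * available, otherwise let phylink poll this PCS.
	 */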
irq = mv88e6xxx_serdes_irq_mapping(chip, port);
if (irq) {
snprintf(mpcs->name, sizeof(mpcs->name),
"mv88e6xxx-%s-serdes-%d", dev_name(dev), port);
err = request_threaded_irq(irq, NULL, mv88e6185_pcs_handle_irq,
IRQF_ONESHOT, mpcs->name, mpcs);
if (err) {
kfree(mpcs);
return err;
}
mpcs->irq = irq;
} else {
mpcs->phylink_pcs.poll = true;
}
chip->ports[port].pcs_private = &mpcs->phylink_pcs;
return 0;
}
static void mv88e6185_pcs_teardown(struct mv88e6xxx_chip *chip, int port)
{
struct mv88e6185_pcs *mpcs;
mpcs = chip->ports[port].pcs_private;
if (!mpcs)
return;
if (mpcs->irq)
free_irq(mpcs->irq, mpcs);
kfree(mpcs);
chip->ports[port].pcs_private = NULL;
}
static struct phylink_pcs *mv88e6185_pcs_select(struct mv88e6xxx_chip *chip,
int port,
phy_interface_t interface)
{
return chip->ports[port].pcs_private;
}
const struct mv88e6xxx_pcs_ops mv88e6185_pcs_ops = {
.pcs_init = mv88e6185_pcs_init,
.pcs_teardown = mv88e6185_pcs_teardown,
.pcs_select = mv88e6185_pcs_select,
};
| linux-master | drivers/net/dsa/mv88e6xxx/pcs-6185.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6xxx Switch Global 2 Registers support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2016-2017 Savoir-faire Linux Inc.
* Vivien Didelot <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include "chip.h"
#include "global1.h" /* for MV88E6XXX_G1_STS_IRQ_DEVICE */
#include "global2.h"
int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
{
return mv88e6xxx_read(chip, chip->info->global2_addr, reg, val);
}
int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
{
return mv88e6xxx_write(chip, chip->info->global2_addr, reg, val);
}
int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg, int
bit, int val)
{
return mv88e6xxx_wait_bit(chip, chip->info->global2_addr, reg,
bit, val);
}
/* Offset 0x00: Interrupt Source Register */
static int mv88e6xxx_g2_int_source(struct mv88e6xxx_chip *chip, u16 *src)
{
/* Read (and clear most of) the Interrupt Source bits */
return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_INT_SRC, src);
}
/* Offset 0x01: Interrupt Mask Register */
static int mv88e6xxx_g2_int_mask(struct mv88e6xxx_chip *chip, u16 mask)
{
return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_INT_MASK, mask);
}
/* Offset 0x02: Management Enable 2x */
static int mv88e6xxx_g2_mgmt_enable_2x(struct mv88e6xxx_chip *chip, u16 en2x)
{
return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_2X, en2x);
}
/* Offset 0x03: Management Enable 0x */
static int mv88e6xxx_g2_mgmt_enable_0x(struct mv88e6xxx_chip *chip, u16 en0x)
{
return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_0X, en0x);
}
/* Offset 0x05: Switch Management Register */
static int mv88e6xxx_g2_switch_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip,
bool enable)
{
u16 val;
int err;
err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SWITCH_MGMT, &val);
if (err)
return err;
if (enable)
val |= MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU;
else
val &= ~MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU;
return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, val);
}
int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
{
int err;
/* Consider the frames with reserved multicast destination
* addresses matching 01:80:c2:00:00:0x as MGMT.
*/
err = mv88e6xxx_g2_mgmt_enable_0x(chip, 0xffff);
if (err)
return err;
return mv88e6xxx_g2_switch_mgmt_rsvd2cpu(chip, true);
}
int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
{
int err;
/* Consider the frames with reserved multicast destination
* addresses matching 01:80:c2:00:00:2x as MGMT.
*/
err = mv88e6xxx_g2_mgmt_enable_2x(chip, 0xffff);
if (err)
return err;
return mv88e6185_g2_mgmt_rsvd2cpu(chip);
}
/* Offset 0x06: Device Mapping Table register */
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
int port)
{
u16 val = (target << 8) | (port & 0x1f);
/* Modern chips use 5 bits to define a device mapping port,
* but bit 4 is reserved on older chips, so it is safe to use.
*/
return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_DEVICE_MAPPING,
MV88E6XXX_G2_DEVICE_MAPPING_UPDATE | val);
}
/* Offset 0x07: Trunk Mask Table register */
int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
bool hash, u16 mask)
{
u16 val = (num << 12) | (mask & mv88e6xxx_port_mask(chip));
if (hash)
val |= MV88E6XXX_G2_TRUNK_MASK_HASH;
return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_TRUNK_MASK,
MV88E6XXX_G2_TRUNK_MASK_UPDATE | val);
}
/* Offset 0x08: Trunk Mapping Table register */
int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
u16 map)
{
const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
u16 val = (id << 11) | (map & port_mask);
return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_TRUNK_MAPPING,
MV88E6XXX_G2_TRUNK_MAPPING_UPDATE | val);
}
int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
{
const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
int i, err;
/* Clear all eight possible Trunk Mask vectors */
for (i = 0; i < 8; ++i) {
err = mv88e6xxx_g2_trunk_mask_write(chip, i, false, port_mask);
if (err)
return err;
}
/* Clear all sixteen possible Trunk ID routing vectors */
for (i = 0; i < 16; ++i) {
err = mv88e6xxx_g2_trunk_mapping_write(chip, i, 0);
if (err)
return err;
}
return 0;
}
/* Offset 0x09: Ingress Rate Command register
* Offset 0x0A: Ingress Rate Data register
*/
static int mv88e6xxx_g2_irl_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G2_IRL_CMD_BUSY);
return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_IRL_CMD, bit, 0);
}
static int mv88e6xxx_g2_irl_op(struct mv88e6xxx_chip *chip, u16 op, int port,
int res, int reg)
{
int err;
err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_IRL_CMD,
MV88E6XXX_G2_IRL_CMD_BUSY | op | (port << 8) |
(res << 5) | reg);
if (err)
return err;
return mv88e6xxx_g2_irl_wait(chip);
}
int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_g2_irl_op(chip, MV88E6352_G2_IRL_CMD_OP_INIT_ALL, port,
0, 0);
}
int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_g2_irl_op(chip, MV88E6390_G2_IRL_CMD_OP_INIT_ALL, port,
0, 0);
}
/* Offset 0x0B: Cross-chip Port VLAN (Addr) Register
* Offset 0x0C: Cross-chip Port VLAN Data Register
*/
static int mv88e6xxx_g2_pvt_op_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G2_PVT_ADDR_BUSY);
return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_PVT_ADDR, bit, 0);
}
static int mv88e6xxx_g2_pvt_op(struct mv88e6xxx_chip *chip, int src_dev,
int src_port, u16 op)
{
int err;
/* 9-bit Cross-chip PVT pointer: with MV88E6XXX_G2_MISC_5_BIT_PORT
* cleared, source device is 5-bit, source port is 4-bit.
*/
op |= MV88E6XXX_G2_PVT_ADDR_BUSY;
op |= (src_dev & 0x1f) << 4;
op |= (src_port & 0xf);
err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_PVT_ADDR, op);
if (err)
return err;
return mv88e6xxx_g2_pvt_op_wait(chip);
}
int mv88e6xxx_g2_pvt_read(struct mv88e6xxx_chip *chip, int src_dev,
int src_port, u16 *data)
{
int err;
err = mv88e6xxx_g2_pvt_op_wait(chip);
if (err)
return err;
err = mv88e6xxx_g2_pvt_op(chip, src_dev, src_port,
MV88E6XXX_G2_PVT_ADDR_OP_READ);
if (err)
return err;
return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_PVT_DATA, data);
}
int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
int src_port, u16 data)
{
int err;
err = mv88e6xxx_g2_pvt_op_wait(chip);
if (err)
return err;
err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_PVT_DATA, data);
if (err)
return err;
return mv88e6xxx_g2_pvt_op(chip, src_dev, src_port,
MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN);
}
/* Offset 0x0D: Switch MAC/WoL/WoF register */
static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip,
unsigned int pointer, u8 data)
{
u16 val = (pointer << 8) | data;
return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MAC,
MV88E6XXX_G2_SWITCH_MAC_UPDATE | val);
}
int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
{
int i, err;
for (i = 0; i < 6; i++) {
err = mv88e6xxx_g2_switch_mac_write(chip, i, addr[i]);
if (err)
break;
}
return err;
}
/* Offset 0x0E: ATU Statistics */
int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin)
{
return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_ATU_STATS,
kind | bin);
}
int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats)
{
return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_ATU_STATS, stats);
}
/* Offset 0x0F: Priority Override Table */
static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
u8 data)
{
u16 val = (pointer << 8) | (data & 0x7);
return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_PRIO_OVERRIDE,
MV88E6XXX_G2_PRIO_OVERRIDE_UPDATE | val);
}
int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
{
int i, err;
/* Clear all sixteen possible Priority Override entries */
for (i = 0; i < 16; i++) {
err = mv88e6xxx_g2_pot_write(chip, i, 0);
if (err)
break;
}
return err;
}
/* Offset 0x14: EEPROM Command
* Offset 0x15: EEPROM Data (for 16-bit data access)
* Offset 0x15: EEPROM Addr (for 8-bit data access)
*/
static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
int err;
err = mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_EEPROM_CMD, bit, 0);
if (err)
return err;
bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_RUNNING);
return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_EEPROM_CMD, bit, 0);
}
static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
{
int err;
err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_EEPROM_CMD,
MV88E6XXX_G2_EEPROM_CMD_BUSY | cmd);
if (err)
return err;
return mv88e6xxx_g2_eeprom_wait(chip);
}
static int mv88e6xxx_g2_eeprom_read8(struct mv88e6xxx_chip *chip,
u16 addr, u8 *data)
{
u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_READ;
int err;
err = mv88e6xxx_g2_eeprom_wait(chip);
if (err)
return err;
err = mv88e6xxx_g2_write(chip, MV88E6390_G2_EEPROM_ADDR, addr);
if (err)
return err;
err = mv88e6xxx_g2_eeprom_cmd(chip, cmd);
if (err)
return err;
err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_EEPROM_CMD, &cmd);
if (err)
return err;
*data = cmd & 0xff;
return 0;
}
static int mv88e6xxx_g2_eeprom_write8(struct mv88e6xxx_chip *chip,
u16 addr, u8 data)
{
u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_WRITE |
MV88E6XXX_G2_EEPROM_CMD_WRITE_EN;
int err;
err = mv88e6xxx_g2_eeprom_wait(chip);
if (err)
return err;
err = mv88e6xxx_g2_write(chip, MV88E6390_G2_EEPROM_ADDR, addr);
if (err)
return err;
return mv88e6xxx_g2_eeprom_cmd(chip, cmd | data);
}
static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip,
u8 addr, u16 *data)
{
u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_READ | addr;
int err;
err = mv88e6xxx_g2_eeprom_wait(chip);
if (err)
return err;
err = mv88e6xxx_g2_eeprom_cmd(chip, cmd);
if (err)
return err;
return mv88e6xxx_g2_read(chip, MV88E6352_G2_EEPROM_DATA, data);
}
static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip,
u8 addr, u16 data)
{
u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_WRITE | addr;
int err;
err = mv88e6xxx_g2_eeprom_wait(chip);
if (err)
return err;
err = mv88e6xxx_g2_write(chip, MV88E6352_G2_EEPROM_DATA, data);
if (err)
return err;
return mv88e6xxx_g2_eeprom_cmd(chip, cmd);
}
int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data)
{
unsigned int offset = eeprom->offset;
unsigned int len = eeprom->len;
int err;
eeprom->len = 0;
while (len) {
err = mv88e6xxx_g2_eeprom_read8(chip, offset, data);
if (err)
return err;
eeprom->len++;
offset++;
data++;
len--;
}
return 0;
}
int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data)
{
unsigned int offset = eeprom->offset;
unsigned int len = eeprom->len;
int err;
eeprom->len = 0;
while (len) {
err = mv88e6xxx_g2_eeprom_write8(chip, offset, *data);
if (err)
return err;
eeprom->len++;
offset++;
data++;
len--;
}
return 0;
}
int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data)
{
unsigned int offset = eeprom->offset;
unsigned int len = eeprom->len;
u16 val;
int err;
eeprom->len = 0;
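	/* The EEPROM is accessed as 16-bit words; an odd starting offset means
	 * the first requested byte is the high half of a word.
	 */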
if (offset & 1) {
err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
if (err)
return err;
*data++ = (val >> 8) & 0xff;
offset++;
len--;
eeprom->len++;
}
while (len >= 2) {
err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
if (err)
return err;
*data++ = val & 0xff;
*data++ = (val >> 8) & 0xff;
offset += 2;
len -= 2;
eeprom->len += 2;
}
if (len) {
err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
if (err)
return err;
*data++ = val & 0xff;
offset++;
len--;
eeprom->len++;
}
return 0;
}
int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data)
{
unsigned int offset = eeprom->offset;
unsigned int len = eeprom->len;
u16 val;
int err;
/* Ensure the RO WriteEn bit is set */
err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_EEPROM_CMD, &val);
if (err)
return err;
if (!(val & MV88E6XXX_G2_EEPROM_CMD_WRITE_EN))
return -EROFS;
eeprom->len = 0;
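	/* For an odd starting offset, read-modify-write the containing 16-bit
	 * word so the neighbouring byte is preserved.
	 */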
if (offset & 1) {
err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
if (err)
return err;
val = (*data++ << 8) | (val & 0xff);
err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
if (err)
return err;
offset++;
len--;
eeprom->len++;
}
while (len >= 2) {
val = *data++;
val |= *data++ << 8;
err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
if (err)
return err;
offset += 2;
len -= 2;
eeprom->len += 2;
}
if (len) {
err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
if (err)
return err;
val = (val & 0xff00) | *data++;
err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
if (err)
return err;
offset++;
len--;
eeprom->len++;
}
return 0;
}
/* Offset 0x18: SMI PHY Command Register
* Offset 0x19: SMI PHY Data Register
*/
static int mv88e6xxx_g2_smi_phy_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G2_SMI_PHY_CMD_BUSY);
return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_SMI_PHY_CMD, bit, 0);
}
static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
{
int err;
err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_CMD,
MV88E6XXX_G2_SMI_PHY_CMD_BUSY | cmd);
if (err)
return err;
return mv88e6xxx_g2_smi_phy_wait(chip);
}
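/* Build an SMI PHY command word: select internal or external PHYs, Clause 22
 * or Clause 45 framing, then the device and register address fields.
 */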
static int mv88e6xxx_g2_smi_phy_access(struct mv88e6xxx_chip *chip,
bool external, bool c45, u16 op, int dev,
int reg)
{
u16 cmd = op;
if (external)
cmd |= MV88E6390_G2_SMI_PHY_CMD_FUNC_EXTERNAL;
else
cmd |= MV88E6390_G2_SMI_PHY_CMD_FUNC_INTERNAL; /* empty mask */
if (c45)
cmd |= MV88E6XXX_G2_SMI_PHY_CMD_MODE_45; /* empty mask */
else
cmd |= MV88E6XXX_G2_SMI_PHY_CMD_MODE_22;
dev <<= __bf_shf(MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK);
cmd |= dev & MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK;
cmd |= reg & MV88E6XXX_G2_SMI_PHY_CMD_REG_ADDR_MASK;
return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
}
static int mv88e6xxx_g2_smi_phy_access_c22(struct mv88e6xxx_chip *chip,
bool external, u16 op, int dev,
int reg)
{
return mv88e6xxx_g2_smi_phy_access(chip, external, false, op, dev, reg);
}
/* IEEE 802.3 Clause 22 Read Data Register */
static int mv88e6xxx_g2_smi_phy_read_data_c22(struct mv88e6xxx_chip *chip,
bool external, int dev, int reg,
u16 *data)
{
u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_22_READ_DATA;
int err;
err = mv88e6xxx_g2_smi_phy_wait(chip);
if (err)
return err;
err = mv88e6xxx_g2_smi_phy_access_c22(chip, external, op, dev, reg);
if (err)
return err;
return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
}
/* IEEE 802.3 Clause 22 Write Data Register */
static int mv88e6xxx_g2_smi_phy_write_data_c22(struct mv88e6xxx_chip *chip,
bool external, int dev, int reg,
u16 data)
{
u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_22_WRITE_DATA;
int err;
err = mv88e6xxx_g2_smi_phy_wait(chip);
if (err)
return err;
err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
if (err)
return err;
return mv88e6xxx_g2_smi_phy_access_c22(chip, external, op, dev, reg);
}
static int mv88e6xxx_g2_smi_phy_access_c45(struct mv88e6xxx_chip *chip,
bool external, u16 op, int port,
int dev)
{
return mv88e6xxx_g2_smi_phy_access(chip, external, true, op, port, dev);
}
/* IEEE 802.3 Clause 45 Write Address Register */
static int mv88e6xxx_g2_smi_phy_write_addr_c45(struct mv88e6xxx_chip *chip,
bool external, int port, int dev,
int addr)
{
u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_ADDR;
int err;
err = mv88e6xxx_g2_smi_phy_wait(chip);
if (err)
return err;
err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_DATA, addr);
if (err)
return err;
return mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
}
/* IEEE 802.3 Clause 45 Read Data Register */
static int mv88e6xxx_g2_smi_phy_read_data_c45(struct mv88e6xxx_chip *chip,
bool external, int port, int dev,
u16 *data)
{
u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_45_READ_DATA;
int err;
err = mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
if (err)
return err;
return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
}
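/* Clause 45 accesses are two-step: latch the register address first, then
 * issue the data read or write against the same port/devad pair.
 */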
static int _mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
bool external, int port, int devad,
int reg, u16 *data)
{
int err;
err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, devad,
reg);
if (err)
return err;
return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, devad,
data);
}
/* IEEE 802.3 Clause 45 Write Data Register */
static int mv88e6xxx_g2_smi_phy_write_data_c45(struct mv88e6xxx_chip *chip,
bool external, int port, int dev,
u16 data)
{
u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_DATA;
int err;
err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
if (err)
return err;
return mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
}
static int _mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
bool external, int port, int devad,
int reg, u16 data)
{
int err;
err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, devad,
reg);
if (err)
return err;
return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, devad,
data);
}
int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip,
struct mii_bus *bus,
int addr, int reg, u16 *val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
return mv88e6xxx_g2_smi_phy_read_data_c22(chip, external, addr, reg,
val);
}
int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
struct mii_bus *bus, int addr, int devad,
int reg, u16 *val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
return _mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, devad, reg,
val);
}
int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip,
struct mii_bus *bus, int addr, int reg,
u16 val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
return mv88e6xxx_g2_smi_phy_write_data_c22(chip, external, addr, reg,
val);
}
int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
struct mii_bus *bus, int addr, int devad,
int reg, u16 val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
return _mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, devad, reg,
val);
}
/* Offset 0x1B: Watchdog Control */
static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
{
u16 reg;
	mv88e6xxx_g2_read(chip, MV88E6352_G2_WDOG_CTL, &reg);
dev_info(chip->dev, "Watchdog event: 0x%04x", reg);
return IRQ_HANDLED;
}
static void mv88e6097_watchdog_free(struct mv88e6xxx_chip *chip)
{
u16 reg;
	mv88e6xxx_g2_read(chip, MV88E6352_G2_WDOG_CTL, &reg);
reg &= ~(MV88E6352_G2_WDOG_CTL_EGRESS_ENABLE |
MV88E6352_G2_WDOG_CTL_QC_ENABLE);
mv88e6xxx_g2_write(chip, MV88E6352_G2_WDOG_CTL, reg);
}
static int mv88e6097_watchdog_setup(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g2_write(chip, MV88E6352_G2_WDOG_CTL,
MV88E6352_G2_WDOG_CTL_EGRESS_ENABLE |
MV88E6352_G2_WDOG_CTL_QC_ENABLE |
MV88E6352_G2_WDOG_CTL_SWRESET);
}
const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {
.irq_action = mv88e6097_watchdog_action,
.irq_setup = mv88e6097_watchdog_setup,
.irq_free = mv88e6097_watchdog_free,
};
static void mv88e6250_watchdog_free(struct mv88e6xxx_chip *chip)
{
u16 reg;
	mv88e6xxx_g2_read(chip, MV88E6250_G2_WDOG_CTL, &reg);
reg &= ~(MV88E6250_G2_WDOG_CTL_EGRESS_ENABLE |
MV88E6250_G2_WDOG_CTL_QC_ENABLE);
mv88e6xxx_g2_write(chip, MV88E6250_G2_WDOG_CTL, reg);
}
static int mv88e6250_watchdog_setup(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g2_write(chip, MV88E6250_G2_WDOG_CTL,
MV88E6250_G2_WDOG_CTL_EGRESS_ENABLE |
MV88E6250_G2_WDOG_CTL_QC_ENABLE |
MV88E6250_G2_WDOG_CTL_SWRESET);
}
const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops = {
.irq_action = mv88e6097_watchdog_action,
.irq_setup = mv88e6250_watchdog_setup,
.irq_free = mv88e6250_watchdog_free,
};
static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
MV88E6390_G2_WDOG_CTL_UPDATE |
MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE |
MV88E6390_G2_WDOG_CTL_CUT_THROUGH |
MV88E6390_G2_WDOG_CTL_QUEUE_CONTROLLER |
MV88E6390_G2_WDOG_CTL_EGRESS |
MV88E6390_G2_WDOG_CTL_FORCE_IRQ);
}
static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
{
u16 reg;
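	/* Point the indirect pointer/data register at the watchdog event and
	 * history fields in turn and log their contents.
	 */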
mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
MV88E6390_G2_WDOG_CTL_PTR_EVENT);
	mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, &reg);
dev_info(chip->dev, "Watchdog event: 0x%04x",
reg & MV88E6390_G2_WDOG_CTL_DATA_MASK);
mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
MV88E6390_G2_WDOG_CTL_PTR_HISTORY);
	mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, &reg);
dev_info(chip->dev, "Watchdog history: 0x%04x",
reg & MV88E6390_G2_WDOG_CTL_DATA_MASK);
/* Trigger a software reset to try to recover the switch */
if (chip->info->ops->reset)
chip->info->ops->reset(chip);
mv88e6390_watchdog_setup(chip);
return IRQ_HANDLED;
}
static void mv88e6390_watchdog_free(struct mv88e6xxx_chip *chip)
{
mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
MV88E6390_G2_WDOG_CTL_UPDATE |
MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE);
}
const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
.irq_action = mv88e6390_watchdog_action,
.irq_setup = mv88e6390_watchdog_setup,
.irq_free = mv88e6390_watchdog_free,
};
static int mv88e6393x_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
{
mv88e6390_watchdog_action(chip, irq);
/* Fix for clearing the force WD event bit.
* Unreleased erratum on mv88e6393x.
*/
mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
MV88E6390_G2_WDOG_CTL_UPDATE |
MV88E6390_G2_WDOG_CTL_PTR_EVENT);
return IRQ_HANDLED;
}
const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops = {
.irq_action = mv88e6393x_watchdog_action,
.irq_setup = mv88e6390_watchdog_setup,
.irq_free = mv88e6390_watchdog_free,
};
static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
irqreturn_t ret = IRQ_NONE;
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->watchdog_ops->irq_action)
ret = chip->info->ops->watchdog_ops->irq_action(chip, irq);
mv88e6xxx_reg_unlock(chip);
return ret;
}
static void mv88e6xxx_g2_watchdog_free(struct mv88e6xxx_chip *chip)
{
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->watchdog_ops->irq_free)
chip->info->ops->watchdog_ops->irq_free(chip);
mv88e6xxx_reg_unlock(chip);
free_irq(chip->watchdog_irq, chip);
irq_dispose_mapping(chip->watchdog_irq);
}
static int mv88e6xxx_g2_watchdog_setup(struct mv88e6xxx_chip *chip)
{
int err;
chip->watchdog_irq = irq_find_mapping(chip->g2_irq.domain,
MV88E6XXX_G2_INT_SOURCE_WATCHDOG);
if (chip->watchdog_irq < 0)
return chip->watchdog_irq;
snprintf(chip->watchdog_irq_name, sizeof(chip->watchdog_irq_name),
"mv88e6xxx-%s-watchdog", dev_name(chip->dev));
err = request_threaded_irq(chip->watchdog_irq, NULL,
mv88e6xxx_g2_watchdog_thread_fn,
IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
chip->watchdog_irq_name, chip);
if (err)
return err;
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->watchdog_ops->irq_setup)
err = chip->info->ops->watchdog_ops->irq_setup(chip);
mv88e6xxx_reg_unlock(chip);
return err;
}
/* Offset 0x1D: Misc Register */
static int mv88e6xxx_g2_misc_5_bit_port(struct mv88e6xxx_chip *chip,
bool port_5_bit)
{
u16 val;
int err;
err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_MISC, &val);
if (err)
return err;
if (port_5_bit)
val |= MV88E6XXX_G2_MISC_5_BIT_PORT;
else
val &= ~MV88E6XXX_G2_MISC_5_BIT_PORT;
return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MISC, val);
}
int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g2_misc_5_bit_port(chip, false);
}
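/* The irqchip mask/unmask callbacks only update a shadow mask; the hardware
 * interrupt mask register is written back in irq_bus_sync_unlock(), under the
 * register lock taken by irq_bus_lock().
 */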
static void mv88e6xxx_g2_irq_mask(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
unsigned int n = d->hwirq;
chip->g2_irq.masked |= (1 << n);
}
static void mv88e6xxx_g2_irq_unmask(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
unsigned int n = d->hwirq;
chip->g2_irq.masked &= ~(1 << n);
}
static irqreturn_t mv88e6xxx_g2_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
unsigned int nhandled = 0;
unsigned int sub_irq;
unsigned int n;
int err;
u16 reg;
mv88e6xxx_reg_lock(chip);
	err = mv88e6xxx_g2_int_source(chip, &reg);
mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
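	/* Fan the Global 2 interrupt out to the per-source nested handlers. */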
for (n = 0; n < 16; ++n) {
if (reg & (1 << n)) {
sub_irq = irq_find_mapping(chip->g2_irq.domain, n);
handle_nested_irq(sub_irq);
++nhandled;
}
}
out:
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
static void mv88e6xxx_g2_irq_bus_lock(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
mv88e6xxx_reg_lock(chip);
}
static void mv88e6xxx_g2_irq_bus_sync_unlock(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
int err;
err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked);
if (err)
dev_err(chip->dev, "failed to mask interrupts\n");
mv88e6xxx_reg_unlock(chip);
}
static const struct irq_chip mv88e6xxx_g2_irq_chip = {
.name = "mv88e6xxx-g2",
.irq_mask = mv88e6xxx_g2_irq_mask,
.irq_unmask = mv88e6xxx_g2_irq_unmask,
.irq_bus_lock = mv88e6xxx_g2_irq_bus_lock,
.irq_bus_sync_unlock = mv88e6xxx_g2_irq_bus_sync_unlock,
};
static int mv88e6xxx_g2_irq_domain_map(struct irq_domain *d,
unsigned int irq,
irq_hw_number_t hwirq)
{
struct mv88e6xxx_chip *chip = d->host_data;
irq_set_chip_data(irq, d->host_data);
irq_set_chip_and_handler(irq, &chip->g2_irq.chip, handle_level_irq);
irq_set_noprobe(irq);
return 0;
}
static const struct irq_domain_ops mv88e6xxx_g2_irq_domain_ops = {
.map = mv88e6xxx_g2_irq_domain_map,
.xlate = irq_domain_xlate_twocell,
};
void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip)
{
int irq, virq;
mv88e6xxx_g2_watchdog_free(chip);
free_irq(chip->device_irq, chip);
irq_dispose_mapping(chip->device_irq);
for (irq = 0; irq < 16; irq++) {
virq = irq_find_mapping(chip->g2_irq.domain, irq);
irq_dispose_mapping(virq);
}
irq_domain_remove(chip->g2_irq.domain);
}
int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
{
int err, irq, virq;
chip->g2_irq.masked = ~0;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked);
mv88e6xxx_reg_unlock(chip);
if (err)
return err;
chip->g2_irq.domain = irq_domain_add_simple(
chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
if (!chip->g2_irq.domain)
return -ENOMEM;
for (irq = 0; irq < 16; irq++)
irq_create_mapping(chip->g2_irq.domain, irq);
chip->g2_irq.chip = mv88e6xxx_g2_irq_chip;
chip->device_irq = irq_find_mapping(chip->g1_irq.domain,
MV88E6XXX_G1_STS_IRQ_DEVICE);
if (chip->device_irq < 0) {
err = chip->device_irq;
goto out;
}
snprintf(chip->device_irq_name, sizeof(chip->device_irq_name),
"mv88e6xxx-%s-g2", dev_name(chip->dev));
err = request_threaded_irq(chip->device_irq, NULL,
mv88e6xxx_g2_irq_thread_fn,
IRQF_ONESHOT, chip->device_irq_name, chip);
if (err)
goto out;
return mv88e6xxx_g2_watchdog_setup(chip);
out:
for (irq = 0; irq < 16; irq++) {
virq = irq_find_mapping(chip->g2_irq.domain, irq);
irq_dispose_mapping(virq);
}
irq_domain_remove(chip->g2_irq.domain);
return err;
}
int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip,
struct mii_bus *bus)
{
int phy_start = chip->info->internal_phys_offset;
int phy_end = chip->info->internal_phys_offset +
chip->info->num_internal_phys;
int phy, irq;
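	/* Wire each internal PHY's Global 2 interrupt into the MDIO bus irq
	 * table so phylib can use it instead of polling.
	 */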
for (phy = phy_start; phy < phy_end; phy++) {
irq = irq_find_mapping(chip->g2_irq.domain, phy);
if (irq < 0)
return irq;
bus->irq[chip->info->phy_base_addr + phy] = irq;
}
return 0;
}
void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip,
struct mii_bus *bus)
{
}
| linux-master | drivers/net/dsa/mv88e6xxx/global2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6xxx Switch Port Registers support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2016-2017 Savoir-faire Linux Inc.
* Vivien Didelot <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/if_bridge.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include "chip.h"
#include "global2.h"
#include "port.h"
#include "serdes.h"
int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
u16 *val)
{
int addr = chip->info->port_base_addr + port;
return mv88e6xxx_read(chip, addr, reg, val);
}
int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg,
int bit, int val)
{
int addr = chip->info->port_base_addr + port;
return mv88e6xxx_wait_bit(chip, addr, reg, bit, val);
}
int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
u16 val)
{
int addr = chip->info->port_base_addr + port;
return mv88e6xxx_write(chip, addr, reg, val);
}
/* Offset 0x00: MAC (or PCS or Physical) Status Register
*
* For most devices, this is read only. However the 6185 has the MyPause
* bit read/write.
*/
int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
int pause)
{
u16 reg;
int err;
	err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
return err;
if (pause)
reg |= MV88E6XXX_PORT_STS_MY_PAUSE;
else
reg &= ~MV88E6XXX_PORT_STS_MY_PAUSE;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
}
/* Offset 0x01: MAC (or PCS or Physical) Control Register
*
* Link, Duplex and Flow Control have one force bit, one value bit.
*
* For port's MAC speed, ForceSpd (or SpdValue) bits 1:0 program the value.
* Alternative values require the 200BASE (or AltSpeed) bit 12 set.
* Newer chips need a ForcedSpd bit 13 set to consider the value.
*/
static int mv88e6xxx_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
u16 reg;
int err;
	err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
return err;
reg &= ~(MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK |
MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK);
switch (mode) {
case PHY_INTERFACE_MODE_RGMII_RXID:
reg |= MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK;
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
reg |= MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
reg |= MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK |
MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK;
break;
case PHY_INTERFACE_MODE_RGMII:
break;
default:
return 0;
}
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
if (err)
return err;
dev_dbg(chip->dev, "p%d: delay RXCLK %s, TXCLK %s\n", port,
reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK ? "yes" : "no",
reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK ? "yes" : "no");
return 0;
}
int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
if (port < 5)
return -EOPNOTSUPP;
return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
}
int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
if (port != 0)
return -EOPNOTSUPP;
return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
}
int mv88e6320_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
if (port != 2 && port != 5 && port != 6)
return -EOPNOTSUPP;
return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
}
int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
{
u16 reg;
int err;
	err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
return err;
reg &= ~(MV88E6XXX_PORT_MAC_CTL_FORCE_LINK |
MV88E6XXX_PORT_MAC_CTL_LINK_UP);
switch (link) {
case LINK_FORCED_DOWN:
reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_LINK;
break;
case LINK_FORCED_UP:
reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_LINK |
MV88E6XXX_PORT_MAC_CTL_LINK_UP;
break;
case LINK_UNFORCED:
/* normal link detection */
break;
default:
return -EINVAL;
}
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
if (err)
return err;
dev_dbg(chip->dev, "p%d: %s link %s\n", port,
reg & MV88E6XXX_PORT_MAC_CTL_FORCE_LINK ? "Force" : "Unforce",
reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP ? "up" : "down");
return 0;
}
int mv88e6xxx_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup)
{
const struct mv88e6xxx_ops *ops = chip->info->ops;
int err = 0;
int link;
if (isup)
link = LINK_FORCED_UP;
else
link = LINK_FORCED_DOWN;
if (ops->port_set_link)
err = ops->port_set_link(chip, port, link);
return err;
}
int mv88e6185_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup)
{
const struct mv88e6xxx_ops *ops = chip->info->ops;
int err = 0;
int link;
if (mode == MLO_AN_INBAND)
link = LINK_UNFORCED;
else if (isup)
link = LINK_FORCED_UP;
else
link = LINK_FORCED_DOWN;
if (ops->port_set_link)
err = ops->port_set_link(chip, port, link);
return err;
}
static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
int port, int speed, bool alt_bit,
bool force_bit, int duplex)
{
u16 reg, ctrl;
int err;
switch (speed) {
case 10:
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_10;
break;
case 100:
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100;
break;
case 200:
if (alt_bit)
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100 |
MV88E6390_PORT_MAC_CTL_ALTSPEED;
else
ctrl = MV88E6065_PORT_MAC_CTL_SPEED_200;
break;
case 1000:
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000;
break;
case 2500:
if (alt_bit)
ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
MV88E6390_PORT_MAC_CTL_ALTSPEED;
else
ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000;
break;
case 10000:
/* all bits set, fall through... */
case SPEED_UNFORCED:
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_UNFORCED;
break;
default:
return -EOPNOTSUPP;
}
switch (duplex) {
case DUPLEX_HALF:
ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX;
break;
case DUPLEX_FULL:
ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL;
break;
case DUPLEX_UNFORCED:
/* normal duplex detection */
break;
default:
return -EOPNOTSUPP;
}
	err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
return err;
reg &= ~(MV88E6XXX_PORT_MAC_CTL_SPEED_MASK |
MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL);
if (alt_bit)
reg &= ~MV88E6390_PORT_MAC_CTL_ALTSPEED;
if (force_bit) {
reg &= ~MV88E6390_PORT_MAC_CTL_FORCE_SPEED;
if (speed != SPEED_UNFORCED)
ctrl |= MV88E6390_PORT_MAC_CTL_FORCE_SPEED;
}
reg |= ctrl;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
if (err)
return err;
if (speed != SPEED_UNFORCED)
dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
else
dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
dev_dbg(chip->dev, "p%d: %s %s duplex\n", port,
reg & MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX ? "Force" : "Unforce",
reg & MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL ? "full" : "half");
return 0;
}
/* Support 10, 100, 1000 Mbps (e.g. 88E6185 family) */
int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
if (speed == 200 || speed > 1000)
return -EOPNOTSUPP;
return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
duplex);
}
/* Support 10, 100 Mbps (e.g. 88E6250 family) */
int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
if (speed > 100)
return -EOPNOTSUPP;
return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
duplex);
}
/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6341) */
int mv88e6341_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
if (speed > 2500)
return -EOPNOTSUPP;
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
if (speed == 2500 && port < 5)
return -EOPNOTSUPP;
return mv88e6xxx_port_set_speed_duplex(chip, port, speed, !port, true,
duplex);
}
phy_interface_t mv88e6341_port_max_speed_mode(struct mv88e6xxx_chip *chip,
int port)
{
if (port == 5)
return PHY_INTERFACE_MODE_2500BASEX;
return PHY_INTERFACE_MODE_NA;
}
/* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
if (speed > 1000)
return -EOPNOTSUPP;
if (speed == 200 && port < 5)
return -EOPNOTSUPP;
return mv88e6xxx_port_set_speed_duplex(chip, port, speed, true, false,
duplex);
}
/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6390) */
int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
if (speed > 2500)
return -EOPNOTSUPP;
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
if (speed == 2500 && port < 9)
return -EOPNOTSUPP;
return mv88e6xxx_port_set_speed_duplex(chip, port, speed, true, true,
duplex);
}
phy_interface_t mv88e6390_port_max_speed_mode(struct mv88e6xxx_chip *chip,
int port)
{
if (port == 9 || port == 10)
return PHY_INTERFACE_MODE_2500BASEX;
return PHY_INTERFACE_MODE_NA;
}
/* Support 10, 100, 200, 1000, 2500, 10000 Mbps (e.g. 88E6190X) */
int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
if (speed >= 2500 && port < 9)
return -EOPNOTSUPP;
return mv88e6xxx_port_set_speed_duplex(chip, port, speed, true, true,
duplex);
}
phy_interface_t mv88e6390x_port_max_speed_mode(struct mv88e6xxx_chip *chip,
int port)
{
if (port == 9 || port == 10)
return PHY_INTERFACE_MODE_XAUI;
return PHY_INTERFACE_MODE_NA;
}
/* Support 10, 100, 200, 1000, 2500, 5000, 10000 Mbps (e.g. 88E6393X)
* Function mv88e6xxx_port_set_speed_duplex() can't be used as the register
* values for speeds 2500 & 5000 conflict.
*/
int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
u16 reg, ctrl;
int err;
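	/* Speeds above 2500 Mbps are not supported on the 6361 variant, so
	 * reject them up front.
	 */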
if (chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6361 &&
speed > 2500)
return -EOPNOTSUPP;
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
if (speed >= 2500 && port > 0 && port < 9)
return -EOPNOTSUPP;
switch (speed) {
case 10:
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_10;
break;
case 100:
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100;
break;
case 200:
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100 |
MV88E6390_PORT_MAC_CTL_ALTSPEED;
break;
case 1000:
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000;
break;
case 2500:
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000 |
MV88E6390_PORT_MAC_CTL_ALTSPEED;
break;
case 5000:
ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
MV88E6390_PORT_MAC_CTL_ALTSPEED;
break;
case 10000:
case SPEED_UNFORCED:
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_UNFORCED;
break;
default:
return -EOPNOTSUPP;
}
switch (duplex) {
case DUPLEX_HALF:
ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX;
break;
case DUPLEX_FULL:
ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL;
break;
case DUPLEX_UNFORCED:
/* normal duplex detection */
break;
default:
return -EOPNOTSUPP;
}
	err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
return err;
reg &= ~(MV88E6XXX_PORT_MAC_CTL_SPEED_MASK |
MV88E6390_PORT_MAC_CTL_ALTSPEED |
MV88E6390_PORT_MAC_CTL_FORCE_SPEED);
if (speed != SPEED_UNFORCED)
reg |= MV88E6390_PORT_MAC_CTL_FORCE_SPEED;
reg |= ctrl;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
if (err)
return err;
if (speed != SPEED_UNFORCED)
dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
else
dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
dev_dbg(chip->dev, "p%d: %s %s duplex\n", port,
reg & MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX ? "Force" : "Unforce",
reg & MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL ? "full" : "half");
return 0;
}
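/* Editor's note (not part of the driver): a summary of the encoding used
 * above, derived from the switch statement. 2500 and 5000 Mbps have no
 * dedicated speed codes; they reuse the 1000 and 10000 base codes with
 * ALTSPEED set, which is why the generic helper cannot express both
 * (their register values conflict, as the comment above this function
 * notes):
 *
 *   10    -> SPEED_10
 *   100   -> SPEED_100
 *   200   -> SPEED_100   | ALTSPEED
 *   1000  -> SPEED_1000
 *   2500  -> SPEED_1000  | ALTSPEED
 *   5000  -> SPEED_10000 | ALTSPEED
 *   10000 -> SPEED_UNFORCED, with FORCE_SPEED still set
 */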
phy_interface_t mv88e6393x_port_max_speed_mode(struct mv88e6xxx_chip *chip,
int port)
{
if (port != 0 && port != 9 && port != 10)
return PHY_INTERFACE_MODE_NA;
if (chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6361)
return PHY_INTERFACE_MODE_2500BASEX;
return PHY_INTERFACE_MODE_10GBASER;
}
static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode, bool force)
{
u16 cmode;
u16 reg;
int err;
/* Default to a slow mode, freeing up SERDES interfaces for other
* ports which might use them for SFPs.
*/
if (mode == PHY_INTERFACE_MODE_NA)
mode = PHY_INTERFACE_MODE_1000BASEX;
switch (mode) {
case PHY_INTERFACE_MODE_RMII:
cmode = MV88E6XXX_PORT_STS_CMODE_RMII;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
cmode = MV88E6XXX_PORT_STS_CMODE_RGMII;
break;
case PHY_INTERFACE_MODE_1000BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_1000BASEX;
break;
case PHY_INTERFACE_MODE_SGMII:
cmode = MV88E6XXX_PORT_STS_CMODE_SGMII;
break;
case PHY_INTERFACE_MODE_2500BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;
break;
case PHY_INTERFACE_MODE_5GBASER:
cmode = MV88E6393X_PORT_STS_CMODE_5GBASER;
break;
case PHY_INTERFACE_MODE_XGMII:
case PHY_INTERFACE_MODE_XAUI:
cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;
break;
case PHY_INTERFACE_MODE_RXAUI:
cmode = MV88E6XXX_PORT_STS_CMODE_RXAUI;
break;
case PHY_INTERFACE_MODE_10GBASER:
cmode = MV88E6393X_PORT_STS_CMODE_10GBASER;
break;
case PHY_INTERFACE_MODE_USXGMII:
cmode = MV88E6393X_PORT_STS_CMODE_USXGMII;
break;
default:
cmode = 0;
}
/* cmode doesn't change, nothing to do for us unless forced */
if (cmode == chip->ports[port].cmode && !force)
return 0;
chip->ports[port].cmode = 0;
if (cmode) {
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
return err;
reg &= ~MV88E6XXX_PORT_STS_CMODE_MASK;
reg |= cmode;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
if (err)
return err;
chip->ports[port].cmode = cmode;
}
return 0;
}
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
if (port != 9 && port != 10)
return -EOPNOTSUPP;
return mv88e6xxx_port_set_cmode(chip, port, mode, false);
}
int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
if (port != 9 && port != 10)
return -EOPNOTSUPP;
switch (mode) {
case PHY_INTERFACE_MODE_NA:
return 0;
case PHY_INTERFACE_MODE_XGMII:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_RXAUI:
return -EINVAL;
default:
break;
}
return mv88e6xxx_port_set_cmode(chip, port, mode, false);
}
int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
int err;
u16 reg;
if (port != 0 && port != 9 && port != 10)
return -EOPNOTSUPP;
if (port == 9 || port == 10) {
switch (mode) {
case PHY_INTERFACE_MODE_RMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
return -EINVAL;
default:
break;
}
}
/* mv88e6393x errata 4.5: EEE should be disabled on SERDES ports */
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
return err;
reg &= ~MV88E6XXX_PORT_MAC_CTL_EEE;
reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_EEE;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
if (err)
return err;
return mv88e6xxx_port_set_cmode(chip, port, mode, false);
}
static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip,
int port)
{
int err, addr;
u16 reg, bits;
if (port != 5)
return -EOPNOTSUPP;
addr = chip->info->port_base_addr + port;
err = mv88e6xxx_port_hidden_read(chip, 0x7, addr, 0, &reg);
if (err)
return err;
bits = MV88E6341_PORT_RESERVED_1A_FORCE_CMODE |
MV88E6341_PORT_RESERVED_1A_SGMII_AN;
if ((reg & bits) == bits)
return 0;
reg |= bits;
return mv88e6xxx_port_hidden_write(chip, 0x7, addr, 0, reg);
}
int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
int err;
if (port != 5)
return -EOPNOTSUPP;
switch (mode) {
case PHY_INTERFACE_MODE_NA:
return 0;
case PHY_INTERFACE_MODE_XGMII:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_RXAUI:
return -EINVAL;
default:
break;
}
err = mv88e6341_port_set_cmode_writable(chip, port);
if (err)
return err;
return mv88e6xxx_port_set_cmode(chip, port, mode, true);
}
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
{
int err;
u16 reg;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
return err;
*cmode = reg & MV88E6185_PORT_STS_CMODE_MASK;
return 0;
}
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
{
int err;
u16 reg;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
return err;
*cmode = reg & MV88E6XXX_PORT_STS_CMODE_MASK;
return 0;
}
/* Offset 0x02: Jamming Control
*
* Do not limit the period of time that this port can be paused for by
* the remote end or the period of time that this port can pause the
* remote end.
*/
int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
u8 out)
{
return mv88e6xxx_port_write(chip, port, MV88E6097_PORT_JAM_CTL,
out << 8 | in);
}
int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
u8 out)
{
int err;
err = mv88e6xxx_port_write(chip, port, MV88E6390_PORT_FLOW_CTL,
MV88E6390_PORT_FLOW_CTL_UPDATE |
MV88E6390_PORT_FLOW_CTL_LIMIT_IN | in);
if (err)
return err;
return mv88e6xxx_port_write(chip, port, MV88E6390_PORT_FLOW_CTL,
MV88E6390_PORT_FLOW_CTL_UPDATE |
MV88E6390_PORT_FLOW_CTL_LIMIT_OUT | out);
}
/* Offset 0x04: Port Control Register */
static const char * const mv88e6xxx_port_state_names[] = {
[MV88E6XXX_PORT_CTL0_STATE_DISABLED] = "Disabled",
[MV88E6XXX_PORT_CTL0_STATE_BLOCKING] = "Blocking/Listening",
[MV88E6XXX_PORT_CTL0_STATE_LEARNING] = "Learning",
[MV88E6XXX_PORT_CTL0_STATE_FORWARDING] = "Forwarding",
};
int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state)
{
u16 reg;
int err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
reg &= ~MV88E6XXX_PORT_CTL0_STATE_MASK;
switch (state) {
case BR_STATE_DISABLED:
state = MV88E6XXX_PORT_CTL0_STATE_DISABLED;
break;
case BR_STATE_BLOCKING:
case BR_STATE_LISTENING:
state = MV88E6XXX_PORT_CTL0_STATE_BLOCKING;
break;
case BR_STATE_LEARNING:
state = MV88E6XXX_PORT_CTL0_STATE_LEARNING;
break;
case BR_STATE_FORWARDING:
state = MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
break;
default:
return -EINVAL;
}
reg |= state;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
if (err)
return err;
dev_dbg(chip->dev, "p%d: PortState set to %s\n", port,
mv88e6xxx_port_state_names[state]);
return 0;
}
int mv88e6xxx_port_set_egress_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_egress_mode mode)
{
int err;
u16 reg;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
reg &= ~MV88E6XXX_PORT_CTL0_EGRESS_MODE_MASK;
switch (mode) {
case MV88E6XXX_EGRESS_MODE_UNMODIFIED:
reg |= MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNMODIFIED;
break;
case MV88E6XXX_EGRESS_MODE_UNTAGGED:
reg |= MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNTAGGED;
break;
case MV88E6XXX_EGRESS_MODE_TAGGED:
reg |= MV88E6XXX_PORT_CTL0_EGRESS_MODE_TAGGED;
break;
case MV88E6XXX_EGRESS_MODE_ETHERTYPE:
reg |= MV88E6XXX_PORT_CTL0_EGRESS_MODE_ETHER_TYPE_DSA;
break;
default:
return -EINVAL;
}
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode)
{
int err;
u16 reg;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
reg &= ~MV88E6XXX_PORT_CTL0_FRAME_MODE_MASK;
switch (mode) {
case MV88E6XXX_FRAME_MODE_NORMAL:
reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_NORMAL;
break;
case MV88E6XXX_FRAME_MODE_DSA:
reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_DSA;
break;
default:
return -EINVAL;
}
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode)
{
int err;
u16 reg;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
reg &= ~MV88E6XXX_PORT_CTL0_FRAME_MODE_MASK;
switch (mode) {
case MV88E6XXX_FRAME_MODE_NORMAL:
reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_NORMAL;
break;
case MV88E6XXX_FRAME_MODE_DSA:
reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_DSA;
break;
case MV88E6XXX_FRAME_MODE_PROVIDER:
reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_PROVIDER;
break;
case MV88E6XXX_FRAME_MODE_ETHERTYPE:
reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_ETHER_TYPE_DSA;
break;
default:
return -EINVAL;
}
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
int port, bool unicast)
{
int err;
u16 reg;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
if (unicast)
reg |= MV88E6185_PORT_CTL0_FORWARD_UNKNOWN;
else
reg &= ~MV88E6185_PORT_CTL0_FORWARD_UNKNOWN;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
int mv88e6352_port_set_ucast_flood(struct mv88e6xxx_chip *chip, int port,
bool unicast)
{
int err;
u16 reg;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
if (unicast)
reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC;
else
reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
int mv88e6352_port_set_mcast_flood(struct mv88e6xxx_chip *chip, int port,
bool multicast)
{
int err;
u16 reg;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
if (multicast)
reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC;
else
reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
/* Offset 0x05: Port Control 1 */
int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
bool message_port)
{
u16 val;
int err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1, &val);
if (err)
return err;
if (message_port)
val |= MV88E6XXX_PORT_CTL1_MESSAGE_PORT;
else
val &= ~MV88E6XXX_PORT_CTL1_MESSAGE_PORT;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1, val);
}
int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port,
bool trunk, u8 id)
{
u16 val;
int err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1, &val);
if (err)
return err;
val &= ~MV88E6XXX_PORT_CTL1_TRUNK_ID_MASK;
if (trunk)
val |= MV88E6XXX_PORT_CTL1_TRUNK_PORT |
(id << MV88E6XXX_PORT_CTL1_TRUNK_ID_SHIFT);
else
val &= ~MV88E6XXX_PORT_CTL1_TRUNK_PORT;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1, val);
}
/* Offset 0x06: Port Based VLAN Map */
int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map)
{
const u16 mask = mv88e6xxx_port_mask(chip);
u16 reg;
int err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_BASE_VLAN, &reg);
if (err)
return err;
reg &= ~mask;
reg |= map & mask;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_BASE_VLAN, reg);
if (err)
return err;
dev_dbg(chip->dev, "p%d: VLANTable set to %.3x\n", port, map);
return 0;
}
int mv88e6xxx_port_get_fid(struct mv88e6xxx_chip *chip, int port, u16 *fid)
{
const u16 upper_mask = (mv88e6xxx_num_databases(chip) - 1) >> 4;
u16 reg;
int err;
/* Port's default FID lower 4 bits are located in reg 0x06, offset 12 */
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_BASE_VLAN, &reg);
if (err)
return err;
*fid = (reg & 0xf000) >> 12;
/* Port's default FID upper bits are located in reg 0x05, offset 0 */
if (upper_mask) {
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1,
&reg);
if (err)
return err;
*fid |= (reg & upper_mask) << 4;
}
return 0;
}
int mv88e6xxx_port_set_fid(struct mv88e6xxx_chip *chip, int port, u16 fid)
{
const u16 upper_mask = (mv88e6xxx_num_databases(chip) - 1) >> 4;
u16 reg;
int err;
if (fid >= mv88e6xxx_num_databases(chip))
return -EINVAL;
/* Port's default FID lower 4 bits are located in reg 0x06, offset 12 */
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_BASE_VLAN, &reg);
if (err)
return err;
reg &= 0x0fff;
reg |= (fid & 0x000f) << 12;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_BASE_VLAN, reg);
if (err)
return err;
/* Port's default FID upper bits are located in reg 0x05, offset 0 */
if (upper_mask) {
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1,
&reg);
if (err)
return err;
reg &= ~upper_mask;
reg |= (fid >> 4) & upper_mask;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1,
reg);
if (err)
return err;
}
dev_dbg(chip->dev, "p%d: FID set to %u\n", port, fid);
return 0;
}
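/* Editor's note: an illustrative sketch (not part of the driver) of the
 * FID packing performed by the two functions above, assuming a chip with
 * 4096 databases so that upper_mask = (4096 - 1) >> 4 = 0xff.
 */
static inline void example_fid_split(u16 fid, u16 *base_vlan_bits,
				     u16 *ctl1_bits)
{
	/* Lower 4 bits go to Port Base VLAN (0x06), bits 15:12. */
	*base_vlan_bits = (fid & 0x000f) << 12;
	/* Remaining bits go to Port Control 1 (0x05), bits 7:0. */
	*ctl1_bits = (fid >> 4) & 0xff;
	/* e.g. fid 0x123 -> base_vlan_bits 0x3000, ctl1_bits 0x12 */
}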
/* Offset 0x07: Default Port VLAN ID & Priority */
int mv88e6xxx_port_get_pvid(struct mv88e6xxx_chip *chip, int port, u16 *pvid)
{
u16 reg;
int err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN,
&reg);
if (err)
return err;
*pvid = reg & MV88E6XXX_PORT_DEFAULT_VLAN_MASK;
return 0;
}
int mv88e6xxx_port_set_pvid(struct mv88e6xxx_chip *chip, int port, u16 pvid)
{
u16 reg;
int err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN,
&reg);
if (err)
return err;
reg &= ~MV88E6XXX_PORT_DEFAULT_VLAN_MASK;
reg |= pvid & MV88E6XXX_PORT_DEFAULT_VLAN_MASK;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN,
reg);
if (err)
return err;
dev_dbg(chip->dev, "p%d: DefaultVID set to %u\n", port, pvid);
return 0;
}
/* Offset 0x08: Port Control 2 Register */
static const char * const mv88e6xxx_port_8021q_mode_names[] = {
[MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED] = "Disabled",
[MV88E6XXX_PORT_CTL2_8021Q_MODE_FALLBACK] = "Fallback",
[MV88E6XXX_PORT_CTL2_8021Q_MODE_CHECK] = "Check",
[MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE] = "Secure",
};
int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
int port, bool multicast)
{
int err;
u16 reg;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
if (err)
return err;
if (multicast)
reg |= MV88E6XXX_PORT_CTL2_DEFAULT_FORWARD;
else
reg &= ~MV88E6XXX_PORT_CTL2_DEFAULT_FORWARD;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port)
{
int err;
u16 reg;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
if (err)
return err;
reg &= ~MV88E6095_PORT_CTL2_CPU_PORT_MASK;
reg |= upstream_port;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_egress_direction direction,
bool mirror)
{
bool *mirror_port;
u16 reg;
u16 bit;
int err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
if (err)
return err;
switch (direction) {
case MV88E6XXX_EGRESS_DIR_INGRESS:
bit = MV88E6XXX_PORT_CTL2_INGRESS_MONITOR;
mirror_port = &chip->ports[port].mirror_ingress;
break;
case MV88E6XXX_EGRESS_DIR_EGRESS:
bit = MV88E6XXX_PORT_CTL2_EGRESS_MONITOR;
mirror_port = &chip->ports[port].mirror_egress;
break;
default:
return -EINVAL;
}
reg &= ~bit;
if (mirror)
reg |= bit;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
if (!err)
*mirror_port = mirror;
return err;
}
int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port,
bool locked)
{
u16 reg;
int err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
reg &= ~MV88E6XXX_PORT_CTL0_SA_FILT_MASK;
if (locked)
reg |= MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
if (err)
return err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, &reg);
if (err)
return err;
reg &= ~MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
if (locked)
reg |= MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, reg);
}
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode)
{
u16 reg;
int err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
if (err)
return err;
reg &= ~MV88E6XXX_PORT_CTL2_8021Q_MODE_MASK;
reg |= mode & MV88E6XXX_PORT_CTL2_8021Q_MODE_MASK;
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
if (err)
return err;
dev_dbg(chip->dev, "p%d: 802.1QMode set to %s\n", port,
mv88e6xxx_port_8021q_mode_names[mode]);
return 0;
}
int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
bool drop_untagged)
{
u16 old, new;
int err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &old);
if (err)
return err;
if (drop_untagged)
new = old | MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED;
else
new = old & ~MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED;
if (new == old)
return 0;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, new);
}
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map)
{
u16 reg;
int err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
if (err)
return err;
if (map)
reg |= MV88E6XXX_PORT_CTL2_MAP_DA;
else
reg &= ~MV88E6XXX_PORT_CTL2_MAP_DA;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
size_t size)
{
u16 reg;
int err;
size += VLAN_ETH_HLEN + ETH_FCS_LEN;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
if (err)
return err;
reg &= ~MV88E6XXX_PORT_CTL2_JUMBO_MODE_MASK;
if (size <= 1522)
reg |= MV88E6XXX_PORT_CTL2_JUMBO_MODE_1522;
else if (size <= 2048)
reg |= MV88E6XXX_PORT_CTL2_JUMBO_MODE_2048;
else if (size <= 10240)
reg |= MV88E6XXX_PORT_CTL2_JUMBO_MODE_10240;
else
return -ERANGE;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
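/* Editor's note: a worked example for the mapping above. The size
 * argument is the payload MTU; VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4) are
 * added first, so the thresholds apply to the full frame size:
 *
 *   MTU 1500 -> 1522 -> JUMBO_MODE_1522
 *   MTU 2000 -> 2022 -> JUMBO_MODE_2048
 *   MTU 9000 -> 9022 -> JUMBO_MODE_10240
 *   MTU above 10218 (10240 - 22) -> -ERANGE
 */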
/* Offset 0x09: Port Rate Control */
int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_EGRESS_RATE_CTL1,
0x0000);
}
int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_EGRESS_RATE_CTL1,
0x0001);
}
/* Offset 0x0B: Port Association Vector */
int mv88e6xxx_port_set_assoc_vector(struct mv88e6xxx_chip *chip, int port,
u16 pav)
{
u16 reg, mask;
int err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR,
&reg);
if (err)
return err;
mask = mv88e6xxx_port_mask(chip);
reg &= ~mask;
reg |= pav & mask;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR,
reg);
}
/* Offset 0x0C: Port ATU Control */
int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ATU_CTL, 0);
}
/* Offset 0x0D: (Priority) Override Register */
int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_PRI_OVERRIDE, 0);
}
/* Offset 0x0E: Policy & MGMT Control Register for FAMILY 6191X 6193X 6393X */
static int mv88e6393x_port_policy_read(struct mv88e6xxx_chip *chip, int port,
u16 pointer, u8 *data)
{
u16 reg;
int err;
err = mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_POLICY_MGMT_CTL,
pointer);
if (err)
return err;
err = mv88e6xxx_port_read(chip, port, MV88E6393X_PORT_POLICY_MGMT_CTL,
&reg);
if (err)
return err;
*data = reg;
return 0;
}
static int mv88e6393x_port_policy_write(struct mv88e6xxx_chip *chip, int port,
u16 pointer, u8 data)
{
u16 reg;
reg = MV88E6393X_PORT_POLICY_MGMT_CTL_UPDATE | pointer | data;
return mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_POLICY_MGMT_CTL,
reg);
}
static int mv88e6393x_port_policy_write_all(struct mv88e6xxx_chip *chip,
u16 pointer, u8 data)
{
int err, port;
for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
if (dsa_is_unused_port(chip->ds, port))
continue;
err = mv88e6393x_port_policy_write(chip, port, pointer, data);
if (err)
return err;
}
return 0;
}
int mv88e6393x_set_egress_port(struct mv88e6xxx_chip *chip,
enum mv88e6xxx_egress_direction direction,
int port)
{
u16 ptr;
int err;
switch (direction) {
case MV88E6XXX_EGRESS_DIR_INGRESS:
ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_INGRESS_DEST;
err = mv88e6393x_port_policy_write_all(chip, ptr, port);
if (err)
return err;
break;
case MV88E6XXX_EGRESS_DIR_EGRESS:
ptr = MV88E6393X_G2_EGRESS_MONITOR_DEST;
err = mv88e6xxx_g2_write(chip, ptr, port);
if (err)
return err;
break;
}
return 0;
}
int mv88e6393x_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port)
{
u16 ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_CPU_DEST;
u8 data = MV88E6393X_PORT_POLICY_MGMT_CTL_CPU_DEST_MGMTPRI |
upstream_port;
return mv88e6393x_port_policy_write(chip, port, ptr, data);
}
int mv88e6393x_port_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
{
u16 ptr;
int err;
/* Consider the frames with reserved multicast destination
* addresses matching 01:80:c2:00:00:00 and
* 01:80:c2:00:00:02 as MGMT.
*/
ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XLO;
err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff);
if (err)
return err;
ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XHI;
err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff);
if (err)
return err;
ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XLO;
err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff);
if (err)
return err;
ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XHI;
err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff);
if (err)
return err;
return 0;
}
/* Offset 0x10 & 0x11: EPC */
static int mv88e6393x_port_epc_wait_ready(struct mv88e6xxx_chip *chip, int port)
{
int bit = __bf_shf(MV88E6393X_PORT_EPC_CMD_BUSY);
return mv88e6xxx_port_wait_bit(chip, port, MV88E6393X_PORT_EPC_CMD, bit, 0);
}
/* Port Ether type for 6393X family */
int mv88e6393x_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
u16 etype)
{
u16 val;
int err;
err = mv88e6393x_port_epc_wait_ready(chip, port);
if (err)
return err;
err = mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_EPC_DATA, etype);
if (err)
return err;
val = MV88E6393X_PORT_EPC_CMD_BUSY |
MV88E6393X_PORT_EPC_CMD_WRITE |
MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE;
return mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_EPC_CMD, val);
}
/* Offset 0x0f: Port Ether type */
int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
u16 etype)
{
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ETH_TYPE, etype);
}
/* Offset 0x18: Port IEEE Priority Remapping Registers [0-3]
* Offset 0x19: Port IEEE Priority Remapping Registers [4-7]
*/
int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port)
{
int err;
/* Use a direct priority mapping for all IEEE tagged frames */
err = mv88e6xxx_port_write(chip, port,
MV88E6095_PORT_IEEE_PRIO_REMAP_0123,
0x3210);
if (err)
return err;
return mv88e6xxx_port_write(chip, port,
MV88E6095_PORT_IEEE_PRIO_REMAP_4567,
0x7654);
}
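/* Editor's note (an assumption, not stated in the driver): each nibble of
 * the values written above appears to hold the priority assigned to one
 * IEEE tag value - nibble k of REMAP_0123 for tag k (0-3), nibble k of
 * REMAP_4567 for tag k + 4 (4-7) - so 0x3210 and 0x7654 encode the
 * identity ("direct") mapping named in the comment above. Consult the
 * datasheet before relying on this layout.
 */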
static int mv88e6xxx_port_ieeepmt_write(struct mv88e6xxx_chip *chip,
int port, u16 table, u8 ptr, u16 data)
{
u16 reg;
reg = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE | table |
(ptr << __bf_shf(MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_PTR_MASK)) |
(data & MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_DATA_MASK);
return mv88e6xxx_port_write(chip, port,
MV88E6390_PORT_IEEE_PRIO_MAP_TABLE, reg);
}
int mv88e6390_port_tag_remap(struct mv88e6xxx_chip *chip, int port)
{
int err, i;
u16 table;
for (i = 0; i <= 7; i++) {
table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_INGRESS_PCP;
err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i,
(i | i << 4));
if (err)
return err;
table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_PCP;
err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i);
if (err)
return err;
table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_PCP;
err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i);
if (err)
return err;
table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_PCP;
err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i);
if (err)
return err;
}
return 0;
}
/* Offset 0x0E: Policy Control Register */
static int
mv88e6xxx_port_policy_mapping_get_pos(enum mv88e6xxx_policy_mapping mapping,
enum mv88e6xxx_policy_action action,
u16 *mask, u16 *val, int *shift)
{
switch (mapping) {
case MV88E6XXX_POLICY_MAPPING_DA:
*shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_DA_MASK);
*mask = MV88E6XXX_PORT_POLICY_CTL_DA_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_SA:
*shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_SA_MASK);
*mask = MV88E6XXX_PORT_POLICY_CTL_SA_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_VTU:
*shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VTU_MASK);
*mask = MV88E6XXX_PORT_POLICY_CTL_VTU_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_ETYPE:
*shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK);
*mask = MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_PPPOE:
*shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK);
*mask = MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_VBAS:
*shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK);
*mask = MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_OPT82:
*shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK);
*mask = MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK;
break;
case MV88E6XXX_POLICY_MAPPING_UDP:
*shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_UDP_MASK);
*mask = MV88E6XXX_PORT_POLICY_CTL_UDP_MASK;
break;
default:
return -EOPNOTSUPP;
}
switch (action) {
case MV88E6XXX_POLICY_ACTION_NORMAL:
*val = MV88E6XXX_PORT_POLICY_CTL_NORMAL;
break;
case MV88E6XXX_POLICY_ACTION_MIRROR:
*val = MV88E6XXX_PORT_POLICY_CTL_MIRROR;
break;
case MV88E6XXX_POLICY_ACTION_TRAP:
*val = MV88E6XXX_PORT_POLICY_CTL_TRAP;
break;
case MV88E6XXX_POLICY_ACTION_DISCARD:
*val = MV88E6XXX_PORT_POLICY_CTL_DISCARD;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_policy_mapping mapping,
enum mv88e6xxx_policy_action action)
{
u16 reg, mask, val;
int shift;
int err;
err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask,
&val, &shift);
if (err)
return err;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_POLICY_CTL, &reg);
if (err)
return err;
reg &= ~mask;
reg |= (val << shift) & mask;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_POLICY_CTL, reg);
}
int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_policy_mapping mapping,
enum mv88e6xxx_policy_action action)
{
u16 mask, val;
int shift;
int err;
u16 ptr;
u8 reg;
err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask,
&val, &shift);
if (err)
return err;
/* On the 6393x, the 16-bit Port Policy CTL register of older chips is
* replaced by the Port Policy MGMT CTL register, which can access more
* data, but only indirectly. The original 16-bit value is split across
* two 8-bit registers.
*/
ptr = shift / 8;
shift %= 8;
mask >>= ptr * 8;
err = mv88e6393x_port_policy_read(chip, port, ptr, &reg);
if (err)
return err;
reg &= ~mask;
reg |= (val << shift) & mask;
return mv88e6393x_port_policy_write(chip, port, ptr, reg);
}
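/* Editor's note: a worked example of the pointer/shift arithmetic above,
 * using a hypothetical mapping whose field occupies bits 13:12 of the
 * legacy 16-bit register (mask 0x3000, shift 12):
 *
 *   ptr   = 12 / 8      = 1    (second 8-bit indirect register)
 *   shift = 12 % 8      = 4
 *   mask  = 0x3000 >> 8 = 0x30
 *
 * so the action value lands in bits 5:4 of indirect register 1. The real
 * positions come from the MV88E6XXX_PORT_POLICY_CTL_*_MASK definitions.
 */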
| linux-master | drivers/net/dsa/mv88e6xxx/port.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/dsa.h>
#include "chip.h"
#include "devlink.h"
#include "global1.h"
#include "global2.h"
#include "port.h"
static int mv88e6xxx_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
{
if (chip->info->ops->atu_get_hash)
return chip->info->ops->atu_get_hash(chip, hash);
return -EOPNOTSUPP;
}
static int mv88e6xxx_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
{
if (chip->info->ops->atu_set_hash)
return chip->info->ops->atu_set_hash(chip, hash);
return -EOPNOTSUPP;
}
enum mv88e6xxx_devlink_param_id {
MV88E6XXX_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
};
int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
switch (id) {
case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
err = mv88e6xxx_atu_get_hash(chip, &ctx->val.vu8);
break;
default:
err = -EOPNOTSUPP;
break;
}
mv88e6xxx_reg_unlock(chip);
return err;
}
int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
switch (id) {
case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
err = mv88e6xxx_atu_set_hash(chip, ctx->val.vu8);
break;
default:
err = -EOPNOTSUPP;
break;
}
mv88e6xxx_reg_unlock(chip);
return err;
}
static const struct devlink_param mv88e6xxx_devlink_params[] = {
DSA_DEVLINK_PARAM_DRIVER(MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
"ATU_hash", DEVLINK_PARAM_TYPE_U8,
BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
};
int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds)
{
return dsa_devlink_params_register(ds, mv88e6xxx_devlink_params,
ARRAY_SIZE(mv88e6xxx_devlink_params));
}
void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds)
{
dsa_devlink_params_unregister(ds, mv88e6xxx_devlink_params,
ARRAY_SIZE(mv88e6xxx_devlink_params));
}
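/* Editor's note: illustrative usage (not part of the driver) of the
 * ATU_hash parameter registered above, via the devlink CLI; the device
 * handle below is a placeholder:
 *
 *   devlink dev param show <bus>/<dev> name ATU_hash
 *   devlink dev param set <bus>/<dev> name ATU_hash value 1 cmode runtime
 */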
enum mv88e6xxx_devlink_resource_id {
MV88E6XXX_RESOURCE_ID_ATU,
MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
};
static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
u16 bin)
{
u16 occupancy = 0;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL,
bin);
if (err) {
dev_err(chip->dev, "failed to set ATU stats kind/bin\n");
goto unlock;
}
err = mv88e6xxx_g1_atu_get_next(chip, 0);
if (err) {
dev_err(chip->dev, "failed to perform ATU get next\n");
goto unlock;
}
err = mv88e6xxx_g2_atu_stats_get(chip, &occupancy);
if (err) {
dev_err(chip->dev, "failed to get ATU stats\n");
goto unlock;
}
occupancy &= MV88E6XXX_G2_ATU_STATS_MASK;
unlock:
mv88e6xxx_reg_unlock(chip);
return occupancy;
}
static u64 mv88e6xxx_devlink_atu_bin_0_get(void *priv)
{
struct mv88e6xxx_chip *chip = priv;
return mv88e6xxx_devlink_atu_bin_get(chip,
MV88E6XXX_G2_ATU_STATS_BIN_0);
}
static u64 mv88e6xxx_devlink_atu_bin_1_get(void *priv)
{
struct mv88e6xxx_chip *chip = priv;
return mv88e6xxx_devlink_atu_bin_get(chip,
MV88E6XXX_G2_ATU_STATS_BIN_1);
}
static u64 mv88e6xxx_devlink_atu_bin_2_get(void *priv)
{
struct mv88e6xxx_chip *chip = priv;
return mv88e6xxx_devlink_atu_bin_get(chip,
MV88E6XXX_G2_ATU_STATS_BIN_2);
}
static u64 mv88e6xxx_devlink_atu_bin_3_get(void *priv)
{
struct mv88e6xxx_chip *chip = priv;
return mv88e6xxx_devlink_atu_bin_get(chip,
MV88E6XXX_G2_ATU_STATS_BIN_3);
}
static u64 mv88e6xxx_devlink_atu_get(void *priv)
{
return mv88e6xxx_devlink_atu_bin_0_get(priv) +
mv88e6xxx_devlink_atu_bin_1_get(priv) +
mv88e6xxx_devlink_atu_bin_2_get(priv) +
mv88e6xxx_devlink_atu_bin_3_get(priv);
}
int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds)
{
struct devlink_resource_size_params size_params;
struct mv88e6xxx_chip *chip = ds->priv;
int err;
devlink_resource_size_params_init(&size_params,
mv88e6xxx_num_macs(chip),
mv88e6xxx_num_macs(chip),
1, DEVLINK_RESOURCE_UNIT_ENTRY);
err = dsa_devlink_resource_register(ds, "ATU",
mv88e6xxx_num_macs(chip),
MV88E6XXX_RESOURCE_ID_ATU,
DEVLINK_RESOURCE_ID_PARENT_TOP,
&size_params);
if (err)
goto out;
devlink_resource_size_params_init(&size_params,
mv88e6xxx_num_macs(chip) / 4,
mv88e6xxx_num_macs(chip) / 4,
1, DEVLINK_RESOURCE_UNIT_ENTRY);
err = dsa_devlink_resource_register(ds, "ATU_bin_0",
mv88e6xxx_num_macs(chip) / 4,
MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
MV88E6XXX_RESOURCE_ID_ATU,
&size_params);
if (err)
goto out;
err = dsa_devlink_resource_register(ds, "ATU_bin_1",
mv88e6xxx_num_macs(chip) / 4,
MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
MV88E6XXX_RESOURCE_ID_ATU,
&size_params);
if (err)
goto out;
err = dsa_devlink_resource_register(ds, "ATU_bin_2",
mv88e6xxx_num_macs(chip) / 4,
MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
MV88E6XXX_RESOURCE_ID_ATU,
&size_params);
if (err)
goto out;
err = dsa_devlink_resource_register(ds, "ATU_bin_3",
mv88e6xxx_num_macs(chip) / 4,
MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
MV88E6XXX_RESOURCE_ID_ATU,
&size_params);
if (err)
goto out;
dsa_devlink_resource_occ_get_register(ds,
MV88E6XXX_RESOURCE_ID_ATU,
mv88e6xxx_devlink_atu_get,
chip);
dsa_devlink_resource_occ_get_register(ds,
MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
mv88e6xxx_devlink_atu_bin_0_get,
chip);
dsa_devlink_resource_occ_get_register(ds,
MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
mv88e6xxx_devlink_atu_bin_1_get,
chip);
dsa_devlink_resource_occ_get_register(ds,
MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
mv88e6xxx_devlink_atu_bin_2_get,
chip);
dsa_devlink_resource_occ_get_register(ds,
MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
mv88e6xxx_devlink_atu_bin_3_get,
chip);
return 0;
out:
dsa_devlink_resources_unregister(ds);
return err;
}
static int mv88e6xxx_region_global_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
u8 **data)
{
struct mv88e6xxx_region_priv *region_priv = ops->priv;
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
struct mv88e6xxx_chip *chip = ds->priv;
u16 *registers;
int i, err;
registers = kmalloc_array(32, sizeof(u16), GFP_KERNEL);
if (!registers)
return -ENOMEM;
mv88e6xxx_reg_lock(chip);
for (i = 0; i < 32; i++) {
switch (region_priv->id) {
case MV88E6XXX_REGION_GLOBAL1:
err = mv88e6xxx_g1_read(chip, i, &registers[i]);
break;
case MV88E6XXX_REGION_GLOBAL2:
err = mv88e6xxx_g2_read(chip, i, &registers[i]);
break;
default:
err = -EOPNOTSUPP;
}
if (err) {
kfree(registers);
goto out;
}
}
*data = (u8 *)registers;
out:
mv88e6xxx_reg_unlock(chip);
return err;
}
/* The ATU entry varies between mv88e6xxx chipset generations. Define
* a generic format which covers all the current and hopefully future
* mv88e6xxx generations
*/
struct mv88e6xxx_devlink_atu_entry {
/* The FID is scattered over multiple registers. */
u16 fid;
u16 atu_op;
u16 atu_data;
u16 atu_01;
u16 atu_23;
u16 atu_45;
};
static int mv88e6xxx_region_atu_snapshot_fid(struct mv88e6xxx_chip *chip,
int fid,
struct mv88e6xxx_devlink_atu_entry *table,
int *count)
{
u16 atu_op, atu_data, atu_01, atu_23, atu_45;
struct mv88e6xxx_atu_entry addr;
int err;
addr.state = 0;
eth_broadcast_addr(addr.mac);
do {
err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
if (err)
return err;
if (!addr.state)
break;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &atu_op);
if (err)
return err;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_DATA, &atu_data);
if (err)
return err;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC01, &atu_01);
if (err)
return err;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC23, &atu_23);
if (err)
return err;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC45, &atu_45);
if (err)
return err;
table[*count].fid = fid;
table[*count].atu_op = atu_op;
table[*count].atu_data = atu_data;
table[*count].atu_01 = atu_01;
table[*count].atu_23 = atu_23;
table[*count].atu_45 = atu_45;
(*count)++;
} while (!is_broadcast_ether_addr(addr.mac));
return 0;
}
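/* Editor's note: the walk above relies on the ATU GetNext convention used
 * elsewhere in this driver: seeding the MAC with the broadcast address
 * makes the first GetNext wrap to the lowest entry in the FID, and the
 * walk stops when GetNext returns the broadcast address again (or an
 * empty entry state). The raw ATU registers are re-read each iteration so
 * the snapshot keeps the unparsed hardware format.
 */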
static int mv88e6xxx_region_atu_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
u8 **data)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
struct mv88e6xxx_devlink_atu_entry *table;
struct mv88e6xxx_chip *chip = ds->priv;
int fid = -1, count, err;
table = kmalloc_array(mv88e6xxx_num_databases(chip),
sizeof(struct mv88e6xxx_devlink_atu_entry),
GFP_KERNEL);
if (!table)
return -ENOMEM;
memset(table, 0, mv88e6xxx_num_databases(chip) *
sizeof(struct mv88e6xxx_devlink_atu_entry));
count = 0;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_fid_map(chip, fid_bitmap);
if (err) {
kfree(table);
goto out;
}
while (1) {
fid = find_next_bit(fid_bitmap, MV88E6XXX_N_FID, fid + 1);
if (fid == MV88E6XXX_N_FID)
break;
err = mv88e6xxx_region_atu_snapshot_fid(chip, fid, table,
&count);
if (err) {
kfree(table);
goto out;
}
}
*data = (u8 *)table;
out:
mv88e6xxx_reg_unlock(chip);
return err;
}
/**
* struct mv88e6xxx_devlink_vtu_entry - Devlink VTU entry
* @fid: Global1/2: FID and VLAN policy.
* @sid: Global1/3: SID, unknown filters and learning.
* @op: Global1/5: FID (old chipsets).
* @vid: Global1/6: VID, valid, and page.
* @data: Global1/7-9: Membership data and priority override.
* @resvd: Reserved. Also happens to align the size to 16B.
*
* The VTU entry format varies between chipset generations, the
* descriptions above represent the superset of all possible
* information, not all fields are valid on all devices. Since this is
* a low-level debug interface, copy all data verbatim and defer
* parsing to the consumer.
*/
struct mv88e6xxx_devlink_vtu_entry {
u16 fid;
u16 sid;
u16 op;
u16 vid;
u16 data[3];
u16 resvd;
};
static int mv88e6xxx_region_vtu_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
u8 **data)
{
struct mv88e6xxx_devlink_vtu_entry *table, *entry;
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_vtu_entry vlan;
int err;
table = kcalloc(mv88e6xxx_max_vid(chip) + 1,
sizeof(struct mv88e6xxx_devlink_vtu_entry),
GFP_KERNEL);
if (!table)
return -ENOMEM;
entry = table;
vlan.vid = mv88e6xxx_max_vid(chip);
vlan.valid = false;
mv88e6xxx_reg_lock(chip);
do {
err = mv88e6xxx_g1_vtu_getnext(chip, &vlan);
if (err)
break;
if (!vlan.valid)
break;
err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_FID,
&entry->fid);
err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID,
&entry->sid);
err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP,
&entry->op);
err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID,
&entry->vid);
err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1,
&entry->data[0]);
err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA2,
&entry->data[1]);
err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA3,
&entry->data[2]);
if (err)
break;
entry++;
} while (vlan.vid < mv88e6xxx_max_vid(chip));
mv88e6xxx_reg_unlock(chip);
if (err) {
kfree(table);
return err;
}
*data = (u8 *)table;
return 0;
}
/**
* struct mv88e6xxx_devlink_stu_entry - Devlink STU entry
* @sid: Global1/3: SID, unknown filters and learning.
* @vid: Global1/6: Valid bit.
* @data: Global1/7-9: Membership data and priority override.
* @resvd: Reserved. In case we forgot something.
*
* The STU entry format varies between chipset generations. Peridot
* and Amethyst pack the STU data into Global1/7-8. Older silicon
* spreads the information across all three VTU data registers -
* inheriting the layout of even older hardware that had no STU at
* all. Since this is a low-level debug interface, copy all data
* verbatim and defer parsing to the consumer.
*/
struct mv88e6xxx_devlink_stu_entry {
u16 sid;
u16 vid;
u16 data[3];
u16 resvd;
};
static int mv88e6xxx_region_stu_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
u8 **data)
{
struct mv88e6xxx_devlink_stu_entry *table, *entry;
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_stu_entry stu;
int err;
table = kcalloc(mv88e6xxx_max_sid(chip) + 1,
sizeof(struct mv88e6xxx_devlink_stu_entry),
GFP_KERNEL);
if (!table)
return -ENOMEM;
entry = table;
stu.sid = mv88e6xxx_max_sid(chip);
stu.valid = false;
mv88e6xxx_reg_lock(chip);
do {
err = mv88e6xxx_g1_stu_getnext(chip, &stu);
if (err)
break;
if (!stu.valid)
break;
err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID,
&entry->sid);
err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID,
&entry->vid);
err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1,
&entry->data[0]);
err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA2,
&entry->data[1]);
err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA3,
&entry->data[2]);
if (err)
break;
entry++;
} while (stu.sid < mv88e6xxx_max_sid(chip));
mv88e6xxx_reg_unlock(chip);
if (err) {
kfree(table);
return err;
}
*data = (u8 *)table;
return 0;
}
static int mv88e6xxx_region_pvt_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
u8 **data)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
struct mv88e6xxx_chip *chip = ds->priv;
int dev, port, err;
u16 *pvt, *cur;
pvt = kcalloc(MV88E6XXX_MAX_PVT_ENTRIES, sizeof(*pvt), GFP_KERNEL);
if (!pvt)
return -ENOMEM;
mv88e6xxx_reg_lock(chip);
cur = pvt;
for (dev = 0; dev < MV88E6XXX_MAX_PVT_SWITCHES; dev++) {
for (port = 0; port < MV88E6XXX_MAX_PVT_PORTS; port++) {
err = mv88e6xxx_g2_pvt_read(chip, dev, port, cur);
if (err)
break;
cur++;
}
}
mv88e6xxx_reg_unlock(chip);
if (err) {
kfree(pvt);
return err;
}
*data = (u8 *)pvt;
return 0;
}
static int mv88e6xxx_region_port_snapshot(struct devlink_port *devlink_port,
const struct devlink_port_region_ops *ops,
struct netlink_ext_ack *extack,
u8 **data)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(devlink_port);
int port = dsa_devlink_port_to_port(devlink_port);
struct mv88e6xxx_chip *chip = ds->priv;
u16 *registers;
int i, err;
registers = kmalloc_array(32, sizeof(u16), GFP_KERNEL);
if (!registers)
return -ENOMEM;
mv88e6xxx_reg_lock(chip);
for (i = 0; i < 32; i++) {
err = mv88e6xxx_port_read(chip, port, i, &registers[i]);
if (err) {
kfree(registers);
goto out;
}
}
*data = (u8 *)registers;
out:
mv88e6xxx_reg_unlock(chip);
return err;
}
static struct mv88e6xxx_region_priv mv88e6xxx_region_global1_priv = {
.id = MV88E6XXX_REGION_GLOBAL1,
};
static struct devlink_region_ops mv88e6xxx_region_global1_ops = {
.name = "global1",
.snapshot = mv88e6xxx_region_global_snapshot,
.destructor = kfree,
.priv = &mv88e6xxx_region_global1_priv,
};
static struct mv88e6xxx_region_priv mv88e6xxx_region_global2_priv = {
.id = MV88E6XXX_REGION_GLOBAL2,
};
static struct devlink_region_ops mv88e6xxx_region_global2_ops = {
.name = "global2",
.snapshot = mv88e6xxx_region_global_snapshot,
.destructor = kfree,
.priv = &mv88e6xxx_region_global2_priv,
};
static struct devlink_region_ops mv88e6xxx_region_atu_ops = {
.name = "atu",
.snapshot = mv88e6xxx_region_atu_snapshot,
.destructor = kfree,
};
static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
.name = "vtu",
.snapshot = mv88e6xxx_region_vtu_snapshot,
.destructor = kfree,
};
static struct devlink_region_ops mv88e6xxx_region_stu_ops = {
.name = "stu",
.snapshot = mv88e6xxx_region_stu_snapshot,
.destructor = kfree,
};
static struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
.name = "pvt",
.snapshot = mv88e6xxx_region_pvt_snapshot,
.destructor = kfree,
};
static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = {
.name = "port",
.snapshot = mv88e6xxx_region_port_snapshot,
.destructor = kfree,
};
struct mv88e6xxx_region {
struct devlink_region_ops *ops;
u64 size;
bool (*cond)(struct mv88e6xxx_chip *chip);
};
static struct mv88e6xxx_region mv88e6xxx_regions[] = {
[MV88E6XXX_REGION_GLOBAL1] = {
.ops = &mv88e6xxx_region_global1_ops,
.size = 32 * sizeof(u16)
},
[MV88E6XXX_REGION_GLOBAL2] = {
.ops = &mv88e6xxx_region_global2_ops,
.size = 32 * sizeof(u16) },
[MV88E6XXX_REGION_ATU] = {
.ops = &mv88e6xxx_region_atu_ops
/* calculated at runtime */
},
[MV88E6XXX_REGION_VTU] = {
.ops = &mv88e6xxx_region_vtu_ops
/* calculated at runtime */
},
[MV88E6XXX_REGION_STU] = {
.ops = &mv88e6xxx_region_stu_ops,
.cond = mv88e6xxx_has_stu,
/* calculated at runtime */
},
[MV88E6XXX_REGION_PVT] = {
.ops = &mv88e6xxx_region_pvt_ops,
.size = MV88E6XXX_MAX_PVT_ENTRIES * sizeof(u16),
.cond = mv88e6xxx_has_pvt,
},
};
void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
dsa_devlink_region_destroy(chip->regions[i]);
}
void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
dsa_devlink_region_destroy(chip->ports[port].region);
}
int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct devlink_region *region;
region = dsa_devlink_port_region_create(ds,
port,
&mv88e6xxx_region_port_ops, 1,
32 * sizeof(u16));
if (IS_ERR(region))
return PTR_ERR(region);
chip->ports[port].region = region;
return 0;
}
int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
{
bool (*cond)(struct mv88e6xxx_chip *chip);
struct mv88e6xxx_chip *chip = ds->priv;
struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
int i, j;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++) {
ops = mv88e6xxx_regions[i].ops;
size = mv88e6xxx_regions[i].size;
cond = mv88e6xxx_regions[i].cond;
if (cond && !cond(chip))
continue;
switch (i) {
case MV88E6XXX_REGION_ATU:
size = mv88e6xxx_num_databases(chip) *
sizeof(struct mv88e6xxx_devlink_atu_entry);
break;
case MV88E6XXX_REGION_VTU:
size = (mv88e6xxx_max_vid(chip) + 1) *
sizeof(struct mv88e6xxx_devlink_vtu_entry);
break;
case MV88E6XXX_REGION_STU:
size = (mv88e6xxx_max_sid(chip) + 1) *
sizeof(struct mv88e6xxx_devlink_stu_entry);
break;
}
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region))
goto out;
chip->regions[i] = region;
}
return 0;
out:
for (j = 0; j < i; j++)
dsa_devlink_region_destroy(chip->regions[j]);
return PTR_ERR(region);
}
int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
return devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
chip->info->name);
}
| linux-master | drivers/net/dsa/mv88e6xxx/devlink.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6xxx Switch hardware timestamping support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2017 National Instruments
* Erik Hons <[email protected]>
* Brandon Streiff <[email protected]>
* Dane Wagner <[email protected]>
*/
#include "chip.h"
#include "global2.h"
#include "hwtstamp.h"
#include "ptp.h"
#include <linux/ptp_classify.h>
#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
static int mv88e6xxx_port_ptp_read(struct mv88e6xxx_chip *chip, int port,
int addr, u16 *data, int len)
{
if (!chip->info->ops->avb_ops->port_ptp_read)
return -EOPNOTSUPP;
return chip->info->ops->avb_ops->port_ptp_read(chip, port, addr,
data, len);
}
static int mv88e6xxx_port_ptp_write(struct mv88e6xxx_chip *chip, int port,
int addr, u16 data)
{
if (!chip->info->ops->avb_ops->port_ptp_write)
return -EOPNOTSUPP;
return chip->info->ops->avb_ops->port_ptp_write(chip, port, addr,
data);
}
static int mv88e6xxx_ptp_write(struct mv88e6xxx_chip *chip, int addr,
u16 data)
{
if (!chip->info->ops->avb_ops->ptp_write)
return -EOPNOTSUPP;
return chip->info->ops->avb_ops->ptp_write(chip, addr, data);
}
static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr,
u16 *data)
{
if (!chip->info->ops->avb_ops->ptp_read)
return -EOPNOTSUPP;
return chip->info->ops->avb_ops->ptp_read(chip, addr, data, 1);
}
/* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX
* timestamp. When working properly, hardware will produce a timestamp
* within 1ms. Software may encounter delays due to MDIO contention, so
* the timeout is set accordingly.
*/
#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *info)
{
const struct mv88e6xxx_ptp_ops *ptp_ops;
struct mv88e6xxx_chip *chip;
chip = ds->priv;
ptp_ops = chip->info->ops->ptp_ops;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = ptp_clock_index(chip->ptp_clock);
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters = ptp_ops->rx_filters;
return 0;
}
static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
struct hwtstamp_config *config)
{
const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
bool tstamp_enable = false;
/* Prevent the TX/RX paths from trying to interact with the
* timestamp hardware while we reconfigure it.
*/
clear_bit_unlock(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state);
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tstamp_enable = false;
break;
case HWTSTAMP_TX_ON:
tstamp_enable = true;
break;
default:
return -ERANGE;
}
/* The switch supports timestamping both L2 and L4; one cannot be
* disabled independently of the other.
*/
if (!(BIT(config->rx_filter) & ptp_ops->rx_filters)) {
config->rx_filter = HWTSTAMP_FILTER_NONE;
dev_dbg(chip->dev, "Unsupported rx_filter %d\n",
config->rx_filter);
return -ERANGE;
}
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tstamp_enable = false;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_ALL:
default:
config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
mv88e6xxx_reg_lock(chip);
if (tstamp_enable) {
chip->enable_count += 1;
if (chip->enable_count == 1 && ptp_ops->global_enable)
ptp_ops->global_enable(chip);
if (ptp_ops->port_enable)
ptp_ops->port_enable(chip, port);
} else {
if (ptp_ops->port_disable)
ptp_ops->port_disable(chip, port);
chip->enable_count -= 1;
if (chip->enable_count == 0 && ptp_ops->global_disable)
ptp_ops->global_disable(chip);
}
mv88e6xxx_reg_unlock(chip);
/* Once hardware has been configured, enable timestamp checks
* in the RX/TX paths.
*/
if (tstamp_enable)
set_bit(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state);
return 0;
}
int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
struct hwtstamp_config config;
int err;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
err = mv88e6xxx_set_hwtstamp_config(chip, port, &config);
if (err)
return err;
/* Save the chosen configuration to be returned later. */
memcpy(&ps->tstamp_config, &config, sizeof(config));
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
struct hwtstamp_config *config = &ps->tstamp_config;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}
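/* Editor's note: an illustrative userspace sketch (not part of the
 * driver) of the SIOCSHWTSTAMP ioctl that reaches the two handlers
 * above. Error handling is omitted and "eth0" is a placeholder
 * interface name.
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	static void example_enable_ptp_timestamping(int sock)
 *	{
 *		struct hwtstamp_config cfg = {
 *			.tx_type   = HWTSTAMP_TX_ON,
 *			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *		};
 *		struct ifreq ifr;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *)&cfg;
 *
 *		// The driver rewrites cfg.rx_filter to what it programmed.
 *		ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *	}
 */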
/* Returns a pointer to the PTP header if the caller should time stamp,
* or NULL if the caller should not.
*/
static struct ptp_header *mv88e6xxx_should_tstamp(struct mv88e6xxx_chip *chip,
int port, struct sk_buff *skb,
unsigned int type)
{
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
struct ptp_header *hdr;
if (!chip->info->ptp_support)
return NULL;
hdr = ptp_parse_header(skb, type);
if (!hdr)
return NULL;
if (!test_bit(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state))
return NULL;
return hdr;
}
static int mv88e6xxx_ts_valid(u16 status)
{
if (!(status & MV88E6XXX_PTP_TS_VALID))
return 0;
if (status & MV88E6XXX_PTP_TS_STATUS_MASK)
return 0;
return 1;
}
static int seq_match(struct sk_buff *skb, u16 ts_seqid)
{
unsigned int type = SKB_PTP_TYPE(skb);
struct ptp_header *hdr;
hdr = ptp_parse_header(skb, type);
return ts_seqid == ntohs(hdr->sequence_id);
}
static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_port_hwtstamp *ps,
struct sk_buff *skb, u16 reg,
struct sk_buff_head *rxq)
{
u16 buf[4] = { 0 }, status, seq_id;
struct skb_shared_hwtstamps *shwt;
struct sk_buff_head received;
u64 ns, timelo, timehi;
unsigned long flags;
int err;
/* The latched timestamp belongs to one of the received frames. */
__skb_queue_head_init(&received);
spin_lock_irqsave(&rxq->lock, flags);
skb_queue_splice_tail_init(rxq, &received);
spin_unlock_irqrestore(&rxq->lock, flags);
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
reg, buf, ARRAY_SIZE(buf));
mv88e6xxx_reg_unlock(chip);
if (err)
pr_err("failed to get the receive time stamp\n");
status = buf[0];
timelo = buf[1];
timehi = buf[2];
seq_id = buf[3];
if (status & MV88E6XXX_PTP_TS_VALID) {
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_ptp_write(chip, ps->port_id, reg, 0);
mv88e6xxx_reg_unlock(chip);
if (err)
pr_err("failed to clear the receive status\n");
}
/* Since the device can only handle one time stamp at a time,
* we purge any extra frames from the queue.
*/
for ( ; skb; skb = __skb_dequeue(&received)) {
if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) {
ns = timehi << 16 | timelo;
mv88e6xxx_reg_lock(chip);
ns = timecounter_cyc2time(&chip->tstamp_tc, ns);
mv88e6xxx_reg_unlock(chip);
shwt = skb_hwtstamps(skb);
memset(shwt, 0, sizeof(*shwt));
shwt->hwtstamp = ns_to_ktime(ns);
status &= ~MV88E6XXX_PTP_TS_VALID;
}
netif_rx(skb);
}
}
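/* Editor's note: timelo/timehi above are the two 16-bit halves of the
 * hardware's 32-bit timestamp counter; timecounter_cyc2time() extends
 * that raw count to a 64-bit nanosecond value using the overflow
 * tracking maintained by the driver's PTP clock code.
 */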
static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_port_hwtstamp *ps)
{
const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct sk_buff *skb;
skb = skb_dequeue(&ps->rx_queue);
if (skb)
mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr0_sts_reg,
&ps->rx_queue);
skb = skb_dequeue(&ps->rx_queue2);
if (skb)
mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr1_sts_reg,
&ps->rx_queue2);
}
static int is_pdelay_resp(const struct ptp_header *hdr)
{
return (hdr->tsmt & 0xf) == 3;
}
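/* Editor's note: the low nibble of tsmt is the IEEE 1588 messageType.
 * The event messages the hardware timestamps are:
 *
 *   0x0 Sync         0x1 Delay_Req
 *   0x2 Pdelay_Req   0x3 Pdelay_Resp
 *
 * so the check above routes Pdelay_Resp frames to the second arrival
 * queue, matching the MV88E6XXX_PTP_TS_ARRIVAL_PTR setup performed in
 * mv88e6xxx_hwtstamp_setup() further down.
 */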
bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
{
struct mv88e6xxx_port_hwtstamp *ps;
struct mv88e6xxx_chip *chip;
struct ptp_header *hdr;
chip = ds->priv;
ps = &chip->port_hwtstamp[port];
if (ps->tstamp_config.rx_filter != HWTSTAMP_FILTER_PTP_V2_EVENT)
return false;
hdr = mv88e6xxx_should_tstamp(chip, port, skb, type);
if (!hdr)
return false;
SKB_PTP_TYPE(skb) = type;
if (is_pdelay_resp(hdr))
skb_queue_tail(&ps->rx_queue2, skb);
else
skb_queue_tail(&ps->rx_queue, skb);
ptp_schedule_worker(chip->ptp_clock, 0);
return true;
}
static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_port_hwtstamp *ps)
{
const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct skb_shared_hwtstamps shhwtstamps;
u16 departure_block[4], status;
struct sk_buff *tmp_skb;
u32 time_raw;
int err;
u64 ns;
if (!ps->tx_skb)
return 0;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
ptp_ops->dep_sts_reg,
departure_block,
ARRAY_SIZE(departure_block));
mv88e6xxx_reg_unlock(chip);
if (err)
goto free_and_clear_skb;
if (!(departure_block[0] & MV88E6XXX_PTP_TS_VALID)) {
if (time_is_before_jiffies(ps->tx_tstamp_start +
TX_TSTAMP_TIMEOUT)) {
dev_warn(chip->dev, "p%d: clearing tx timestamp hang\n",
ps->port_id);
goto free_and_clear_skb;
}
/* The timestamp should be available quickly, and getting it is
* high priority and time bounded to only 10ms. A poll is therefore
* warranted, so restart the work.
*/
return 1;
}
/* We have the timestamp; go ahead and clear valid now */
mv88e6xxx_reg_lock(chip);
mv88e6xxx_port_ptp_write(chip, ps->port_id, ptp_ops->dep_sts_reg, 0);
mv88e6xxx_reg_unlock(chip);
status = departure_block[0] & MV88E6XXX_PTP_TS_STATUS_MASK;
if (status != MV88E6XXX_PTP_TS_STATUS_NORMAL) {
dev_warn(chip->dev, "p%d: tx timestamp overrun\n", ps->port_id);
goto free_and_clear_skb;
}
if (departure_block[3] != ps->tx_seq_id) {
dev_warn(chip->dev, "p%d: unexpected seq. id\n", ps->port_id);
goto free_and_clear_skb;
}
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
time_raw = ((u32)departure_block[2] << 16) | departure_block[1];
mv88e6xxx_reg_lock(chip);
ns = timecounter_cyc2time(&chip->tstamp_tc, time_raw);
mv88e6xxx_reg_unlock(chip);
shhwtstamps.hwtstamp = ns_to_ktime(ns);
dev_dbg(chip->dev,
"p%d: txtstamp %llx status 0x%04x skb ID 0x%04x hw ID 0x%04x\n",
ps->port_id, ktime_to_ns(shhwtstamps.hwtstamp),
departure_block[0], ps->tx_seq_id, departure_block[3]);
/* skb_complete_tx_timestamp() will free up the client to make
* another timestamp-able transmit. We have to be ready for it
* -- by clearing the ps->tx_skb "flag" -- beforehand.
*/
tmp_skb = ps->tx_skb;
ps->tx_skb = NULL;
clear_bit_unlock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
skb_complete_tx_timestamp(tmp_skb, &shhwtstamps);
return 0;
free_and_clear_skb:
dev_kfree_skb_any(ps->tx_skb);
ps->tx_skb = NULL;
clear_bit_unlock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
return 0;
}
long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
struct dsa_switch *ds = chip->ds;
struct mv88e6xxx_port_hwtstamp *ps;
int i, restart = 0;
for (i = 0; i < ds->num_ports; i++) {
if (!dsa_is_user_port(ds, i))
continue;
ps = &chip->port_hwtstamp[i];
if (test_bit(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state))
restart |= mv88e6xxx_txtstamp_work(chip, ps);
mv88e6xxx_rxtstamp_work(chip, ps);
}
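/* The return value is the delay in jiffies before the PTP aux worker runs
* again: keep polling while a TX timestamp is still pending, otherwise
* return a negative value so the worker is only re-armed from the rx/tx
* paths via ptp_schedule_worker().
*/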
return restart ? 1 : -1;
}
void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
struct ptp_header *hdr;
struct sk_buff *clone;
unsigned int type;
type = ptp_classify_raw(skb);
if (type == PTP_CLASS_NONE)
return;
hdr = mv88e6xxx_should_tstamp(chip, port, skb, type);
if (!hdr)
return;
clone = skb_clone_sk(skb);
if (!clone)
return;
if (test_and_set_bit_lock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS,
&ps->state)) {
kfree_skb(clone);
return;
}
ps->tx_skb = clone;
ps->tx_tstamp_start = jiffies;
ps->tx_seq_id = be16_to_cpu(hdr->sequence_id);
ptp_schedule_worker(chip->ptp_clock, 0);
}
int mv88e6165_global_disable(struct mv88e6xxx_chip *chip)
{
u16 val;
int err;
err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val);
if (err)
return err;
val |= MV88E6165_PTP_CFG_DISABLE_PTP;
return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val);
}
int mv88e6165_global_enable(struct mv88e6xxx_chip *chip)
{
u16 val;
int err;
err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val);
if (err)
return err;
val &= ~(MV88E6165_PTP_CFG_DISABLE_PTP | MV88E6165_PTP_CFG_TSPEC_MASK);
return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val);
}
int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP);
}
int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH);
}
static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port)
{
const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
ps->port_id = port;
skb_queue_head_init(&ps->rx_queue);
skb_queue_head_init(&ps->rx_queue2);
if (ptp_ops->port_disable)
return ptp_ops->port_disable(chip, port);
return 0;
}
int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
{
const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
int err;
int i;
/* Disable timestamping on all ports. */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
err = mv88e6xxx_hwtstamp_port_setup(chip, i);
if (err)
return err;
}
/* Disable PTP globally */
if (ptp_ops->global_disable) {
err = ptp_ops->global_disable(chip);
if (err)
return err;
}
/* Set the ethertype of L2 PTP messages */
err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_GC_ETYPE, ETH_P_1588);
if (err)
return err;
/* MV88E6XXX_PTP_MSG_TYPE is a mask of PTP message types to
* timestamp. This affects all ports that have timestamping enabled,
* but the timestamp config is per-port; thus we configure all events
* here and only support the HWTSTAMP_FILTER_*_EVENT filter types.
*/
err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_MSGTYPE,
MV88E6XXX_PTP_MSGTYPE_ALL_EVENT);
if (err)
return err;
/* Use ARRIVAL1 for peer delay response messages. */
err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_TS_ARRIVAL_PTR,
MV88E6XXX_PTP_MSGTYPE_PDLAY_RES);
if (err)
return err;
/* 88E6341 devices default to timestamping at the PHY, but this has
* a hardware issue that results in unreliable timestamps. Force
* these devices to timestamp at the MAC.
*/
if (chip->info->family == MV88E6XXX_FAMILY_6341) {
u16 val = MV88E6341_PTP_CFG_UPDATE |
MV88E6341_PTP_CFG_MODE_IDX |
MV88E6341_PTP_CFG_MODE_TS_AT_MAC;
err = mv88e6xxx_ptp_write(chip, MV88E6341_PTP_CFG, val);
if (err)
return err;
}
return 0;
}
void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip)
{
}
| linux-master | drivers/net/dsa/mv88e6xxx/hwtstamp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6xxx Switch PTP support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2017 National Instruments
* Erik Hons <[email protected]>
* Brandon Streiff <[email protected]>
* Dane Wagner <[email protected]>
*/
#include "chip.h"
#include "global1.h"
#include "global2.h"
#include "hwtstamp.h"
#include "ptp.h"
#define MV88E6XXX_MAX_ADJ_PPB 1000000
/* Family MV88E6250:
* Raw timestamps are in units of 10-ns clock periods.
*
* clkadj = scaled_ppm * 10*2^28 / (10^6 * 2^16)
* simplifies to
* clkadj = scaled_ppm * 2^7 / 5^5
*/
#define MV88E6250_CC_SHIFT 28
#define MV88E6250_CC_MULT (10 << MV88E6250_CC_SHIFT)
#define MV88E6250_CC_MULT_NUM (1 << 7)
#define MV88E6250_CC_MULT_DEM 3125ULL
/* Other families:
* Raw timestamps are in units of 8-ns clock periods.
*
* clkadj = scaled_ppm * 8*2^28 / (10^6 * 2^16)
* simplifies to
* clkadj = scaled_ppm * 2^9 / 5^6
*/
#define MV88E6XXX_CC_SHIFT 28
#define MV88E6XXX_CC_MULT (8 << MV88E6XXX_CC_SHIFT)
#define MV88E6XXX_CC_MULT_NUM (1 << 9)
#define MV88E6XXX_CC_MULT_DEM 15625ULL
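/* Worked example for the constants above (illustrative only): a 1 ppm
* request arrives as scaled_ppm = 2^16 = 65536, so
* diff = 65536 * 2^9 / 15625 ~= 2147, and against
* cc_mult = 8 * 2^28 = 2147483648 that is 2147 / 2147483648 ~= 1 ppm.
*/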
#define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100)
#define cc_to_chip(cc) container_of(cc, struct mv88e6xxx_chip, tstamp_cc)
#define dw_overflow_to_chip(dw) container_of(dw, struct mv88e6xxx_chip, \
overflow_work)
#define dw_tai_event_to_chip(dw) container_of(dw, struct mv88e6xxx_chip, \
tai_event_work)
static int mv88e6xxx_tai_read(struct mv88e6xxx_chip *chip, int addr,
u16 *data, int len)
{
if (!chip->info->ops->avb_ops->tai_read)
return -EOPNOTSUPP;
return chip->info->ops->avb_ops->tai_read(chip, addr, data, len);
}
static int mv88e6xxx_tai_write(struct mv88e6xxx_chip *chip, int addr, u16 data)
{
if (!chip->info->ops->avb_ops->tai_write)
return -EOPNOTSUPP;
return chip->info->ops->avb_ops->tai_write(chip, addr, data);
}
/* TODO: places where this is called should be using pinctrl */
static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
int func, int input)
{
int err;
if (!chip->info->ops->gpio_ops)
return -EOPNOTSUPP;
err = chip->info->ops->gpio_ops->set_dir(chip, pin, input);
if (err)
return err;
return chip->info->ops->gpio_ops->set_pctl(chip, pin, func);
}
static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
u16 phc_time[2];
int err;
err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_TIME_LO, phc_time,
ARRAY_SIZE(phc_time));
if (err)
return 0;
else
return ((u32)phc_time[1] << 16) | phc_time[0];
}
static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
u16 phc_time[2];
int err;
err = mv88e6xxx_tai_read(chip, MV88E6XXX_PTP_GC_TIME_LO, phc_time,
ARRAY_SIZE(phc_time));
if (err)
return 0;
else
return ((u32)phc_time[1] << 16) | phc_time[0];
}
/* mv88e6352_config_eventcap - configure TAI event capture
* @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external)
* @rising: zero for falling-edge trigger, else rising-edge trigger
*
* This will also reset the capture sequence counter.
*/
static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event,
int rising)
{
u16 global_config;
u16 cap_config;
int err;
chip->evcap_config = MV88E6XXX_TAI_CFG_CAP_OVERWRITE |
MV88E6XXX_TAI_CFG_CAP_CTR_START;
if (!rising)
chip->evcap_config |= MV88E6XXX_TAI_CFG_EVREQ_FALLING;
global_config = (chip->evcap_config | chip->trig_config);
err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_CFG, global_config);
if (err)
return err;
if (event == PTP_CLOCK_PPS) {
cap_config = MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG;
} else if (event == PTP_CLOCK_EXTTS) {
/* if STATUS_CAP_TRIG is unset we capture PTP_EVREQ events */
cap_config = 0;
} else {
return -EINVAL;
}
/* Write the capture config; this also clears the capture counter */
err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS,
cap_config);
return err;
}
static void mv88e6352_tai_event_work(struct work_struct *ugly)
{
struct delayed_work *dw = to_delayed_work(ugly);
struct mv88e6xxx_chip *chip = dw_tai_event_to_chip(dw);
struct ptp_clock_event ev;
u16 status[4];
u32 raw_ts;
int err;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_EVENT_STATUS,
status, ARRAY_SIZE(status));
mv88e6xxx_reg_unlock(chip);
if (err) {
dev_err(chip->dev, "failed to read TAI status register\n");
return;
}
if (status[0] & MV88E6XXX_TAI_EVENT_STATUS_ERROR) {
dev_warn(chip->dev, "missed event capture\n");
return;
}
if (!(status[0] & MV88E6XXX_TAI_EVENT_STATUS_VALID))
goto out;
raw_ts = ((u32)status[2] << 16) | status[1];
/* Clear the valid bit so the next timestamp can come in */
status[0] &= ~MV88E6XXX_TAI_EVENT_STATUS_VALID;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS, status[0]);
mv88e6xxx_reg_unlock(chip);
/* This is an external timestamp */
ev.type = PTP_CLOCK_EXTTS;
/* We only have one timestamping channel. */
ev.index = 0;
mv88e6xxx_reg_lock(chip);
ev.timestamp = timecounter_cyc2time(&chip->tstamp_tc, raw_ts);
mv88e6xxx_reg_unlock(chip);
ptp_clock_event(chip->ptp_clock, &ev);
out:
schedule_delayed_work(&chip->tai_event_work, TAI_EVENT_WORK_INTERVAL);
}
static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
int neg_adj = 0;
u32 diff, mult;
u64 adj;
if (scaled_ppm < 0) {
neg_adj = 1;
scaled_ppm = -scaled_ppm;
}
mult = ptp_ops->cc_mult;
adj = ptp_ops->cc_mult_num;
adj *= scaled_ppm;
diff = div_u64(adj, ptp_ops->cc_mult_dem);
mv88e6xxx_reg_lock(chip);
timecounter_read(&chip->tstamp_tc);
chip->tstamp_cc.mult = neg_adj ? mult - diff : mult + diff;
mv88e6xxx_reg_unlock(chip);
return 0;
}
static int mv88e6xxx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
mv88e6xxx_reg_lock(chip);
timecounter_adjtime(&chip->tstamp_tc, delta);
mv88e6xxx_reg_unlock(chip);
return 0;
}
static int mv88e6xxx_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
u64 ns;
mv88e6xxx_reg_lock(chip);
ns = timecounter_read(&chip->tstamp_tc);
mv88e6xxx_reg_unlock(chip);
*ts = ns_to_timespec64(ns);
return 0;
}
static int mv88e6xxx_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
u64 ns;
ns = timespec64_to_ns(ts);
mv88e6xxx_reg_lock(chip);
timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc, ns);
mv88e6xxx_reg_unlock(chip);
return 0;
}
static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
struct ptp_clock_request *rq, int on)
{
int rising = (rq->extts.flags & PTP_RISING_EDGE);
int func;
int pin;
int err;
/* Reject requests with unsupported flags */
if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
PTP_FALLING_EDGE |
PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
(rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
return -EOPNOTSUPP;
pin = ptp_find_pin(chip->ptp_clock, PTP_PF_EXTTS, rq->extts.index);
if (pin < 0)
return -EBUSY;
mv88e6xxx_reg_lock(chip);
if (on) {
func = MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ;
err = mv88e6352_set_gpio_func(chip, pin, func, true);
if (err)
goto out;
schedule_delayed_work(&chip->tai_event_work,
TAI_EVENT_WORK_INTERVAL);
err = mv88e6352_config_eventcap(chip, PTP_CLOCK_EXTTS, rising);
} else {
func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO;
err = mv88e6352_set_gpio_func(chip, pin, func, true);
cancel_delayed_work_sync(&chip->tai_event_work);
}
out:
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6352_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
return mv88e6352_ptp_enable_extts(chip, rq, on);
default:
return -EOPNOTSUPP;
}
}
static int mv88e6352_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
switch (func) {
case PTP_PF_NONE:
case PTP_PF_EXTTS:
break;
case PTP_PF_PEROUT:
case PTP_PF_PHYSYNC:
return -EOPNOTSUPP;
}
return 0;
}
const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
.clock_read = mv88e6165_ptp_clock_read,
.global_enable = mv88e6165_global_enable,
.global_disable = mv88e6165_global_disable,
.arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS,
.arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS,
.dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS,
.rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
.cc_shift = MV88E6XXX_CC_SHIFT,
.cc_mult = MV88E6XXX_CC_MULT,
.cc_mult_num = MV88E6XXX_CC_MULT_NUM,
.cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
.clock_read = mv88e6352_ptp_clock_read,
.ptp_enable = mv88e6352_ptp_enable,
.ptp_verify = mv88e6352_ptp_verify,
.event_work = mv88e6352_tai_event_work,
.port_enable = mv88e6352_hwtstamp_port_enable,
.port_disable = mv88e6352_hwtstamp_port_disable,
.n_ext_ts = 1,
.arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
.arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
.dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
.rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
.cc_shift = MV88E6250_CC_SHIFT,
.cc_mult = MV88E6250_CC_MULT,
.cc_mult_num = MV88E6250_CC_MULT_NUM,
.cc_mult_dem = MV88E6250_CC_MULT_DEM,
};
const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
.clock_read = mv88e6352_ptp_clock_read,
.ptp_enable = mv88e6352_ptp_enable,
.ptp_verify = mv88e6352_ptp_verify,
.event_work = mv88e6352_tai_event_work,
.port_enable = mv88e6352_hwtstamp_port_enable,
.port_disable = mv88e6352_hwtstamp_port_disable,
.n_ext_ts = 1,
.arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
.arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
.dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
.rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
.cc_shift = MV88E6XXX_CC_SHIFT,
.cc_mult = MV88E6XXX_CC_MULT,
.cc_mult_num = MV88E6XXX_CC_MULT_NUM,
.cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
.clock_read = mv88e6352_ptp_clock_read,
.ptp_enable = mv88e6352_ptp_enable,
.ptp_verify = mv88e6352_ptp_verify,
.event_work = mv88e6352_tai_event_work,
.port_enable = mv88e6352_hwtstamp_port_enable,
.port_disable = mv88e6352_hwtstamp_port_disable,
.set_ptp_cpu_port = mv88e6390_g1_set_ptp_cpu_port,
.n_ext_ts = 1,
.arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
.arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
.dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
.rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
.cc_shift = MV88E6XXX_CC_SHIFT,
.cc_mult = MV88E6XXX_CC_MULT,
.cc_mult_num = MV88E6XXX_CC_MULT_NUM,
.cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
if (chip->info->ops->ptp_ops->clock_read)
return chip->info->ops->ptp_ops->clock_read(cc);
return 0;
}
/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3
* seconds; this task forces periodic reads so that we don't miss any.
*/
#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 16)
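/* Sanity check on the numbers above: 2^32 cycles * 8 ns/cycle ~= 34.36 s,
* so reading at least every 16 seconds guarantees the timecounter observes
* every counter wrap.
*/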
static void mv88e6xxx_ptp_overflow_check(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
struct mv88e6xxx_chip *chip = dw_overflow_to_chip(dw);
struct timespec64 ts;
mv88e6xxx_ptp_gettime(&chip->ptp_clock_info, &ts);
schedule_delayed_work(&chip->overflow_work,
MV88E6XXX_TAI_OVERFLOW_PERIOD);
}
int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
{
const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
int i;
/* Set up the cycle counter */
memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc));
chip->tstamp_cc.read = mv88e6xxx_ptp_clock_read;
chip->tstamp_cc.mask = CYCLECOUNTER_MASK(32);
chip->tstamp_cc.mult = ptp_ops->cc_mult;
chip->tstamp_cc.shift = ptp_ops->cc_shift;
timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc,
ktime_to_ns(ktime_get_real()));
INIT_DELAYED_WORK(&chip->overflow_work, mv88e6xxx_ptp_overflow_check);
if (ptp_ops->event_work)
INIT_DELAYED_WORK(&chip->tai_event_work, ptp_ops->event_work);
chip->ptp_clock_info.owner = THIS_MODULE;
snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name),
"%s", dev_name(chip->dev));
chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts;
chip->ptp_clock_info.n_per_out = 0;
chip->ptp_clock_info.n_pins = mv88e6xxx_num_gpio(chip);
chip->ptp_clock_info.pps = 0;
for (i = 0; i < chip->ptp_clock_info.n_pins; ++i) {
struct ptp_pin_desc *ppd = &chip->pin_config[i];
snprintf(ppd->name, sizeof(ppd->name), "mv88e6xxx_gpio%d", i);
ppd->index = i;
ppd->func = PTP_PF_NONE;
}
chip->ptp_clock_info.pin_config = chip->pin_config;
chip->ptp_clock_info.max_adj = MV88E6XXX_MAX_ADJ_PPB;
chip->ptp_clock_info.adjfine = mv88e6xxx_ptp_adjfine;
chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime;
chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime;
chip->ptp_clock_info.settime64 = mv88e6xxx_ptp_settime;
chip->ptp_clock_info.enable = ptp_ops->ptp_enable;
chip->ptp_clock_info.verify = ptp_ops->ptp_verify;
chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
if (ptp_ops->set_ptp_cpu_port) {
struct dsa_port *dp;
int upstream = 0;
int err;
dsa_switch_for_each_user_port(dp, chip->ds) {
upstream = dsa_upstream_port(chip->ds, dp->index);
break;
}
err = ptp_ops->set_ptp_cpu_port(chip, upstream);
if (err) {
dev_err(chip->dev, "Failed to set PTP CPU destination port!\n");
return err;
}
}
chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev);
if (IS_ERR(chip->ptp_clock))
return PTR_ERR(chip->ptp_clock);
schedule_delayed_work(&chip->overflow_work,
MV88E6XXX_TAI_OVERFLOW_PERIOD);
return 0;
}
void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
{
if (chip->ptp_clock) {
cancel_delayed_work_sync(&chip->overflow_work);
if (chip->info->ops->ptp_ops->event_work)
cancel_delayed_work_sync(&chip->tai_event_work);
ptp_clock_unregister(chip->ptp_clock);
chip->ptp_clock = NULL;
}
}
| linux-master | drivers/net/dsa/mv88e6xxx/ptp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6xxx Switch Global 2 Registers support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2016-2017 Savoir-faire Linux Inc.
* Vivien Didelot <[email protected]>
*
* Copyright (c) 2017 National Instruments
* Brandon Streiff <[email protected]>
*/
#include <linux/bitfield.h>
#include "global2.h"
/* Offset 0x16: AVB Command Register
* Offset 0x17: AVB Data Register
*
* There are two different versions of this register interface:
* "6352": 3-bit "op" field, 4-bit "port" field.
* "6390": 2-bit "op" field, 5-bit "port" field.
*
* The "op" codes are different between the two, as well as the special
* port fields for global PTP and TAI configuration.
*/
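/* As the helpers below show, a command word is assembled as
* BUSY | op | (port << 8) | (block << 5) | addr, i.e. the port number
* starts at bit 8, the register block select at bit 5 and the register
* address occupies the low bits.
*/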
/* mv88e6xxx_g2_avb_read -- Read one or multiple 16-bit words.
* The hardware supports snapshotting up to four contiguous registers.
*/
static int mv88e6xxx_g2_avb_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6352_G2_AVB_CMD_BUSY);
return mv88e6xxx_g2_wait_bit(chip, MV88E6352_G2_AVB_CMD, bit, 0);
}
static int mv88e6xxx_g2_avb_read(struct mv88e6xxx_chip *chip, u16 readop,
u16 *data, int len)
{
int err;
int i;
err = mv88e6xxx_g2_avb_wait(chip);
if (err)
return err;
/* Hardware can only snapshot four words. */
if (len > 4)
return -E2BIG;
err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_CMD,
MV88E6352_G2_AVB_CMD_BUSY | readop);
if (err)
return err;
err = mv88e6xxx_g2_avb_wait(chip);
if (err)
return err;
for (i = 0; i < len; ++i) {
err = mv88e6xxx_g2_read(chip, MV88E6352_G2_AVB_DATA,
&data[i]);
if (err)
return err;
}
return 0;
}
/* mv88e6xxx_g2_avb_write -- Write one 16-bit word. */
static int mv88e6xxx_g2_avb_write(struct mv88e6xxx_chip *chip, u16 writeop,
u16 data)
{
int err;
err = mv88e6xxx_g2_avb_wait(chip);
if (err)
return err;
err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_DATA, data);
if (err)
return err;
err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_CMD,
MV88E6352_G2_AVB_CMD_BUSY | writeop);
return mv88e6xxx_g2_avb_wait(chip);
}
static int mv88e6352_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip,
int port, int addr, u16 *data,
int len)
{
u16 readop = (len == 1 ? MV88E6352_G2_AVB_CMD_OP_READ :
MV88E6352_G2_AVB_CMD_OP_READ_INCR) |
(port << 8) | (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) |
addr;
return mv88e6xxx_g2_avb_read(chip, readop, data, len);
}
static int mv88e6352_g2_avb_port_ptp_write(struct mv88e6xxx_chip *chip,
int port, int addr, u16 data)
{
u16 writeop = MV88E6352_G2_AVB_CMD_OP_WRITE | (port << 8) |
(MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) | addr;
return mv88e6xxx_g2_avb_write(chip, writeop, data);
}
static int mv88e6352_g2_avb_ptp_read(struct mv88e6xxx_chip *chip, int addr,
u16 *data, int len)
{
return mv88e6352_g2_avb_port_ptp_read(chip,
MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL,
addr, data, len);
}
static int mv88e6352_g2_avb_ptp_write(struct mv88e6xxx_chip *chip, int addr,
u16 data)
{
return mv88e6352_g2_avb_port_ptp_write(chip,
MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL,
addr, data);
}
static int mv88e6352_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr,
u16 *data, int len)
{
return mv88e6352_g2_avb_port_ptp_read(chip,
MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL,
addr, data, len);
}
static int mv88e6352_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr,
u16 data)
{
return mv88e6352_g2_avb_port_ptp_write(chip,
MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL,
addr, data);
}
const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {
.port_ptp_read = mv88e6352_g2_avb_port_ptp_read,
.port_ptp_write = mv88e6352_g2_avb_port_ptp_write,
.ptp_read = mv88e6352_g2_avb_ptp_read,
.ptp_write = mv88e6352_g2_avb_ptp_write,
.tai_read = mv88e6352_g2_avb_tai_read,
.tai_write = mv88e6352_g2_avb_tai_write,
};
static int mv88e6165_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr,
u16 *data, int len)
{
return mv88e6352_g2_avb_port_ptp_read(chip,
MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL,
addr, data, len);
}
static int mv88e6165_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr,
u16 data)
{
return mv88e6352_g2_avb_port_ptp_write(chip,
MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL,
addr, data);
}
const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {
.port_ptp_read = mv88e6352_g2_avb_port_ptp_read,
.port_ptp_write = mv88e6352_g2_avb_port_ptp_write,
.ptp_read = mv88e6352_g2_avb_ptp_read,
.ptp_write = mv88e6352_g2_avb_ptp_write,
.tai_read = mv88e6165_g2_avb_tai_read,
.tai_write = mv88e6165_g2_avb_tai_write,
};
static int mv88e6390_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip,
int port, int addr, u16 *data,
int len)
{
u16 readop = (len == 1 ? MV88E6390_G2_AVB_CMD_OP_READ :
MV88E6390_G2_AVB_CMD_OP_READ_INCR) |
(port << 8) | (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) |
addr;
return mv88e6xxx_g2_avb_read(chip, readop, data, len);
}
static int mv88e6390_g2_avb_port_ptp_write(struct mv88e6xxx_chip *chip,
int port, int addr, u16 data)
{
u16 writeop = MV88E6390_G2_AVB_CMD_OP_WRITE | (port << 8) |
(MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) | addr;
return mv88e6xxx_g2_avb_write(chip, writeop, data);
}
static int mv88e6390_g2_avb_ptp_read(struct mv88e6xxx_chip *chip, int addr,
u16 *data, int len)
{
return mv88e6390_g2_avb_port_ptp_read(chip,
MV88E6390_G2_AVB_CMD_PORT_PTPGLOBAL,
addr, data, len);
}
static int mv88e6390_g2_avb_ptp_write(struct mv88e6xxx_chip *chip, int addr,
u16 data)
{
return mv88e6390_g2_avb_port_ptp_write(chip,
MV88E6390_G2_AVB_CMD_PORT_PTPGLOBAL,
addr, data);
}
static int mv88e6390_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr,
u16 *data, int len)
{
return mv88e6390_g2_avb_port_ptp_read(chip,
MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL,
addr, data, len);
}
static int mv88e6390_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr,
u16 data)
{
return mv88e6390_g2_avb_port_ptp_write(chip,
MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL,
addr, data);
}
const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {
.port_ptp_read = mv88e6390_g2_avb_port_ptp_read,
.port_ptp_write = mv88e6390_g2_avb_port_ptp_write,
.ptp_read = mv88e6390_g2_avb_ptp_read,
.ptp_write = mv88e6390_g2_avb_ptp_write,
.tai_read = mv88e6390_g2_avb_tai_read,
.tai_write = mv88e6390_g2_avb_tai_write,
};
| linux-master | drivers/net/dsa/mv88e6xxx/global2_avb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6xxx Switch Hidden Registers support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2019 Andrew Lunn <[email protected]>
*/
#include <linux/bitfield.h>
#include "chip.h"
#include "port.h"
/* The mv88e6390 and mv88e6341 have some hidden registers used for debug and
* development. The errata also makes use of them.
*/
int mv88e6xxx_port_hidden_write(struct mv88e6xxx_chip *chip, int block,
int port, int reg, u16 val)
{
u16 ctrl;
int err;
err = mv88e6xxx_port_write(chip, MV88E6XXX_PORT_RESERVED_1A_DATA_PORT,
MV88E6XXX_PORT_RESERVED_1A, val);
if (err)
return err;
ctrl = MV88E6XXX_PORT_RESERVED_1A_BUSY |
MV88E6XXX_PORT_RESERVED_1A_WRITE |
block << MV88E6XXX_PORT_RESERVED_1A_BLOCK_SHIFT |
port << MV88E6XXX_PORT_RESERVED_1A_PORT_SHIFT |
reg;
return mv88e6xxx_port_write(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
MV88E6XXX_PORT_RESERVED_1A, ctrl);
}
int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY);
return mv88e6xxx_port_wait_bit(chip,
MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
MV88E6XXX_PORT_RESERVED_1A, bit, 0);
}
int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
int reg, u16 *val)
{
u16 ctrl;
int err;
ctrl = MV88E6XXX_PORT_RESERVED_1A_BUSY |
MV88E6XXX_PORT_RESERVED_1A_READ |
block << MV88E6XXX_PORT_RESERVED_1A_BLOCK_SHIFT |
port << MV88E6XXX_PORT_RESERVED_1A_PORT_SHIFT |
reg;
err = mv88e6xxx_port_write(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
MV88E6XXX_PORT_RESERVED_1A, ctrl);
if (err)
return err;
err = mv88e6xxx_port_hidden_wait(chip);
if (err)
return err;
return mv88e6xxx_port_read(chip, MV88E6XXX_PORT_RESERVED_1A_DATA_PORT,
MV88E6XXX_PORT_RESERVED_1A, val);
}
| linux-master | drivers/net/dsa/mv88e6xxx/port_hidden.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Marvell 88E6xxx Switch Global (1) Registers support
*
* Copyright (c) 2008 Marvell Semiconductor
*
* Copyright (c) 2016-2017 Savoir-faire Linux Inc.
* Vivien Didelot <[email protected]>
*/
#include <linux/bitfield.h>
#include "chip.h"
#include "global1.h"
int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
{
int addr = chip->info->global1_addr;
return mv88e6xxx_read(chip, addr, reg, val);
}
int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
{
int addr = chip->info->global1_addr;
return mv88e6xxx_write(chip, addr, reg, val);
}
int mv88e6xxx_g1_wait_bit(struct mv88e6xxx_chip *chip, int reg, int
bit, int val)
{
return mv88e6xxx_wait_bit(chip, chip->info->global1_addr, reg,
bit, val);
}
int mv88e6xxx_g1_wait_mask(struct mv88e6xxx_chip *chip, int reg,
u16 mask, u16 val)
{
return mv88e6xxx_wait_mask(chip, chip->info->global1_addr, reg,
mask, val);
}
/* Offset 0x00: Switch Global Status Register */
static int mv88e6185_g1_wait_ppu_disabled(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g1_wait_mask(chip, MV88E6XXX_G1_STS,
MV88E6185_G1_STS_PPU_STATE_MASK,
MV88E6185_G1_STS_PPU_STATE_DISABLED);
}
static int mv88e6185_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g1_wait_mask(chip, MV88E6XXX_G1_STS,
MV88E6185_G1_STS_PPU_STATE_MASK,
MV88E6185_G1_STS_PPU_STATE_POLLING);
}
static int mv88e6352_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6352_G1_STS_PPU_STATE);
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G1_STS_INIT_READY);
/* Wait up to 1 second for the switch to be ready. The InitReady bit 11
* is set to a one when all units inside the device (ATU, VTU, etc.)
* have finished their initialization and are ready to accept frames.
*/
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
{
const unsigned long timeout = jiffies + 1 * HZ;
u16 val;
int err;
/* Wait up to 1 second for the switch to finish reading the
* EEPROM.
*/
while (time_before(jiffies, timeout)) {
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
if (err) {
dev_err(chip->dev, "Error reading status");
return;
}
/* If the switch is still resetting, it may not
* respond on the bus, and so MDIO read returns
* 0xffff. Differentiate between that, and waiting for
* the EEPROM to be done by bit 0 being set.
*/
if (val != 0xffff &&
val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
return;
usleep_range(1000, 2000);
}
dev_err(chip->dev, "Timeout waiting for EEPROM done");
}
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5
*/
int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
{
u16 reg;
int err;
reg = (addr[0] << 8) | addr[1];
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_MAC_01, reg);
if (err)
return err;
reg = (addr[2] << 8) | addr[3];
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_MAC_23, reg);
if (err)
return err;
reg = (addr[4] << 8) | addr[5];
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_MAC_45, reg);
if (err)
return err;
return 0;
}
/* Offset 0x04: Switch Global Control Register */
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip)
{
u16 val;
int err;
/* Set the SWReset bit 15 along with the PPUEn bit 14, to also restart
* the PPU, including re-doing PHY detection and initialization
*/
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
if (err)
return err;
val |= MV88E6XXX_G1_CTL1_SW_RESET;
val |= MV88E6XXX_G1_CTL1_PPU_ENABLE;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
if (err)
return err;
err = mv88e6xxx_g1_wait_init_ready(chip);
if (err)
return err;
return mv88e6185_g1_wait_ppu_polling(chip);
}
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip)
{
u16 val;
int err;
/* Set the SWReset bit 15 */
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
if (err)
return err;
val |= MV88E6XXX_G1_CTL1_SW_RESET;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
if (err)
return err;
return mv88e6xxx_g1_wait_init_ready(chip);
}
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip)
{
int err;
err = mv88e6250_g1_reset(chip);
if (err)
return err;
return mv88e6352_g1_wait_ppu_polling(chip);
}
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip)
{
u16 val;
int err;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
if (err)
return err;
val |= MV88E6XXX_G1_CTL1_PPU_ENABLE;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
if (err)
return err;
return mv88e6185_g1_wait_ppu_polling(chip);
}
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip)
{
u16 val;
int err;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
if (err)
return err;
val &= ~MV88E6XXX_G1_CTL1_PPU_ENABLE;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
if (err)
return err;
return mv88e6185_g1_wait_ppu_disabled(chip);
}
int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu)
{
u16 val;
int err;
mtu += ETH_HLEN + ETH_FCS_LEN;
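/* e.g. the standard MTU of 1500 becomes 1500 + 14 + 4 = 1518 bytes, so
* only larger MTUs select the 1632-byte maximum frame size below.
*/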
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
if (err)
return err;
val &= ~MV88E6185_G1_CTL1_MAX_FRAME_1632;
if (mtu > 1518)
val |= MV88E6185_G1_CTL1_MAX_FRAME_1632;
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
}
/* Offset 0x10: IP-PRI Mapping Register 0
* Offset 0x11: IP-PRI Mapping Register 1
* Offset 0x12: IP-PRI Mapping Register 2
* Offset 0x13: IP-PRI Mapping Register 3
* Offset 0x14: IP-PRI Mapping Register 4
* Offset 0x15: IP-PRI Mapping Register 5
* Offset 0x16: IP-PRI Mapping Register 6
* Offset 0x17: IP-PRI Mapping Register 7
*/
int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip)
{
int err;
/* Reset the IP TOS/DiffServ/Traffic priorities to defaults */
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_0, 0x0000);
if (err)
return err;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_1, 0x0000);
if (err)
return err;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_2, 0x5555);
if (err)
return err;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_3, 0x5555);
if (err)
return err;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_4, 0xaaaa);
if (err)
return err;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_5, 0xaaaa);
if (err)
return err;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_6, 0xffff);
if (err)
return err;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_7, 0xffff);
if (err)
return err;
return 0;
}
/* Offset 0x18: IEEE-PRI Register */
int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip)
{
/* Reset the IEEE Tag priorities to defaults */
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa41);
}
int mv88e6250_g1_ieee_pri_map(struct mv88e6xxx_chip *chip)
{
/* Reset the IEEE Tag priorities to defaults */
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa50);
}
/* Offset 0x1a: Monitor Control */
/* Offset 0x1a: Monitor & MGMT Control on some devices */
int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
enum mv88e6xxx_egress_direction direction,
int port)
{
u16 reg;
int err;
err = mv88e6xxx_g1_read(chip, MV88E6185_G1_MONITOR_CTL, &reg);
if (err)
return err;
switch (direction) {
case MV88E6XXX_EGRESS_DIR_INGRESS:
reg &= ~MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK;
reg |= port <<
__bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK);
break;
case MV88E6XXX_EGRESS_DIR_EGRESS:
reg &= ~MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK;
reg |= port <<
__bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
break;
default:
return -EINVAL;
}
return mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
}
/* Older generations also call this the ARP destination. It has been
* generalized in more modern devices such that more than just ARP frames
* can egress it.
*/
int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
{
u16 reg;
int err;
err = mv88e6xxx_g1_read(chip, MV88E6185_G1_MONITOR_CTL, &reg);
if (err)
return err;
reg &= ~MV88E6185_G1_MONITOR_CTL_ARP_DEST_MASK;
reg |= port << __bf_shf(MV88E6185_G1_MONITOR_CTL_ARP_DEST_MASK);
return mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
}
static int mv88e6390_g1_monitor_write(struct mv88e6xxx_chip *chip,
u16 pointer, u8 data)
{
u16 reg;
reg = MV88E6390_G1_MONITOR_MGMT_CTL_UPDATE | pointer | data;
return mv88e6xxx_g1_write(chip, MV88E6390_G1_MONITOR_MGMT_CTL, reg);
}
int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
enum mv88e6xxx_egress_direction direction,
int port)
{
u16 ptr;
switch (direction) {
case MV88E6XXX_EGRESS_DIR_INGRESS:
ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST;
break;
case MV88E6XXX_EGRESS_DIR_EGRESS:
ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST;
break;
default:
return -EINVAL;
}
return mv88e6390_g1_monitor_write(chip, ptr, port);
}
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
{
u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
/* Use the default high priority for management frames sent to
* the CPU.
*/
port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
return mv88e6390_g1_monitor_write(chip, ptr, port);
}
int mv88e6390_g1_set_ptp_cpu_port(struct mv88e6xxx_chip *chip, int port)
{
u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_PTP_CPU_DEST;
/* Use the default high priority for PTP frames sent to
* the CPU.
*/
port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
return mv88e6390_g1_monitor_write(chip, ptr, port);
}
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
{
u16 ptr;
int err;
/* 01:80:c2:00:00:00-01:80:c2:00:00:07 are Management */
ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XLO;
err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
/* 01:80:c2:00:00:08-01:80:c2:00:00:0f are Management */
ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XHI;
err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
/* 01:80:c2:00:00:20-01:80:c2:00:00:27 are Management */
ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XLO;
err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
/* 01:80:c2:00:00:28-01:80:c2:00:00:2f are Management */
ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XHI;
err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
return 0;
}
/* Offset 0x1c: Global Control 2 */
static int mv88e6xxx_g1_ctl2_mask(struct mv88e6xxx_chip *chip, u16 mask,
u16 val)
{
u16 reg;
int err;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL2, &reg);
if (err)
return err;
reg &= ~mask;
reg |= val & mask;
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, reg);
}
int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port)
{
const u16 mask = MV88E6185_G1_CTL2_CASCADE_PORT_MASK;
return mv88e6xxx_g1_ctl2_mask(chip, mask, port << __bf_shf(mask));
}
int mv88e6085_g1_rmu_disable(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g1_ctl2_mask(chip, MV88E6085_G1_CTL2_P10RM |
MV88E6085_G1_CTL2_RM_ENABLE, 0);
}
int mv88e6352_g1_rmu_disable(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g1_ctl2_mask(chip, MV88E6352_G1_CTL2_RMU_MODE_MASK,
MV88E6352_G1_CTL2_RMU_MODE_DISABLED);
}
int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_RMU_MODE_MASK,
MV88E6390_G1_CTL2_RMU_MODE_DISABLED);
}
int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_HIST_MODE_MASK,
MV88E6390_G1_CTL2_HIST_MODE_RX |
MV88E6390_G1_CTL2_HIST_MODE_TX);
}
int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index)
{
return mv88e6xxx_g1_ctl2_mask(chip,
MV88E6XXX_G1_CTL2_DEVICE_NUMBER_MASK,
index);
}
/* Offset 0x1d: Statistics Operation 2 */
static int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G1_STATS_OP_BUSY);
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STATS_OP, bit, 0);
}
int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
{
u16 val;
int err;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STATS_OP, &val);
if (err)
return err;
val |= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
return err;
}
int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
{
int err;
/* Snapshot the hardware statistics counters for this port. */
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP,
MV88E6XXX_G1_STATS_OP_BUSY |
MV88E6XXX_G1_STATS_OP_CAPTURE_PORT |
MV88E6XXX_G1_STATS_OP_HIST_RX_TX | port);
if (err)
return err;
/* Wait for the snapshotting to complete. */
return mv88e6xxx_g1_stats_wait(chip);
}
int mv88e6320_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
{
port = (port + 1) << 5;
return mv88e6xxx_g1_stats_snapshot(chip, port);
}
int mv88e6390_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
{
int err;
port = (port + 1) << 5;
/* Snapshot the hardware statistics counters for this port. */
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP,
MV88E6XXX_G1_STATS_OP_BUSY |
MV88E6XXX_G1_STATS_OP_CAPTURE_PORT | port);
if (err)
return err;
/* Wait for the snapshotting to complete. */
return mv88e6xxx_g1_stats_wait(chip);
}
void mv88e6xxx_g1_stats_read(struct mv88e6xxx_chip *chip, int stat, u32 *val)
{
u32 value;
u16 reg;
int err;
*val = 0;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP,
MV88E6XXX_G1_STATS_OP_BUSY |
MV88E6XXX_G1_STATS_OP_READ_CAPTURED | stat);
if (err)
return;
err = mv88e6xxx_g1_stats_wait(chip);
if (err)
return;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STATS_COUNTER_32, &reg);
if (err)
return;
value = reg << 16;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STATS_COUNTER_01, &reg);
if (err)
return;
*val = value | reg;
}
int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
{
int err;
u16 val;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STATS_OP, &val);
if (err)
return err;
/* Keep the histogram mode bits */
val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
if (err)
return err;
/* Wait for the flush to complete. */
return mv88e6xxx_g1_stats_wait(chip);
}
| linux-master | drivers/net/dsa/mv88e6xxx/global1.c |
// SPDX-License-Identifier: GPL-2.0
/* Microchip LAN937X switch driver main logic
* Copyright (C) 2019-2022 Microchip Technology Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/phy.h>
#include <linux/of_net.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/math.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "lan937x_reg.h"
#include "ksz_common.h"
#include "ksz9477.h"
#include "lan937x.h"
static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
return regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
}
static int lan937x_port_cfg(struct ksz_device *dev, int port, int offset,
u8 bits, bool set)
{
return regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
bits, set ? bits : 0);
}
static int lan937x_enable_spi_indirect_access(struct ksz_device *dev)
{
u16 data16;
int ret;
/* Enable Phy access through SPI */
ret = lan937x_cfg(dev, REG_GLOBAL_CTRL_0, SW_PHY_REG_BLOCK, false);
if (ret < 0)
return ret;
ret = ksz_read16(dev, REG_VPHY_SPECIAL_CTRL__2, &data16);
if (ret < 0)
return ret;
/* Allow SPI access */
data16 |= VPHY_SPI_INDIRECT_ENABLE;
return ksz_write16(dev, REG_VPHY_SPECIAL_CTRL__2, data16);
}
static int lan937x_vphy_ind_addr_wr(struct ksz_device *dev, int addr, int reg)
{
u16 addr_base = REG_PORT_T1_PHY_CTRL_BASE;
u16 temp;
/* get register address based on the logical port */
temp = PORT_CTRL_ADDR(addr, (addr_base + (reg << 2)));
return ksz_write16(dev, REG_VPHY_IND_ADDR__2, temp);
}
static int lan937x_internal_phy_write(struct ksz_device *dev, int addr, int reg,
u16 val)
{
unsigned int value;
int ret;
/* Check for internal phy port */
if (!dev->info->internal_phy[addr])
return -EOPNOTSUPP;
ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
if (ret < 0)
return ret;
/* Write the data to be written to the VPHY reg */
ret = ksz_write16(dev, REG_VPHY_IND_DATA__2, val);
if (ret < 0)
return ret;
/* Write the Write En and Busy bit */
ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2,
(VPHY_IND_WRITE | VPHY_IND_BUSY));
if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(ksz_regmap_16(dev), REG_VPHY_IND_CTRL__2,
value, !(value & VPHY_IND_BUSY), 10,
1000);
if (ret < 0) {
dev_err(dev->dev, "Failed to write phy register\n");
return ret;
}
return 0;
}
static int lan937x_internal_phy_read(struct ksz_device *dev, int addr, int reg,
u16 *val)
{
unsigned int value;
int ret;
/* Check for internal phy port, return 0xffff for non-existent phy */
if (!dev->info->internal_phy[addr])
return 0xffff;
ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
if (ret < 0)
return ret;
/* Write Read and Busy bit to start the transaction */
ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2, VPHY_IND_BUSY);
if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(ksz_regmap_16(dev), REG_VPHY_IND_CTRL__2,
value, !(value & VPHY_IND_BUSY), 10,
1000);
if (ret < 0) {
dev_err(dev->dev, "Failed to read phy register\n");
return ret;
}
/* Read the VPHY register which has the PHY data */
return ksz_read16(dev, REG_VPHY_IND_DATA__2, val);
}
int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
return lan937x_internal_phy_read(dev, addr, reg, data);
}
int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
return lan937x_internal_phy_write(dev, addr, reg, val);
}
int lan937x_reset_switch(struct ksz_device *dev)
{
u32 data32;
int ret;
/* reset switch */
ret = lan937x_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
if (ret < 0)
return ret;
/* Enable Auto Aging */
ret = lan937x_cfg(dev, REG_SW_LUE_CTRL_1, SW_LINK_AUTO_AGING, true);
if (ret < 0)
return ret;
/* disable interrupts */
ret = ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
if (ret < 0)
return ret;
ret = ksz_write32(dev, REG_SW_INT_STATUS__4, POR_READY_INT);
if (ret < 0)
return ret;
ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0xFF);
if (ret < 0)
return ret;
return ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
}
void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
const u32 *masks = dev->info->masks;
const u16 *regs = dev->info->regs;
struct dsa_switch *ds = dev->ds;
u8 member;
/* enable tag tail for host port */
if (cpu_port)
lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_TAIL_TAG_ENABLE, true);
/* Enable the Port Queue split */
ksz9477_port_queue_split(dev, port);
/* set back pressure for half duplex */
lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
true);
/* enable 802.1p priority */
lan937x_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
if (!dev->info->internal_phy[port])
lan937x_port_cfg(dev, port, regs[P_XMII_CTRL_0],
masks[P_MII_TX_FLOW_CTRL] |
masks[P_MII_RX_FLOW_CTRL],
true);
if (cpu_port)
member = dsa_user_ports(ds);
else
member = BIT(dsa_upstream_port(ds, port));
dev->dev_ops->cfg_port_member(dev, port, member);
}
void lan937x_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct dsa_port *dp;
dsa_switch_for_each_cpu_port(dp, ds) {
if (dev->info->cpu_ports & (1 << dp->index)) {
dev->cpu_port = dp->index;
/* enable cpu port */
lan937x_port_setup(dev, dp->index, true);
}
}
dsa_switch_for_each_user_port(dp, ds) {
ksz_port_stp_state_set(ds, dp->index, BR_STATE_DISABLED);
}
}
int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
{
struct dsa_switch *ds = dev->ds;
int ret;
new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
if (dsa_is_cpu_port(ds, port))
new_mtu += LAN937X_TAG_LEN;
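/* e.g. a 1500 byte MTU gives 1500 + 18 + 4 = 1522 bytes on a user port,
* plus the tail-tag length when this is the CPU port.
*/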
if (new_mtu >= FR_MIN_SIZE)
ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
PORT_JUMBO_PACKET, true);
else
ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
PORT_JUMBO_PACKET, false);
if (ret < 0) {
dev_err(ds->dev, "failed to enable jumbo\n");
return ret;
}
/* Write the frame size in PORT_MAX_FR_SIZE register */
ret = ksz_pwrite16(dev, port, PORT_MAX_FR_SIZE, new_mtu);
if (ret) {
dev_err(ds->dev, "failed to update mtu for port %d\n", port);
return ret;
}
return 0;
}
int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
u32 secs = msecs / 1000;
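/* e.g. msecs = 300000 gives secs = 300 = 0x12c: assuming the two masks
* cover bits 7:0 and 19:8 as their names suggest, 0x2c is written to the
* 8-bit register and 0x001 to the 16-bit one.
*/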
u32 value;
int ret;
value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
if (ret < 0)
return ret;
value = FIELD_GET(SW_AGE_PERIOD_19_8_M, secs);
return ksz_write16(dev, REG_SW_AGE_PERIOD__2, value);
}
static void lan937x_set_tune_adj(struct ksz_device *dev, int port,
u16 reg, u8 val)
{
u16 data16;
ksz_pread16(dev, port, reg, &data16);
/* Update tune Adjust */
data16 |= FIELD_PREP(PORT_TUNE_ADJ, val);
ksz_pwrite16(dev, port, reg, data16);
/* write DLL reset to take effect */
data16 |= PORT_DLL_RESET;
ksz_pwrite16(dev, port, reg, data16);
}
static void lan937x_set_rgmii_tx_delay(struct ksz_device *dev, int port)
{
u8 val;
/* Apply different codes based on the ports as per characterization
* results
*/
val = (port == LAN937X_RGMII_1_PORT) ? RGMII_1_TX_DELAY_2NS :
RGMII_2_TX_DELAY_2NS;
lan937x_set_tune_adj(dev, port, REG_PORT_XMII_CTRL_5, val);
}
static void lan937x_set_rgmii_rx_delay(struct ksz_device *dev, int port)
{
u8 val;
val = (port == LAN937X_RGMII_1_PORT) ? RGMII_1_RX_DELAY_2NS :
RGMII_2_RX_DELAY_2NS;
lan937x_set_tune_adj(dev, port, REG_PORT_XMII_CTRL_4, val);
}
void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
struct phylink_config *config)
{
config->mac_capabilities = MAC_100FD;
if (dev->info->supports_rgmii[port]) {
/* MII/RMII/RGMII ports */
config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_100HD | MAC_10 | MAC_1000FD;
}
}
void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
if (p->rgmii_tx_val) {
lan937x_set_rgmii_tx_delay(dev, port);
dev_info(dev->dev, "Applied rgmii tx delay for the port %d\n",
port);
}
if (p->rgmii_rx_val) {
lan937x_set_rgmii_rx_delay(dev, port);
dev_info(dev->dev, "Applied rgmii rx delay for the port %d\n",
port);
}
}
int lan937x_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
{
return ksz_pwrite32(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
}
int lan937x_switch_init(struct ksz_device *dev)
{
dev->port_mask = (1 << dev->info->port_cnt) - 1;
return 0;
}
int lan937x_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
int ret;
/* enable Indirect Access from SPI to the VPHY registers */
ret = lan937x_enable_spi_indirect_access(dev);
if (ret < 0) {
dev_err(dev->dev, "failed to enable spi indirect access");
return ret;
}
/* The VLAN aware is a global setting. Mixed vlan
* filterings are not supported.
*/
ds->vlan_filtering_is_global = true;
/* Enable aggressive back off for half duplex & UNH mode */
lan937x_cfg(dev, REG_SW_MAC_CTRL_0,
(SW_PAUSE_UNH_MODE | SW_NEW_BACKOFF | SW_AGGR_BACKOFF),
true);
/* If NO_EXC_COLLISION_DROP bit is set, the switch will not drop
* packets when 16 or more collisions occur
*/
lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true);
/* enable global MIB counter freeze function */
lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
/* disable CLK125 & CLK25, 1: disable, 0: enable */
lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
(SW_CLK125_ENB | SW_CLK25_ENB), true);
return 0;
}
void lan937x_teardown(struct dsa_switch *ds)
{
}
void lan937x_switch_exit(struct ksz_device *dev)
{
lan937x_reset_switch(dev);
}
MODULE_AUTHOR("Arun Ramadoss <[email protected]>");
MODULE_DESCRIPTION("Microchip LAN937x Series Switch DSA Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/microchip/lan937x_main.c |
// SPDX-License-Identifier: GPL-2.0
/* Microchip KSZ PTP Implementation
*
* Copyright (C) 2020 ARRI Lighting
* Copyright (C) 2022 Microchip Technology Inc.
*/
#include <linux/dsa/ksz_common.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include "ksz_common.h"
#include "ksz_ptp.h"
#include "ksz_ptp_reg.h"
#define ptp_caps_to_data(d) container_of((d), struct ksz_ptp_data, caps)
#define ptp_data_to_ksz_dev(d) container_of((d), struct ksz_device, ptp_data)
#define work_to_xmit_work(w) \
container_of((w), struct ksz_deferred_xmit_work, work)
/* Maximum drift correction, in ppb:
* (max sub-ns adjustment per tick) / (tick period)
* = ((2^30 - 1) / 2^32) ns / 40 ns * 10^9 ~= 6249999
*/
#define KSZ_MAX_DRIFT_CORR 6249999
#define KSZ_MAX_PULSE_WIDTH 125000000LL
#define KSZ_PTP_INC_NS 40ULL /* HW clock is incremented every 40 ns (by 40) */
#define KSZ_PTP_SUBNS_BITS 32
#define KSZ_PTP_INT_START 13
static int ksz_ptp_tou_gpio(struct ksz_device *dev)
{
int ret;
if (!is_lan937x(dev))
return 0;
ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, GPIO_OUT,
GPIO_OUT);
if (ret)
return ret;
ret = ksz_rmw32(dev, REG_SW_GLOBAL_LED_OVR__4, LED_OVR_1 | LED_OVR_2,
LED_OVR_1 | LED_OVR_2);
if (ret)
return ret;
return ksz_rmw32(dev, REG_SW_GLOBAL_LED_SRC__4,
LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2,
LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2);
}
static int ksz_ptp_tou_reset(struct ksz_device *dev, u8 unit)
{
u32 data;
int ret;
/* Reset trigger unit (clears TRIGGER_EN, but not GPIOSTATx) */
ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_RESET, TRIG_RESET);
if (ret)
return ret;
data = FIELD_PREP(TRIG_DONE_M, BIT(unit));
ret = ksz_write32(dev, REG_PTP_TRIG_STATUS__4, data);
if (ret)
return ret;
data = FIELD_PREP(TRIG_INT_M, BIT(unit));
ret = ksz_write32(dev, REG_PTP_INT_STATUS__4, data);
if (ret)
return ret;
/* Clear reset and set GPIO direction */
return ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, (TRIG_RESET | TRIG_ENABLE),
0);
}
static int ksz_ptp_tou_pulse_verify(u64 pulse_ns)
{
u32 data;
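/* Illustrative numbers: a 1 ms pulse is 1000000 ns / 8 = 125000 hardware
* counts, and the 125 ms KSZ_MAX_PULSE_WIDTH works out to 15625000 counts,
* which still has to fit in TRIG_PULSE_WIDTH_M.
*/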
if (pulse_ns & 0x3)
return -EINVAL;
data = (pulse_ns / 8);
if (!FIELD_FIT(TRIG_PULSE_WIDTH_M, data))
return -ERANGE;
return 0;
}
static int ksz_ptp_tou_target_time_set(struct ksz_device *dev,
struct timespec64 const *ts)
{
int ret;
/* Hardware has only 32 bit */
if ((ts->tv_sec & 0xffffffff) != ts->tv_sec)
return -EINVAL;
ret = ksz_write32(dev, REG_TRIG_TARGET_NANOSEC, ts->tv_nsec);
if (ret)
return ret;
ret = ksz_write32(dev, REG_TRIG_TARGET_SEC, ts->tv_sec);
if (ret)
return ret;
return 0;
}
static int ksz_ptp_tou_start(struct ksz_device *dev, u8 unit)
{
u32 data;
int ret;
ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_ENABLE, TRIG_ENABLE);
if (ret)
return ret;
/* Check the error flag:
* - the ACTIVE flag is NOT cleared on an error, so the ERROR bit has to be
*   checked explicitly.
*/
ret = ksz_read32(dev, REG_PTP_TRIG_STATUS__4, &data);
if (ret)
return ret;
if (FIELD_GET(TRIG_ERROR_M, data) & (1 << unit)) {
dev_err(dev->dev, "%s: Trigger unit%d error!\n", __func__,
unit);
ret = -EIO;
/* Unit will be reset on next access */
return ret;
}
return 0;
}
static int ksz_ptp_configure_perout(struct ksz_device *dev,
u32 cycle_width_ns, u32 pulse_width_ns,
struct timespec64 const *target_time,
u8 index)
{
u32 data;
int ret;
data = FIELD_PREP(TRIG_NOTIFY, 1) |
FIELD_PREP(TRIG_GPO_M, index) |
FIELD_PREP(TRIG_PATTERN_M, TRIG_POS_PERIOD);
ret = ksz_write32(dev, REG_TRIG_CTRL__4, data);
if (ret)
return ret;
ret = ksz_write32(dev, REG_TRIG_CYCLE_WIDTH, cycle_width_ns);
if (ret)
return ret;
/* Set cycle count 0 - Infinite */
ret = ksz_rmw32(dev, REG_TRIG_CYCLE_CNT, TRIG_CYCLE_CNT_M, 0);
if (ret)
return ret;
data = (pulse_width_ns / 8);
ret = ksz_write32(dev, REG_TRIG_PULSE_WIDTH__4, data);
if (ret)
return ret;
ret = ksz_ptp_tou_target_time_set(dev, target_time);
if (ret)
return ret;
return 0;
}
static int ksz_ptp_enable_perout(struct ksz_device *dev,
struct ptp_perout_request const *request,
int on)
{
struct ksz_ptp_data *ptp_data = &dev->ptp_data;
u64 req_pulse_width_ns;
u64 cycle_width_ns;
u64 pulse_width_ns;
int pin = 0;
u32 data32;
int ret;
if (request->flags & ~PTP_PEROUT_DUTY_CYCLE)
return -EOPNOTSUPP;
if (ptp_data->tou_mode != KSZ_PTP_TOU_PEROUT &&
ptp_data->tou_mode != KSZ_PTP_TOU_IDLE)
return -EBUSY;
pin = ptp_find_pin(ptp_data->clock, PTP_PF_PEROUT, request->index);
if (pin < 0)
return -EINVAL;
data32 = FIELD_PREP(PTP_GPIO_INDEX, pin) |
FIELD_PREP(PTP_TOU_INDEX, request->index);
ret = ksz_rmw32(dev, REG_PTP_UNIT_INDEX__4,
PTP_GPIO_INDEX | PTP_TOU_INDEX, data32);
if (ret)
return ret;
ret = ksz_ptp_tou_reset(dev, request->index);
if (ret)
return ret;
if (!on) {
ptp_data->tou_mode = KSZ_PTP_TOU_IDLE;
return 0;
}
ptp_data->perout_target_time_first.tv_sec = request->start.sec;
ptp_data->perout_target_time_first.tv_nsec = request->start.nsec;
ptp_data->perout_period.tv_sec = request->period.sec;
ptp_data->perout_period.tv_nsec = request->period.nsec;
cycle_width_ns = timespec64_to_ns(&ptp_data->perout_period);
if ((cycle_width_ns & TRIG_CYCLE_WIDTH_M) != cycle_width_ns)
return -EINVAL;
if (request->flags & PTP_PEROUT_DUTY_CYCLE) {
pulse_width_ns = request->on.sec * NSEC_PER_SEC +
request->on.nsec;
} else {
/* Use a duty cycle of 50%. Maximum pulse width supported by the
* hardware is a little bit more than 125 ms.
*/
req_pulse_width_ns = (request->period.sec * NSEC_PER_SEC +
request->period.nsec) / 2;
pulse_width_ns = min_t(u64, req_pulse_width_ns,
KSZ_MAX_PULSE_WIDTH);
}
ret = ksz_ptp_tou_pulse_verify(pulse_width_ns);
if (ret)
return ret;
ret = ksz_ptp_configure_perout(dev, cycle_width_ns, pulse_width_ns,
&ptp_data->perout_target_time_first,
pin);
if (ret)
return ret;
ret = ksz_ptp_tou_gpio(dev);
if (ret)
return ret;
ret = ksz_ptp_tou_start(dev, request->index);
if (ret)
return ret;
ptp_data->tou_mode = KSZ_PTP_TOU_PEROUT;
return 0;
}
static int ksz_ptp_enable_mode(struct ksz_device *dev)
{
struct ksz_tagger_data *tagger_data = ksz_tagger_data(dev->ds);
struct ksz_ptp_data *ptp_data = &dev->ptp_data;
struct ksz_port *prt;
struct dsa_port *dp;
bool tag_en = false;
int ret;
dsa_switch_for_each_user_port(dp, dev->ds) {
prt = &dev->ports[dp->index];
if (prt->hwts_tx_en || prt->hwts_rx_en) {
tag_en = true;
break;
}
}
if (tag_en) {
ret = ptp_schedule_worker(ptp_data->clock, 0);
if (ret)
return ret;
} else {
ptp_cancel_worker_sync(ptp_data->clock);
}
tagger_data->hwtstamp_set_state(dev->ds, tag_en);
return ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_ENABLE,
tag_en ? PTP_ENABLE : 0);
}
/* The function returns the timestamping capabilities when requested
 * through the ethtool -T <interface> utility.
 */
int ksz_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts)
{
struct ksz_device *dev = ds->priv;
struct ksz_ptp_data *ptp_data;
ptp_data = &dev->ptp_data;
if (!ptp_data->clock)
return -ENODEV;
ts->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
ts->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ONESTEP_P2P);
if (is_lan937x(dev))
ts->tx_types |= BIT(HWTSTAMP_TX_ON);
ts->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
ts->phc_index = ptp_clock_index(ptp_data->clock);
return 0;
}
int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
struct ksz_device *dev = ds->priv;
struct hwtstamp_config *config;
struct ksz_port *prt;
prt = &dev->ports[port];
config = &prt->tstamp_config;
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}
static int ksz_set_hwtstamp_config(struct ksz_device *dev,
struct ksz_port *prt,
struct hwtstamp_config *config)
{
int ret;
if (config->flags)
return -EINVAL;
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = false;
prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
prt->hwts_tx_en = false;
break;
case HWTSTAMP_TX_ONESTEP_P2P:
prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
prt->hwts_tx_en = true;
ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, PTP_1STEP);
if (ret)
return ret;
break;
case HWTSTAMP_TX_ON:
if (!is_lan937x(dev))
return -ERANGE;
prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = true;
prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = true;
prt->hwts_tx_en = true;
ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, 0);
if (ret)
return ret;
break;
default:
return -ERANGE;
}
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
prt->hwts_rx_en = false;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
prt->hwts_rx_en = true;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
prt->hwts_rx_en = true;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
prt->hwts_rx_en = true;
break;
default:
config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
return ksz_ptp_enable_mode(dev);
}
int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
struct ksz_device *dev = ds->priv;
struct hwtstamp_config config;
struct ksz_port *prt;
int ret;
prt = &dev->ports[port];
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
ret = ksz_set_hwtstamp_config(dev, prt, &config);
if (ret)
return ret;
memcpy(&prt->tstamp_config, &config, sizeof(config));
if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
return -EFAULT;
return 0;
}
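/* The hardware provides only a partial time stamp (two bits of seconds
 * plus nanoseconds, as the reconstruction below implies). Combine it
 * with the cached PTP clock time and snap the result to the nearest
 * possible point in time.
 */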
static ktime_t ksz_tstamp_reconstruct(struct ksz_device *dev, ktime_t tstamp)
{
struct timespec64 ptp_clock_time;
struct ksz_ptp_data *ptp_data;
struct timespec64 diff;
struct timespec64 ts;
ptp_data = &dev->ptp_data;
ts = ktime_to_timespec64(tstamp);
spin_lock_bh(&ptp_data->clock_lock);
ptp_clock_time = ptp_data->clock_time;
spin_unlock_bh(&ptp_data->clock_lock);
/* calculate full time from partial time stamp */
ts.tv_sec = (ptp_clock_time.tv_sec & ~3) | ts.tv_sec;
/* find nearest possible point in time */
diff = timespec64_sub(ts, ptp_clock_time);
if (diff.tv_sec > 2)
ts.tv_sec -= 4;
else if (diff.tv_sec < -2)
ts.tv_sec += 4;
return timespec64_to_ktime(ts);
}
bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
unsigned int type)
{
struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
struct ksz_device *dev = ds->priv;
struct ptp_header *ptp_hdr;
struct ksz_port *prt;
u8 ptp_msg_type;
ktime_t tstamp;
s64 correction;
prt = &dev->ports[port];
tstamp = KSZ_SKB_CB(skb)->tstamp;
memset(hwtstamps, 0, sizeof(*hwtstamps));
hwtstamps->hwtstamp = ksz_tstamp_reconstruct(dev, tstamp);
if (prt->tstamp_config.tx_type != HWTSTAMP_TX_ONESTEP_P2P)
goto out;
ptp_hdr = ptp_parse_header(skb, type);
if (!ptp_hdr)
goto out;
ptp_msg_type = ptp_get_msgtype(ptp_hdr, type);
if (ptp_msg_type != PTP_MSGTYPE_PDELAY_REQ)
goto out;
/* Subtract only the partial time stamp from the correction field. When
 * the hardware adds the egress time stamp to the correction field of
 * the PDelay_Resp message on tx, it will likewise add only the partial
 * time stamp.
 */
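/* The PTP correctionField holds nanoseconds scaled by 2^16, hence the
 * shift by 16 below.
 */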
correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
correction -= ktime_to_ns(tstamp) << 16;
ptp_header_update_correction(skb, type, ptp_hdr, correction);
out:
return false;
}
void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct ksz_device *dev = ds->priv;
struct ptp_header *hdr;
struct sk_buff *clone;
struct ksz_port *prt;
unsigned int type;
u8 ptp_msg_type;
prt = &dev->ports[port];
if (!prt->hwts_tx_en)
return;
type = ptp_classify_raw(skb);
if (type == PTP_CLASS_NONE)
return;
hdr = ptp_parse_header(skb, type);
if (!hdr)
return;
ptp_msg_type = ptp_get_msgtype(hdr, type);
switch (ptp_msg_type) {
case PTP_MSGTYPE_SYNC:
if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P)
return;
break;
case PTP_MSGTYPE_PDELAY_REQ:
break;
case PTP_MSGTYPE_PDELAY_RESP:
if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P) {
KSZ_SKB_CB(skb)->ptp_type = type;
KSZ_SKB_CB(skb)->update_correction = true;
return;
}
break;
default:
return;
}
clone = skb_clone_sk(skb);
if (!clone)
return;
/* caching the value to be used in tag_ksz.c */
KSZ_SKB_CB(skb)->clone = clone;
}
static void ksz_ptp_txtstamp_skb(struct ksz_device *dev,
struct ksz_port *prt, struct sk_buff *skb)
{
struct skb_shared_hwtstamps hwtstamps = {};
int ret;
/* The timeout must cover the time for the DSA master to transmit the
 * data, the time stamp latency, the IRQ latency and the time needed to
 * read the time stamp.
 */
ret = wait_for_completion_timeout(&prt->tstamp_msg_comp,
msecs_to_jiffies(100));
if (!ret)
return;
hwtstamps.hwtstamp = prt->tstamp_msg;
skb_complete_tx_timestamp(skb, &hwtstamps);
}
void ksz_port_deferred_xmit(struct kthread_work *work)
{
struct ksz_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
struct sk_buff *clone, *skb = xmit_work->skb;
struct dsa_switch *ds = xmit_work->dp->ds;
struct ksz_device *dev = ds->priv;
struct ksz_port *prt;
prt = &dev->ports[xmit_work->dp->index];
clone = KSZ_SKB_CB(skb)->clone;
skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
reinit_completion(&prt->tstamp_msg_comp);
dsa_enqueue_skb(skb, skb->dev);
ksz_ptp_txtstamp_skb(dev, prt, clone);
kfree(xmit_work);
}
static int _ksz_ptp_gettime(struct ksz_device *dev, struct timespec64 *ts)
{
u32 nanoseconds;
u32 seconds;
u8 phase;
int ret;
/* Copy current PTP clock into shadow registers and read */
ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_READ_TIME, PTP_READ_TIME);
if (ret)
return ret;
ret = ksz_read8(dev, REG_PTP_RTC_SUB_NANOSEC__2, &phase);
if (ret)
return ret;
ret = ksz_read32(dev, REG_PTP_RTC_NANOSEC, &nanoseconds);
if (ret)
return ret;
ret = ksz_read32(dev, REG_PTP_RTC_SEC, &seconds);
if (ret)
return ret;
ts->tv_sec = seconds;
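/* The value read from the sub-nanosecond register contributes 8 ns per
 * unit to the nanoseconds part.
 */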
ts->tv_nsec = nanoseconds + phase * 8;
return 0;
}
static int ksz_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
int ret;
mutex_lock(&ptp_data->lock);
ret = _ksz_ptp_gettime(dev, ts);
mutex_unlock(&ptp_data->lock);
return ret;
}
static int ksz_ptp_restart_perout(struct ksz_device *dev)
{
struct ksz_ptp_data *ptp_data = &dev->ptp_data;
s64 now_ns, first_ns, period_ns, next_ns;
struct ptp_perout_request request;
struct timespec64 next;
struct timespec64 now;
unsigned int count;
int ret;
dev_info(dev->dev, "Restarting periodic output signal\n");
ret = _ksz_ptp_gettime(dev, &now);
if (ret)
return ret;
now_ns = timespec64_to_ns(&now);
first_ns = timespec64_to_ns(&ptp_data->perout_target_time_first);
/* Calculate next perout event based on start time and period */
period_ns = timespec64_to_ns(&ptp_data->perout_period);
if (first_ns < now_ns) {
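/* Step forward from the original start time in whole periods; the
 * guard loop below then pushes the next event past "now".
 */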
count = div_u64(now_ns - first_ns, period_ns);
next_ns = first_ns + count * period_ns;
} else {
next_ns = first_ns;
}
/* Ensure a 100 ms guard time prior to the next event */
while (next_ns < now_ns + 100000000)
next_ns += period_ns;
/* Restart periodic output signal */
next = ns_to_timespec64(next_ns);
request.start.sec = next.tv_sec;
request.start.nsec = next.tv_nsec;
request.period.sec = ptp_data->perout_period.tv_sec;
request.period.nsec = ptp_data->perout_period.tv_nsec;
request.index = 0;
request.flags = 0;
return ksz_ptp_enable_perout(dev, &request, 1);
}
static int ksz_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
int ret;
mutex_lock(&ptp_data->lock);
/* Write to shadow registers and Load PTP clock */
ret = ksz_write16(dev, REG_PTP_RTC_SUB_NANOSEC__2, PTP_RTC_0NS);
if (ret)
goto unlock;
ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, ts->tv_nsec);
if (ret)
goto unlock;
ret = ksz_write32(dev, REG_PTP_RTC_SEC, ts->tv_sec);
if (ret)
goto unlock;
ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_LOAD_TIME, PTP_LOAD_TIME);
if (ret)
goto unlock;
switch (ptp_data->tou_mode) {
case KSZ_PTP_TOU_IDLE:
break;
case KSZ_PTP_TOU_PEROUT:
ret = ksz_ptp_restart_perout(dev);
if (ret)
goto unlock;
break;
}
spin_lock_bh(&ptp_data->clock_lock);
ptp_data->clock_time = *ts;
spin_unlock_bh(&ptp_data->clock_lock);
unlock:
mutex_unlock(&ptp_data->lock);
return ret;
}
static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
u64 base, adj;
bool negative;
u32 data32;
int ret;
mutex_lock(&ptp_data->lock);
if (scaled_ppm) {
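/* scaled_ppm is the requested frequency offset in ppm with a 16-bit
 * binary fractional part. Convert it into an absolute adjustment of
 * the nominal per-tick increment for the hardware rate register.
 */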
base = KSZ_PTP_INC_NS << KSZ_PTP_SUBNS_BITS;
negative = diff_by_scaled_ppm(base, scaled_ppm, &adj);
data32 = (u32)adj;
data32 &= PTP_SUBNANOSEC_M;
if (!negative)
data32 |= PTP_RATE_DIR;
ret = ksz_write32(dev, REG_PTP_SUBNANOSEC_RATE, data32);
if (ret)
goto unlock;
ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE,
PTP_CLK_ADJ_ENABLE);
if (ret)
goto unlock;
} else {
ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE, 0);
if (ret)
goto unlock;
}
unlock:
mutex_unlock(&ptp_data->lock);
return ret;
}
static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
struct timespec64 delta64 = ns_to_timespec64(delta);
s32 sec, nsec;
u16 data16;
int ret;
mutex_lock(&ptp_data->lock);
/* Do not use ns_to_timespec64() here: the hardware applies the seconds
 * and nanoseconds parts separately, so keep both parts with the sign of
 * the delta instead of normalizing the nanoseconds.
 */
sec = div_s64_rem(delta, NSEC_PER_SEC, &nsec);
ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, abs(nsec));
if (ret)
goto unlock;
ret = ksz_write32(dev, REG_PTP_RTC_SEC, abs(sec));
if (ret)
goto unlock;
ret = ksz_read16(dev, REG_PTP_CLK_CTRL, &data16);
if (ret)
goto unlock;
data16 |= PTP_STEP_ADJ;
/* PTP_STEP_DIR -- 0: subtract, 1: add */
if (delta < 0)
data16 &= ~PTP_STEP_DIR;
else
data16 |= PTP_STEP_DIR;
ret = ksz_write16(dev, REG_PTP_CLK_CTRL, data16);
if (ret)
goto unlock;
switch (ptp_data->tou_mode) {
case KSZ_PTP_TOU_IDLE:
break;
case KSZ_PTP_TOU_PEROUT:
ret = ksz_ptp_restart_perout(dev);
if (ret)
goto unlock;
break;
}
spin_lock_bh(&ptp_data->clock_lock);
ptp_data->clock_time = timespec64_add(ptp_data->clock_time, delta64);
spin_unlock_bh(&ptp_data->clock_lock);
unlock:
mutex_unlock(&ptp_data->lock);
return ret;
}
static int ksz_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *req, int on)
{
struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
int ret;
switch (req->type) {
case PTP_CLK_REQ_PEROUT:
mutex_lock(&ptp_data->lock);
ret = ksz_ptp_enable_perout(dev, &req->perout, on);
mutex_unlock(&ptp_data->lock);
break;
default:
return -EOPNOTSUPP;
}
return ret;
}
static int ksz_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
int ret = 0;
switch (func) {
case PTP_PF_NONE:
case PTP_PF_PEROUT:
break;
default:
ret = -1;
break;
}
return ret;
}
/* This function is assigned to do_aux_work in the ptp_clock capabilities */
static long ksz_ptp_do_aux_work(struct ptp_clock_info *ptp)
{
struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
struct timespec64 ts;
int ret;
mutex_lock(&ptp_data->lock);
ret = _ksz_ptp_gettime(dev, &ts);
if (ret)
goto out;
spin_lock_bh(&ptp_data->clock_lock);
ptp_data->clock_time = ts;
spin_unlock_bh(&ptp_data->clock_lock);
out:
mutex_unlock(&ptp_data->lock);
return HZ; /* reschedule in 1 second */
}
static int ksz_ptp_start_clock(struct ksz_device *dev)
{
struct ksz_ptp_data *ptp_data = &dev->ptp_data;
int ret;
ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ENABLE, PTP_CLK_ENABLE);
if (ret)
return ret;
ptp_data->clock_time.tv_sec = 0;
ptp_data->clock_time.tv_nsec = 0;
return 0;
}
int ksz_ptp_clock_register(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct ksz_ptp_data *ptp_data;
int ret;
u8 i;
ptp_data = &dev->ptp_data;
mutex_init(&ptp_data->lock);
spin_lock_init(&ptp_data->clock_lock);
ptp_data->caps.owner = THIS_MODULE;
snprintf(ptp_data->caps.name, 16, "Microchip Clock");
ptp_data->caps.max_adj = KSZ_MAX_DRIFT_CORR;
ptp_data->caps.gettime64 = ksz_ptp_gettime;
ptp_data->caps.settime64 = ksz_ptp_settime;
ptp_data->caps.adjfine = ksz_ptp_adjfine;
ptp_data->caps.adjtime = ksz_ptp_adjtime;
ptp_data->caps.do_aux_work = ksz_ptp_do_aux_work;
ptp_data->caps.enable = ksz_ptp_enable;
ptp_data->caps.verify = ksz_ptp_verify_pin;
ptp_data->caps.n_pins = KSZ_PTP_N_GPIO;
ptp_data->caps.n_per_out = 3;
ret = ksz_ptp_start_clock(dev);
if (ret)
return ret;
for (i = 0; i < KSZ_PTP_N_GPIO; i++) {
struct ptp_pin_desc *ptp_pin = &ptp_data->pin_config[i];
snprintf(ptp_pin->name,
sizeof(ptp_pin->name), "ksz_ptp_pin_%02d", i);
ptp_pin->index = i;
ptp_pin->func = PTP_PF_NONE;
}
ptp_data->caps.pin_config = ptp_data->pin_config;
/* Currently only P2P mode is supported. When the 802_1AS bit is set,
 * all PTP packets are forwarded to the host port and none to the other
 * ports.
 */
ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_TC_P2P | PTP_802_1AS,
PTP_TC_P2P | PTP_802_1AS);
if (ret)
return ret;
ptp_data->clock = ptp_clock_register(&ptp_data->caps, dev->dev);
if (IS_ERR_OR_NULL(ptp_data->clock))
return PTR_ERR(ptp_data->clock);
return 0;
}
void ksz_ptp_clock_unregister(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct ksz_ptp_data *ptp_data;
ptp_data = &dev->ptp_data;
if (ptp_data->clock)
ptp_clock_unregister(ptp_data->clock);
}
static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id)
{
struct ksz_ptp_irq *ptpmsg_irq = dev_id;
struct ksz_device *dev;
struct ksz_port *port;
u32 tstamp_raw;
ktime_t tstamp;
int ret;
port = ptpmsg_irq->port;
dev = port->ksz_dev;
if (ptpmsg_irq->ts_en) {
ret = ksz_read32(dev, ptpmsg_irq->ts_reg, &tstamp_raw);
if (ret)
return IRQ_NONE;
tstamp = ksz_decode_tstamp(tstamp_raw);
port->tstamp_msg = ksz_tstamp_reconstruct(dev, tstamp);
complete(&port->tstamp_msg_comp);
}
return IRQ_HANDLED;
}
static irqreturn_t ksz_ptp_irq_thread_fn(int irq, void *dev_id)
{
struct ksz_irq *ptpirq = dev_id;
unsigned int nhandled = 0;
struct ksz_device *dev;
unsigned int sub_irq;
u16 data;
int ret;
u8 n;
dev = ptpirq->dev;
ret = ksz_read16(dev, ptpirq->reg_status, &data);
if (ret)
goto out;
/* Clear the interrupts (write-1-to-clear) */
ret = ksz_write16(dev, ptpirq->reg_status, data);
if (ret)
return IRQ_NONE;
for (n = 0; n < ptpirq->nirqs; ++n) {
if (data & BIT(n + KSZ_PTP_INT_START)) {
sub_irq = irq_find_mapping(ptpirq->domain, n);
handle_nested_irq(sub_irq);
++nhandled;
}
}
out:
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
static void ksz_ptp_irq_mask(struct irq_data *d)
{
struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
kirq->masked &= ~BIT(d->hwirq + KSZ_PTP_INT_START);
}
static void ksz_ptp_irq_unmask(struct irq_data *d)
{
struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
kirq->masked |= BIT(d->hwirq + KSZ_PTP_INT_START);
}
static void ksz_ptp_irq_bus_lock(struct irq_data *d)
{
struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
mutex_lock(&kirq->dev->lock_irq);
}
static void ksz_ptp_irq_bus_sync_unlock(struct irq_data *d)
{
struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
struct ksz_device *dev = kirq->dev;
int ret;
ret = ksz_write16(dev, kirq->reg_mask, kirq->masked);
if (ret)
dev_err(dev->dev, "failed to change IRQ mask\n");
mutex_unlock(&dev->lock_irq);
}
static const struct irq_chip ksz_ptp_irq_chip = {
.name = "ksz-irq",
.irq_mask = ksz_ptp_irq_mask,
.irq_unmask = ksz_ptp_irq_unmask,
.irq_bus_lock = ksz_ptp_irq_bus_lock,
.irq_bus_sync_unlock = ksz_ptp_irq_bus_sync_unlock,
};
static int ksz_ptp_irq_domain_map(struct irq_domain *d,
unsigned int irq, irq_hw_number_t hwirq)
{
irq_set_chip_data(irq, d->host_data);
irq_set_chip_and_handler(irq, &ksz_ptp_irq_chip, handle_level_irq);
irq_set_noprobe(irq);
return 0;
}
static const struct irq_domain_ops ksz_ptp_irq_domain_ops = {
.map = ksz_ptp_irq_domain_map,
.xlate = irq_domain_xlate_twocell,
};
static void ksz_ptp_msg_irq_free(struct ksz_port *port, u8 n)
{
struct ksz_ptp_irq *ptpmsg_irq;
ptpmsg_irq = &port->ptpmsg_irq[n];
free_irq(ptpmsg_irq->num, ptpmsg_irq);
irq_dispose_mapping(ptpmsg_irq->num);
}
static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
{
u16 ts_reg[] = {REG_PTP_PORT_PDRESP_TS, REG_PTP_PORT_XDELAY_TS,
REG_PTP_PORT_SYNC_TS};
static const char * const name[] = {"pdresp-msg", "xdreq-msg",
"sync-msg"};
const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
struct ksz_ptp_irq *ptpmsg_irq;
ptpmsg_irq = &port->ptpmsg_irq[n];
ptpmsg_irq->port = port;
ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);
snprintf(ptpmsg_irq->name, sizeof(ptpmsg_irq->name), "%s", name[n]);
ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
if (ptpmsg_irq->num < 0)
return ptpmsg_irq->num;
return request_threaded_irq(ptpmsg_irq->num, NULL,
ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
ptpmsg_irq->name, ptpmsg_irq);
}
int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
{
struct ksz_device *dev = ds->priv;
const struct ksz_dev_ops *ops = dev->dev_ops;
struct ksz_port *port = &dev->ports[p];
struct ksz_irq *ptpirq = &port->ptpirq;
int irq;
int ret;
ptpirq->dev = dev;
ptpirq->masked = 0;
ptpirq->nirqs = 3;
ptpirq->reg_mask = ops->get_port_addr(p, REG_PTP_PORT_TX_INT_ENABLE__2);
ptpirq->reg_status = ops->get_port_addr(p,
REG_PTP_PORT_TX_INT_STATUS__2);
snprintf(ptpirq->name, sizeof(ptpirq->name), "ptp-irq-%d", p);
init_completion(&port->tstamp_msg_comp);
ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs,
&ksz_ptp_irq_domain_ops, ptpirq);
if (!ptpirq->domain)
return -ENOMEM;
for (irq = 0; irq < ptpirq->nirqs; irq++)
irq_create_mapping(ptpirq->domain, irq);
ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
if (ptpirq->irq_num < 0) {
ret = ptpirq->irq_num;
goto out;
}
ret = request_threaded_irq(ptpirq->irq_num, NULL, ksz_ptp_irq_thread_fn,
IRQF_ONESHOT, ptpirq->name, ptpirq);
if (ret)
goto out;
for (irq = 0; irq < ptpirq->nirqs; irq++) {
ret = ksz_ptp_msg_irq_setup(port, irq);
if (ret)
goto out_ptp_msg;
}
return 0;
out_ptp_msg:
free_irq(ptpirq->irq_num, ptpirq);
while (irq--)
free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
out:
for (irq = 0; irq < ptpirq->nirqs; irq++)
irq_dispose_mapping(port->ptpmsg_irq[irq].num);
irq_domain_remove(ptpirq->domain);
return ret;
}
void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *port = &dev->ports[p];
struct ksz_irq *ptpirq = &port->ptpirq;
u8 n;
for (n = 0; n < ptpirq->nirqs; n++)
ksz_ptp_msg_irq_free(port, n);
free_irq(ptpirq->irq_num, ptpirq);
irq_dispose_mapping(ptpirq->irq_num);
irq_domain_remove(ptpirq->domain);
}
MODULE_AUTHOR("Christian Eggers <[email protected]>");
MODULE_AUTHOR("Arun Ramadoss <[email protected]>");
MODULE_DESCRIPTION("PTP support for KSZ switch");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/microchip/ksz_ptp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Microchip KSZ9477 switch driver main logic
*
* Copyright (C) 2017-2019 Microchip Technology Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "ksz9477_reg.h"
#include "ksz_common.h"
#include "ksz9477.h"
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
}
static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bool set)
{
regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
bits, set ? bits : 0);
}
static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
{
regmap_update_bits(ksz_regmap_32(dev), addr, bits, set ? bits : 0);
}
static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
u32 bits, bool set)
{
regmap_update_bits(ksz_regmap_32(dev), PORT_CTRL_ADDR(port, offset),
bits, set ? bits : 0);
}
int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
{
u16 frame_size;
if (!dsa_is_cpu_port(dev->ds, port))
return 0;
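/* Account for the Ethernet header including one VLAN tag and the FCS
 * on top of the MTU.
 */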
frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
return regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2,
REG_SW_MTU_MASK, frame_size);
}
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
unsigned int val;
return regmap_read_poll_timeout(ksz_regmap_8(dev), REG_SW_VLAN_CTRL,
val, !(val & VLAN_START), 10, 1000);
}
static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
u32 *vlan_table)
{
int ret;
mutex_lock(&dev->vlan_mutex);
ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);
/* wait to be cleared */
ret = ksz9477_wait_vlan_ctrl_ready(dev);
if (ret) {
dev_dbg(dev->dev, "Failed to read vlan table\n");
goto exit;
}
ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);
ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
exit:
mutex_unlock(&dev->vlan_mutex);
return ret;
}
static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
u32 *vlan_table)
{
int ret;
mutex_lock(&dev->vlan_mutex);
ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);
ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);
/* wait to be cleared */
ret = ksz9477_wait_vlan_ctrl_ready(dev);
if (ret) {
dev_dbg(dev->dev, "Failed to write vlan table\n");
goto exit;
}
ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
/* update vlan cache table */
dev->vlan_cache[vid].table[0] = vlan_table[0];
dev->vlan_cache[vid].table[1] = vlan_table[1];
dev->vlan_cache[vid].table[2] = vlan_table[2];
exit:
mutex_unlock(&dev->vlan_mutex);
return ret;
}
static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
{
ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
}
static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
{
ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
}
static int ksz9477_wait_alu_ready(struct ksz_device *dev)
{
unsigned int val;
return regmap_read_poll_timeout(ksz_regmap_32(dev), REG_SW_ALU_CTRL__4,
val, !(val & ALU_START), 10, 1000);
}
static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
{
unsigned int val;
return regmap_read_poll_timeout(ksz_regmap_32(dev),
REG_SW_ALU_STAT_CTRL__4,
val, !(val & ALU_STAT_START),
10, 1000);
}
int ksz9477_reset_switch(struct ksz_device *dev)
{
u8 data8;
u32 data32;
/* reset switch */
ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
/* turn off SPI DO Edge select */
regmap_update_bits(ksz_regmap_8(dev), REG_SW_GLOBAL_SERIAL_CTRL_0,
SPI_AUTO_EDGE_DETECTION, 0);
/* default configuration */
ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
/* disable interrupts */
ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
/* KSZ9893 compatible chips do not support refclk configuration */
if (dev->chip_id == KSZ9893_CHIP_ID ||
dev->chip_id == KSZ8563_CHIP_ID ||
dev->chip_id == KSZ9563_CHIP_ID)
return 0;
data8 = SW_ENABLE_REFCLKO;
if (dev->synclko_disable)
data8 = 0;
else if (dev->synclko_125)
data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);
return 0;
}
void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
struct ksz_port *p = &dev->ports[port];
unsigned int val;
u32 data;
int ret;
/* retain the flush/freeze bit */
data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
data |= MIB_COUNTER_READ;
data |= (addr << MIB_COUNTER_INDEX_S);
ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
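/* The read request bit self-clears once the selected counter value is
 * ready in REG_PORT_MIB_DATA.
 */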
ret = regmap_read_poll_timeout(ksz_regmap_32(dev),
PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
val, !(val & MIB_COUNTER_READ), 10, 1000);
/* failed to read the MIB counter */
if (ret) {
dev_dbg(dev->dev, "Failed to get MIB\n");
return;
}
/* count resets upon read */
ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
*cnt += data;
}
void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
addr = dev->info->mib_names[addr].index;
ksz9477_r_mib_cnt(dev, port, addr, cnt);
}
void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
struct ksz_port *p = &dev->ports[port];
/* enable/disable the port for flush/freeze function */
mutex_lock(&p->mib.cnt_mutex);
ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);
/* used by MIB counter reading code to know freeze is enabled */
p->freeze = freeze;
mutex_unlock(&p->mib.cnt_mutex);
}
void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
/* flush all enabled port MIB counters */
mutex_lock(&mib->cnt_mutex);
ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
MIB_COUNTER_FLUSH_FREEZE);
ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
mutex_unlock(&mib->cnt_mutex);
}
static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
u16 *data)
{
/* The KSZ8563R does not have extended registers, but the BMSR_ESTATEN
 * and BMSR_ERCAP bits are set anyway.
 */
if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
*data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
}
int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
u16 val = 0xffff;
int ret;
/* No real PHY after this. Simulate the PHY.
 * A fixed PHY can be set up in the device tree, but this function is
 * still called for that port during initialization.
 * For an RGMII PHY there is no way to access it, so the fixed PHY
 * should be used. For the SGMII PHY the supporting code will be added
 * later.
 */
if (!dev->info->internal_phy[addr]) {
struct ksz_port *p = &dev->ports[addr];
switch (reg) {
case MII_BMCR:
val = 0x1140;
break;
case MII_BMSR:
val = 0x796d;
break;
case MII_PHYSID1:
val = 0x0022;
break;
case MII_PHYSID2:
val = 0x1631;
break;
case MII_ADVERTISE:
val = 0x05e1;
break;
case MII_LPA:
val = 0xc5e1;
break;
case MII_CTRL1000:
val = 0x0700;
break;
case MII_STAT1000:
if (p->phydev.speed == SPEED_1000)
val = 0x3800;
else
val = 0;
break;
}
} else {
ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
if (ret)
return ret;
ksz9477_r_phy_quirks(dev, addr, reg, &val);
}
*data = val;
return 0;
}
int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
u32 mask, val32;
/* No real PHY after this. */
if (!dev->info->internal_phy[addr])
return 0;
if (reg < 0x10)
return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
/* Errata: When using SPI, I2C, or in-band register access,
* writes to certain PHY registers should be performed as
* 32-bit writes instead of 16-bit writes.
*/
val32 = val;
mask = 0xffff;
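/* A 32-bit access covers a pair of 16-bit PHY registers: the even
 * register maps to the upper half-word, the odd one to the lower.
 */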
if ((reg & 1) == 0) {
val32 <<= 16;
mask <<= 16;
}
reg &= ~1;
return ksz_prmw32(dev, addr, 0x100 + (reg << 1), mask, val32);
}
void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}
void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
const u16 *regs = dev->info->regs;
u8 data;
regmap_update_bits(ksz_regmap_8(dev), REG_SW_LUE_CTRL_2,
SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
if (port < dev->info->port_cnt) {
/* flush individual port */
ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
if (!(data & PORT_LEARN_DISABLE))
ksz_pwrite8(dev, port, regs[P_STP_CTRL],
data | PORT_LEARN_DISABLE);
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
} else {
/* flush all */
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
}
}
int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
bool flag, struct netlink_ext_ack *extack)
{
if (flag) {
ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
PORT_VLAN_LOOKUP_VID_0, true);
ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
} else {
ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
PORT_VLAN_LOOKUP_VID_0, false);
}
return 0;
}
int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
u32 vlan_table[3];
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
int err;
err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
return err;
}
vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
if (untagged)
vlan_table[1] |= BIT(port);
else
vlan_table[1] &= ~BIT(port);
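/* The CPU port is never part of the untagged set, so frames towards
 * the host keep their VLAN tag.
 */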
vlan_table[1] &= ~(BIT(dev->cpu_port));
vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
return err;
}
/* change PVID */
if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);
return 0;
}
int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
u32 vlan_table[3];
u16 pvid;
ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
pvid = pvid & 0xFFF;
if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
dev_dbg(dev->dev, "Failed to get vlan table\n");
return -ETIMEDOUT;
}
vlan_table[2] &= ~BIT(port);
if (pvid == vlan->vid)
pvid = 1;
if (untagged)
vlan_table[1] &= ~BIT(port);
if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
dev_dbg(dev->dev, "Failed to set vlan table\n");
return -ETIMEDOUT;
}
ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
return 0;
}
int ksz9477_fdb_add(struct ksz_device *dev, int port,
const unsigned char *addr, u16 vid, struct dsa_db db)
{
u32 alu_table[4];
u32 data;
int ret = 0;
mutex_lock(&dev->alu_mutex);
/* find any entry with mac & vid */
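/* The FID/VID occupies the upper half of ALU_INDEX_0 and the two high
 * MAC bytes the lower half; the remaining four MAC bytes go into
 * ALU_INDEX_1.
 */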
data = vid << ALU_FID_INDEX_S;
data |= ((addr[0] << 8) | addr[1]);
ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
data = ((addr[2] << 24) | (addr[3] << 16));
data |= ((addr[4] << 8) | addr[5]);
ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
/* start read operation */
ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
/* wait to be finished */
ret = ksz9477_wait_alu_ready(dev);
if (ret) {
dev_dbg(dev->dev, "Failed to read ALU\n");
goto exit;
}
/* read ALU entry */
ksz9477_read_table(dev, alu_table);
/* update ALU entry */
alu_table[0] = ALU_V_STATIC_VALID;
alu_table[1] |= BIT(port);
if (vid)
alu_table[1] |= ALU_V_USE_FID;
alu_table[2] = (vid << ALU_V_FID_S);
alu_table[2] |= ((addr[0] << 8) | addr[1]);
alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
alu_table[3] |= ((addr[4] << 8) | addr[5]);
ksz9477_write_table(dev, alu_table);
ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
/* wait to be finished */
ret = ksz9477_wait_alu_ready(dev);
if (ret)
dev_dbg(dev->dev, "Failed to write ALU\n");
exit:
mutex_unlock(&dev->alu_mutex);
return ret;
}
int ksz9477_fdb_del(struct ksz_device *dev, int port,
const unsigned char *addr, u16 vid, struct dsa_db db)
{
u32 alu_table[4];
u32 data;
int ret = 0;
mutex_lock(&dev->alu_mutex);
/* read any entry with mac & vid */
data = vid << ALU_FID_INDEX_S;
data |= ((addr[0] << 8) | addr[1]);
ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
data = ((addr[2] << 24) | (addr[3] << 16));
data |= ((addr[4] << 8) | addr[5]);
ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
/* start read operation */
ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
/* wait to be finished */
ret = ksz9477_wait_alu_ready(dev);
if (ret) {
dev_dbg(dev->dev, "Failed to read ALU\n");
goto exit;
}
ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
if (alu_table[0] & ALU_V_STATIC_VALID) {
ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
/* clear forwarding port */
alu_table[1] &= ~BIT(port);
/* if there is no port to forward, clear table */
if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
alu_table[0] = 0;
alu_table[1] = 0;
alu_table[2] = 0;
alu_table[3] = 0;
}
} else {
alu_table[0] = 0;
alu_table[1] = 0;
alu_table[2] = 0;
alu_table[3] = 0;
}
ksz9477_write_table(dev, alu_table);
ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
/* wait to be finished */
ret = ksz9477_wait_alu_ready(dev);
if (ret)
dev_dbg(dev->dev, "Failed to write ALU\n");
exit:
mutex_unlock(&dev->alu_mutex);
return ret;
}
static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
{
alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
ALU_V_PRIO_AGE_CNT_M;
alu->mstp = alu_table[0] & ALU_V_MSTP_M;
alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;
alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;
alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
alu->mac[1] = alu_table[2] & 0xFF;
alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
alu->mac[5] = alu_table[3] & 0xFF;
}
int ksz9477_fdb_dump(struct ksz_device *dev, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
int ret = 0;
u32 ksz_data;
u32 alu_table[4];
struct alu_struct alu;
int timeout;
mutex_lock(&dev->alu_mutex);
/* start ALU search */
ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);
do {
timeout = 1000;
do {
ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
break;
usleep_range(1, 10);
} while (timeout-- > 0);
if (!timeout) {
dev_dbg(dev->dev, "Failed to search ALU\n");
ret = -ETIMEDOUT;
goto exit;
}
if (!(ksz_data & ALU_VALID))
continue;
/* read ALU table */
ksz9477_read_table(dev, alu_table);
ksz9477_convert_alu(&alu, alu_table);
if (alu.port_forward & BIT(port)) {
ret = cb(alu.mac, alu.fid, alu.is_static, data);
if (ret)
goto exit;
}
} while (ksz_data & ALU_START);
exit:
/* stop ALU search */
ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);
mutex_unlock(&dev->alu_mutex);
return ret;
}
int ksz9477_mdb_add(struct ksz_device *dev, int port,
const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
u32 static_table[4];
const u8 *shifts;
const u32 *masks;
u32 data;
int index;
u32 mac_hi, mac_lo;
int err = 0;
shifts = dev->info->shifts;
masks = dev->info->masks;
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
mutex_lock(&dev->alu_mutex);
for (index = 0; index < dev->info->num_statics; index++) {
/* find empty slot first */
data = (index << shifts[ALU_STAT_INDEX]) |
masks[ALU_STAT_READ] | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
err = ksz9477_wait_alu_sta_ready(dev);
if (err) {
dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
goto exit;
}
/* read ALU static table */
ksz9477_read_table(dev, static_table);
if (static_table[0] & ALU_V_STATIC_VALID) {
/* check this has same vid & mac address */
if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
static_table[3] == mac_lo) {
/* found matching one */
break;
}
} else {
/* found empty one */
break;
}
}
/* no available entry */
if (index == dev->info->num_statics) {
err = -ENOSPC;
goto exit;
}
/* add entry */
static_table[0] = ALU_V_STATIC_VALID;
static_table[1] |= BIT(port);
if (mdb->vid)
static_table[1] |= ALU_V_USE_FID;
static_table[2] = (mdb->vid << ALU_V_FID_S);
static_table[2] |= mac_hi;
static_table[3] = mac_lo;
ksz9477_write_table(dev, static_table);
data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
if (ksz9477_wait_alu_sta_ready(dev))
dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
exit:
mutex_unlock(&dev->alu_mutex);
return err;
}
int ksz9477_mdb_del(struct ksz_device *dev, int port,
const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
u32 static_table[4];
const u8 *shifts;
const u32 *masks;
u32 data;
int index;
int ret = 0;
u32 mac_hi, mac_lo;
shifts = dev->info->shifts;
masks = dev->info->masks;
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
mutex_lock(&dev->alu_mutex);
for (index = 0; index < dev->info->num_statics; index++) {
/* search for a matching entry */
data = (index << shifts[ALU_STAT_INDEX]) |
masks[ALU_STAT_READ] | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
ret = ksz9477_wait_alu_sta_ready(dev);
if (ret) {
dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
goto exit;
}
/* read ALU static table */
ksz9477_read_table(dev, static_table);
if (static_table[0] & ALU_V_STATIC_VALID) {
/* check this has same vid & mac address */
if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
static_table[3] == mac_lo) {
/* found matching one */
break;
}
}
}
/* no available entry */
if (index == dev->info->num_statics)
goto exit;
/* clear port */
static_table[1] &= ~BIT(port);
if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
/* delete entry */
static_table[0] = 0;
static_table[1] = 0;
static_table[2] = 0;
static_table[3] = 0;
}
ksz9477_write_table(dev, static_table);
data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
ret = ksz9477_wait_alu_sta_ready(dev);
if (ret)
dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
exit:
mutex_unlock(&dev->alu_mutex);
return ret;
}
int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
{
u8 data;
int p;
/* Limit to one sniffer port.
 * Check if any of the ports is already set up for sniffing.
 * If so, instruct the user to remove the previous entry and exit.
 */
for (p = 0; p < dev->info->port_cnt; p++) {
/* Skip the current sniffing port */
if (p == mirror->to_local_port)
continue;
ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
if (data & PORT_MIRROR_SNIFFER) {
NL_SET_ERR_MSG_MOD(extack,
"Sniffer port is already configured, delete existing rules & retry");
return -EBUSY;
}
}
if (ingress)
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
else
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
/* configure mirror port */
ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
PORT_MIRROR_SNIFFER, true);
ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
return 0;
}
void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
bool in_use = false;
u8 data;
int p;
if (mirror->ingress)
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
else
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
/* Check if any of the ports still refers to the sniffer port */
for (p = 0; p < dev->info->port_cnt; p++) {
ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
in_use = true;
break;
}
}
/* delete sniffing if there are no other mirroring rules */
if (!in_use)
ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
PORT_MIRROR_SNIFFER, false);
}
static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
{
phy_interface_t interface;
bool gbit;
if (dev->info->internal_phy[port])
return PHY_INTERFACE_MODE_NA;
gbit = ksz_get_gbit(dev, port);
interface = ksz_get_xmii(dev, port, gbit);
return interface;
}
void ksz9477_get_caps(struct ksz_device *dev, int port,
struct phylink_config *config)
{
config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
MAC_SYM_PAUSE;
if (dev->info->gbit_capable[port])
config->mac_capabilities |= MAC_1000FD;
}
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
u32 secs = msecs / 1000;
u8 value;
u8 data;
int ret;
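/* The aging period is an 11-bit value in seconds: bits 7:0 live in
 * REG_SW_LUE_CTRL_3, bits 10:8 in the SW_AGE_CNT field of
 * REG_SW_LUE_CTRL_0.
 */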
value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
if (ret < 0)
return ret;
data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
if (ret < 0)
return ret;
value &= ~SW_AGE_CNT_M;
value |= FIELD_PREP(SW_AGE_CNT_M, data);
return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
}
void ksz9477_port_queue_split(struct ksz_device *dev, int port)
{
u8 data;
if (dev->info->num_tx_queues == 8)
data = PORT_EIGHT_QUEUE;
else if (dev->info->num_tx_queues == 4)
data = PORT_FOUR_QUEUE;
else if (dev->info->num_tx_queues == 2)
data = PORT_TWO_QUEUE;
else
data = PORT_SINGLE_QUEUE;
ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
}
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct dsa_switch *ds = dev->ds;
u16 data16;
u8 member;
/* enable tail tagging for the host port */
if (cpu_port)
ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
true);
ksz9477_port_queue_split(dev, port);
ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
/* set back pressure */
ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
/* disable DiffServ priority */
ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
/* replace priority */
ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
false);
ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
MTI_PVID_REPLACE, false);
/* enable 802.1p priority */
ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
/* force flow control for non-PHY ports only */
ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
!dev->info->internal_phy[port]);
if (cpu_port)
member = dsa_user_ports(ds);
else
member = BIT(dsa_upstream_port(ds, port));
ksz9477_cfg_port_member(dev, port, member);
/* clear pending interrupts */
if (dev->info->internal_phy[port])
ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}
void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p;
int i;
for (i = 0; i < dev->info->port_cnt; i++) {
if (dsa_is_cpu_port(ds, i) &&
(dev->info->cpu_ports & (1 << i))) {
phy_interface_t interface;
const char *prev_msg;
const char *prev_mode;
dev->cpu_port = i;
p = &dev->ports[i];
/* Read from the XMII register to determine the host port
 * interface. If it is set explicitly in the device tree,
 * note the difference to help debugging.
 */
interface = ksz9477_get_interface(dev, i);
if (!p->interface) {
if (dev->compat_interface) {
dev_warn(dev->dev,
"Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
"Please update your device tree.\n",
i);
p->interface = dev->compat_interface;
} else {
p->interface = interface;
}
}
if (interface && interface != p->interface) {
prev_msg = " instead of ";
prev_mode = phy_modes(interface);
} else {
prev_msg = "";
prev_mode = "";
}
dev_info(dev->dev,
"Port%d: using phy mode %s%s%s\n",
i,
phy_modes(p->interface),
prev_msg,
prev_mode);
/* enable cpu port */
ksz9477_port_setup(dev, i, true);
}
}
for (i = 0; i < dev->info->port_cnt; i++) {
if (i == dev->cpu_port)
continue;
ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
}
}
int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
const u32 *masks;
u32 data;
int ret;
masks = dev->info->masks;
/* Enable Reserved multicast table */
ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
/* Set the Override bit for forwarding BPDU packet to CPU */
ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
ALU_V_OVERRIDE | BIT(dev->cpu_port));
if (ret < 0)
return ret;
data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
if (ret < 0)
return ret;
/* wait to be finished */
ret = ksz9477_wait_alu_sta_ready(dev);
if (ret < 0) {
dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
return ret;
}
return 0;
}
int ksz9477_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
int ret = 0;
ds->mtu_enforcement_ingress = true;
/* Required for port partitioning. */
ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
true);
/* Does not work correctly with tail tagging. */
ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);
/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
/* Now we can configure default MTU value */
ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
if (ret)
return ret;
/* queue based egress rate limit */
ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
/* enable global MIB counter freeze function */
ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
return 0;
}
u32 ksz9477_get_port_addr(int port, int offset)
{
return PORT_CTRL_ADDR(port, offset);
}
int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
{
val = val >> 8;
return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
}
int ksz9477_switch_init(struct ksz_device *dev)
{
u8 data8;
int ret;
dev->port_mask = (1 << dev->info->port_cnt) - 1;
/* turn off SPI DO Edge select */
ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
if (ret)
return ret;
data8 &= ~SPI_AUTO_EDGE_DETECTION;
ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
if (ret)
return ret;
return 0;
}
void ksz9477_switch_exit(struct ksz_device *dev)
{
ksz9477_reset_switch(dev);
}
MODULE_AUTHOR("Woojung Huh <[email protected]>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/microchip/ksz9477.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Microchip KSZ8863 series register access through SMI
*
* Copyright (C) 2019 Pengutronix, Michael Grzeschik <[email protected]>
*/
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include "ksz8.h"
#include "ksz_common.h"
/* Serial Management Interface (SMI) uses the following frame format:
 *
 *       preamble|start|Read/Write|  PHY   |  REG  |TA|    Data bits     | Idle
 *               |frame| OP code  |address |address|  |                  |
 * read | 32x1's |  01 |    00    | 1xRRR  | RRRRR |Z0| 00000000DDDDDDDD | Z
 * write| 32x1's |  01 |    00    | 0xRRR  | RRRRR |10| xxxxxxxxDDDDDDDD | Z
 *
 */
#define SMI_KSZ88XX_READ_PHY BIT(4)
static int ksz8863_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
void *val_buf, size_t val_len)
{
struct ksz_device *dev = ctx;
struct mdio_device *mdev;
u8 reg = *(u8 *)reg_buf;
u8 *val = val_buf;
int i, ret = 0;
mdev = dev->priv;
mutex_lock_nested(&mdev->bus->mdio_lock, MDIO_MUTEX_NESTED);
for (i = 0; i < val_len; i++) {
int tmp = reg + i;
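/* The upper three bits of the switch register address form the low
 * bits of the SMI PHY address; bit 4 of the PHY address flags a read
 * access (see the frame format above).
 */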
ret = __mdiobus_read(mdev->bus, ((tmp & 0xE0) >> 5) |
SMI_KSZ88XX_READ_PHY, tmp);
if (ret < 0)
goto out;
val[i] = ret;
}
ret = 0;
out:
mutex_unlock(&mdev->bus->mdio_lock);
return ret;
}
static int ksz8863_mdio_write(void *ctx, const void *data, size_t count)
{
struct ksz_device *dev = ctx;
struct mdio_device *mdev;
int i, ret = 0;
u32 reg;
u8 *val;
mdev = dev->priv;
val = (u8 *)(data + 4);
reg = *(u32 *)data;
mutex_lock_nested(&mdev->bus->mdio_lock, MDIO_MUTEX_NESTED);
for (i = 0; i < (count - 4); i++) {
int tmp = reg + i;
ret = __mdiobus_write(mdev->bus, ((tmp & 0xE0) >> 5),
tmp, val[i]);
if (ret < 0)
goto out;
}
out:
mutex_unlock(&mdev->bus->mdio_lock);
return ret;
}
static const struct regmap_bus regmap_smi[] = {
{
.read = ksz8863_mdio_read,
.write = ksz8863_mdio_write,
},
{
.read = ksz8863_mdio_read,
.write = ksz8863_mdio_write,
.val_format_endian_default = REGMAP_ENDIAN_BIG,
},
{
.read = ksz8863_mdio_read,
.write = ksz8863_mdio_write,
.val_format_endian_default = REGMAP_ENDIAN_BIG,
}
};
static const struct regmap_config ksz8863_regmap_config[] = {
{
.name = "#8",
.reg_bits = 8,
.pad_bits = 24,
.val_bits = 8,
.cache_type = REGCACHE_NONE,
.lock = ksz_regmap_lock,
.unlock = ksz_regmap_unlock,
.max_register = U8_MAX,
},
{
.name = "#16",
.reg_bits = 8,
.pad_bits = 24,
.val_bits = 16,
.cache_type = REGCACHE_NONE,
.lock = ksz_regmap_lock,
.unlock = ksz_regmap_unlock,
.max_register = U8_MAX,
},
{
.name = "#32",
.reg_bits = 8,
.pad_bits = 24,
.val_bits = 32,
.cache_type = REGCACHE_NONE,
.lock = ksz_regmap_lock,
.unlock = ksz_regmap_unlock,
.max_register = U8_MAX,
}
};
static int ksz8863_smi_probe(struct mdio_device *mdiodev)
{
struct device *ddev = &mdiodev->dev;
const struct ksz_chip_data *chip;
struct regmap_config rc;
struct ksz_device *dev;
int ret;
int i;
dev = ksz_switch_alloc(&mdiodev->dev, mdiodev);
if (!dev)
return -ENOMEM;
chip = device_get_match_data(ddev);
if (!chip)
return -EINVAL;
for (i = 0; i < __KSZ_NUM_REGMAPS; i++) {
rc = ksz8863_regmap_config[i];
rc.lock_arg = &dev->regmap_mutex;
rc.wr_table = chip->wr_table;
rc.rd_table = chip->rd_table;
dev->regmap[i] = devm_regmap_init(&mdiodev->dev,
®map_smi[i], dev,
&rc);
if (IS_ERR(dev->regmap[i])) {
return dev_err_probe(&mdiodev->dev,
PTR_ERR(dev->regmap[i]),
"Failed to initialize regmap%i\n",
ksz8863_regmap_config[i].val_bits);
}
}
if (mdiodev->dev.platform_data)
dev->pdata = mdiodev->dev.platform_data;
ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
return ret;
dev_set_drvdata(&mdiodev->dev, dev);
return 0;
}
static void ksz8863_smi_remove(struct mdio_device *mdiodev)
{
struct ksz_device *dev = dev_get_drvdata(&mdiodev->dev);
if (dev)
ksz_switch_remove(dev);
}
static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
{
struct ksz_device *dev = dev_get_drvdata(&mdiodev->dev);
if (dev)
dsa_switch_shutdown(dev->ds);
dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id ksz8863_dt_ids[] = {
{
.compatible = "microchip,ksz8863",
.data = &ksz_switch_chips[KSZ8830]
},
{
.compatible = "microchip,ksz8873",
.data = &ksz_switch_chips[KSZ8830]
},
{ },
};
MODULE_DEVICE_TABLE(of, ksz8863_dt_ids);
static struct mdio_driver ksz8863_driver = {
.probe = ksz8863_smi_probe,
.remove = ksz8863_smi_remove,
.shutdown = ksz8863_smi_shutdown,
.mdiodrv.driver = {
.name = "ksz8863-switch",
.of_match_table = ksz8863_dt_ids,
},
};
mdio_module_driver(ksz8863_driver);
MODULE_AUTHOR("Michael Grzeschik <[email protected]>");
MODULE_DESCRIPTION("Microchip KSZ8863 SMI Switch driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/microchip/ksz8863_smi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Microchip switch driver main logic
*
* Copyright (C) 2017-2019 Microchip Technology Inc.
*/
#include <linux/delay.h>
#include <linux/dsa/ksz_common.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/micrel_phy.h>
#include <net/dsa.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include "ksz_common.h"
#include "ksz_ptp.h"
#include "ksz8.h"
#include "ksz9477.h"
#include "lan937x.h"
#define MIB_COUNTER_NUM 0x20
struct ksz_stats_raw {
u64 rx_hi;
u64 rx_undersize;
u64 rx_fragments;
u64 rx_oversize;
u64 rx_jabbers;
u64 rx_symbol_err;
u64 rx_crc_err;
u64 rx_align_err;
u64 rx_mac_ctrl;
u64 rx_pause;
u64 rx_bcast;
u64 rx_mcast;
u64 rx_ucast;
u64 rx_64_or_less;
u64 rx_65_127;
u64 rx_128_255;
u64 rx_256_511;
u64 rx_512_1023;
u64 rx_1024_1522;
u64 rx_1523_2000;
u64 rx_2001;
u64 tx_hi;
u64 tx_late_col;
u64 tx_pause;
u64 tx_bcast;
u64 tx_mcast;
u64 tx_ucast;
u64 tx_deferred;
u64 tx_total_col;
u64 tx_exc_col;
u64 tx_single_col;
u64 tx_mult_col;
u64 rx_total;
u64 tx_total;
u64 rx_discards;
u64 tx_discards;
};
struct ksz88xx_stats_raw {
u64 rx;
u64 rx_hi;
u64 rx_undersize;
u64 rx_fragments;
u64 rx_oversize;
u64 rx_jabbers;
u64 rx_symbol_err;
u64 rx_crc_err;
u64 rx_align_err;
u64 rx_mac_ctrl;
u64 rx_pause;
u64 rx_bcast;
u64 rx_mcast;
u64 rx_ucast;
u64 rx_64_or_less;
u64 rx_65_127;
u64 rx_128_255;
u64 rx_256_511;
u64 rx_512_1023;
u64 rx_1024_1522;
u64 tx;
u64 tx_hi;
u64 tx_late_col;
u64 tx_pause;
u64 tx_bcast;
u64 tx_mcast;
u64 tx_ucast;
u64 tx_deferred;
u64 tx_total_col;
u64 tx_exc_col;
u64 tx_single_col;
u64 tx_mult_col;
u64 rx_discards;
u64 tx_discards;
};
static const struct ksz_mib_names ksz88xx_mib_names[] = {
{ 0x00, "rx" },
{ 0x01, "rx_hi" },
{ 0x02, "rx_undersize" },
{ 0x03, "rx_fragments" },
{ 0x04, "rx_oversize" },
{ 0x05, "rx_jabbers" },
{ 0x06, "rx_symbol_err" },
{ 0x07, "rx_crc_err" },
{ 0x08, "rx_align_err" },
{ 0x09, "rx_mac_ctrl" },
{ 0x0a, "rx_pause" },
{ 0x0b, "rx_bcast" },
{ 0x0c, "rx_mcast" },
{ 0x0d, "rx_ucast" },
{ 0x0e, "rx_64_or_less" },
{ 0x0f, "rx_65_127" },
{ 0x10, "rx_128_255" },
{ 0x11, "rx_256_511" },
{ 0x12, "rx_512_1023" },
{ 0x13, "rx_1024_1522" },
{ 0x14, "tx" },
{ 0x15, "tx_hi" },
{ 0x16, "tx_late_col" },
{ 0x17, "tx_pause" },
{ 0x18, "tx_bcast" },
{ 0x19, "tx_mcast" },
{ 0x1a, "tx_ucast" },
{ 0x1b, "tx_deferred" },
{ 0x1c, "tx_total_col" },
{ 0x1d, "tx_exc_col" },
{ 0x1e, "tx_single_col" },
{ 0x1f, "tx_mult_col" },
{ 0x100, "rx_discards" },
{ 0x101, "tx_discards" },
};
static const struct ksz_mib_names ksz9477_mib_names[] = {
{ 0x00, "rx_hi" },
{ 0x01, "rx_undersize" },
{ 0x02, "rx_fragments" },
{ 0x03, "rx_oversize" },
{ 0x04, "rx_jabbers" },
{ 0x05, "rx_symbol_err" },
{ 0x06, "rx_crc_err" },
{ 0x07, "rx_align_err" },
{ 0x08, "rx_mac_ctrl" },
{ 0x09, "rx_pause" },
{ 0x0A, "rx_bcast" },
{ 0x0B, "rx_mcast" },
{ 0x0C, "rx_ucast" },
{ 0x0D, "rx_64_or_less" },
{ 0x0E, "rx_65_127" },
{ 0x0F, "rx_128_255" },
{ 0x10, "rx_256_511" },
{ 0x11, "rx_512_1023" },
{ 0x12, "rx_1024_1522" },
{ 0x13, "rx_1523_2000" },
{ 0x14, "rx_2001" },
{ 0x15, "tx_hi" },
{ 0x16, "tx_late_col" },
{ 0x17, "tx_pause" },
{ 0x18, "tx_bcast" },
{ 0x19, "tx_mcast" },
{ 0x1A, "tx_ucast" },
{ 0x1B, "tx_deferred" },
{ 0x1C, "tx_total_col" },
{ 0x1D, "tx_exc_col" },
{ 0x1E, "tx_single_col" },
{ 0x1F, "tx_mult_col" },
{ 0x80, "rx_total" },
{ 0x81, "tx_total" },
{ 0x82, "rx_discards" },
{ 0x83, "tx_discards" },
};
static const struct ksz_dev_ops ksz8_dev_ops = {
.setup = ksz8_setup,
.get_port_addr = ksz8_get_port_addr,
.cfg_port_member = ksz8_cfg_port_member,
.flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
.port_setup = ksz8_port_setup,
.r_phy = ksz8_r_phy,
.w_phy = ksz8_w_phy,
.r_mib_cnt = ksz8_r_mib_cnt,
.r_mib_pkt = ksz8_r_mib_pkt,
.r_mib_stat64 = ksz88xx_r_mib_stats64,
.freeze_mib = ksz8_freeze_mib,
.port_init_cnt = ksz8_port_init_cnt,
.fdb_dump = ksz8_fdb_dump,
.fdb_add = ksz8_fdb_add,
.fdb_del = ksz8_fdb_del,
.mdb_add = ksz8_mdb_add,
.mdb_del = ksz8_mdb_del,
.vlan_filtering = ksz8_port_vlan_filtering,
.vlan_add = ksz8_port_vlan_add,
.vlan_del = ksz8_port_vlan_del,
.mirror_add = ksz8_port_mirror_add,
.mirror_del = ksz8_port_mirror_del,
.get_caps = ksz8_get_caps,
.config_cpu_port = ksz8_config_cpu_port,
.enable_stp_addr = ksz8_enable_stp_addr,
.reset = ksz8_reset_switch,
.init = ksz8_switch_init,
.exit = ksz8_switch_exit,
.change_mtu = ksz8_change_mtu,
};
static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev, int speed,
int duplex, bool tx_pause,
bool rx_pause);
static const struct ksz_dev_ops ksz9477_dev_ops = {
.setup = ksz9477_setup,
.get_port_addr = ksz9477_get_port_addr,
.cfg_port_member = ksz9477_cfg_port_member,
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
.port_setup = ksz9477_port_setup,
.set_ageing_time = ksz9477_set_ageing_time,
.r_phy = ksz9477_r_phy,
.w_phy = ksz9477_w_phy,
.r_mib_cnt = ksz9477_r_mib_cnt,
.r_mib_pkt = ksz9477_r_mib_pkt,
.r_mib_stat64 = ksz_r_mib_stats64,
.freeze_mib = ksz9477_freeze_mib,
.port_init_cnt = ksz9477_port_init_cnt,
.vlan_filtering = ksz9477_port_vlan_filtering,
.vlan_add = ksz9477_port_vlan_add,
.vlan_del = ksz9477_port_vlan_del,
.mirror_add = ksz9477_port_mirror_add,
.mirror_del = ksz9477_port_mirror_del,
.get_caps = ksz9477_get_caps,
.fdb_dump = ksz9477_fdb_dump,
.fdb_add = ksz9477_fdb_add,
.fdb_del = ksz9477_fdb_del,
.mdb_add = ksz9477_mdb_add,
.mdb_del = ksz9477_mdb_del,
.change_mtu = ksz9477_change_mtu,
.phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = ksz9477_config_cpu_port,
.tc_cbs_set_cinc = ksz9477_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = ksz9477_reset_switch,
.init = ksz9477_switch_init,
.exit = ksz9477_switch_exit,
};
static const struct ksz_dev_ops lan937x_dev_ops = {
.setup = lan937x_setup,
.teardown = lan937x_teardown,
.get_port_addr = ksz9477_get_port_addr,
.cfg_port_member = ksz9477_cfg_port_member,
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
.port_setup = lan937x_port_setup,
.set_ageing_time = lan937x_set_ageing_time,
.r_phy = lan937x_r_phy,
.w_phy = lan937x_w_phy,
.r_mib_cnt = ksz9477_r_mib_cnt,
.r_mib_pkt = ksz9477_r_mib_pkt,
.r_mib_stat64 = ksz_r_mib_stats64,
.freeze_mib = ksz9477_freeze_mib,
.port_init_cnt = ksz9477_port_init_cnt,
.vlan_filtering = ksz9477_port_vlan_filtering,
.vlan_add = ksz9477_port_vlan_add,
.vlan_del = ksz9477_port_vlan_del,
.mirror_add = ksz9477_port_mirror_add,
.mirror_del = ksz9477_port_mirror_del,
.get_caps = lan937x_phylink_get_caps,
.setup_rgmii_delay = lan937x_setup_rgmii_delay,
.fdb_dump = ksz9477_fdb_dump,
.fdb_add = ksz9477_fdb_add,
.fdb_del = ksz9477_fdb_del,
.mdb_add = ksz9477_mdb_add,
.mdb_del = ksz9477_mdb_del,
.change_mtu = lan937x_change_mtu,
.phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = lan937x_config_cpu_port,
.tc_cbs_set_cinc = lan937x_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = lan937x_reset_switch,
.init = lan937x_switch_init,
.exit = lan937x_switch_exit,
};
static const u16 ksz8795_regs[] = {
[REG_IND_CTRL_0] = 0x6E,
[REG_IND_DATA_8] = 0x70,
[REG_IND_DATA_CHECK] = 0x72,
[REG_IND_DATA_HI] = 0x71,
[REG_IND_DATA_LO] = 0x75,
[REG_IND_MIB_CHECK] = 0x74,
[REG_IND_BYTE] = 0xA0,
[P_FORCE_CTRL] = 0x0C,
[P_LINK_STATUS] = 0x0E,
[P_LOCAL_CTRL] = 0x07,
[P_NEG_RESTART_CTRL] = 0x0D,
[P_REMOTE_STATUS] = 0x08,
[P_SPEED_STATUS] = 0x09,
[S_TAIL_TAG_CTRL] = 0x0C,
[P_STP_CTRL] = 0x02,
[S_START_CTRL] = 0x01,
[S_BROADCAST_CTRL] = 0x06,
[S_MULTICAST_CTRL] = 0x04,
[P_XMII_CTRL_0] = 0x06,
[P_XMII_CTRL_1] = 0x06,
};
static const u32 ksz8795_masks[] = {
[PORT_802_1P_REMAPPING] = BIT(7),
[SW_TAIL_TAG_ENABLE] = BIT(1),
[MIB_COUNTER_OVERFLOW] = BIT(6),
[MIB_COUNTER_VALID] = BIT(5),
[VLAN_TABLE_FID] = GENMASK(6, 0),
[VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
[VLAN_TABLE_VALID] = BIT(12),
[STATIC_MAC_TABLE_VALID] = BIT(21),
[STATIC_MAC_TABLE_USE_FID] = BIT(23),
[STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
[STATIC_MAC_TABLE_OVERRIDE] = BIT(22),
[STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(20, 16),
[DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
[DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
[DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
[DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
[DYNAMIC_MAC_TABLE_FID] = GENMASK(22, 16),
[DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
[DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
[P_MII_TX_FLOW_CTRL] = BIT(5),
[P_MII_RX_FLOW_CTRL] = BIT(5),
};
static const u8 ksz8795_xmii_ctrl0[] = {
[P_MII_100MBIT] = 0,
[P_MII_10MBIT] = 1,
[P_MII_FULL_DUPLEX] = 0,
[P_MII_HALF_DUPLEX] = 1,
};
static const u8 ksz8795_xmii_ctrl1[] = {
[P_RGMII_SEL] = 3,
[P_GMII_SEL] = 2,
[P_RMII_SEL] = 1,
[P_MII_SEL] = 0,
[P_GMII_1GBIT] = 1,
[P_GMII_NOT_1GBIT] = 0,
};
static const u8 ksz8795_shifts[] = {
[VLAN_TABLE_MEMBERSHIP_S] = 7,
[VLAN_TABLE] = 16,
[STATIC_MAC_FWD_PORTS] = 16,
[STATIC_MAC_FID] = 24,
[DYNAMIC_MAC_ENTRIES_H] = 3,
[DYNAMIC_MAC_ENTRIES] = 29,
[DYNAMIC_MAC_FID] = 16,
[DYNAMIC_MAC_TIMESTAMP] = 27,
[DYNAMIC_MAC_SRC_PORT] = 24,
};
static const u16 ksz8863_regs[] = {
[REG_IND_CTRL_0] = 0x79,
[REG_IND_DATA_8] = 0x7B,
[REG_IND_DATA_CHECK] = 0x7B,
[REG_IND_DATA_HI] = 0x7C,
[REG_IND_DATA_LO] = 0x80,
[REG_IND_MIB_CHECK] = 0x80,
[P_FORCE_CTRL] = 0x0C,
[P_LINK_STATUS] = 0x0E,
[P_LOCAL_CTRL] = 0x0C,
[P_NEG_RESTART_CTRL] = 0x0D,
[P_REMOTE_STATUS] = 0x0E,
[P_SPEED_STATUS] = 0x0F,
[S_TAIL_TAG_CTRL] = 0x03,
[P_STP_CTRL] = 0x02,
[S_START_CTRL] = 0x01,
[S_BROADCAST_CTRL] = 0x06,
[S_MULTICAST_CTRL] = 0x04,
};
static const u32 ksz8863_masks[] = {
[PORT_802_1P_REMAPPING] = BIT(3),
[SW_TAIL_TAG_ENABLE] = BIT(6),
[MIB_COUNTER_OVERFLOW] = BIT(7),
[MIB_COUNTER_VALID] = BIT(6),
[VLAN_TABLE_FID] = GENMASK(15, 12),
[VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
[VLAN_TABLE_VALID] = BIT(19),
[STATIC_MAC_TABLE_VALID] = BIT(19),
[STATIC_MAC_TABLE_USE_FID] = BIT(21),
[STATIC_MAC_TABLE_FID] = GENMASK(25, 22),
[STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
[STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
[DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(1, 0),
[DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(2),
[DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
[DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 24),
[DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
[DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
[DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
};
static const u8 ksz8863_shifts[] = {
[VLAN_TABLE_MEMBERSHIP_S] = 16,
[STATIC_MAC_FWD_PORTS] = 16,
[STATIC_MAC_FID] = 22,
[DYNAMIC_MAC_ENTRIES_H] = 8,
[DYNAMIC_MAC_ENTRIES] = 24,
[DYNAMIC_MAC_FID] = 16,
[DYNAMIC_MAC_TIMESTAMP] = 22,
[DYNAMIC_MAC_SRC_PORT] = 20,
};
static const u16 ksz9477_regs[] = {
[P_STP_CTRL] = 0x0B04,
[S_START_CTRL] = 0x0300,
[S_BROADCAST_CTRL] = 0x0332,
[S_MULTICAST_CTRL] = 0x0331,
[P_XMII_CTRL_0] = 0x0300,
[P_XMII_CTRL_1] = 0x0301,
};
static const u32 ksz9477_masks[] = {
[ALU_STAT_WRITE] = 0,
[ALU_STAT_READ] = 1,
[P_MII_TX_FLOW_CTRL] = BIT(5),
[P_MII_RX_FLOW_CTRL] = BIT(3),
};
static const u8 ksz9477_shifts[] = {
[ALU_STAT_INDEX] = 16,
};
static const u8 ksz9477_xmii_ctrl0[] = {
[P_MII_100MBIT] = 1,
[P_MII_10MBIT] = 0,
[P_MII_FULL_DUPLEX] = 1,
[P_MII_HALF_DUPLEX] = 0,
};
static const u8 ksz9477_xmii_ctrl1[] = {
[P_RGMII_SEL] = 0,
[P_RMII_SEL] = 1,
[P_GMII_SEL] = 2,
[P_MII_SEL] = 3,
[P_GMII_1GBIT] = 0,
[P_GMII_NOT_1GBIT] = 1,
};
static const u32 lan937x_masks[] = {
[ALU_STAT_WRITE] = 1,
[ALU_STAT_READ] = 2,
[P_MII_TX_FLOW_CTRL] = BIT(5),
[P_MII_RX_FLOW_CTRL] = BIT(3),
};
static const u8 lan937x_shifts[] = {
[ALU_STAT_INDEX] = 8,
};
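/* Illustrative sketch only, not part of the driver: the per-chip mask and
 * shift tables above are meant to be used together.  For chips whose tables
 * populate the dynamic MAC table entries (the KSZ87xx/KSZ88xx families), a
 * field such as the source port could be extracted as shown below.  The
 * helper name is hypothetical; the real accessors live in the chip-specific
 * files.
 */
static inline u8 __maybe_unused
example_dyn_mac_src_port(const struct ksz_device *dev, u32 raw)
{
	u32 mask = dev->info->masks[DYNAMIC_MAC_TABLE_SRC_PORT];
	u8 shift = dev->info->shifts[DYNAMIC_MAC_SRC_PORT];

	/* Mask out the field, then shift it down to bit 0. */
	return (raw & mask) >> shift;
}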
static const struct regmap_range ksz8563_valid_regs[] = {
regmap_reg_range(0x0000, 0x0003),
regmap_reg_range(0x0006, 0x0006),
regmap_reg_range(0x000f, 0x001f),
regmap_reg_range(0x0100, 0x0100),
regmap_reg_range(0x0104, 0x0107),
regmap_reg_range(0x010d, 0x010d),
regmap_reg_range(0x0110, 0x0113),
regmap_reg_range(0x0120, 0x012b),
regmap_reg_range(0x0201, 0x0201),
regmap_reg_range(0x0210, 0x0213),
regmap_reg_range(0x0300, 0x0300),
regmap_reg_range(0x0302, 0x031b),
regmap_reg_range(0x0320, 0x032b),
regmap_reg_range(0x0330, 0x0336),
regmap_reg_range(0x0338, 0x033e),
regmap_reg_range(0x0340, 0x035f),
regmap_reg_range(0x0370, 0x0370),
regmap_reg_range(0x0378, 0x0378),
regmap_reg_range(0x037c, 0x037d),
regmap_reg_range(0x0390, 0x0393),
regmap_reg_range(0x0400, 0x040e),
regmap_reg_range(0x0410, 0x042f),
regmap_reg_range(0x0500, 0x0519),
regmap_reg_range(0x0520, 0x054b),
regmap_reg_range(0x0550, 0x05b3),
/* port 1 */
regmap_reg_range(0x1000, 0x1001),
regmap_reg_range(0x1004, 0x100b),
regmap_reg_range(0x1013, 0x1013),
regmap_reg_range(0x1017, 0x1017),
regmap_reg_range(0x101b, 0x101b),
regmap_reg_range(0x101f, 0x1021),
regmap_reg_range(0x1030, 0x1030),
regmap_reg_range(0x1100, 0x1111),
regmap_reg_range(0x111a, 0x111d),
regmap_reg_range(0x1122, 0x1127),
regmap_reg_range(0x112a, 0x112b),
regmap_reg_range(0x1136, 0x1139),
regmap_reg_range(0x113e, 0x113f),
regmap_reg_range(0x1400, 0x1401),
regmap_reg_range(0x1403, 0x1403),
regmap_reg_range(0x1410, 0x1417),
regmap_reg_range(0x1420, 0x1423),
regmap_reg_range(0x1500, 0x1507),
regmap_reg_range(0x1600, 0x1612),
regmap_reg_range(0x1800, 0x180f),
regmap_reg_range(0x1900, 0x1907),
regmap_reg_range(0x1914, 0x191b),
regmap_reg_range(0x1a00, 0x1a03),
regmap_reg_range(0x1a04, 0x1a08),
regmap_reg_range(0x1b00, 0x1b01),
regmap_reg_range(0x1b04, 0x1b04),
regmap_reg_range(0x1c00, 0x1c05),
regmap_reg_range(0x1c08, 0x1c1b),
/* port 2 */
regmap_reg_range(0x2000, 0x2001),
regmap_reg_range(0x2004, 0x200b),
regmap_reg_range(0x2013, 0x2013),
regmap_reg_range(0x2017, 0x2017),
regmap_reg_range(0x201b, 0x201b),
regmap_reg_range(0x201f, 0x2021),
regmap_reg_range(0x2030, 0x2030),
regmap_reg_range(0x2100, 0x2111),
regmap_reg_range(0x211a, 0x211d),
regmap_reg_range(0x2122, 0x2127),
regmap_reg_range(0x212a, 0x212b),
regmap_reg_range(0x2136, 0x2139),
regmap_reg_range(0x213e, 0x213f),
regmap_reg_range(0x2400, 0x2401),
regmap_reg_range(0x2403, 0x2403),
regmap_reg_range(0x2410, 0x2417),
regmap_reg_range(0x2420, 0x2423),
regmap_reg_range(0x2500, 0x2507),
regmap_reg_range(0x2600, 0x2612),
regmap_reg_range(0x2800, 0x280f),
regmap_reg_range(0x2900, 0x2907),
regmap_reg_range(0x2914, 0x291b),
regmap_reg_range(0x2a00, 0x2a03),
regmap_reg_range(0x2a04, 0x2a08),
regmap_reg_range(0x2b00, 0x2b01),
regmap_reg_range(0x2b04, 0x2b04),
regmap_reg_range(0x2c00, 0x2c05),
regmap_reg_range(0x2c08, 0x2c1b),
/* port 3 */
regmap_reg_range(0x3000, 0x3001),
regmap_reg_range(0x3004, 0x300b),
regmap_reg_range(0x3013, 0x3013),
regmap_reg_range(0x3017, 0x3017),
regmap_reg_range(0x301b, 0x301b),
regmap_reg_range(0x301f, 0x3021),
regmap_reg_range(0x3030, 0x3030),
regmap_reg_range(0x3300, 0x3301),
regmap_reg_range(0x3303, 0x3303),
regmap_reg_range(0x3400, 0x3401),
regmap_reg_range(0x3403, 0x3403),
regmap_reg_range(0x3410, 0x3417),
regmap_reg_range(0x3420, 0x3423),
regmap_reg_range(0x3500, 0x3507),
regmap_reg_range(0x3600, 0x3612),
regmap_reg_range(0x3800, 0x380f),
regmap_reg_range(0x3900, 0x3907),
regmap_reg_range(0x3914, 0x391b),
regmap_reg_range(0x3a00, 0x3a03),
regmap_reg_range(0x3a04, 0x3a08),
regmap_reg_range(0x3b00, 0x3b01),
regmap_reg_range(0x3b04, 0x3b04),
regmap_reg_range(0x3c00, 0x3c05),
regmap_reg_range(0x3c08, 0x3c1b),
};
static const struct regmap_access_table ksz8563_register_set = {
.yes_ranges = ksz8563_valid_regs,
.n_yes_ranges = ARRAY_SIZE(ksz8563_valid_regs),
};
static const struct regmap_range ksz9477_valid_regs[] = {
regmap_reg_range(0x0000, 0x0003),
regmap_reg_range(0x0006, 0x0006),
regmap_reg_range(0x0010, 0x001f),
regmap_reg_range(0x0100, 0x0100),
regmap_reg_range(0x0103, 0x0107),
regmap_reg_range(0x010d, 0x010d),
regmap_reg_range(0x0110, 0x0113),
regmap_reg_range(0x0120, 0x012b),
regmap_reg_range(0x0201, 0x0201),
regmap_reg_range(0x0210, 0x0213),
regmap_reg_range(0x0300, 0x0300),
regmap_reg_range(0x0302, 0x031b),
regmap_reg_range(0x0320, 0x032b),
regmap_reg_range(0x0330, 0x0336),
regmap_reg_range(0x0338, 0x033b),
regmap_reg_range(0x033e, 0x033e),
regmap_reg_range(0x0340, 0x035f),
regmap_reg_range(0x0370, 0x0370),
regmap_reg_range(0x0378, 0x0378),
regmap_reg_range(0x037c, 0x037d),
regmap_reg_range(0x0390, 0x0393),
regmap_reg_range(0x0400, 0x040e),
regmap_reg_range(0x0410, 0x042f),
regmap_reg_range(0x0444, 0x044b),
regmap_reg_range(0x0450, 0x046f),
regmap_reg_range(0x0500, 0x0519),
regmap_reg_range(0x0520, 0x054b),
regmap_reg_range(0x0550, 0x05b3),
regmap_reg_range(0x0604, 0x060b),
regmap_reg_range(0x0610, 0x0612),
regmap_reg_range(0x0614, 0x062c),
regmap_reg_range(0x0640, 0x0645),
regmap_reg_range(0x0648, 0x064d),
/* port 1 */
regmap_reg_range(0x1000, 0x1001),
regmap_reg_range(0x1013, 0x1013),
regmap_reg_range(0x1017, 0x1017),
regmap_reg_range(0x101b, 0x101b),
regmap_reg_range(0x101f, 0x1020),
regmap_reg_range(0x1030, 0x1030),
regmap_reg_range(0x1100, 0x1115),
regmap_reg_range(0x111a, 0x111f),
regmap_reg_range(0x1120, 0x112b),
regmap_reg_range(0x1134, 0x113b),
regmap_reg_range(0x113c, 0x113f),
regmap_reg_range(0x1400, 0x1401),
regmap_reg_range(0x1403, 0x1403),
regmap_reg_range(0x1410, 0x1417),
regmap_reg_range(0x1420, 0x1423),
regmap_reg_range(0x1500, 0x1507),
regmap_reg_range(0x1600, 0x1613),
regmap_reg_range(0x1800, 0x180f),
regmap_reg_range(0x1820, 0x1827),
regmap_reg_range(0x1830, 0x1837),
regmap_reg_range(0x1840, 0x184b),
regmap_reg_range(0x1900, 0x1907),
regmap_reg_range(0x1914, 0x191b),
regmap_reg_range(0x1920, 0x1920),
regmap_reg_range(0x1923, 0x1927),
regmap_reg_range(0x1a00, 0x1a03),
regmap_reg_range(0x1a04, 0x1a07),
regmap_reg_range(0x1b00, 0x1b01),
regmap_reg_range(0x1b04, 0x1b04),
regmap_reg_range(0x1c00, 0x1c05),
regmap_reg_range(0x1c08, 0x1c1b),
/* port 2 */
regmap_reg_range(0x2000, 0x2001),
regmap_reg_range(0x2013, 0x2013),
regmap_reg_range(0x2017, 0x2017),
regmap_reg_range(0x201b, 0x201b),
regmap_reg_range(0x201f, 0x2020),
regmap_reg_range(0x2030, 0x2030),
regmap_reg_range(0x2100, 0x2115),
regmap_reg_range(0x211a, 0x211f),
regmap_reg_range(0x2120, 0x212b),
regmap_reg_range(0x2134, 0x213b),
regmap_reg_range(0x213c, 0x213f),
regmap_reg_range(0x2400, 0x2401),
regmap_reg_range(0x2403, 0x2403),
regmap_reg_range(0x2410, 0x2417),
regmap_reg_range(0x2420, 0x2423),
regmap_reg_range(0x2500, 0x2507),
regmap_reg_range(0x2600, 0x2613),
regmap_reg_range(0x2800, 0x280f),
regmap_reg_range(0x2820, 0x2827),
regmap_reg_range(0x2830, 0x2837),
regmap_reg_range(0x2840, 0x284b),
regmap_reg_range(0x2900, 0x2907),
regmap_reg_range(0x2914, 0x291b),
regmap_reg_range(0x2920, 0x2920),
regmap_reg_range(0x2923, 0x2927),
regmap_reg_range(0x2a00, 0x2a03),
regmap_reg_range(0x2a04, 0x2a07),
regmap_reg_range(0x2b00, 0x2b01),
regmap_reg_range(0x2b04, 0x2b04),
regmap_reg_range(0x2c00, 0x2c05),
regmap_reg_range(0x2c08, 0x2c1b),
/* port 3 */
regmap_reg_range(0x3000, 0x3001),
regmap_reg_range(0x3013, 0x3013),
regmap_reg_range(0x3017, 0x3017),
regmap_reg_range(0x301b, 0x301b),
regmap_reg_range(0x301f, 0x3020),
regmap_reg_range(0x3030, 0x3030),
regmap_reg_range(0x3100, 0x3115),
regmap_reg_range(0x311a, 0x311f),
regmap_reg_range(0x3120, 0x312b),
regmap_reg_range(0x3134, 0x313b),
regmap_reg_range(0x313c, 0x313f),
regmap_reg_range(0x3400, 0x3401),
regmap_reg_range(0x3403, 0x3403),
regmap_reg_range(0x3410, 0x3417),
regmap_reg_range(0x3420, 0x3423),
regmap_reg_range(0x3500, 0x3507),
regmap_reg_range(0x3600, 0x3613),
regmap_reg_range(0x3800, 0x380f),
regmap_reg_range(0x3820, 0x3827),
regmap_reg_range(0x3830, 0x3837),
regmap_reg_range(0x3840, 0x384b),
regmap_reg_range(0x3900, 0x3907),
regmap_reg_range(0x3914, 0x391b),
regmap_reg_range(0x3920, 0x3920),
regmap_reg_range(0x3923, 0x3927),
regmap_reg_range(0x3a00, 0x3a03),
regmap_reg_range(0x3a04, 0x3a07),
regmap_reg_range(0x3b00, 0x3b01),
regmap_reg_range(0x3b04, 0x3b04),
regmap_reg_range(0x3c00, 0x3c05),
regmap_reg_range(0x3c08, 0x3c1b),
/* port 4 */
regmap_reg_range(0x4000, 0x4001),
regmap_reg_range(0x4013, 0x4013),
regmap_reg_range(0x4017, 0x4017),
regmap_reg_range(0x401b, 0x401b),
regmap_reg_range(0x401f, 0x4020),
regmap_reg_range(0x4030, 0x4030),
regmap_reg_range(0x4100, 0x4115),
regmap_reg_range(0x411a, 0x411f),
regmap_reg_range(0x4120, 0x412b),
regmap_reg_range(0x4134, 0x413b),
regmap_reg_range(0x413c, 0x413f),
regmap_reg_range(0x4400, 0x4401),
regmap_reg_range(0x4403, 0x4403),
regmap_reg_range(0x4410, 0x4417),
regmap_reg_range(0x4420, 0x4423),
regmap_reg_range(0x4500, 0x4507),
regmap_reg_range(0x4600, 0x4613),
regmap_reg_range(0x4800, 0x480f),
regmap_reg_range(0x4820, 0x4827),
regmap_reg_range(0x4830, 0x4837),
regmap_reg_range(0x4840, 0x484b),
regmap_reg_range(0x4900, 0x4907),
regmap_reg_range(0x4914, 0x491b),
regmap_reg_range(0x4920, 0x4920),
regmap_reg_range(0x4923, 0x4927),
regmap_reg_range(0x4a00, 0x4a03),
regmap_reg_range(0x4a04, 0x4a07),
regmap_reg_range(0x4b00, 0x4b01),
regmap_reg_range(0x4b04, 0x4b04),
regmap_reg_range(0x4c00, 0x4c05),
regmap_reg_range(0x4c08, 0x4c1b),
/* port 5 */
regmap_reg_range(0x5000, 0x5001),
regmap_reg_range(0x5013, 0x5013),
regmap_reg_range(0x5017, 0x5017),
regmap_reg_range(0x501b, 0x501b),
regmap_reg_range(0x501f, 0x5020),
regmap_reg_range(0x5030, 0x5030),
regmap_reg_range(0x5100, 0x5115),
regmap_reg_range(0x511a, 0x511f),
regmap_reg_range(0x5120, 0x512b),
regmap_reg_range(0x5134, 0x513b),
regmap_reg_range(0x513c, 0x513f),
regmap_reg_range(0x5400, 0x5401),
regmap_reg_range(0x5403, 0x5403),
regmap_reg_range(0x5410, 0x5417),
regmap_reg_range(0x5420, 0x5423),
regmap_reg_range(0x5500, 0x5507),
regmap_reg_range(0x5600, 0x5613),
regmap_reg_range(0x5800, 0x580f),
regmap_reg_range(0x5820, 0x5827),
regmap_reg_range(0x5830, 0x5837),
regmap_reg_range(0x5840, 0x584b),
regmap_reg_range(0x5900, 0x5907),
regmap_reg_range(0x5914, 0x591b),
regmap_reg_range(0x5920, 0x5920),
regmap_reg_range(0x5923, 0x5927),
regmap_reg_range(0x5a00, 0x5a03),
regmap_reg_range(0x5a04, 0x5a07),
regmap_reg_range(0x5b00, 0x5b01),
regmap_reg_range(0x5b04, 0x5b04),
regmap_reg_range(0x5c00, 0x5c05),
regmap_reg_range(0x5c08, 0x5c1b),
/* port 6 */
regmap_reg_range(0x6000, 0x6001),
regmap_reg_range(0x6013, 0x6013),
regmap_reg_range(0x6017, 0x6017),
regmap_reg_range(0x601b, 0x601b),
regmap_reg_range(0x601f, 0x6020),
regmap_reg_range(0x6030, 0x6030),
regmap_reg_range(0x6300, 0x6301),
regmap_reg_range(0x6400, 0x6401),
regmap_reg_range(0x6403, 0x6403),
regmap_reg_range(0x6410, 0x6417),
regmap_reg_range(0x6420, 0x6423),
regmap_reg_range(0x6500, 0x6507),
regmap_reg_range(0x6600, 0x6613),
regmap_reg_range(0x6800, 0x680f),
regmap_reg_range(0x6820, 0x6827),
regmap_reg_range(0x6830, 0x6837),
regmap_reg_range(0x6840, 0x684b),
regmap_reg_range(0x6900, 0x6907),
regmap_reg_range(0x6914, 0x691b),
regmap_reg_range(0x6920, 0x6920),
regmap_reg_range(0x6923, 0x6927),
regmap_reg_range(0x6a00, 0x6a03),
regmap_reg_range(0x6a04, 0x6a07),
regmap_reg_range(0x6b00, 0x6b01),
regmap_reg_range(0x6b04, 0x6b04),
regmap_reg_range(0x6c00, 0x6c05),
regmap_reg_range(0x6c08, 0x6c1b),
/* port 7 */
regmap_reg_range(0x7000, 0x7001),
regmap_reg_range(0x7013, 0x7013),
regmap_reg_range(0x7017, 0x7017),
regmap_reg_range(0x701b, 0x701b),
regmap_reg_range(0x701f, 0x7020),
regmap_reg_range(0x7030, 0x7030),
regmap_reg_range(0x7200, 0x7203),
regmap_reg_range(0x7206, 0x7207),
regmap_reg_range(0x7300, 0x7301),
regmap_reg_range(0x7400, 0x7401),
regmap_reg_range(0x7403, 0x7403),
regmap_reg_range(0x7410, 0x7417),
regmap_reg_range(0x7420, 0x7423),
regmap_reg_range(0x7500, 0x7507),
regmap_reg_range(0x7600, 0x7613),
regmap_reg_range(0x7800, 0x780f),
regmap_reg_range(0x7820, 0x7827),
regmap_reg_range(0x7830, 0x7837),
regmap_reg_range(0x7840, 0x784b),
regmap_reg_range(0x7900, 0x7907),
regmap_reg_range(0x7914, 0x791b),
regmap_reg_range(0x7920, 0x7920),
regmap_reg_range(0x7923, 0x7927),
regmap_reg_range(0x7a00, 0x7a03),
regmap_reg_range(0x7a04, 0x7a07),
regmap_reg_range(0x7b00, 0x7b01),
regmap_reg_range(0x7b04, 0x7b04),
regmap_reg_range(0x7c00, 0x7c05),
regmap_reg_range(0x7c08, 0x7c1b),
};
static const struct regmap_access_table ksz9477_register_set = {
.yes_ranges = ksz9477_valid_regs,
.n_yes_ranges = ARRAY_SIZE(ksz9477_valid_regs),
};
static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x0000, 0x0003),
regmap_reg_range(0x0006, 0x0006),
regmap_reg_range(0x0010, 0x001f),
regmap_reg_range(0x0100, 0x0100),
regmap_reg_range(0x0103, 0x0107),
regmap_reg_range(0x010d, 0x010d),
regmap_reg_range(0x0110, 0x0113),
regmap_reg_range(0x0120, 0x0127),
regmap_reg_range(0x0201, 0x0201),
regmap_reg_range(0x0210, 0x0213),
regmap_reg_range(0x0300, 0x0300),
regmap_reg_range(0x0302, 0x030b),
regmap_reg_range(0x0310, 0x031b),
regmap_reg_range(0x0320, 0x032b),
regmap_reg_range(0x0330, 0x0336),
regmap_reg_range(0x0338, 0x033b),
regmap_reg_range(0x033e, 0x033e),
regmap_reg_range(0x0340, 0x035f),
regmap_reg_range(0x0370, 0x0370),
regmap_reg_range(0x0378, 0x0378),
regmap_reg_range(0x037c, 0x037d),
regmap_reg_range(0x0390, 0x0393),
regmap_reg_range(0x0400, 0x040e),
regmap_reg_range(0x0410, 0x042f),
/* port 1 */
regmap_reg_range(0x1000, 0x1001),
regmap_reg_range(0x1013, 0x1013),
regmap_reg_range(0x1017, 0x1017),
regmap_reg_range(0x101b, 0x101b),
regmap_reg_range(0x101f, 0x1020),
regmap_reg_range(0x1030, 0x1030),
regmap_reg_range(0x1100, 0x1115),
regmap_reg_range(0x111a, 0x111f),
regmap_reg_range(0x1122, 0x1127),
regmap_reg_range(0x112a, 0x112b),
regmap_reg_range(0x1136, 0x1139),
regmap_reg_range(0x113e, 0x113f),
regmap_reg_range(0x1400, 0x1401),
regmap_reg_range(0x1403, 0x1403),
regmap_reg_range(0x1410, 0x1417),
regmap_reg_range(0x1420, 0x1423),
regmap_reg_range(0x1500, 0x1507),
regmap_reg_range(0x1600, 0x1612),
regmap_reg_range(0x1800, 0x180f),
regmap_reg_range(0x1820, 0x1827),
regmap_reg_range(0x1830, 0x1837),
regmap_reg_range(0x1840, 0x184b),
regmap_reg_range(0x1900, 0x1907),
regmap_reg_range(0x1914, 0x1915),
regmap_reg_range(0x1a00, 0x1a03),
regmap_reg_range(0x1a04, 0x1a07),
regmap_reg_range(0x1b00, 0x1b01),
regmap_reg_range(0x1b04, 0x1b04),
/* port 2 */
regmap_reg_range(0x2000, 0x2001),
regmap_reg_range(0x2013, 0x2013),
regmap_reg_range(0x2017, 0x2017),
regmap_reg_range(0x201b, 0x201b),
regmap_reg_range(0x201f, 0x2020),
regmap_reg_range(0x2030, 0x2030),
regmap_reg_range(0x2100, 0x2115),
regmap_reg_range(0x211a, 0x211f),
regmap_reg_range(0x2122, 0x2127),
regmap_reg_range(0x212a, 0x212b),
regmap_reg_range(0x2136, 0x2139),
regmap_reg_range(0x213e, 0x213f),
regmap_reg_range(0x2400, 0x2401),
regmap_reg_range(0x2403, 0x2403),
regmap_reg_range(0x2410, 0x2417),
regmap_reg_range(0x2420, 0x2423),
regmap_reg_range(0x2500, 0x2507),
regmap_reg_range(0x2600, 0x2612),
regmap_reg_range(0x2800, 0x280f),
regmap_reg_range(0x2820, 0x2827),
regmap_reg_range(0x2830, 0x2837),
regmap_reg_range(0x2840, 0x284b),
regmap_reg_range(0x2900, 0x2907),
regmap_reg_range(0x2914, 0x2915),
regmap_reg_range(0x2a00, 0x2a03),
regmap_reg_range(0x2a04, 0x2a07),
regmap_reg_range(0x2b00, 0x2b01),
regmap_reg_range(0x2b04, 0x2b04),
/* port 3 */
regmap_reg_range(0x3000, 0x3001),
regmap_reg_range(0x3013, 0x3013),
regmap_reg_range(0x3017, 0x3017),
regmap_reg_range(0x301b, 0x301b),
regmap_reg_range(0x301f, 0x3020),
regmap_reg_range(0x3030, 0x3030),
regmap_reg_range(0x3100, 0x3115),
regmap_reg_range(0x311a, 0x311f),
regmap_reg_range(0x3122, 0x3127),
regmap_reg_range(0x312a, 0x312b),
regmap_reg_range(0x3136, 0x3139),
regmap_reg_range(0x313e, 0x313f),
regmap_reg_range(0x3400, 0x3401),
regmap_reg_range(0x3403, 0x3403),
regmap_reg_range(0x3410, 0x3417),
regmap_reg_range(0x3420, 0x3423),
regmap_reg_range(0x3500, 0x3507),
regmap_reg_range(0x3600, 0x3612),
regmap_reg_range(0x3800, 0x380f),
regmap_reg_range(0x3820, 0x3827),
regmap_reg_range(0x3830, 0x3837),
regmap_reg_range(0x3840, 0x384b),
regmap_reg_range(0x3900, 0x3907),
regmap_reg_range(0x3914, 0x3915),
regmap_reg_range(0x3a00, 0x3a03),
regmap_reg_range(0x3a04, 0x3a07),
regmap_reg_range(0x3b00, 0x3b01),
regmap_reg_range(0x3b04, 0x3b04),
/* port 4 */
regmap_reg_range(0x4000, 0x4001),
regmap_reg_range(0x4013, 0x4013),
regmap_reg_range(0x4017, 0x4017),
regmap_reg_range(0x401b, 0x401b),
regmap_reg_range(0x401f, 0x4020),
regmap_reg_range(0x4030, 0x4030),
regmap_reg_range(0x4100, 0x4115),
regmap_reg_range(0x411a, 0x411f),
regmap_reg_range(0x4122, 0x4127),
regmap_reg_range(0x412a, 0x412b),
regmap_reg_range(0x4136, 0x4139),
regmap_reg_range(0x413e, 0x413f),
regmap_reg_range(0x4400, 0x4401),
regmap_reg_range(0x4403, 0x4403),
regmap_reg_range(0x4410, 0x4417),
regmap_reg_range(0x4420, 0x4423),
regmap_reg_range(0x4500, 0x4507),
regmap_reg_range(0x4600, 0x4612),
regmap_reg_range(0x4800, 0x480f),
regmap_reg_range(0x4820, 0x4827),
regmap_reg_range(0x4830, 0x4837),
regmap_reg_range(0x4840, 0x484b),
regmap_reg_range(0x4900, 0x4907),
regmap_reg_range(0x4914, 0x4915),
regmap_reg_range(0x4a00, 0x4a03),
regmap_reg_range(0x4a04, 0x4a07),
regmap_reg_range(0x4b00, 0x4b01),
regmap_reg_range(0x4b04, 0x4b04),
/* port 5 */
regmap_reg_range(0x5000, 0x5001),
regmap_reg_range(0x5013, 0x5013),
regmap_reg_range(0x5017, 0x5017),
regmap_reg_range(0x501b, 0x501b),
regmap_reg_range(0x501f, 0x5020),
regmap_reg_range(0x5030, 0x5030),
regmap_reg_range(0x5100, 0x5115),
regmap_reg_range(0x511a, 0x511f),
regmap_reg_range(0x5122, 0x5127),
regmap_reg_range(0x512a, 0x512b),
regmap_reg_range(0x5136, 0x5139),
regmap_reg_range(0x513e, 0x513f),
regmap_reg_range(0x5400, 0x5401),
regmap_reg_range(0x5403, 0x5403),
regmap_reg_range(0x5410, 0x5417),
regmap_reg_range(0x5420, 0x5423),
regmap_reg_range(0x5500, 0x5507),
regmap_reg_range(0x5600, 0x5612),
regmap_reg_range(0x5800, 0x580f),
regmap_reg_range(0x5820, 0x5827),
regmap_reg_range(0x5830, 0x5837),
regmap_reg_range(0x5840, 0x584b),
regmap_reg_range(0x5900, 0x5907),
regmap_reg_range(0x5914, 0x5915),
regmap_reg_range(0x5a00, 0x5a03),
regmap_reg_range(0x5a04, 0x5a07),
regmap_reg_range(0x5b00, 0x5b01),
regmap_reg_range(0x5b04, 0x5b04),
/* port 6 */
regmap_reg_range(0x6000, 0x6001),
regmap_reg_range(0x6013, 0x6013),
regmap_reg_range(0x6017, 0x6017),
regmap_reg_range(0x601b, 0x601b),
regmap_reg_range(0x601f, 0x6020),
regmap_reg_range(0x6030, 0x6030),
regmap_reg_range(0x6100, 0x6115),
regmap_reg_range(0x611a, 0x611f),
regmap_reg_range(0x6122, 0x6127),
regmap_reg_range(0x612a, 0x612b),
regmap_reg_range(0x6136, 0x6139),
regmap_reg_range(0x613e, 0x613f),
regmap_reg_range(0x6300, 0x6301),
regmap_reg_range(0x6400, 0x6401),
regmap_reg_range(0x6403, 0x6403),
regmap_reg_range(0x6410, 0x6417),
regmap_reg_range(0x6420, 0x6423),
regmap_reg_range(0x6500, 0x6507),
regmap_reg_range(0x6600, 0x6612),
regmap_reg_range(0x6800, 0x680f),
regmap_reg_range(0x6820, 0x6827),
regmap_reg_range(0x6830, 0x6837),
regmap_reg_range(0x6840, 0x684b),
regmap_reg_range(0x6900, 0x6907),
regmap_reg_range(0x6914, 0x6915),
regmap_reg_range(0x6a00, 0x6a03),
regmap_reg_range(0x6a04, 0x6a07),
regmap_reg_range(0x6b00, 0x6b01),
regmap_reg_range(0x6b04, 0x6b04),
};
static const struct regmap_access_table ksz9896_register_set = {
.yes_ranges = ksz9896_valid_regs,
.n_yes_ranges = ARRAY_SIZE(ksz9896_valid_regs),
};
static const struct regmap_range ksz8873_valid_regs[] = {
regmap_reg_range(0x00, 0x01),
/* global control register */
regmap_reg_range(0x02, 0x0f),
/* port registers */
regmap_reg_range(0x10, 0x1d),
regmap_reg_range(0x1e, 0x1f),
regmap_reg_range(0x20, 0x2d),
regmap_reg_range(0x2e, 0x2f),
regmap_reg_range(0x30, 0x39),
regmap_reg_range(0x3f, 0x3f),
/* advanced control registers */
regmap_reg_range(0x60, 0x6f),
regmap_reg_range(0x70, 0x75),
regmap_reg_range(0x76, 0x78),
regmap_reg_range(0x79, 0x7a),
regmap_reg_range(0x7b, 0x83),
regmap_reg_range(0x8e, 0x99),
regmap_reg_range(0x9a, 0xa5),
regmap_reg_range(0xa6, 0xa6),
regmap_reg_range(0xa7, 0xaa),
regmap_reg_range(0xab, 0xae),
regmap_reg_range(0xaf, 0xba),
regmap_reg_range(0xbb, 0xbc),
regmap_reg_range(0xbd, 0xbd),
regmap_reg_range(0xc0, 0xc0),
regmap_reg_range(0xc2, 0xc2),
regmap_reg_range(0xc3, 0xc3),
regmap_reg_range(0xc4, 0xc4),
regmap_reg_range(0xc6, 0xc6),
};
static const struct regmap_access_table ksz8873_register_set = {
.yes_ranges = ksz8873_valid_regs,
.n_yes_ranges = ARRAY_SIZE(ksz8873_valid_regs),
};
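/* Illustrative sketch only: the access tables above are not consumed in this
 * file.  They are referenced through the per-chip data (wr_table/rd_table)
 * and ultimately handed to regmap so that reads and writes outside the
 * documented ranges are rejected.  The config below is hypothetical and its
 * field values are made up for illustration.
 */
static const struct regmap_config example_ksz8873_regmap_config __maybe_unused = {
	.name = "example",
	.reg_bits = 8,
	.val_bits = 8,
	.rd_table = &ksz8873_register_set,
	.wr_table = &ksz8873_register_set,
};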
const struct ksz_chip_data ksz_switch_chips[] = {
[KSZ8563] = {
.chip_id = KSZ8563_CHIP_ID,
.dev_name = "KSZ8563",
.num_vlans = 4096,
.num_alus = 4096,
.num_statics = 16,
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
.port_nirqs = 3,
.num_tx_queues = 4,
.tc_cbs_supported = true,
.tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz9477_regs,
.masks = ksz9477_masks,
.shifts = ksz9477_shifts,
.xmii_ctrl0 = ksz9477_xmii_ctrl0,
.xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
.supports_mii = {false, false, true},
.supports_rmii = {false, false, true},
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
.gbit_capable = {false, false, true},
.wr_table = &ksz8563_register_set,
.rd_table = &ksz8563_register_set,
},
[KSZ8795] = {
.chip_id = KSZ8795_CHIP_ID,
.dev_name = "KSZ8795",
.num_vlans = 4096,
.num_alus = 0,
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
.ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz8795_regs,
.masks = ksz8795_masks,
.shifts = ksz8795_shifts,
.xmii_ctrl0 = ksz8795_xmii_ctrl0,
.xmii_ctrl1 = ksz8795_xmii_ctrl1,
.supports_mii = {false, false, false, false, true},
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
.internal_phy = {true, true, true, true, false},
},
[KSZ8794] = {
/* WARNING
* =======
	 * KSZ8794 is similar to KSZ8795, except that its port map
	 * contains a gap between the external and CPU ports, i.e.
	 * the port map is NOT contiguous. The per-port register
	 * map is shifted accordingly too: registers at
	 * offset 0x40 are NOT used on KSZ8794 but ARE
	 * used on KSZ8795 for external port 3.
* external cpu
* KSZ8794 0,1,2 4
* KSZ8795 0,1,2,3 4
* KSZ8765 0,1,2,3 4
* port_cnt is configured as 5, even though it is 4
*/
.chip_id = KSZ8794_CHIP_ID,
.dev_name = "KSZ8794",
.num_vlans = 4096,
.num_alus = 0,
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
.ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz8795_regs,
.masks = ksz8795_masks,
.shifts = ksz8795_shifts,
.xmii_ctrl0 = ksz8795_xmii_ctrl0,
.xmii_ctrl1 = ksz8795_xmii_ctrl1,
.supports_mii = {false, false, false, false, true},
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
.internal_phy = {true, true, true, false, false},
},
[KSZ8765] = {
.chip_id = KSZ8765_CHIP_ID,
.dev_name = "KSZ8765",
.num_vlans = 4096,
.num_alus = 0,
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
.ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz8795_regs,
.masks = ksz8795_masks,
.shifts = ksz8795_shifts,
.xmii_ctrl0 = ksz8795_xmii_ctrl0,
.xmii_ctrl1 = ksz8795_xmii_ctrl1,
.supports_mii = {false, false, false, false, true},
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
.internal_phy = {true, true, true, true, false},
},
[KSZ8830] = {
.chip_id = KSZ8830_CHIP_ID,
.dev_name = "KSZ8863/KSZ8873",
.num_vlans = 16,
.num_alus = 0,
.num_statics = 8,
		.cpu_ports = 0x04,	/* can be configured as cpu port */
.port_cnt = 3,
.num_tx_queues = 4,
.ops = &ksz8_dev_ops,
.mib_names = ksz88xx_mib_names,
.mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz8863_regs,
.masks = ksz8863_masks,
.shifts = ksz8863_shifts,
.supports_mii = {false, false, true},
.supports_rmii = {false, false, true},
.internal_phy = {true, true, false},
.wr_table = &ksz8873_register_set,
.rd_table = &ksz8873_register_set,
},
[KSZ9477] = {
.chip_id = KSZ9477_CHIP_ID,
.dev_name = "KSZ9477",
.num_vlans = 4096,
.num_alus = 4096,
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
.port_nirqs = 4,
.num_tx_queues = 4,
.tc_cbs_supported = true,
.tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz9477_regs,
.masks = ksz9477_masks,
.shifts = ksz9477_shifts,
.xmii_ctrl0 = ksz9477_xmii_ctrl0,
.xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false,
false, true, false},
.supports_rmii = {false, false, false, false,
false, true, false},
.supports_rgmii = {false, false, false, false,
false, true, false},
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
.wr_table = &ksz9477_register_set,
.rd_table = &ksz9477_register_set,
},
[KSZ9896] = {
.chip_id = KSZ9896_CHIP_ID,
.dev_name = "KSZ9896",
.num_vlans = 4096,
.num_alus = 4096,
.num_statics = 16,
.cpu_ports = 0x3F, /* can be configured as cpu port */
.port_cnt = 6, /* total physical port count */
.port_nirqs = 2,
.num_tx_queues = 4,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz9477_regs,
.masks = ksz9477_masks,
.shifts = ksz9477_shifts,
.xmii_ctrl0 = ksz9477_xmii_ctrl0,
.xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false,
false, true},
.supports_rmii = {false, false, false, false,
false, true},
.supports_rgmii = {false, false, false, false,
false, true},
.internal_phy = {true, true, true, true,
true, false},
.gbit_capable = {true, true, true, true, true, true},
.wr_table = &ksz9896_register_set,
.rd_table = &ksz9896_register_set,
},
[KSZ9897] = {
.chip_id = KSZ9897_CHIP_ID,
.dev_name = "KSZ9897",
.num_vlans = 4096,
.num_alus = 4096,
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
.port_nirqs = 2,
.num_tx_queues = 4,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz9477_regs,
.masks = ksz9477_masks,
.shifts = ksz9477_shifts,
.xmii_ctrl0 = ksz9477_xmii_ctrl0,
.xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false,
false, true, true},
.supports_rmii = {false, false, false, false,
false, true, true},
.supports_rgmii = {false, false, false, false,
false, true, true},
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
},
[KSZ9893] = {
.chip_id = KSZ9893_CHIP_ID,
.dev_name = "KSZ9893",
.num_vlans = 4096,
.num_alus = 4096,
.num_statics = 16,
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
.port_nirqs = 2,
.num_tx_queues = 4,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz9477_regs,
.masks = ksz9477_masks,
.shifts = ksz9477_shifts,
.xmii_ctrl0 = ksz9477_xmii_ctrl0,
.xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
.supports_mii = {false, false, true},
.supports_rmii = {false, false, true},
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
.gbit_capable = {true, true, true},
},
[KSZ9563] = {
.chip_id = KSZ9563_CHIP_ID,
.dev_name = "KSZ9563",
.num_vlans = 4096,
.num_alus = 4096,
.num_statics = 16,
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
.port_nirqs = 3,
.num_tx_queues = 4,
.tc_cbs_supported = true,
.tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz9477_regs,
.masks = ksz9477_masks,
.shifts = ksz9477_shifts,
.xmii_ctrl0 = ksz9477_xmii_ctrl0,
.xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
.supports_mii = {false, false, true},
.supports_rmii = {false, false, true},
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
.gbit_capable = {true, true, true},
},
[KSZ9567] = {
.chip_id = KSZ9567_CHIP_ID,
.dev_name = "KSZ9567",
.num_vlans = 4096,
.num_alus = 4096,
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
.port_nirqs = 3,
.num_tx_queues = 4,
.tc_cbs_supported = true,
.tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz9477_regs,
.masks = ksz9477_masks,
.shifts = ksz9477_shifts,
.xmii_ctrl0 = ksz9477_xmii_ctrl0,
.xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false,
false, true, true},
.supports_rmii = {false, false, false, false,
false, true, true},
.supports_rgmii = {false, false, false, false,
false, true, true},
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
},
[LAN9370] = {
.chip_id = LAN9370_CHIP_ID,
.dev_name = "LAN9370",
.num_vlans = 4096,
.num_alus = 1024,
.num_statics = 256,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
.tc_cbs_supported = true,
.tc_ets_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz9477_regs,
.masks = lan937x_masks,
.shifts = lan937x_shifts,
.xmii_ctrl0 = ksz9477_xmii_ctrl0,
.xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false, true},
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
.internal_phy = {true, true, true, true, false},
},
[LAN9371] = {
.chip_id = LAN9371_CHIP_ID,
.dev_name = "LAN9371",
.num_vlans = 4096,
.num_alus = 1024,
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 6, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
.tc_cbs_supported = true,
.tc_ets_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz9477_regs,
.masks = lan937x_masks,
.shifts = lan937x_shifts,
.xmii_ctrl0 = ksz9477_xmii_ctrl0,
.xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false, true, true},
.supports_rmii = {false, false, false, false, true, true},
.supports_rgmii = {false, false, false, false, true, true},
.internal_phy = {true, true, true, true, false, false},
},
[LAN9372] = {
.chip_id = LAN9372_CHIP_ID,
.dev_name = "LAN9372",
.num_vlans = 4096,
.num_alus = 1024,
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
.tc_cbs_supported = true,
.tc_ets_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz9477_regs,
.masks = lan937x_masks,
.shifts = lan937x_shifts,
.xmii_ctrl0 = ksz9477_xmii_ctrl0,
.xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false,
true, true, false, false},
.supports_rmii = {false, false, false, false,
true, true, false, false},
.supports_rgmii = {false, false, false, false,
true, true, false, false},
.internal_phy = {true, true, true, true,
false, false, true, true},
},
[LAN9373] = {
.chip_id = LAN9373_CHIP_ID,
.dev_name = "LAN9373",
.num_vlans = 4096,
.num_alus = 1024,
.num_statics = 256,
.cpu_ports = 0x38, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
.tc_cbs_supported = true,
.tc_ets_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz9477_regs,
.masks = lan937x_masks,
.shifts = lan937x_shifts,
.xmii_ctrl0 = ksz9477_xmii_ctrl0,
.xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false,
true, true, false, false},
.supports_rmii = {false, false, false, false,
true, true, false, false},
.supports_rgmii = {false, false, false, false,
true, true, false, false},
.internal_phy = {true, true, true, false,
false, false, true, true},
},
[LAN9374] = {
.chip_id = LAN9374_CHIP_ID,
.dev_name = "LAN9374",
.num_vlans = 4096,
.num_alus = 1024,
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
.tc_cbs_supported = true,
.tc_ets_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
.regs = ksz9477_regs,
.masks = lan937x_masks,
.shifts = lan937x_shifts,
.xmii_ctrl0 = ksz9477_xmii_ctrl0,
.xmii_ctrl1 = ksz9477_xmii_ctrl1,
.supports_mii = {false, false, false, false,
true, true, false, false},
.supports_rmii = {false, false, false, false,
true, true, false, false},
.supports_rgmii = {false, false, false, false,
true, true, false, false},
.internal_phy = {true, true, true, true,
false, false, true, true},
},
};
EXPORT_SYMBOL_GPL(ksz_switch_chips);
static const struct ksz_chip_data *ksz_lookup_info(unsigned int prod_num)
{
int i;
for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
const struct ksz_chip_data *chip = &ksz_switch_chips[i];
if (chip->chip_id == prod_num)
return chip;
}
return NULL;
}
static int ksz_check_device_id(struct ksz_device *dev)
{
const struct ksz_chip_data *dt_chip_data;
dt_chip_data = of_device_get_match_data(dev->dev);
	/* Check that the chip ID from the device tree matches the detected one */
if (dt_chip_data->chip_id != dev->chip_id) {
dev_err(dev->dev,
"Device tree specifies chip %s but found %s, please fix it!\n",
dt_chip_data->dev_name, dev->info->dev_name);
return -ENODEV;
}
return 0;
}
static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct ksz_device *dev = ds->priv;
if (dev->info->supports_mii[port])
__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
if (dev->info->supports_rmii[port])
__set_bit(PHY_INTERFACE_MODE_RMII,
config->supported_interfaces);
if (dev->info->supports_rgmii[port])
phy_interface_set_rgmii(config->supported_interfaces);
if (dev->info->internal_phy[port]) {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
/* Compatibility for phylib's default interface type when the
* phy-mode property is absent
*/
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
}
if (dev->dev_ops->get_caps)
dev->dev_ops->get_caps(dev, port, config);
}
void ksz_r_mib_stats64(struct ksz_device *dev, int port)
{
struct ethtool_pause_stats *pstats;
struct rtnl_link_stats64 *stats;
struct ksz_stats_raw *raw;
struct ksz_port_mib *mib;
mib = &dev->ports[port].mib;
stats = &mib->stats64;
pstats = &mib->pause_stats;
raw = (struct ksz_stats_raw *)mib->counters;
spin_lock(&mib->stats64_lock);
stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast +
raw->rx_pause;
stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast +
raw->tx_pause;
	/* HW counters include the FCS in their byte counts, which is not
	 * acceptable for the rtnl_link_stats64 interface.
	 */
stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;
stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
raw->rx_oversize;
stats->rx_crc_errors = raw->rx_crc_err;
stats->rx_frame_errors = raw->rx_align_err;
stats->rx_dropped = raw->rx_discards;
stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
stats->rx_frame_errors + stats->rx_dropped;
stats->tx_window_errors = raw->tx_late_col;
stats->tx_fifo_errors = raw->tx_discards;
stats->tx_aborted_errors = raw->tx_exc_col;
stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
stats->tx_aborted_errors;
stats->multicast = raw->rx_mcast;
stats->collisions = raw->tx_total_col;
pstats->tx_pause_frames = raw->tx_pause;
pstats->rx_pause_frames = raw->rx_pause;
spin_unlock(&mib->stats64_lock);
}
void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port)
{
struct ethtool_pause_stats *pstats;
struct rtnl_link_stats64 *stats;
struct ksz88xx_stats_raw *raw;
struct ksz_port_mib *mib;
mib = &dev->ports[port].mib;
stats = &mib->stats64;
pstats = &mib->pause_stats;
raw = (struct ksz88xx_stats_raw *)mib->counters;
spin_lock(&mib->stats64_lock);
stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast +
raw->rx_pause;
stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast +
raw->tx_pause;
	/* HW counters include the FCS in their byte counts, which is not
	 * acceptable for the rtnl_link_stats64 interface.
	 */
stats->rx_bytes = raw->rx + raw->rx_hi - stats->rx_packets * ETH_FCS_LEN;
stats->tx_bytes = raw->tx + raw->tx_hi - stats->tx_packets * ETH_FCS_LEN;
stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
raw->rx_oversize;
stats->rx_crc_errors = raw->rx_crc_err;
stats->rx_frame_errors = raw->rx_align_err;
stats->rx_dropped = raw->rx_discards;
stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
stats->rx_frame_errors + stats->rx_dropped;
stats->tx_window_errors = raw->tx_late_col;
stats->tx_fifo_errors = raw->tx_discards;
stats->tx_aborted_errors = raw->tx_exc_col;
stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
stats->tx_aborted_errors;
stats->multicast = raw->rx_mcast;
stats->collisions = raw->tx_total_col;
pstats->tx_pause_frames = raw->tx_pause;
pstats->rx_pause_frames = raw->rx_pause;
spin_unlock(&mib->stats64_lock);
}
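/* Worked example for the FCS adjustment above: if a port received 1000
 * frames totalling 80000 bytes on the wire (FCS included), the hardware
 * byte counter reads 80000, and the value reported in rx_bytes is
 * 80000 - 1000 * ETH_FCS_LEN = 76000.
 */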
static void ksz_get_stats64(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s)
{
struct ksz_device *dev = ds->priv;
struct ksz_port_mib *mib;
mib = &dev->ports[port].mib;
spin_lock(&mib->stats64_lock);
memcpy(s, &mib->stats64, sizeof(*s));
spin_unlock(&mib->stats64_lock);
}
static void ksz_get_pause_stats(struct dsa_switch *ds, int port,
struct ethtool_pause_stats *pause_stats)
{
struct ksz_device *dev = ds->priv;
struct ksz_port_mib *mib;
mib = &dev->ports[port].mib;
spin_lock(&mib->stats64_lock);
memcpy(pause_stats, &mib->pause_stats, sizeof(*pause_stats));
spin_unlock(&mib->stats64_lock);
}
static void ksz_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *buf)
{
struct ksz_device *dev = ds->priv;
int i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < dev->info->mib_cnt; i++) {
memcpy(buf + i * ETH_GSTRING_LEN,
dev->info->mib_names[i].string, ETH_GSTRING_LEN);
}
}
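/* Recompute the hardware port-membership masks after a bridge or STP state
 * change: a user port may only forward to other user ports that are in the
 * same bridge and in the forwarding state, while the upstream CPU port is
 * always included.  The masks of the other affected ports are refreshed as
 * well so the whole membership matrix stays consistent.
 */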
static void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
struct dsa_switch *ds = dev->ds;
u8 port_member = 0, cpu_port;
const struct dsa_port *dp;
int i, j;
if (!dsa_is_user_port(ds, port))
return;
dp = dsa_to_port(ds, port);
cpu_port = BIT(dsa_upstream_port(ds, port));
for (i = 0; i < ds->num_ports; i++) {
const struct dsa_port *other_dp = dsa_to_port(ds, i);
struct ksz_port *other_p = &dev->ports[i];
u8 val = 0;
if (!dsa_is_user_port(ds, i))
continue;
if (port == i)
continue;
if (!dsa_port_bridge_same(dp, other_dp))
continue;
if (other_p->stp_state != BR_STATE_FORWARDING)
continue;
if (p->stp_state == BR_STATE_FORWARDING) {
val |= BIT(port);
port_member |= BIT(i);
}
		/* Retain port [i]'s relationship to ports other than [port] */
for (j = 0; j < ds->num_ports; j++) {
const struct dsa_port *third_dp;
struct ksz_port *third_p;
if (j == i)
continue;
if (j == port)
continue;
if (!dsa_is_user_port(ds, j))
continue;
third_p = &dev->ports[j];
if (third_p->stp_state != BR_STATE_FORWARDING)
continue;
third_dp = dsa_to_port(ds, j);
if (dsa_port_bridge_same(other_dp, third_dp))
val |= BIT(j);
}
dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
}
dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
static int ksz_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
struct ksz_device *dev = bus->priv;
u16 val;
int ret;
ret = dev->dev_ops->r_phy(dev, addr, regnum, &val);
if (ret < 0)
return ret;
return val;
}
static int ksz_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
u16 val)
{
struct ksz_device *dev = bus->priv;
return dev->dev_ops->w_phy(dev, addr, regnum, val);
}
static int ksz_irq_phy_setup(struct ksz_device *dev)
{
struct dsa_switch *ds = dev->ds;
int phy;
int irq;
int ret;
for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) {
if (BIT(phy) & ds->phys_mii_mask) {
irq = irq_find_mapping(dev->ports[phy].pirq.domain,
PORT_SRC_PHY_INT);
if (irq < 0) {
ret = irq;
goto out;
}
ds->slave_mii_bus->irq[phy] = irq;
}
}
return 0;
out:
while (phy--)
if (BIT(phy) & ds->phys_mii_mask)
irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
return ret;
}
static void ksz_irq_phy_free(struct ksz_device *dev)
{
struct dsa_switch *ds = dev->ds;
int phy;
for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++)
if (BIT(phy) & ds->phys_mii_mask)
irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
}
static int ksz_mdio_register(struct ksz_device *dev)
{
struct dsa_switch *ds = dev->ds;
struct device_node *mdio_np;
struct mii_bus *bus;
int ret;
mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
if (!mdio_np)
return 0;
bus = devm_mdiobus_alloc(ds->dev);
if (!bus) {
of_node_put(mdio_np);
return -ENOMEM;
}
bus->priv = dev;
bus->read = ksz_sw_mdio_read;
bus->write = ksz_sw_mdio_write;
bus->name = "ksz slave smi";
snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
bus->parent = ds->dev;
bus->phy_mask = ~ds->phys_mii_mask;
ds->slave_mii_bus = bus;
if (dev->irq > 0) {
ret = ksz_irq_phy_setup(dev);
if (ret) {
of_node_put(mdio_np);
return ret;
}
}
ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
if (ret) {
dev_err(ds->dev, "unable to register MDIO bus %s\n",
bus->id);
if (dev->irq > 0)
ksz_irq_phy_free(dev);
}
of_node_put(mdio_np);
return ret;
}
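/* The interrupt controller below follows the usual pattern for chips behind
 * a slow bus: ksz_irq_mask()/ksz_irq_unmask() only update a cached mask, and
 * the cache is written back to the hardware in ksz_irq_bus_sync_unlock(),
 * under the mutex taken in ksz_irq_bus_lock().
 */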
static void ksz_irq_mask(struct irq_data *d)
{
struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
kirq->masked |= BIT(d->hwirq);
}
static void ksz_irq_unmask(struct irq_data *d)
{
struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
kirq->masked &= ~BIT(d->hwirq);
}
static void ksz_irq_bus_lock(struct irq_data *d)
{
struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
mutex_lock(&kirq->dev->lock_irq);
}
static void ksz_irq_bus_sync_unlock(struct irq_data *d)
{
struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
struct ksz_device *dev = kirq->dev;
int ret;
ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
if (ret)
dev_err(dev->dev, "failed to change IRQ mask\n");
mutex_unlock(&dev->lock_irq);
}
static const struct irq_chip ksz_irq_chip = {
.name = "ksz-irq",
.irq_mask = ksz_irq_mask,
.irq_unmask = ksz_irq_unmask,
.irq_bus_lock = ksz_irq_bus_lock,
.irq_bus_sync_unlock = ksz_irq_bus_sync_unlock,
};
static int ksz_irq_domain_map(struct irq_domain *d,
unsigned int irq, irq_hw_number_t hwirq)
{
irq_set_chip_data(irq, d->host_data);
irq_set_chip_and_handler(irq, &ksz_irq_chip, handle_level_irq);
irq_set_noprobe(irq);
return 0;
}
static const struct irq_domain_ops ksz_irq_domain_ops = {
.map = ksz_irq_domain_map,
.xlate = irq_domain_xlate_twocell,
};
static void ksz_irq_free(struct ksz_irq *kirq)
{
int irq, virq;
free_irq(kirq->irq_num, kirq);
for (irq = 0; irq < kirq->nirqs; irq++) {
virq = irq_find_mapping(kirq->domain, irq);
irq_dispose_mapping(virq);
}
irq_domain_remove(kirq->domain);
}
static irqreturn_t ksz_irq_thread_fn(int irq, void *dev_id)
{
struct ksz_irq *kirq = dev_id;
unsigned int nhandled = 0;
struct ksz_device *dev;
unsigned int sub_irq;
u8 data;
int ret;
u8 n;
dev = kirq->dev;
/* Read interrupt status register */
ret = ksz_read8(dev, kirq->reg_status, &data);
if (ret)
goto out;
for (n = 0; n < kirq->nirqs; ++n) {
if (data & BIT(n)) {
sub_irq = irq_find_mapping(kirq->domain, n);
handle_nested_irq(sub_irq);
++nhandled;
}
}
out:
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
{
int ret, n;
kirq->dev = dev;
kirq->masked = ~0;
kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0,
&ksz_irq_domain_ops, kirq);
if (!kirq->domain)
return -ENOMEM;
for (n = 0; n < kirq->nirqs; n++)
irq_create_mapping(kirq->domain, n);
ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn,
IRQF_ONESHOT, kirq->name, kirq);
if (ret)
goto out;
return 0;
out:
ksz_irq_free(kirq);
return ret;
}
static int ksz_girq_setup(struct ksz_device *dev)
{
struct ksz_irq *girq = &dev->girq;
girq->nirqs = dev->info->port_cnt;
girq->reg_mask = REG_SW_PORT_INT_MASK__1;
girq->reg_status = REG_SW_PORT_INT_STATUS__1;
snprintf(girq->name, sizeof(girq->name), "global_port_irq");
girq->irq_num = dev->irq;
return ksz_irq_common_setup(dev, girq);
}
static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
{
struct ksz_irq *pirq = &dev->ports[p].pirq;
pirq->nirqs = dev->info->port_nirqs;
pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK);
pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS);
snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);
pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
if (pirq->irq_num < 0)
return pirq->irq_num;
return ksz_irq_common_setup(dev, pirq);
}
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct dsa_port *dp;
struct ksz_port *p;
const u16 *regs;
int ret;
regs = dev->info->regs;
dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
dev->info->num_vlans, GFP_KERNEL);
if (!dev->vlan_cache)
return -ENOMEM;
ret = dev->dev_ops->reset(dev);
if (ret) {
dev_err(ds->dev, "failed to reset switch\n");
return ret;
}
	/* set broadcast storm protection to a 10% rate */
regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL],
BROADCAST_STORM_RATE,
(BROADCAST_STORM_VALUE *
BROADCAST_STORM_PROT_RATE) / 100);
dev->dev_ops->config_cpu_port(ds);
dev->dev_ops->enable_stp_addr(dev);
ds->num_tx_queues = dev->info->num_tx_queues;
regmap_update_bits(ksz_regmap_8(dev), regs[S_MULTICAST_CTRL],
MULTICAST_STORM_DISABLE, MULTICAST_STORM_DISABLE);
ksz_init_mib_timer(dev);
ds->configure_vlan_while_not_filtering = false;
if (dev->dev_ops->setup) {
ret = dev->dev_ops->setup(ds);
if (ret)
return ret;
}
	/* Start with learning disabled on standalone user ports, and enabled
	 * on the CPU port. Lacking finer-grained mechanisms, learning on the
	 * CPU port avoids flooding bridge-local addresses onto the network
	 * in some cases.
	 */
p = &dev->ports[dev->cpu_port];
p->learning = true;
if (dev->irq > 0) {
ret = ksz_girq_setup(dev);
if (ret)
return ret;
dsa_switch_for_each_user_port(dp, dev->ds) {
ret = ksz_pirq_setup(dev, dp->index);
if (ret)
goto out_girq;
ret = ksz_ptp_irq_setup(ds, dp->index);
if (ret)
goto out_pirq;
}
}
ret = ksz_ptp_clock_register(ds);
if (ret) {
dev_err(dev->dev, "Failed to register PTP clock: %d\n", ret);
goto out_ptpirq;
}
ret = ksz_mdio_register(dev);
if (ret < 0) {
dev_err(dev->dev, "failed to register the mdio");
goto out_ptp_clock_unregister;
}
/* start switch */
regmap_update_bits(ksz_regmap_8(dev), regs[S_START_CTRL],
SW_START, SW_START);
return 0;
out_ptp_clock_unregister:
ksz_ptp_clock_unregister(ds);
out_ptpirq:
if (dev->irq > 0)
dsa_switch_for_each_user_port(dp, dev->ds)
ksz_ptp_irq_free(ds, dp->index);
out_pirq:
if (dev->irq > 0)
dsa_switch_for_each_user_port(dp, dev->ds)
ksz_irq_free(&dev->ports[dp->index].pirq);
out_girq:
if (dev->irq > 0)
ksz_irq_free(&dev->girq);
return ret;
}
static void ksz_teardown(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct dsa_port *dp;
ksz_ptp_clock_unregister(ds);
if (dev->irq > 0) {
dsa_switch_for_each_user_port(dp, dev->ds) {
ksz_ptp_irq_free(ds, dp->index);
ksz_irq_free(&dev->ports[dp->index].pirq);
}
ksz_irq_free(&dev->girq);
}
if (dev->dev_ops->teardown)
dev->dev_ops->teardown(ds);
}
static void port_r_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
u64 *dropped;
/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
while (mib->cnt_ptr < dev->info->reg_mib_cnt) {
dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
&mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
	/* The dropped-frame count is kept in the last slot of the counter storage. */
dropped = &mib->counters[dev->info->mib_cnt];
/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
while (mib->cnt_ptr < dev->info->mib_cnt) {
dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
dropped, &mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
mib->cnt_ptr = 0;
}
static void ksz_mib_read_work(struct work_struct *work)
{
struct ksz_device *dev = container_of(work, struct ksz_device,
mib_read.work);
struct ksz_port_mib *mib;
struct ksz_port *p;
int i;
for (i = 0; i < dev->info->port_cnt; i++) {
if (dsa_is_unused_port(dev->ds, i))
continue;
p = &dev->ports[i];
mib = &p->mib;
mutex_lock(&mib->cnt_mutex);
/* Only read MIB counters when the port is told to do so.
 * Otherwise, read only the dropped counters when the link is down.
*/
if (!p->read) {
const struct dsa_port *dp = dsa_to_port(dev->ds, i);
if (!netif_carrier_ok(dp->slave))
mib->cnt_ptr = dev->info->reg_mib_cnt;
}
port_r_cnt(dev, i);
p->read = false;
if (dev->dev_ops->r_mib_stat64)
dev->dev_ops->r_mib_stat64(dev, i);
mutex_unlock(&mib->cnt_mutex);
}
schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
}
void ksz_init_mib_timer(struct ksz_device *dev)
{
int i;
INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
for (i = 0; i < dev->info->port_cnt; i++) {
struct ksz_port_mib *mib = &dev->ports[i].mib;
dev->dev_ops->port_init_cnt(dev, i);
mib->cnt_ptr = 0;
memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64));
}
}
static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
int ret;
ret = dev->dev_ops->r_phy(dev, addr, reg, &val);
if (ret)
return ret;
return val;
}
static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
struct ksz_device *dev = ds->priv;
int ret;
ret = dev->dev_ops->w_phy(dev, addr, reg, val);
if (ret)
return ret;
return 0;
}
static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
switch (dev->chip_id) {
case KSZ8830_CHIP_ID:
/* Silicon Errata Sheet (DS80000830A):
* Port 1 does not work with LinkMD Cable-Testing.
* Port 1 does not respond to received PAUSE control frames.
*/
if (!port)
return MICREL_KSZ8_P1_ERRATA;
break;
case KSZ9477_CHIP_ID:
/* KSZ9477 Errata DS80000754C
*
* Module 4: Energy Efficient Ethernet (EEE) feature select must
* be manually disabled
* The EEE feature is enabled by default, but it is not fully
* operational. It must be manually disabled through register
* controls. If not disabled, the PHY ports can auto-negotiate
* to enable EEE, and this feature can cause link drops when
* linked to another device supporting EEE.
*/
return MICREL_NO_EEE;
}
return 0;
}
static void ksz_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode, phy_interface_t interface)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
/* Read all MIB counters when the link is going down. */
p->read = true;
/* timer started */
if (dev->mib_read_interval)
schedule_delayed_work(&dev->mib_read, 0);
}
static int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct ksz_device *dev = ds->priv;
if (sset != ETH_SS_STATS)
return 0;
return dev->info->mib_cnt;
}
static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *buf)
{
const struct dsa_port *dp = dsa_to_port(ds, port);
struct ksz_device *dev = ds->priv;
struct ksz_port_mib *mib;
mib = &dev->ports[port].mib;
mutex_lock(&mib->cnt_mutex);
/* Only read dropped counters if no link. */
if (!netif_carrier_ok(dp->slave))
mib->cnt_ptr = dev->info->reg_mib_cnt;
port_r_cnt(dev, port);
memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64));
mutex_unlock(&mib->cnt_mutex);
}
static int ksz_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
/* port_stp_state_set() will be called afterwards to put the port in
 * the appropriate state, so there is no need to do anything here.
*/
return 0;
}
static void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
/* port_stp_state_set() will be called afterwards to put the port in
 * the forwarding state, so there is no need to do anything here.
*/
}
static void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
dev->dev_ops->flush_dyn_mac_table(dev, port);
}
static int ksz_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
struct ksz_device *dev = ds->priv;
if (!dev->dev_ops->set_ageing_time)
return -EOPNOTSUPP;
return dev->dev_ops->set_ageing_time(dev, msecs);
}
static int ksz_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
if (!dev->dev_ops->fdb_add)
return -EOPNOTSUPP;
return dev->dev_ops->fdb_add(dev, port, addr, vid, db);
}
static int ksz_port_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr,
u16 vid, struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
if (!dev->dev_ops->fdb_del)
return -EOPNOTSUPP;
return dev->dev_ops->fdb_del(dev, port, addr, vid, db);
}
static int ksz_port_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct ksz_device *dev = ds->priv;
if (!dev->dev_ops->fdb_dump)
return -EOPNOTSUPP;
return dev->dev_ops->fdb_dump(dev, port, cb, data);
}
static int ksz_port_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
if (!dev->dev_ops->mdb_add)
return -EOPNOTSUPP;
return dev->dev_ops->mdb_add(dev, port, mdb, db);
}
static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
if (!dev->dev_ops->mdb_del)
return -EOPNOTSUPP;
return dev->dev_ops->mdb_del(dev, port, mdb, db);
}
static int ksz_enable_port(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct ksz_device *dev = ds->priv;
if (!dsa_is_user_port(ds, port))
return 0;
/* setup slave port */
dev->dev_ops->port_setup(dev, port, false);
/* port_stp_state_set() will be called afterwards to enable the port,
 * so there is no need to do anything here.
*/
return 0;
}
void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p;
const u16 *regs;
u8 data;
regs = dev->info->regs;
ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
p = &dev->ports[port];
switch (state) {
case BR_STATE_DISABLED:
data |= PORT_LEARN_DISABLE;
break;
case BR_STATE_LISTENING:
data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
break;
case BR_STATE_LEARNING:
data |= PORT_RX_ENABLE;
if (!p->learning)
data |= PORT_LEARN_DISABLE;
break;
case BR_STATE_FORWARDING:
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
if (!p->learning)
data |= PORT_LEARN_DISABLE;
break;
case BR_STATE_BLOCKING:
data |= PORT_LEARN_DISABLE;
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
return;
}
ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
p->stp_state = state;
ksz_update_port_member(dev, port);
}
static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
if (flags.mask & ~BR_LEARNING)
return -EINVAL;
return 0;
}
static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
if (flags.mask & BR_LEARNING) {
p->learning = !!(flags.val & BR_LEARNING);
/* Make the change take effect immediately */
ksz_port_stp_state_set(ds, port, p->stp_state);
}
return 0;
}
static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
{
struct ksz_device *dev = ds->priv;
enum dsa_tag_protocol proto = DSA_TAG_PROTO_NONE;
if (dev->chip_id == KSZ8795_CHIP_ID ||
dev->chip_id == KSZ8794_CHIP_ID ||
dev->chip_id == KSZ8765_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ8795;
if (dev->chip_id == KSZ8830_CHIP_ID ||
dev->chip_id == KSZ8563_CHIP_ID ||
dev->chip_id == KSZ9893_CHIP_ID ||
dev->chip_id == KSZ9563_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9893;
if (dev->chip_id == KSZ9477_CHIP_ID ||
dev->chip_id == KSZ9896_CHIP_ID ||
dev->chip_id == KSZ9897_CHIP_ID ||
dev->chip_id == KSZ9567_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9477;
if (is_lan937x(dev))
proto = DSA_TAG_PROTO_LAN937X_VALUE;
return proto;
}
static int ksz_connect_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
struct ksz_tagger_data *tagger_data;
tagger_data = ksz_tagger_data(ds);
tagger_data->xmit_work_fn = ksz_port_deferred_xmit;
return 0;
}
static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
bool flag, struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
if (!dev->dev_ops->vlan_filtering)
return -EOPNOTSUPP;
return dev->dev_ops->vlan_filtering(dev, port, flag, extack);
}
static int ksz_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
if (!dev->dev_ops->vlan_add)
return -EOPNOTSUPP;
return dev->dev_ops->vlan_add(dev, port, vlan, extack);
}
static int ksz_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ksz_device *dev = ds->priv;
if (!dev->dev_ops->vlan_del)
return -EOPNOTSUPP;
return dev->dev_ops->vlan_del(dev, port, vlan);
}
static int ksz_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
if (!dev->dev_ops->mirror_add)
return -EOPNOTSUPP;
return dev->dev_ops->mirror_add(dev, port, mirror, ingress, extack);
}
static void ksz_port_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
struct ksz_device *dev = ds->priv;
if (dev->dev_ops->mirror_del)
dev->dev_ops->mirror_del(dev, port, mirror);
}
static int ksz_change_mtu(struct dsa_switch *ds, int port, int mtu)
{
struct ksz_device *dev = ds->priv;
if (!dev->dev_ops->change_mtu)
return -EOPNOTSUPP;
return dev->dev_ops->change_mtu(dev, port, mtu);
}
static int ksz_max_mtu(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
switch (dev->chip_id) {
case KSZ8795_CHIP_ID:
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
case KSZ8830_CHIP_ID:
return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
case KSZ8563_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
case KSZ9893_CHIP_ID:
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
case LAN9370_CHIP_ID:
case LAN9371_CHIP_ID:
case LAN9372_CHIP_ID:
case LAN9373_CHIP_ID:
case LAN9374_CHIP_ID:
return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
}
return -EOPNOTSUPP;
}
static int ksz_validate_eee(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
if (!dev->info->internal_phy[port])
return -EOPNOTSUPP;
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
case KSZ9893_CHIP_ID:
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
return 0;
}
return -EOPNOTSUPP;
}
static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
int ret;
ret = ksz_validate_eee(ds, port);
if (ret)
return ret;
/* There is no documented control of Tx LPI configuration. */
e->tx_lpi_enabled = true;
/* There is no documented control of the Tx LPI timer. According to
 * tests, the Tx LPI timer seems to default to the minimal value.
*/
e->tx_lpi_timer = 0;
return 0;
}
static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
struct ksz_device *dev = ds->priv;
int ret;
ret = ksz_validate_eee(ds, port);
if (ret)
return ret;
if (!e->tx_lpi_enabled) {
dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n");
return -EINVAL;
}
if (e->tx_lpi_timer) {
dev_err(dev->dev, "Setting EEE Tx LPI timer is not supported\n");
return -EINVAL;
}
return 0;
}
static void ksz_set_xmii(struct ksz_device *dev, int port,
phy_interface_t interface)
{
const u8 *bitval = dev->info->xmii_ctrl1;
struct ksz_port *p = &dev->ports[port];
const u16 *regs = dev->info->regs;
u8 data8;
ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
data8 &= ~(P_MII_SEL_M | P_RGMII_ID_IG_ENABLE |
P_RGMII_ID_EG_ENABLE);
switch (interface) {
case PHY_INTERFACE_MODE_MII:
data8 |= bitval[P_MII_SEL];
break;
case PHY_INTERFACE_MODE_RMII:
data8 |= bitval[P_RMII_SEL];
break;
case PHY_INTERFACE_MODE_GMII:
data8 |= bitval[P_GMII_SEL];
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
data8 |= bitval[P_RGMII_SEL];
/* On KSZ9893-compatible chips, disable RGMII in-band status support */
if (dev->chip_id == KSZ9893_CHIP_ID ||
dev->chip_id == KSZ8563_CHIP_ID ||
dev->chip_id == KSZ9563_CHIP_ID)
data8 &= ~P_MII_MAC_MODE;
break;
default:
dev_err(dev->dev, "Unsupported interface '%s' for port %d\n",
phy_modes(interface), port);
return;
}
if (p->rgmii_tx_val)
data8 |= P_RGMII_ID_EG_ENABLE;
if (p->rgmii_rx_val)
data8 |= P_RGMII_ID_IG_ENABLE;
/* Write the updated value */
ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8);
}
phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
{
const u8 *bitval = dev->info->xmii_ctrl1;
const u16 *regs = dev->info->regs;
phy_interface_t interface;
u8 data8;
u8 val;
ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
val = FIELD_GET(P_MII_SEL_M, data8);
if (val == bitval[P_MII_SEL]) {
if (gbit)
interface = PHY_INTERFACE_MODE_GMII;
else
interface = PHY_INTERFACE_MODE_MII;
} else if (val == bitval[P_RMII_SEL]) {
interface = PHY_INTERFACE_MODE_RMII;
} else {
interface = PHY_INTERFACE_MODE_RGMII;
if (data8 & P_RGMII_ID_EG_ENABLE)
interface = PHY_INTERFACE_MODE_RGMII_TXID;
if (data8 & P_RGMII_ID_IG_ENABLE) {
interface = PHY_INTERFACE_MODE_RGMII_RXID;
if (data8 & P_RGMII_ID_EG_ENABLE)
interface = PHY_INTERFACE_MODE_RGMII_ID;
}
}
return interface;
}
static void ksz_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
struct ksz_device *dev = ds->priv;
if (ksz_is_ksz88x3(dev))
return;
/* Internal PHYs */
if (dev->info->internal_phy[port])
return;
if (phylink_autoneg_inband(mode)) {
dev_err(dev->dev, "In-band AN not supported!\n");
return;
}
ksz_set_xmii(dev, port, state->interface);
if (dev->dev_ops->phylink_mac_config)
dev->dev_ops->phylink_mac_config(dev, port, mode, state);
if (dev->dev_ops->setup_rgmii_delay)
dev->dev_ops->setup_rgmii_delay(dev, port);
}
bool ksz_get_gbit(struct ksz_device *dev, int port)
{
const u8 *bitval = dev->info->xmii_ctrl1;
const u16 *regs = dev->info->regs;
bool gbit = false;
u8 data8;
bool val;
ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
val = FIELD_GET(P_GMII_1GBIT_M, data8);
if (val == bitval[P_GMII_1GBIT])
gbit = true;
return gbit;
}
static void ksz_set_gbit(struct ksz_device *dev, int port, bool gbit)
{
const u8 *bitval = dev->info->xmii_ctrl1;
const u16 *regs = dev->info->regs;
u8 data8;
ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
data8 &= ~P_GMII_1GBIT_M;
if (gbit)
data8 |= FIELD_PREP(P_GMII_1GBIT_M, bitval[P_GMII_1GBIT]);
else
data8 |= FIELD_PREP(P_GMII_1GBIT_M, bitval[P_GMII_NOT_1GBIT]);
/* Write the updated value */
ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8);
}
static void ksz_set_100_10mbit(struct ksz_device *dev, int port, int speed)
{
const u8 *bitval = dev->info->xmii_ctrl0;
const u16 *regs = dev->info->regs;
u8 data8;
ksz_pread8(dev, port, regs[P_XMII_CTRL_0], &data8);
data8 &= ~P_MII_100MBIT_M;
if (speed == SPEED_100)
data8 |= FIELD_PREP(P_MII_100MBIT_M, bitval[P_MII_100MBIT]);
else
data8 |= FIELD_PREP(P_MII_100MBIT_M, bitval[P_MII_10MBIT]);
/* Write the updated value */
ksz_pwrite8(dev, port, regs[P_XMII_CTRL_0], data8);
}
static void ksz_port_set_xmii_speed(struct ksz_device *dev, int port, int speed)
{
if (speed == SPEED_1000)
ksz_set_gbit(dev, port, true);
else
ksz_set_gbit(dev, port, false);
if (speed == SPEED_100 || speed == SPEED_10)
ksz_set_100_10mbit(dev, port, speed);
}
static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
bool tx_pause, bool rx_pause)
{
const u8 *bitval = dev->info->xmii_ctrl0;
const u32 *masks = dev->info->masks;
const u16 *regs = dev->info->regs;
u8 mask;
u8 val;
mask = P_MII_DUPLEX_M | masks[P_MII_TX_FLOW_CTRL] |
masks[P_MII_RX_FLOW_CTRL];
if (duplex == DUPLEX_FULL)
val = FIELD_PREP(P_MII_DUPLEX_M, bitval[P_MII_FULL_DUPLEX]);
else
val = FIELD_PREP(P_MII_DUPLEX_M, bitval[P_MII_HALF_DUPLEX]);
if (tx_pause)
val |= masks[P_MII_TX_FLOW_CTRL];
if (rx_pause)
val |= masks[P_MII_RX_FLOW_CTRL];
ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
}
static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev, int speed,
int duplex, bool tx_pause,
bool rx_pause)
{
struct ksz_port *p;
p = &dev->ports[port];
/* Internal PHYs */
if (dev->info->internal_phy[port])
return;
p->phydev.speed = speed;
ksz_port_set_xmii_speed(dev, port, speed);
ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
}
static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev, int speed,
int duplex, bool tx_pause, bool rx_pause)
{
struct ksz_device *dev = ds->priv;
if (dev->dev_ops->phylink_mac_link_up)
dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface,
phydev, speed, duplex,
tx_pause, rx_pause);
}
static int ksz_switch_detect(struct ksz_device *dev)
{
u8 id1, id2, id4;
u16 id16;
u32 id32;
int ret;
/* read chip id */
ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
if (ret)
return ret;
id1 = FIELD_GET(SW_FAMILY_ID_M, id16);
id2 = FIELD_GET(SW_CHIP_ID_M, id16);
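/* The KSZ87xx and KSZ88xx parts are identified by the 16-bit family/chip
 * ID read above; the remaining parts expose a full 32-bit chip ID, which
 * is read in the default branch below.
 */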
switch (id1) {
case KSZ87_FAMILY_ID:
if (id2 == KSZ87_CHIP_ID_95) {
u8 val;
dev->chip_id = KSZ8795_CHIP_ID;
ksz_read8(dev, KSZ8_PORT_STATUS_0, &val);
if (val & KSZ8_PORT_FIBER_MODE)
dev->chip_id = KSZ8765_CHIP_ID;
} else if (id2 == KSZ87_CHIP_ID_94) {
dev->chip_id = KSZ8794_CHIP_ID;
} else {
return -ENODEV;
}
break;
case KSZ88_FAMILY_ID:
if (id2 == KSZ88_CHIP_ID_63)
dev->chip_id = KSZ8830_CHIP_ID;
else
return -ENODEV;
break;
default:
ret = ksz_read32(dev, REG_CHIP_ID0, &id32);
if (ret)
return ret;
dev->chip_rev = FIELD_GET(SW_REV_ID_M, id32);
id32 &= ~0xFF;
switch (id32) {
case KSZ9477_CHIP_ID:
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
case KSZ9567_CHIP_ID:
case LAN9370_CHIP_ID:
case LAN9371_CHIP_ID:
case LAN9372_CHIP_ID:
case LAN9373_CHIP_ID:
case LAN9374_CHIP_ID:
dev->chip_id = id32;
break;
case KSZ9893_CHIP_ID:
ret = ksz_read8(dev, REG_CHIP_ID4,
&id4);
if (ret)
return ret;
if (id4 == SKU_ID_KSZ8563)
dev->chip_id = KSZ8563_CHIP_ID;
else if (id4 == SKU_ID_KSZ9563)
dev->chip_id = KSZ9563_CHIP_ID;
else
dev->chip_id = KSZ9893_CHIP_ID;
break;
default:
dev_err(dev->dev,
"unsupported switch detected %x)\n", id32);
return -ENODEV;
}
}
return 0;
}
/* Bandwidth is calculated as idle slope / transmission speed. The bandwidth
 * fraction is then converted to a hexadecimal fixed-point value using the
 * successive multiplication method: on every step the integer part is taken
 * and the fractional part is carried forward.
*/
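/* Worked example (illustrative numbers only): with idle_slope = 10000 and
 * send_slope = -90000, txrate = 100000 and rate = 10000, i.e. a 10%
 * bandwidth share. Six multiply-by-16 steps then yield the 24-bit
 * fixed-point value 0x199999 (~0.1 * 16^6), which is what ends up in the
 * credit increment register.
 */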
static int cinc_cal(s32 idle_slope, s32 send_slope, u32 *bw)
{
u32 cinc = 0;
u32 txrate;
u32 rate;
u8 temp;
u8 i;
txrate = idle_slope - send_slope;
if (!txrate)
return -EINVAL;
rate = idle_slope;
/* 24 bit register */
for (i = 0; i < 6; i++) {
rate = rate * 16;
temp = rate / txrate;
rate %= txrate;
cinc = ((cinc << 4) | temp);
}
*bw = cinc;
return 0;
}
static int ksz_setup_tc_mode(struct ksz_device *dev, int port, u8 scheduler,
u8 shaper)
{
return ksz_pwrite8(dev, port, REG_PORT_MTI_QUEUE_CTRL_0,
FIELD_PREP(MTI_SCHEDULE_MODE_M, scheduler) |
FIELD_PREP(MTI_SHAPING_M, shaper));
}
static int ksz_setup_tc_cbs(struct dsa_switch *ds, int port,
struct tc_cbs_qopt_offload *qopt)
{
struct ksz_device *dev = ds->priv;
int ret;
u32 bw;
if (!dev->info->tc_cbs_supported)
return -EOPNOTSUPP;
if (qopt->queue > dev->info->num_tx_queues)
return -EINVAL;
/* Queue Selection */
ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, qopt->queue);
if (ret)
return ret;
if (!qopt->enable)
return ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_WRR,
MTI_SHAPING_OFF);
/* High Credit */
ret = ksz_pwrite16(dev, port, REG_PORT_MTI_HI_WATER_MARK,
qopt->hicredit);
if (ret)
return ret;
/* Low Credit */
ret = ksz_pwrite16(dev, port, REG_PORT_MTI_LO_WATER_MARK,
qopt->locredit);
if (ret)
return ret;
/* Credit Increment Register */
ret = cinc_cal(qopt->idleslope, qopt->sendslope, &bw);
if (ret)
return ret;
if (dev->dev_ops->tc_cbs_set_cinc) {
ret = dev->dev_ops->tc_cbs_set_cinc(dev, port, bw);
if (ret)
return ret;
}
return ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_STRICT_PRIO,
MTI_SHAPING_SRP);
}
static int ksz_disable_egress_rate_limit(struct ksz_device *dev, int port)
{
int queue, ret;
/* Configuration will not take effect until the last Port Queue X
* Egress Limit Control Register is written.
*/
for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
ret = ksz_pwrite8(dev, port, KSZ9477_REG_PORT_OUT_RATE_0 + queue,
KSZ9477_OUT_RATE_NO_LIMIT);
if (ret)
return ret;
}
return 0;
}
static int ksz_ets_band_to_queue(struct tc_ets_qopt_offload_replace_params *p,
int band)
{
/* Compared to queues, bands prioritize packets differently. In strict
* priority mode, the lowest priority is assigned to Queue 0 while the
* highest priority is given to Band 0.
*/
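/* For example, with 4 bands: band 0 maps to queue 3 and band 3 maps to
 * queue 0.
 */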
return p->bands - 1 - band;
}
static int ksz_queue_set_strict(struct ksz_device *dev, int port, int queue)
{
int ret;
ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, queue);
if (ret)
return ret;
return ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_STRICT_PRIO,
MTI_SHAPING_OFF);
}
static int ksz_queue_set_wrr(struct ksz_device *dev, int port, int queue,
int weight)
{
int ret;
ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, queue);
if (ret)
return ret;
ret = ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_WRR,
MTI_SHAPING_OFF);
if (ret)
return ret;
return ksz_pwrite8(dev, port, KSZ9477_PORT_MTI_QUEUE_CTRL_1, weight);
}
static int ksz_tc_ets_add(struct ksz_device *dev, int port,
struct tc_ets_qopt_offload_replace_params *p)
{
int ret, band, tc_prio;
u32 queue_map = 0;
/* In order to ensure proper prioritization, it is necessary to set the
* rate limit for the related queue to zero. Otherwise strict priority
* or WRR mode will not work. This is a hardware limitation.
*/
ret = ksz_disable_egress_rate_limit(dev, port);
if (ret)
return ret;
/* Configure queue scheduling mode for all bands. Currently only strict
* prio mode is supported.
*/
for (band = 0; band < p->bands; band++) {
int queue = ksz_ets_band_to_queue(p, band);
ret = ksz_queue_set_strict(dev, port, queue);
if (ret)
return ret;
}
/* Configure the mapping between traffic classes and queues. Note:
 * the priomap variable supports 16 traffic classes, but the chip can
 * handle only 8 classes.
*/
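/* Each priority gets a KSZ9477_PORT_TC_MAP_S-bit wide field in queue_map,
 * so the register ends up holding one queue index per traffic class.
 */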
for (tc_prio = 0; tc_prio < ARRAY_SIZE(p->priomap); tc_prio++) {
int queue;
if (tc_prio > KSZ9477_MAX_TC_PRIO)
break;
queue = ksz_ets_band_to_queue(p, p->priomap[tc_prio]);
queue_map |= queue << (tc_prio * KSZ9477_PORT_TC_MAP_S);
}
return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map);
}
static int ksz_tc_ets_del(struct ksz_device *dev, int port)
{
int ret, queue, tc_prio, s;
u32 queue_map = 0;
/* To restore the default chip configuration, set all queues to use the
* WRR scheduler with a weight of 1.
*/
for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
ret = ksz_queue_set_wrr(dev, port, queue,
KSZ9477_DEFAULT_WRR_WEIGHT);
if (ret)
return ret;
}
switch (dev->info->num_tx_queues) {
case 2:
s = 2;
break;
case 4:
s = 1;
break;
case 8:
s = 0;
break;
default:
return -EINVAL;
}
/* Revert the queue mapping for TC-priority to its default setting on
* the chip.
*/
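/* For example, with 4 TX queues (s == 1) the default mapping becomes:
 * priorities 0-1 -> queue 0, 2-3 -> queue 1, 4-5 -> queue 2, 6-7 -> queue 3.
 */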
for (tc_prio = 0; tc_prio <= KSZ9477_MAX_TC_PRIO; tc_prio++) {
int queue;
queue = tc_prio >> s;
queue_map |= queue << (tc_prio * KSZ9477_PORT_TC_MAP_S);
}
return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map);
}
static int ksz_tc_ets_validate(struct ksz_device *dev, int port,
struct tc_ets_qopt_offload_replace_params *p)
{
int band;
/* Since it is not feasible to share one port among multiple qdiscs,
* the user must configure all available queues appropriately.
*/
if (p->bands != dev->info->num_tx_queues) {
dev_err(dev->dev, "Not supported amount of bands. It should be %d\n",
dev->info->num_tx_queues);
return -EOPNOTSUPP;
}
for (band = 0; band < p->bands; ++band) {
/* The KSZ switches utilize a weighted round robin configuration
* where a certain number of packets can be transmitted from a
* queue before the next queue is serviced. For more information
* on this, refer to section 5.2.8.4 of the KSZ8565R
* documentation on the Port Transmit Queue Control 1 Register.
* However, the current ETS Qdisc implementation (as of February
* 2023) assigns a weight to each queue based on the number of
* bytes or extrapolated bandwidth in percentages. Since this
* differs from the KSZ switches' method and we don't want to
* fake support by converting bytes to packets, it is better to
* return an error instead.
*/
if (p->quanta[band]) {
dev_err(dev->dev, "Quanta/weights configuration is not supported.\n");
return -EOPNOTSUPP;
}
}
return 0;
}
static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
struct tc_ets_qopt_offload *qopt)
{
struct ksz_device *dev = ds->priv;
int ret;
if (!dev->info->tc_ets_supported)
return -EOPNOTSUPP;
if (qopt->parent != TC_H_ROOT) {
dev_err(dev->dev, "Parent should be \"root\"\n");
return -EOPNOTSUPP;
}
switch (qopt->command) {
case TC_ETS_REPLACE:
ret = ksz_tc_ets_validate(dev, port, &qopt->replace_params);
if (ret)
return ret;
return ksz_tc_ets_add(dev, port, &qopt->replace_params);
case TC_ETS_DESTROY:
return ksz_tc_ets_del(dev, port);
case TC_ETS_STATS:
case TC_ETS_GRAFT:
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
static int ksz_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_CBS:
return ksz_setup_tc_cbs(ds, port, type_data);
case TC_SETUP_QDISC_ETS:
return ksz_tc_setup_qdisc_ets(ds, port, type_data);
default:
return -EOPNOTSUPP;
}
}
static const struct dsa_switch_ops ksz_switch_ops = {
.get_tag_protocol = ksz_get_tag_protocol,
.connect_tag_protocol = ksz_connect_tag_protocol,
.get_phy_flags = ksz_get_phy_flags,
.setup = ksz_setup,
.teardown = ksz_teardown,
.phy_read = ksz_phy_read16,
.phy_write = ksz_phy_write16,
.phylink_get_caps = ksz_phylink_get_caps,
.phylink_mac_config = ksz_phylink_mac_config,
.phylink_mac_link_up = ksz_phylink_mac_link_up,
.phylink_mac_link_down = ksz_mac_link_down,
.port_enable = ksz_enable_port,
.set_ageing_time = ksz_set_ageing_time,
.get_strings = ksz_get_strings,
.get_ethtool_stats = ksz_get_ethtool_stats,
.get_sset_count = ksz_sset_count,
.port_bridge_join = ksz_port_bridge_join,
.port_bridge_leave = ksz_port_bridge_leave,
.port_stp_state_set = ksz_port_stp_state_set,
.port_pre_bridge_flags = ksz_port_pre_bridge_flags,
.port_bridge_flags = ksz_port_bridge_flags,
.port_fast_age = ksz_port_fast_age,
.port_vlan_filtering = ksz_port_vlan_filtering,
.port_vlan_add = ksz_port_vlan_add,
.port_vlan_del = ksz_port_vlan_del,
.port_fdb_dump = ksz_port_fdb_dump,
.port_fdb_add = ksz_port_fdb_add,
.port_fdb_del = ksz_port_fdb_del,
.port_mdb_add = ksz_port_mdb_add,
.port_mdb_del = ksz_port_mdb_del,
.port_mirror_add = ksz_port_mirror_add,
.port_mirror_del = ksz_port_mirror_del,
.get_stats64 = ksz_get_stats64,
.get_pause_stats = ksz_get_pause_stats,
.port_change_mtu = ksz_change_mtu,
.port_max_mtu = ksz_max_mtu,
.get_ts_info = ksz_get_ts_info,
.port_hwtstamp_get = ksz_hwtstamp_get,
.port_hwtstamp_set = ksz_hwtstamp_set,
.port_txtstamp = ksz_port_txtstamp,
.port_rxtstamp = ksz_port_rxtstamp,
.port_setup_tc = ksz_setup_tc,
.get_mac_eee = ksz_get_mac_eee,
.set_mac_eee = ksz_set_mac_eee,
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
struct dsa_switch *ds;
struct ksz_device *swdev;
ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
if (!ds)
return NULL;
ds->dev = base;
ds->num_ports = DSA_MAX_PORTS;
ds->ops = &ksz_switch_ops;
swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
if (!swdev)
return NULL;
ds->priv = swdev;
swdev->dev = base;
swdev->ds = ds;
swdev->priv = priv;
return swdev;
}
EXPORT_SYMBOL(ksz_switch_alloc);
static void ksz_parse_rgmii_delay(struct ksz_device *dev, int port_num,
struct device_node *port_dn)
{
phy_interface_t phy_mode = dev->ports[port_num].interface;
int rx_delay = -1, tx_delay = -1;
if (!phy_interface_mode_is_rgmii(phy_mode))
return;
of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
if (rx_delay == -1 && tx_delay == -1) {
dev_warn(dev->dev,
"Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
"please update device tree to specify \"rx-internal-delay-ps\" and "
"\"tx-internal-delay-ps\"",
port_num);
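/* Fall back to 2000 ps, the 2 ns internal delay conventionally implied
 * by the rgmii-*id phy-modes.
 */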
if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
rx_delay = 2000;
if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
tx_delay = 2000;
}
if (rx_delay < 0)
rx_delay = 0;
if (tx_delay < 0)
tx_delay = 0;
dev->ports[port_num].rgmii_rx_val = rx_delay;
dev->ports[port_num].rgmii_tx_val = tx_delay;
}
int ksz_switch_register(struct ksz_device *dev)
{
const struct ksz_chip_data *info;
struct device_node *port, *ports;
phy_interface_t interface;
unsigned int port_num;
int ret;
int i;
if (dev->pdata)
dev->chip_id = dev->pdata->chip_id;
dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(dev->reset_gpio))
return PTR_ERR(dev->reset_gpio);
if (dev->reset_gpio) {
gpiod_set_value_cansleep(dev->reset_gpio, 1);
usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);
msleep(100);
}
mutex_init(&dev->dev_mutex);
mutex_init(&dev->regmap_mutex);
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
ret = ksz_switch_detect(dev);
if (ret)
return ret;
info = ksz_lookup_info(dev->chip_id);
if (!info)
return -ENODEV;
/* Update the compatible info with the probed one */
dev->info = info;
dev_info(dev->dev, "found switch: %s, rev %i\n",
dev->info->dev_name, dev->chip_rev);
ret = ksz_check_device_id(dev);
if (ret)
return ret;
dev->dev_ops = dev->info->ops;
ret = dev->dev_ops->init(dev);
if (ret)
return ret;
dev->ports = devm_kzalloc(dev->dev,
dev->info->port_cnt * sizeof(struct ksz_port),
GFP_KERNEL);
if (!dev->ports)
return -ENOMEM;
for (i = 0; i < dev->info->port_cnt; i++) {
spin_lock_init(&dev->ports[i].mib.stats64_lock);
mutex_init(&dev->ports[i].mib.cnt_mutex);
dev->ports[i].mib.counters =
devm_kzalloc(dev->dev,
sizeof(u64) * (dev->info->mib_cnt + 1),
GFP_KERNEL);
if (!dev->ports[i].mib.counters)
return -ENOMEM;
dev->ports[i].ksz_dev = dev;
dev->ports[i].num = i;
}
/* set the real number of ports */
dev->ds->num_ports = dev->info->port_cnt;
/* Host port interface will be self detected, or specifically set in
* device tree.
*/
for (port_num = 0; port_num < dev->info->port_cnt; ++port_num)
dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
if (dev->dev->of_node) {
ret = of_get_phy_mode(dev->dev->of_node, &interface);
if (ret == 0)
dev->compat_interface = interface;
ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports");
if (!ports)
ports = of_get_child_by_name(dev->dev->of_node, "ports");
if (ports) {
for_each_available_child_of_node(ports, port) {
if (of_property_read_u32(port, "reg",
&port_num))
continue;
if (!(dev->port_mask & BIT(port_num))) {
of_node_put(port);
of_node_put(ports);
return -EINVAL;
}
of_get_phy_mode(port,
&dev->ports[port_num].interface);
ksz_parse_rgmii_delay(dev, port_num, port);
}
of_node_put(ports);
}
dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-125");
dev->synclko_disable = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-disable");
if (dev->synclko_125 && dev->synclko_disable) {
dev_err(dev->dev, "inconsistent synclko settings\n");
return -EINVAL;
}
}
ret = dsa_register_switch(dev->ds);
if (ret) {
dev->dev_ops->exit(dev);
return ret;
}
/* Read MIB counters every 5 seconds to avoid overflow. */
dev->mib_read_interval = msecs_to_jiffies(5000);
/* Start the MIB timer. */
schedule_delayed_work(&dev->mib_read, 0);
return ret;
}
EXPORT_SYMBOL(ksz_switch_register);
void ksz_switch_remove(struct ksz_device *dev)
{
/* timer started */
if (dev->mib_read_interval) {
dev->mib_read_interval = 0;
cancel_delayed_work_sync(&dev->mib_read);
}
dev->dev_ops->exit(dev);
dsa_unregister_switch(dev->ds);
if (dev->reset_gpio)
gpiod_set_value_cansleep(dev->reset_gpio, 1);
}
EXPORT_SYMBOL(ksz_switch_remove);
MODULE_AUTHOR("Woojung Huh <[email protected]>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/microchip/ksz_common.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Microchip KSZ9477 series register access through I2C
*
* Copyright (C) 2018-2019 Microchip Technology Inc.
*/
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include "ksz_common.h"
KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0);
static int ksz9477_i2c_probe(struct i2c_client *i2c)
{
struct regmap_config rc;
struct ksz_device *dev;
int i, ret;
dev = ksz_switch_alloc(&i2c->dev, i2c);
if (!dev)
return -ENOMEM;
for (i = 0; i < __KSZ_NUM_REGMAPS; i++) {
rc = ksz9477_regmap_config[i];
rc.lock_arg = &dev->regmap_mutex;
dev->regmap[i] = devm_regmap_init_i2c(i2c, &rc);
if (IS_ERR(dev->regmap[i])) {
return dev_err_probe(&i2c->dev, PTR_ERR(dev->regmap[i]),
"Failed to initialize regmap%i\n",
ksz9477_regmap_config[i].val_bits);
}
}
if (i2c->dev.platform_data)
dev->pdata = i2c->dev.platform_data;
dev->irq = i2c->irq;
ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
return ret;
i2c_set_clientdata(i2c, dev);
return 0;
}
static void ksz9477_i2c_remove(struct i2c_client *i2c)
{
struct ksz_device *dev = i2c_get_clientdata(i2c);
if (dev)
ksz_switch_remove(dev);
}
static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
{
struct ksz_device *dev = i2c_get_clientdata(i2c);
if (!dev)
return;
if (dev->dev_ops->reset)
dev->dev_ops->reset(dev);
dsa_switch_shutdown(dev->ds);
i2c_set_clientdata(i2c, NULL);
}
static const struct i2c_device_id ksz9477_i2c_id[] = {
{ "ksz9477-switch", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id);
static const struct of_device_id ksz9477_dt_ids[] = {
{
.compatible = "microchip,ksz9477",
.data = &ksz_switch_chips[KSZ9477]
},
{
.compatible = "microchip,ksz9896",
.data = &ksz_switch_chips[KSZ9896]
},
{
.compatible = "microchip,ksz9897",
.data = &ksz_switch_chips[KSZ9897]
},
{
.compatible = "microchip,ksz9893",
.data = &ksz_switch_chips[KSZ9893]
},
{
.compatible = "microchip,ksz9563",
.data = &ksz_switch_chips[KSZ9563]
},
{
.compatible = "microchip,ksz8563",
.data = &ksz_switch_chips[KSZ8563]
},
{
.compatible = "microchip,ksz9567",
.data = &ksz_switch_chips[KSZ9567]
},
{},
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
static struct i2c_driver ksz9477_i2c_driver = {
.driver = {
.name = "ksz9477-switch",
.of_match_table = ksz9477_dt_ids,
},
.probe = ksz9477_i2c_probe,
.remove = ksz9477_i2c_remove,
.shutdown = ksz9477_i2c_shutdown,
.id_table = ksz9477_i2c_id,
};
module_i2c_driver(ksz9477_i2c_driver);
MODULE_AUTHOR("Tristram Ha <[email protected]>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch I2C access Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/microchip/ksz9477_i2c.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Microchip KSZ8795 switch driver
*
* Copyright (C) 2017 Microchip Technology Inc.
* Tristram Ha <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/micrel_phy.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include <linux/phylink.h>
#include "ksz_common.h"
#include "ksz8795_reg.h"
#include "ksz8.h"
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
}
static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bool set)
{
regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
bits, set ? bits : 0);
}
static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
{
const u16 *regs;
u16 ctrl_addr;
int ret = 0;
regs = dev->info->regs;
mutex_lock(&dev->alu_mutex);
ctrl_addr = IND_ACC_TABLE(table) | addr;
ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
if (!ret)
ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
mutex_unlock(&dev->alu_mutex);
return ret;
}
int ksz8_reset_switch(struct ksz_device *dev)
{
if (ksz_is_ksz88x3(dev)) {
/* reset switch */
ksz_cfg(dev, KSZ8863_REG_SW_RESET,
KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, true);
ksz_cfg(dev, KSZ8863_REG_SW_RESET,
KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, false);
} else {
/* reset switch */
ksz_write8(dev, REG_POWER_MANAGEMENT_1,
SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S);
ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0);
}
return 0;
}
static int ksz8863_change_mtu(struct ksz_device *dev, int frame_size)
{
u8 ctrl2 = 0;
if (frame_size <= KSZ8_LEGAL_PACKET_SIZE)
ctrl2 |= KSZ8863_LEGAL_PACKET_ENABLE;
else if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
ctrl2 |= KSZ8863_HUGE_PACKET_ENABLE;
return ksz_rmw8(dev, REG_SW_CTRL_2, KSZ8863_LEGAL_PACKET_ENABLE |
KSZ8863_HUGE_PACKET_ENABLE, ctrl2);
}
static int ksz8795_change_mtu(struct ksz_device *dev, int frame_size)
{
u8 ctrl1 = 0, ctrl2 = 0;
int ret;
if (frame_size > KSZ8_LEGAL_PACKET_SIZE)
ctrl2 |= SW_LEGAL_PACKET_DISABLE;
if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
ctrl1 |= SW_HUGE_PACKET;
ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_HUGE_PACKET, ctrl1);
if (ret)
return ret;
return ksz_rmw8(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, ctrl2);
}
int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu)
{
u16 frame_size;
if (!dsa_is_cpu_port(dev->ds, port))
return 0;
frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
switch (dev->chip_id) {
case KSZ8795_CHIP_ID:
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
return ksz8795_change_mtu(dev, frame_size);
case KSZ8830_CHIP_ID:
return ksz8863_change_mtu(dev, frame_size);
}
return -EOPNOTSUPP;
}
static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
{
u8 hi, lo;
/* Number of queues can only be 1, 2, or 4. */
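/* The queue-split selection is spread across two registers: the low bit
 * lives in REG_PORT_CTRL_0 and the high bit in P_DROP_TAG_CTRL, as set up
 * below.
 */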
switch (queue) {
case 4:
case 3:
queue = PORT_QUEUE_SPLIT_4;
break;
case 2:
queue = PORT_QUEUE_SPLIT_2;
break;
default:
queue = PORT_QUEUE_SPLIT_1;
}
ksz_pread8(dev, port, REG_PORT_CTRL_0, &lo);
ksz_pread8(dev, port, P_DROP_TAG_CTRL, &hi);
lo &= ~PORT_QUEUE_SPLIT_L;
if (queue & PORT_QUEUE_SPLIT_2)
lo |= PORT_QUEUE_SPLIT_L;
hi &= ~PORT_QUEUE_SPLIT_H;
if (queue & PORT_QUEUE_SPLIT_4)
hi |= PORT_QUEUE_SPLIT_H;
ksz_pwrite8(dev, port, REG_PORT_CTRL_0, lo);
ksz_pwrite8(dev, port, P_DROP_TAG_CTRL, hi);
/* Egress rate limiting is port based by default; switch to queue based
 * limiting when multiple queues are in use.
 */
if (queue != PORT_QUEUE_SPLIT_1)
ksz_cfg(dev, REG_SW_CTRL_19, SW_OUT_RATE_LIMIT_QUEUE_BASED,
true);
}
void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
const u32 *masks;
const u16 *regs;
u16 ctrl_addr;
u32 data;
u8 check;
int loop;
masks = dev->info->masks;
regs = dev->info->regs;
ctrl_addr = addr + dev->info->reg_mib_cnt * port;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
mutex_lock(&dev->alu_mutex);
ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
/* The valid bit is almost always already set because of the slow SPI
 * speed, so only a couple of polls are needed.
*/
for (loop = 2; loop > 0; loop--) {
ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check);
if (check & masks[MIB_COUNTER_VALID]) {
ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
if (check & masks[MIB_COUNTER_OVERFLOW])
*cnt += MIB_COUNTER_VALUE + 1;
*cnt += data & MIB_COUNTER_VALUE;
break;
}
}
mutex_unlock(&dev->alu_mutex);
}
static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
const u32 *masks;
const u16 *regs;
u16 ctrl_addr;
u32 data;
u8 check;
int loop;
masks = dev->info->masks;
regs = dev->info->regs;
addr -= dev->info->reg_mib_cnt;
ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port;
ctrl_addr += addr + KSZ8795_MIB_TOTAL_RX_0;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
mutex_lock(&dev->alu_mutex);
ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
/* The valid bit is almost always already set because of the slow SPI
 * speed, so only a couple of polls are needed.
*/
for (loop = 2; loop > 0; loop--) {
ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check);
if (check & masks[MIB_COUNTER_VALID]) {
ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
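/* The first two entries are the wide total RX/TX byte counters, whose
 * upper bits are carried in the check register; the remaining entries
 * are dropped-packet counts.
 */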
if (addr < 2) {
u64 total;
total = check & MIB_TOTAL_BYTES_H;
total <<= 32;
*cnt += total;
*cnt += data;
if (check & masks[MIB_COUNTER_OVERFLOW]) {
total = MIB_TOTAL_BYTES_H + 1;
total <<= 32;
*cnt += total;
}
} else {
if (check & masks[MIB_COUNTER_OVERFLOW])
*cnt += MIB_PACKET_DROPPED + 1;
*cnt += data & MIB_PACKET_DROPPED;
}
break;
}
}
mutex_unlock(&dev->alu_mutex);
}
static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
u32 *last = (u32 *)dropped;
const u16 *regs;
u16 ctrl_addr;
u32 data;
u32 cur;
regs = dev->info->regs;
addr -= dev->info->reg_mib_cnt;
ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
KSZ8863_MIB_PACKET_DROPPED_RX_0;
ctrl_addr += port;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
mutex_lock(&dev->alu_mutex);
ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
mutex_unlock(&dev->alu_mutex);
data &= MIB_PACKET_DROPPED;
cur = last[addr];
if (data != cur) {
last[addr] = data;
if (data < cur)
data += MIB_PACKET_DROPPED + 1;
data -= cur;
*cnt += data;
}
}
void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
if (ksz_is_ksz88x3(dev))
ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
else
ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
}
void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
if (ksz_is_ksz88x3(dev))
return;
/* enable the port for flush/freeze function */
if (freeze)
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FREEZE, freeze);
/* disable the port after freeze is done */
if (!freeze)
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
}
void ksz8_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
u64 *dropped;
if (!ksz_is_ksz88x3(dev)) {
/* flush all enabled port MIB counters */
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true);
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
}
mib->cnt_ptr = 0;
/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
while (mib->cnt_ptr < dev->info->reg_mib_cnt) {
dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
&mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
/* last one in storage */
dropped = &mib->counters[dev->info->mib_cnt];
/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
while (mib->cnt_ptr < dev->info->mib_cnt) {
dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
dropped, &mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
}
static int ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
{
const u16 *regs;
u16 ctrl_addr;
int ret;
regs = dev->info->regs;
ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
mutex_lock(&dev->alu_mutex);
ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
if (ret)
goto unlock_alu;
ret = ksz_read64(dev, regs[REG_IND_DATA_HI], data);
unlock_alu:
mutex_unlock(&dev->alu_mutex);
return ret;
}
static int ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
{
const u16 *regs;
u16 ctrl_addr;
int ret;
regs = dev->info->regs;
ctrl_addr = IND_ACC_TABLE(table) | addr;
mutex_lock(&dev->alu_mutex);
ret = ksz_write64(dev, regs[REG_IND_DATA_HI], data);
if (ret)
goto unlock_alu;
ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
unlock_alu:
mutex_unlock(&dev->alu_mutex);
return ret;
}
static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
{
int timeout = 100;
const u32 *masks;
const u16 *regs;
masks = dev->info->masks;
regs = dev->info->regs;
do {
ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
timeout--;
} while ((*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) && timeout);
/* Entry is not ready for accessing. */
if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) {
return -EAGAIN;
/* Entry is ready for accessing. */
} else {
ksz_read8(dev, regs[REG_IND_DATA_8], data);
/* There is no valid entry in the table. */
if (*data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY])
return -ENXIO;
}
return 0;
}
int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries)
{
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
const u16 *regs;
u16 ctrl_addr;
u8 data;
int rc;
shifts = dev->info->shifts;
masks = dev->info->masks;
regs = dev->info->regs;
ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
mutex_lock(&dev->alu_mutex);
ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
rc = ksz8_valid_dyn_entry(dev, &data);
if (rc == -EAGAIN) {
if (addr == 0)
*entries = 0;
} else if (rc == -ENXIO) {
*entries = 0;
/* At least one valid entry in the table. */
} else {
u64 buf = 0;
int cnt;
ksz_read64(dev, regs[REG_IND_DATA_HI], &buf);
data_hi = (u32)(buf >> 32);
data_lo = (u32)buf;
/* Check how many valid entries are in the table. */
cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H];
cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H];
cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >>
shifts[DYNAMIC_MAC_ENTRIES];
*entries = cnt + 1;
*fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >>
shifts[DYNAMIC_MAC_FID];
*src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >>
shifts[DYNAMIC_MAC_SRC_PORT];
*timestamp = (data_hi & masks[DYNAMIC_MAC_TABLE_TIMESTAMP]) >>
shifts[DYNAMIC_MAC_TIMESTAMP];
mac_addr[5] = (u8)data_lo;
mac_addr[4] = (u8)(data_lo >> 8);
mac_addr[3] = (u8)(data_lo >> 16);
mac_addr[2] = (u8)(data_lo >> 24);
mac_addr[1] = (u8)data_hi;
mac_addr[0] = (u8)(data_hi >> 8);
rc = 0;
}
mutex_unlock(&dev->alu_mutex);
return rc;
}
static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
struct alu_struct *alu, bool *valid)
{
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
u64 data;
int ret;
shifts = dev->info->shifts;
masks = dev->info->masks;
ret = ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
if (ret)
return ret;
data_hi = data >> 32;
data_lo = (u32)data;
if (!(data_hi & (masks[STATIC_MAC_TABLE_VALID] |
masks[STATIC_MAC_TABLE_OVERRIDE]))) {
*valid = false;
return 0;
}
alu->mac[5] = (u8)data_lo;
alu->mac[4] = (u8)(data_lo >> 8);
alu->mac[3] = (u8)(data_lo >> 16);
alu->mac[2] = (u8)(data_lo >> 24);
alu->mac[1] = (u8)data_hi;
alu->mac[0] = (u8)(data_hi >> 8);
alu->port_forward =
(data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
shifts[STATIC_MAC_FWD_PORTS];
alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
/* KSZ8795 family switches have STATIC_MAC_TABLE_USE_FID and
 * STATIC_MAC_TABLE_FID definitions that are off by 1 when reading the
 * static MAC table compared to writing it.
*/
if (ksz_is_ksz87xx(dev))
data_hi >>= 1;
alu->is_static = true;
alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
shifts[STATIC_MAC_FID];
*valid = true;
return 0;
}
static int ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
struct alu_struct *alu)
{
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
u64 data;
shifts = dev->info->shifts;
masks = dev->info->masks;
data_lo = ((u32)alu->mac[2] << 24) |
((u32)alu->mac[3] << 16) |
((u32)alu->mac[4] << 8) | alu->mac[5];
data_hi = ((u32)alu->mac[0] << 8) | alu->mac[1];
data_hi |= (u32)alu->port_forward << shifts[STATIC_MAC_FWD_PORTS];
if (alu->is_override)
data_hi |= masks[STATIC_MAC_TABLE_OVERRIDE];
if (alu->is_use_fid) {
data_hi |= masks[STATIC_MAC_TABLE_USE_FID];
data_hi |= (u32)alu->fid << shifts[STATIC_MAC_FID];
}
if (alu->is_static)
data_hi |= masks[STATIC_MAC_TABLE_VALID];
else
data_hi &= ~masks[STATIC_MAC_TABLE_OVERRIDE];
data = (u64)data_hi << 32 | data_lo;
return ksz8_w_table(dev, TABLE_STATIC_MAC, addr, data);
}
static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
u8 *member, u8 *valid)
{
const u8 *shifts;
const u32 *masks;
shifts = dev->info->shifts;
masks = dev->info->masks;
*fid = vlan & masks[VLAN_TABLE_FID];
*member = (vlan & masks[VLAN_TABLE_MEMBERSHIP]) >>
shifts[VLAN_TABLE_MEMBERSHIP_S];
*valid = !!(vlan & masks[VLAN_TABLE_VALID]);
}
static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
u16 *vlan)
{
const u8 *shifts;
const u32 *masks;
shifts = dev->info->shifts;
masks = dev->info->masks;
*vlan = fid;
*vlan |= (u16)member << shifts[VLAN_TABLE_MEMBERSHIP_S];
if (valid)
*vlan |= masks[VLAN_TABLE_VALID];
}
static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
{
const u8 *shifts;
u64 data;
int i;
shifts = dev->info->shifts;
ksz8_r_table(dev, TABLE_VLAN, addr, &data);
addr *= 4;
for (i = 0; i < 4; i++) {
dev->vlan_cache[addr + i].table[0] = (u16)data;
data >>= shifts[VLAN_TABLE];
}
}
static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
{
int index;
u16 *data;
u16 addr;
u64 buf;
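/* Each 64-bit VLAN table word holds four 16-bit VLAN entries: addr
 * selects the word (vid / 4) and index selects the 16-bit slot (vid & 3).
 */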
data = (u16 *)&buf;
addr = vid / 4;
index = vid & 3;
ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
*vlan = data[index];
}
static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
{
int index;
u16 *data;
u16 addr;
u64 buf;
data = (u16 *)&buf;
addr = vid / 4;
index = vid & 3;
ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
data[index] = vlan;
dev->vlan_cache[vid].table[0] = vlan;
ksz8_w_table(dev, TABLE_VLAN, addr, buf);
}
int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
u8 restart, speed, ctrl, link;
int processed = true;
const u16 *regs;
u8 val1, val2;
u16 data = 0;
u8 p = phy;
int ret;
regs = dev->info->regs;
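/* Standard MII register reads are emulated here: each requested MII
 * register is synthesized from the switch's per-port control and status
 * registers.
 */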
switch (reg) {
case MII_BMCR:
ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
if (ret)
return ret;
ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
if (ret)
return ret;
ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
if (ret)
return ret;
if (restart & PORT_PHY_LOOPBACK)
data |= BMCR_LOOPBACK;
if (ctrl & PORT_FORCE_100_MBIT)
data |= BMCR_SPEED100;
if (ksz_is_ksz88x3(dev)) {
if ((ctrl & PORT_AUTO_NEG_ENABLE))
data |= BMCR_ANENABLE;
} else {
if (!(ctrl & PORT_AUTO_NEG_DISABLE))
data |= BMCR_ANENABLE;
}
if (restart & PORT_POWER_DOWN)
data |= BMCR_PDOWN;
if (restart & PORT_AUTO_NEG_RESTART)
data |= BMCR_ANRESTART;
if (ctrl & PORT_FORCE_FULL_DUPLEX)
data |= BMCR_FULLDPLX;
if (speed & PORT_HP_MDIX)
data |= KSZ886X_BMCR_HP_MDIX;
if (restart & PORT_FORCE_MDIX)
data |= KSZ886X_BMCR_FORCE_MDI;
if (restart & PORT_AUTO_MDIX_DISABLE)
data |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
if (restart & PORT_TX_DISABLE)
data |= KSZ886X_BMCR_DISABLE_TRANSMIT;
if (restart & PORT_LED_OFF)
data |= KSZ886X_BMCR_DISABLE_LED;
break;
case MII_BMSR:
ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
if (ret)
return ret;
data = BMSR_100FULL |
BMSR_100HALF |
BMSR_10FULL |
BMSR_10HALF |
BMSR_ANEGCAPABLE;
if (link & PORT_AUTO_NEG_COMPLETE)
data |= BMSR_ANEGCOMPLETE;
if (link & PORT_STAT_LINK_GOOD)
data |= BMSR_LSTATUS;
break;
case MII_PHYSID1:
data = KSZ8795_ID_HI;
break;
case MII_PHYSID2:
if (ksz_is_ksz88x3(dev))
data = KSZ8863_ID_LO;
else
data = KSZ8795_ID_LO;
break;
case MII_ADVERTISE:
ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
if (ret)
return ret;
data = ADVERTISE_CSMA;
if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
data |= ADVERTISE_PAUSE_CAP;
if (ctrl & PORT_AUTO_NEG_100BTX_FD)
data |= ADVERTISE_100FULL;
if (ctrl & PORT_AUTO_NEG_100BTX)
data |= ADVERTISE_100HALF;
if (ctrl & PORT_AUTO_NEG_10BT_FD)
data |= ADVERTISE_10FULL;
if (ctrl & PORT_AUTO_NEG_10BT)
data |= ADVERTISE_10HALF;
break;
case MII_LPA:
ret = ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
if (ret)
return ret;
data = LPA_SLCT;
if (link & PORT_REMOTE_SYM_PAUSE)
data |= LPA_PAUSE_CAP;
if (link & PORT_REMOTE_100BTX_FD)
data |= LPA_100FULL;
if (link & PORT_REMOTE_100BTX)
data |= LPA_100HALF;
if (link & PORT_REMOTE_10BT_FD)
data |= LPA_10FULL;
if (link & PORT_REMOTE_10BT)
data |= LPA_10HALF;
if (data & ~LPA_SLCT)
data |= LPA_LPACK;
break;
case PHY_REG_LINK_MD:
ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
if (ret)
return ret;
ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
if (ret)
return ret;
if (val1 & PORT_START_CABLE_DIAG)
data |= PHY_START_CABLE_DIAG;
if (val1 & PORT_CABLE_10M_SHORT)
data |= PHY_CABLE_10M_SHORT;
data |= FIELD_PREP(PHY_CABLE_DIAG_RESULT_M,
FIELD_GET(PORT_CABLE_DIAG_RESULT_M, val1));
data |= FIELD_PREP(PHY_CABLE_FAULT_COUNTER_M,
(FIELD_GET(PORT_CABLE_FAULT_COUNTER_H, val1) << 8) |
FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2));
break;
case PHY_REG_PHY_CTRL:
ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
if (ret)
return ret;
if (link & PORT_MDIX_STATUS)
data |= KSZ886X_CTRL_MDIX_STAT;
break;
default:
processed = false;
break;
}
if (processed)
*val = data;
return 0;
}
int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
{
u8 restart, speed, ctrl, data;
const u16 *regs;
u8 p = phy;
int ret;
regs = dev->info->regs;
switch (reg) {
case MII_BMCR:
/* Do not support PHY reset function. */
if (val & BMCR_RESET)
break;
ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
if (ret)
return ret;
data = speed;
if (val & KSZ886X_BMCR_HP_MDIX)
data |= PORT_HP_MDIX;
else
data &= ~PORT_HP_MDIX;
if (data != speed) {
ret = ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
if (ret)
return ret;
}
ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
if (ret)
return ret;
data = ctrl;
if (ksz_is_ksz88x3(dev)) {
if ((val & BMCR_ANENABLE))
data |= PORT_AUTO_NEG_ENABLE;
else
data &= ~PORT_AUTO_NEG_ENABLE;
} else {
if (!(val & BMCR_ANENABLE))
data |= PORT_AUTO_NEG_DISABLE;
else
data &= ~PORT_AUTO_NEG_DISABLE;
/* Fiber port does not support auto-negotiation. */
if (dev->ports[p].fiber)
data |= PORT_AUTO_NEG_DISABLE;
}
if (val & BMCR_SPEED100)
data |= PORT_FORCE_100_MBIT;
else
data &= ~PORT_FORCE_100_MBIT;
if (val & BMCR_FULLDPLX)
data |= PORT_FORCE_FULL_DUPLEX;
else
data &= ~PORT_FORCE_FULL_DUPLEX;
if (data != ctrl) {
ret = ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
if (ret)
return ret;
}
ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
if (ret)
return ret;
data = restart;
if (val & KSZ886X_BMCR_DISABLE_LED)
data |= PORT_LED_OFF;
else
data &= ~PORT_LED_OFF;
if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
data |= PORT_TX_DISABLE;
else
data &= ~PORT_TX_DISABLE;
if (val & BMCR_ANRESTART)
data |= PORT_AUTO_NEG_RESTART;
else
data &= ~(PORT_AUTO_NEG_RESTART);
if (val & BMCR_PDOWN)
data |= PORT_POWER_DOWN;
else
data &= ~PORT_POWER_DOWN;
if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
data |= PORT_AUTO_MDIX_DISABLE;
else
data &= ~PORT_AUTO_MDIX_DISABLE;
if (val & KSZ886X_BMCR_FORCE_MDI)
data |= PORT_FORCE_MDIX;
else
data &= ~PORT_FORCE_MDIX;
if (val & BMCR_LOOPBACK)
data |= PORT_PHY_LOOPBACK;
else
data &= ~PORT_PHY_LOOPBACK;
if (data != restart) {
ret = ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL],
data);
if (ret)
return ret;
}
break;
case MII_ADVERTISE:
ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
if (ret)
return ret;
data = ctrl;
data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
PORT_AUTO_NEG_100BTX_FD |
PORT_AUTO_NEG_100BTX |
PORT_AUTO_NEG_10BT_FD |
PORT_AUTO_NEG_10BT);
if (val & ADVERTISE_PAUSE_CAP)
data |= PORT_AUTO_NEG_SYM_PAUSE;
if (val & ADVERTISE_100FULL)
data |= PORT_AUTO_NEG_100BTX_FD;
if (val & ADVERTISE_100HALF)
data |= PORT_AUTO_NEG_100BTX;
if (val & ADVERTISE_10FULL)
data |= PORT_AUTO_NEG_10BT_FD;
if (val & ADVERTISE_10HALF)
data |= PORT_AUTO_NEG_10BT;
if (data != ctrl) {
ret = ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
if (ret)
return ret;
}
break;
case PHY_REG_LINK_MD:
if (val & PHY_START_CABLE_DIAG)
ksz_port_cfg(dev, p, REG_PORT_LINK_MD_CTRL, PORT_START_CABLE_DIAG, true);
break;
default:
break;
}
return 0;
}
void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
u8 data;
ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
data &= ~PORT_VLAN_MEMBERSHIP;
data |= (member & dev->port_mask);
ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
}
void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
u8 learn[DSA_MAX_PORTS];
int first, index, cnt;
const u16 *regs;
regs = dev->info->regs;
if ((uint)port < dev->info->port_cnt) {
first = port;
cnt = port + 1;
} else {
/* Flush all ports. */
first = 0;
cnt = dev->info->port_cnt;
}
for (index = first; index < cnt; index++) {
ksz_pread8(dev, index, regs[P_STP_CTRL], &learn[index]);
if (!(learn[index] & PORT_LEARN_DISABLE))
ksz_pwrite8(dev, index, regs[P_STP_CTRL],
learn[index] | PORT_LEARN_DISABLE);
}
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
for (index = first; index < cnt; index++) {
if (!(learn[index] & PORT_LEARN_DISABLE))
ksz_pwrite8(dev, index, regs[P_STP_CTRL], learn[index]);
}
}
int ksz8_fdb_dump(struct ksz_device *dev, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
int ret = 0;
u16 i = 0;
u16 entries = 0;
u8 timestamp = 0;
u8 fid;
u8 src_port;
u8 mac[ETH_ALEN];
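/* 'entries' is updated by every table read, so the loop walks the
* dynamic MAC table until all currently valid entries are seen.
*/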
do {
ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port,
&timestamp, &entries);
if (!ret && port == src_port) {
ret = cb(mac, fid, false, data);
if (ret)
break;
}
i++;
} while (i < entries);
if (i >= entries)
ret = 0;
return ret;
}
static int ksz8_add_sta_mac(struct ksz_device *dev, int port,
const unsigned char *addr, u16 vid)
{
struct alu_struct alu;
int index, ret;
int empty = 0;
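/* 'empty' is kept 1-based so that 0 means no free slot was found. */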
alu.port_forward = 0;
for (index = 0; index < dev->info->num_statics; index++) {
bool valid;
ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
if (ret)
return ret;
if (!valid) {
/* Remember the first empty entry. */
if (!empty)
empty = index + 1;
continue;
}
if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
break;
}
/* no available entry */
if (index == dev->info->num_statics && !empty)
return -ENOSPC;
/* add entry */
if (index == dev->info->num_statics) {
index = empty - 1;
memset(&alu, 0, sizeof(alu));
memcpy(alu.mac, addr, ETH_ALEN);
alu.is_static = true;
}
alu.port_forward |= BIT(port);
if (vid) {
alu.is_use_fid = true;
/* Need a way to map VID to FID. */
alu.fid = vid;
}
return ksz8_w_sta_mac_table(dev, index, &alu);
}
static int ksz8_del_sta_mac(struct ksz_device *dev, int port,
const unsigned char *addr, u16 vid)
{
struct alu_struct alu;
int index, ret;
for (index = 0; index < dev->info->num_statics; index++) {
bool valid;
ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
if (ret)
return ret;
if (!valid)
continue;
if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
break;
}
/* no matching entry */
if (index == dev->info->num_statics)
return 0;
/* clear port */
alu.port_forward &= ~BIT(port);
if (!alu.port_forward)
alu.is_static = false;
return ksz8_w_sta_mac_table(dev, index, &alu);
}
int ksz8_mdb_add(struct ksz_device *dev, int port,
const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
return ksz8_add_sta_mac(dev, port, mdb->addr, mdb->vid);
}
int ksz8_mdb_del(struct ksz_device *dev, int port,
const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
return ksz8_del_sta_mac(dev, port, mdb->addr, mdb->vid);
}
int ksz8_fdb_add(struct ksz_device *dev, int port, const unsigned char *addr,
u16 vid, struct dsa_db db)
{
return ksz8_add_sta_mac(dev, port, addr, vid);
}
int ksz8_fdb_del(struct ksz_device *dev, int port, const unsigned char *addr,
u16 vid, struct dsa_db db)
{
return ksz8_del_sta_mac(dev, port, addr, vid);
}
int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
struct netlink_ext_ack *extack)
{
if (ksz_is_ksz88x3(dev))
return -ENOTSUPP;
/* Discard packets with VID not enabled on the switch */
ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
/* Discard packets with VID not enabled on the ingress port */
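/* Note: the 'port' argument is reused as the loop index because
* ingress filtering is applied to every PHY port, not just the
* given one.
*/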
for (port = 0; port < dev->phy_port_cnt; ++port)
ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER,
flag);
return 0;
}
static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
{
if (ksz_is_ksz88x3(dev)) {
ksz_cfg(dev, REG_SW_INSERT_SRC_PVID,
0x03 << (4 - 2 * port), state);
} else {
ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
}
}
int ksz8_port_vlan_add(struct ksz_device *dev, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct ksz_port *p = &dev->ports[port];
u16 data, new_pvid = 0;
u8 fid, member, valid;
if (ksz_is_ksz88x3(dev))
return -ENOTSUPP;
/* If a VLAN is added with untagged flag different from the
* port's Remove Tag flag, we need to change the latter.
* Ignore VID 0, which is always untagged.
* Ignore CPU port, which will always be tagged.
*/
if (untagged != p->remove_tag && vlan->vid != 0 &&
port != dev->cpu_port) {
unsigned int vid;
/* Reject attempts to add a VLAN that requires the
* Remove Tag flag to be changed, unless there are no
* other VLANs currently configured.
*/
for (vid = 1; vid < dev->info->num_vlans; ++vid) {
/* Skip the VID we are going to add or reconfigure */
if (vid == vlan->vid)
continue;
ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0],
&fid, &member, &valid);
if (valid && (member & BIT(port)))
return -EINVAL;
}
ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
p->remove_tag = untagged;
}
ksz8_r_vlan_table(dev, vlan->vid, &data);
ksz8_from_vlan(dev, data, &fid, &member, &valid);
/* First time to setup the VLAN entry. */
if (!valid) {
/* Need to find a way to map VID to FID. */
fid = 1;
valid = 1;
}
member |= BIT(port);
ksz8_to_vlan(dev, fid, member, valid, &data);
ksz8_w_vlan_table(dev, vlan->vid, data);
/* change PVID */
if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
new_pvid = vlan->vid;
if (new_pvid) {
u16 vid;
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
vid &= ~VLAN_VID_MASK;
vid |= new_pvid;
ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
ksz8_port_enable_pvid(dev, port, true);
}
return 0;
}
int ksz8_port_vlan_del(struct ksz_device *dev, int port,
const struct switchdev_obj_port_vlan *vlan)
{
u16 data, pvid;
u8 fid, member, valid;
if (ksz_is_ksz88x3(dev))
return -ENOTSUPP;
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
pvid = pvid & 0xFFF;
ksz8_r_vlan_table(dev, vlan->vid, &data);
ksz8_from_vlan(dev, data, &fid, &member, &valid);
member &= ~BIT(port);
/* Invalidate the entry if no more member. */
if (!member) {
fid = 0;
valid = 0;
}
ksz8_to_vlan(dev, fid, member, valid, &data);
ksz8_w_vlan_table(dev, vlan->vid, data);
if (pvid == vlan->vid)
ksz8_port_enable_pvid(dev, port, false);
return 0;
}
int ksz8_port_mirror_add(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
{
if (ingress) {
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
dev->mirror_rx |= BIT(port);
} else {
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
dev->mirror_tx |= BIT(port);
}
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
/* configure mirror port */
if (dev->mirror_rx || dev->mirror_tx)
ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
PORT_MIRROR_SNIFFER, true);
return 0;
}
void ksz8_port_mirror_del(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
u8 data;
if (mirror->ingress) {
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
dev->mirror_rx &= ~BIT(port);
} else {
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
dev->mirror_tx &= ~BIT(port);
}
ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
if (!dev->mirror_rx && !dev->mirror_tx)
ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
PORT_MIRROR_SNIFFER, false);
}
static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
if (!p->interface && dev->compat_interface) {
dev_warn(dev->dev,
"Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
"Please update your device tree.\n",
port);
p->interface = dev->compat_interface;
}
}
void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct dsa_switch *ds = dev->ds;
const u32 *masks;
u8 member;
masks = dev->info->masks;
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
if (!ksz_is_ksz88x3(dev))
ksz8795_set_prio_queue(dev, port, 4);
/* disable DiffServ priority */
ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_ENABLE, false);
/* replace priority */
ksz_port_cfg(dev, port, P_802_1P_CTRL,
masks[PORT_802_1P_REMAPPING], false);
/* enable 802.1p priority */
ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true);
if (cpu_port) {
if (!ksz_is_ksz88x3(dev))
ksz8795_cpu_interface_select(dev, port);
member = dsa_user_ports(ds);
} else {
member = BIT(dsa_upstream_port(ds, port));
}
ksz8_cfg_port_member(dev, port, member);
}
void ksz8_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p;
const u32 *masks;
const u16 *regs;
u8 remote;
int i;
masks = dev->info->masks;
regs = dev->info->regs;
ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true);
ksz8_port_setup(dev, dev->cpu_port, true);
for (i = 0; i < dev->phy_port_cnt; i++) {
ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
}
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
if (!ksz_is_ksz88x3(dev)) {
ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
if (remote & KSZ8_PORT_FIBER_MODE)
p->fiber = 1;
}
if (p->fiber)
ksz_port_cfg(dev, i, regs[P_STP_CTRL],
PORT_FORCE_FLOW_CTRL, true);
else
ksz_port_cfg(dev, i, regs[P_STP_CTRL],
PORT_FORCE_FLOW_CTRL, false);
}
}
static int ksz8_handle_global_errata(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
int ret = 0;
/* KSZ87xx Errata DS80000687C.
* Module 2: Link drops with some EEE link partners.
* An issue with the EEE next page exchange between the
* KSZ879x/KSZ877x/KSZ876x and some EEE link partners may result in
* the link dropping.
*/
if (dev->info->ksz87xx_eee_link_erratum)
ret = ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0);
return ret;
}
int ksz8_enable_stp_addr(struct ksz_device *dev)
{
struct alu_struct alu;
/* Setup STP address for STP operation. */
memset(&alu, 0, sizeof(alu));
ether_addr_copy(alu.mac, eth_stp_addr);
alu.is_static = true;
alu.is_override = true;
alu.port_forward = dev->info->cpu_ports;
return ksz8_w_sta_mac_table(dev, 0, &alu);
}
int ksz8_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
int i;
ds->mtu_enforcement_ingress = true;
/* We rely on software untagging on the CPU port, so that we
* can support both tagged and untagged VLANs
*/
ds->untag_bridge_pvid = true;
/* VLAN filtering is partly controlled by the global VLAN
* Enable flag
*/
ds->vlan_filtering_is_global = true;
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_FLOW_CTRL, true);
/* Enable automatic fast aging when a link change is detected. */
ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);
/* Enable aggressive back off algorithm in half duplex mode. */
regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1,
SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
/*
* Make sure unicast VLAN boundary is set as default and
* enable no excessive collision drop.
*/
regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2,
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
if (!ksz_is_ksz88x3(dev))
ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
for (i = 0; i < (dev->info->num_vlans / 4); i++)
ksz8_r_vlan_entries(dev, i);
return ksz8_handle_global_errata(ds);
}
void ksz8_get_caps(struct ksz_device *dev, int port,
struct phylink_config *config)
{
config->mac_capabilities = MAC_10 | MAC_100;
/* Silicon Errata Sheet (DS80000830A):
* "Port 1 does not respond to received flow control PAUSE frames"
* So, disable Pause support on "Port 1" (port == 0) for all ksz88x3
* switches.
*/
if (!ksz_is_ksz88x3(dev) || port)
config->mac_capabilities |= MAC_SYM_PAUSE;
/* Asym pause is not supported on KSZ8863 and KSZ8873 */
if (!ksz_is_ksz88x3(dev))
config->mac_capabilities |= MAC_ASYM_PAUSE;
}
u32 ksz8_get_port_addr(int port, int offset)
{
return PORT_CTRL_ADDR(port, offset);
}
int ksz8_switch_init(struct ksz_device *dev)
{
dev->cpu_port = fls(dev->info->cpu_ports) - 1;
dev->phy_port_cnt = dev->info->port_cnt - 1;
dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | dev->info->cpu_ports;
return 0;
}
void ksz8_switch_exit(struct ksz_device *dev)
{
ksz8_reset_switch(dev);
}
MODULE_AUTHOR("Tristram Ha <[email protected]>");
MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/microchip/ksz8795.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Microchip ksz series register access through SPI
*
* Copyright (C) 2017 Microchip Technology Inc.
* Tristram Ha <[email protected]>
*/
#include <asm/unaligned.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include "ksz_common.h"
#define KSZ8795_SPI_ADDR_SHIFT 12
#define KSZ8795_SPI_ADDR_ALIGN 3
#define KSZ8795_SPI_TURNAROUND_SHIFT 1
#define KSZ8863_SPI_ADDR_SHIFT 8
#define KSZ8863_SPI_ADDR_ALIGN 8
#define KSZ8863_SPI_TURNAROUND_SHIFT 0
#define KSZ9477_SPI_ADDR_SHIFT 24
#define KSZ9477_SPI_ADDR_ALIGN 3
#define KSZ9477_SPI_TURNAROUND_SHIFT 5
KSZ_REGMAP_TABLE(ksz8795, 16, KSZ8795_SPI_ADDR_SHIFT,
KSZ8795_SPI_TURNAROUND_SHIFT, KSZ8795_SPI_ADDR_ALIGN);
KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
KSZ8863_SPI_TURNAROUND_SHIFT, KSZ8863_SPI_ADDR_ALIGN);
KSZ_REGMAP_TABLE(ksz9477, 32, KSZ9477_SPI_ADDR_SHIFT,
KSZ9477_SPI_TURNAROUND_SHIFT, KSZ9477_SPI_ADDR_ALIGN);
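/* KSZ_REGMAP_TABLE() (from ksz_common.h) expands each line above into a
* per-chip array of regmap_config templates, one per register access
* width, built from the SPI framing parameters defined above.
*/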
static int ksz_spi_probe(struct spi_device *spi)
{
const struct regmap_config *regmap_config;
const struct ksz_chip_data *chip;
struct device *ddev = &spi->dev;
struct regmap_config rc;
struct ksz_device *dev;
int i, ret = 0;
dev = ksz_switch_alloc(&spi->dev, spi);
if (!dev)
return -ENOMEM;
chip = device_get_match_data(ddev);
if (!chip)
return -EINVAL;
if (chip->chip_id == KSZ8830_CHIP_ID)
regmap_config = ksz8863_regmap_config;
else if (chip->chip_id == KSZ8795_CHIP_ID ||
chip->chip_id == KSZ8794_CHIP_ID ||
chip->chip_id == KSZ8765_CHIP_ID)
regmap_config = ksz8795_regmap_config;
else
regmap_config = ksz9477_regmap_config;
for (i = 0; i < __KSZ_NUM_REGMAPS; i++) {
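/* Copy the shared template so per-device fields (lock argument and
* access tables) can be set without touching the static config.
*/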
rc = regmap_config[i];
rc.lock_arg = &dev->regmap_mutex;
rc.wr_table = chip->wr_table;
rc.rd_table = chip->rd_table;
dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
if (IS_ERR(dev->regmap[i])) {
return dev_err_probe(&spi->dev, PTR_ERR(dev->regmap[i]),
"Failed to initialize regmap%i\n",
regmap_config[i].val_bits);
}
}
if (spi->dev.platform_data)
dev->pdata = spi->dev.platform_data;
/* setup spi */
spi->mode = SPI_MODE_3;
ret = spi_setup(spi);
if (ret)
return ret;
dev->irq = spi->irq;
ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
return ret;
spi_set_drvdata(spi, dev);
return 0;
}
static void ksz_spi_remove(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
if (dev)
ksz_switch_remove(dev);
}
static void ksz_spi_shutdown(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
if (!dev)
return;
if (dev->dev_ops->reset)
dev->dev_ops->reset(dev);
dsa_switch_shutdown(dev->ds);
spi_set_drvdata(spi, NULL);
}
static const struct of_device_id ksz_dt_ids[] = {
{
.compatible = "microchip,ksz8765",
.data = &ksz_switch_chips[KSZ8765]
},
{
.compatible = "microchip,ksz8794",
.data = &ksz_switch_chips[KSZ8794]
},
{
.compatible = "microchip,ksz8795",
.data = &ksz_switch_chips[KSZ8795]
},
{
.compatible = "microchip,ksz8863",
.data = &ksz_switch_chips[KSZ8830]
},
{
.compatible = "microchip,ksz8873",
.data = &ksz_switch_chips[KSZ8830]
},
{
.compatible = "microchip,ksz9477",
.data = &ksz_switch_chips[KSZ9477]
},
{
.compatible = "microchip,ksz9896",
.data = &ksz_switch_chips[KSZ9896]
},
{
.compatible = "microchip,ksz9897",
.data = &ksz_switch_chips[KSZ9897]
},
{
.compatible = "microchip,ksz9893",
.data = &ksz_switch_chips[KSZ9893]
},
{
.compatible = "microchip,ksz9563",
.data = &ksz_switch_chips[KSZ9563]
},
{
.compatible = "microchip,ksz8563",
.data = &ksz_switch_chips[KSZ8563]
},
{
.compatible = "microchip,ksz9567",
.data = &ksz_switch_chips[KSZ9567]
},
{
.compatible = "microchip,lan9370",
.data = &ksz_switch_chips[LAN9370]
},
{
.compatible = "microchip,lan9371",
.data = &ksz_switch_chips[LAN9371]
},
{
.compatible = "microchip,lan9372",
.data = &ksz_switch_chips[LAN9372]
},
{
.compatible = "microchip,lan9373",
.data = &ksz_switch_chips[LAN9373]
},
{
.compatible = "microchip,lan9374",
.data = &ksz_switch_chips[LAN9374]
},
{},
};
MODULE_DEVICE_TABLE(of, ksz_dt_ids);
static const struct spi_device_id ksz_spi_ids[] = {
{ "ksz8765" },
{ "ksz8794" },
{ "ksz8795" },
{ "ksz8863" },
{ "ksz8873" },
{ "ksz9477" },
{ "ksz9896" },
{ "ksz9897" },
{ "ksz9893" },
{ "ksz9563" },
{ "ksz8563" },
{ "ksz9567" },
{ "lan9370" },
{ "lan9371" },
{ "lan9372" },
{ "lan9373" },
{ "lan9374" },
{ },
};
MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
static struct spi_driver ksz_spi_driver = {
.driver = {
.name = "ksz-switch",
.owner = THIS_MODULE,
.of_match_table = ksz_dt_ids,
},
.id_table = ksz_spi_ids,
.probe = ksz_spi_probe,
.remove = ksz_spi_remove,
.shutdown = ksz_spi_shutdown,
};
module_spi_driver(ksz_spi_driver);
MODULE_ALIAS("spi:ksz9477");
MODULE_ALIAS("spi:ksz9896");
MODULE_ALIAS("spi:ksz9897");
MODULE_ALIAS("spi:ksz9893");
MODULE_ALIAS("spi:ksz9563");
MODULE_ALIAS("spi:ksz8563");
MODULE_ALIAS("spi:ksz9567");
MODULE_ALIAS("spi:lan937x");
MODULE_AUTHOR("Tristram Ha <[email protected]>");
MODULE_DESCRIPTION("Microchip ksz Series Switch SPI Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/dsa/microchip/ksz_spi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2009 Felix Fietkau <[email protected]>
* Copyright (C) 2011-2012 Gabor Juhos <[email protected]>
* Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2016 John Crispin <[email protected]>
*/
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <net/dsa.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/mdio.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <linux/etherdevice.h>
#include <linux/dsa/tag_qca.h>
#include "qca8k.h"
#include "qca8k_leds.h"
static void
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{
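/* Split the 32-bit register address into the low MDIO register (r1),
* the low pseudo PHY address bits (r2) and the page number, e.g.
* regaddr 0x16ac yields r1 = 0x16, r2 = 0x2, page = 0xb.
*/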
regaddr >>= 1;
*r1 = regaddr & 0x1e;
regaddr >>= 5;
*r2 = regaddr & 0x7;
regaddr >>= 3;
*page = regaddr & 0x3ff;
}
static int
qca8k_mii_write_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
{
int ret;
u16 lo;
lo = val & 0xffff;
ret = bus->write(bus, phy_id, regnum, lo);
if (ret < 0)
dev_err_ratelimited(&bus->dev,
"failed to write qca8k 32bit lo register\n");
return ret;
}
static int
qca8k_mii_write_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
{
int ret;
u16 hi;
hi = (u16)(val >> 16);
ret = bus->write(bus, phy_id, regnum, hi);
if (ret < 0)
dev_err_ratelimited(&bus->dev,
"failed to write qca8k 32bit hi register\n");
return ret;
}
static int
qca8k_mii_read_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
int ret;
ret = bus->read(bus, phy_id, regnum);
if (ret < 0)
goto err;
*val = ret & 0xffff;
return 0;
err:
dev_err_ratelimited(&bus->dev,
"failed to read qca8k 32bit lo register\n");
*val = 0;
return ret;
}
static int
qca8k_mii_read_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
int ret;
ret = bus->read(bus, phy_id, regnum);
if (ret < 0)
goto err;
*val = ret << 16;
return 0;
err:
dev_err_ratelimited(&bus->dev,
"failed to read qca8k 32bit hi register\n");
*val = 0;
return ret;
}
static int
qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
u32 hi, lo;
int ret;
*val = 0;
ret = qca8k_mii_read_lo(bus, phy_id, regnum, &lo);
if (ret < 0)
goto err;
ret = qca8k_mii_read_hi(bus, phy_id, regnum + 1, &hi);
if (ret < 0)
goto err;
*val = lo | hi;
err:
return ret;
}
static void
qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
{
if (qca8k_mii_write_lo(bus, phy_id, regnum, val) < 0)
return;
qca8k_mii_write_hi(bus, phy_id, regnum + 1, val);
}
static int
qca8k_set_page(struct qca8k_priv *priv, u16 page)
{
u16 *cached_page = &priv->mdio_cache.page;
struct mii_bus *bus = priv->bus;
int ret;
if (page == *cached_page)
return 0;
ret = bus->write(bus, 0x18, 0, page);
if (ret < 0) {
dev_err_ratelimited(&bus->dev,
"failed to set qca8k page\n");
return ret;
}
*cached_page = page;
usleep_range(1000, 2000);
return 0;
}
static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
struct qca8k_mgmt_eth_data *mgmt_eth_data;
struct qca8k_priv *priv = ds->priv;
struct qca_mgmt_ethhdr *mgmt_ethhdr;
u32 command;
u8 len, cmd;
int i;
mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
mgmt_eth_data = &priv->mgmt_eth_data;
command = get_unaligned_le32(&mgmt_ethhdr->command);
cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
/* Special case for len of 15 as this is the max value for len and needs to
* be increased before converting it from word to dword.
*/
if (len == 15)
len++;
/* We can ignore odd values; we always round them up in the alloc function. */
len *= sizeof(u16);
/* Make sure the seq match the requested packet */
if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
mgmt_eth_data->ack = true;
if (cmd == MDIO_READ) {
u32 *val = mgmt_eth_data->data;
*val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);
/* Get the rest of the 12 byte of data.
* The read/write function will extract the requested data.
*/
if (len > QCA_HDR_MGMT_DATA1_LEN) {
__le32 *data2 = (__le32 *)skb->data;
int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
len - QCA_HDR_MGMT_DATA1_LEN);
val++;
for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
*val = get_unaligned_le32(data2);
val++;
data2++;
}
}
}
complete(&mgmt_eth_data->rw_done);
}
static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
int priority, unsigned int len)
{
struct qca_mgmt_ethhdr *mgmt_ethhdr;
unsigned int real_len;
struct sk_buff *skb;
__le32 *data2;
u32 command;
u16 hdr;
int i;
skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
if (!skb)
return NULL;
/* The hdr mgmt length value is in steps of word size.
* As an example, to process 4 bytes of data the correct length to set is 2.
* To process 8 bytes set 4, 12 bytes set 6, 16 bytes set 8...
*
* Odd values will always return the next size on the ack packet.
* (a length of 3 (6 bytes) will always return 8 bytes of data)
*
* This means that a value of 15 (0xf) actually means reading/writing 32 bytes
* of data.
*
* To correctly calculate the length we divide the requested len by word size
* and round up.
* In the ack function we can skip the odd check as we already handle that
* case here.
*/
real_len = DIV_ROUND_UP(len, sizeof(u16));
/* We check if the resulting len is odd and round up another time to
* the next size. (a length of 3 will be increased to 4 as the switch
* will always return 8 bytes)
*/
if (real_len % sizeof(u16) != 0)
real_len++;
/* Max len value is 0xf (15), but the switch will always return the next size (32 bytes) */
if (real_len == 16)
real_len--;
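/* Example: a 32 byte request gives DIV_ROUND_UP(32, 2) = 16, which is
* then capped to the maximum encodable value of 15 (0xf).
*/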
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb->len);
mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
hdr |= QCA_HDR_XMIT_FROM_CPU;
hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
QCA_HDR_MGMT_CHECK_CODE_VAL);
put_unaligned_le32(command, &mgmt_ethhdr->command);
if (cmd == MDIO_WRITE)
put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);
mgmt_ethhdr->hdr = htons(hdr);
data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
len - QCA_HDR_MGMT_DATA1_LEN);
val++;
for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
put_unaligned_le32(*val, data2);
data2++;
val++;
}
}
return skb;
}
static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
{
struct qca_mgmt_ethhdr *mgmt_ethhdr;
u32 seq;
seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
put_unaligned_le32(seq, &mgmt_ethhdr->seq);
}
static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
struct sk_buff *skb;
bool ack;
int ret;
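/* Build a management frame, send it through the DSA master and wait
* for the tagger ack handler to fill mgmt_eth_data with the reply.
*/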
skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
QCA8K_ETHERNET_MDIO_PRIORITY, len);
if (!skb)
return -ENOMEM;
mutex_lock(&mgmt_eth_data->mutex);
/* Check if mgmt_master is operational */
if (!priv->mgmt_master) {
kfree_skb(skb);
mutex_unlock(&mgmt_eth_data->mutex);
return -EINVAL;
}
skb->dev = priv->mgmt_master;
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the mdio pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
*val = mgmt_eth_data->data[0];
if (len > QCA_HDR_MGMT_DATA1_LEN)
memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
ack = mgmt_eth_data->ack;
mutex_unlock(&mgmt_eth_data->mutex);
if (ret <= 0)
return -ETIMEDOUT;
if (!ack)
return -EINVAL;
return 0;
}
static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
struct sk_buff *skb;
bool ack;
int ret;
skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
QCA8K_ETHERNET_MDIO_PRIORITY, len);
if (!skb)
return -ENOMEM;
mutex_lock(&mgmt_eth_data->mutex);
/* Check if mgmt_master is operational */
if (!priv->mgmt_master) {
kfree_skb(skb);
mutex_unlock(&mgmt_eth_data->mutex);
return -EINVAL;
}
skb->dev = priv->mgmt_master;
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the mdio pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
ack = mgmt_eth_data->ack;
mutex_unlock(&mgmt_eth_data->mutex);
if (ret <= 0)
return -ETIMEDOUT;
if (!ack)
return -EINVAL;
return 0;
}
static int
qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
{
u32 val = 0;
int ret;
ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
if (ret)
return ret;
val &= ~mask;
val |= write_val;
return qca8k_write_eth(priv, reg, &val, sizeof(val));
}
static int
qca8k_read_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t *val)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
int ret;
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
return ret;
}
static int
qca8k_write_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t val)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
int ret;
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
qca8k_mii_write32(bus, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
return ret;
}
static int
qca8k_regmap_update_bits_mii(struct qca8k_priv *priv, uint32_t reg,
uint32_t mask, uint32_t write_val)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
if (ret < 0)
goto exit;
val &= ~mask;
val |= write_val;
qca8k_mii_write32(bus, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
return ret;
}
static int
qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len,
void *val_buf, size_t val_len)
{
int i, count = val_len / sizeof(u32), ret;
u32 reg = *(u32 *)reg_buf & U16_MAX;
struct qca8k_priv *priv = ctx;
if (priv->mgmt_master &&
!qca8k_read_eth(priv, reg, val_buf, val_len))
return 0;
/* Loop count times, incrementing reg by 4 each iteration */
for (i = 0; i < count; i++, reg += sizeof(u32)) {
ret = qca8k_read_mii(priv, reg, val_buf + i);
if (ret < 0)
return ret;
}
return 0;
}
static int
qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len,
const void *val_buf, size_t val_len)
{
int i, count = val_len / sizeof(u32), ret;
u32 reg = *(u32 *)reg_buf & U16_MAX;
struct qca8k_priv *priv = ctx;
u32 *val = (u32 *)val_buf;
if (priv->mgmt_master &&
!qca8k_write_eth(priv, reg, val, val_len))
return 0;
/* Loop count times, incrementing reg by 4 and advancing the val
* pointer to the next value
*/
for (i = 0; i < count; i++, reg += sizeof(u32), val++) {
ret = qca8k_write_mii(priv, reg, *val);
if (ret < 0)
return ret;
}
return 0;
}
static int
qca8k_bulk_write(void *ctx, const void *data, size_t bytes)
{
return qca8k_bulk_gather_write(ctx, data, sizeof(u16), data + sizeof(u16),
bytes - sizeof(u16));
}
static int
qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
{
struct qca8k_priv *priv = ctx;
if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
return 0;
return qca8k_regmap_update_bits_mii(priv, reg, mask, write_val);
}
static struct regmap_config qca8k_regmap_config = {
.reg_bits = 16,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x16ac, /* end MIB - Port6 range */
.read = qca8k_bulk_read,
.write = qca8k_bulk_write,
.reg_update_bits = qca8k_regmap_update_bits,
.rd_table = &qca8k_readable_table,
.disable_locking = true, /* Locking is handled by qca8k read/write */
.cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
.max_raw_read = 32, /* mgmt eth can read up to 8 registers at a time */
/* ATU regs suffer from a bug where some data are not correctly
* written. Disable bulk write to correctly write ATU entry.
*/
.use_single_write = true,
};
static int
qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
struct sk_buff *read_skb, u32 *val)
{
struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
bool ack;
int ret;
if (!skb)
return -ENOMEM;
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the copy pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
if (ret <= 0)
return -ETIMEDOUT;
if (!ack)
return -EINVAL;
*val = mgmt_eth_data->data[0];
return 0;
}
static int
qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
int regnum, u16 data)
{
struct sk_buff *write_skb, *clear_skb, *read_skb;
struct qca8k_mgmt_eth_data *mgmt_eth_data;
u32 write_val, clear_val = 0, val;
struct net_device *mgmt_master;
int ret, ret1;
bool ack;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
mgmt_eth_data = &priv->mgmt_eth_data;
write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum);
if (read) {
write_val |= QCA8K_MDIO_MASTER_READ;
} else {
write_val |= QCA8K_MDIO_MASTER_WRITE;
write_val |= QCA8K_MDIO_MASTER_DATA(data);
}
/* Prealloc all the needed skb before the lock */
write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
if (!write_skb)
return -ENOMEM;
clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
if (!clear_skb) {
ret = -ENOMEM;
goto err_clear_skb;
}
read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
if (!read_skb) {
ret = -ENOMEM;
goto err_read_skb;
}
/* Actually start the request:
* 1. Send mdio master packet
* 2. Busy Wait for mdio master command
* 3. Get the data if we are reading
* 4. Reset the mdio master (even with error)
*/
mutex_lock(&mgmt_eth_data->mutex);
/* Check if mgmt_master is operational */
mgmt_master = priv->mgmt_master;
if (!mgmt_master) {
mutex_unlock(&mgmt_eth_data->mutex);
ret = -EINVAL;
goto err_mgmt_master;
}
read_skb->dev = mgmt_master;
clear_skb->dev = mgmt_master;
write_skb->dev = mgmt_master;
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the write pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(write_skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
if (ret <= 0) {
ret = -ETIMEDOUT;
kfree_skb(read_skb);
goto exit;
}
if (!ack) {
ret = -EINVAL;
kfree_skb(read_skb);
goto exit;
}
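/* Poll the MDIO master busy flag by resending the prepared read frame
* until the switch reports the command as completed (or we time out).
*/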
ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
!(val & QCA8K_MDIO_MASTER_BUSY), 0,
QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
mgmt_eth_data, read_skb, &val);
if (ret < 0 && ret1 < 0) {
ret = ret1;
goto exit;
}
if (read) {
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the read pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(read_skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
if (ret <= 0) {
ret = -ETIMEDOUT;
goto exit;
}
if (!ack) {
ret = -EINVAL;
goto exit;
}
ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
} else {
kfree_skb(read_skb);
}
exit:
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the clear pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(clear_skb);
wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
mutex_unlock(&mgmt_eth_data->mutex);
return ret;
/* Error handling before lock */
err_mgmt_master:
kfree_skb(read_skb);
err_read_skb:
kfree_skb(clear_skb);
err_clear_skb:
kfree_skb(write_skb);
return ret;
}
static int
qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
u16 r1, r2, page;
u32 val;
int ret, ret1;
qca8k_split_addr(reg, &r1, &r2, &page);
ret = read_poll_timeout(qca8k_mii_read_hi, ret1, !(val & mask), 0,
QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
bus, 0x10 | r2, r1 + 1, &val);
/* Check if qca8k_read has failed for a different reason
* before returning -ETIMEDOUT
*/
if (ret < 0 && ret1 < 0)
return ret1;
return ret;
}
static int
qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
QCA8K_MDIO_MASTER_DATA(data);
qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
qca8k_mii_write32(bus, 0x10 | r2, r1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
exit:
/* even if the busy_wait timeouts try to clear the MASTER_EN */
qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
mutex_unlock(&bus->mdio_lock);
return ret;
}
static int
qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum);
qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
if (ret)
goto exit;
ret = qca8k_mii_read_lo(bus, 0x10 | r2, r1, &val);
exit:
/* even if the busy_wait timeouts try to clear the MASTER_EN */
qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
mutex_unlock(&bus->mdio_lock);
if (ret >= 0)
ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
return ret;
}
static int
qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
{
struct qca8k_priv *priv = slave_bus->priv;
int ret;
/* Use mdio Ethernet when available, fallback to legacy one on error */
ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
if (!ret)
return 0;
return qca8k_mdio_write(priv, phy, regnum, data);
}
static int
qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
{
struct qca8k_priv *priv = slave_bus->priv;
int ret;
/* Use mdio Ethernet when available, fallback to legacy one on error */
ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
if (ret >= 0)
return ret;
ret = qca8k_mdio_read(priv, phy, regnum);
if (ret < 0)
return 0xffff;
return ret;
}
static int
qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
{
port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
}
static int
qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
{
port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
return qca8k_internal_mdio_read(slave_bus, port, regnum);
}
static int
qca8k_mdio_register(struct qca8k_priv *priv)
{
struct dsa_switch *ds = priv->ds;
struct device_node *mdio;
struct mii_bus *bus;
bus = devm_mdiobus_alloc(ds->dev);
if (!bus)
return -ENOMEM;
bus->priv = (void *)priv;
snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
ds->dst->index, ds->index);
bus->parent = ds->dev;
bus->phy_mask = ~ds->phys_mii_mask;
ds->slave_mii_bus = bus;
/* Check if the devicetree declare the port:phy mapping */
mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
if (of_device_is_available(mdio)) {
bus->name = "qca8k slave mii";
bus->read = qca8k_internal_mdio_read;
bus->write = qca8k_internal_mdio_write;
return devm_of_mdiobus_register(priv->dev, bus, mdio);
}
/* If a mapping can't be found the legacy mapping is used,
* using the qca8k_port_to_phy function
*/
bus->name = "qca8k-legacy slave mii";
bus->read = qca8k_legacy_mdio_read;
bus->write = qca8k_legacy_mdio_write;
return devm_mdiobus_register(priv->dev, bus);
}
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
struct device_node *ports, *port;
phy_interface_t mode;
int err;
ports = of_get_child_by_name(priv->dev->of_node, "ports");
if (!ports)
ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
if (!ports)
return -EINVAL;
for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &reg);
if (err) {
of_node_put(port);
of_node_put(ports);
return err;
}
if (!dsa_is_user_port(priv->ds, reg))
continue;
of_get_phy_mode(port, &mode);
if (of_property_read_bool(port, "phy-handle") &&
mode != PHY_INTERFACE_MODE_INTERNAL)
external_mdio_mask |= BIT(reg);
else
internal_mdio_mask |= BIT(reg);
}
of_node_put(ports);
if (!external_mdio_mask && !internal_mdio_mask) {
dev_err(priv->dev, "no PHYs are defined.\n");
return -EINVAL;
}
/* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
* the MDIO_MASTER register also _disconnects_ the external MDC
* passthrough to the internal PHYs. It's not possible to use both
* configurations at the same time!
*
* Because this came up during the review process:
* If the external mdio-bus driver were capable of magically disabling
* the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
* accessors for the time being, it would be possible to pull this
* off.
*/
if (!!external_mdio_mask && !!internal_mdio_mask) {
dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
return -EINVAL;
}
if (external_mdio_mask) {
/* Make sure to disable the internal mdio bus in cases
* a dt-overlay and driver reload changed the configuration
*/
return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_EN);
}
return qca8k_mdio_register(priv);
}
static int
qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
{
u32 mask = 0;
int ret = 0;
/* SoC specific settings for ipq8064.
* If more devices require this, consider adding
* a dedicated binding.
*/
if (of_machine_is_compatible("qcom,ipq8064"))
mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
/* SoC specific settings for ipq8065 */
if (of_machine_is_compatible("qcom,ipq8065"))
mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
if (mask) {
ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
QCA8K_MAC_PWR_RGMII0_1_8V |
QCA8K_MAC_PWR_RGMII1_1_8V,
mask);
}
return ret;
}
static int qca8k_find_cpu_port(struct dsa_switch *ds)
{
struct qca8k_priv *priv = ds->priv;
/* Find the connected cpu port. Valid ports are 0 or 6 */
if (dsa_is_cpu_port(ds, 0))
return 0;
dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
if (dsa_is_cpu_port(ds, 6))
return 6;
return -EINVAL;
}
static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
const struct qca8k_match_data *data = priv->info;
struct device_node *node = priv->dev->of_node;
u32 val = 0;
int ret;
/* The QCA8327 requires the correct package mode to be set.
* Its bigger brother, the QCA8328, has the 172 pin layout.
* This should be applied by default, but we set it just to make sure.
*/
if (priv->switch_id == QCA8K_ID_QCA8327) {
/* Set the correct package of 148 pin for QCA8327 */
if (data->reduced_package)
val |= QCA8327_PWS_PACKAGE148_EN;
ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
val);
if (ret)
return ret;
}
if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
val |= QCA8K_PWS_POWER_ON_SEL;
if (of_property_read_bool(node, "qca,led-open-drain")) {
if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
return -EINVAL;
}
val |= QCA8K_PWS_LED_OPEN_EN_CSR;
}
return qca8k_rmw(priv, QCA8K_REG_PWS,
QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
val);
}
static int
qca8k_parse_port_config(struct qca8k_priv *priv)
{
int port, cpu_port_index = -1, ret;
struct device_node *port_dn;
phy_interface_t mode;
struct dsa_port *dp;
u32 delay;
/* We have 2 CPU ports. Check them */
for (port = 0; port < QCA8K_NUM_PORTS; port++) {
/* Skip everything but the two possible CPU ports (0 and 6) */
if (port != 0 && port != 6)
continue;
dp = dsa_to_port(priv->ds, port);
port_dn = dp->dn;
cpu_port_index++;
if (!of_device_is_available(port_dn))
continue;
ret = of_get_phy_mode(port_dn, &mode);
if (ret)
continue;
switch (mode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_SGMII:
delay = 0;
if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
/* Switch regs accept value in ns, convert ps to ns */
delay = delay / 1000;
else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
mode == PHY_INTERFACE_MODE_RGMII_TXID)
delay = 1;
if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
delay = 3;
}
priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
delay = 0;
if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
/* Switch regs accept value in ns, convert ps to ns */
delay = delay / 1000;
else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
mode == PHY_INTERFACE_MODE_RGMII_RXID)
delay = 2;
if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
delay = 3;
}
priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
/* Skip sgmii parsing for rgmii* mode */
if (mode == PHY_INTERFACE_MODE_RGMII ||
mode == PHY_INTERFACE_MODE_RGMII_ID ||
mode == PHY_INTERFACE_MODE_RGMII_TXID ||
mode == PHY_INTERFACE_MODE_RGMII_RXID)
break;
if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
priv->ports_config.sgmii_tx_clk_falling_edge = true;
if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
priv->ports_config.sgmii_rx_clk_falling_edge = true;
if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
priv->ports_config.sgmii_enable_pll = true;
if (priv->switch_id == QCA8K_ID_QCA8327) {
dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
priv->ports_config.sgmii_enable_pll = false;
}
if (priv->switch_revision < 2)
dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
}
break;
default:
continue;
}
}
return 0;
}
static void
qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
u32 reg)
{
u32 delay, val = 0;
int ret;
/* Delay can be declared in 3 different ways:
* rgmii mode combined with the standard internal-delay binding, or
* the rgmii-id or rgmii-tx/rx phy modes.
* The parse logic sets a delay different from 0 only when one of
* the 3 ways is used. In all other cases the delay is not enabled.
* With ID or TX/RXID the delay is enabled and set to the default
* and recommended value.
*/
if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
}
if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
}
/* Set RGMII delay based on the selected values */
ret = qca8k_rmw(priv, reg,
QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
val);
if (ret)
dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
}
static struct phylink_pcs *
qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct qca8k_priv *priv = ds->priv;
struct phylink_pcs *pcs = NULL;
switch (interface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
switch (port) {
case 0:
pcs = &priv->pcs_port_0.pcs;
break;
case 6:
pcs = &priv->pcs_port_6.pcs;
break;
}
break;
default:
break;
}
return pcs;
}
static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct qca8k_priv *priv = ds->priv;
int cpu_port_index;
u32 reg;
switch (port) {
case 0: /* 1st CPU port */
if (state->interface != PHY_INTERFACE_MODE_RGMII &&
state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
state->interface != PHY_INTERFACE_MODE_SGMII)
return;
reg = QCA8K_REG_PORT0_PAD_CTRL;
cpu_port_index = QCA8K_CPU_PORT0;
break;
case 1:
case 2:
case 3:
case 4:
case 5:
/* Internal PHY, nothing to do */
return;
case 6: /* 2nd CPU port / external PHY */
if (state->interface != PHY_INTERFACE_MODE_RGMII &&
state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
state->interface != PHY_INTERFACE_MODE_SGMII &&
state->interface != PHY_INTERFACE_MODE_1000BASEX)
return;
reg = QCA8K_REG_PORT6_PAD_CTRL;
cpu_port_index = QCA8K_CPU_PORT6;
break;
default:
dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
return;
}
if (port != 6 && phylink_autoneg_inband(mode)) {
dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
__func__);
return;
}
switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
/* Configure rgmii delay */
qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
/* QCA8337 requires the rgmii rx delay to be set for all ports.
* This is enabled through PORT5_PAD_CTRL for all ports,
* rather than individual port registers.
*/
if (priv->switch_id == QCA8K_ID_QCA8337)
qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
break;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
/* Enable SGMII on the port */
qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
break;
default:
dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
phy_modes(state->interface), port);
return;
}
}
static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
switch (port) {
case 0: /* 1st CPU port */
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_SGMII,
config->supported_interfaces);
break;
case 1:
case 2:
case 3:
case 4:
case 5:
/* Internal PHY */
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
case 6: /* 2nd CPU port / external PHY */
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_SGMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
config->supported_interfaces);
break;
}
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD;
}
static void
qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
struct qca8k_priv *priv = ds->priv;
qca8k_port_set_status(priv, port, 0);
}
static void
qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface, struct phy_device *phydev,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
struct qca8k_priv *priv = ds->priv;
u32 reg;
if (phylink_autoneg_inband(mode)) {
reg = QCA8K_PORT_STATUS_LINK_AUTO;
} else {
switch (speed) {
case SPEED_10:
reg = QCA8K_PORT_STATUS_SPEED_10;
break;
case SPEED_100:
reg = QCA8K_PORT_STATUS_SPEED_100;
break;
case SPEED_1000:
reg = QCA8K_PORT_STATUS_SPEED_1000;
break;
default:
reg = QCA8K_PORT_STATUS_LINK_AUTO;
break;
}
if (duplex == DUPLEX_FULL)
reg |= QCA8K_PORT_STATUS_DUPLEX;
if (rx_pause || dsa_is_cpu_port(ds, port))
reg |= QCA8K_PORT_STATUS_RXFLOW;
if (tx_pause || dsa_is_cpu_port(ds, port))
reg |= QCA8K_PORT_STATUS_TXFLOW;
}
reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
}
static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
{
return container_of(pcs, struct qca8k_pcs, pcs);
}
static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
int port = pcs_to_qca8k_pcs(pcs)->port;
u32 reg;
int ret;
ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
if (ret < 0) {
state->link = false;
return;
}
state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
state->an_complete = state->link;
state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
DUPLEX_HALF;
switch (reg & QCA8K_PORT_STATUS_SPEED) {
case QCA8K_PORT_STATUS_SPEED_10:
state->speed = SPEED_10;
break;
case QCA8K_PORT_STATUS_SPEED_100:
state->speed = SPEED_100;
break;
case QCA8K_PORT_STATUS_SPEED_1000:
state->speed = SPEED_1000;
break;
default:
state->speed = SPEED_UNKNOWN;
break;
}
if (reg & QCA8K_PORT_STATUS_RXFLOW)
state->pause |= MLO_PAUSE_RX;
if (reg & QCA8K_PORT_STATUS_TXFLOW)
state->pause |= MLO_PAUSE_TX;
}
static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
int cpu_port_index, ret, port;
u32 reg, val;
port = pcs_to_qca8k_pcs(pcs)->port;
switch (port) {
case 0:
reg = QCA8K_REG_PORT0_PAD_CTRL;
cpu_port_index = QCA8K_CPU_PORT0;
break;
case 6:
reg = QCA8K_REG_PORT6_PAD_CTRL;
cpu_port_index = QCA8K_CPU_PORT6;
break;
default:
WARN_ON(1);
return -EINVAL;
}
/* Enable/disable SerDes auto-negotiation as necessary */
val = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED ?
0 : QCA8K_PWS_SERDES_AEN_DIS;
ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8K_PWS_SERDES_AEN_DIS, val);
if (ret)
return ret;
/* Configure the SGMII parameters */
ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
if (ret)
return ret;
val |= QCA8K_SGMII_EN_SD;
if (priv->ports_config.sgmii_enable_pll)
val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
QCA8K_SGMII_EN_TX;
if (dsa_is_cpu_port(priv->ds, port)) {
/* CPU port, we're talking to the CPU MAC, be a PHY */
val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
val |= QCA8K_SGMII_MODE_CTRL_PHY;
} else if (interface == PHY_INTERFACE_MODE_SGMII) {
val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
val |= QCA8K_SGMII_MODE_CTRL_MAC;
} else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
val |= QCA8K_SGMII_MODE_CTRL_BASEX;
}
qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
/* The original code reports port instability when SGMII is used
* without a delay, so SGMII also requires the delay to be set.
* Apply the advised values here or take them from DT.
*/
if (interface == PHY_INTERFACE_MODE_SGMII)
qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
/* For qca8327/qca8328/qca8334/qca8338 the sgmii port is unique and
* the falling edge is configured by writing to the PORT0 PAD reg
*/
if (priv->switch_id == QCA8K_ID_QCA8327 ||
priv->switch_id == QCA8K_ID_QCA8337)
reg = QCA8K_REG_PORT0_PAD_CTRL;
val = 0;
/* SGMII Clock phase configuration */
if (priv->ports_config.sgmii_rx_clk_falling_edge)
val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
if (priv->ports_config.sgmii_tx_clk_falling_edge)
val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
if (val)
ret = qca8k_rmw(priv, reg,
QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
val);
return 0;
}
static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
{
}
static const struct phylink_pcs_ops qca8k_pcs_ops = {
.pcs_get_state = qca8k_pcs_get_state,
.pcs_config = qca8k_pcs_config,
.pcs_an_restart = qca8k_pcs_an_restart,
};
static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
int port)
{
qpcs->pcs.ops = &qca8k_pcs_ops;
qpcs->pcs.neg_mode = true;
/* We don't have interrupts for link changes, so we need to poll */
qpcs->pcs.poll = true;
qpcs->priv = priv;
qpcs->port = port;
}
static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
struct qca8k_mib_eth_data *mib_eth_data;
struct qca8k_priv *priv = ds->priv;
const struct qca8k_mib_desc *mib;
struct mib_ethhdr *mib_ethhdr;
__le32 *data2;
u8 port;
int i;
mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
mib_eth_data = &priv->mib_eth_data;
/* The switch autocasts MIB data for every port. Ignore packets
* from other ports and parse only the requested one.
*/
port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
if (port != mib_eth_data->req_port)
goto exit;
data2 = (__le32 *)skb->data;
for (i = 0; i < priv->info->mib_count; i++) {
mib = &ar8327_mib[i];
/* First 3 mib are present in the skb head */
if (i < 3) {
mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
continue;
}
/* Some mib are 64 bit wide */
if (mib->size == 2)
mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
else
mib_eth_data->data[i] = get_unaligned_le32(data2);
data2 += mib->size;
}
exit:
/* Complete on receiving all the mib packet */
if (refcount_dec_and_test(&mib_eth_data->port_parsed))
complete(&mib_eth_data->rw_done);
}
static int
qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct qca8k_mib_eth_data *mib_eth_data;
struct qca8k_priv *priv = ds->priv;
int ret;
mib_eth_data = &priv->mib_eth_data;
mutex_lock(&mib_eth_data->mutex);
reinit_completion(&mib_eth_data->rw_done);
mib_eth_data->req_port = dp->index;
mib_eth_data->data = data;
refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
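/* One autocast MIB packet is expected per port; the handler decrements
* this refcount and completes rw_done once every port has been parsed.
*/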
mutex_lock(&priv->reg_mutex);
/* Send mib autocast request */
ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
QCA8K_MIB_BUSY);
mutex_unlock(&priv->reg_mutex);
if (ret)
goto exit;
ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
exit:
mutex_unlock(&mib_eth_data->mutex);
return ret;
}
static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
{
struct qca8k_priv *priv = ds->priv;
/* Communicate the switch revision to the internal phy driver.
* Based on the switch revision, different values need to be
* set in the dbg and mmd regs on the phy.
* The first 2 bits are used to communicate the switch revision
* to the phy driver.
*/
if (port > 0 && port < 6)
return priv->switch_revision;
return 0;
}
static enum dsa_tag_protocol
qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_QCA;
}
static void
qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
bool operational)
{
struct dsa_port *dp = master->dsa_ptr;
struct qca8k_priv *priv = ds->priv;
/* Ethernet MIB/MDIO is only supported for CPU port 0 */
if (dp->index != 0)
return;
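/* Hold both the mgmt and MIB Ethernet locks so in-flight requests
* observe a consistent mgmt_master pointer.
*/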
mutex_lock(&priv->mgmt_eth_data.mutex);
mutex_lock(&priv->mib_eth_data.mutex);
priv->mgmt_master = operational ? (struct net_device *)master : NULL;
mutex_unlock(&priv->mib_eth_data.mutex);
mutex_unlock(&priv->mgmt_eth_data.mutex);
}
static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
struct qca_tagger_data *tagger_data;
switch (proto) {
case DSA_TAG_PROTO_QCA:
tagger_data = ds->tagger_data;
tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static void qca8k_setup_hol_fixup(struct qca8k_priv *priv, int port)
{
u32 mask;
switch (port) {
	/* The 2 CPU ports and port 5 require different priority
	 * settings than the other ports.
	 */
case 0:
case 5:
case 6:
mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
break;
default:
mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
}
regmap_write(priv->regmap, QCA8K_REG_PORT_HOL_CTRL0(port), mask);
mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
QCA8K_PORT_HOL_CTRL1_WRED_EN;
regmap_update_bits(priv->regmap, QCA8K_REG_PORT_HOL_CTRL1(port),
QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
QCA8K_PORT_HOL_CTRL1_WRED_EN,
mask);
}
static int
qca8k_setup(struct dsa_switch *ds)
{
struct qca8k_priv *priv = ds->priv;
struct dsa_port *dp;
int cpu_port, ret;
u32 mask;
cpu_port = qca8k_find_cpu_port(ds);
if (cpu_port < 0) {
dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
return cpu_port;
}
	/* Parse the CPU port config to be used later in phylink mac_config */
ret = qca8k_parse_port_config(priv);
if (ret)
return ret;
ret = qca8k_setup_mdio_bus(priv);
if (ret)
return ret;
ret = qca8k_setup_of_pws_reg(priv);
if (ret)
return ret;
ret = qca8k_setup_mac_pwr_sel(priv);
if (ret)
return ret;
ret = qca8k_setup_led_ctrl(priv);
if (ret)
return ret;
qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
/* Make sure MAC06 is disabled */
ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
if (ret) {
dev_err(priv->dev, "failed disabling MAC06 exchange");
return ret;
}
/* Enable CPU Port */
ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
if (ret) {
dev_err(priv->dev, "failed enabling CPU port");
return ret;
}
/* Enable MIB counters */
ret = qca8k_mib_init(priv);
if (ret)
dev_warn(priv->dev, "mib init failed");
/* Initial setup of all ports */
dsa_switch_for_each_port(dp, ds) {
/* Disable forwarding by default on all ports */
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(dp->index),
QCA8K_PORT_LOOKUP_MEMBER, 0);
if (ret)
return ret;
}
/* Disable MAC by default on all user ports */
dsa_switch_for_each_user_port(dp, ds)
qca8k_port_set_status(priv, dp->index, 0);
/* Enable QCA header mode on all cpu ports */
dsa_switch_for_each_cpu_port(dp, ds) {
ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(dp->index),
FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
if (ret) {
dev_err(priv->dev, "failed enabling QCA header mode on port %d", dp->index);
return ret;
}
}
	/* Forward all unknown frames to the CPU port for Linux processing.
	 * Note that in a multi-CPU config only one port should be set
	 * for IGMP, unknown unicast, multicast and broadcast packets.
	 */
ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
if (ret)
return ret;
/* CPU port gets connected to all user ports of the switch */
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(cpu_port),
QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
if (ret)
return ret;
/* Setup connection between CPU port & user ports
* Individual user ports get connected to CPU port only
*/
dsa_switch_for_each_user_port(dp, ds) {
u8 port = dp->index;
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_MEMBER,
BIT(cpu_port));
if (ret)
return ret;
ret = regmap_clear_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_LEARN);
if (ret)
return ret;
		/* For port-based VLANs to work we need to set the
		 * default egress VID
		 */
ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
QCA8K_EGREES_VLAN_PORT_MASK(port),
QCA8K_EGREES_VLAN_PORT(port, QCA8K_PORT_VID_DEF));
if (ret)
return ret;
ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
if (ret)
return ret;
}
	/* Port 5 of the qca8337 has some problems under flood conditions. The
	 * original legacy driver had some specific buffer and priority settings
	 * for the different ports suggested by the QCA switch team. Add these
	 * missing settings to improve switch stability under load conditions.
	 * This problem is limited to the qca8337; other qca8k switches are not
	 * affected.
	 */
if (priv->switch_id == QCA8K_ID_QCA8337)
dsa_switch_for_each_available_port(dp, ds)
qca8k_setup_hol_fixup(priv, dp->index);
	/* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
if (priv->switch_id == QCA8K_ID_QCA8327) {
mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
mask);
}
	/* Set up our port MTUs to match the power-on defaults */
ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
if (ret)
dev_warn(priv->dev, "failed setting MTU settings");
/* Flush the FDB table */
qca8k_fdb_flush(priv);
	/* Set the min and max ageing values supported */
ds->ageing_time_min = 7000;
ds->ageing_time_max = 458745000;
/* Set max number of LAGs supported */
ds->num_lag_ids = QCA8K_NUM_LAGS;
return 0;
}
static const struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
.get_strings = qca8k_get_strings,
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
.set_ageing_time = qca8k_set_ageing_time,
.get_mac_eee = qca8k_get_mac_eee,
.set_mac_eee = qca8k_set_mac_eee,
.port_enable = qca8k_port_enable,
.port_disable = qca8k_port_disable,
.port_change_mtu = qca8k_port_change_mtu,
.port_max_mtu = qca8k_port_max_mtu,
.port_stp_state_set = qca8k_port_stp_state_set,
.port_pre_bridge_flags = qca8k_port_pre_bridge_flags,
.port_bridge_flags = qca8k_port_bridge_flags,
.port_bridge_join = qca8k_port_bridge_join,
.port_bridge_leave = qca8k_port_bridge_leave,
.port_fast_age = qca8k_port_fast_age,
.port_fdb_add = qca8k_port_fdb_add,
.port_fdb_del = qca8k_port_fdb_del,
.port_fdb_dump = qca8k_port_fdb_dump,
.port_mdb_add = qca8k_port_mdb_add,
.port_mdb_del = qca8k_port_mdb_del,
.port_mirror_add = qca8k_port_mirror_add,
.port_mirror_del = qca8k_port_mirror_del,
.port_vlan_filtering = qca8k_port_vlan_filtering,
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
.phylink_get_caps = qca8k_phylink_get_caps,
.phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
.phylink_mac_config = qca8k_phylink_mac_config,
.phylink_mac_link_down = qca8k_phylink_mac_link_down,
.phylink_mac_link_up = qca8k_phylink_mac_link_up,
.get_phy_flags = qca8k_get_phy_flags,
.port_lag_join = qca8k_port_lag_join,
.port_lag_leave = qca8k_port_lag_leave,
.master_state_change = qca8k_master_change,
.connect_tag_protocol = qca8k_connect_tag_protocol,
};
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
struct qca8k_priv *priv;
int ret;
	/* allocate the private data struct so that we can probe the switch's
	 * ID register
	 */
priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
priv->info = of_device_get_match_data(priv->dev);
priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
GPIOD_ASIS);
if (IS_ERR(priv->reset_gpio))
return PTR_ERR(priv->reset_gpio);
if (priv->reset_gpio) {
gpiod_set_value_cansleep(priv->reset_gpio, 1);
/* The active low duration must be greater than 10 ms
* and checkpatch.pl wants 20 ms.
*/
msleep(20);
gpiod_set_value_cansleep(priv->reset_gpio, 0);
}
/* Start by setting up the register mapping */
priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
&qca8k_regmap_config);
if (IS_ERR(priv->regmap)) {
dev_err(priv->dev, "regmap initialization failed");
return PTR_ERR(priv->regmap);
}
priv->mdio_cache.page = 0xffff;
/* Check the detected switch id */
ret = qca8k_read_switch_id(priv);
if (ret)
return ret;
priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
mutex_init(&priv->mgmt_eth_data.mutex);
init_completion(&priv->mgmt_eth_data.rw_done);
mutex_init(&priv->mib_eth_data.mutex);
init_completion(&priv->mib_eth_data.rw_done);
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
priv->ds->ops = &qca8k_switch_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
return dsa_register_switch(priv->ds);
}
static void
qca8k_sw_remove(struct mdio_device *mdiodev)
{
struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
int i;
if (!priv)
return;
for (i = 0; i < QCA8K_NUM_PORTS; i++)
qca8k_port_set_status(priv, i, 0);
dsa_unregister_switch(priv->ds);
}
static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
{
struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
dsa_switch_shutdown(priv->ds);
dev_set_drvdata(&mdiodev->dev, NULL);
}
#ifdef CONFIG_PM_SLEEP
static void
qca8k_set_pm(struct qca8k_priv *priv, int enable)
{
int port;
for (port = 0; port < QCA8K_NUM_PORTS; port++) {
/* Do not enable on resume if the port was
* disabled before.
*/
if (!(priv->port_enabled_map & BIT(port)))
continue;
qca8k_port_set_status(priv, port, enable);
}
}
static int qca8k_suspend(struct device *dev)
{
struct qca8k_priv *priv = dev_get_drvdata(dev);
qca8k_set_pm(priv, 0);
return dsa_switch_suspend(priv->ds);
}
static int qca8k_resume(struct device *dev)
{
struct qca8k_priv *priv = dev_get_drvdata(dev);
qca8k_set_pm(priv, 1);
return dsa_switch_resume(priv->ds);
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
qca8k_suspend, qca8k_resume);
static const struct qca8k_info_ops qca8xxx_ops = {
.autocast_mib = qca8k_get_ethtool_stats_eth,
};
static const struct qca8k_match_data qca8327 = {
.id = QCA8K_ID_QCA8327,
.reduced_package = true,
.mib_count = QCA8K_QCA832X_MIB_COUNT,
.ops = &qca8xxx_ops,
};
static const struct qca8k_match_data qca8328 = {
.id = QCA8K_ID_QCA8327,
.mib_count = QCA8K_QCA832X_MIB_COUNT,
.ops = &qca8xxx_ops,
};
static const struct qca8k_match_data qca833x = {
.id = QCA8K_ID_QCA8337,
.mib_count = QCA8K_QCA833X_MIB_COUNT,
.ops = &qca8xxx_ops,
};
static const struct of_device_id qca8k_of_match[] = {
{ .compatible = "qca,qca8327", .data = &qca8327 },
{ .compatible = "qca,qca8328", .data = &qca8328 },
{ .compatible = "qca,qca8334", .data = &qca833x },
{ .compatible = "qca,qca8337", .data = &qca833x },
{ /* sentinel */ },
};
static struct mdio_driver qca8kmdio_driver = {
.probe = qca8k_sw_probe,
.remove = qca8k_sw_remove,
.shutdown = qca8k_sw_shutdown,
.mdiodrv.driver = {
.name = "qca8k",
.of_match_table = qca8k_of_match,
.pm = &qca8k_pm_ops,
},
};
mdio_module_driver(qca8kmdio_driver);
MODULE_AUTHOR("Mathieu Olivari, John Crispin <[email protected]>");
MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qca8k");
| linux-master | drivers/net/dsa/qca/qca8k-8xxx.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2019 Pengutronix, Oleksij Rempel <[email protected]>
/*
* +----------------------+
* GMAC1----RGMII----|--MAC0 |
* \---MDIO1----|--REGs |----MDIO3----\
* | | | +------+
* | | +--| |
* | MAC1-|----RMII--M-----| PHY0 |-o P0
* | | | | +------+
* | | | +--| |
* | MAC2-|----RMII--------| PHY1 |-o P1
* | | | | +------+
* | | | +--| |
* | MAC3-|----RMII--------| PHY2 |-o P2
* | | | | +------+
* | | | +--| |
* | MAC4-|----RMII--------| PHY3 |-o P3
* | | | | +------+
* | | | +--| |
* | MAC5-|--+-RMII--M-----|-PHY4-|-o P4
* | | | | +------+
* +----------------------+ | \--CFG_SW_PHY_SWAP
* GMAC0---------------RMII--------------------/ \-CFG_SW_PHY_ADDR_SWAP
* \---MDIO0--NC
*
 * GMAC0 and MAC5 are connected together and use the same PHY. Depending on
 * the configuration it can be PHY4 (default) or PHY0. Only GMAC0 or MAC5 can
 * be used at the same time. If GMAC0 is used (default) then MAC5 should be
 * disabled.
*
* CFG_SW_PHY_SWAP - swap connections of PHY0 and PHY4. If this bit is not set
* PHY4 is connected to GMAC0/MAC5 bundle and PHY0 is connected to MAC1. If this
* bit is set, PHY4 is connected to MAC1 and PHY0 is connected to GMAC0/MAC5
* bundle.
*
* CFG_SW_PHY_ADDR_SWAP - swap addresses of PHY0 and PHY4
*
* CFG_SW_PHY_SWAP and CFG_SW_PHY_ADDR_SWAP are part of SoC specific register
* set and not related to switch internal registers.
*/
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <net/dsa.h>
#define AR9331_SW_NAME "ar9331_switch"
#define AR9331_SW_PORTS 6
/* dummy reg to change page */
#define AR9331_SW_REG_PAGE 0x40000
/* Global Interrupt */
#define AR9331_SW_REG_GINT 0x10
#define AR9331_SW_REG_GINT_MASK 0x14
#define AR9331_SW_GINT_PHY_INT BIT(2)
#define AR9331_SW_REG_FLOOD_MASK 0x2c
#define AR9331_SW_FLOOD_MASK_BROAD_TO_CPU BIT(26)
#define AR9331_SW_REG_GLOBAL_CTRL 0x30
#define AR9331_SW_GLOBAL_CTRL_MFS_M GENMASK(13, 0)
#define AR9331_SW_REG_MDIO_CTRL 0x98
#define AR9331_SW_MDIO_CTRL_BUSY BIT(31)
#define AR9331_SW_MDIO_CTRL_MASTER_EN BIT(30)
#define AR9331_SW_MDIO_CTRL_CMD_READ BIT(27)
#define AR9331_SW_MDIO_CTRL_PHY_ADDR_M GENMASK(25, 21)
#define AR9331_SW_MDIO_CTRL_REG_ADDR_M GENMASK(20, 16)
#define AR9331_SW_MDIO_CTRL_DATA_M GENMASK(16, 0)
#define AR9331_SW_REG_PORT_STATUS(_port) (0x100 + (_port) * 0x100)
/* FLOW_LINK_EN - enable MAC flow control configuration via auto-neg with the
 * PHY. If not set, MAC flow control can be configured by software.
 */
#define AR9331_SW_PORT_STATUS_FLOW_LINK_EN BIT(12)
/* LINK_EN - If set, MAC is configured from PHY link status.
* If not set, MAC should be configured by software.
*/
#define AR9331_SW_PORT_STATUS_LINK_EN BIT(9)
#define AR9331_SW_PORT_STATUS_DUPLEX_MODE BIT(6)
#define AR9331_SW_PORT_STATUS_RX_FLOW_EN BIT(5)
#define AR9331_SW_PORT_STATUS_TX_FLOW_EN BIT(4)
#define AR9331_SW_PORT_STATUS_RXMAC BIT(3)
#define AR9331_SW_PORT_STATUS_TXMAC BIT(2)
#define AR9331_SW_PORT_STATUS_SPEED_M GENMASK(1, 0)
#define AR9331_SW_PORT_STATUS_SPEED_1000 2
#define AR9331_SW_PORT_STATUS_SPEED_100 1
#define AR9331_SW_PORT_STATUS_SPEED_10 0
#define AR9331_SW_PORT_STATUS_MAC_MASK \
(AR9331_SW_PORT_STATUS_TXMAC | AR9331_SW_PORT_STATUS_RXMAC)
#define AR9331_SW_PORT_STATUS_LINK_MASK \
(AR9331_SW_PORT_STATUS_DUPLEX_MODE | \
AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
AR9331_SW_PORT_STATUS_SPEED_M)
#define AR9331_SW_REG_PORT_CTRL(_port) (0x104 + (_port) * 0x100)
#define AR9331_SW_PORT_CTRL_HEAD_EN BIT(11)
#define AR9331_SW_PORT_CTRL_PORT_STATE GENMASK(2, 0)
#define AR9331_SW_PORT_CTRL_PORT_STATE_DISABLED 0
#define AR9331_SW_PORT_CTRL_PORT_STATE_BLOCKING 1
#define AR9331_SW_PORT_CTRL_PORT_STATE_LISTENING 2
#define AR9331_SW_PORT_CTRL_PORT_STATE_LEARNING 3
#define AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD 4
#define AR9331_SW_REG_PORT_VLAN(_port) (0x108 + (_port) * 0x100)
#define AR9331_SW_PORT_VLAN_8021Q_MODE GENMASK(31, 30)
#define AR9331_SW_8021Q_MODE_SECURE 3
#define AR9331_SW_8021Q_MODE_CHECK 2
#define AR9331_SW_8021Q_MODE_FALLBACK 1
#define AR9331_SW_8021Q_MODE_NONE 0
#define AR9331_SW_PORT_VLAN_PORT_VID_MEMBER GENMASK(25, 16)
/* MIB registers */
#define AR9331_MIB_COUNTER(x) (0x20000 + ((x) * 0x100))
/* Phy bypass mode
* ------------------------------------------------------------------------
* Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
*
* real | start | OP | PhyAddr | Reg Addr | TA |
* atheros| start | OP | 2'b00 |PhyAdd[2:0]| Reg Addr[4:0] | TA |
*
*
* Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
* real | Data |
* atheros| Data |
*
* ------------------------------------------------------------------------
* Page address mode
* ------------------------------------------------------------------------
* Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
* real | start | OP | PhyAddr | Reg Addr | TA |
* atheros| start | OP | 2'b11 | 8'b0 | TA |
*
* Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
* real | Data |
* atheros| | Page [9:0] |
*/
/* In case of Page Address mode, Bit[18:9] of 32 bit register address should be
* written to bits[9:0] of mdio data register.
*/
#define AR9331_SW_ADDR_PAGE GENMASK(18, 9)
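/* For example, assuming the MIB block at 0x20000: bits [18:9] give page
 * 0x100 to be written to the page selector, while the low 9 bits of the
 * address form the in-page offset that is carried in the LOW_ADDR fields
 * below when the two 16-bit halves are accessed.
 */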
/* ------------------------------------------------------------------------
* Normal register access mode
* ------------------------------------------------------------------------
* Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
* real | start | OP | PhyAddr | Reg Addr | TA |
* atheros| start | OP | 2'b10 | low_addr[7:0] | TA |
*
* Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
* real | Data |
* atheros| Data |
* ------------------------------------------------------------------------
*/
#define AR9331_SW_LOW_ADDR_PHY GENMASK(8, 6)
#define AR9331_SW_LOW_ADDR_REG GENMASK(5, 1)
#define AR9331_SW_MDIO_PHY_MODE_M GENMASK(4, 3)
#define AR9331_SW_MDIO_PHY_MODE_PAGE 3
#define AR9331_SW_MDIO_PHY_MODE_REG 2
#define AR9331_SW_MDIO_PHY_MODE_BYPASS 0
#define AR9331_SW_MDIO_PHY_ADDR_M GENMASK(2, 0)
/* Empirically determined values */
#define AR9331_SW_MDIO_POLL_SLEEP_US 1
#define AR9331_SW_MDIO_POLL_TIMEOUT_US 20
/* The interval should be small enough to avoid overflow of 32bit MIBs */
/*
 * FIXME: until we can read MIBs from the stats64 call directly (i.e. sleep
 * there), we have to poll stats more frequently than is actually needed.
 * For overflow protection, a 100 sec interval would normally have been OK.
 */
#define STATS_INTERVAL_JIFFIES (3 * HZ)
struct ar9331_sw_stats_raw {
u32 rxbroad; /* 0x00 */
u32 rxpause; /* 0x04 */
u32 rxmulti; /* 0x08 */
u32 rxfcserr; /* 0x0c */
u32 rxalignerr; /* 0x10 */
u32 rxrunt; /* 0x14 */
u32 rxfragment; /* 0x18 */
u32 rx64byte; /* 0x1c */
u32 rx128byte; /* 0x20 */
u32 rx256byte; /* 0x24 */
u32 rx512byte; /* 0x28 */
u32 rx1024byte; /* 0x2c */
u32 rx1518byte; /* 0x30 */
u32 rxmaxbyte; /* 0x34 */
u32 rxtoolong; /* 0x38 */
u32 rxgoodbyte; /* 0x3c */
u32 rxgoodbyte_hi;
u32 rxbadbyte; /* 0x44 */
u32 rxbadbyte_hi;
u32 rxoverflow; /* 0x4c */
u32 filtered; /* 0x50 */
u32 txbroad; /* 0x54 */
u32 txpause; /* 0x58 */
u32 txmulti; /* 0x5c */
u32 txunderrun; /* 0x60 */
u32 tx64byte; /* 0x64 */
u32 tx128byte; /* 0x68 */
u32 tx256byte; /* 0x6c */
u32 tx512byte; /* 0x70 */
u32 tx1024byte; /* 0x74 */
u32 tx1518byte; /* 0x78 */
u32 txmaxbyte; /* 0x7c */
u32 txoversize; /* 0x80 */
u32 txbyte; /* 0x84 */
u32 txbyte_hi;
u32 txcollision; /* 0x8c */
u32 txabortcol; /* 0x90 */
u32 txmulticol; /* 0x94 */
u32 txsinglecol; /* 0x98 */
u32 txexcdefer; /* 0x9c */
u32 txdefer; /* 0xa0 */
u32 txlatecol; /* 0xa4 */
};
struct ar9331_sw_port {
int idx;
struct delayed_work mib_read;
struct rtnl_link_stats64 stats;
struct ethtool_pause_stats pause_stats;
struct spinlock stats_lock;
};
struct ar9331_sw_priv {
struct device *dev;
struct dsa_switch ds;
struct dsa_switch_ops ops;
struct irq_domain *irqdomain;
u32 irq_mask;
struct mutex lock_irq;
struct mii_bus *mbus; /* mdio master */
struct mii_bus *sbus; /* mdio slave */
struct regmap *regmap;
struct reset_control *sw_reset;
struct ar9331_sw_port port[AR9331_SW_PORTS];
};
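/* Recover the ar9331_sw_priv that contains a given port: step back
 * port->idx entries to the start of the port[] array, then subtract the
 * offset of port[] inside the priv structure (an open-coded container_of()
 * that also handles the array index).
 */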
static struct ar9331_sw_priv *ar9331_sw_port_to_priv(struct ar9331_sw_port *port)
{
struct ar9331_sw_port *p = port - port->idx;
return (struct ar9331_sw_priv *)((void *)p -
offsetof(struct ar9331_sw_priv, port));
}
/* Warning: a switch reset will clear the last AR9331_SW_MDIO_PHY_MODE_PAGE
 * request. If some kind of optimization is used, the page request must be
 * repeated.
 */
static int ar9331_sw_reset(struct ar9331_sw_priv *priv)
{
int ret;
ret = reset_control_assert(priv->sw_reset);
if (ret)
goto error;
	/* The AR9331 documentation does not provide any information about the
	 * proper reset sequence. The AR8136 (the closest switch to the AR9331)
	 * documentation says the reset duration should be greater than 10 ms.
	 * So, let's use this value for now.
	 */
usleep_range(10000, 15000);
ret = reset_control_deassert(priv->sw_reset);
if (ret)
goto error;
	/* There is no information on how long we should wait after reset.
	 * The AR8136 has an EEPROM and there is an interrupt for the EEPROM
	 * load status. The AR9331 has no EEPROM support.
	 * For now, do not wait. In case a delay turns out to be needed, it
	 * can be added after reset as well.
	 */
return 0;
error:
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
return ret;
}
static int ar9331_sw_mbus_write(struct mii_bus *mbus, int port, int regnum,
u16 data)
{
struct ar9331_sw_priv *priv = mbus->priv;
struct regmap *regmap = priv->regmap;
u32 val;
int ret;
ret = regmap_write(regmap, AR9331_SW_REG_MDIO_CTRL,
AR9331_SW_MDIO_CTRL_BUSY |
AR9331_SW_MDIO_CTRL_MASTER_EN |
FIELD_PREP(AR9331_SW_MDIO_CTRL_PHY_ADDR_M, port) |
FIELD_PREP(AR9331_SW_MDIO_CTRL_REG_ADDR_M, regnum) |
FIELD_PREP(AR9331_SW_MDIO_CTRL_DATA_M, data));
if (ret)
goto error;
ret = regmap_read_poll_timeout(regmap, AR9331_SW_REG_MDIO_CTRL, val,
!(val & AR9331_SW_MDIO_CTRL_BUSY),
AR9331_SW_MDIO_POLL_SLEEP_US,
AR9331_SW_MDIO_POLL_TIMEOUT_US);
if (ret)
goto error;
return 0;
error:
dev_err_ratelimited(priv->dev, "PHY write error: %i\n", ret);
return ret;
}
static int ar9331_sw_mbus_read(struct mii_bus *mbus, int port, int regnum)
{
struct ar9331_sw_priv *priv = mbus->priv;
struct regmap *regmap = priv->regmap;
u32 val;
int ret;
ret = regmap_write(regmap, AR9331_SW_REG_MDIO_CTRL,
AR9331_SW_MDIO_CTRL_BUSY |
AR9331_SW_MDIO_CTRL_MASTER_EN |
AR9331_SW_MDIO_CTRL_CMD_READ |
FIELD_PREP(AR9331_SW_MDIO_CTRL_PHY_ADDR_M, port) |
FIELD_PREP(AR9331_SW_MDIO_CTRL_REG_ADDR_M, regnum));
if (ret)
goto error;
ret = regmap_read_poll_timeout(regmap, AR9331_SW_REG_MDIO_CTRL, val,
!(val & AR9331_SW_MDIO_CTRL_BUSY),
AR9331_SW_MDIO_POLL_SLEEP_US,
AR9331_SW_MDIO_POLL_TIMEOUT_US);
if (ret)
goto error;
ret = regmap_read(regmap, AR9331_SW_REG_MDIO_CTRL, &val);
if (ret)
goto error;
return FIELD_GET(AR9331_SW_MDIO_CTRL_DATA_M, val);
error:
dev_err_ratelimited(priv->dev, "PHY read error: %i\n", ret);
return ret;
}
static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
{
struct device *dev = priv->dev;
struct mii_bus *mbus;
struct device_node *np, *mnp;
int ret;
np = dev->of_node;
mbus = devm_mdiobus_alloc(dev);
if (!mbus)
return -ENOMEM;
mbus->name = np->full_name;
snprintf(mbus->id, MII_BUS_ID_SIZE, "%pOF", np);
mbus->read = ar9331_sw_mbus_read;
mbus->write = ar9331_sw_mbus_write;
mbus->priv = priv;
mbus->parent = dev;
mnp = of_get_child_by_name(np, "mdio");
if (!mnp)
return -ENODEV;
ret = devm_of_mdiobus_register(dev, mbus, mnp);
of_node_put(mnp);
if (ret)
return ret;
priv->mbus = mbus;
return 0;
}
static int ar9331_sw_setup_port(struct dsa_switch *ds, int port)
{
struct ar9331_sw_priv *priv = ds->priv;
struct regmap *regmap = priv->regmap;
u32 port_mask, port_ctrl, val;
int ret;
/* Generate default port settings */
port_ctrl = FIELD_PREP(AR9331_SW_PORT_CTRL_PORT_STATE,
AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD);
if (dsa_is_cpu_port(ds, port)) {
/* CPU port should be allowed to communicate with all user
* ports.
*/
port_mask = dsa_user_ports(ds);
		/* Enable the Atheros header on the CPU port. This will allow
		 * us to communicate with each port separately
		 */
port_ctrl |= AR9331_SW_PORT_CTRL_HEAD_EN;
} else if (dsa_is_user_port(ds, port)) {
/* User ports should communicate only with the CPU port.
*/
port_mask = BIT(dsa_upstream_port(ds, port));
} else {
/* Other ports do not need to communicate at all */
port_mask = 0;
}
val = FIELD_PREP(AR9331_SW_PORT_VLAN_8021Q_MODE,
AR9331_SW_8021Q_MODE_NONE) |
FIELD_PREP(AR9331_SW_PORT_VLAN_PORT_VID_MEMBER, port_mask);
ret = regmap_write(regmap, AR9331_SW_REG_PORT_VLAN(port), val);
if (ret)
goto error;
ret = regmap_write(regmap, AR9331_SW_REG_PORT_CTRL(port), port_ctrl);
if (ret)
goto error;
return 0;
error:
dev_err(priv->dev, "%s: error: %i\n", __func__, ret);
return ret;
}
static int ar9331_sw_setup(struct dsa_switch *ds)
{
struct ar9331_sw_priv *priv = ds->priv;
struct regmap *regmap = priv->regmap;
int ret, i;
ret = ar9331_sw_reset(priv);
if (ret)
return ret;
	/* The reset sets proper defaults. The CPU port (port 0) will be
	 * enabled and configured. All other ports (ports 1 - 5) are disabled
	 */
ret = ar9331_sw_mbus_init(priv);
if (ret)
return ret;
/* Do not drop broadcast frames */
ret = regmap_write_bits(regmap, AR9331_SW_REG_FLOOD_MASK,
AR9331_SW_FLOOD_MASK_BROAD_TO_CPU,
AR9331_SW_FLOOD_MASK_BROAD_TO_CPU);
if (ret)
goto error;
/* Set max frame size to the maximum supported value */
ret = regmap_write_bits(regmap, AR9331_SW_REG_GLOBAL_CTRL,
AR9331_SW_GLOBAL_CTRL_MFS_M,
AR9331_SW_GLOBAL_CTRL_MFS_M);
if (ret)
goto error;
for (i = 0; i < ds->num_ports; i++) {
ret = ar9331_sw_setup_port(ds, i);
if (ret)
goto error;
}
ds->configure_vlan_while_not_filtering = false;
return 0;
error:
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
return ret;
}
static void ar9331_sw_port_disable(struct dsa_switch *ds, int port)
{
struct ar9331_sw_priv *priv = ds->priv;
struct regmap *regmap = priv->regmap;
int ret;
ret = regmap_write(regmap, AR9331_SW_REG_PORT_STATUS(port), 0);
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
}
static enum dsa_tag_protocol ar9331_sw_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol m)
{
return DSA_TAG_PROTO_AR9331;
}
static void ar9331_sw_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100;
switch (port) {
case 0:
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
config->mac_capabilities |= MAC_1000;
break;
case 1:
case 2:
case 3:
case 4:
case 5:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
}
}
static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
struct ar9331_sw_priv *priv = ds->priv;
struct regmap *regmap = priv->regmap;
int ret;
ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
AR9331_SW_PORT_STATUS_LINK_EN |
AR9331_SW_PORT_STATUS_FLOW_LINK_EN, 0);
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
}
static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
struct ar9331_sw_priv *priv = ds->priv;
struct ar9331_sw_port *p = &priv->port[port];
struct regmap *regmap = priv->regmap;
int ret;
ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
AR9331_SW_PORT_STATUS_MAC_MASK, 0);
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
cancel_delayed_work_sync(&p->mib_read);
}
static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct ar9331_sw_priv *priv = ds->priv;
struct ar9331_sw_port *p = &priv->port[port];
struct regmap *regmap = priv->regmap;
u32 val;
int ret;
schedule_delayed_work(&p->mib_read, 0);
val = AR9331_SW_PORT_STATUS_MAC_MASK;
switch (speed) {
case SPEED_1000:
val |= AR9331_SW_PORT_STATUS_SPEED_1000;
break;
case SPEED_100:
val |= AR9331_SW_PORT_STATUS_SPEED_100;
break;
case SPEED_10:
val |= AR9331_SW_PORT_STATUS_SPEED_10;
break;
default:
return;
}
if (duplex)
val |= AR9331_SW_PORT_STATUS_DUPLEX_MODE;
if (tx_pause)
val |= AR9331_SW_PORT_STATUS_TX_FLOW_EN;
if (rx_pause)
val |= AR9331_SW_PORT_STATUS_RX_FLOW_EN;
ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
AR9331_SW_PORT_STATUS_MAC_MASK |
AR9331_SW_PORT_STATUS_LINK_MASK,
val);
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
}
static void ar9331_read_stats(struct ar9331_sw_port *port)
{
struct ar9331_sw_priv *priv = ar9331_sw_port_to_priv(port);
struct ethtool_pause_stats *pstats = &port->pause_stats;
struct rtnl_link_stats64 *stats = &port->stats;
struct ar9331_sw_stats_raw raw;
int ret;
	/* Do the slowest part first, to avoid needless locking for a long time */
ret = regmap_bulk_read(priv->regmap, AR9331_MIB_COUNTER(port->idx),
&raw, sizeof(raw) / sizeof(u32));
if (ret) {
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
return;
}
/* All MIB counters are cleared automatically on read */
spin_lock(&port->stats_lock);
stats->rx_bytes += raw.rxgoodbyte;
stats->tx_bytes += raw.txbyte;
stats->rx_packets += raw.rx64byte + raw.rx128byte + raw.rx256byte +
raw.rx512byte + raw.rx1024byte + raw.rx1518byte + raw.rxmaxbyte;
stats->tx_packets += raw.tx64byte + raw.tx128byte + raw.tx256byte +
raw.tx512byte + raw.tx1024byte + raw.tx1518byte + raw.txmaxbyte;
stats->rx_length_errors += raw.rxrunt + raw.rxfragment + raw.rxtoolong;
stats->rx_crc_errors += raw.rxfcserr;
stats->rx_frame_errors += raw.rxalignerr;
stats->rx_missed_errors += raw.rxoverflow;
stats->rx_dropped += raw.filtered;
stats->rx_errors += raw.rxfcserr + raw.rxalignerr + raw.rxrunt +
raw.rxfragment + raw.rxoverflow + raw.rxtoolong;
stats->tx_window_errors += raw.txlatecol;
stats->tx_fifo_errors += raw.txunderrun;
stats->tx_aborted_errors += raw.txabortcol;
stats->tx_errors += raw.txoversize + raw.txabortcol + raw.txunderrun +
raw.txlatecol;
stats->multicast += raw.rxmulti;
stats->collisions += raw.txcollision;
pstats->tx_pause_frames += raw.txpause;
pstats->rx_pause_frames += raw.rxpause;
spin_unlock(&port->stats_lock);
}
static void ar9331_do_stats_poll(struct work_struct *work)
{
struct ar9331_sw_port *port = container_of(work, struct ar9331_sw_port,
mib_read.work);
ar9331_read_stats(port);
schedule_delayed_work(&port->mib_read, STATS_INTERVAL_JIFFIES);
}
static void ar9331_get_stats64(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s)
{
struct ar9331_sw_priv *priv = ds->priv;
struct ar9331_sw_port *p = &priv->port[port];
spin_lock(&p->stats_lock);
memcpy(s, &p->stats, sizeof(*s));
spin_unlock(&p->stats_lock);
}
static void ar9331_get_pause_stats(struct dsa_switch *ds, int port,
struct ethtool_pause_stats *pause_stats)
{
struct ar9331_sw_priv *priv = ds->priv;
struct ar9331_sw_port *p = &priv->port[port];
spin_lock(&p->stats_lock);
memcpy(pause_stats, &p->pause_stats, sizeof(*pause_stats));
spin_unlock(&p->stats_lock);
}
static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
.port_disable = ar9331_sw_port_disable,
.phylink_get_caps = ar9331_sw_phylink_get_caps,
.phylink_mac_config = ar9331_sw_phylink_mac_config,
.phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
.phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
.get_stats64 = ar9331_get_stats64,
.get_pause_stats = ar9331_get_pause_stats,
};
static irqreturn_t ar9331_sw_irq(int irq, void *data)
{
struct ar9331_sw_priv *priv = data;
struct regmap *regmap = priv->regmap;
u32 stat;
int ret;
ret = regmap_read(regmap, AR9331_SW_REG_GINT, &stat);
if (ret) {
dev_err(priv->dev, "can't read interrupt status\n");
return IRQ_NONE;
}
if (!stat)
return IRQ_NONE;
if (stat & AR9331_SW_GINT_PHY_INT) {
int child_irq;
child_irq = irq_find_mapping(priv->irqdomain, 0);
handle_nested_irq(child_irq);
}
ret = regmap_write(regmap, AR9331_SW_REG_GINT, stat);
if (ret) {
dev_err(priv->dev, "can't write interrupt status\n");
return IRQ_NONE;
}
return IRQ_HANDLED;
}
static void ar9331_sw_mask_irq(struct irq_data *d)
{
struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
priv->irq_mask = 0;
}
static void ar9331_sw_unmask_irq(struct irq_data *d)
{
struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
priv->irq_mask = AR9331_SW_GINT_PHY_INT;
}
static void ar9331_sw_irq_bus_lock(struct irq_data *d)
{
struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
mutex_lock(&priv->lock_irq);
}
static void ar9331_sw_irq_bus_sync_unlock(struct irq_data *d)
{
struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
struct regmap *regmap = priv->regmap;
int ret;
ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK,
AR9331_SW_GINT_PHY_INT, priv->irq_mask);
if (ret)
dev_err(priv->dev, "failed to change IRQ mask\n");
mutex_unlock(&priv->lock_irq);
}
static struct irq_chip ar9331_sw_irq_chip = {
.name = AR9331_SW_NAME,
.irq_mask = ar9331_sw_mask_irq,
.irq_unmask = ar9331_sw_unmask_irq,
.irq_bus_lock = ar9331_sw_irq_bus_lock,
.irq_bus_sync_unlock = ar9331_sw_irq_bus_sync_unlock,
};
static int ar9331_sw_irq_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(irq, domain->host_data);
irq_set_chip_and_handler(irq, &ar9331_sw_irq_chip, handle_simple_irq);
irq_set_nested_thread(irq, 1);
irq_set_noprobe(irq);
return 0;
}
static void ar9331_sw_irq_unmap(struct irq_domain *d, unsigned int irq)
{
irq_set_nested_thread(irq, 0);
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
}
static const struct irq_domain_ops ar9331_sw_irqdomain_ops = {
.map = ar9331_sw_irq_map,
.unmap = ar9331_sw_irq_unmap,
.xlate = irq_domain_xlate_onecell,
};
static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
{
struct device_node *np = priv->dev->of_node;
struct device *dev = priv->dev;
int ret, irq;
irq = of_irq_get(np, 0);
if (irq <= 0) {
dev_err(dev, "failed to get parent IRQ\n");
return irq ? irq : -EINVAL;
}
mutex_init(&priv->lock_irq);
ret = devm_request_threaded_irq(dev, irq, NULL, ar9331_sw_irq,
IRQF_ONESHOT, AR9331_SW_NAME, priv);
if (ret) {
dev_err(dev, "unable to request irq: %d\n", ret);
return ret;
}
priv->irqdomain = irq_domain_add_linear(np, 1, &ar9331_sw_irqdomain_ops,
priv);
if (!priv->irqdomain) {
dev_err(dev, "failed to create IRQ domain\n");
return -EINVAL;
}
irq_set_parent(irq_create_mapping(priv->irqdomain, 0), irq);
return 0;
}
static int __ar9331_mdio_write(struct mii_bus *sbus, u8 mode, u16 reg, u16 val)
{
u8 r, p;
p = FIELD_PREP(AR9331_SW_MDIO_PHY_MODE_M, mode) |
FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
return __mdiobus_write(sbus, p, r, val);
}
static int __ar9331_mdio_read(struct mii_bus *sbus, u16 reg)
{
u8 r, p;
p = FIELD_PREP(AR9331_SW_MDIO_PHY_MODE_M, AR9331_SW_MDIO_PHY_MODE_REG) |
FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
return __mdiobus_read(sbus, p, r);
}
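/* regmap read callback: each 32-bit switch register is assembled from two
 * 16-bit MDIO reads, the low half at reg and the high half at reg + 2.
 * The dummy page selector register is never read back from hardware.
 */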
static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
void *val_buf, size_t val_len)
{
struct ar9331_sw_priv *priv = ctx;
struct mii_bus *sbus = priv->sbus;
u32 reg = *(u32 *)reg_buf;
int ret;
if (reg == AR9331_SW_REG_PAGE) {
		/* We cannot read the page selector register from hardware and
		 * we cache its value in regmap. Return all bits set here, so
		 * that regmap will always write the page on first use.
		 */
*(u32 *)val_buf = GENMASK(9, 0);
return 0;
}
mutex_lock_nested(&sbus->mdio_lock, MDIO_MUTEX_NESTED);
ret = __ar9331_mdio_read(sbus, reg);
if (ret < 0)
goto error;
*(u32 *)val_buf = ret;
ret = __ar9331_mdio_read(sbus, reg + 2);
if (ret < 0)
goto error;
*(u32 *)val_buf |= ret << 16;
mutex_unlock(&sbus->mdio_lock);
return 0;
error:
mutex_unlock(&sbus->mdio_lock);
dev_err_ratelimited(&sbus->dev, "Bus error. Failed to read register.\n");
return ret;
}
static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
{
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ctx;
struct mii_bus *sbus = priv->sbus;
int ret;
mutex_lock_nested(&sbus->mdio_lock, MDIO_MUTEX_NESTED);
if (reg == AR9331_SW_REG_PAGE) {
ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_PAGE,
0, val);
if (ret < 0)
goto error;
mutex_unlock(&sbus->mdio_lock);
return 0;
}
	/* On this switch we work with 32bit registers on top of a 16bit
	 * bus. Some registers (for example access to the forwarding database)
	 * have a trigger bit in the first 16bit half of the request, and the
	 * result and configuration of the request in the second half.
	 * To make it work properly, we should do the second part of the
	 * transfer before the first one is done.
	 */
ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
val >> 16);
if (ret < 0)
goto error;
ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
if (ret < 0)
goto error;
mutex_unlock(&sbus->mdio_lock);
return 0;
error:
mutex_unlock(&sbus->mdio_lock);
dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
return ret;
}
static int ar9331_sw_bus_write(void *context, const void *data, size_t count)
{
u32 reg = *(u32 *)data;
u32 val = *((u32 *)data + 1);
return ar9331_mdio_write(context, reg, val);
}
static const struct regmap_range ar9331_valid_regs[] = {
regmap_reg_range(0x0, 0x0),
regmap_reg_range(0x10, 0x14),
regmap_reg_range(0x20, 0x24),
regmap_reg_range(0x2c, 0x30),
regmap_reg_range(0x40, 0x44),
regmap_reg_range(0x50, 0x78),
regmap_reg_range(0x80, 0x98),
regmap_reg_range(0x100, 0x120),
regmap_reg_range(0x200, 0x220),
regmap_reg_range(0x300, 0x320),
regmap_reg_range(0x400, 0x420),
regmap_reg_range(0x500, 0x520),
regmap_reg_range(0x600, 0x620),
regmap_reg_range(0x20000, 0x200a4),
regmap_reg_range(0x20100, 0x201a4),
regmap_reg_range(0x20200, 0x202a4),
regmap_reg_range(0x20300, 0x203a4),
regmap_reg_range(0x20400, 0x204a4),
regmap_reg_range(0x20500, 0x205a4),
/* dummy page selector reg */
regmap_reg_range(AR9331_SW_REG_PAGE, AR9331_SW_REG_PAGE),
};
static const struct regmap_range ar9331_nonvolatile_regs[] = {
regmap_reg_range(AR9331_SW_REG_PAGE, AR9331_SW_REG_PAGE),
};
static const struct regmap_range_cfg ar9331_regmap_range[] = {
{
.selector_reg = AR9331_SW_REG_PAGE,
.selector_mask = GENMASK(9, 0),
.selector_shift = 0,
.window_start = 0,
.window_len = 512,
.range_min = 0,
.range_max = AR9331_SW_REG_PAGE - 4,
},
};
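/* This single range maps the flat 32-bit register space through the dummy
 * page selector: regmap writes bits [18:9] of the target address to
 * AR9331_SW_REG_PAGE and then accesses the offset inside the 512-byte
 * window, so callers can keep using absolute register addresses.
 */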
static const struct regmap_access_table ar9331_register_set = {
.yes_ranges = ar9331_valid_regs,
.n_yes_ranges = ARRAY_SIZE(ar9331_valid_regs),
};
static const struct regmap_access_table ar9331_volatile_set = {
.no_ranges = ar9331_nonvolatile_regs,
.n_no_ranges = ARRAY_SIZE(ar9331_nonvolatile_regs),
};
static const struct regmap_config ar9331_mdio_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = AR9331_SW_REG_PAGE,
.use_single_read = true,
.use_single_write = true,
.ranges = ar9331_regmap_range,
.num_ranges = ARRAY_SIZE(ar9331_regmap_range),
.volatile_table = &ar9331_volatile_set,
.wr_table = &ar9331_register_set,
.rd_table = &ar9331_register_set,
.cache_type = REGCACHE_MAPLE,
};
static struct regmap_bus ar9331_sw_bus = {
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
.read = ar9331_mdio_read,
.write = ar9331_sw_bus_write,
};
static int ar9331_sw_probe(struct mdio_device *mdiodev)
{
struct ar9331_sw_priv *priv;
struct dsa_switch *ds;
int ret, i;
priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->regmap = devm_regmap_init(&mdiodev->dev, &ar9331_sw_bus, priv,
&ar9331_mdio_regmap_config);
if (IS_ERR(priv->regmap)) {
ret = PTR_ERR(priv->regmap);
dev_err(&mdiodev->dev, "regmap init failed: %d\n", ret);
return ret;
}
priv->sw_reset = devm_reset_control_get(&mdiodev->dev, "switch");
if (IS_ERR(priv->sw_reset)) {
dev_err(&mdiodev->dev, "missing switch reset\n");
return PTR_ERR(priv->sw_reset);
}
priv->sbus = mdiodev->bus;
priv->dev = &mdiodev->dev;
ret = ar9331_sw_irq_init(priv);
if (ret)
return ret;
ds = &priv->ds;
ds->dev = &mdiodev->dev;
ds->num_ports = AR9331_SW_PORTS;
ds->priv = priv;
priv->ops = ar9331_sw_ops;
ds->ops = &priv->ops;
dev_set_drvdata(&mdiodev->dev, priv);
for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
struct ar9331_sw_port *port = &priv->port[i];
port->idx = i;
spin_lock_init(&port->stats_lock);
INIT_DELAYED_WORK(&port->mib_read, ar9331_do_stats_poll);
}
ret = dsa_register_switch(ds);
if (ret)
goto err_remove_irq;
return 0;
err_remove_irq:
irq_domain_remove(priv->irqdomain);
return ret;
}
static void ar9331_sw_remove(struct mdio_device *mdiodev)
{
struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
unsigned int i;
if (!priv)
return;
for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
struct ar9331_sw_port *port = &priv->port[i];
cancel_delayed_work_sync(&port->mib_read);
}
irq_domain_remove(priv->irqdomain);
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
}
static void ar9331_sw_shutdown(struct mdio_device *mdiodev)
{
struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
dsa_switch_shutdown(&priv->ds);
dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id ar9331_sw_of_match[] = {
{ .compatible = "qca,ar9331-switch" },
{ },
};
static struct mdio_driver ar9331_sw_mdio_driver = {
.probe = ar9331_sw_probe,
.remove = ar9331_sw_remove,
.shutdown = ar9331_sw_shutdown,
.mdiodrv.driver = {
.name = AR9331_SW_NAME,
.of_match_table = ar9331_sw_of_match,
},
};
mdio_module_driver(ar9331_sw_mdio_driver);
MODULE_AUTHOR("Oleksij Rempel <[email protected]>");
MODULE_DESCRIPTION("Driver for Atheros AR9331 switch");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/qca/ar9331.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/property.h>
#include <linux/regmap.h>
#include <net/dsa.h>
#include "qca8k.h"
#include "qca8k_leds.h"
static u32 qca8k_phy_to_port(int phy)
{
/* Internal PHY 0 has port at index 1.
* Internal PHY 1 has port at index 2.
* Internal PHY 2 has port at index 3.
* Internal PHY 3 has port at index 4.
* Internal PHY 4 has port at index 5.
*/
return phy + 1;
}
static int
qca8k_get_enable_led_reg(int port_num, int led_num, struct qca8k_led_pattern_en *reg_info)
{
switch (port_num) {
case 0:
reg_info->reg = QCA8K_LED_CTRL_REG(led_num);
reg_info->shift = QCA8K_LED_PHY0123_CONTROL_RULE_SHIFT;
break;
case 1:
case 2:
case 3:
		/* Ports 1, 2 and 3 are controlled by a different reg */
reg_info->reg = QCA8K_LED_CTRL3_REG;
reg_info->shift = QCA8K_LED_PHY123_PATTERN_EN_SHIFT(port_num, led_num);
break;
case 4:
reg_info->reg = QCA8K_LED_CTRL_REG(led_num);
reg_info->shift = QCA8K_LED_PHY4_CONTROL_RULE_SHIFT;
break;
default:
return -EINVAL;
}
return 0;
}
static int
qca8k_get_control_led_reg(int port_num, int led_num, struct qca8k_led_pattern_en *reg_info)
{
reg_info->reg = QCA8K_LED_CTRL_REG(led_num);
	/* 6 control rules in total:
	 * 3 control rules for phy0-3 that apply to all their leds
	 * 3 control rules for phy4
	 */
if (port_num == 4)
reg_info->shift = QCA8K_LED_PHY4_CONTROL_RULE_SHIFT;
else
reg_info->shift = QCA8K_LED_PHY0123_CONTROL_RULE_SHIFT;
return 0;
}
static int
qca8k_parse_netdev(unsigned long rules, u32 *offload_trigger)
{
/* Parsing specific to netdev trigger */
if (test_bit(TRIGGER_NETDEV_TX, &rules))
*offload_trigger |= QCA8K_LED_TX_BLINK_MASK;
if (test_bit(TRIGGER_NETDEV_RX, &rules))
*offload_trigger |= QCA8K_LED_RX_BLINK_MASK;
if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
*offload_trigger |= QCA8K_LED_LINK_10M_EN_MASK;
if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
*offload_trigger |= QCA8K_LED_LINK_100M_EN_MASK;
if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
*offload_trigger |= QCA8K_LED_LINK_1000M_EN_MASK;
if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
*offload_trigger |= QCA8K_LED_HALF_DUPLEX_MASK;
if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
*offload_trigger |= QCA8K_LED_FULL_DUPLEX_MASK;
if (rules && !*offload_trigger)
return -EOPNOTSUPP;
	/* Enable some default rules for the requested mode:
	 * - Blink at 4Hz by default
	 */
*offload_trigger |= QCA8K_LED_BLINK_4HZ;
return 0;
}
static int
qca8k_led_brightness_set(struct qca8k_led *led,
enum led_brightness brightness)
{
struct qca8k_led_pattern_en reg_info;
struct qca8k_priv *priv = led->priv;
u32 mask, val;
	qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
val = QCA8K_LED_ALWAYS_OFF;
if (brightness)
val = QCA8K_LED_ALWAYS_ON;
	/* The HW regs that control brightness are special and ports 1-2-3
	 * are placed in a different reg.
*
* To control port 0 brightness:
* - the 2 bit (15, 14) of:
* - QCA8K_LED_CTRL0_REG for led1
* - QCA8K_LED_CTRL1_REG for led2
* - QCA8K_LED_CTRL2_REG for led3
*
* To control port 4:
* - the 2 bit (31, 30) of:
* - QCA8K_LED_CTRL0_REG for led1
* - QCA8K_LED_CTRL1_REG for led2
* - QCA8K_LED_CTRL2_REG for led3
*
* To control port 1:
* - the 2 bit at (9, 8) of QCA8K_LED_CTRL3_REG are used for led1
* - the 2 bit at (11, 10) of QCA8K_LED_CTRL3_REG are used for led2
* - the 2 bit at (13, 12) of QCA8K_LED_CTRL3_REG are used for led3
*
* To control port 2:
* - the 2 bit at (15, 14) of QCA8K_LED_CTRL3_REG are used for led1
* - the 2 bit at (17, 16) of QCA8K_LED_CTRL3_REG are used for led2
* - the 2 bit at (19, 18) of QCA8K_LED_CTRL3_REG are used for led3
*
* To control port 3:
* - the 2 bit at (21, 20) of QCA8K_LED_CTRL3_REG are used for led1
* - the 2 bit at (23, 22) of QCA8K_LED_CTRL3_REG are used for led2
* - the 2 bit at (25, 24) of QCA8K_LED_CTRL3_REG are used for led3
*
	 * To abstract this and have less code, we use the port and led num
	 * to calculate the shift and the correct reg, to work around this
	 * problem of not having a 1:1 map of LEDs to regs.
*/
if (led->port_num == 0 || led->port_num == 4) {
mask = QCA8K_LED_PATTERN_EN_MASK;
val <<= QCA8K_LED_PATTERN_EN_SHIFT;
} else {
mask = QCA8K_LED_PHY123_PATTERN_EN_MASK;
}
return regmap_update_bits(priv->regmap, reg_info.reg,
mask << reg_info.shift,
val << reg_info.shift);
}
static int
qca8k_cled_brightness_set_blocking(struct led_classdev *ldev,
enum led_brightness brightness)
{
struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
return qca8k_led_brightness_set(led, brightness);
}
static enum led_brightness
qca8k_led_brightness_get(struct qca8k_led *led)
{
struct qca8k_led_pattern_en reg_info;
struct qca8k_priv *priv = led->priv;
u32 val;
int ret;
	qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
ret = regmap_read(priv->regmap, reg_info.reg, &val);
if (ret)
return 0;
val >>= reg_info.shift;
if (led->port_num == 0 || led->port_num == 4) {
val &= QCA8K_LED_PATTERN_EN_MASK;
val >>= QCA8K_LED_PATTERN_EN_SHIFT;
} else {
val &= QCA8K_LED_PHY123_PATTERN_EN_MASK;
}
/* Assume brightness ON only when the LED is set to always ON */
return val == QCA8K_LED_ALWAYS_ON;
}
static int
qca8k_cled_blink_set(struct led_classdev *ldev,
unsigned long *delay_on,
unsigned long *delay_off)
{
struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
u32 mask, val = QCA8K_LED_ALWAYS_BLINK_4HZ;
struct qca8k_led_pattern_en reg_info;
struct qca8k_priv *priv = led->priv;
if (*delay_on == 0 && *delay_off == 0) {
*delay_on = 125;
*delay_off = 125;
}
if (*delay_on != 125 || *delay_off != 125) {
/* The hardware only supports blinking at 4Hz. Fall back
* to software implementation in other cases.
*/
return -EINVAL;
}
	qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
if (led->port_num == 0 || led->port_num == 4) {
mask = QCA8K_LED_PATTERN_EN_MASK;
val <<= QCA8K_LED_PATTERN_EN_SHIFT;
} else {
mask = QCA8K_LED_PHY123_PATTERN_EN_MASK;
}
regmap_update_bits(priv->regmap, reg_info.reg, mask << reg_info.shift,
val << reg_info.shift);
return 0;
}
static int
qca8k_cled_trigger_offload(struct led_classdev *ldev, bool enable)
{
struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
struct qca8k_led_pattern_en reg_info;
struct qca8k_priv *priv = led->priv;
u32 mask, val = QCA8K_LED_ALWAYS_OFF;
	qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
if (enable)
val = QCA8K_LED_RULE_CONTROLLED;
if (led->port_num == 0 || led->port_num == 4) {
mask = QCA8K_LED_PATTERN_EN_MASK;
val <<= QCA8K_LED_PATTERN_EN_SHIFT;
} else {
mask = QCA8K_LED_PHY123_PATTERN_EN_MASK;
}
return regmap_update_bits(priv->regmap, reg_info.reg, mask << reg_info.shift,
val << reg_info.shift);
}
static bool
qca8k_cled_hw_control_status(struct led_classdev *ldev)
{
struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
struct qca8k_led_pattern_en reg_info;
struct qca8k_priv *priv = led->priv;
u32 val;
	qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
regmap_read(priv->regmap, reg_info.reg, &val);
val >>= reg_info.shift;
if (led->port_num == 0 || led->port_num == 4) {
val &= QCA8K_LED_PATTERN_EN_MASK;
val >>= QCA8K_LED_PATTERN_EN_SHIFT;
} else {
val &= QCA8K_LED_PHY123_PATTERN_EN_MASK;
}
return val == QCA8K_LED_RULE_CONTROLLED;
}
static int
qca8k_cled_hw_control_is_supported(struct led_classdev *ldev, unsigned long rules)
{
u32 offload_trigger = 0;
return qca8k_parse_netdev(rules, &offload_trigger);
}
static int
qca8k_cled_hw_control_set(struct led_classdev *ldev, unsigned long rules)
{
struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
struct qca8k_led_pattern_en reg_info;
struct qca8k_priv *priv = led->priv;
u32 offload_trigger = 0;
int ret;
ret = qca8k_parse_netdev(rules, &offload_trigger);
if (ret)
return ret;
ret = qca8k_cled_trigger_offload(ldev, true);
if (ret)
return ret;
	qca8k_get_control_led_reg(led->port_num, led->led_num, &reg_info);
return regmap_update_bits(priv->regmap, reg_info.reg,
QCA8K_LED_RULE_MASK << reg_info.shift,
offload_trigger << reg_info.shift);
}
static int
qca8k_cled_hw_control_get(struct led_classdev *ldev, unsigned long *rules)
{
struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
struct qca8k_led_pattern_en reg_info;
struct qca8k_priv *priv = led->priv;
u32 val;
int ret;
	/* Return an error if hw control is not active */
if (!qca8k_cled_hw_control_status(ldev))
return -EINVAL;
	qca8k_get_control_led_reg(led->port_num, led->led_num, &reg_info);
ret = regmap_read(priv->regmap, reg_info.reg, &val);
if (ret)
return ret;
val >>= reg_info.shift;
val &= QCA8K_LED_RULE_MASK;
/* Parsing specific to netdev trigger */
if (val & QCA8K_LED_TX_BLINK_MASK)
set_bit(TRIGGER_NETDEV_TX, rules);
if (val & QCA8K_LED_RX_BLINK_MASK)
set_bit(TRIGGER_NETDEV_RX, rules);
if (val & QCA8K_LED_LINK_10M_EN_MASK)
set_bit(TRIGGER_NETDEV_LINK_10, rules);
if (val & QCA8K_LED_LINK_100M_EN_MASK)
set_bit(TRIGGER_NETDEV_LINK_100, rules);
if (val & QCA8K_LED_LINK_1000M_EN_MASK)
set_bit(TRIGGER_NETDEV_LINK_1000, rules);
if (val & QCA8K_LED_HALF_DUPLEX_MASK)
set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
if (val & QCA8K_LED_FULL_DUPLEX_MASK)
set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
return 0;
}
static struct device *qca8k_cled_hw_control_get_device(struct led_classdev *ldev)
{
struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
struct qca8k_priv *priv = led->priv;
struct dsa_port *dp;
dp = dsa_to_port(priv->ds, qca8k_phy_to_port(led->port_num));
if (!dp)
return NULL;
if (dp->slave)
return &dp->slave->dev;
return NULL;
}
static int
qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int port_num)
{
struct fwnode_handle *led = NULL, *leds = NULL;
struct led_init_data init_data = { };
struct dsa_switch *ds = priv->ds;
enum led_default_state state;
struct qca8k_led *port_led;
int led_num, led_index;
int ret;
leds = fwnode_get_named_child_node(port, "leds");
if (!leds) {
dev_dbg(priv->dev, "No Leds node specified in device tree for port %d!\n",
port_num);
return 0;
}
fwnode_for_each_child_node(leds, led) {
		/* Reg represents the led number of the port.
		 * Each port can have at most 3 leds attached.
		 * Commonly:
		 * 1. is the gigabit led
		 * 2. is the mbit led
		 * 3. is an additional status led
		 */
if (fwnode_property_read_u32(led, "reg", &led_num))
continue;
if (led_num >= QCA8K_LED_PORT_COUNT) {
dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
led_num, port_num);
continue;
}
led_index = QCA8K_LED_PORT_INDEX(port_num, led_num);
port_led = &priv->ports_led[led_index];
port_led->port_num = port_num;
port_led->led_num = led_num;
port_led->priv = priv;
state = led_init_default_state_get(led);
switch (state) {
case LEDS_DEFSTATE_ON:
port_led->cdev.brightness = 1;
qca8k_led_brightness_set(port_led, 1);
break;
case LEDS_DEFSTATE_KEEP:
port_led->cdev.brightness =
qca8k_led_brightness_get(port_led);
break;
default:
port_led->cdev.brightness = 0;
qca8k_led_brightness_set(port_led, 0);
}
port_led->cdev.max_brightness = 1;
port_led->cdev.brightness_set_blocking = qca8k_cled_brightness_set_blocking;
port_led->cdev.blink_set = qca8k_cled_blink_set;
port_led->cdev.hw_control_is_supported = qca8k_cled_hw_control_is_supported;
port_led->cdev.hw_control_set = qca8k_cled_hw_control_set;
port_led->cdev.hw_control_get = qca8k_cled_hw_control_get;
port_led->cdev.hw_control_get_device = qca8k_cled_hw_control_get_device;
port_led->cdev.hw_control_trigger = "netdev";
init_data.default_label = ":port";
init_data.fwnode = led;
init_data.devname_mandatory = true;
init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", ds->slave_mii_bus->id,
port_num);
if (!init_data.devicename)
return -ENOMEM;
ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
if (ret)
dev_warn(priv->dev, "Failed to init LED %d for port %d", led_num, port_num);
kfree(init_data.devicename);
}
return 0;
}
int
qca8k_setup_led_ctrl(struct qca8k_priv *priv)
{
struct fwnode_handle *ports, *port;
int port_num;
int ret;
ports = device_get_named_child_node(priv->dev, "ports");
if (!ports) {
dev_info(priv->dev, "No ports node specified in device tree!");
return 0;
}
fwnode_for_each_child_node(ports, port) {
if (fwnode_property_read_u32(port, "reg", &port_num))
continue;
		/* Skip CPU port 0 and CPU port 6 as LEDs are not supported there */
if (port_num == 0 || port_num == 6)
continue;
		/* Each port can have at most 3 different leds attached.
		 * Switch ports go from 0 to 6, but ports 0 and 6 are CPU
		 * ports. The port index needs to be decreased by one to
		 * identify the correct PHY for LED setup.
		 */
ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
if (ret)
return ret;
}
return 0;
}
| linux-master | drivers/net/dsa/qca/qca8k-leds.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2009 Felix Fietkau <[email protected]>
* Copyright (C) 2011-2012 Gabor Juhos <[email protected]>
* Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2016 John Crispin <[email protected]>
*/
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/if_bridge.h>
#include "qca8k.h"
#define MIB_DESC(_s, _o, _n) \
{ \
.size = (_s), \
.offset = (_o), \
.name = (_n), \
}
const struct qca8k_mib_desc ar8327_mib[] = {
MIB_DESC(1, 0x00, "RxBroad"),
MIB_DESC(1, 0x04, "RxPause"),
MIB_DESC(1, 0x08, "RxMulti"),
MIB_DESC(1, 0x0c, "RxFcsErr"),
MIB_DESC(1, 0x10, "RxAlignErr"),
MIB_DESC(1, 0x14, "RxRunt"),
MIB_DESC(1, 0x18, "RxFragment"),
MIB_DESC(1, 0x1c, "Rx64Byte"),
MIB_DESC(1, 0x20, "Rx128Byte"),
MIB_DESC(1, 0x24, "Rx256Byte"),
MIB_DESC(1, 0x28, "Rx512Byte"),
MIB_DESC(1, 0x2c, "Rx1024Byte"),
MIB_DESC(1, 0x30, "Rx1518Byte"),
MIB_DESC(1, 0x34, "RxMaxByte"),
MIB_DESC(1, 0x38, "RxTooLong"),
MIB_DESC(2, 0x3c, "RxGoodByte"),
MIB_DESC(2, 0x44, "RxBadByte"),
MIB_DESC(1, 0x4c, "RxOverFlow"),
MIB_DESC(1, 0x50, "Filtered"),
MIB_DESC(1, 0x54, "TxBroad"),
MIB_DESC(1, 0x58, "TxPause"),
MIB_DESC(1, 0x5c, "TxMulti"),
MIB_DESC(1, 0x60, "TxUnderRun"),
MIB_DESC(1, 0x64, "Tx64Byte"),
MIB_DESC(1, 0x68, "Tx128Byte"),
MIB_DESC(1, 0x6c, "Tx256Byte"),
MIB_DESC(1, 0x70, "Tx512Byte"),
MIB_DESC(1, 0x74, "Tx1024Byte"),
MIB_DESC(1, 0x78, "Tx1518Byte"),
MIB_DESC(1, 0x7c, "TxMaxByte"),
MIB_DESC(1, 0x80, "TxOverSize"),
MIB_DESC(2, 0x84, "TxByte"),
MIB_DESC(1, 0x8c, "TxCollision"),
MIB_DESC(1, 0x90, "TxAbortCol"),
MIB_DESC(1, 0x94, "TxMultiCol"),
MIB_DESC(1, 0x98, "TxSingleCol"),
MIB_DESC(1, 0x9c, "TxExcDefer"),
MIB_DESC(1, 0xa0, "TxDefer"),
MIB_DESC(1, 0xa4, "TxLateCol"),
MIB_DESC(1, 0xa8, "RXUnicast"),
MIB_DESC(1, 0xac, "TXUnicast"),
};
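/* A size of 2 marks a 64-bit counter that spans two consecutive 32-bit MIB
 * registers. The Ethernet MIB autocast handler in qca8k-8xxx.c walks this
 * table and relies on that, as well as on the first three counters being
 * carried in the MIB packet header.
 */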
int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
{
return regmap_read(priv->regmap, reg, val);
}
int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
{
return regmap_write(priv->regmap, reg, val);
}
int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
{
return regmap_update_bits(priv->regmap, reg, mask, write_val);
}
static const struct regmap_range qca8k_readable_ranges[] = {
regmap_reg_range(0x0000, 0x00e4), /* Global control */
regmap_reg_range(0x0100, 0x0168), /* EEE control */
regmap_reg_range(0x0200, 0x0270), /* Parser control */
regmap_reg_range(0x0400, 0x0454), /* ACL */
regmap_reg_range(0x0600, 0x0718), /* Lookup */
regmap_reg_range(0x0800, 0x0b70), /* QM */
regmap_reg_range(0x0c00, 0x0c80), /* PKT */
regmap_reg_range(0x0e00, 0x0e98), /* L3 */
regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
};
const struct regmap_access_table qca8k_readable_table = {
.yes_ranges = qca8k_readable_ranges,
.n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
};
static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
{
u32 val;
return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
}
static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
u32 reg[QCA8K_ATU_TABLE_SIZE];
int ret;
/* load the ARL table into an array */
ret = regmap_bulk_read(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
QCA8K_ATU_TABLE_SIZE);
if (ret)
return ret;
/* vid - 83:72 */
fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
/* aging - 67:64 */
fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
/* portmask - 54:48 */
fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
/* mac - 47:0 */
fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
return 0;
}
static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
const u8 *mac, u8 aging)
{
u32 reg[QCA8K_ATU_TABLE_SIZE] = { 0 };
/* vid - 83:72 */
reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
/* aging - 67:64 */
reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
/* portmask - 54:48 */
reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
/* mac - 47:0 */
reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
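	/* Illustrative example, following the bit ranges noted above: for
	 * MAC 00:11:22:33:44:55, bytes 00 and 11 are packed into reg[1]
	 * together with the port mask, while bytes 22:33:44:55 end up in
	 * reg[0].
	 */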
/* load the array into the ARL table */
regmap_bulk_write(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
QCA8K_ATU_TABLE_SIZE);
}
static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
int port)
{
u32 reg;
int ret;
/* Set the command and FDB index */
reg = QCA8K_ATU_FUNC_BUSY;
reg |= cmd;
if (port >= 0) {
reg |= QCA8K_ATU_FUNC_PORT_EN;
reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
}
/* Write the function register triggering the table access */
ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
if (ret)
return ret;
/* wait for completion */
ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
if (ret)
return ret;
/* Check for table full violation when adding an entry */
if (cmd == QCA8K_FDB_LOAD) {
		ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
if (ret < 0)
return ret;
if (reg & QCA8K_ATU_FUNC_FULL)
return -1;
}
return 0;
}
static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
int port)
{
int ret;
qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
if (ret < 0)
return ret;
return qca8k_fdb_read(priv, fdb);
}
static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
u16 port_mask, u16 vid, u8 aging)
{
int ret;
mutex_lock(&priv->reg_mutex);
qca8k_fdb_write(priv, vid, port_mask, mac, aging);
ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
mutex_unlock(&priv->reg_mutex);
return ret;
}
static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
u16 port_mask, u16 vid)
{
int ret;
mutex_lock(&priv->reg_mutex);
qca8k_fdb_write(priv, vid, port_mask, mac, 0);
ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
mutex_unlock(&priv->reg_mutex);
return ret;
}
void qca8k_fdb_flush(struct qca8k_priv *priv)
{
mutex_lock(&priv->reg_mutex);
qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
mutex_unlock(&priv->reg_mutex);
}
static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
const u8 *mac, u16 vid, u8 aging)
{
struct qca8k_fdb fdb = { 0 };
int ret;
mutex_lock(&priv->reg_mutex);
qca8k_fdb_write(priv, vid, 0, mac, 0);
ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
if (ret < 0)
goto exit;
ret = qca8k_fdb_read(priv, &fdb);
if (ret < 0)
goto exit;
	/* Rule exists. Delete first */
if (fdb.aging) {
ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
if (ret)
goto exit;
} else {
fdb.aging = aging;
}
/* Add port to fdb portmask */
fdb.port_mask |= port_mask;
qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
exit:
mutex_unlock(&priv->reg_mutex);
return ret;
}
static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
const u8 *mac, u16 vid)
{
struct qca8k_fdb fdb = { 0 };
int ret;
mutex_lock(&priv->reg_mutex);
qca8k_fdb_write(priv, vid, 0, mac, 0);
ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
if (ret < 0)
goto exit;
ret = qca8k_fdb_read(priv, &fdb);
if (ret < 0)
goto exit;
/* Rule doesn't exist. Why delete? */
if (!fdb.aging) {
ret = -EINVAL;
goto exit;
}
ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
if (ret)
goto exit;
	/* The only port in the rule is this port. Don't re-insert */
if (fdb.port_mask == port_mask)
goto exit;
/* Remove port from port mask */
fdb.port_mask &= ~port_mask;
qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
exit:
mutex_unlock(&priv->reg_mutex);
return ret;
}
static int qca8k_vlan_access(struct qca8k_priv *priv,
enum qca8k_vlan_cmd cmd, u16 vid)
{
u32 reg;
int ret;
/* Set the command and VLAN index */
reg = QCA8K_VTU_FUNC1_BUSY;
reg |= cmd;
reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
/* Write the function register triggering the table access */
ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
if (ret)
return ret;
/* wait for completion */
ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
if (ret)
return ret;
/* Check for table full violation when adding an entry */
if (cmd == QCA8K_VLAN_LOAD) {
		ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
if (ret < 0)
return ret;
if (reg & QCA8K_VTU_FUNC1_FULL)
return -ENOMEM;
}
return 0;
}
static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
bool untagged)
{
u32 reg;
int ret;
/* We do the right thing with VLAN 0 and treat it as untagged while
* preserving the tag on egress.
*/
if (vid == 0)
return 0;
mutex_lock(&priv->reg_mutex);
ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
if (ret < 0)
goto out;
	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
if (ret < 0)
goto out;
reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
if (untagged)
reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
else
reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
if (ret)
goto out;
ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
out:
mutex_unlock(&priv->reg_mutex);
return ret;
}
static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
{
u32 reg, mask;
int ret, i;
bool del;
mutex_lock(&priv->reg_mutex);
ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
if (ret < 0)
goto out;
	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
if (ret < 0)
goto out;
reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
/* Check if we're the last member to be removed */
del = true;
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
if ((reg & mask) != mask) {
del = false;
break;
}
}
if (del) {
ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
} else {
ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
if (ret)
goto out;
ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
}
out:
mutex_unlock(&priv->reg_mutex);
return ret;
}
int qca8k_mib_init(struct qca8k_priv *priv)
{
int ret;
mutex_lock(&priv->reg_mutex);
ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
QCA8K_MIB_BUSY);
if (ret)
goto exit;
ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
if (ret)
goto exit;
ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
if (ret)
goto exit;
ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
exit:
mutex_unlock(&priv->reg_mutex);
return ret;
}
void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
{
u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
/* Port 0 and 6 have no internal PHY */
if (port > 0 && port < 6)
mask |= QCA8K_PORT_STATUS_LINK_AUTO;
if (enable)
regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
else
regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
}
void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
{
struct qca8k_priv *priv = ds->priv;
int i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < priv->info->mib_count; i++)
strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
ETH_GSTRING_LEN);
}
void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct qca8k_priv *priv = ds->priv;
const struct qca8k_mib_desc *mib;
u32 reg, i, val;
u32 hi = 0;
int ret;
if (priv->mgmt_master && priv->info->ops->autocast_mib &&
priv->info->ops->autocast_mib(ds, port, data) > 0)
return;
for (i = 0; i < priv->info->mib_count; i++) {
mib = &ar8327_mib[i];
reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
ret = qca8k_read(priv, reg, &val);
if (ret < 0)
continue;
if (mib->size == 2) {
ret = qca8k_read(priv, reg + 4, &hi);
if (ret < 0)
continue;
}
data[i] = val;
if (mib->size == 2)
data[i] |= (u64)hi << 32;
}
}
int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct qca8k_priv *priv = ds->priv;
if (sset != ETH_SS_STATS)
return 0;
return priv->info->mib_count;
}
int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *eee)
{
u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
struct qca8k_priv *priv = ds->priv;
u32 reg;
int ret;
mutex_lock(&priv->reg_mutex);
	ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
if (ret < 0)
goto exit;
if (eee->eee_enabled)
reg |= lpi_en;
else
reg &= ~lpi_en;
ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
exit:
mutex_unlock(&priv->reg_mutex);
return ret;
}
int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
/* Nothing to do on the port's MAC */
return 0;
}
static int qca8k_port_configure_learning(struct dsa_switch *ds, int port,
bool learning)
{
struct qca8k_priv *priv = ds->priv;
if (learning)
return regmap_set_bits(priv->regmap,
QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_LEARN);
else
return regmap_clear_bits(priv->regmap,
QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_LEARN);
}
void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct qca8k_priv *priv = ds->priv;
bool learning = false;
u32 stp_state;
switch (state) {
case BR_STATE_DISABLED:
stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
break;
case BR_STATE_BLOCKING:
stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
break;
case BR_STATE_LISTENING:
stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
break;
case BR_STATE_LEARNING:
stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
learning = dp->learning;
break;
case BR_STATE_FORWARDING:
learning = dp->learning;
fallthrough;
default:
stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
break;
}
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
qca8k_port_configure_learning(ds, port, learning);
}
int qca8k_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
if (flags.mask & ~BR_LEARNING)
return -EINVAL;
return 0;
}
int qca8k_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
int ret;
if (flags.mask & BR_LEARNING) {
ret = qca8k_port_configure_learning(ds, port,
flags.val & BR_LEARNING);
if (ret)
return ret;
}
return 0;
}
int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = ds->priv;
int port_mask, cpu_port;
int i, ret;
cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
port_mask = BIT(cpu_port);
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
if (dsa_is_cpu_port(ds, i))
continue;
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Add this port to the portvlan mask of the other ports
* in the bridge
*/
ret = regmap_set_bits(priv->regmap,
QCA8K_PORT_LOOKUP_CTRL(i),
BIT(port));
if (ret)
return ret;
if (i != port)
port_mask |= BIT(i);
}
/* Add all other ports to this ports portvlan mask */
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_MEMBER, port_mask);
return ret;
}
void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct qca8k_priv *priv = ds->priv;
int cpu_port, i;
cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
if (dsa_is_cpu_port(ds, i))
continue;
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
		/* Remove this port from the portvlan mask of the other ports
		 * in the bridge
*/
regmap_clear_bits(priv->regmap,
QCA8K_PORT_LOOKUP_CTRL(i),
BIT(port));
}
/* Set the cpu port to be the only one in the portvlan mask of
* this port
*/
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
}
void qca8k_port_fast_age(struct dsa_switch *ds, int port)
{
struct qca8k_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
mutex_unlock(&priv->reg_mutex);
}
int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
struct qca8k_priv *priv = ds->priv;
unsigned int secs = msecs / 1000;
u32 val;
/* AGE_TIME reg is set in 7s step */
val = secs / 7;
/* Handle case with 0 as val to NOT disable
* learning
*/
if (!val)
val = 1;
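	/* Worked example (illustrative only): a requested ageing time of
	 * 300000 ms is 300 s, giving val = 300 / 7 = 42, i.e. an effective
	 * hardware ageing time of 42 * 7 = 294 s.
	 */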
return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
QCA8K_ATU_AGE_TIME_MASK,
QCA8K_ATU_AGE_TIME(val));
}
int qca8k_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct qca8k_priv *priv = ds->priv;
qca8k_port_set_status(priv, port, 1);
priv->port_enabled_map |= BIT(port);
if (dsa_is_user_port(ds, port))
phy_support_asym_pause(phy);
return 0;
}
void qca8k_port_disable(struct dsa_switch *ds, int port)
{
struct qca8k_priv *priv = ds->priv;
qca8k_port_set_status(priv, port, 0);
priv->port_enabled_map &= ~BIT(port);
}
int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct qca8k_priv *priv = ds->priv;
int ret;
	/* We only have a general MTU setting.
	 * DSA always sets the CPU port's MTU to the largest MTU of the slave
* ports.
* Setting MTU just for the CPU port is sufficient to correctly set a
* value for every port.
*/
if (!dsa_is_cpu_port(ds, port))
return 0;
/* To change the MAX_FRAME_SIZE the cpu ports must be off or
* the switch panics.
* Turn off both cpu ports before applying the new value to prevent
* this.
*/
if (priv->port_enabled_map & BIT(0))
qca8k_port_set_status(priv, 0, 0);
if (priv->port_enabled_map & BIT(6))
qca8k_port_set_status(priv, 6, 0);
/* Include L2 header / FCS length */
ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
ETH_HLEN + ETH_FCS_LEN);
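	/* For example, a new_mtu of 1500 programs 1500 + 14 (ETH_HLEN) +
	 * 4 (ETH_FCS_LEN) = 1518 bytes into MAX_FRAME_SIZE.
	 */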
if (priv->port_enabled_map & BIT(0))
qca8k_port_set_status(priv, 0, 1);
if (priv->port_enabled_map & BIT(6))
qca8k_port_set_status(priv, 6, 1);
return ret;
}
int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
{
return QCA8K_MAX_MTU;
}
int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
u16 port_mask, u16 vid)
{
/* Set the vid to the port vlan id if no vid is set */
if (!vid)
vid = QCA8K_PORT_VID_DEF;
return qca8k_fdb_add(priv, addr, port_mask, vid,
QCA8K_ATU_STATUS_STATIC);
}
int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct qca8k_priv *priv = ds->priv;
u16 port_mask = BIT(port);
return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
}
int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct qca8k_priv *priv = ds->priv;
u16 port_mask = BIT(port);
if (!vid)
vid = QCA8K_PORT_VID_DEF;
return qca8k_fdb_del(priv, addr, port_mask, vid);
}
int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct qca8k_priv *priv = ds->priv;
struct qca8k_fdb _fdb = { 0 };
int cnt = QCA8K_NUM_FDB_RECORDS;
bool is_static;
int ret = 0;
mutex_lock(&priv->reg_mutex);
while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
if (!_fdb.aging)
break;
is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
ret = cb(_fdb.mac, _fdb.vid, is_static, data);
if (ret)
break;
}
mutex_unlock(&priv->reg_mutex);
return 0;
}
int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct qca8k_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
u16 vid = mdb->vid;
if (!vid)
vid = QCA8K_PORT_VID_DEF;
return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid,
QCA8K_ATU_STATUS_STATIC);
}
int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct qca8k_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
u16 vid = mdb->vid;
if (!vid)
vid = QCA8K_PORT_VID_DEF;
return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
}
int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = ds->priv;
int monitor_port, ret;
u32 reg, val;
/* Check for existent entry */
if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
return -EEXIST;
ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
if (ret)
return ret;
/* QCA83xx can have only one port set to mirror mode.
* Check that the correct port is requested and return error otherwise.
	 * When no mirror port is set, the value is set to 0xF.
*/
monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
return -EEXIST;
/* Set the monitor port */
val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
mirror->to_local_port);
ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
if (ret)
return ret;
if (ingress) {
reg = QCA8K_PORT_LOOKUP_CTRL(port);
val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
} else {
reg = QCA8K_REG_PORT_HOL_CTRL1(port);
val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
}
ret = regmap_update_bits(priv->regmap, reg, val, val);
if (ret)
return ret;
/* Track mirror port for tx and rx to decide when the
* mirror port has to be disabled.
*/
if (ingress)
priv->mirror_rx |= BIT(port);
else
priv->mirror_tx |= BIT(port);
return 0;
}
void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
struct qca8k_priv *priv = ds->priv;
u32 reg, val;
int ret;
if (mirror->ingress) {
reg = QCA8K_PORT_LOOKUP_CTRL(port);
val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
} else {
reg = QCA8K_REG_PORT_HOL_CTRL1(port);
val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
}
ret = regmap_clear_bits(priv->regmap, reg, val);
if (ret)
goto err;
if (mirror->ingress)
priv->mirror_rx &= ~BIT(port);
else
priv->mirror_tx &= ~BIT(port);
/* No port set to send packet to mirror port. Disable mirror port */
if (!priv->mirror_rx && !priv->mirror_tx) {
val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
if (ret)
goto err;
}
	return;
err:
dev_err(priv->dev, "Failed to del mirror port from %d", port);
}
int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = ds->priv;
int ret;
if (vlan_filtering) {
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
} else {
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
}
return ret;
}
int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct qca8k_priv *priv = ds->priv;
int ret;
ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
if (ret) {
dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
return ret;
}
if (pvid) {
ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
QCA8K_EGREES_VLAN_PORT_MASK(port),
QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
if (ret)
return ret;
ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
QCA8K_PORT_VLAN_CVID(vlan->vid) |
QCA8K_PORT_VLAN_SVID(vlan->vid));
}
return ret;
}
int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct qca8k_priv *priv = ds->priv;
int ret;
ret = qca8k_vlan_del(priv, port, vlan->vid);
if (ret)
dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
return ret;
}
static bool qca8k_lag_can_offload(struct dsa_switch *ds,
struct dsa_lag lag,
struct netdev_lag_upper_info *info,
struct netlink_ext_ack *extack)
{
struct dsa_port *dp;
int members = 0;
if (!lag.id)
return false;
dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
if (members > QCA8K_NUM_PORTS_FOR_LAG) {
NL_SET_ERR_MSG_MOD(extack,
"Cannot offload more than 4 LAG ports");
return false;
}
if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
NL_SET_ERR_MSG_MOD(extack,
"Can only offload LAG using hash TX type");
return false;
}
if (info->hash_type != NETDEV_LAG_HASH_L2 &&
info->hash_type != NETDEV_LAG_HASH_L23) {
NL_SET_ERR_MSG_MOD(extack,
"Can only offload L2 or L2+L3 TX hash");
return false;
}
return true;
}
static int qca8k_lag_setup_hash(struct dsa_switch *ds,
struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct net_device *lag_dev = lag.dev;
struct qca8k_priv *priv = ds->priv;
bool unique_lag = true;
unsigned int i;
u32 hash = 0;
switch (info->hash_type) {
case NETDEV_LAG_HASH_L23:
hash |= QCA8K_TRUNK_HASH_SIP_EN;
hash |= QCA8K_TRUNK_HASH_DIP_EN;
fallthrough;
case NETDEV_LAG_HASH_L2:
hash |= QCA8K_TRUNK_HASH_SA_EN;
hash |= QCA8K_TRUNK_HASH_DA_EN;
break;
default: /* We should NEVER reach this */
return -EOPNOTSUPP;
}
/* Check if we are the unique configured LAG */
dsa_lags_foreach_id(i, ds->dst)
if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
unique_lag = false;
break;
}
	/* The hash mode is global. Make sure the same hash mode
	 * is set for all 4 possible LAGs.
	 * If we are the only configured LAG we can set whatever
	 * hash mode we want.
	 * To change the hash mode, all LAGs must first be removed
	 * and re-created with the new mode.
	 */
if (unique_lag) {
priv->lag_hash_mode = hash;
} else if (priv->lag_hash_mode != hash) {
netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
return -EOPNOTSUPP;
}
return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
QCA8K_TRUNK_HASH_MASK, hash);
}
static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
struct dsa_lag lag, bool delete)
{
struct qca8k_priv *priv = ds->priv;
int ret, id, i;
u32 val;
/* DSA LAG IDs are one-based, hardware is zero-based */
id = lag.id - 1;
/* Read current port member */
ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
if (ret)
return ret;
/* Shift val to the correct trunk */
val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
if (delete)
val &= ~BIT(port);
else
val |= BIT(port);
/* Update port member. With empty portmap disable trunk */
ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
QCA8K_REG_GOL_TRUNK_MEMBER(id) |
QCA8K_REG_GOL_TRUNK_EN(id),
!val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
	/* Search for an empty member if adding, or for the port if deleting */
for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
if (ret)
return ret;
val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
if (delete) {
/* If port flagged to be disabled assume this member is
* empty
*/
if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
continue;
val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
if (val != port)
continue;
} else {
/* If port flagged to be enabled assume this member is
* already set
*/
if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
continue;
}
/* We have found the member to add/remove */
break;
}
/* Set port in the correct port mask or disable port if in delete mode */
return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
!delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
}
int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info,
struct netlink_ext_ack *extack)
{
int ret;
if (!qca8k_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
ret = qca8k_lag_setup_hash(ds, lag, info);
if (ret)
return ret;
return qca8k_lag_refresh_portmap(ds, port, lag, false);
}
int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
struct dsa_lag lag)
{
return qca8k_lag_refresh_portmap(ds, port, lag, true);
}
int qca8k_read_switch_id(struct qca8k_priv *priv)
{
u32 val;
u8 id;
int ret;
if (!priv->info)
return -ENODEV;
ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
if (ret < 0)
return -ENODEV;
id = QCA8K_MASK_CTRL_DEVICE_ID(val);
if (id != priv->info->id) {
dev_err(priv->dev,
"Switch id detected %x but expected %x",
id, priv->info->id);
return -ENODEV;
}
priv->switch_id = id;
/* Save revision to communicate to the internal PHY driver */
priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
return 0;
}
| linux-master | drivers/net/dsa/qca/qca8k-common.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <[email protected]>
*/
#include "sja1105.h"
#define SJA1105_TAS_CLKSRC_DISABLED 0
#define SJA1105_TAS_CLKSRC_STANDALONE 1
#define SJA1105_TAS_CLKSRC_AS6802 2
#define SJA1105_TAS_CLKSRC_PTP 3
#define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0)
#define work_to_sja1105_tas(d) \
container_of((d), struct sja1105_tas_data, tas_work)
#define tas_to_sja1105(d) \
container_of((d), struct sja1105_private, tas_data)
static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
{
struct sja1105_tas_data *tas_data = &priv->tas_data;
struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
struct dsa_switch *ds = priv->ds;
s64 earliest_base_time = S64_MAX;
s64 latest_base_time = 0;
s64 its_cycle_time = 0;
s64 max_cycle_time = 0;
int port;
tas_data->enabled = false;
for (port = 0; port < ds->num_ports; port++) {
const struct tc_taprio_qopt_offload *offload;
offload = tas_data->offload[port];
if (!offload)
continue;
tas_data->enabled = true;
if (max_cycle_time < offload->cycle_time)
max_cycle_time = offload->cycle_time;
if (latest_base_time < offload->base_time)
latest_base_time = offload->base_time;
if (earliest_base_time > offload->base_time) {
earliest_base_time = offload->base_time;
its_cycle_time = offload->cycle_time;
}
}
if (!list_empty(&gating_cfg->entries)) {
tas_data->enabled = true;
if (max_cycle_time < gating_cfg->cycle_time)
max_cycle_time = gating_cfg->cycle_time;
if (latest_base_time < gating_cfg->base_time)
latest_base_time = gating_cfg->base_time;
if (earliest_base_time > gating_cfg->base_time) {
earliest_base_time = gating_cfg->base_time;
its_cycle_time = gating_cfg->cycle_time;
}
}
if (!tas_data->enabled)
return 0;
/* Roll the earliest base time over until it is in a comparable
* time base with the latest, then compare their deltas.
* We want to enforce that all ports' base times are within
	 * SJA1105_TAS_MAX_DELTA deltas (of 200 ns each) of one another.
*/
earliest_base_time = future_base_time(earliest_base_time,
its_cycle_time,
latest_base_time);
while (earliest_base_time > latest_base_time)
earliest_base_time -= its_cycle_time;
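	/* Worked example (illustrative numbers): with an earliest base time
	 * of 1.0 s, a cycle time of 200 us and a latest base time of
	 * 2.00007 s, future_base_time() rolls the earliest base time forward
	 * to 2.0002 s and the loop above steps it back to 2.0 s, leaving a
	 * 70 us difference to be checked against the limit below.
	 */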
if (latest_base_time - earliest_base_time >
sja1105_delta_to_ns(SJA1105_TAS_MAX_DELTA)) {
dev_err(ds->dev,
"Base times too far apart: min %llu max %llu\n",
earliest_base_time, latest_base_time);
return -ERANGE;
}
tas_data->earliest_base_time = earliest_base_time;
tas_data->max_cycle_time = max_cycle_time;
dev_dbg(ds->dev, "earliest base time %lld ns\n", earliest_base_time);
dev_dbg(ds->dev, "latest base time %lld ns\n", latest_base_time);
dev_dbg(ds->dev, "longest cycle time %lld ns\n", max_cycle_time);
return 0;
}
/* Lo and behold: the egress scheduler from hell.
*
 * At the hardware level, the Time-Aware Shaper holds a global linear array of
* all schedule entries for all ports. These are the Gate Control List (GCL)
* entries, let's call them "timeslots" for short. This linear array of
* timeslots is held in BLK_IDX_SCHEDULE.
*
* Then there are a maximum of 8 "execution threads" inside the switch, which
* iterate cyclically through the "schedule". Each "cycle" has an entry point
* and an exit point, both being timeslot indices in the schedule table. The
* hardware calls each cycle a "subschedule".
*
* Subschedule (cycle) i starts when
* ptpclkval >= ptpschtm + BLK_IDX_SCHEDULE_ENTRY_POINTS[i].delta.
*
* The hardware scheduler iterates BLK_IDX_SCHEDULE with a k ranging from
* k = BLK_IDX_SCHEDULE_ENTRY_POINTS[i].address to
* k = BLK_IDX_SCHEDULE_PARAMS.subscheind[i]
*
* For each schedule entry (timeslot) k, the engine executes the gate control
* list entry for the duration of BLK_IDX_SCHEDULE[k].delta.
*
* +---------+
* | | BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS
* +---------+
* |
* +-----------------+
* | .actsubsch
* BLK_IDX_SCHEDULE_ENTRY_POINTS v
* +-------+-------+
* |cycle 0|cycle 1|
* +-------+-------+
* | | | |
* +----------------+ | | +-------------------------------------+
* | .subschindx | | .subschindx |
* | | +---------------+ |
* | .address | .address | |
* | | | |
* | | | |
* | BLK_IDX_SCHEDULE v v |
* | +-------+-------+-------+-------+-------+------+ |
* | |entry 0|entry 1|entry 2|entry 3|entry 4|entry5| |
* | +-------+-------+-------+-------+-------+------+ |
* | ^ ^ ^ ^ |
* | | | | | |
* | +-------------------------+ | | | |
* | | +-------------------------------+ | | |
* | | | +-------------------+ | |
* | | | | | |
* | +---------------------------------------------------------------+ |
* | |subscheind[0]<=subscheind[1]<=subscheind[2]<=...<=subscheind[7]| |
* | +---------------------------------------------------------------+ |
* | ^ ^ BLK_IDX_SCHEDULE_PARAMS |
* | | | |
* +--------+ +-------------------------------------------+
*
* In the above picture there are two subschedules (cycles):
*
* - cycle 0: iterates the schedule table from 0 to 2 (and back)
* - cycle 1: iterates the schedule table from 3 to 5 (and back)
*
* All other possible execution threads must be marked as unused by making
* their "subschedule end index" (subscheind) equal to the last valid
* subschedule's end index (in this case 5).
*/
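/* To make the subscheind rule above concrete: with the two cycles from the
 * figure (cycle 0 covering schedule entries 0..2 and cycle 1 covering 3..5),
 * subscheind would be programmed as {2, 5, 5, 5, 5, 5, 5, 5}: the unused
 * execution threads all point at the last valid end index.
 */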
int sja1105_init_scheduling(struct sja1105_private *priv)
{
struct sja1105_schedule_entry_points_entry *schedule_entry_points;
struct sja1105_schedule_entry_points_params_entry
*schedule_entry_points_params;
struct sja1105_schedule_params_entry *schedule_params;
struct sja1105_tas_data *tas_data = &priv->tas_data;
struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
struct sja1105_schedule_entry *schedule;
struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int schedule_start_idx;
s64 entry_point_delta;
int schedule_end_idx;
int num_entries = 0;
int num_cycles = 0;
int cycle = 0;
int i, k = 0;
int port, rc;
rc = sja1105_tas_set_runtime_params(priv);
if (rc < 0)
return rc;
/* Discard previous Schedule Table */
table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
/* Discard previous Schedule Entry Points Parameters Table */
table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
/* Discard previous Schedule Parameters Table */
table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
/* Discard previous Schedule Entry Points Table */
table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
/* Figure out the dimensioning of the problem */
for (port = 0; port < ds->num_ports; port++) {
if (tas_data->offload[port]) {
num_entries += tas_data->offload[port]->num_entries;
num_cycles++;
}
}
if (!list_empty(&gating_cfg->entries)) {
num_entries += gating_cfg->num_entries;
num_cycles++;
}
/* Nothing to do */
if (!num_cycles)
return 0;
/* Pre-allocate space in the static config tables */
/* Schedule Table */
table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size,
GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = num_entries;
schedule = table->entries;
/* Schedule Points Parameters Table */
table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
/* Previously allocated memory will be freed automatically in
* sja1105_static_config_free. This is true for all early
* returns below.
*/
return -ENOMEM;
table->entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT;
schedule_entry_points_params = table->entries;
/* Schedule Parameters Table */
table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT;
schedule_params = table->entries;
/* Schedule Entry Points Table */
table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size,
GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = num_cycles;
schedule_entry_points = table->entries;
/* Finally start populating the static config tables */
schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
schedule_entry_points_params->actsubsch = num_cycles - 1;
for (port = 0; port < ds->num_ports; port++) {
const struct tc_taprio_qopt_offload *offload;
/* Relative base time */
s64 rbt;
offload = tas_data->offload[port];
if (!offload)
continue;
schedule_start_idx = k;
schedule_end_idx = k + offload->num_entries - 1;
/* This is the base time expressed as a number of TAS ticks
* relative to PTPSCHTM, which we'll (perhaps improperly) call
* the operational base time.
*/
rbt = future_base_time(offload->base_time,
offload->cycle_time,
tas_data->earliest_base_time);
rbt -= tas_data->earliest_base_time;
/* UM10944.pdf 4.2.2. Schedule Entry Points table says that
* delta cannot be zero, which is shitty. Advance all relative
* base times by 1 TAS delta, so that even the earliest base
* time becomes 1 in relative terms. Then start the operational
* base time (PTPSCHTM) one TAS delta earlier than planned.
*/
entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
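		/* Example: for the port owning the earliest base time, rbt is
		 * 0 and entry_point_delta becomes 1; since PTPSCHTM is later
		 * programmed one delta (200 ns) early, the absolute start
		 * time is preserved.
		 */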
schedule_entry_points[cycle].subschindx = cycle;
schedule_entry_points[cycle].delta = entry_point_delta;
schedule_entry_points[cycle].address = schedule_start_idx;
/* The subschedule end indices need to be
* monotonically increasing.
*/
for (i = cycle; i < 8; i++)
schedule_params->subscheind[i] = schedule_end_idx;
for (i = 0; i < offload->num_entries; i++, k++) {
s64 delta_ns = offload->entries[i].interval;
schedule[k].delta = ns_to_sja1105_delta(delta_ns);
schedule[k].destports = BIT(port);
schedule[k].resmedia_en = true;
schedule[k].resmedia = SJA1105_GATE_MASK &
~offload->entries[i].gate_mask;
}
cycle++;
}
if (!list_empty(&gating_cfg->entries)) {
struct sja1105_gate_entry *e;
/* Relative base time */
s64 rbt;
schedule_start_idx = k;
schedule_end_idx = k + gating_cfg->num_entries - 1;
rbt = future_base_time(gating_cfg->base_time,
gating_cfg->cycle_time,
tas_data->earliest_base_time);
rbt -= tas_data->earliest_base_time;
entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
schedule_entry_points[cycle].subschindx = cycle;
schedule_entry_points[cycle].delta = entry_point_delta;
schedule_entry_points[cycle].address = schedule_start_idx;
for (i = cycle; i < 8; i++)
schedule_params->subscheind[i] = schedule_end_idx;
list_for_each_entry(e, &gating_cfg->entries, list) {
schedule[k].delta = ns_to_sja1105_delta(e->interval);
schedule[k].destports = e->rule->vl.destports;
schedule[k].setvalid = true;
schedule[k].txen = true;
schedule[k].vlindex = e->rule->vl.sharindx;
schedule[k].winstindex = e->rule->vl.sharindx;
if (e->gate_state) /* Gate open */
schedule[k].winst = true;
else /* Gate closed */
schedule[k].winend = true;
k++;
}
}
return 0;
}
/* Be there 2 port subschedules, each executing an arbitrary number of gate
* open/close events cyclically.
* None of those gate events must ever occur at the exact same time, otherwise
* the switch is known to act in exotically strange ways.
* However the hardware doesn't bother performing these integrity checks.
* So here we are with the task of validating whether the new @admin offload
* has any conflict with the already established TAS configuration in
* tas_data->offload. We already know the other ports are in harmony with one
* another, otherwise we wouldn't have saved them.
* Each gate event executes periodically, with a period of @cycle_time and a
* phase given by its cycle's @base_time plus its offset within the cycle
* (which in turn is given by the length of the events prior to it).
* There are two aspects to possible collisions:
* - Collisions within one cycle's (actually the longest cycle's) time frame.
* For that, we need to compare the cartesian product of each possible
* occurrence of each event within one cycle time.
* - Collisions in the future. Events may not collide within one cycle time,
* but if two port schedules don't have the same periodicity (aka the cycle
* times aren't multiples of one another), they surely will some time in the
* future (actually they will collide an infinite amount of times).
*/
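/* A quick numeric illustration of the first check below: cycle times of
 * 200 us and 300 us are not integer multiples of one another, so the two
 * schedules are reported as conflicting right away; 200 us and 400 us pass
 * that check, and the cartesian-product comparison of the individual gate
 * events is performed instead.
 */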
static bool
sja1105_tas_check_conflicts(struct sja1105_private *priv, int port,
const struct tc_taprio_qopt_offload *admin)
{
struct sja1105_tas_data *tas_data = &priv->tas_data;
const struct tc_taprio_qopt_offload *offload;
s64 max_cycle_time, min_cycle_time;
s64 delta1, delta2;
s64 rbt1, rbt2;
s64 stop_time;
s64 t1, t2;
int i, j;
s32 rem;
offload = tas_data->offload[port];
if (!offload)
return false;
/* Check if the two cycle times are multiples of one another.
* If they aren't, then they will surely collide.
*/
max_cycle_time = max(offload->cycle_time, admin->cycle_time);
min_cycle_time = min(offload->cycle_time, admin->cycle_time);
div_s64_rem(max_cycle_time, min_cycle_time, &rem);
if (rem)
return true;
/* Calculate the "reduced" base time of each of the two cycles
* (transposed back as close to 0 as possible) by dividing to
* the cycle time.
*/
div_s64_rem(offload->base_time, offload->cycle_time, &rem);
rbt1 = rem;
div_s64_rem(admin->base_time, admin->cycle_time, &rem);
rbt2 = rem;
stop_time = max_cycle_time + max(rbt1, rbt2);
/* delta1 is the relative base time of each GCL entry within
* the established ports' TAS config.
*/
for (i = 0, delta1 = 0;
i < offload->num_entries;
delta1 += offload->entries[i].interval, i++) {
/* delta2 is the relative base time of each GCL entry
* within the newly added TAS config.
*/
for (j = 0, delta2 = 0;
j < admin->num_entries;
delta2 += admin->entries[j].interval, j++) {
/* t1 follows all possible occurrences of the
* established ports' GCL entry i within the
* first cycle time.
*/
for (t1 = rbt1 + delta1;
t1 <= stop_time;
t1 += offload->cycle_time) {
/* t2 follows all possible occurrences
* of the newly added GCL entry j
* within the first cycle time.
*/
for (t2 = rbt2 + delta2;
t2 <= stop_time;
t2 += admin->cycle_time) {
if (t1 == t2) {
dev_warn(priv->ds->dev,
"GCL entry %d collides with entry %d of port %d\n",
j, i, port);
return true;
}
}
}
}
}
return false;
}
/* Check the tc-taprio configuration on @port for conflicts with the tc-gate
* global subschedule. If @port is -1, check it against all ports.
* To reuse the sja1105_tas_check_conflicts logic without refactoring it,
* convert the gating configuration to a dummy tc-taprio offload structure.
*/
bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
struct netlink_ext_ack *extack)
{
struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
size_t num_entries = gating_cfg->num_entries;
struct tc_taprio_qopt_offload *dummy;
struct dsa_switch *ds = priv->ds;
struct sja1105_gate_entry *e;
bool conflict;
int i = 0;
if (list_empty(&gating_cfg->entries))
return false;
dummy = kzalloc(struct_size(dummy, entries, num_entries), GFP_KERNEL);
if (!dummy) {
NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory");
return true;
}
dummy->num_entries = num_entries;
dummy->base_time = gating_cfg->base_time;
dummy->cycle_time = gating_cfg->cycle_time;
list_for_each_entry(e, &gating_cfg->entries, list)
dummy->entries[i++].interval = e->interval;
if (port != -1) {
conflict = sja1105_tas_check_conflicts(priv, port, dummy);
} else {
for (port = 0; port < ds->num_ports; port++) {
conflict = sja1105_tas_check_conflicts(priv, port,
dummy);
if (conflict)
break;
}
}
kfree(dummy);
return conflict;
}
int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
struct tc_taprio_qopt_offload *admin)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_tas_data *tas_data = &priv->tas_data;
int other_port, rc, i;
/* Can't change an already configured port (must delete qdisc first).
* Can't delete the qdisc from an unconfigured port.
*/
if ((!!tas_data->offload[port] && admin->cmd == TAPRIO_CMD_REPLACE) ||
(!tas_data->offload[port] && admin->cmd == TAPRIO_CMD_DESTROY))
return -EINVAL;
if (admin->cmd == TAPRIO_CMD_DESTROY) {
taprio_offload_free(tas_data->offload[port]);
tas_data->offload[port] = NULL;
rc = sja1105_init_scheduling(priv);
if (rc < 0)
return rc;
return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
} else if (admin->cmd != TAPRIO_CMD_REPLACE) {
return -EOPNOTSUPP;
}
/* The cycle time extension is the amount of time the last cycle from
* the old OPER needs to be extended in order to phase-align with the
* base time of the ADMIN when that becomes the new OPER.
* But of course our switch needs to be reset to switch-over between
* the ADMIN and the OPER configs - so much for a seamless transition.
* So don't add insult over injury and just say we don't support cycle
* time extension.
*/
if (admin->cycle_time_extension)
return -ENOTSUPP;
for (i = 0; i < admin->num_entries; i++) {
s64 delta_ns = admin->entries[i].interval;
s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
bool too_long, too_short;
too_long = (delta_cycles >= SJA1105_TAS_MAX_DELTA);
too_short = (delta_cycles == 0);
if (too_long || too_short) {
dev_err(priv->ds->dev,
"Interval %llu too %s for GCL entry %d\n",
delta_ns, too_long ? "long" : "short", i);
return -ERANGE;
}
}
for (other_port = 0; other_port < ds->num_ports; other_port++) {
if (other_port == port)
continue;
if (sja1105_tas_check_conflicts(priv, other_port, admin))
return -ERANGE;
}
if (sja1105_gating_check_conflicts(priv, port, NULL)) {
dev_err(ds->dev, "Conflict with tc-gate schedule\n");
return -ERANGE;
}
tas_data->offload[port] = taprio_offload_get(admin);
rc = sja1105_init_scheduling(priv);
if (rc < 0)
return rc;
return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
}
static int sja1105_tas_check_running(struct sja1105_private *priv)
{
struct sja1105_tas_data *tas_data = &priv->tas_data;
struct dsa_switch *ds = priv->ds;
struct sja1105_ptp_cmd cmd = {0};
int rc;
rc = sja1105_ptp_commit(ds, &cmd, SPI_READ);
if (rc < 0)
return rc;
if (cmd.ptpstrtsch == 1)
/* Schedule successfully started */
tas_data->state = SJA1105_TAS_STATE_RUNNING;
else if (cmd.ptpstopsch == 1)
/* Schedule is stopped */
tas_data->state = SJA1105_TAS_STATE_DISABLED;
else
/* Schedule is probably not configured with PTP clock source */
rc = -EINVAL;
return rc;
}
/* Write to PTPCLKCORP */
static int sja1105_tas_adjust_drift(struct sja1105_private *priv,
u64 correction)
{
const struct sja1105_regs *regs = priv->info->regs;
u32 ptpclkcorp = ns_to_sja1105_ticks(correction);
return sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkcorp,
&ptpclkcorp, NULL);
}
/* Write to PTPSCHTM */
static int sja1105_tas_set_base_time(struct sja1105_private *priv,
u64 base_time)
{
const struct sja1105_regs *regs = priv->info->regs;
u64 ptpschtm = ns_to_sja1105_ticks(base_time);
return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpschtm,
&ptpschtm, NULL);
}
static int sja1105_tas_start(struct sja1105_private *priv)
{
struct sja1105_tas_data *tas_data = &priv->tas_data;
struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
struct dsa_switch *ds = priv->ds;
int rc;
dev_dbg(ds->dev, "Starting the TAS\n");
if (tas_data->state == SJA1105_TAS_STATE_ENABLED_NOT_RUNNING ||
tas_data->state == SJA1105_TAS_STATE_RUNNING) {
dev_err(ds->dev, "TAS already started\n");
return -EINVAL;
}
cmd->ptpstrtsch = 1;
cmd->ptpstopsch = 0;
rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
if (rc < 0)
return rc;
tas_data->state = SJA1105_TAS_STATE_ENABLED_NOT_RUNNING;
return 0;
}
static int sja1105_tas_stop(struct sja1105_private *priv)
{
struct sja1105_tas_data *tas_data = &priv->tas_data;
struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
struct dsa_switch *ds = priv->ds;
int rc;
dev_dbg(ds->dev, "Stopping the TAS\n");
if (tas_data->state == SJA1105_TAS_STATE_DISABLED) {
dev_err(ds->dev, "TAS already disabled\n");
return -EINVAL;
}
cmd->ptpstopsch = 1;
cmd->ptpstrtsch = 0;
rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
if (rc < 0)
return rc;
tas_data->state = SJA1105_TAS_STATE_DISABLED;
return 0;
}
/* The schedule engine and the PTP clock are driven by the same oscillator, and
* they run in parallel. But whilst the PTP clock can keep an absolute
* time-of-day, the schedule engine is only running in 'ticks' (25 ticks make
* up a delta, which is 200ns), and wrapping around at the end of each cycle.
* The schedule engine is started when the PTP clock reaches the PTPSCHTM time
* (in PTP domain).
* Because the PTP clock can be rate-corrected (accelerated or slowed down) by
* a software servo, and the schedule engine clock runs in parallel to the PTP
* clock, there is logic internal to the switch that periodically keeps the
* schedule engine from drifting away. The frequency with which this internal
* syntonization happens is the PTP clock correction period (PTPCLKCORP). It is
* a value also in the PTP clock domain, and is also rate-corrected.
* To be precise, during a correction period, there is logic to determine by
* how many scheduler clock ticks has the PTP clock drifted. At the end of each
* correction period/beginning of new one, the length of a delta is shrunk or
* expanded with an integer number of ticks, compared with the typical 25.
* So a delta lasts for 200ns (or 25 ticks) only on average.
* Sometimes it is longer, sometimes it is shorter. The internal syntonization
* logic can adjust for at most 5 ticks each 20 ticks.
*
* The first implication is that you should choose your schedule correction
* period to be an integer multiple of the schedule length. Preferably one.
* In case there are schedules of multiple ports active, then the correction
* period needs to be a multiple of them all. Given the restriction that the
* cycle times have to be multiples of one another anyway, this means the
* correction period can simply be the largest cycle time, hence the current
* choice. This way, the updates are always synchronous to the transmission
* cycle, and therefore predictable.
*
* The second implication is that at the beginning of a correction period, the
* first few deltas will be modulated in time, until the schedule engine is
* properly phase-aligned with the PTP clock. For this reason, you should place
* your best-effort traffic at the beginning of a cycle, and your
* time-triggered traffic afterwards.
*
* The third implication is that once the schedule engine is started, it can
* only adjust for so much drift within a correction period. In the servo you
* can only change the PTPCLKRATE, but not step the clock (PTPCLKADD). If you
* want to do the latter, you need to stop and restart the schedule engine,
* which is what the state machine handles.
*/
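/* As a concrete illustration of the first point above: if port A runs a
 * 250 us cycle and port B a 500 us cycle, the correction period written to
 * PTPCLKCORP is 500 us (the largest cycle time, see tas_data->max_cycle_time
 * in the state machine below), which is also a whole multiple of the 250 us
 * schedule.
 */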
static void sja1105_tas_state_machine(struct work_struct *work)
{
struct sja1105_tas_data *tas_data = work_to_sja1105_tas(work);
struct sja1105_private *priv = tas_to_sja1105(tas_data);
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct timespec64 base_time_ts, now_ts;
struct dsa_switch *ds = priv->ds;
struct timespec64 diff;
s64 base_time, now;
int rc = 0;
mutex_lock(&ptp_data->lock);
switch (tas_data->state) {
case SJA1105_TAS_STATE_DISABLED:
/* Can't do anything at all if clock is still being stepped */
if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ)
break;
rc = sja1105_tas_adjust_drift(priv, tas_data->max_cycle_time);
if (rc < 0)
break;
rc = __sja1105_ptp_gettimex(ds, &now, NULL);
if (rc < 0)
break;
/* Plan to start the earliest schedule first. The others
* will be started in hardware, by way of their respective
* entry points delta.
* Try our best to avoid fringe cases (race condition between
* ptpschtm and ptpstrtsch) by pushing the oper_base_time at
* least one second in the future from now. This is not ideal,
* but this only needs to buy us time until the
* sja1105_tas_start command below gets executed.
*/
base_time = future_base_time(tas_data->earliest_base_time,
tas_data->max_cycle_time,
now + 1ull * NSEC_PER_SEC);
base_time -= sja1105_delta_to_ns(1);
rc = sja1105_tas_set_base_time(priv, base_time);
if (rc < 0)
break;
tas_data->oper_base_time = base_time;
rc = sja1105_tas_start(priv);
if (rc < 0)
break;
base_time_ts = ns_to_timespec64(base_time);
now_ts = ns_to_timespec64(now);
dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
base_time_ts.tv_sec, base_time_ts.tv_nsec,
now_ts.tv_sec, now_ts.tv_nsec);
break;
case SJA1105_TAS_STATE_ENABLED_NOT_RUNNING:
if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
/* Clock was stepped.. bad news for TAS */
sja1105_tas_stop(priv);
break;
}
/* Check if TAS has actually started, by comparing the
* scheduled start time with the SJA1105 PTP clock
*/
rc = __sja1105_ptp_gettimex(ds, &now, NULL);
if (rc < 0)
break;
if (now < tas_data->oper_base_time) {
/* TAS has not started yet */
diff = ns_to_timespec64(tas_data->oper_base_time - now);
dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
diff.tv_sec, diff.tv_nsec);
break;
}
/* Time elapsed, what happened? */
rc = sja1105_tas_check_running(priv);
if (rc < 0)
break;
if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
			/* TAS should have started by now, but hasn't */
dev_err(ds->dev,
"TAS not started despite time elapsed\n");
break;
case SJA1105_TAS_STATE_RUNNING:
/* Clock was stepped.. bad news for TAS */
if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
sja1105_tas_stop(priv);
break;
}
rc = sja1105_tas_check_running(priv);
if (rc < 0)
break;
if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
dev_err(ds->dev, "TAS surprisingly stopped\n");
break;
default:
if (net_ratelimit())
dev_err(ds->dev, "TAS in an invalid state (incorrect use of API)!\n");
}
if (rc && net_ratelimit())
dev_err(ds->dev, "An operation returned %d\n", rc);
mutex_unlock(&ptp_data->lock);
}
void sja1105_tas_clockstep(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_tas_data *tas_data = &priv->tas_data;
if (!tas_data->enabled)
return;
tas_data->last_op = SJA1105_PTP_CLOCKSTEP;
schedule_work(&tas_data->tas_work);
}
void sja1105_tas_adjfreq(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_tas_data *tas_data = &priv->tas_data;
if (!tas_data->enabled)
return;
/* No reason to schedule the workqueue, nothing changed */
if (tas_data->state == SJA1105_TAS_STATE_RUNNING)
return;
tas_data->last_op = SJA1105_PTP_ADJUSTFREQ;
schedule_work(&tas_data->tas_work);
}
void sja1105_tas_setup(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_tas_data *tas_data = &priv->tas_data;
INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine);
tas_data->state = SJA1105_TAS_STATE_DISABLED;
tas_data->last_op = SJA1105_PTP_NONE;
INIT_LIST_HEAD(&tas_data->gating_cfg.entries);
}
void sja1105_tas_teardown(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
struct tc_taprio_qopt_offload *offload;
int port;
cancel_work_sync(&priv->tas_data.tas_work);
for (port = 0; port < ds->num_ports; port++) {
offload = priv->tas_data.offload[port];
if (!offload)
continue;
taprio_offload_free(offload);
}
}
| linux-master | drivers/net/dsa/sja1105/sja1105_tas.c |
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
#include <linux/packing.h>
#include "sja1105.h"
#define SJA1105_SIZE_CGU_CMD 4
#define SJA1110_BASE_MCSS_CLK SJA1110_CGU_ADDR(0x70)
#define SJA1110_BASE_TIMER_CLK SJA1110_CGU_ADDR(0x74)
/* Common structure for CFG_PAD_MIIx_RX and CFG_PAD_MIIx_TX */
struct sja1105_cfg_pad_mii {
u64 d32_os;
u64 d32_ih;
u64 d32_ipud;
u64 d10_ih;
u64 d10_os;
u64 d10_ipud;
u64 ctrl_os;
u64 ctrl_ih;
u64 ctrl_ipud;
u64 clk_os;
u64 clk_ih;
u64 clk_ipud;
};
struct sja1105_cfg_pad_mii_id {
u64 rxc_stable_ovr;
u64 rxc_delay;
u64 rxc_bypass;
u64 rxc_pd;
u64 txc_stable_ovr;
u64 txc_delay;
u64 txc_bypass;
u64 txc_pd;
};
/* UM10944 Table 82.
* IDIV_0_C to IDIV_4_C control registers
* (addr. 10000Bh to 10000Fh)
*/
struct sja1105_cgu_idiv {
u64 clksrc;
u64 autoblock;
u64 idiv;
u64 pd;
};
/* PLL_1_C control register
*
* SJA1105 E/T: UM10944 Table 81 (address 10000Ah)
* SJA1105 P/Q/R/S: UM11040 Table 116 (address 10000Ah)
*/
struct sja1105_cgu_pll_ctrl {
u64 pllclksrc;
u64 msel;
u64 autoblock;
u64 psel;
u64 direct;
u64 fbsel;
u64 bypass;
u64 pd;
};
struct sja1110_cgu_outclk {
u64 clksrc;
u64 autoblock;
u64 pd;
};
enum {
CLKSRC_MII0_TX_CLK = 0x00,
CLKSRC_MII0_RX_CLK = 0x01,
CLKSRC_MII1_TX_CLK = 0x02,
CLKSRC_MII1_RX_CLK = 0x03,
CLKSRC_MII2_TX_CLK = 0x04,
CLKSRC_MII2_RX_CLK = 0x05,
CLKSRC_MII3_TX_CLK = 0x06,
CLKSRC_MII3_RX_CLK = 0x07,
CLKSRC_MII4_TX_CLK = 0x08,
CLKSRC_MII4_RX_CLK = 0x09,
CLKSRC_PLL0 = 0x0B,
CLKSRC_PLL1 = 0x0E,
CLKSRC_IDIV0 = 0x11,
CLKSRC_IDIV1 = 0x12,
CLKSRC_IDIV2 = 0x13,
CLKSRC_IDIV3 = 0x14,
CLKSRC_IDIV4 = 0x15,
};
/* UM10944 Table 83.
* MIIx clock control registers 1 to 30
* (addresses 100013h to 100035h)
*/
struct sja1105_cgu_mii_ctrl {
u64 clksrc;
u64 autoblock;
u64 pd;
};
static void sja1105_cgu_idiv_packing(void *buf, struct sja1105_cgu_idiv *idiv,
enum packing_op op)
{
const int size = 4;
sja1105_packing(buf, &idiv->clksrc, 28, 24, size, op);
sja1105_packing(buf, &idiv->autoblock, 11, 11, size, op);
sja1105_packing(buf, &idiv->idiv, 5, 2, size, op);
sja1105_packing(buf, &idiv->pd, 0, 0, size, op);
}
static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
bool enabled, int factor)
{
const struct sja1105_regs *regs = priv->info->regs;
struct device *dev = priv->ds->dev;
struct sja1105_cgu_idiv idiv;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
if (regs->cgu_idiv[port] == SJA1105_RSV_ADDR)
return 0;
if (enabled && factor != 1 && factor != 10) {
dev_err(dev, "idiv factor must be 1 or 10\n");
return -ERANGE;
}
/* Payload for packed_buf */
idiv.clksrc = 0x0A; /* 25MHz */
idiv.autoblock = 1; /* Block clk automatically */
idiv.idiv = factor - 1; /* Divide by 1 or 10 */
idiv.pd = enabled ? 0 : 1; /* Power down? */
sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->cgu_idiv[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
static void
sja1105_cgu_mii_control_packing(void *buf, struct sja1105_cgu_mii_ctrl *cmd,
enum packing_op op)
{
const int size = 4;
sja1105_packing(buf, &cmd->clksrc, 28, 24, size, op);
sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
}
static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
int port, sja1105_mii_role_t role)
{
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl mii_tx_clk;
const int mac_clk_sources[] = {
CLKSRC_MII0_TX_CLK,
CLKSRC_MII1_TX_CLK,
CLKSRC_MII2_TX_CLK,
CLKSRC_MII3_TX_CLK,
CLKSRC_MII4_TX_CLK,
};
const int phy_clk_sources[] = {
CLKSRC_IDIV0,
CLKSRC_IDIV1,
CLKSRC_IDIV2,
CLKSRC_IDIV3,
CLKSRC_IDIV4,
};
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
int clksrc;
if (regs->mii_tx_clk[port] == SJA1105_RSV_ADDR)
return 0;
if (role == XMII_MAC)
clksrc = mac_clk_sources[port];
else
clksrc = phy_clk_sources[port];
/* Payload for packed_buf */
mii_tx_clk.clksrc = clksrc;
mii_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
mii_tx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_tx_clk[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
{
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl mii_rx_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
const int clk_sources[] = {
CLKSRC_MII0_RX_CLK,
CLKSRC_MII1_RX_CLK,
CLKSRC_MII2_RX_CLK,
CLKSRC_MII3_RX_CLK,
CLKSRC_MII4_RX_CLK,
};
if (regs->mii_rx_clk[port] == SJA1105_RSV_ADDR)
return 0;
/* Payload for packed_buf */
mii_rx_clk.clksrc = clk_sources[port];
mii_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
mii_rx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_rx_clk[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
{
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl mii_ext_tx_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
const int clk_sources[] = {
CLKSRC_IDIV0,
CLKSRC_IDIV1,
CLKSRC_IDIV2,
CLKSRC_IDIV3,
CLKSRC_IDIV4,
};
if (regs->mii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
return 0;
/* Payload for packed_buf */
mii_ext_tx_clk.clksrc = clk_sources[port];
mii_ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
mii_ext_tx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_tx_clk[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
{
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl mii_ext_rx_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
const int clk_sources[] = {
CLKSRC_IDIV0,
CLKSRC_IDIV1,
CLKSRC_IDIV2,
CLKSRC_IDIV3,
CLKSRC_IDIV4,
};
if (regs->mii_ext_rx_clk[port] == SJA1105_RSV_ADDR)
return 0;
/* Payload for packed_buf */
mii_ext_rx_clk.clksrc = clk_sources[port];
mii_ext_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
mii_ext_rx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_rx_clk[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
sja1105_mii_role_t role)
{
struct device *dev = priv->ds->dev;
int rc;
dev_dbg(dev, "Configuring MII-%s clocking\n",
(role == XMII_MAC) ? "MAC" : "PHY");
/* If role is MAC, disable IDIV
* If role is PHY, enable IDIV and configure for 1/1 divider
*/
rc = sja1105_cgu_idiv_config(priv, port, (role == XMII_PHY), 1);
if (rc < 0)
return rc;
/* Configure CLKSRC of MII_TX_CLK_n
* * If role is MAC, select TX_CLK_n
* * If role is PHY, select IDIV_n
*/
rc = sja1105_cgu_mii_tx_clk_config(priv, port, role);
if (rc < 0)
return rc;
/* Configure CLKSRC of MII_RX_CLK_n
* Select RX_CLK_n
*/
rc = sja1105_cgu_mii_rx_clk_config(priv, port);
if (rc < 0)
return rc;
if (role == XMII_PHY) {
/* Per MII spec, the PHY (which is us) drives the TX_CLK pin */
/* Configure CLKSRC of EXT_TX_CLK_n
* Select IDIV_n
*/
rc = sja1105_cgu_mii_ext_tx_clk_config(priv, port);
if (rc < 0)
return rc;
/* Configure CLKSRC of EXT_RX_CLK_n
* Select IDIV_n
*/
rc = sja1105_cgu_mii_ext_rx_clk_config(priv, port);
if (rc < 0)
return rc;
}
return 0;
}
static void
sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd,
enum packing_op op)
{
const int size = 4;
sja1105_packing(buf, &cmd->pllclksrc, 28, 24, size, op);
sja1105_packing(buf, &cmd->msel, 23, 16, size, op);
sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
sja1105_packing(buf, &cmd->psel, 9, 8, size, op);
sja1105_packing(buf, &cmd->direct, 7, 7, size, op);
sja1105_packing(buf, &cmd->fbsel, 6, 6, size, op);
sja1105_packing(buf, &cmd->bypass, 1, 1, size, op);
sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
}
static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
int port, u64 speed)
{
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl txc;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
int clksrc;
if (regs->rgmii_tx_clk[port] == SJA1105_RSV_ADDR)
return 0;
if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
clksrc = CLKSRC_PLL0;
} else {
int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
CLKSRC_IDIV3, CLKSRC_IDIV4};
clksrc = clk_sources[port];
}
/* RGMII: 125MHz for 1000, 25MHz for 100, 2.5MHz for 10 */
txc.clksrc = clksrc;
/* Autoblock clk while changing clksrc */
txc.autoblock = 1;
/* Power Down off => enabled */
txc.pd = 0;
sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgmii_tx_clk[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
/* AGU */
static void
sja1105_cfg_pad_mii_packing(void *buf, struct sja1105_cfg_pad_mii *cmd,
enum packing_op op)
{
const int size = 4;
sja1105_packing(buf, &cmd->d32_os, 28, 27, size, op);
sja1105_packing(buf, &cmd->d32_ih, 26, 26, size, op);
sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op);
sja1105_packing(buf, &cmd->d10_os, 20, 19, size, op);
sja1105_packing(buf, &cmd->d10_ih, 18, 18, size, op);
sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op);
sja1105_packing(buf, &cmd->ctrl_os, 12, 11, size, op);
sja1105_packing(buf, &cmd->ctrl_ih, 10, 10, size, op);
sja1105_packing(buf, &cmd->ctrl_ipud, 9, 8, size, op);
sja1105_packing(buf, &cmd->clk_os, 4, 3, size, op);
sja1105_packing(buf, &cmd->clk_ih, 2, 2, size, op);
sja1105_packing(buf, &cmd->clk_ipud, 1, 0, size, op);
}
static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
int port)
{
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cfg_pad_mii pad_mii_tx = {0};
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
if (regs->pad_mii_tx[port] == SJA1105_RSV_ADDR)
return 0;
/* Payload */
pad_mii_tx.d32_os = 3; /* TXD[3:2] output stage: */
/* high noise/high speed */
pad_mii_tx.d10_os = 3; /* TXD[1:0] output stage: */
/* high noise/high speed */
pad_mii_tx.d32_ipud = 2; /* TXD[3:2] input stage: */
/* plain input (default) */
pad_mii_tx.d10_ipud = 2; /* TXD[1:0] input stage: */
/* plain input (default) */
pad_mii_tx.ctrl_os = 3; /* TX_CTL / TX_ER output stage */
pad_mii_tx.ctrl_ipud = 2; /* TX_CTL / TX_ER input stage (default) */
pad_mii_tx.clk_os = 3; /* TX_CLK output stage */
pad_mii_tx.clk_ih = 0; /* TX_CLK input hysteresis (default) */
pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */
sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_tx, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int sja1105_cfg_pad_rx_config(struct sja1105_private *priv, int port)
{
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cfg_pad_mii pad_mii_rx = {0};
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
if (regs->pad_mii_rx[port] == SJA1105_RSV_ADDR)
return 0;
/* Payload */
pad_mii_rx.d32_ih = 0; /* RXD[3:2] input stage hysteresis: */
/* non-Schmitt (default) */
pad_mii_rx.d32_ipud = 2; /* RXD[3:2] input weak pull-up/down */
/* plain input (default) */
pad_mii_rx.d10_ih = 0; /* RXD[1:0] input stage hysteresis: */
/* non-Schmitt (default) */
pad_mii_rx.d10_ipud = 2; /* RXD[1:0] input weak pull-up/down */
/* plain input (default) */
pad_mii_rx.ctrl_ih = 0; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
/* input stage hysteresis: */
/* non-Schmitt (default) */
pad_mii_rx.ctrl_ipud = 3; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
/* input stage weak pull-up/down: */
/* pull-down */
pad_mii_rx.clk_os = 2; /* RX_CLK/RXC output stage: */
/* medium noise/fast speed (default) */
pad_mii_rx.clk_ih = 0; /* RX_CLK/RXC input hysteresis: */
/* non-Schmitt (default) */
pad_mii_rx.clk_ipud = 2; /* RX_CLK/RXC input pull-up/down: */
/* plain input (default) */
sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_rx, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_rx[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
static void
sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
enum packing_op op)
{
const int size = SJA1105_SIZE_CGU_CMD;
sja1105_packing(buf, &cmd->rxc_stable_ovr, 15, 15, size, op);
sja1105_packing(buf, &cmd->rxc_delay, 14, 10, size, op);
sja1105_packing(buf, &cmd->rxc_bypass, 9, 9, size, op);
sja1105_packing(buf, &cmd->rxc_pd, 8, 8, size, op);
sja1105_packing(buf, &cmd->txc_stable_ovr, 7, 7, size, op);
sja1105_packing(buf, &cmd->txc_delay, 6, 2, size, op);
sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op);
sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
}
static void
sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
enum packing_op op)
{
const int size = SJA1105_SIZE_CGU_CMD;
u64 range = 4;
/* Fields RXC_RANGE and TXC_RANGE select the input frequency range:
* 0 = 2.5MHz
* 1 = 25MHz
* 2 = 50MHz
* 3 = 125MHz
* 4 = Automatically determined by port speed.
* There's no point in defining a structure different than the one for
* SJA1105, so just hardcode the frequency range to automatic, just as
* before.
*/
sja1105_packing(buf, &cmd->rxc_stable_ovr, 26, 26, size, op);
sja1105_packing(buf, &cmd->rxc_delay, 25, 21, size, op);
sja1105_packing(buf, &range, 20, 18, size, op);
sja1105_packing(buf, &cmd->rxc_bypass, 17, 17, size, op);
sja1105_packing(buf, &cmd->rxc_pd, 16, 16, size, op);
sja1105_packing(buf, &cmd->txc_stable_ovr, 10, 10, size, op);
sja1105_packing(buf, &cmd->txc_delay, 9, 5, size, op);
sja1105_packing(buf, &range, 4, 2, size, op);
sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op);
sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
}
/* The RGMII delay setup procedure has two steps and gets called upon each
 * .phylink_mac_config call. Both steps are deliberate.
* The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues
* with recovering from a frequency change of the link partner's RGMII clock.
* The easiest way to recover from this is to temporarily power down the TDL,
* as it will re-lock at the new frequency afterwards.
*/
int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
{
const struct sja1105_private *priv = ctx;
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
int rx_delay = priv->rgmii_rx_delay_ps[port];
int tx_delay = priv->rgmii_tx_delay_ps[port];
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
int rc;
if (rx_delay)
pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
if (tx_delay)
pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
/* Stage 1: Turn the RGMII delay lines off. */
pad_mii_id.rxc_bypass = 1;
pad_mii_id.rxc_pd = 1;
pad_mii_id.txc_bypass = 1;
pad_mii_id.txc_pd = 1;
sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
if (rc < 0)
return rc;
/* Stage 2: Turn the RGMII delay lines on. */
if (rx_delay) {
pad_mii_id.rxc_bypass = 0;
pad_mii_id.rxc_pd = 0;
}
if (tx_delay) {
pad_mii_id.txc_bypass = 0;
pad_mii_id.txc_pd = 0;
}
sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
int sja1110_setup_rgmii_delay(const void *ctx, int port)
{
const struct sja1105_private *priv = ctx;
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
int rx_delay = priv->rgmii_rx_delay_ps[port];
int tx_delay = priv->rgmii_tx_delay_ps[port];
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
pad_mii_id.rxc_pd = 1;
pad_mii_id.txc_pd = 1;
if (rx_delay) {
pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
/* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */
pad_mii_id.rxc_bypass = 1;
pad_mii_id.rxc_pd = 0;
}
if (tx_delay) {
pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
pad_mii_id.txc_bypass = 1;
pad_mii_id.txc_pd = 0;
}
sja1110_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
sja1105_mii_role_t role)
{
struct device *dev = priv->ds->dev;
struct sja1105_mac_config_entry *mac;
u64 speed;
int rc;
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
speed = mac[port].speed;
dev_dbg(dev, "Configuring port %d RGMII at speed %lldMbps\n",
port, speed);
if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
/* 1000Mbps, IDIV disabled (125 MHz) */
rc = sja1105_cgu_idiv_config(priv, port, false, 1);
} else if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) {
/* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */
rc = sja1105_cgu_idiv_config(priv, port, true, 1);
} else if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) {
/* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */
rc = sja1105_cgu_idiv_config(priv, port, true, 10);
} else if (speed == priv->info->port_speed[SJA1105_SPEED_AUTO]) {
/* Skip CGU configuration if there is no speed available
* (e.g. link is not established yet)
*/
dev_dbg(dev, "Speed not available, skipping CGU config\n");
return 0;
} else {
rc = -EINVAL;
}
if (rc < 0) {
dev_err(dev, "Failed to configure idiv\n");
return rc;
}
rc = sja1105_cgu_rgmii_tx_clk_config(priv, port, speed);
if (rc < 0) {
dev_err(dev, "Failed to configure RGMII Tx clock\n");
return rc;
}
rc = sja1105_rgmii_cfg_pad_tx_config(priv, port);
if (rc < 0) {
dev_err(dev, "Failed to configure Tx pad registers\n");
return rc;
}
if (!priv->info->setup_rgmii_delay)
return 0;
return priv->info->setup_rgmii_delay(priv, port);
}
static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
int port)
{
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl ref_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
const int clk_sources[] = {
CLKSRC_MII0_TX_CLK,
CLKSRC_MII1_TX_CLK,
CLKSRC_MII2_TX_CLK,
CLKSRC_MII3_TX_CLK,
CLKSRC_MII4_TX_CLK,
};
if (regs->rmii_ref_clk[port] == SJA1105_RSV_ADDR)
return 0;
/* Payload for packed_buf */
ref_clk.clksrc = clk_sources[port];
ref_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
ref_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ref_clk[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
{
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl ext_tx_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
if (regs->rmii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
return 0;
/* Payload for packed_buf */
ext_tx_clk.clksrc = CLKSRC_PLL1;
ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
ext_tx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ext_tx_clk[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
{
const struct sja1105_regs *regs = priv->info->regs;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
struct sja1105_cgu_pll_ctrl pll = {0};
struct device *dev = priv->ds->dev;
int rc;
if (regs->rmii_pll1 == SJA1105_RSV_ADDR)
return 0;
	/* PLL1 must be enabled and output 50 MHz.
	 * This is done by first writing 0x0A010941 to
	 * the PLL_1_C register and then deasserting
	 * power down (PD) by writing 0x0A010940.
	 */
	/* Step 1: PLL1 setup for 50 MHz */
pll.pllclksrc = 0xA;
pll.msel = 0x1;
pll.autoblock = 0x1;
pll.psel = 0x1;
pll.direct = 0x0;
pll.fbsel = 0x1;
pll.bypass = 0x0;
pll.pd = 0x1;
sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
SJA1105_SIZE_CGU_CMD);
if (rc < 0) {
dev_err(dev, "failed to configure PLL1 for 50MHz\n");
return rc;
}
/* Step 2: Enable PLL1 */
pll.pd = 0x0;
sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
SJA1105_SIZE_CGU_CMD);
if (rc < 0) {
dev_err(dev, "failed to enable PLL1\n");
return rc;
}
return rc;
}
static int sja1105_rmii_clocking_setup(struct sja1105_private *priv, int port,
sja1105_mii_role_t role)
{
struct device *dev = priv->ds->dev;
int rc;
dev_dbg(dev, "Configuring RMII-%s clocking\n",
(role == XMII_MAC) ? "MAC" : "PHY");
/* AH1601.pdf chapter 2.5.1. Sources */
if (role == XMII_MAC) {
		/* Configure and enable PLL1 for 50 MHz output */
rc = sja1105_cgu_rmii_pll_config(priv);
if (rc < 0)
return rc;
}
/* Disable IDIV for this port */
rc = sja1105_cgu_idiv_config(priv, port, false, 1);
if (rc < 0)
return rc;
/* Source to sink mappings */
rc = sja1105_cgu_rmii_ref_clk_config(priv, port);
if (rc < 0)
return rc;
if (role == XMII_MAC) {
rc = sja1105_cgu_rmii_ext_tx_clk_config(priv, port);
if (rc < 0)
return rc;
}
return 0;
}
int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
{
struct sja1105_xmii_params_entry *mii;
struct device *dev = priv->ds->dev;
sja1105_phy_interface_t phy_mode;
sja1105_mii_role_t role;
int rc;
mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
/* RGMII etc */
phy_mode = mii->xmii_mode[port];
/* MAC or PHY, for applicable types (not RGMII) */
role = mii->phy_mac[port];
switch (phy_mode) {
case XMII_MODE_MII:
rc = sja1105_mii_clocking_setup(priv, port, role);
break;
case XMII_MODE_RMII:
rc = sja1105_rmii_clocking_setup(priv, port, role);
break;
case XMII_MODE_RGMII:
rc = sja1105_rgmii_clocking_setup(priv, port, role);
break;
case XMII_MODE_SGMII:
/* Nothing to do in the CGU for SGMII */
rc = 0;
break;
default:
dev_err(dev, "Invalid interface mode specified: %d\n",
phy_mode);
return -EINVAL;
}
if (rc) {
dev_err(dev, "Clocking setup for port %d failed: %d\n",
port, rc);
return rc;
}
/* Internally pull down the RX_DV/CRS_DV/RX_CTL and RX_ER inputs */
return sja1105_cfg_pad_rx_config(priv, port);
}
int sja1105_clocking_setup(struct sja1105_private *priv)
{
struct dsa_switch *ds = priv->ds;
int port, rc;
for (port = 0; port < ds->num_ports; port++) {
rc = sja1105_clocking_setup_port(priv, port);
if (rc < 0)
return rc;
}
return 0;
}
static void
sja1110_cgu_outclk_packing(void *buf, struct sja1110_cgu_outclk *outclk,
enum packing_op op)
{
const int size = 4;
sja1105_packing(buf, &outclk->clksrc, 27, 24, size, op);
sja1105_packing(buf, &outclk->autoblock, 11, 11, size, op);
sja1105_packing(buf, &outclk->pd, 0, 0, size, op);
}
int sja1110_disable_microcontroller(struct sja1105_private *priv)
{
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
struct sja1110_cgu_outclk outclk_6_c = {
.clksrc = 0x3,
.pd = true,
};
struct sja1110_cgu_outclk outclk_7_c = {
.clksrc = 0x5,
.pd = true,
};
int rc;
/* Power down the BASE_TIMER_CLK to disable the watchdog timer */
sja1110_cgu_outclk_packing(packed_buf, &outclk_7_c, PACK);
rc = sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_TIMER_CLK,
packed_buf, SJA1105_SIZE_CGU_CMD);
if (rc)
return rc;
/* Power down the BASE_MCSS_CLOCK to gate the microcontroller off */
sja1110_cgu_outclk_packing(packed_buf, &outclk_6_c, PACK);
return sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_MCSS_CLK,
packed_buf, SJA1105_SIZE_CGU_CMD);
}
| linux-master | drivers/net/dsa/sja1105/sja1105_clocking.c |
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018 NXP
* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
#include <linux/spi/spi.h>
#include <linux/packing.h>
#include "sja1105.h"
struct sja1105_chunk {
u8 *buf;
size_t len;
u64 reg_addr;
};
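/* SPI message header layout (SJA1105_SIZE_SPI_MSG_HEADER bytes):
 * bit 31 is the access type (read/write), bits 30:25 hold the read count
 * expressed in 32-bit words, and bits 24:4 hold the register address.
 */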
static void
sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
{
const int size = SJA1105_SIZE_SPI_MSG_HEADER;
memset(buf, 0, size);
sja1105_pack(buf, &msg->access, 31, 31, size);
sja1105_pack(buf, &msg->read_count, 30, 25, size);
sja1105_pack(buf, &msg->address, 24, 4, size);
}
/* If @rw is:
* - SPI_WRITE: creates and sends an SPI write message at absolute
* address reg_addr, taking @len bytes from *buf
* - SPI_READ: creates and sends an SPI read message from absolute
* address reg_addr, writing @len bytes into *buf
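 *
 * Transfers larger than priv->max_xfer_len are split into multiple chunks,
 * each preceded by its own message header with a correspondingly advanced
 * register address.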
*/
static int sja1105_xfer(const struct sja1105_private *priv,
sja1105_spi_rw_mode_t rw, u64 reg_addr, u8 *buf,
size_t len, struct ptp_system_timestamp *ptp_sts)
{
u8 hdr_buf[SJA1105_SIZE_SPI_MSG_HEADER] = {0};
struct spi_device *spi = priv->spidev;
struct spi_transfer xfers[2] = {0};
struct spi_transfer *chunk_xfer;
struct spi_transfer *hdr_xfer;
struct sja1105_chunk chunk;
int num_chunks;
int rc, i = 0;
num_chunks = DIV_ROUND_UP(len, priv->max_xfer_len);
chunk.reg_addr = reg_addr;
chunk.buf = buf;
chunk.len = min_t(size_t, len, priv->max_xfer_len);
hdr_xfer = &xfers[0];
chunk_xfer = &xfers[1];
for (i = 0; i < num_chunks; i++) {
struct spi_transfer *ptp_sts_xfer;
struct sja1105_spi_message msg;
/* Populate the transfer's header buffer */
msg.address = chunk.reg_addr;
msg.access = rw;
if (rw == SPI_READ)
msg.read_count = chunk.len / 4;
else
/* Ignored */
msg.read_count = 0;
sja1105_spi_message_pack(hdr_buf, &msg);
hdr_xfer->tx_buf = hdr_buf;
hdr_xfer->len = SJA1105_SIZE_SPI_MSG_HEADER;
/* Populate the transfer's data buffer */
if (rw == SPI_READ)
chunk_xfer->rx_buf = chunk.buf;
else
chunk_xfer->tx_buf = chunk.buf;
chunk_xfer->len = chunk.len;
/* Request timestamping for the transfer. Instead of letting
* callers specify which byte they want to timestamp, we can
* make certain assumptions:
* - A read operation will request a software timestamp when
* what's being read is the PTP time. That is snapshotted by
* the switch hardware at the end of the command portion
* (hdr_xfer).
* - A write operation will request a software timestamp on
* actions that modify the PTP time. Taking clock stepping as
* an example, the switch writes the PTP time at the end of
* the data portion (chunk_xfer).
*/
if (rw == SPI_READ)
ptp_sts_xfer = hdr_xfer;
else
ptp_sts_xfer = chunk_xfer;
ptp_sts_xfer->ptp_sts_word_pre = ptp_sts_xfer->len - 1;
ptp_sts_xfer->ptp_sts_word_post = ptp_sts_xfer->len - 1;
ptp_sts_xfer->ptp_sts = ptp_sts;
/* Calculate next chunk */
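		/* The register map is addressed in 32-bit words, so the
		 * address advances by len / 4 while the buffer pointer
		 * advances by len bytes.
		 */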
chunk.buf += chunk.len;
chunk.reg_addr += chunk.len / 4;
chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf),
priv->max_xfer_len);
rc = spi_sync_transfer(spi, xfers, 2);
if (rc < 0) {
dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
return rc;
}
}
return 0;
}
int sja1105_xfer_buf(const struct sja1105_private *priv,
sja1105_spi_rw_mode_t rw, u64 reg_addr,
u8 *buf, size_t len)
{
return sja1105_xfer(priv, rw, reg_addr, buf, len, NULL);
}
/* If @rw is:
* - SPI_WRITE: creates and sends an SPI write message at absolute
* address reg_addr
* - SPI_READ: creates and sends an SPI read message from absolute
* address reg_addr
*
* The u64 *value is unpacked, meaning that it's stored in the native
* CPU endianness and directly usable by software running on the core.
*/
int sja1105_xfer_u64(const struct sja1105_private *priv,
sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
struct ptp_system_timestamp *ptp_sts)
{
u8 packed_buf[8];
int rc;
if (rw == SPI_WRITE)
sja1105_pack(packed_buf, value, 63, 0, 8);
rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 8, ptp_sts);
if (rw == SPI_READ)
sja1105_unpack(packed_buf, value, 63, 0, 8);
return rc;
}
/* Same as above, but transfers only a 4 byte word */
int sja1105_xfer_u32(const struct sja1105_private *priv,
sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
struct ptp_system_timestamp *ptp_sts)
{
u8 packed_buf[4];
u64 tmp;
int rc;
if (rw == SPI_WRITE) {
/* The packing API only supports u64 as CPU word size,
* so we need to convert.
*/
tmp = *value;
sja1105_pack(packed_buf, &tmp, 31, 0, 4);
}
rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 4, ptp_sts);
if (rw == SPI_READ) {
sja1105_unpack(packed_buf, &tmp, 31, 0, 4);
*value = tmp;
}
return rc;
}
static int sja1105et_reset_cmd(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
u32 cold_reset = BIT(3);
/* Cold reset */
return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &cold_reset, NULL);
}
static int sja1105pqrs_reset_cmd(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
u32 cold_reset = BIT(2);
/* Cold reset */
return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &cold_reset, NULL);
}
static int sja1110_reset_cmd(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
u32 switch_reset = BIT(20);
/* Only reset the switch core.
* A full cold reset would re-enable the BASE_MCSS_CLOCK PLL which
* would turn on the microcontroller, potentially letting it execute
* code which could interfere with our configuration.
*/
return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &switch_reset, NULL);
}
int sja1105_inhibit_tx(const struct sja1105_private *priv,
unsigned long port_bitmap, bool tx_inhibited)
{
const struct sja1105_regs *regs = priv->info->regs;
u32 inhibit_cmd;
int rc;
rc = sja1105_xfer_u32(priv, SPI_READ, regs->port_control,
&inhibit_cmd, NULL);
if (rc < 0)
return rc;
if (tx_inhibited)
inhibit_cmd |= port_bitmap;
else
inhibit_cmd &= ~port_bitmap;
return sja1105_xfer_u32(priv, SPI_WRITE, regs->port_control,
&inhibit_cmd, NULL);
}
struct sja1105_status {
u64 configs;
u64 crcchkl;
u64 ids;
u64 crcchkg;
};
/* This is not reading the entire General Status area, which is also
* divergent between E/T and P/Q/R/S, but only the relevant bits for
* ensuring that the static config upload procedure was successful.
*/
static void sja1105_status_unpack(void *buf, struct sja1105_status *status)
{
/* So that addition translates to 4 bytes */
u32 *p = buf;
/* device_id is missing from the buffer, but we don't
* want to diverge from the manual definition of the
* register addresses, so we'll back off one step with
* the register pointer, and never access p[0].
*/
p--;
sja1105_unpack(p + 0x1, &status->configs, 31, 31, 4);
sja1105_unpack(p + 0x1, &status->crcchkl, 30, 30, 4);
sja1105_unpack(p + 0x1, &status->ids, 29, 29, 4);
sja1105_unpack(p + 0x1, &status->crcchkg, 28, 28, 4);
}
static int sja1105_status_get(struct sja1105_private *priv,
struct sja1105_status *status)
{
const struct sja1105_regs *regs = priv->info->regs;
u8 packed_buf[4];
int rc;
rc = sja1105_xfer_buf(priv, SPI_READ, regs->status, packed_buf, 4);
if (rc < 0)
return rc;
sja1105_status_unpack(packed_buf, status);
return 0;
}
/* Not const because unpacking priv->static_config into buffers and preparing
* for upload requires the recalculation of table CRCs and updating the
* structures with these.
*/
int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
void *config_buf, int buf_len)
{
struct sja1105_static_config *config = &priv->static_config;
struct sja1105_table_header final_header;
sja1105_config_valid_t valid;
char *final_header_ptr;
int crc_len;
valid = sja1105_static_config_check_valid(config,
priv->info->max_frame_mem);
if (valid != SJA1105_CONFIG_OK) {
		dev_err(&priv->spidev->dev, "%s\n",
			sja1105_static_config_error_msg[valid]);
return -EINVAL;
}
/* Write Device ID and config tables to config_buf */
sja1105_static_config_pack(config_buf, config);
/* Recalculate CRC of the last header (right now 0xDEADBEEF).
* Don't include the CRC field itself.
*/
crc_len = buf_len - 4;
/* Read the whole table header */
final_header_ptr = config_buf + buf_len - SJA1105_SIZE_TABLE_HEADER;
sja1105_table_header_packing(final_header_ptr, &final_header, UNPACK);
/* Modify */
final_header.crc = sja1105_crc32(config_buf, crc_len);
/* Rewrite */
sja1105_table_header_packing(final_header_ptr, &final_header, PACK);
return 0;
}
#define RETRIES 10
int sja1105_static_config_upload(struct sja1105_private *priv)
{
struct sja1105_static_config *config = &priv->static_config;
const struct sja1105_regs *regs = priv->info->regs;
struct device *dev = &priv->spidev->dev;
struct dsa_switch *ds = priv->ds;
struct sja1105_status status;
int rc, retries = RETRIES;
u8 *config_buf;
int buf_len;
buf_len = sja1105_static_config_get_length(config);
config_buf = kcalloc(buf_len, sizeof(char), GFP_KERNEL);
if (!config_buf)
return -ENOMEM;
rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
if (rc < 0) {
dev_err(dev, "Invalid config, cannot upload\n");
rc = -EINVAL;
goto out;
}
/* Prevent PHY jabbering during switch reset by inhibiting
* Tx on all ports and waiting for current packet to drain.
* Otherwise, the PHY will see an unterminated Ethernet packet.
*/
rc = sja1105_inhibit_tx(priv, GENMASK_ULL(ds->num_ports - 1, 0), true);
if (rc < 0) {
dev_err(dev, "Failed to inhibit Tx on ports\n");
rc = -ENXIO;
goto out;
}
/* Wait for an eventual egress packet to finish transmission
* (reach IFG). It is guaranteed that a second one will not
* follow, and that switch cold reset is thus safe
*/
usleep_range(500, 1000);
do {
/* Put the SJA1105 in programming mode */
rc = priv->info->reset_cmd(priv->ds);
if (rc < 0) {
dev_err(dev, "Failed to reset switch, retrying...\n");
continue;
}
/* Wait for the switch to come out of reset */
usleep_range(1000, 5000);
/* Upload the static config to the device */
rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->config,
config_buf, buf_len);
if (rc < 0) {
dev_err(dev, "Failed to upload config, retrying...\n");
continue;
}
/* Check that SJA1105 responded well to the config upload */
rc = sja1105_status_get(priv, &status);
if (rc < 0)
continue;
if (status.ids == 1) {
dev_err(dev, "Mismatch between hardware and static config "
"device id. Wrote 0x%llx, wants 0x%llx\n",
config->device_id, priv->info->device_id);
continue;
}
if (status.crcchkl == 1) {
dev_err(dev, "Switch reported invalid local CRC on "
"the uploaded config, retrying...\n");
continue;
}
if (status.crcchkg == 1) {
dev_err(dev, "Switch reported invalid global CRC on "
"the uploaded config, retrying...\n");
continue;
}
if (status.configs == 0) {
dev_err(dev, "Switch reported that configuration is "
"invalid, retrying...\n");
continue;
}
/* Success! */
break;
} while (--retries);
if (!retries) {
rc = -EIO;
dev_err(dev, "Failed to upload config to device, giving up\n");
goto out;
} else if (retries != RETRIES) {
		dev_info(dev, "Succeeded after %d tries\n", RETRIES - retries);
}
out:
kfree(config_buf);
return rc;
}
static const struct sja1105_regs sja1105et_regs = {
.device_id = 0x0,
.prod_id = 0x100BC3,
.status = 0x1,
.port_control = 0x11,
.vl_status = 0x10000,
.config = 0x020000,
.rgu = 0x100440,
/* UM10944.pdf, Table 86, ACU Register overview */
.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
.pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
.rmii_pll1 = 0x10000A,
.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
.stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208},
.stats[HL1] = {0x400, 0x410, 0x420, 0x430, 0x440},
.stats[HL2] = {0x600, 0x610, 0x620, 0x630, 0x640},
/* UM10944.pdf, Table 78, CGU Register overview */
.mii_tx_clk = {0x100013, 0x10001A, 0x100021, 0x100028, 0x10002F},
.mii_rx_clk = {0x100014, 0x10001B, 0x100022, 0x100029, 0x100030},
.mii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
.mii_ext_rx_clk = {0x100019, 0x100020, 0x100027, 0x10002E, 0x100035},
.rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032},
.rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
.rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
.ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
.ptpschtm = 0x12, /* Spans 0x12 to 0x13 */
.ptppinst = 0x14,
.ptppindur = 0x16,
.ptp_control = 0x17,
.ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
.ptpclkrate = 0x1A,
.ptpclkcorp = 0x1D,
.mdio_100base_tx = SJA1105_RSV_ADDR,
.mdio_100base_t1 = SJA1105_RSV_ADDR,
};
static const struct sja1105_regs sja1105pqrs_regs = {
.device_id = 0x0,
.prod_id = 0x100BC3,
.status = 0x1,
.port_control = 0x12,
.vl_status = 0x10000,
.config = 0x020000,
.rgu = 0x100440,
/* UM10944.pdf, Table 86, ACU Register overview */
.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
.pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
.pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
.rmii_pll1 = 0x10000A,
.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
.stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208},
.stats[HL1] = {0x400, 0x410, 0x420, 0x430, 0x440},
.stats[HL2] = {0x600, 0x610, 0x620, 0x630, 0x640},
.stats[ETHER] = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460},
/* UM11040.pdf, Table 114 */
.mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B},
.mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C},
.mii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
.mii_ext_rx_clk = {0x100018, 0x10001E, 0x100024, 0x10002A, 0x100030},
.rgmii_tx_clk = {0x100016, 0x10001C, 0x100022, 0x100028, 0x10002E},
.rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D},
.rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
.ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
.ptpschtm = 0x13, /* Spans 0x13 to 0x14 */
.ptppinst = 0x15,
.ptppindur = 0x17,
.ptp_control = 0x18,
.ptpclkval = 0x19,
.ptpclkrate = 0x1B,
.ptpclkcorp = 0x1E,
.ptpsyncts = 0x1F,
.mdio_100base_tx = SJA1105_RSV_ADDR,
.mdio_100base_t1 = SJA1105_RSV_ADDR,
};
static const struct sja1105_regs sja1110_regs = {
.device_id = SJA1110_SPI_ADDR(0x0),
.prod_id = SJA1110_ACU_ADDR(0xf00),
.status = SJA1110_SPI_ADDR(0x4),
.port_control = SJA1110_SPI_ADDR(0x50), /* actually INHIB_TX */
.vl_status = 0x10000,
.config = 0x020000,
.rgu = SJA1110_RGU_ADDR(0x100), /* Reset Control Register 0 */
/* Ports 2 and 3 are capable of xMII, but there isn't anything to
* configure in the CGU/ACU for them.
*/
.pad_mii_tx = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR},
.pad_mii_rx = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR},
.pad_mii_id = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1110_ACU_ADDR(0x18), SJA1110_ACU_ADDR(0x28),
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR},
.rmii_pll1 = SJA1105_RSV_ADDR,
.cgu_idiv = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
.stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208, 0x20a,
0x20c, 0x20e, 0x210, 0x212, 0x214},
.stats[HL1] = {0x400, 0x410, 0x420, 0x430, 0x440, 0x450,
0x460, 0x470, 0x480, 0x490, 0x4a0},
.stats[HL2] = {0x600, 0x610, 0x620, 0x630, 0x640, 0x650,
0x660, 0x670, 0x680, 0x690, 0x6a0},
.stats[ETHER] = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460, 0x1478,
0x1490, 0x14a8, 0x14c0, 0x14d8, 0x14f0},
.mii_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
.mii_rx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
.mii_ext_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
.mii_ext_rx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
.rgmii_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
.rmii_ref_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
.rmii_ext_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR},
.ptpschtm = SJA1110_SPI_ADDR(0x54),
.ptppinst = SJA1110_SPI_ADDR(0x5c),
.ptppindur = SJA1110_SPI_ADDR(0x64),
.ptp_control = SJA1110_SPI_ADDR(0x68),
.ptpclkval = SJA1110_SPI_ADDR(0x6c),
.ptpclkrate = SJA1110_SPI_ADDR(0x74),
.ptpclkcorp = SJA1110_SPI_ADDR(0x80),
.ptpsyncts = SJA1110_SPI_ADDR(0x84),
.mdio_100base_tx = 0x1c2400,
.mdio_100base_t1 = 0x1c1000,
.pcs_base = {SJA1105_RSV_ADDR, 0x1c1400, 0x1c1800, 0x1c1c00, 0x1c2000,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
};
const struct sja1105_info sja1105e_info = {
.device_id = SJA1105E_DEVICE_ID,
.part_no = SJA1105ET_PART_NO,
.static_ops = sja1105e_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
.max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
.num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
.ptp_cmd_packing = sja1105et_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
.regs = &sja1105et_regs,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 3,
[SJA1105_SPEED_100MBPS] = 2,
[SJA1105_SPEED_1000MBPS] = 1,
[SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
},
.supports_mii = {true, true, true, true, true},
.supports_rmii = {true, true, true, true, true},
.supports_rgmii = {true, true, true, true, true},
.name = "SJA1105E",
};
const struct sja1105_info sja1105t_info = {
.device_id = SJA1105T_DEVICE_ID,
.part_no = SJA1105ET_PART_NO,
.static_ops = sja1105t_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
.max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
.num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
.ptp_cmd_packing = sja1105et_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
.regs = &sja1105et_regs,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 3,
[SJA1105_SPEED_100MBPS] = 2,
[SJA1105_SPEED_1000MBPS] = 1,
[SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
},
.supports_mii = {true, true, true, true, true},
.supports_rmii = {true, true, true, true, true},
.supports_rgmii = {true, true, true, true, true},
.name = "SJA1105T",
};
const struct sja1105_info sja1105p_info = {
.device_id = SJA1105PR_DEVICE_ID,
.part_no = SJA1105P_PART_NO,
.static_ops = sja1105p_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
.num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
.regs = &sja1105pqrs_regs,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 3,
[SJA1105_SPEED_100MBPS] = 2,
[SJA1105_SPEED_1000MBPS] = 1,
[SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
},
.supports_mii = {true, true, true, true, true},
.supports_rmii = {true, true, true, true, true},
.supports_rgmii = {true, true, true, true, true},
.name = "SJA1105P",
};
const struct sja1105_info sja1105q_info = {
.device_id = SJA1105QS_DEVICE_ID,
.part_no = SJA1105Q_PART_NO,
.static_ops = sja1105q_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
.num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
.regs = &sja1105pqrs_regs,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 3,
[SJA1105_SPEED_100MBPS] = 2,
[SJA1105_SPEED_1000MBPS] = 1,
[SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
},
.supports_mii = {true, true, true, true, true},
.supports_rmii = {true, true, true, true, true},
.supports_rgmii = {true, true, true, true, true},
.name = "SJA1105Q",
};
const struct sja1105_info sja1105r_info = {
.device_id = SJA1105PR_DEVICE_ID,
.part_no = SJA1105R_PART_NO,
.static_ops = sja1105r_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
.num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
.pcs_mdio_read_c45 = sja1105_pcs_mdio_read_c45,
.pcs_mdio_write_c45 = sja1105_pcs_mdio_write_c45,
.regs = &sja1105pqrs_regs,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 3,
[SJA1105_SPEED_100MBPS] = 2,
[SJA1105_SPEED_1000MBPS] = 1,
[SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
},
.supports_mii = {true, true, true, true, true},
.supports_rmii = {true, true, true, true, true},
.supports_rgmii = {true, true, true, true, true},
.supports_sgmii = {false, false, false, false, true},
.name = "SJA1105R",
};
const struct sja1105_info sja1105s_info = {
.device_id = SJA1105QS_DEVICE_ID,
.part_no = SJA1105S_PART_NO,
.static_ops = sja1105s_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.regs = &sja1105pqrs_regs,
.tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
.num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
.pcs_mdio_read_c45 = sja1105_pcs_mdio_read_c45,
.pcs_mdio_write_c45 = sja1105_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 3,
[SJA1105_SPEED_100MBPS] = 2,
[SJA1105_SPEED_1000MBPS] = 1,
[SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
},
.supports_mii = {true, true, true, true, true},
.supports_rmii = {true, true, true, true, true},
.supports_rgmii = {true, true, true, true, true},
.supports_sgmii = {false, false, false, false, true},
.name = "SJA1105S",
};
const struct sja1105_info sja1110a_info = {
.device_id = SJA1110_DEVICE_ID,
.part_no = SJA1110A_PART_NO,
.static_ops = sja1110_table_ops,
.dyn_ops = sja1110_dyn_ops,
.regs = &sja1110_regs,
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
.fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
.num_ports = SJA1110_NUM_PORTS,
.num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1110_setup_rgmii_delay,
.reset_cmd = sja1110_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
.pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
.pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
[SJA1105_SPEED_100MBPS] = 3,
[SJA1105_SPEED_1000MBPS] = 2,
[SJA1105_SPEED_2500MBPS] = 1,
},
.supports_mii = {true, true, true, true, false,
true, true, true, true, true, true},
.supports_rmii = {false, false, true, true, false,
false, false, false, false, false, false},
.supports_rgmii = {false, false, true, true, false,
false, false, false, false, false, false},
.supports_sgmii = {false, true, true, true, true,
false, false, false, false, false, false},
.supports_2500basex = {false, false, false, true, true,
false, false, false, false, false, false},
.internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
SJA1105_NO_PHY, SJA1105_NO_PHY,
SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
SJA1105_PHY_BASE_T1},
.name = "SJA1110A",
};
const struct sja1105_info sja1110b_info = {
.device_id = SJA1110_DEVICE_ID,
.part_no = SJA1110B_PART_NO,
.static_ops = sja1110_table_ops,
.dyn_ops = sja1110_dyn_ops,
.regs = &sja1110_regs,
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
.fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
.num_ports = SJA1110_NUM_PORTS,
.num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1110_setup_rgmii_delay,
.reset_cmd = sja1110_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
.pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
.pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
[SJA1105_SPEED_100MBPS] = 3,
[SJA1105_SPEED_1000MBPS] = 2,
[SJA1105_SPEED_2500MBPS] = 1,
},
.supports_mii = {true, true, true, true, false,
true, true, true, true, true, false},
.supports_rmii = {false, false, true, true, false,
false, false, false, false, false, false},
.supports_rgmii = {false, false, true, true, false,
false, false, false, false, false, false},
.supports_sgmii = {false, false, false, true, true,
false, false, false, false, false, false},
.supports_2500basex = {false, false, false, true, true,
false, false, false, false, false, false},
.internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
SJA1105_NO_PHY, SJA1105_NO_PHY,
SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
SJA1105_NO_PHY},
.name = "SJA1110B",
};
const struct sja1105_info sja1110c_info = {
.device_id = SJA1110_DEVICE_ID,
.part_no = SJA1110C_PART_NO,
.static_ops = sja1110_table_ops,
.dyn_ops = sja1110_dyn_ops,
.regs = &sja1110_regs,
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
.fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
.num_ports = SJA1110_NUM_PORTS,
.num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1110_setup_rgmii_delay,
.reset_cmd = sja1110_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
.pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
.pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
[SJA1105_SPEED_100MBPS] = 3,
[SJA1105_SPEED_1000MBPS] = 2,
[SJA1105_SPEED_2500MBPS] = 1,
},
.supports_mii = {true, true, true, true, false,
true, true, true, false, false, false},
.supports_rmii = {false, false, true, true, false,
false, false, false, false, false, false},
.supports_rgmii = {false, false, true, true, false,
false, false, false, false, false, false},
.supports_sgmii = {false, false, false, false, true,
false, false, false, false, false, false},
.supports_2500basex = {false, false, false, false, true,
false, false, false, false, false, false},
.internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
SJA1105_NO_PHY, SJA1105_NO_PHY,
SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
SJA1105_NO_PHY, SJA1105_NO_PHY,
SJA1105_NO_PHY},
.name = "SJA1110C",
};
const struct sja1105_info sja1110d_info = {
.device_id = SJA1110_DEVICE_ID,
.part_no = SJA1110D_PART_NO,
.static_ops = sja1110_table_ops,
.dyn_ops = sja1110_dyn_ops,
.regs = &sja1110_regs,
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
.fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
.num_ports = SJA1110_NUM_PORTS,
.num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1110_setup_rgmii_delay,
.reset_cmd = sja1110_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
.pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
.pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
[SJA1105_SPEED_100MBPS] = 3,
[SJA1105_SPEED_1000MBPS] = 2,
[SJA1105_SPEED_2500MBPS] = 1,
},
.supports_mii = {true, false, true, false, false,
true, true, true, false, false, false},
.supports_rmii = {false, false, true, false, false,
false, false, false, false, false, false},
.supports_rgmii = {false, false, true, false, false,
false, false, false, false, false, false},
.supports_sgmii = {false, true, true, true, true,
false, false, false, false, false, false},
.supports_2500basex = {false, false, false, true, true,
false, false, false, false, false, false},
.internal_phy = {SJA1105_NO_PHY, SJA1105_NO_PHY,
SJA1105_NO_PHY, SJA1105_NO_PHY,
SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
SJA1105_NO_PHY, SJA1105_NO_PHY,
SJA1105_NO_PHY},
.name = "SJA1110D",
};
| linux-master | drivers/net/dsa/sja1105/sja1105_spi.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2020 NXP
*/
#include "sja1105.h"
#include "sja1105_vl.h"
struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
unsigned long cookie)
{
struct sja1105_rule *rule;
list_for_each_entry(rule, &priv->flow_block.rules, list)
if (rule->cookie == cookie)
return rule;
return NULL;
}
static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
{
int i;
for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
if (!priv->flow_block.l2_policer_used[i])
return i;
return -1;
}
static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
struct netlink_ext_ack *extack,
unsigned long cookie, int port,
u64 rate_bytes_per_sec,
u32 burst)
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
struct sja1105_l2_policing_entry *policing;
struct dsa_switch *ds = priv->ds;
bool new_rule = false;
unsigned long p;
int rc;
if (!rule) {
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
return -ENOMEM;
rule->cookie = cookie;
rule->type = SJA1105_RULE_BCAST_POLICER;
rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
rule->key.type = SJA1105_KEY_BCAST;
new_rule = true;
}
if (rule->bcast_pol.sharindx == -1) {
NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
rc = -ENOSPC;
goto out;
}
policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
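	/* The L2 Policing Table holds ds->num_ports * SJA1105_NUM_TC per-port,
	 * per-traffic-class policers, followed by one broadcast policer per
	 * port.
	 */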
if (policing[(ds->num_ports * SJA1105_NUM_TC) + port].sharindx != port) {
NL_SET_ERR_MSG_MOD(extack,
"Port already has a broadcast policer");
rc = -EEXIST;
goto out;
}
rule->port_mask |= BIT(port);
/* Make the broadcast policers of all ports attached to this block
* point to the newly allocated policer
*/
for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
int bcast = (ds->num_ports * SJA1105_NUM_TC) + p;
policing[bcast].sharindx = rule->bcast_pol.sharindx;
}
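	/* Convert the rate from bytes per second to the hardware unit:
	 * rate * 512 / 1000000 equals (bits per second) / 15625, so the RATE
	 * field appears to be expressed in increments of 15.625 kbps.
	 */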
policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
512, 1000000);
policing[rule->bcast_pol.sharindx].smax = burst;
/* TODO: support per-flow MTU */
policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
ETH_FCS_LEN;
rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
out:
if (rc == 0 && new_rule) {
priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
list_add(&rule->list, &priv->flow_block.rules);
} else if (new_rule) {
kfree(rule);
}
return rc;
}
static int sja1105_setup_tc_policer(struct sja1105_private *priv,
struct netlink_ext_ack *extack,
unsigned long cookie, int port, int tc,
u64 rate_bytes_per_sec,
u32 burst)
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
struct sja1105_l2_policing_entry *policing;
bool new_rule = false;
unsigned long p;
int rc;
if (!rule) {
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
return -ENOMEM;
rule->cookie = cookie;
rule->type = SJA1105_RULE_TC_POLICER;
rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
rule->key.type = SJA1105_KEY_TC;
rule->key.tc.pcp = tc;
new_rule = true;
}
if (rule->tc_pol.sharindx == -1) {
NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
rc = -ENOSPC;
goto out;
}
policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
NL_SET_ERR_MSG_MOD(extack,
"Port-TC pair already has an L2 policer");
rc = -EEXIST;
goto out;
}
rule->port_mask |= BIT(port);
/* Make the policers for traffic class @tc of all ports attached to
* this block point to the newly allocated policer
*/
for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
int index = (p * SJA1105_NUM_TC) + tc;
policing[index].sharindx = rule->tc_pol.sharindx;
}
policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
512, 1000000);
policing[rule->tc_pol.sharindx].smax = burst;
/* TODO: support per-flow MTU */
policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
ETH_FCS_LEN;
rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
out:
if (rc == 0 && new_rule) {
priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
list_add(&rule->list, &priv->flow_block.rules);
} else if (new_rule) {
kfree(rule);
}
return rc;
}
static int sja1105_flower_policer(struct sja1105_private *priv, int port,
struct netlink_ext_ack *extack,
unsigned long cookie,
struct sja1105_key *key,
u64 rate_bytes_per_sec,
u32 burst)
{
switch (key->type) {
case SJA1105_KEY_BCAST:
return sja1105_setup_bcast_policer(priv, extack, cookie, port,
rate_bytes_per_sec, burst);
case SJA1105_KEY_TC:
return sja1105_setup_tc_policer(priv, extack, cookie, port,
key->tc.pcp, rate_bytes_per_sec,
burst);
default:
NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
return -EOPNOTSUPP;
}
}
static int sja1105_flower_parse_key(struct sja1105_private *priv,
struct netlink_ext_ack *extack,
struct flow_cls_offload *cls,
struct sja1105_key *key)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
bool is_bcast_dmac = false;
u64 dmac = U64_MAX;
u16 vid = U16_MAX;
u16 pcp = U16_MAX;
if (dissector->used_keys &
~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
NL_SET_ERR_MSG_MOD(extack,
"Unsupported keys used");
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
if (match.key->n_proto) {
NL_SET_ERR_MSG_MOD(extack,
"Matching on protocol not supported");
return -EOPNOTSUPP;
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
struct flow_match_eth_addrs match;
flow_rule_match_eth_addrs(rule, &match);
if (!ether_addr_equal_masked(match.key->src, null,
match.mask->src)) {
NL_SET_ERR_MSG_MOD(extack,
"Matching on source MAC not supported");
return -EOPNOTSUPP;
}
if (!ether_addr_equal(match.mask->dst, bcast)) {
NL_SET_ERR_MSG_MOD(extack,
"Masked matching on MAC not supported");
return -EOPNOTSUPP;
}
dmac = ether_addr_to_u64(match.key->dst);
is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
if (match.mask->vlan_id &&
match.mask->vlan_id != VLAN_VID_MASK) {
NL_SET_ERR_MSG_MOD(extack,
"Masked matching on VID is not supported");
return -EOPNOTSUPP;
}
if (match.mask->vlan_priority &&
match.mask->vlan_priority != 0x7) {
NL_SET_ERR_MSG_MOD(extack,
"Masked matching on PCP is not supported");
return -EOPNOTSUPP;
}
if (match.mask->vlan_id)
vid = match.key->vlan_id;
if (match.mask->vlan_priority)
pcp = match.key->vlan_priority;
}
if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
key->type = SJA1105_KEY_BCAST;
return 0;
}
if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
key->type = SJA1105_KEY_TC;
key->tc.pcp = pcp;
return 0;
}
if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
key->type = SJA1105_KEY_VLAN_AWARE_VL;
key->vl.dmac = dmac;
key->vl.vid = vid;
key->vl.pcp = pcp;
return 0;
}
if (dmac != U64_MAX) {
key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
key->vl.dmac = dmac;
return 0;
}
NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
return -EOPNOTSUPP;
}
static int sja1105_policer_validate(const struct flow_action *action,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when exceed action is not drop");
return -EOPNOTSUPP;
}
if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when conform action is not pipe or ok");
return -EOPNOTSUPP;
}
if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
!flow_action_is_last_entry(action, act)) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when conform action is ok, but action is not last");
return -EOPNOTSUPP;
}
if (act->police.peakrate_bytes_ps ||
act->police.avrate || act->police.overhead) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when peakrate/avrate/overhead is configured");
return -EOPNOTSUPP;
}
if (act->police.rate_pkt_ps) {
NL_SET_ERR_MSG_MOD(extack,
"QoS offload not support packets per second");
return -EOPNOTSUPP;
}
return 0;
}
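/* In tc terms, the only police action accepted above is one configured as
* "conform-exceed drop/pipe" (or "drop/ok" when police is the last action),
* with a byte-based rate and no peakrate, avrate or overhead.
*/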
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct netlink_ext_ack *extack = cls->common.extack;
struct sja1105_private *priv = ds->priv;
const struct flow_action_entry *act;
unsigned long cookie = cls->cookie;
bool routing_rule = false;
struct sja1105_key key;
bool gate_rule = false;
bool vl_rule = false;
int rc, i;
rc = sja1105_flower_parse_key(priv, extack, cls, &key);
if (rc)
return rc;
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
rc = sja1105_policer_validate(&rule->action, act, extack);
if (rc)
goto out;
rc = sja1105_flower_policer(priv, port, extack, cookie,
&key,
act->police.rate_bytes_ps,
act->police.burst);
if (rc)
goto out;
break;
case FLOW_ACTION_TRAP: {
int cpu = dsa_upstream_port(ds, port);
routing_rule = true;
vl_rule = true;
rc = sja1105_vl_redirect(priv, port, extack, cookie,
&key, BIT(cpu), true);
if (rc)
goto out;
break;
}
case FLOW_ACTION_REDIRECT: {
struct dsa_port *to_dp;
to_dp = dsa_port_from_netdev(act->dev);
if (IS_ERR(to_dp)) {
NL_SET_ERR_MSG_MOD(extack,
"Destination not a switch port");
return -EOPNOTSUPP;
}
routing_rule = true;
vl_rule = true;
rc = sja1105_vl_redirect(priv, port, extack, cookie,
&key, BIT(to_dp->index), true);
if (rc)
goto out;
break;
}
case FLOW_ACTION_DROP:
vl_rule = true;
rc = sja1105_vl_redirect(priv, port, extack, cookie,
&key, 0, false);
if (rc)
goto out;
break;
case FLOW_ACTION_GATE:
gate_rule = true;
vl_rule = true;
rc = sja1105_vl_gate(priv, port, extack, cookie,
&key, act->hw_index,
act->gate.prio,
act->gate.basetime,
act->gate.cycletime,
act->gate.cycletimeext,
act->gate.num_entries,
act->gate.entries);
if (rc)
goto out;
break;
default:
NL_SET_ERR_MSG_MOD(extack,
"Action not supported");
rc = -EOPNOTSUPP;
goto out;
}
}
if (vl_rule && !rc) {
/* Delay scheduling configuration until DESTPORTS has been
* populated by all other actions.
*/
if (gate_rule) {
if (!routing_rule) {
NL_SET_ERR_MSG_MOD(extack,
"Can only offload gate action together with redirect or trap");
return -EOPNOTSUPP;
}
rc = sja1105_init_scheduling(priv);
if (rc)
goto out;
}
rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
}
out:
return rc;
}
int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
struct sja1105_l2_policing_entry *policing;
int old_sharindx;
if (!rule)
return 0;
if (rule->type == SJA1105_RULE_VL)
return sja1105_vl_delete(priv, port, rule, cls->common.extack);
policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
if (rule->type == SJA1105_RULE_BCAST_POLICER) {
int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
old_sharindx = policing[bcast].sharindx;
policing[bcast].sharindx = port;
} else if (rule->type == SJA1105_RULE_TC_POLICER) {
int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;
old_sharindx = policing[index].sharindx;
policing[index].sharindx = port;
} else {
return -EINVAL;
}
rule->port_mask &= ~BIT(port);
if (!rule->port_mask) {
priv->flow_block.l2_policer_used[old_sharindx] = false;
list_del(&rule->list);
kfree(rule);
}
return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
int rc;
if (!rule)
return 0;
if (rule->type != SJA1105_RULE_VL)
return 0;
rc = sja1105_vl_stats(priv, port, rule, &cls->stats,
cls->common.extack);
if (rc)
return rc;
return 0;
}
void sja1105_flower_setup(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
int port;
INIT_LIST_HEAD(&priv->flow_block.rules);
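/* The first ds->num_ports L2 policers are the per-port default policers
* (each port's policing entries point their SHARINDX at the port's own
* index), so they are permanently in use; only the remaining policers can
* be handed out to flower rules.
*/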
for (port = 0; port < ds->num_ports; port++)
priv->flow_block.l2_policer_used[port] = true;
}
void sja1105_flower_teardown(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_rule *rule;
struct list_head *pos, *n;
list_for_each_safe(pos, n, &priv->flow_block.rules) {
rule = list_entry(pos, struct sja1105_rule, list);
list_del(&rule->list);
kfree(rule);
}
}
| linux-master | drivers/net/dsa/sja1105/sja1105_flower.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2020 NXP
*/
#include <net/tc_act/tc_gate.h>
#include <linux/dsa/8021q.h>
#include "sja1105_vl.h"
#define SJA1105_SIZE_VL_STATUS 8
/* Insert into the global gate list, sorted by gate action time. */
static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
struct sja1105_rule *rule,
u8 gate_state, s64 entry_time,
struct netlink_ext_ack *extack)
{
struct sja1105_gate_entry *e;
int rc;
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e)
return -ENOMEM;
e->rule = rule;
e->gate_state = gate_state;
e->interval = entry_time;
if (list_empty(&gating_cfg->entries)) {
list_add(&e->list, &gating_cfg->entries);
} else {
struct sja1105_gate_entry *p;
list_for_each_entry(p, &gating_cfg->entries, list) {
if (p->interval == e->interval) {
NL_SET_ERR_MSG_MOD(extack,
"Gate conflict");
rc = -EBUSY;
goto err;
}
if (e->interval < p->interval)
break;
}
list_add(&e->list, p->list.prev);
}
gating_cfg->num_entries++;
return 0;
err:
kfree(e);
return rc;
}
/* The gate entries contain absolute times in their e->interval field. Convert
* that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
*/
static void
sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
u64 cycle_time)
{
struct sja1105_gate_entry *last_e;
struct sja1105_gate_entry *e;
struct list_head *prev;
list_for_each_entry(e, &gating_cfg->entries, list) {
struct sja1105_gate_entry *p;
prev = e->list.prev;
if (prev == &gating_cfg->entries)
continue;
p = list_entry(prev, struct sja1105_gate_entry, list);
p->interval = e->interval - p->interval;
}
last_e = list_last_entry(&gating_cfg->entries,
struct sja1105_gate_entry, list);
last_e->interval = cycle_time - last_e->interval;
}
static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
{
struct sja1105_gate_entry *e, *n;
list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
list_del(&e->list);
kfree(e);
}
}
static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
struct netlink_ext_ack *extack)
{
struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
struct sja1105_rule *rule;
s64 max_cycle_time = 0;
s64 its_base_time = 0;
int i, rc = 0;
sja1105_free_gating_config(gating_cfg);
list_for_each_entry(rule, &priv->flow_block.rules, list) {
if (rule->type != SJA1105_RULE_VL)
continue;
if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
continue;
if (max_cycle_time < rule->vl.cycle_time) {
max_cycle_time = rule->vl.cycle_time;
its_base_time = rule->vl.base_time;
}
}
if (!max_cycle_time)
return 0;
dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
max_cycle_time, its_base_time);
gating_cfg->base_time = its_base_time;
gating_cfg->cycle_time = max_cycle_time;
gating_cfg->num_entries = 0;
list_for_each_entry(rule, &priv->flow_block.rules, list) {
s64 time;
s64 rbt;
if (rule->type != SJA1105_RULE_VL)
continue;
if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
continue;
/* Calculate the difference between this gating schedule's
* base time, and the base time of the gating schedule with the
* longest cycle time. We call it the relative base time (rbt).
*/
rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
its_base_time);
rbt -= its_base_time;
time = rbt;
for (i = 0; i < rule->vl.num_entries; i++) {
u8 gate_state = rule->vl.entries[i].gate_state;
s64 entry_time = time;
while (entry_time < max_cycle_time) {
rc = sja1105_insert_gate_entry(gating_cfg, rule,
gate_state,
entry_time,
extack);
if (rc)
goto err;
entry_time += rule->vl.cycle_time;
}
time += rule->vl.entries[i].interval;
}
}
sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
return 0;
err:
sja1105_free_gating_config(gating_cfg);
return rc;
}
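/* Example (sketch): given two time-triggered VLs, one with a 20 us cycle
* and one with a 10 us cycle, the composed subschedule runs on the 20 us
* cycle; the 10 us VL's gate entries are replicated at +0 us and +10 us
* relative to its relative base time, and the absolute times are finally
* turned into back-to-back intervals by
* sja1105_gating_cfg_time_to_interval().
*/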
/* The switch flow classification core implements TTEthernet, which 'thinks' in
* terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
* However it also has one other operating mode (VLLUPFORMAT=0) where it acts
* somewhat closer to a pre-standard implementation of IEEE 802.1Qci
* (Per-Stream Filtering and Policing), which is what the driver is going to be
* implementing.
*
* VL Lookup
* Key = {DMAC && VLANID +---------+ Key = { (DMAC[47:16] & VLMASK ==
* && VLAN PCP | | VLMARKER)
* && INGRESS PORT} +---------+ (both fixed)
* (exact match, | && DMAC[15:0] == VLID
* all specified in rule) | (specified in rule)
* v && INGRESS PORT }
* ------------
* 0 (PSFP) / \ 1 (ARINC664)
* +-----------/ VLLUPFORMAT \----------+
* | \ (fixed) / |
* | \ / |
* 0 (forwarding) v ------------ |
* ------------ |
* / \ 1 (QoS classification) |
* +---/ ISCRITICAL \-----------+ |
* | \ (per rule) / | |
* | \ / VLID taken from VLID taken from
* v ------------ index of rule contents of rule
* select that matched that matched
* DESTPORTS | |
* | +---------+--------+
* | |
* | v
* | VL Forwarding
* | (indexed by VLID)
* | +---------+
* | +--------------| |
* | | select TYPE +---------+
* | v
* | 0 (rate ------------ 1 (time
* | constrained) / \ triggered)
* | +------/ TYPE \------------+
* | | \ (per VLID) / |
* | v \ / v
* | VL Policing ------------ VL Policing
* | (indexed by VLID) (indexed by VLID)
* | +---------+ +---------+
* | | TYPE=0 | | TYPE=1 |
* | +---------+ +---------+
* | select SHARINDX select SHARINDX to
* | to rate-limit re-enter VL Forwarding
* | groups of VL's with new VLID for egress
* | to same quota |
* | | |
* | select MAXLEN -> exceed => drop select MAXLEN -> exceed => drop
* | | |
* | v v
* | VL Forwarding VL Forwarding
* | (indexed by SHARINDX) (indexed by SHARINDX)
* | +---------+ +---------+
* | | TYPE=0 | | TYPE=1 |
* | +---------+ +---------+
* | select PRIORITY, select PRIORITY,
* | PARTITION, DESTPORTS PARTITION, DESTPORTS
* | | |
* | v v
* | VL Policing VL Policing
* | (indexed by SHARINDX) (indexed by SHARINDX)
* | +---------+ +---------+
* | | TYPE=0 | | TYPE=1 |
* | +---------+ +---------+
* | | |
* | v |
* | select BAG, -> exceed => drop |
* | JITTER v
* | | ----------------------------------------------
* | | / Reception Window is open for this VL \
* | | / (the Schedule Table executes an entry i \
* | | / M <= i < N, for which these conditions hold): \ no
* | | +----/ \-+
* | | |yes \ WINST[M] == 1 && WINSTINDEX[M] == VLID / |
* | | | \ WINEND[N] == 1 && WINSTINDEX[N] == VLID / |
* | | | \ / |
* | | | \ (the VL window has opened and not yet closed)/ |
* | | | ---------------------------------------------- |
* | | v v
* | | dispatch to DESTPORTS when the Schedule Table drop
* | | executes an entry i with TXEN == 1 && VLINDEX == i
* v v
* dispatch immediately to DESTPORTS
*
* The per-port classification key is always composed of {DMAC, VID, PCP} and
* is non-maskable. This 'looks like' the NULL stream identification function
* from IEEE 802.1CB clause 6, except for the extra VLAN PCP. When the switch
* ports operate as VLAN-unaware, we do allow the user to not specify the VLAN
* ID and PCP, and then the port-based defaults will be used.
*
* In TTEthernet, routing is something that needs to be done manually for each
* Virtual Link. So the flow action must always include one of:
* a. 'redirect', 'trap' or 'drop': select the egress port list
* Additionally, the following actions may be applied on a Virtual Link,
* turning it into 'critical' traffic:
* b. 'police': turn it into a rate-constrained VL, with bandwidth limitation
* given by the maximum frame length, bandwidth allocation gap (BAG) and
* maximum jitter.
* c. 'gate': turn it into a time-triggered VL, which can only be received
* and forwarded according to a given schedule.
*/
static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
struct sja1105_vl_lookup_entry *b)
{
if (a->macaddr < b->macaddr)
return true;
if (a->macaddr > b->macaddr)
return false;
if (a->vlanid < b->vlanid)
return true;
if (a->vlanid > b->vlanid)
return false;
if (a->port < b->port)
return true;
if (a->port > b->port)
return false;
if (a->vlanprior < b->vlanprior)
return true;
if (a->vlanprior > b->vlanprior)
return false;
/* Keys are equal */
return false;
}
/* FIXME: this should change when the bridge upper of the port changes. */
static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp)
{
unsigned long bridge_num;
if (!dp->bridge)
return dsa_tag_8021q_standalone_vid(dp);
bridge_num = dsa_port_bridge_num_get(dp);
return dsa_tag_8021q_bridge_vid(bridge_num);
}
static int sja1105_init_virtual_links(struct sja1105_private *priv,
struct netlink_ext_ack *extack)
{
struct sja1105_vl_policing_entry *vl_policing;
struct sja1105_vl_forwarding_entry *vl_fwd;
struct sja1105_vl_lookup_entry *vl_lookup;
bool have_critical_virtual_links = false;
struct sja1105_table *table;
struct sja1105_rule *rule;
int num_virtual_links = 0;
int max_sharindx = 0;
int i, j, k;
/* Figure out the dimensioning of the problem */
list_for_each_entry(rule, &priv->flow_block.rules, list) {
if (rule->type != SJA1105_RULE_VL)
continue;
/* Each VL lookup entry matches on a single ingress port */
num_virtual_links += hweight_long(rule->port_mask);
if (rule->vl.type != SJA1105_VL_NONCRITICAL)
have_critical_virtual_links = true;
if (max_sharindx < rule->vl.sharindx)
max_sharindx = rule->vl.sharindx;
}
if (num_virtual_links > SJA1105_MAX_VL_LOOKUP_COUNT) {
NL_SET_ERR_MSG_MOD(extack, "Not enough VL entries available");
return -ENOSPC;
}
if (max_sharindx + 1 > SJA1105_MAX_VL_LOOKUP_COUNT) {
NL_SET_ERR_MSG_MOD(extack, "Policer index out of range");
return -ENOSPC;
}
max_sharindx = max_t(int, num_virtual_links, max_sharindx) + 1;
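/* SHARINDX values index the VL Policing and VL Forwarding tables, so those
* tables must be sized to the largest index plus one, and to at least one
* entry per virtual link.
*/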
/* Discard previous VL Lookup Table */
table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
/* Discard previous VL Policing Table */
table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
/* Discard previous VL Forwarding Table */
table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
/* Discard previous VL Forwarding Parameters Table */
table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
/* Nothing to do */
if (!num_virtual_links)
return 0;
/* Pre-allocate space in the static config tables */
/* VL Lookup Table */
table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
table->entries = kcalloc(num_virtual_links,
table->ops->unpacked_entry_size,
GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = num_virtual_links;
vl_lookup = table->entries;
k = 0;
list_for_each_entry(rule, &priv->flow_block.rules, list) {
unsigned long port;
if (rule->type != SJA1105_RULE_VL)
continue;
for_each_set_bit(port, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
vl_lookup[k].port = port;
vl_lookup[k].macaddr = rule->key.vl.dmac;
if (rule->key.type == SJA1105_KEY_VLAN_AWARE_VL) {
vl_lookup[k].vlanid = rule->key.vl.vid;
vl_lookup[k].vlanprior = rule->key.vl.pcp;
} else {
/* FIXME */
struct dsa_port *dp = dsa_to_port(priv->ds, port);
u16 vid = sja1105_port_get_tag_8021q_vid(dp);
vl_lookup[k].vlanid = vid;
vl_lookup[k].vlanprior = 0;
}
/* For critical VLs, the DESTPORTS mask is taken from
* the VL Forwarding Table, so no point in putting it
* in the VL Lookup Table
*/
if (rule->vl.type == SJA1105_VL_NONCRITICAL)
vl_lookup[k].destports = rule->vl.destports;
else
vl_lookup[k].iscritical = true;
vl_lookup[k].flow_cookie = rule->cookie;
k++;
}
}
/* UM10944.pdf chapter 4.2.3 VL Lookup table:
* "the entries in the VL Lookup table must be sorted in ascending
* order (i.e. the smallest value must be loaded first) according to
* the following sort order: MACADDR, VLANID, PORT, VLANPRIOR."
*/
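/* Simple O(n^2) exchange sort; fine here since the table is bounded by
* SJA1105_MAX_VL_LOOKUP_COUNT entries and this only runs on rule changes.
*/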
for (i = 0; i < num_virtual_links; i++) {
struct sja1105_vl_lookup_entry *a = &vl_lookup[i];
for (j = i + 1; j < num_virtual_links; j++) {
struct sja1105_vl_lookup_entry *b = &vl_lookup[j];
if (sja1105_vl_key_lower(b, a)) {
struct sja1105_vl_lookup_entry tmp = *a;
*a = *b;
*b = tmp;
}
}
}
if (!have_critical_virtual_links)
return 0;
/* VL Policing Table */
table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = max_sharindx;
vl_policing = table->entries;
/* VL Forwarding Table */
table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = max_sharindx;
vl_fwd = table->entries;
/* VL Forwarding Parameters Table */
table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
table->entries = kcalloc(1, table->ops->unpacked_entry_size,
GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = 1;
for (i = 0; i < num_virtual_links; i++) {
unsigned long cookie = vl_lookup[i].flow_cookie;
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
if (rule->vl.type == SJA1105_VL_NONCRITICAL)
continue;
if (rule->vl.type == SJA1105_VL_TIME_TRIGGERED) {
int sharindx = rule->vl.sharindx;
vl_policing[i].type = 1;
vl_policing[i].sharindx = sharindx;
vl_policing[i].maxlen = rule->vl.maxlen;
vl_policing[sharindx].type = 1;
vl_fwd[i].type = 1;
vl_fwd[sharindx].type = 1;
vl_fwd[sharindx].priority = rule->vl.ipv;
vl_fwd[sharindx].partition = 0;
vl_fwd[sharindx].destports = rule->vl.destports;
}
}
sja1105_frame_memory_partitioning(priv);
return 0;
}
int sja1105_vl_redirect(struct sja1105_private *priv, int port,
struct netlink_ext_ack *extack, unsigned long cookie,
struct sja1105_key *key, unsigned long destports,
bool append)
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
struct dsa_port *dp = dsa_to_port(priv->ds, port);
bool vlan_aware = dsa_port_is_vlan_filtering(dp);
int rc;
if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on DMAC");
return -EOPNOTSUPP;
} else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
}
if (!rule) {
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
return -ENOMEM;
rule->cookie = cookie;
rule->type = SJA1105_RULE_VL;
rule->key = *key;
list_add(&rule->list, &priv->flow_block.rules);
}
rule->port_mask |= BIT(port);
if (append)
rule->vl.destports |= destports;
else
rule->vl.destports = destports;
rc = sja1105_init_virtual_links(priv, extack);
if (rc) {
rule->port_mask &= ~BIT(port);
if (!rule->port_mask) {
list_del(&rule->list);
kfree(rule);
}
}
return rc;
}
int sja1105_vl_delete(struct sja1105_private *priv, int port,
struct sja1105_rule *rule, struct netlink_ext_ack *extack)
{
int rc;
rule->port_mask &= ~BIT(port);
if (!rule->port_mask) {
list_del(&rule->list);
kfree(rule);
}
rc = sja1105_compose_gating_subschedule(priv, extack);
if (rc)
return rc;
rc = sja1105_init_virtual_links(priv, extack);
if (rc)
return rc;
rc = sja1105_init_scheduling(priv);
if (rc < 0)
return rc;
return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
}
int sja1105_vl_gate(struct sja1105_private *priv, int port,
struct netlink_ext_ack *extack, unsigned long cookie,
struct sja1105_key *key, u32 index, s32 prio,
u64 base_time, u64 cycle_time, u64 cycle_time_ext,
u32 num_entries, struct action_gate_entry *entries)
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
struct dsa_port *dp = dsa_to_port(priv->ds, port);
bool vlan_aware = dsa_port_is_vlan_filtering(dp);
int ipv = -1;
int i, rc;
s32 rem;
if (cycle_time_ext) {
NL_SET_ERR_MSG_MOD(extack,
"Cycle time extension not supported");
return -EOPNOTSUPP;
}
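/* sja1105_delta_to_ns(1) evaluates to the 200 ns scheduling tick, so the
* checks below enforce that the base time, cycle time and every gate
* interval are aligned to that granularity.
*/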
div_s64_rem(base_time, sja1105_delta_to_ns(1), &rem);
if (rem) {
NL_SET_ERR_MSG_MOD(extack,
"Base time must be multiple of 200 ns");
return -ERANGE;
}
div_s64_rem(cycle_time, sja1105_delta_to_ns(1), &rem);
if (rem) {
NL_SET_ERR_MSG_MOD(extack,
"Cycle time must be multiple of 200 ns");
return -ERANGE;
}
if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on DMAC");
return -EOPNOTSUPP;
} else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
}
if (!rule) {
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
return -ENOMEM;
list_add(&rule->list, &priv->flow_block.rules);
rule->cookie = cookie;
rule->type = SJA1105_RULE_VL;
rule->key = *key;
rule->vl.type = SJA1105_VL_TIME_TRIGGERED;
rule->vl.sharindx = index;
rule->vl.base_time = base_time;
rule->vl.cycle_time = cycle_time;
rule->vl.num_entries = num_entries;
rule->vl.entries = kcalloc(num_entries,
sizeof(struct action_gate_entry),
GFP_KERNEL);
if (!rule->vl.entries) {
rc = -ENOMEM;
goto out;
}
for (i = 0; i < num_entries; i++) {
div_s64_rem(entries[i].interval,
sja1105_delta_to_ns(1), &rem);
if (rem) {
NL_SET_ERR_MSG_MOD(extack,
"Interval must be multiple of 200 ns");
rc = -ERANGE;
goto out;
}
if (!entries[i].interval) {
NL_SET_ERR_MSG_MOD(extack,
"Interval cannot be zero");
rc = -ERANGE;
goto out;
}
if (ns_to_sja1105_delta(entries[i].interval) >
SJA1105_TAS_MAX_DELTA) {
NL_SET_ERR_MSG_MOD(extack,
"Maximum interval is 52 ms");
rc = -ERANGE;
goto out;
}
if (entries[i].maxoctets != -1) {
NL_SET_ERR_MSG_MOD(extack,
"Cannot offload IntervalOctetMax");
rc = -EOPNOTSUPP;
goto out;
}
if (ipv == -1) {
ipv = entries[i].ipv;
} else if (ipv != entries[i].ipv) {
NL_SET_ERR_MSG_MOD(extack,
"Only support a single IPV per VL");
rc = -EOPNOTSUPP;
goto out;
}
rule->vl.entries[i] = entries[i];
}
if (ipv == -1) {
if (key->type == SJA1105_KEY_VLAN_AWARE_VL)
ipv = key->vl.pcp;
else
ipv = 0;
}
/* TODO: support per-flow MTU */
rule->vl.maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
rule->vl.ipv = ipv;
}
rule->port_mask |= BIT(port);
rc = sja1105_compose_gating_subschedule(priv, extack);
if (rc)
goto out;
rc = sja1105_init_virtual_links(priv, extack);
if (rc)
goto out;
if (sja1105_gating_check_conflicts(priv, -1, extack)) {
NL_SET_ERR_MSG_MOD(extack, "Conflict with tc-taprio schedule");
rc = -ERANGE;
goto out;
}
out:
if (rc) {
rule->port_mask &= ~BIT(port);
if (!rule->port_mask) {
list_del(&rule->list);
kfree(rule->vl.entries);
kfree(rule);
}
}
return rc;
}
static int sja1105_find_vlid(struct sja1105_private *priv, int port,
struct sja1105_key *key)
{
struct sja1105_vl_lookup_entry *vl_lookup;
struct sja1105_table *table;
int i;
if (WARN_ON(key->type != SJA1105_KEY_VLAN_AWARE_VL &&
key->type != SJA1105_KEY_VLAN_UNAWARE_VL))
return -1;
table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
vl_lookup = table->entries;
for (i = 0; i < table->entry_count; i++) {
if (key->type == SJA1105_KEY_VLAN_AWARE_VL) {
if (vl_lookup[i].port == port &&
vl_lookup[i].macaddr == key->vl.dmac &&
vl_lookup[i].vlanid == key->vl.vid &&
vl_lookup[i].vlanprior == key->vl.pcp)
return i;
} else {
if (vl_lookup[i].port == port &&
vl_lookup[i].macaddr == key->vl.dmac)
return i;
}
}
return -1;
}
int sja1105_vl_stats(struct sja1105_private *priv, int port,
struct sja1105_rule *rule, struct flow_stats *stats,
struct netlink_ext_ack *extack)
{
const struct sja1105_regs *regs = priv->info->regs;
u8 buf[SJA1105_SIZE_VL_STATUS] = {0};
u64 unreleased;
u64 timingerr;
u64 lengtherr;
int vlid, rc;
u64 pkts;
if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
return 0;
vlid = sja1105_find_vlid(priv, port, &rule->key);
if (vlid < 0)
return 0;
rc = sja1105_xfer_buf(priv, SPI_READ, regs->vl_status + 2 * vlid, buf,
SJA1105_SIZE_VL_STATUS);
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "SPI access failed");
return rc;
}
sja1105_unpack(buf, &timingerr, 31, 16, SJA1105_SIZE_VL_STATUS);
sja1105_unpack(buf, &unreleased, 15, 0, SJA1105_SIZE_VL_STATUS);
sja1105_unpack(buf, &lengtherr, 47, 32, SJA1105_SIZE_VL_STATUS);
pkts = timingerr + unreleased + lengtherr;
flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, 0,
jiffies - rule->vl.stats.lastused,
FLOW_ACTION_HW_STATS_IMMEDIATE);
rule->vl.stats.pkts = pkts;
rule->vl.stats.lastused = jiffies;
return 0;
}
| linux-master | drivers/net/dsa/sja1105/sja1105_vl.c |
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
#include "sja1105_static_config.h"
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
/* Convenience wrappers over the generic packing functions. These take into
* account the SJA1105 memory layout quirks and provide some level of
* programmer protection against incorrect API use. The errors are not expected
* to occur during runtime, therefore printing and swallowing them here is
* appropriate instead of cluttering up higher-level code.
*/
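/* Minimal usage sketch (illustrative only): packing the 48-bit DMAC of a
* VL Lookup entry into bits 89..42 of its on-chip image, matching the field
* layout in sja1105_vl_lookup_entry_packing(), would look like:
*
*   u8 buf[SJA1105_SIZE_VL_LOOKUP_ENTRY] = {0};
*   u64 dmac = 0x001094000001ull;
*
*   sja1105_pack(buf, &dmac, 89, 42, SJA1105_SIZE_VL_LOOKUP_ENTRY);
*/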
void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len)
{
int rc = packing(buf, (u64 *)val, start, end, len,
PACK, QUIRK_LSW32_IS_FIRST);
if (likely(!rc))
return;
if (rc == -EINVAL) {
pr_err("Start bit (%d) expected to be larger than end (%d)\n",
start, end);
} else if (rc == -ERANGE) {
if ((start - end + 1) > 64)
pr_err("Field %d-%d too large for 64 bits!\n",
start, end);
else
pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
*val, start, end);
}
dump_stack();
}
void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len)
{
int rc = packing((void *)buf, val, start, end, len,
UNPACK, QUIRK_LSW32_IS_FIRST);
if (likely(!rc))
return;
if (rc == -EINVAL)
pr_err("Start bit (%d) expected to be larger than end (%d)\n",
start, end);
else if (rc == -ERANGE)
pr_err("Field %d-%d too large for 64 bits!\n",
start, end);
dump_stack();
}
void sja1105_packing(void *buf, u64 *val, int start, int end,
size_t len, enum packing_op op)
{
int rc = packing(buf, val, start, end, len, op, QUIRK_LSW32_IS_FIRST);
if (likely(!rc))
return;
if (rc == -EINVAL) {
pr_err("Start bit (%d) expected to be larger than end (%d)\n",
start, end);
} else if (rc == -ERANGE) {
if ((start - end + 1) > 64)
pr_err("Field %d-%d too large for 64 bits!\n",
start, end);
else
pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
*val, start, end);
}
dump_stack();
}
/* Little-endian Ethernet CRC32 of data packed as big-endian u32 words */
u32 sja1105_crc32(const void *buf, size_t len)
{
unsigned int i;
u64 word;
u32 crc;
/* seed */
crc = ~0;
for (i = 0; i < len; i += 4) {
sja1105_unpack(buf + i, &word, 31, 0, 4);
crc = crc32_le(crc, (u8 *)&word, 4);
}
return ~crc;
}
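/* Usage sketch (illustrative): sja1105_crc32(buf, len) yields the Ethernet
* CRC over @len bytes of @buf read as big-endian u32 words, which is the
* checksum format used for the static config table headers and data areas.
*/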
static size_t sja1105et_avb_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY;
struct sja1105_avb_params_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->destmeta, 95, 48, size, op);
sja1105_packing(buf, &entry->srcmeta, 47, 0, size, op);
return size;
}
size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
struct sja1105_avb_params_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->cas_master, 126, 126, size, op);
sja1105_packing(buf, &entry->destmeta, 125, 78, size, op);
sja1105_packing(buf, &entry->srcmeta, 77, 30, size, op);
return size;
}
static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY;
struct sja1105_general_params_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->vllupformat, 319, 319, size, op);
sja1105_packing(buf, &entry->mirr_ptacu, 318, 318, size, op);
sja1105_packing(buf, &entry->switchid, 317, 315, size, op);
sja1105_packing(buf, &entry->hostprio, 314, 312, size, op);
sja1105_packing(buf, &entry->mac_fltres1, 311, 264, size, op);
sja1105_packing(buf, &entry->mac_fltres0, 263, 216, size, op);
sja1105_packing(buf, &entry->mac_flt1, 215, 168, size, op);
sja1105_packing(buf, &entry->mac_flt0, 167, 120, size, op);
sja1105_packing(buf, &entry->incl_srcpt1, 119, 119, size, op);
sja1105_packing(buf, &entry->incl_srcpt0, 118, 118, size, op);
sja1105_packing(buf, &entry->send_meta1, 117, 117, size, op);
sja1105_packing(buf, &entry->send_meta0, 116, 116, size, op);
sja1105_packing(buf, &entry->casc_port, 115, 113, size, op);
sja1105_packing(buf, &entry->host_port, 112, 110, size, op);
sja1105_packing(buf, &entry->mirr_port, 109, 107, size, op);
sja1105_packing(buf, &entry->vlmarker, 106, 75, size, op);
sja1105_packing(buf, &entry->vlmask, 74, 43, size, op);
sja1105_packing(buf, &entry->tpid, 42, 27, size, op);
sja1105_packing(buf, &entry->ignore2stf, 26, 26, size, op);
sja1105_packing(buf, &entry->tpid2, 25, 10, size, op);
return size;
}
/* TPID and TPID2 are intentionally reversed so that semantic
* compatibility with E/T is kept.
*/
size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY;
struct sja1105_general_params_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->vllupformat, 351, 351, size, op);
sja1105_packing(buf, &entry->mirr_ptacu, 350, 350, size, op);
sja1105_packing(buf, &entry->switchid, 349, 347, size, op);
sja1105_packing(buf, &entry->hostprio, 346, 344, size, op);
sja1105_packing(buf, &entry->mac_fltres1, 343, 296, size, op);
sja1105_packing(buf, &entry->mac_fltres0, 295, 248, size, op);
sja1105_packing(buf, &entry->mac_flt1, 247, 200, size, op);
sja1105_packing(buf, &entry->mac_flt0, 199, 152, size, op);
sja1105_packing(buf, &entry->incl_srcpt1, 151, 151, size, op);
sja1105_packing(buf, &entry->incl_srcpt0, 150, 150, size, op);
sja1105_packing(buf, &entry->send_meta1, 149, 149, size, op);
sja1105_packing(buf, &entry->send_meta0, 148, 148, size, op);
sja1105_packing(buf, &entry->casc_port, 147, 145, size, op);
sja1105_packing(buf, &entry->host_port, 144, 142, size, op);
sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op);
sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op);
sja1105_packing(buf, &entry->vlmask, 106, 75, size, op);
sja1105_packing(buf, &entry->tpid2, 74, 59, size, op);
sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op);
sja1105_packing(buf, &entry->tpid, 57, 42, size, op);
sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op);
sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op);
sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
sja1105_packing(buf, &entry->egrmirrdei, 25, 25, size, op);
sja1105_packing(buf, &entry->replay_port, 24, 22, size, op);
return size;
}
size_t sja1110_general_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_general_params_entry *entry = entry_ptr;
const size_t size = SJA1110_SIZE_GENERAL_PARAMS_ENTRY;
sja1105_packing(buf, &entry->vllupformat, 447, 447, size, op);
sja1105_packing(buf, &entry->mirr_ptacu, 446, 446, size, op);
sja1105_packing(buf, &entry->switchid, 445, 442, size, op);
sja1105_packing(buf, &entry->hostprio, 441, 439, size, op);
sja1105_packing(buf, &entry->mac_fltres1, 438, 391, size, op);
sja1105_packing(buf, &entry->mac_fltres0, 390, 343, size, op);
sja1105_packing(buf, &entry->mac_flt1, 342, 295, size, op);
sja1105_packing(buf, &entry->mac_flt0, 294, 247, size, op);
sja1105_packing(buf, &entry->incl_srcpt1, 246, 246, size, op);
sja1105_packing(buf, &entry->incl_srcpt0, 245, 245, size, op);
sja1105_packing(buf, &entry->send_meta1, 244, 244, size, op);
sja1105_packing(buf, &entry->send_meta0, 243, 243, size, op);
sja1105_packing(buf, &entry->casc_port, 242, 232, size, op);
sja1105_packing(buf, &entry->host_port, 231, 228, size, op);
sja1105_packing(buf, &entry->mirr_port, 227, 224, size, op);
sja1105_packing(buf, &entry->vlmarker, 223, 192, size, op);
sja1105_packing(buf, &entry->vlmask, 191, 160, size, op);
sja1105_packing(buf, &entry->tpid2, 159, 144, size, op);
sja1105_packing(buf, &entry->ignore2stf, 143, 143, size, op);
sja1105_packing(buf, &entry->tpid, 142, 127, size, op);
sja1105_packing(buf, &entry->queue_ts, 126, 126, size, op);
sja1105_packing(buf, &entry->egrmirrvid, 125, 114, size, op);
sja1105_packing(buf, &entry->egrmirrpcp, 113, 111, size, op);
sja1105_packing(buf, &entry->egrmirrdei, 110, 110, size, op);
sja1105_packing(buf, &entry->replay_port, 109, 106, size, op);
sja1105_packing(buf, &entry->tdmaconfigidx, 70, 67, size, op);
sja1105_packing(buf, &entry->header_type, 64, 49, size, op);
sja1105_packing(buf, &entry->tte_en, 16, 16, size, op);
return size;
}
static size_t
sja1105_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY;
struct sja1105_l2_forwarding_params_entry *entry = entry_ptr;
int offset, i;
sja1105_packing(buf, &entry->max_dynp, 95, 93, size, op);
for (i = 0, offset = 13; i < 8; i++, offset += 10)
sja1105_packing(buf, &entry->part_spc[i],
offset + 9, offset + 0, size, op);
return size;
}
size_t sja1110_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_l2_forwarding_params_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY;
int offset, i;
sja1105_packing(buf, &entry->max_dynp, 95, 93, size, op);
for (i = 0, offset = 5; i < 8; i++, offset += 11)
sja1105_packing(buf, &entry->part_spc[i],
offset + 10, offset + 0, size, op);
return size;
}
size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105_SIZE_L2_FORWARDING_ENTRY;
struct sja1105_l2_forwarding_entry *entry = entry_ptr;
int offset, i;
sja1105_packing(buf, &entry->bc_domain, 63, 59, size, op);
sja1105_packing(buf, &entry->reach_port, 58, 54, size, op);
sja1105_packing(buf, &entry->fl_domain, 53, 49, size, op);
for (i = 0, offset = 25; i < 8; i++, offset += 3)
sja1105_packing(buf, &entry->vlan_pmap[i],
offset + 2, offset + 0, size, op);
return size;
}
size_t sja1110_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_l2_forwarding_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_L2_FORWARDING_ENTRY;
int offset, i;
if (entry->type_egrpcp2outputq) {
for (i = 0, offset = 31; i < SJA1110_NUM_PORTS;
i++, offset += 3) {
sja1105_packing(buf, &entry->vlan_pmap[i],
offset + 2, offset + 0, size, op);
}
} else {
sja1105_packing(buf, &entry->bc_domain, 63, 53, size, op);
sja1105_packing(buf, &entry->reach_port, 52, 42, size, op);
sja1105_packing(buf, &entry->fl_domain, 41, 31, size, op);
}
return size;
}
static size_t
sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY;
struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->maxage, 31, 17, size, op);
sja1105_packing(buf, &entry->dyn_tbsz, 16, 14, size, op);
sja1105_packing(buf, &entry->poly, 13, 6, size, op);
sja1105_packing(buf, &entry->shared_learn, 5, 5, size, op);
sja1105_packing(buf, &entry->no_enf_hostprt, 4, 4, size, op);
sja1105_packing(buf, &entry->no_mgmt_learn, 3, 3, size, op);
return size;
}
size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY;
struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
int offset, i;
for (i = 0, offset = 58; i < 5; i++, offset += 11)
sja1105_packing(buf, &entry->maxaddrp[i],
offset + 10, offset + 0, size, op);
sja1105_packing(buf, &entry->maxage, 57, 43, size, op);
sja1105_packing(buf, &entry->start_dynspc, 42, 33, size, op);
sja1105_packing(buf, &entry->drpnolearn, 32, 28, size, op);
sja1105_packing(buf, &entry->shared_learn, 27, 27, size, op);
sja1105_packing(buf, &entry->no_enf_hostprt, 26, 26, size, op);
sja1105_packing(buf, &entry->no_mgmt_learn, 25, 25, size, op);
sja1105_packing(buf, &entry->use_static, 24, 24, size, op);
sja1105_packing(buf, &entry->owr_dyn, 23, 23, size, op);
sja1105_packing(buf, &entry->learn_once, 22, 22, size, op);
return size;
}
size_t sja1110_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
const size_t size = SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY;
int offset, i;
for (i = 0, offset = 70; i < SJA1110_NUM_PORTS; i++, offset += 11)
sja1105_packing(buf, &entry->maxaddrp[i],
offset + 10, offset + 0, size, op);
sja1105_packing(buf, &entry->maxage, 69, 55, size, op);
sja1105_packing(buf, &entry->start_dynspc, 54, 45, size, op);
sja1105_packing(buf, &entry->drpnolearn, 44, 34, size, op);
sja1105_packing(buf, &entry->shared_learn, 33, 33, size, op);
sja1105_packing(buf, &entry->no_enf_hostprt, 32, 32, size, op);
sja1105_packing(buf, &entry->no_mgmt_learn, 31, 31, size, op);
sja1105_packing(buf, &entry->use_static, 30, 30, size, op);
sja1105_packing(buf, &entry->owr_dyn, 29, 29, size, op);
sja1105_packing(buf, &entry->learn_once, 28, 28, size, op);
return size;
}
size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
struct sja1105_l2_lookup_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->vlanid, 95, 84, size, op);
sja1105_packing(buf, &entry->macaddr, 83, 36, size, op);
sja1105_packing(buf, &entry->destports, 35, 31, size, op);
sja1105_packing(buf, &entry->enfport, 30, 30, size, op);
sja1105_packing(buf, &entry->index, 29, 20, size, op);
return size;
}
size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
struct sja1105_l2_lookup_entry *entry = entry_ptr;
if (entry->lockeds) {
sja1105_packing(buf, &entry->tsreg, 159, 159, size, op);
sja1105_packing(buf, &entry->mirrvlan, 158, 147, size, op);
sja1105_packing(buf, &entry->takets, 146, 146, size, op);
sja1105_packing(buf, &entry->mirr, 145, 145, size, op);
sja1105_packing(buf, &entry->retag, 144, 144, size, op);
} else {
sja1105_packing(buf, &entry->touched, 159, 159, size, op);
sja1105_packing(buf, &entry->age, 158, 144, size, op);
}
sja1105_packing(buf, &entry->mask_iotag, 143, 143, size, op);
sja1105_packing(buf, &entry->mask_vlanid, 142, 131, size, op);
sja1105_packing(buf, &entry->mask_macaddr, 130, 83, size, op);
sja1105_packing(buf, &entry->iotag, 82, 82, size, op);
sja1105_packing(buf, &entry->vlanid, 81, 70, size, op);
sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
sja1105_packing(buf, &entry->destports, 21, 17, size, op);
sja1105_packing(buf, &entry->enfport, 16, 16, size, op);
sja1105_packing(buf, &entry->index, 15, 6, size, op);
return size;
}
size_t sja1110_l2_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
struct sja1105_l2_lookup_entry *entry = entry_ptr;
if (entry->lockeds) {
sja1105_packing(buf, &entry->trap, 168, 168, size, op);
sja1105_packing(buf, &entry->mirrvlan, 167, 156, size, op);
sja1105_packing(buf, &entry->takets, 155, 155, size, op);
sja1105_packing(buf, &entry->mirr, 154, 154, size, op);
sja1105_packing(buf, &entry->retag, 153, 153, size, op);
} else {
sja1105_packing(buf, &entry->touched, 168, 168, size, op);
sja1105_packing(buf, &entry->age, 167, 153, size, op);
}
sja1105_packing(buf, &entry->mask_iotag, 152, 152, size, op);
sja1105_packing(buf, &entry->mask_vlanid, 151, 140, size, op);
sja1105_packing(buf, &entry->mask_macaddr, 139, 92, size, op);
sja1105_packing(buf, &entry->mask_srcport, 91, 88, size, op);
sja1105_packing(buf, &entry->iotag, 87, 87, size, op);
sja1105_packing(buf, &entry->vlanid, 86, 75, size, op);
sja1105_packing(buf, &entry->macaddr, 74, 27, size, op);
sja1105_packing(buf, &entry->srcport, 26, 23, size, op);
sja1105_packing(buf, &entry->destports, 22, 12, size, op);
sja1105_packing(buf, &entry->enfport, 11, 11, size, op);
sja1105_packing(buf, &entry->index, 10, 1, size, op);
return size;
}
static size_t sja1105_l2_policing_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105_SIZE_L2_POLICING_ENTRY;
struct sja1105_l2_policing_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->sharindx, 63, 58, size, op);
sja1105_packing(buf, &entry->smax, 57, 42, size, op);
sja1105_packing(buf, &entry->rate, 41, 26, size, op);
sja1105_packing(buf, &entry->maxlen, 25, 15, size, op);
sja1105_packing(buf, &entry->partition, 14, 12, size, op);
return size;
}
size_t sja1110_l2_policing_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_l2_policing_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_L2_POLICING_ENTRY;
sja1105_packing(buf, &entry->sharindx, 63, 57, size, op);
sja1105_packing(buf, &entry->smax, 56, 39, size, op);
sja1105_packing(buf, &entry->rate, 38, 21, size, op);
sja1105_packing(buf, &entry->maxlen, 20, 10, size, op);
sja1105_packing(buf, &entry->partition, 9, 7, size, op);
return size;
}
static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY;
struct sja1105_mac_config_entry *entry = entry_ptr;
int offset, i;
for (i = 0, offset = 72; i < 8; i++, offset += 19) {
sja1105_packing(buf, &entry->enabled[i],
offset + 0, offset + 0, size, op);
sja1105_packing(buf, &entry->base[i],
offset + 9, offset + 1, size, op);
sja1105_packing(buf, &entry->top[i],
offset + 18, offset + 10, size, op);
}
sja1105_packing(buf, &entry->ifg, 71, 67, size, op);
sja1105_packing(buf, &entry->speed, 66, 65, size, op);
sja1105_packing(buf, &entry->tp_delin, 64, 49, size, op);
sja1105_packing(buf, &entry->tp_delout, 48, 33, size, op);
sja1105_packing(buf, &entry->maxage, 32, 25, size, op);
sja1105_packing(buf, &entry->vlanprio, 24, 22, size, op);
sja1105_packing(buf, &entry->vlanid, 21, 10, size, op);
sja1105_packing(buf, &entry->ing_mirr, 9, 9, size, op);
sja1105_packing(buf, &entry->egr_mirr, 8, 8, size, op);
sja1105_packing(buf, &entry->drpnona664, 7, 7, size, op);
sja1105_packing(buf, &entry->drpdtag, 6, 6, size, op);
sja1105_packing(buf, &entry->drpuntag, 5, 5, size, op);
sja1105_packing(buf, &entry->retag, 4, 4, size, op);
sja1105_packing(buf, &entry->dyn_learn, 3, 3, size, op);
sja1105_packing(buf, &entry->egress, 2, 2, size, op);
sja1105_packing(buf, &entry->ingress, 1, 1, size, op);
return size;
}
size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
struct sja1105_mac_config_entry *entry = entry_ptr;
int offset, i;
for (i = 0, offset = 104; i < 8; i++, offset += 19) {
sja1105_packing(buf, &entry->enabled[i],
offset + 0, offset + 0, size, op);
sja1105_packing(buf, &entry->base[i],
offset + 9, offset + 1, size, op);
sja1105_packing(buf, &entry->top[i],
offset + 18, offset + 10, size, op);
}
sja1105_packing(buf, &entry->ifg, 103, 99, size, op);
sja1105_packing(buf, &entry->speed, 98, 97, size, op);
sja1105_packing(buf, &entry->tp_delin, 96, 81, size, op);
sja1105_packing(buf, &entry->tp_delout, 80, 65, size, op);
sja1105_packing(buf, &entry->maxage, 64, 57, size, op);
sja1105_packing(buf, &entry->vlanprio, 56, 54, size, op);
sja1105_packing(buf, &entry->vlanid, 53, 42, size, op);
sja1105_packing(buf, &entry->ing_mirr, 41, 41, size, op);
sja1105_packing(buf, &entry->egr_mirr, 40, 40, size, op);
sja1105_packing(buf, &entry->drpnona664, 39, 39, size, op);
sja1105_packing(buf, &entry->drpdtag, 38, 38, size, op);
sja1105_packing(buf, &entry->drpuntag, 35, 35, size, op);
sja1105_packing(buf, &entry->retag, 34, 34, size, op);
sja1105_packing(buf, &entry->dyn_learn, 33, 33, size, op);
sja1105_packing(buf, &entry->egress, 32, 32, size, op);
sja1105_packing(buf, &entry->ingress, 31, 31, size, op);
return size;
}
size_t sja1110_mac_config_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
struct sja1105_mac_config_entry *entry = entry_ptr;
int offset, i;
for (i = 0, offset = 104; i < 8; i++, offset += 19) {
sja1105_packing(buf, &entry->enabled[i],
offset + 0, offset + 0, size, op);
sja1105_packing(buf, &entry->base[i],
offset + 9, offset + 1, size, op);
sja1105_packing(buf, &entry->top[i],
offset + 18, offset + 10, size, op);
}
sja1105_packing(buf, &entry->speed, 98, 96, size, op);
sja1105_packing(buf, &entry->tp_delin, 95, 80, size, op);
sja1105_packing(buf, &entry->tp_delout, 79, 64, size, op);
sja1105_packing(buf, &entry->maxage, 63, 56, size, op);
sja1105_packing(buf, &entry->vlanprio, 55, 53, size, op);
sja1105_packing(buf, &entry->vlanid, 52, 41, size, op);
sja1105_packing(buf, &entry->ing_mirr, 40, 40, size, op);
sja1105_packing(buf, &entry->egr_mirr, 39, 39, size, op);
sja1105_packing(buf, &entry->drpnona664, 38, 38, size, op);
sja1105_packing(buf, &entry->drpdtag, 37, 37, size, op);
sja1105_packing(buf, &entry->drpuntag, 34, 34, size, op);
sja1105_packing(buf, &entry->retag, 33, 33, size, op);
sja1105_packing(buf, &entry->dyn_learn, 32, 32, size, op);
sja1105_packing(buf, &entry->egress, 31, 31, size, op);
sja1105_packing(buf, &entry->ingress, 30, 30, size, op);
sja1105_packing(buf, &entry->ifg, 10, 5, size, op);
return size;
}
static size_t
sja1105_schedule_entry_points_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_schedule_entry_points_params_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY;
sja1105_packing(buf, &entry->clksrc, 31, 30, size, op);
sja1105_packing(buf, &entry->actsubsch, 29, 27, size, op);
return size;
}
static size_t
sja1105_schedule_entry_points_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_schedule_entry_points_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY;
sja1105_packing(buf, &entry->subschindx, 31, 29, size, op);
sja1105_packing(buf, &entry->delta, 28, 11, size, op);
sja1105_packing(buf, &entry->address, 10, 1, size, op);
return size;
}
static size_t
sja1110_schedule_entry_points_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_schedule_entry_points_entry *entry = entry_ptr;
const size_t size = SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY;
sja1105_packing(buf, &entry->subschindx, 63, 61, size, op);
sja1105_packing(buf, &entry->delta, 60, 43, size, op);
sja1105_packing(buf, &entry->address, 42, 31, size, op);
return size;
}
static size_t sja1105_schedule_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY;
struct sja1105_schedule_params_entry *entry = entry_ptr;
int offset, i;
for (i = 0, offset = 16; i < 8; i++, offset += 10)
sja1105_packing(buf, &entry->subscheind[i],
offset + 9, offset + 0, size, op);
return size;
}
static size_t sja1110_schedule_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_schedule_params_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY;
int offset, i;
for (i = 0, offset = 0; i < 8; i++, offset += 12)
sja1105_packing(buf, &entry->subscheind[i],
offset + 11, offset + 0, size, op);
return size;
}
static size_t sja1105_schedule_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY;
struct sja1105_schedule_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->winstindex, 63, 54, size, op);
sja1105_packing(buf, &entry->winend, 53, 53, size, op);
sja1105_packing(buf, &entry->winst, 52, 52, size, op);
sja1105_packing(buf, &entry->destports, 51, 47, size, op);
sja1105_packing(buf, &entry->setvalid, 46, 46, size, op);
sja1105_packing(buf, &entry->txen, 45, 45, size, op);
sja1105_packing(buf, &entry->resmedia_en, 44, 44, size, op);
sja1105_packing(buf, &entry->resmedia, 43, 36, size, op);
sja1105_packing(buf, &entry->vlindex, 35, 26, size, op);
sja1105_packing(buf, &entry->delta, 25, 8, size, op);
return size;
}
static size_t sja1110_schedule_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1110_SIZE_SCHEDULE_ENTRY;
struct sja1105_schedule_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->winstindex, 95, 84, size, op);
sja1105_packing(buf, &entry->winend, 83, 83, size, op);
sja1105_packing(buf, &entry->winst, 82, 82, size, op);
sja1105_packing(buf, &entry->destports, 81, 71, size, op);
sja1105_packing(buf, &entry->setvalid, 70, 70, size, op);
sja1105_packing(buf, &entry->txen, 69, 69, size, op);
sja1105_packing(buf, &entry->resmedia_en, 68, 68, size, op);
sja1105_packing(buf, &entry->resmedia, 67, 60, size, op);
sja1105_packing(buf, &entry->vlindex, 59, 48, size, op);
sja1105_packing(buf, &entry->delta, 47, 30, size, op);
return size;
}
static size_t
sja1105_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_vl_forwarding_params_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY;
int offset, i;
for (i = 0, offset = 16; i < 8; i++, offset += 10)
sja1105_packing(buf, &entry->partspc[i],
offset + 9, offset + 0, size, op);
sja1105_packing(buf, &entry->debugen, 15, 15, size, op);
return size;
}
static size_t
sja1110_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_vl_forwarding_params_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY;
int offset, i;
for (i = 0, offset = 8; i < 8; i++, offset += 11)
sja1105_packing(buf, &entry->partspc[i],
offset + 10, offset + 0, size, op);
sja1105_packing(buf, &entry->debugen, 7, 7, size, op);
return size;
}
static size_t sja1105_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_vl_forwarding_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_VL_FORWARDING_ENTRY;
sja1105_packing(buf, &entry->type, 31, 31, size, op);
sja1105_packing(buf, &entry->priority, 30, 28, size, op);
sja1105_packing(buf, &entry->partition, 27, 25, size, op);
sja1105_packing(buf, &entry->destports, 24, 20, size, op);
return size;
}
static size_t sja1110_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_vl_forwarding_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_VL_FORWARDING_ENTRY;
sja1105_packing(buf, &entry->type, 31, 31, size, op);
sja1105_packing(buf, &entry->priority, 30, 28, size, op);
sja1105_packing(buf, &entry->partition, 27, 25, size, op);
sja1105_packing(buf, &entry->destports, 24, 14, size, op);
return size;
}
size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_vl_lookup_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_VL_LOOKUP_ENTRY;
if (entry->format == SJA1105_VL_FORMAT_PSFP) {
/* Interpreting vllupformat as 0 */
sja1105_packing(buf, &entry->destports,
95, 91, size, op);
sja1105_packing(buf, &entry->iscritical,
90, 90, size, op);
sja1105_packing(buf, &entry->macaddr,
89, 42, size, op);
sja1105_packing(buf, &entry->vlanid,
41, 30, size, op);
sja1105_packing(buf, &entry->port,
29, 27, size, op);
sja1105_packing(buf, &entry->vlanprior,
26, 24, size, op);
} else {
/* Interpreting vllupformat as 1 */
sja1105_packing(buf, &entry->egrmirr,
95, 91, size, op);
sja1105_packing(buf, &entry->ingrmirr,
90, 90, size, op);
sja1105_packing(buf, &entry->vlid,
57, 42, size, op);
sja1105_packing(buf, &entry->port,
29, 27, size, op);
}
return size;
}
size_t sja1110_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_vl_lookup_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_VL_LOOKUP_ENTRY;
if (entry->format == SJA1105_VL_FORMAT_PSFP) {
/* Interpreting vllupformat as 0 */
sja1105_packing(buf, &entry->destports,
94, 84, size, op);
sja1105_packing(buf, &entry->iscritical,
83, 83, size, op);
sja1105_packing(buf, &entry->macaddr,
82, 35, size, op);
sja1105_packing(buf, &entry->vlanid,
34, 23, size, op);
sja1105_packing(buf, &entry->port,
22, 19, size, op);
sja1105_packing(buf, &entry->vlanprior,
18, 16, size, op);
} else {
/* Interpreting vllupformat as 1 */
sja1105_packing(buf, &entry->egrmirr,
94, 84, size, op);
sja1105_packing(buf, &entry->ingrmirr,
83, 83, size, op);
sja1105_packing(buf, &entry->vlid,
50, 35, size, op);
sja1105_packing(buf, &entry->port,
22, 19, size, op);
}
return size;
}
static size_t sja1105_vl_policing_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_vl_policing_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_VL_POLICING_ENTRY;
sja1105_packing(buf, &entry->type, 63, 63, size, op);
sja1105_packing(buf, &entry->maxlen, 62, 52, size, op);
sja1105_packing(buf, &entry->sharindx, 51, 42, size, op);
if (entry->type == 0) {
sja1105_packing(buf, &entry->bag, 41, 28, size, op);
sja1105_packing(buf, &entry->jitter, 27, 18, size, op);
}
return size;
}
size_t sja1110_vl_policing_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_vl_policing_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_VL_POLICING_ENTRY;
sja1105_packing(buf, &entry->type, 63, 63, size, op);
sja1105_packing(buf, &entry->maxlen, 62, 52, size, op);
sja1105_packing(buf, &entry->sharindx, 51, 40, size, op);
if (entry->type == 0) {
sja1105_packing(buf, &entry->bag, 41, 28, size, op);
sja1105_packing(buf, &entry->jitter, 27, 18, size, op);
}
return size;
}
size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY;
struct sja1105_vlan_lookup_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->ving_mirr, 63, 59, size, op);
sja1105_packing(buf, &entry->vegr_mirr, 58, 54, size, op);
sja1105_packing(buf, &entry->vmemb_port, 53, 49, size, op);
sja1105_packing(buf, &entry->vlan_bc, 48, 44, size, op);
sja1105_packing(buf, &entry->tag_port, 43, 39, size, op);
sja1105_packing(buf, &entry->vlanid, 38, 27, size, op);
return size;
}
size_t sja1110_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_vlan_lookup_entry *entry = entry_ptr;
const size_t size = SJA1110_SIZE_VLAN_LOOKUP_ENTRY;
sja1105_packing(buf, &entry->ving_mirr, 95, 85, size, op);
sja1105_packing(buf, &entry->vegr_mirr, 84, 74, size, op);
sja1105_packing(buf, &entry->vmemb_port, 73, 63, size, op);
sja1105_packing(buf, &entry->vlan_bc, 62, 52, size, op);
sja1105_packing(buf, &entry->tag_port, 51, 41, size, op);
sja1105_packing(buf, &entry->type_entry, 40, 39, size, op);
sja1105_packing(buf, &entry->vlanid, 38, 27, size, op);
return size;
}
static size_t sja1105_xmii_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105_SIZE_XMII_PARAMS_ENTRY;
struct sja1105_xmii_params_entry *entry = entry_ptr;
int offset, i;
for (i = 0, offset = 17; i < 5; i++, offset += 3) {
sja1105_packing(buf, &entry->xmii_mode[i],
offset + 1, offset + 0, size, op);
sja1105_packing(buf, &entry->phy_mac[i],
offset + 2, offset + 2, size, op);
}
return size;
}
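/* Bit layout sketch for the loop above, derived from the offsets and shown
 * for reference only: port 0 occupies bits 19:17 (phy_mac in bit 19,
 * xmii_mode in bits 18:17), port 1 bits 22:20, and so on, 3 bits per port.
 */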
size_t sja1110_xmii_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1110_SIZE_XMII_PARAMS_ENTRY;
struct sja1105_xmii_params_entry *entry = entry_ptr;
int offset, i;
for (i = 0, offset = 20; i < SJA1110_NUM_PORTS; i++, offset += 4) {
sja1105_packing(buf, &entry->xmii_mode[i],
offset + 1, offset + 0, size, op);
sja1105_packing(buf, &entry->phy_mac[i],
offset + 2, offset + 2, size, op);
sja1105_packing(buf, &entry->special[i],
offset + 3, offset + 3, size, op);
}
return size;
}
size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_retagging_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_RETAGGING_ENTRY;
sja1105_packing(buf, &entry->egr_port, 63, 59, size, op);
sja1105_packing(buf, &entry->ing_port, 58, 54, size, op);
sja1105_packing(buf, &entry->vlan_ing, 53, 42, size, op);
sja1105_packing(buf, &entry->vlan_egr, 41, 30, size, op);
sja1105_packing(buf, &entry->do_not_learn, 29, 29, size, op);
sja1105_packing(buf, &entry->use_dest_ports, 28, 28, size, op);
sja1105_packing(buf, &entry->destports, 27, 23, size, op);
return size;
}
size_t sja1110_retagging_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_retagging_entry *entry = entry_ptr;
const size_t size = SJA1105_SIZE_RETAGGING_ENTRY;
sja1105_packing(buf, &entry->egr_port, 63, 53, size, op);
sja1105_packing(buf, &entry->ing_port, 52, 42, size, op);
sja1105_packing(buf, &entry->vlan_ing, 41, 30, size, op);
sja1105_packing(buf, &entry->vlan_egr, 29, 18, size, op);
sja1105_packing(buf, &entry->do_not_learn, 17, 17, size, op);
sja1105_packing(buf, &entry->use_dest_ports, 16, 16, size, op);
sja1105_packing(buf, &entry->destports, 15, 5, size, op);
return size;
}
static size_t sja1110_pcp_remapping_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1110_pcp_remapping_entry *entry = entry_ptr;
const size_t size = SJA1110_SIZE_PCP_REMAPPING_ENTRY;
int offset, i;
for (i = 0, offset = 8; i < SJA1105_NUM_TC; i++, offset += 3)
sja1105_packing(buf, &entry->egrpcp[i],
offset + 2, offset + 0, size, op);
return size;
}
size_t sja1105_table_header_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105_SIZE_TABLE_HEADER;
struct sja1105_table_header *entry = entry_ptr;
sja1105_packing(buf, &entry->block_id, 31, 24, size, op);
sja1105_packing(buf, &entry->len, 55, 32, size, op);
sja1105_packing(buf, &entry->crc, 95, 64, size, op);
return size;
}
/* WARNING: the *hdr pointer is really non-const, because this function
 * modifies the header's CRC as part of a 2-stage packing operation
*/
void
sja1105_table_header_pack_with_crc(void *buf, struct sja1105_table_header *hdr)
{
/* First pack the table as-is, then calculate the CRC, and
* finally put the proper CRC into the packed buffer
*/
memset(buf, 0, SJA1105_SIZE_TABLE_HEADER);
sja1105_table_header_packing(buf, hdr, PACK);
hdr->crc = sja1105_crc32(buf, SJA1105_SIZE_TABLE_HEADER - 4);
sja1105_pack(buf + SJA1105_SIZE_TABLE_HEADER - 4, &hdr->crc, 31, 0, 4);
}
static void sja1105_table_write_crc(u8 *table_start, u8 *crc_ptr)
{
u64 computed_crc;
int len_bytes;
len_bytes = (uintptr_t)(crc_ptr - table_start);
computed_crc = sja1105_crc32(table_start, len_bytes);
sja1105_pack(crc_ptr, &computed_crc, 31, 0, 4);
}
/* The block IDs that the switches support are unfortunately sparse, so keep a
* mapping table to "block indices" and translate back and forth so that we
 * don't waste memory in struct sja1105_static_config.
* Also, since the block id comes from essentially untrusted input (unpacking
* the static config from userspace) it has to be sanitized (range-checked)
* before blindly indexing kernel memory with the blk_idx.
*/
static u64 blk_id_map[BLK_IDX_MAX] = {
[BLK_IDX_SCHEDULE] = BLKID_SCHEDULE,
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = BLKID_SCHEDULE_ENTRY_POINTS,
[BLK_IDX_VL_LOOKUP] = BLKID_VL_LOOKUP,
[BLK_IDX_VL_POLICING] = BLKID_VL_POLICING,
[BLK_IDX_VL_FORWARDING] = BLKID_VL_FORWARDING,
[BLK_IDX_L2_LOOKUP] = BLKID_L2_LOOKUP,
[BLK_IDX_L2_POLICING] = BLKID_L2_POLICING,
[BLK_IDX_VLAN_LOOKUP] = BLKID_VLAN_LOOKUP,
[BLK_IDX_L2_FORWARDING] = BLKID_L2_FORWARDING,
[BLK_IDX_MAC_CONFIG] = BLKID_MAC_CONFIG,
[BLK_IDX_SCHEDULE_PARAMS] = BLKID_SCHEDULE_PARAMS,
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = BLKID_SCHEDULE_ENTRY_POINTS_PARAMS,
[BLK_IDX_VL_FORWARDING_PARAMS] = BLKID_VL_FORWARDING_PARAMS,
[BLK_IDX_L2_LOOKUP_PARAMS] = BLKID_L2_LOOKUP_PARAMS,
[BLK_IDX_L2_FORWARDING_PARAMS] = BLKID_L2_FORWARDING_PARAMS,
[BLK_IDX_AVB_PARAMS] = BLKID_AVB_PARAMS,
[BLK_IDX_GENERAL_PARAMS] = BLKID_GENERAL_PARAMS,
[BLK_IDX_RETAGGING] = BLKID_RETAGGING,
[BLK_IDX_XMII_PARAMS] = BLKID_XMII_PARAMS,
[BLK_IDX_PCP_REMAPPING] = BLKID_PCP_REMAPPING,
};
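/* Illustration only (not a helper actually used by this driver): the reverse
 * translation described above - from an untrusted block ID found in a packed
 * config back to a safe array index - would look roughly like this, with the
 * range check happening before the index is ever used:
 *
 *	static int sja1105_blk_id_to_idx(u64 blk_id)
 *	{
 *		enum sja1105_blk_idx i;
 *
 *		for (i = 0; i < BLK_IDX_MAX; i++)
 *			if (blk_id_map[i] == blk_id)
 *				return i;
 *
 *		return -EINVAL;
 *	}
 */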
const char *sja1105_static_config_error_msg[] = {
[SJA1105_CONFIG_OK] = "",
[SJA1105_TTETHERNET_NOT_SUPPORTED] =
"schedule-table present, but TTEthernet is "
"only supported on T and Q/S",
[SJA1105_INCORRECT_TTETHERNET_CONFIGURATION] =
"schedule-table present, but one of "
"schedule-entry-points-table, schedule-parameters-table or "
"schedule-entry-points-parameters table is empty",
[SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION] =
"vl-lookup-table present, but one of vl-policing-table, "
"vl-forwarding-table or vl-forwarding-parameters-table is empty",
[SJA1105_MISSING_L2_POLICING_TABLE] =
"l2-policing-table needs to have at least one entry",
[SJA1105_MISSING_L2_FORWARDING_TABLE] =
"l2-forwarding-table is either missing or incomplete",
[SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE] =
"l2-forwarding-parameters-table is missing",
[SJA1105_MISSING_GENERAL_PARAMS_TABLE] =
"general-parameters-table is missing",
[SJA1105_MISSING_VLAN_TABLE] =
"vlan-lookup-table needs to have at least the default untagged VLAN",
[SJA1105_MISSING_XMII_TABLE] =
"xmii-table is missing",
[SJA1105_MISSING_MAC_TABLE] =
"mac-configuration-table needs to contain an entry for each port",
[SJA1105_OVERCOMMITTED_FRAME_MEMORY] =
"Not allowed to overcommit frame memory. L2 memory partitions "
"and VL memory partitions share the same space. The sum of all "
"16 memory partitions is not allowed to be larger than 929 "
"128-byte blocks (or 910 with retagging). Please adjust "
"l2-forwarding-parameters-table.part_spc and/or "
"vl-forwarding-parameters-table.partspc.",
};
static sja1105_config_valid_t
static_config_check_memory_size(const struct sja1105_table *tables, int max_mem)
{
const struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
const struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
int i, mem = 0;
l2_fwd_params = tables[BLK_IDX_L2_FORWARDING_PARAMS].entries;
for (i = 0; i < 8; i++)
mem += l2_fwd_params->part_spc[i];
if (tables[BLK_IDX_VL_FORWARDING_PARAMS].entry_count) {
vl_fwd_params = tables[BLK_IDX_VL_FORWARDING_PARAMS].entries;
for (i = 0; i < 8; i++)
mem += vl_fwd_params->partspc[i];
}
if (tables[BLK_IDX_RETAGGING].entry_count)
max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;
if (mem > max_mem)
return SJA1105_OVERCOMMITTED_FRAME_MEMORY;
return SJA1105_CONFIG_OK;
}
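/* Numeric example, using the figures quoted in the error message above: with
 * max_mem = 929 blocks and a non-empty retagging table, the budget drops by
 * SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD to 910 blocks before the sum of all
 * L2 and VL memory partitions is compared against it.
 */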
sja1105_config_valid_t
sja1105_static_config_check_valid(const struct sja1105_static_config *config,
int max_mem)
{
const struct sja1105_table *tables = config->tables;
#define IS_FULL(blk_idx) \
(tables[blk_idx].entry_count == tables[blk_idx].ops->max_entry_count)
if (tables[BLK_IDX_SCHEDULE].entry_count) {
if (!tables[BLK_IDX_SCHEDULE].ops->max_entry_count)
return SJA1105_TTETHERNET_NOT_SUPPORTED;
if (tables[BLK_IDX_SCHEDULE_ENTRY_POINTS].entry_count == 0)
return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
if (!IS_FULL(BLK_IDX_SCHEDULE_PARAMS))
return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
if (!IS_FULL(BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS))
return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
}
if (tables[BLK_IDX_VL_LOOKUP].entry_count) {
struct sja1105_vl_lookup_entry *vl_lookup;
bool has_critical_links = false;
int i;
vl_lookup = tables[BLK_IDX_VL_LOOKUP].entries;
for (i = 0; i < tables[BLK_IDX_VL_LOOKUP].entry_count; i++) {
if (vl_lookup[i].iscritical) {
has_critical_links = true;
break;
}
}
if (tables[BLK_IDX_VL_POLICING].entry_count == 0 &&
has_critical_links)
return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
if (tables[BLK_IDX_VL_FORWARDING].entry_count == 0 &&
has_critical_links)
return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
if (tables[BLK_IDX_VL_FORWARDING_PARAMS].entry_count == 0 &&
has_critical_links)
return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
}
if (tables[BLK_IDX_L2_POLICING].entry_count == 0)
return SJA1105_MISSING_L2_POLICING_TABLE;
if (tables[BLK_IDX_VLAN_LOOKUP].entry_count == 0)
return SJA1105_MISSING_VLAN_TABLE;
if (!IS_FULL(BLK_IDX_L2_FORWARDING))
return SJA1105_MISSING_L2_FORWARDING_TABLE;
if (!IS_FULL(BLK_IDX_MAC_CONFIG))
return SJA1105_MISSING_MAC_TABLE;
if (!IS_FULL(BLK_IDX_L2_FORWARDING_PARAMS))
return SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE;
if (!IS_FULL(BLK_IDX_GENERAL_PARAMS))
return SJA1105_MISSING_GENERAL_PARAMS_TABLE;
if (!IS_FULL(BLK_IDX_XMII_PARAMS))
return SJA1105_MISSING_XMII_TABLE;
return static_config_check_memory_size(tables, max_mem);
#undef IS_FULL
}
void
sja1105_static_config_pack(void *buf, struct sja1105_static_config *config)
{
struct sja1105_table_header header = {0};
enum sja1105_blk_idx i;
char *p = buf;
int j;
sja1105_pack(p, &config->device_id, 31, 0, 4);
p += SJA1105_SIZE_DEVICE_ID;
for (i = 0; i < BLK_IDX_MAX; i++) {
const struct sja1105_table *table;
char *table_start;
table = &config->tables[i];
if (!table->entry_count)
continue;
header.block_id = blk_id_map[i];
header.len = table->entry_count *
table->ops->packed_entry_size / 4;
sja1105_table_header_pack_with_crc(p, &header);
p += SJA1105_SIZE_TABLE_HEADER;
table_start = p;
for (j = 0; j < table->entry_count; j++) {
u8 *entry_ptr = table->entries;
entry_ptr += j * table->ops->unpacked_entry_size;
memset(p, 0, table->ops->packed_entry_size);
table->ops->packing(p, entry_ptr, PACK);
p += table->ops->packed_entry_size;
}
sja1105_table_write_crc(table_start, p);
p += 4;
}
/* Final header:
* Block ID does not matter
* Length of 0 marks that header is final
* CRC will be replaced on-the-fly on "config upload"
*/
header.block_id = 0;
header.len = 0;
header.crc = 0xDEADBEEF;
memset(p, 0, SJA1105_SIZE_TABLE_HEADER);
sja1105_table_header_packing(p, &header, PACK);
}
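/* For reference, the buffer produced above has the following rough layout,
 * a sketch derived from this code rather than from hardware documentation:
 *
 *	device id (SJA1105_SIZE_DEVICE_ID bytes)
 *	for each non-empty table:
 *		table header, CRC included (SJA1105_SIZE_TABLE_HEADER bytes)
 *		entry_count * packed_entry_size bytes of packed entries
 *		data CRC (4 bytes)
 *	final table header with len = 0 (SJA1105_SIZE_TABLE_HEADER bytes)
 */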
size_t
sja1105_static_config_get_length(const struct sja1105_static_config *config)
{
unsigned int sum;
unsigned int header_count;
enum sja1105_blk_idx i;
/* Ending header */
header_count = 1;
sum = SJA1105_SIZE_DEVICE_ID;
/* Tables (headers and entries) */
for (i = 0; i < BLK_IDX_MAX; i++) {
const struct sja1105_table *table;
table = &config->tables[i];
if (table->entry_count)
header_count++;
sum += table->ops->packed_entry_size * table->entry_count;
}
/* Headers have an additional CRC at the end */
sum += header_count * (SJA1105_SIZE_TABLE_HEADER + 4);
/* Last header does not have an extra CRC because there is no data */
sum -= 4;
return sum;
}
/* Compatibility matrices */
/* SJA1105E: First generation, no TTEthernet */
const struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105et_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
.packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
},
[BLK_IDX_L2_POLICING] = {
.packing = sja1105_l2_policing_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
.packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
.max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
},
[BLK_IDX_VLAN_LOOKUP] = {
.packing = sja1105_vlan_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
.packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
},
[BLK_IDX_L2_FORWARDING] = {
.packing = sja1105_l2_forwarding_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
},
[BLK_IDX_MAC_CONFIG] = {
.packing = sja1105et_mac_config_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
.packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105et_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
.packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {
.packing = sja1105_l2_forwarding_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_AVB_PARAMS] = {
.packing = sja1105et_avb_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
.packed_entry_size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
},
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105et_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
.packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
[BLK_IDX_RETAGGING] = {
.packing = sja1105_retagging_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
.packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
.max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
},
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
.packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
},
};
/* SJA1105T: First generation, TTEthernet */
const struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
[BLK_IDX_SCHEDULE] = {
.packing = sja1105_schedule_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
.packing = sja1105_schedule_entry_points_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
},
[BLK_IDX_VL_LOOKUP] = {
.packing = sja1105_vl_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
.packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
},
[BLK_IDX_VL_POLICING] = {
.packing = sja1105_vl_policing_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
.packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
.max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
},
[BLK_IDX_VL_FORWARDING] = {
.packing = sja1105_vl_forwarding_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
.packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
.max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
},
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105et_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
.packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
},
[BLK_IDX_L2_POLICING] = {
.packing = sja1105_l2_policing_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
.packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
.max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
},
[BLK_IDX_VLAN_LOOKUP] = {
.packing = sja1105_vlan_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
.packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
},
[BLK_IDX_L2_FORWARDING] = {
.packing = sja1105_l2_forwarding_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
},
[BLK_IDX_MAC_CONFIG] = {
.packing = sja1105et_mac_config_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
.packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
},
[BLK_IDX_SCHEDULE_PARAMS] = {
.packing = sja1105_schedule_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
.packing = sja1105_schedule_entry_points_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
},
[BLK_IDX_VL_FORWARDING_PARAMS] = {
.packing = sja1105_vl_forwarding_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
.packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105et_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
.packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {
.packing = sja1105_l2_forwarding_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_AVB_PARAMS] = {
.packing = sja1105et_avb_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
.packed_entry_size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
},
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105et_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
.packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
[BLK_IDX_RETAGGING] = {
.packing = sja1105_retagging_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
.packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
.max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
},
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
.packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
},
};
/* SJA1105P: Second generation, no TTEthernet, no SGMII */
const struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105pqrs_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
.packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
},
[BLK_IDX_L2_POLICING] = {
.packing = sja1105_l2_policing_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
.packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
.max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
},
[BLK_IDX_VLAN_LOOKUP] = {
.packing = sja1105_vlan_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
.packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
},
[BLK_IDX_L2_FORWARDING] = {
.packing = sja1105_l2_forwarding_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
},
[BLK_IDX_MAC_CONFIG] = {
.packing = sja1105pqrs_mac_config_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
.packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105pqrs_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {
.packing = sja1105_l2_forwarding_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_AVB_PARAMS] = {
.packing = sja1105pqrs_avb_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
},
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105pqrs_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
[BLK_IDX_RETAGGING] = {
.packing = sja1105_retagging_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
.packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
.max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
},
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
.packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
},
};
/* SJA1105Q: Second generation, TTEthernet, no SGMII */
const struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
[BLK_IDX_SCHEDULE] = {
.packing = sja1105_schedule_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
.packing = sja1105_schedule_entry_points_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
},
[BLK_IDX_VL_LOOKUP] = {
.packing = sja1105_vl_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
.packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
},
[BLK_IDX_VL_POLICING] = {
.packing = sja1105_vl_policing_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
.packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
.max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
},
[BLK_IDX_VL_FORWARDING] = {
.packing = sja1105_vl_forwarding_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
.packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
.max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
},
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105pqrs_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
.packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
},
[BLK_IDX_L2_POLICING] = {
.packing = sja1105_l2_policing_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
.packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
.max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
},
[BLK_IDX_VLAN_LOOKUP] = {
.packing = sja1105_vlan_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
.packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
},
[BLK_IDX_L2_FORWARDING] = {
.packing = sja1105_l2_forwarding_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
},
[BLK_IDX_MAC_CONFIG] = {
.packing = sja1105pqrs_mac_config_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
.packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
},
[BLK_IDX_SCHEDULE_PARAMS] = {
.packing = sja1105_schedule_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
.packing = sja1105_schedule_entry_points_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
},
[BLK_IDX_VL_FORWARDING_PARAMS] = {
.packing = sja1105_vl_forwarding_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
.packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105pqrs_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {
.packing = sja1105_l2_forwarding_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_AVB_PARAMS] = {
.packing = sja1105pqrs_avb_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
},
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105pqrs_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
[BLK_IDX_RETAGGING] = {
.packing = sja1105_retagging_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
.packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
.max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
},
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
.packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
},
};
/* SJA1105R: Second generation, no TTEthernet, SGMII */
const struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105pqrs_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
.packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
},
[BLK_IDX_L2_POLICING] = {
.packing = sja1105_l2_policing_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
.packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
.max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
},
[BLK_IDX_VLAN_LOOKUP] = {
.packing = sja1105_vlan_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
.packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
},
[BLK_IDX_L2_FORWARDING] = {
.packing = sja1105_l2_forwarding_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
},
[BLK_IDX_MAC_CONFIG] = {
.packing = sja1105pqrs_mac_config_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
.packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105pqrs_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {
.packing = sja1105_l2_forwarding_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_AVB_PARAMS] = {
.packing = sja1105pqrs_avb_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
},
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105pqrs_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
[BLK_IDX_RETAGGING] = {
.packing = sja1105_retagging_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
.packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
.max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
},
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
.packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
},
};
/* SJA1105S: Second generation, TTEthernet, SGMII */
const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
[BLK_IDX_SCHEDULE] = {
.packing = sja1105_schedule_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
.packing = sja1105_schedule_entry_points_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
},
[BLK_IDX_VL_LOOKUP] = {
.packing = sja1105_vl_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
.packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
},
[BLK_IDX_VL_POLICING] = {
.packing = sja1105_vl_policing_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
.packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
.max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
},
[BLK_IDX_VL_FORWARDING] = {
.packing = sja1105_vl_forwarding_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
.packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
.max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
},
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1105pqrs_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
.packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
},
[BLK_IDX_L2_POLICING] = {
.packing = sja1105_l2_policing_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
.packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
.max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
},
[BLK_IDX_VLAN_LOOKUP] = {
.packing = sja1105_vlan_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
.packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
},
[BLK_IDX_L2_FORWARDING] = {
.packing = sja1105_l2_forwarding_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
},
[BLK_IDX_MAC_CONFIG] = {
.packing = sja1105pqrs_mac_config_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
.packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
},
[BLK_IDX_SCHEDULE_PARAMS] = {
.packing = sja1105_schedule_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
.packing = sja1105_schedule_entry_points_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
},
[BLK_IDX_VL_FORWARDING_PARAMS] = {
.packing = sja1105_vl_forwarding_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
.packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1105pqrs_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {
.packing = sja1105_l2_forwarding_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_AVB_PARAMS] = {
.packing = sja1105pqrs_avb_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
},
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105pqrs_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
[BLK_IDX_RETAGGING] = {
.packing = sja1105_retagging_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
.packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
.max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
},
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1105_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
.packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
},
};
/* SJA1110A: Third generation */
const struct sja1105_table_ops sja1110_table_ops[BLK_IDX_MAX] = {
[BLK_IDX_SCHEDULE] = {
.packing = sja1110_schedule_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
.packed_entry_size = SJA1110_SIZE_SCHEDULE_ENTRY,
.max_entry_count = SJA1110_MAX_SCHEDULE_COUNT,
},
[BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
.packing = sja1110_schedule_entry_points_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
.packed_entry_size = SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
},
[BLK_IDX_VL_LOOKUP] = {
.packing = sja1110_vl_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
.packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
.max_entry_count = SJA1110_MAX_VL_LOOKUP_COUNT,
},
[BLK_IDX_VL_POLICING] = {
.packing = sja1110_vl_policing_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
.packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
.max_entry_count = SJA1110_MAX_VL_POLICING_COUNT,
},
[BLK_IDX_VL_FORWARDING] = {
.packing = sja1110_vl_forwarding_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
.packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
.max_entry_count = SJA1110_MAX_VL_FORWARDING_COUNT,
},
[BLK_IDX_L2_LOOKUP] = {
.packing = sja1110_l2_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
.packed_entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
},
[BLK_IDX_L2_POLICING] = {
.packing = sja1110_l2_policing_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
.packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
.max_entry_count = SJA1110_MAX_L2_POLICING_COUNT,
},
[BLK_IDX_VLAN_LOOKUP] = {
.packing = sja1110_vlan_lookup_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
.packed_entry_size = SJA1110_SIZE_VLAN_LOOKUP_ENTRY,
.max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
},
[BLK_IDX_L2_FORWARDING] = {
.packing = sja1110_l2_forwarding_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
.max_entry_count = SJA1110_MAX_L2_FORWARDING_COUNT,
},
[BLK_IDX_MAC_CONFIG] = {
.packing = sja1110_mac_config_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
.packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
.max_entry_count = SJA1110_MAX_MAC_CONFIG_COUNT,
},
[BLK_IDX_SCHEDULE_PARAMS] = {
.packing = sja1110_schedule_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
},
[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
.packing = sja1105_schedule_entry_points_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
.packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
},
[BLK_IDX_VL_FORWARDING_PARAMS] = {
.packing = sja1110_vl_forwarding_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
.packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.packing = sja1110_l2_lookup_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
.packed_entry_size = SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {
.packing = sja1110_l2_forwarding_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
[BLK_IDX_AVB_PARAMS] = {
.packing = sja1105pqrs_avb_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
.packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
},
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1110_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
.packed_entry_size = SJA1110_SIZE_GENERAL_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
},
[BLK_IDX_RETAGGING] = {
.packing = sja1110_retagging_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
.packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
.max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
},
[BLK_IDX_XMII_PARAMS] = {
.packing = sja1110_xmii_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
.packed_entry_size = SJA1110_SIZE_XMII_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
},
[BLK_IDX_PCP_REMAPPING] = {
.packing = sja1110_pcp_remapping_entry_packing,
.unpacked_entry_size = sizeof(struct sja1110_pcp_remapping_entry),
.packed_entry_size = SJA1110_SIZE_PCP_REMAPPING_ENTRY,
.max_entry_count = SJA1110_MAX_PCP_REMAPPING_COUNT,
},
};
int sja1105_static_config_init(struct sja1105_static_config *config,
const struct sja1105_table_ops *static_ops,
u64 device_id)
{
enum sja1105_blk_idx i;
*config = (struct sja1105_static_config) {0};
/* Transfer static_ops array from priv into per-table ops
* for handier access
*/
for (i = 0; i < BLK_IDX_MAX; i++)
config->tables[i].ops = &static_ops[i];
config->device_id = device_id;
return 0;
}
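/* Typical lifecycle, shown only as a usage sketch (error handling elided;
 * "ops", "id" and "max_mem" stand in for a real table ops array, device ID
 * and frame memory budget):
 *
 *	struct sja1105_static_config config;
 *	void *buf;
 *
 *	sja1105_static_config_init(&config, ops, id);
 *	// populate config.tables[] via sja1105_table_resize() and friends
 *	if (sja1105_static_config_check_valid(&config, max_mem) ==
 *	    SJA1105_CONFIG_OK) {
 *		buf = kzalloc(sja1105_static_config_get_length(&config),
 *			      GFP_KERNEL);
 *		sja1105_static_config_pack(buf, &config);
 *	}
 *	sja1105_static_config_free(&config);
 */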
void sja1105_static_config_free(struct sja1105_static_config *config)
{
enum sja1105_blk_idx i;
for (i = 0; i < BLK_IDX_MAX; i++) {
if (config->tables[i].entry_count) {
kfree(config->tables[i].entries);
config->tables[i].entry_count = 0;
}
}
}
int sja1105_table_delete_entry(struct sja1105_table *table, int i)
{
size_t entry_size = table->ops->unpacked_entry_size;
u8 *entries = table->entries;
	if (i >= table->entry_count)
return -ERANGE;
memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
(table->entry_count - i) * entry_size);
table->entry_count--;
return 0;
}
/* No pointers to table->entries should be kept when this is called. */
int sja1105_table_resize(struct sja1105_table *table, size_t new_count)
{
size_t entry_size = table->ops->unpacked_entry_size;
void *new_entries, *old_entries = table->entries;
if (new_count > table->ops->max_entry_count)
return -ERANGE;
new_entries = kcalloc(new_count, entry_size, GFP_KERNEL);
if (!new_entries)
return -ENOMEM;
memcpy(new_entries, old_entries, min(new_count, table->entry_count) *
entry_size);
table->entries = new_entries;
table->entry_count = new_count;
kfree(old_entries);
return 0;
}
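/* Usage sketch (hypothetical caller, error handling abbreviated): growing a
 * table by one entry; the new slot at index entry_count - 1 comes back
 * zero-initialized because the backing storage is allocated with kcalloc().
 *
 *	rc = sja1105_table_resize(table, table->entry_count + 1);
 *	if (rc)
 *		return rc;
 *	new_entry = table->entries +
 *		    (table->entry_count - 1) * table->ops->unpacked_entry_size;
 */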
| linux-master | drivers/net/dsa/sja1105/sja1105_static_config.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
* Copyright 2020 NXP
*/
#include "sja1105.h"
/* Since devlink regions have a fixed size and the static config has a variable
* size, we need to calculate the maximum possible static config size by
 * creating a dummy config with all table entries populated to the max, and
 * getting its packed length. This is done dynamically as opposed to simply
 * hardcoding a number, since currently not all static config tables are
 * implemented, and this avoids a possible code desynchronization.
*/
static size_t sja1105_static_config_get_max_size(struct sja1105_private *priv)
{
struct sja1105_static_config config;
enum sja1105_blk_idx blk_idx;
int rc;
rc = sja1105_static_config_init(&config,
priv->info->static_ops,
priv->info->device_id);
if (rc)
return 0;
for (blk_idx = 0; blk_idx < BLK_IDX_MAX; blk_idx++) {
struct sja1105_table *table = &config.tables[blk_idx];
table->entry_count = table->ops->max_entry_count;
}
return sja1105_static_config_get_length(&config);
}
static int
sja1105_region_static_config_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
u8 **data)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
struct sja1105_private *priv = ds->priv;
size_t max_len, len;
len = sja1105_static_config_get_length(&priv->static_config);
max_len = sja1105_static_config_get_max_size(priv);
*data = kcalloc(max_len, sizeof(u8), GFP_KERNEL);
if (!*data)
return -ENOMEM;
return static_config_buf_prepare_for_upload(priv, *data, len);
}
static struct devlink_region_ops sja1105_region_static_config_ops = {
.name = "static-config",
.snapshot = sja1105_region_static_config_snapshot,
.destructor = kfree,
};
enum sja1105_region_id {
SJA1105_REGION_STATIC_CONFIG = 0,
};
struct sja1105_region {
const struct devlink_region_ops *ops;
size_t (*get_size)(struct sja1105_private *priv);
};
static struct sja1105_region sja1105_regions[] = {
[SJA1105_REGION_STATIC_CONFIG] = {
.ops = &sja1105_region_static_config_ops,
.get_size = sja1105_static_config_get_max_size,
},
};
static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
{
int i, num_regions = ARRAY_SIZE(sja1105_regions);
struct sja1105_private *priv = ds->priv;
const struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *),
GFP_KERNEL);
if (!priv->regions)
return -ENOMEM;
for (i = 0; i < num_regions; i++) {
size = sja1105_regions[i].get_size(priv);
ops = sja1105_regions[i].ops;
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region)) {
while (--i >= 0)
dsa_devlink_region_destroy(priv->regions[i]);
kfree(priv->regions);
return PTR_ERR(region);
}
priv->regions[i] = region;
}
return 0;
}
static void sja1105_teardown_devlink_regions(struct dsa_switch *ds)
{
int i, num_regions = ARRAY_SIZE(sja1105_regions);
struct sja1105_private *priv = ds->priv;
for (i = 0; i < num_regions; i++)
dsa_devlink_region_destroy(priv->regions[i]);
kfree(priv->regions);
}
int sja1105_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
return devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
priv->info->name);
}
int sja1105_devlink_setup(struct dsa_switch *ds)
{
return sja1105_setup_devlink_regions(ds);
}
void sja1105_devlink_teardown(struct dsa_switch *ds)
{
sja1105_teardown_devlink_regions(ds);
}
| linux-master | drivers/net/dsa/sja1105/sja1105_devlink.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2021 NXP
*/
#include <linux/pcs/pcs-xpcs.h>
#include <linux/of_mdio.h>
#include "sja1105.h"
#define SJA1110_PCS_BANK_REG SJA1110_SPI_ADDR(0x3fc)
int sja1105_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
int rc;
addr = (mmd << 16) | reg;
if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
return 0xffff;
if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
return NXP_SJA1105_XPCS_ID >> 16;
if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID2)
return NXP_SJA1105_XPCS_ID & GENMASK(15, 0);
rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
if (rc < 0)
return rc;
return tmp & 0xffff;
}
int sja1105_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd,
int reg, u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
addr = (mmd << 16) | reg;
tmp = val;
if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
return -EINVAL;
return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
}
int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
const struct sja1105_regs *regs = priv->info->regs;
int offset, bank;
u64 addr;
u32 tmp;
int rc;
if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
return -ENODEV;
addr = (mmd << 16) | reg;
if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
return NXP_SJA1110_XPCS_ID >> 16;
if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID2)
return NXP_SJA1110_XPCS_ID & GENMASK(15, 0);
bank = addr >> 8;
offset = addr & GENMASK(7, 0);
/* This addressing scheme reserves register 0xff for the bank address
* register, so that can never be addressed.
*/
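	/* Worked example (numbers chosen for illustration only): mmd =
	 * MDIO_MMD_VEND2 (31) and reg = 0x12 give addr = 0x1f0012, so bank =
	 * 0x1f00 is written to the bank register at pcs_base[phy] +
	 * SJA1110_PCS_BANK_REG first, and the data access then targets
	 * pcs_base[phy] + 0x12.
	 */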
if (WARN_ON(offset == 0xff))
return -ENODEV;
tmp = bank;
rc = sja1105_xfer_u32(priv, SPI_WRITE,
regs->pcs_base[phy] + SJA1110_PCS_BANK_REG,
&tmp, NULL);
if (rc < 0)
return rc;
rc = sja1105_xfer_u32(priv, SPI_READ, regs->pcs_base[phy] + offset,
&tmp, NULL);
if (rc < 0)
return rc;
return tmp & 0xffff;
}
int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
			       u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
const struct sja1105_regs *regs = priv->info->regs;
int offset, bank;
u64 addr;
u32 tmp;
int rc;
if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
return -ENODEV;
addr = (mmd << 16) | reg;
bank = addr >> 8;
offset = addr & GENMASK(7, 0);
/* This addressing scheme reserves register 0xff for the bank address
* register, so that can never be addressed.
*/
if (WARN_ON(offset == 0xff))
return -ENODEV;
tmp = bank;
rc = sja1105_xfer_u32(priv, SPI_WRITE,
regs->pcs_base[phy] + SJA1110_PCS_BANK_REG,
&tmp, NULL);
if (rc < 0)
return rc;
tmp = val;
return sja1105_xfer_u32(priv, SPI_WRITE, regs->pcs_base[phy] + offset,
&tmp, NULL);
}
enum sja1105_mdio_opcode {
SJA1105_C45_ADDR = 0,
SJA1105_C22 = 1,
SJA1105_C45_DATA = 2,
SJA1105_C45_DATA_AUTOINC = 3,
};
static u64 sja1105_base_t1_encode_addr(struct sja1105_private *priv,
int phy, enum sja1105_mdio_opcode op,
int xad)
{
const struct sja1105_regs *regs = priv->info->regs;
return regs->mdio_100base_t1 | (phy << 7) | (op << 5) | (xad << 0);
}
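/* For illustration only (values picked arbitrarily): phy = 3,
 * op = SJA1105_C22 and xad = 0x1f yield low bits
 * (3 << 7) | (1 << 5) | 0x1f = 0x1bf, OR-ed on top of regs->mdio_100base_t1.
 */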
static int sja1105_base_t1_mdio_read_c22(struct mii_bus *bus, int phy, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
int rc;
addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
if (rc < 0)
return rc;
return tmp & 0xffff;
}
static int sja1105_base_t1_mdio_read_c45(struct mii_bus *bus, int phy,
int mmd, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
int rc;
addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, mmd);
	rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &reg, NULL);
if (rc < 0)
return rc;
addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, mmd);
rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
if (rc < 0)
return rc;
return tmp & 0xffff;
}
static int sja1105_base_t1_mdio_write_c22(struct mii_bus *bus, int phy, int reg,
u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
tmp = val & 0xffff;
return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
}
static int sja1105_base_t1_mdio_write_c45(struct mii_bus *bus, int phy,
int mmd, int reg, u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
int rc;
addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, mmd);
	rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &reg, NULL);
if (rc < 0)
return rc;
addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, mmd);
tmp = val & 0xffff;
return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
}
static int sja1105_base_tx_mdio_read(struct mii_bus *bus, int phy, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
const struct sja1105_regs *regs = priv->info->regs;
u32 tmp;
int rc;
rc = sja1105_xfer_u32(priv, SPI_READ, regs->mdio_100base_tx + reg,
&tmp, NULL);
if (rc < 0)
return rc;
return tmp & 0xffff;
}
static int sja1105_base_tx_mdio_write(struct mii_bus *bus, int phy, int reg,
u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
const struct sja1105_regs *regs = priv->info->regs;
u32 tmp = val;
return sja1105_xfer_u32(priv, SPI_WRITE, regs->mdio_100base_tx + reg,
&tmp, NULL);
}
static int sja1105_mdiobus_base_tx_register(struct sja1105_private *priv,
struct device_node *mdio_node)
{
struct sja1105_mdio_private *mdio_priv;
struct device_node *np;
struct mii_bus *bus;
int rc = 0;
np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-tx-mdio");
if (!np)
return 0;
if (!of_device_is_available(np))
goto out_put_np;
bus = mdiobus_alloc_size(sizeof(*mdio_priv));
if (!bus) {
rc = -ENOMEM;
goto out_put_np;
}
bus->name = "SJA1110 100base-TX MDIO bus";
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-tx",
dev_name(priv->ds->dev));
bus->read = sja1105_base_tx_mdio_read;
bus->write = sja1105_base_tx_mdio_write;
bus->parent = priv->ds->dev;
mdio_priv = bus->priv;
mdio_priv->priv = priv;
rc = of_mdiobus_register(bus, np);
if (rc) {
mdiobus_free(bus);
goto out_put_np;
}
priv->mdio_base_tx = bus;
out_put_np:
of_node_put(np);
return rc;
}
static void sja1105_mdiobus_base_tx_unregister(struct sja1105_private *priv)
{
if (!priv->mdio_base_tx)
return;
mdiobus_unregister(priv->mdio_base_tx);
mdiobus_free(priv->mdio_base_tx);
priv->mdio_base_tx = NULL;
}
static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
struct device_node *mdio_node)
{
struct sja1105_mdio_private *mdio_priv;
struct device_node *np;
struct mii_bus *bus;
int rc = 0;
np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-t1-mdio");
if (!np)
return 0;
if (!of_device_is_available(np))
goto out_put_np;
bus = mdiobus_alloc_size(sizeof(*mdio_priv));
if (!bus) {
rc = -ENOMEM;
goto out_put_np;
}
bus->name = "SJA1110 100base-T1 MDIO bus";
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-t1",
dev_name(priv->ds->dev));
bus->read = sja1105_base_t1_mdio_read_c22;
bus->write = sja1105_base_t1_mdio_write_c22;
bus->read_c45 = sja1105_base_t1_mdio_read_c45;
bus->write_c45 = sja1105_base_t1_mdio_write_c45;
bus->parent = priv->ds->dev;
mdio_priv = bus->priv;
mdio_priv->priv = priv;
rc = of_mdiobus_register(bus, np);
if (rc) {
mdiobus_free(bus);
goto out_put_np;
}
priv->mdio_base_t1 = bus;
out_put_np:
of_node_put(np);
return rc;
}
static void sja1105_mdiobus_base_t1_unregister(struct sja1105_private *priv)
{
if (!priv->mdio_base_t1)
return;
mdiobus_unregister(priv->mdio_base_t1);
mdiobus_free(priv->mdio_base_t1);
priv->mdio_base_t1 = NULL;
}
static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
{
struct sja1105_mdio_private *mdio_priv;
struct dsa_switch *ds = priv->ds;
struct mii_bus *bus;
int rc = 0;
int port;
if (!priv->info->pcs_mdio_read_c45 || !priv->info->pcs_mdio_write_c45)
return 0;
bus = mdiobus_alloc_size(sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
bus->name = "SJA1105 PCS MDIO bus";
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
dev_name(ds->dev));
bus->read_c45 = priv->info->pcs_mdio_read_c45;
bus->write_c45 = priv->info->pcs_mdio_write_c45;
bus->parent = ds->dev;
/* There is no PHY on this MDIO bus => mask out all PHY addresses
* from auto probing.
*/
bus->phy_mask = ~0;
mdio_priv = bus->priv;
mdio_priv->priv = priv;
rc = mdiobus_register(bus);
if (rc) {
mdiobus_free(bus);
return rc;
}
for (port = 0; port < ds->num_ports; port++) {
struct dw_xpcs *xpcs;
if (dsa_is_unused_port(ds, port))
continue;
if (priv->phy_mode[port] != PHY_INTERFACE_MODE_SGMII &&
priv->phy_mode[port] != PHY_INTERFACE_MODE_2500BASEX)
continue;
xpcs = xpcs_create_mdiodev(bus, port, priv->phy_mode[port]);
if (IS_ERR(xpcs)) {
rc = PTR_ERR(xpcs);
goto out_pcs_free;
}
priv->xpcs[port] = xpcs;
}
priv->mdio_pcs = bus;
return 0;
out_pcs_free:
for (port = 0; port < ds->num_ports; port++) {
if (!priv->xpcs[port])
continue;
xpcs_destroy(priv->xpcs[port]);
priv->xpcs[port] = NULL;
}
mdiobus_unregister(bus);
mdiobus_free(bus);
return rc;
}
static void sja1105_mdiobus_pcs_unregister(struct sja1105_private *priv)
{
struct dsa_switch *ds = priv->ds;
int port;
if (!priv->mdio_pcs)
return;
for (port = 0; port < ds->num_ports; port++) {
if (!priv->xpcs[port])
continue;
xpcs_destroy(priv->xpcs[port]);
priv->xpcs[port] = NULL;
}
mdiobus_unregister(priv->mdio_pcs);
mdiobus_free(priv->mdio_pcs);
priv->mdio_pcs = NULL;
}
int sja1105_mdiobus_register(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
struct device_node *switch_node = ds->dev->of_node;
struct device_node *mdio_node;
int rc;
rc = sja1105_mdiobus_pcs_register(priv);
if (rc)
return rc;
mdio_node = of_get_child_by_name(switch_node, "mdios");
if (!mdio_node)
return 0;
if (!of_device_is_available(mdio_node))
goto out_put_mdio_node;
if (regs->mdio_100base_tx != SJA1105_RSV_ADDR) {
rc = sja1105_mdiobus_base_tx_register(priv, mdio_node);
if (rc)
goto err_put_mdio_node;
}
if (regs->mdio_100base_t1 != SJA1105_RSV_ADDR) {
rc = sja1105_mdiobus_base_t1_register(priv, mdio_node);
if (rc)
goto err_free_base_tx_mdiobus;
}
out_put_mdio_node:
of_node_put(mdio_node);
return 0;
err_free_base_tx_mdiobus:
sja1105_mdiobus_base_tx_unregister(priv);
err_put_mdio_node:
of_node_put(mdio_node);
sja1105_mdiobus_pcs_unregister(priv);
return rc;
}
void sja1105_mdiobus_unregister(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
sja1105_mdiobus_base_t1_unregister(priv);
sja1105_mdiobus_base_tx_unregister(priv);
sja1105_mdiobus_pcs_unregister(priv);
}
| linux-master | drivers/net/dsa/sja1105/sja1105_mdio.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
#include "sja1105.h"
/* In the dynamic configuration interface, the switch exposes a register-like
* view of some of the static configuration tables.
* Many times the field organization of the dynamic tables is abbreviated (not
* all fields are dynamically reconfigurable) and different from the static
* ones, but the key reason for having it is that we can spare a switch reset
* for settings that can be changed dynamically.
*
* This file creates a per-switch-family abstraction called
* struct sja1105_dynamic_table_ops and two operations that work with it:
* - sja1105_dynamic_config_write
* - sja1105_dynamic_config_read
*
* Compared to the struct sja1105_table_ops from sja1105_static_config.c,
* the dynamic accessors work with a compound buffer:
*
* packed_buf
*
* |
* V
* +-----------------------------------------+------------------+
* | ENTRY BUFFER | COMMAND BUFFER |
* +-----------------------------------------+------------------+
*
* <----------------------- packed_size ------------------------>
*
* The ENTRY BUFFER may or may not have the same layout, or size, as its static
* configuration table entry counterpart. When it does, the same packing
* function is reused (bar exceptional cases - see
* sja1105pqrs_dyn_l2_lookup_entry_packing).
*
* The reason for the COMMAND BUFFER being at the end is to be able to send
* a dynamic write command through a single SPI burst. By the time the switch
* reacts to the command, the ENTRY BUFFER is already populated with the data
* sent by the core.
*
* The COMMAND BUFFER is always SJA1105_SIZE_DYN_CMD bytes (one 32-bit word) in
* size.
*
* Sometimes the ENTRY BUFFER does not really exist (when the number of fields
* that can be reconfigured is small), then the switch repurposes some of the
* unused 32 bits of the COMMAND BUFFER to hold ENTRY data.
*
* The key members of struct sja1105_dynamic_table_ops are:
* - .entry_packing: A function that deals with packing an ENTRY structure
* into an SPI buffer, or retrieving an ENTRY structure
* from one.
* The @packed_buf pointer it's given always points to
* the ENTRY portion of the buffer.
* - .cmd_packing: A function that deals with packing/unpacking the COMMAND
* structure to/from the SPI buffer.
* It is given the same @packed_buf pointer as .entry_packing,
* so most of the time, the @packed_buf points *before* the
* COMMAND offset inside the buffer.
* To access the COMMAND portion of the buffer, the function
* knows its correct offset.
* Giving both functions the same pointer is handy because in
* extreme cases (see sja1105pqrs_dyn_l2_lookup_entry_packing)
* the .entry_packing is able to jump to the COMMAND portion,
* or vice-versa (sja1105pqrs_l2_lookup_cmd_packing).
* - .access: A bitmap of:
* OP_READ: Set if the hardware manual marks the ENTRY portion of the
* dynamic configuration table buffer as R (readable) after
* an SPI read command (the switch will populate the buffer).
* OP_WRITE: Set if the manual marks the ENTRY portion of the dynamic
* table buffer as W (writable) after an SPI write command
* (the switch will read the fields provided in the buffer).
* OP_DEL: Set if the manual says the VALIDENT bit is supported in the
* COMMAND portion of this dynamic config buffer (i.e. the
* specified entry can be invalidated through a SPI write
* command).
* OP_SEARCH: Set if the manual says that the index of an entry can
* be retrieved in the COMMAND portion of the buffer based
* on its ENTRY portion, as a result of a SPI write command.
* Only the TCAM-based FDB table on SJA1105 P/Q/R/S supports
* this.
* OP_VALID_ANYWAY: Reading some tables through the dynamic config
* interface is possible even if the VALIDENT bit is not
* set in the writeback. So don't error out in that case.
* - .max_entry_count: The number of entries, counting from zero, that can be
* reconfigured through the dynamic interface. If a static
* table can be reconfigured at all dynamically, this
* number always matches the maximum number of supported
* static entries.
* - .packed_size: The length in bytes of the compound ENTRY + COMMAND BUFFER.
* Note that sometimes the compound buffer may contain holes in
* it (see sja1105_vlan_lookup_cmd_packing). The @packed_buf is
* contiguous however, so @packed_size includes any unused
* bytes.
* - .addr: The base SPI address at which the buffer must be written to the
* switch's memory. When looking at the hardware manual, this must
* always match the lowest documented address for the ENTRY, and not
* that of the COMMAND, since the other 32-bit words will follow along
* at the correct addresses.
*/
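/* Illustrative sketch (hypothetical caller, not part of this file): a
 * runtime update of a single MAC Configuration entry goes through the
 * accessors built on top of these ops, typically by patching the shadow
 * copy kept in the static config and writing it back through the dynamic
 * interface:
 *
 *	struct sja1105_mac_config_entry *mac;
 *
 *	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
 *	mac[port].vlanid = pvid;
 *
 *	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
 *					    &mac[port], true);
 */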
#define SJA1105_SIZE_DYN_CMD 4
#define SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD \
SJA1105_SIZE_DYN_CMD
#define SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_LOOKUP_ENTRY)
#define SJA1110_SIZE_VL_POLICING_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_POLICING_ENTRY)
#define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \
SJA1105_SIZE_DYN_CMD
#define SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_L2_LOOKUP_ENTRY)
#define SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY)
#define SJA1110_SIZE_L2_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_L2_LOOKUP_ENTRY)
#define SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + 4 + SJA1105_SIZE_VLAN_LOOKUP_ENTRY)
#define SJA1110_SIZE_VLAN_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_VLAN_LOOKUP_ENTRY)
#define SJA1105_SIZE_L2_FORWARDING_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_FORWARDING_ENTRY)
#define SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY)
#define SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY)
#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
SJA1105_SIZE_DYN_CMD
#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY)
#define SJA1110_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY)
#define SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD \
SJA1105_SIZE_DYN_CMD
#define SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY)
#define SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_GENERAL_PARAMS_ENTRY)
#define SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY)
#define SJA1105_SIZE_RETAGGING_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_RETAGGING_ENTRY)
#define SJA1105ET_SIZE_CBS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_CBS_ENTRY)
#define SJA1105PQRS_SIZE_CBS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_CBS_ENTRY)
#define SJA1110_SIZE_XMII_PARAMS_DYN_CMD \
SJA1110_SIZE_XMII_PARAMS_ENTRY
#define SJA1110_SIZE_L2_POLICING_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_POLICING_ENTRY)
#define SJA1110_SIZE_L2_FORWARDING_PARAMS_DYN_CMD \
SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY
#define SJA1105_MAX_DYN_CMD_SIZE \
SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD
struct sja1105_dyn_cmd {
bool search;
u64 valid;
u64 rdwrset;
u64 errors;
u64 valident;
u64 index;
};
enum sja1105_hostcmd {
SJA1105_HOSTCMD_SEARCH = 1,
SJA1105_HOSTCMD_READ = 2,
SJA1105_HOSTCMD_WRITE = 3,
SJA1105_HOSTCMD_INVALIDATE = 4,
};
/* Command and entry overlap */
static void
sja1105et_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(buf, &cmd->valid, 31, 31, size, op);
sja1105_packing(buf, &cmd->errors, 30, 30, size, op);
sja1105_packing(buf, &cmd->rdwrset, 29, 29, size, op);
sja1105_packing(buf, &cmd->index, 9, 0, size, op);
}
/* Command and entry are separate */
static void
sja1105pqrs_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->errors, 30, 30, size, op);
sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
sja1105_packing(p, &cmd->index, 9, 0, size, op);
}
static void
sja1110_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
sja1105_packing(p, &cmd->index, 11, 0, size, op);
}
static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_vl_lookup_entry *entry = entry_ptr;
const int size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD;
sja1105_packing(buf, &entry->egrmirr, 21, 17, size, op);
sja1105_packing(buf, &entry->ingrmirr, 16, 16, size, op);
return size;
}
static void
sja1110_vl_policing_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->index, 11, 0, size, op);
}
static void
sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op, int entry_size)
{
const int size = SJA1105_SIZE_DYN_CMD;
u8 *p = buf + entry_size;
u64 hostcmd;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
sja1105_packing(p, &cmd->valident, 27, 27, size, op);
/* VALIDENT is supposed to indicate "keep or not", but in SJA1105 E/T,
* using it to delete a management route was unsupported. UM10944
* said about it:
*
* In case of a write access with the MGMTROUTE flag set,
* the flag will be ignored. It will always be found cleared
* for read accesses with the MGMTROUTE flag set.
*
* SJA1105 P/Q/R/S keeps the same behavior w.r.t. VALIDENT, but there
* is now another flag called HOSTCMD which does more stuff (quoting
* from UM11040):
*
* A write request is accepted only when HOSTCMD is set to write host
* or invalid. A read request is accepted only when HOSTCMD is set to
* search host or read host.
*
* So it is possible to translate a RDWRSET/VALIDENT combination into
* HOSTCMD so that we keep the dynamic command API in place, and
* at the same time achieve compatibility with the management route
* command structure.
*/
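/* For reference, the translation performed below is:
 *   SPI_READ  + cmd->search    => SJA1105_HOSTCMD_SEARCH
 *   SPI_READ  + !cmd->search   => SJA1105_HOSTCMD_READ
 *   SPI_WRITE + cmd->valident  => SJA1105_HOSTCMD_WRITE
 *   SPI_WRITE + !cmd->valident => SJA1105_HOSTCMD_INVALIDATE
 */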
if (cmd->rdwrset == SPI_READ) {
if (cmd->search)
hostcmd = SJA1105_HOSTCMD_SEARCH;
else
hostcmd = SJA1105_HOSTCMD_READ;
} else {
/* SPI_WRITE */
if (cmd->valident)
hostcmd = SJA1105_HOSTCMD_WRITE;
else
hostcmd = SJA1105_HOSTCMD_INVALIDATE;
}
sja1105_packing(p, &hostcmd, 25, 23, size, op);
}
static void
sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
int entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
/* Hack - The hardware takes the 'index' field within
* struct sja1105_l2_lookup_entry as the index on which this command
* will operate. However it will ignore everything else, so 'index'
* is logically part of command but physically part of entry.
* Populate the 'index' entry field from within the command callback,
* such that our API doesn't need to ask for a full-blown entry
* structure when e.g. a delete is requested.
*/
sja1105_packing(buf, &cmd->index, 15, 6, entry_size, op);
}
static void
sja1110_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
int entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
sja1105_packing(buf, &cmd->index, 10, 1, entry_size, op);
}
/* The switch behaves in a way that makes our command/entry abstraction
* crumble apart.
*
* On P/Q/R/S, the switch tries to say whether a FDB entry
* is statically programmed or dynamically learned via a flag called LOCKEDS.
* The hardware manual says about this field:
*
* On write will specify the format of ENTRY.
* On read the flag will be found cleared at times the VALID flag is found
* set. The flag will also be found cleared in response to a read having the
* MGMTROUTE flag set. In response to a read with the MGMTROUTE flag
* cleared, the flag will be set if the most recent access operated on an entry
* that was either loaded by configuration or through dynamic reconfiguration
* (as opposed to automatically learned entries).
*
* The trouble with this flag is that it's part of the *command* to access the
* dynamic interface, and not part of the *entry* retrieved from it.
* Otherwise said, for a sja1105_dynamic_config_read, LOCKEDS is supposed to be
* an output from the switch into the command buffer, and for a
* sja1105_dynamic_config_write, the switch treats LOCKEDS as an input
* (hence we can write either static, or automatically learned entries, from
* the core).
* But the manual contradicts itself in the last phrase where it says that on
* read, LOCKEDS will be set to 1 for all FDB entries written through the
* dynamic interface (therefore, the value of LOCKEDS from the
* sja1105_dynamic_config_write is not really used for anything, it'll store a
* 1 anyway).
* This means you can't really write a FDB entry with LOCKEDS=0 (automatically
* learned) into the switch, which kind of makes sense.
* As for reading through the dynamic interface, it doesn't make too much sense
* to put LOCKEDS into the command, since the switch will inevitably have to
* ignore it (otherwise a command would be like "read the FDB entry 123, but
* only if it's dynamically learned" <- well how am I supposed to know?) and
* just use it as an output buffer for its findings. But guess what... that's
* what the entry buffer is for!
* Unfortunately, what really breaks this abstraction is the fact that it
* wasn't designed having the fact in mind that the switch can output
* entry-related data as writeback through the command buffer.
* However, whether a FDB entry is statically or dynamically learned *is* part
* of the entry and not the command data, no matter what the switch thinks.
* In order to do that, we'll need to wrap around the
* sja1105pqrs_l2_lookup_entry_packing from sja1105_static_config.c, and take
* a peek outside of the caller-supplied @buf (the entry buffer), to reach the
* command buffer.
*/
static size_t
sja1105pqrs_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_l2_lookup_entry *entry = entry_ptr;
u8 *cmd = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
return sja1105pqrs_l2_lookup_entry_packing(buf, entry_ptr, op);
}
static size_t sja1110_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_l2_lookup_entry *entry = entry_ptr;
u8 *cmd = buf + SJA1110_SIZE_L2_LOOKUP_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
return sja1110_l2_lookup_entry_packing(buf, entry_ptr, op);
}
static void
sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
sja1105_packing(p, &cmd->valident, 27, 27, size, op);
/* Hack - see comments above. */
sja1105_packing(buf, &cmd->index, 29, 20,
SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op);
}
static size_t sja1105et_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_l2_lookup_entry *entry = entry_ptr;
u8 *cmd = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
return sja1105et_l2_lookup_entry_packing(buf, entry_ptr, op);
}
static void
sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
u64 mgmtroute = 1;
sja1105et_l2_lookup_cmd_packing(buf, cmd, op);
if (op == PACK)
sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
}
static size_t sja1105et_mgmt_route_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_mgmt_entry *entry = entry_ptr;
const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
/* UM10944: To specify if a PTP egress timestamp shall be captured on
* each port upon transmission of the frame, the LSB of VLANID in the
* ENTRY field provided by the host must be set.
* Bit 1 of VLANID then specifies the register where the timestamp for
* this port is stored in.
*/
sja1105_packing(buf, &entry->tsreg, 85, 85, size, op);
sja1105_packing(buf, &entry->takets, 84, 84, size, op);
sja1105_packing(buf, &entry->macaddr, 83, 36, size, op);
sja1105_packing(buf, &entry->destports, 35, 31, size, op);
sja1105_packing(buf, &entry->enfport, 30, 30, size, op);
return size;
}
static void
sja1105pqrs_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
u64 mgmtroute = 1;
sja1105pqrs_l2_lookup_cmd_packing(buf, cmd, op);
if (op == PACK)
sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
}
static size_t sja1105pqrs_mgmt_route_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
struct sja1105_mgmt_entry *entry = entry_ptr;
/* In P/Q/R/S, enfport got renamed to mgmtvalid, but its purpose
* is the same (driver uses it to confirm that frame was sent).
* So just keep the name from E/T.
*/
sja1105_packing(buf, &entry->tsreg, 71, 71, size, op);
sja1105_packing(buf, &entry->takets, 70, 70, size, op);
sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
sja1105_packing(buf, &entry->destports, 21, 17, size, op);
sja1105_packing(buf, &entry->enfport, 16, 16, size, op);
return size;
}
/* In E/T, entry is at addresses 0x27-0x28. There is a 4 byte gap at 0x29,
* and command is at 0x2a. Similarly in P/Q/R/S there is a 1 register gap
* between entry (0x2d, 0x2e) and command (0x30).
*/
static void
sja1105_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105_SIZE_VLAN_LOOKUP_ENTRY + 4;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->valident, 27, 27, size, op);
/* Hack - see comments above, applied for 'vlanid' field of
* struct sja1105_vlan_lookup_entry.
*/
sja1105_packing(buf, &cmd->index, 38, 27,
SJA1105_SIZE_VLAN_LOOKUP_ENTRY, op);
}
/* In SJA1110 there is no gap between the command and the data, yay... */
static void
sja1110_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1110_SIZE_VLAN_LOOKUP_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
u64 type_entry = 0;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
/* Hack: treat 'vlanid' field of struct sja1105_vlan_lookup_entry as
* cmd->index.
*/
sja1105_packing(buf, &cmd->index, 38, 27,
SJA1110_SIZE_VLAN_LOOKUP_ENTRY, op);
* But the VALIDENT bit has disappeared; now we are supposed to
* invalidate an entry through the TYPE_ENTRY field of the entry.
* This is a hack to transform the non-zero value of the TYPE_ENTRY
* field into a VALIDENT bit.
*/
if (op == PACK && !cmd->valident) {
sja1105_packing(buf, &type_entry, 40, 39,
SJA1110_SIZE_VLAN_LOOKUP_ENTRY, PACK);
} else if (op == UNPACK) {
sja1105_packing(buf, &type_entry, 40, 39,
SJA1110_SIZE_VLAN_LOOKUP_ENTRY, UNPACK);
cmd->valident = !!type_entry;
}
}
static void
sja1105_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105_SIZE_L2_FORWARDING_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->errors, 30, 30, size, op);
sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
sja1105_packing(p, &cmd->index, 4, 0, size, op);
}
static void
sja1110_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105_SIZE_L2_FORWARDING_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
sja1105_packing(p, &cmd->index, 4, 0, size, op);
}
static void
sja1105et_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
const int size = SJA1105_SIZE_DYN_CMD;
/* Yup, user manual definitions are reversed */
u8 *reg1 = buf + 4;
sja1105_packing(reg1, &cmd->valid, 31, 31, size, op);
sja1105_packing(reg1, &cmd->index, 26, 24, size, op);
}
static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY;
struct sja1105_mac_config_entry *entry = entry_ptr;
/* Yup, user manual definitions are reversed */
u8 *reg1 = buf + 4;
u8 *reg2 = buf;
sja1105_packing(reg1, &entry->speed, 30, 29, size, op);
sja1105_packing(reg1, &entry->drpdtag, 23, 23, size, op);
sja1105_packing(reg1, &entry->drpuntag, 22, 22, size, op);
sja1105_packing(reg1, &entry->retag, 21, 21, size, op);
sja1105_packing(reg1, &entry->dyn_learn, 20, 20, size, op);
sja1105_packing(reg1, &entry->egress, 19, 19, size, op);
sja1105_packing(reg1, &entry->ingress, 18, 18, size, op);
sja1105_packing(reg1, &entry->ing_mirr, 17, 17, size, op);
sja1105_packing(reg1, &entry->egr_mirr, 16, 16, size, op);
sja1105_packing(reg1, &entry->vlanprio, 14, 12, size, op);
sja1105_packing(reg1, &entry->vlanid, 11, 0, size, op);
sja1105_packing(reg2, &entry->tp_delin, 31, 16, size, op);
sja1105_packing(reg2, &entry->tp_delout, 15, 0, size, op);
/* MAC configuration table entries which can't be reconfigured:
* top, base, enabled, ifg, maxage, drpnona664
*/
/* Bogus return value, not used anywhere */
return 0;
}
static void
sja1105pqrs_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY;
u8 *p = buf + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->errors, 30, 30, size, op);
sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
sja1105_packing(p, &cmd->index, 2, 0, size, op);
}
static void
sja1110_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
sja1105_packing(p, &cmd->index, 3, 0, size, op);
}
static void
sja1105et_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
sja1105_packing(buf, &cmd->valid, 31, 31,
SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op);
}
static size_t
sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->poly, 7, 0,
SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op);
/* Bogus return value, not used anywhere */
return 0;
}
static void
sja1105pqrs_l2_lookup_params_cmd_packing(void *buf,
struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
}
static void
sja1110_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
}
static void
sja1105et_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD;
sja1105_packing(buf, &cmd->valid, 31, 31, size, op);
sja1105_packing(buf, &cmd->errors, 30, 30, size, op);
}
static size_t
sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_general_params_entry *entry = entry_ptr;
const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD;
sja1105_packing(buf, &entry->mirr_port, 2, 0, size, op);
/* Bogus return value, not used anywhere */
return 0;
}
static void
sja1105pqrs_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->errors, 30, 30, size, op);
sja1105_packing(p, &cmd->rdwrset, 28, 28, size, op);
}
static void
sja1110_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1110_SIZE_GENERAL_PARAMS_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
}
static void
sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->errors, 30, 30, size, op);
sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
}
static void
sja1105_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105_SIZE_RETAGGING_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->errors, 30, 30, size, op);
sja1105_packing(p, &cmd->valident, 29, 29, size, op);
sja1105_packing(p, &cmd->rdwrset, 28, 28, size, op);
sja1105_packing(p, &cmd->index, 5, 0, size, op);
}
static void
sja1110_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105_SIZE_RETAGGING_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
sja1105_packing(p, &cmd->valident, 28, 28, size, op);
sja1105_packing(p, &cmd->index, 4, 0, size, op);
}
static void sja1105et_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105ET_SIZE_CBS_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->index, 19, 16, size, op);
}
static size_t sja1105et_cbs_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105ET_SIZE_CBS_ENTRY;
struct sja1105_cbs_entry *entry = entry_ptr;
u8 *cmd = buf + size;
u32 *p = buf;
sja1105_packing(cmd, &entry->port, 5, 3, SJA1105_SIZE_DYN_CMD, op);
sja1105_packing(cmd, &entry->prio, 2, 0, SJA1105_SIZE_DYN_CMD, op);
sja1105_packing(p + 3, &entry->credit_lo, 31, 0, size, op);
sja1105_packing(p + 2, &entry->credit_hi, 31, 0, size, op);
sja1105_packing(p + 1, &entry->send_slope, 31, 0, size, op);
sja1105_packing(p + 0, &entry->idle_slope, 31, 0, size, op);
return size;
}
static void sja1105pqrs_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105PQRS_SIZE_CBS_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
sja1105_packing(p, &cmd->index, 3, 0, size, op);
}
static void sja1110_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105PQRS_SIZE_CBS_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
sja1105_packing(p, &cmd->index, 7, 0, size, op);
}
static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_CBS_ENTRY;
struct sja1105_cbs_entry *entry = entry_ptr;
sja1105_packing(buf, &entry->port, 159, 157, size, op);
sja1105_packing(buf, &entry->prio, 156, 154, size, op);
sja1105_packing(buf, &entry->credit_lo, 153, 122, size, op);
sja1105_packing(buf, &entry->credit_hi, 121, 90, size, op);
sja1105_packing(buf, &entry->send_slope, 89, 58, size, op);
sja1105_packing(buf, &entry->idle_slope, 57, 26, size, op);
return size;
}
static size_t sja1110_cbs_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_CBS_ENTRY;
struct sja1105_cbs_entry *entry = entry_ptr;
u64 entry_type = SJA1110_CBS_SHAPER;
sja1105_packing(buf, &entry_type, 159, 159, size, op);
sja1105_packing(buf, &entry->credit_lo, 151, 120, size, op);
sja1105_packing(buf, &entry->credit_hi, 119, 88, size, op);
sja1105_packing(buf, &entry->send_slope, 87, 56, size, op);
sja1105_packing(buf, &entry->idle_slope, 55, 24, size, op);
return size;
}
static void sja1110_dummy_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
}
static void
sja1110_l2_policing_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105_SIZE_L2_POLICING_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
sja1105_packing(p, &cmd->index, 6, 0, size, op);
}
#define OP_READ BIT(0)
#define OP_WRITE BIT(1)
#define OP_DEL BIT(2)
#define OP_SEARCH BIT(3)
#define OP_VALID_ANYWAY BIT(4)
/* SJA1105E/T: First generation */
const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105et_vl_lookup_entry_packing,
.cmd_packing = sja1105et_vl_lookup_cmd_packing,
.access = OP_WRITE,
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
.addr = 0x35,
},
[BLK_IDX_L2_LOOKUP] = {
.entry_packing = sja1105et_dyn_l2_lookup_entry_packing,
.cmd_packing = sja1105et_l2_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE | OP_DEL),
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
.packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
.addr = 0x20,
},
[BLK_IDX_MGMT_ROUTE] = {
.entry_packing = sja1105et_mgmt_route_entry_packing,
.cmd_packing = sja1105et_mgmt_route_cmd_packing,
.access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
.max_entry_count = SJA1105_NUM_PORTS,
.packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
.addr = 0x20,
},
[BLK_IDX_VLAN_LOOKUP] = {
.entry_packing = sja1105_vlan_lookup_entry_packing,
.cmd_packing = sja1105_vlan_lookup_cmd_packing,
.access = (OP_WRITE | OP_DEL),
.max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
.packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD,
.addr = 0x27,
},
[BLK_IDX_L2_FORWARDING] = {
.entry_packing = sja1105_l2_forwarding_entry_packing,
.cmd_packing = sja1105_l2_forwarding_cmd_packing,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
.access = OP_WRITE,
.packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
.addr = 0x24,
},
[BLK_IDX_MAC_CONFIG] = {
.entry_packing = sja1105et_mac_config_entry_packing,
.cmd_packing = sja1105et_mac_config_cmd_packing,
.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
.access = OP_WRITE,
.packed_size = SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD,
.addr = 0x36,
},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.entry_packing = sja1105et_l2_lookup_params_entry_packing,
.cmd_packing = sja1105et_l2_lookup_params_cmd_packing,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
.access = OP_WRITE,
.packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
.addr = 0x38,
},
[BLK_IDX_GENERAL_PARAMS] = {
.entry_packing = sja1105et_general_params_entry_packing,
.cmd_packing = sja1105et_general_params_cmd_packing,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
.access = OP_WRITE,
.packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD,
.addr = 0x34,
},
[BLK_IDX_RETAGGING] = {
.entry_packing = sja1105_retagging_entry_packing,
.cmd_packing = sja1105_retagging_cmd_packing,
.max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
.access = (OP_WRITE | OP_DEL),
.packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
.addr = 0x31,
},
[BLK_IDX_CBS] = {
.entry_packing = sja1105et_cbs_entry_packing,
.cmd_packing = sja1105et_cbs_cmd_packing,
.max_entry_count = SJA1105ET_MAX_CBS_COUNT,
.access = OP_WRITE,
.packed_size = SJA1105ET_SIZE_CBS_DYN_CMD,
.addr = 0x2c,
},
};
/* SJA1105P/Q/R/S: Second generation */
const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105_vl_lookup_entry_packing,
.cmd_packing = sja1105pqrs_vl_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE),
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
.addr = 0x47,
},
[BLK_IDX_L2_LOOKUP] = {
.entry_packing = sja1105pqrs_dyn_l2_lookup_entry_packing,
.cmd_packing = sja1105pqrs_l2_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
.packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
.addr = 0x24,
},
[BLK_IDX_MGMT_ROUTE] = {
.entry_packing = sja1105pqrs_mgmt_route_entry_packing,
.cmd_packing = sja1105pqrs_mgmt_route_cmd_packing,
.access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH | OP_VALID_ANYWAY),
.max_entry_count = SJA1105_NUM_PORTS,
.packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
.addr = 0x24,
},
[BLK_IDX_VLAN_LOOKUP] = {
.entry_packing = sja1105_vlan_lookup_entry_packing,
.cmd_packing = sja1105_vlan_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE | OP_DEL),
.max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
.packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD,
.addr = 0x2D,
},
[BLK_IDX_L2_FORWARDING] = {
.entry_packing = sja1105_l2_forwarding_entry_packing,
.cmd_packing = sja1105_l2_forwarding_cmd_packing,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
.access = OP_WRITE,
.packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
.addr = 0x2A,
},
[BLK_IDX_MAC_CONFIG] = {
.entry_packing = sja1105pqrs_mac_config_entry_packing,
.cmd_packing = sja1105pqrs_mac_config_cmd_packing,
.max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
.access = (OP_READ | OP_WRITE),
.packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD,
.addr = 0x4B,
},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.entry_packing = sja1105pqrs_l2_lookup_params_entry_packing,
.cmd_packing = sja1105pqrs_l2_lookup_params_cmd_packing,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
.access = (OP_READ | OP_WRITE),
.packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
.addr = 0x54,
},
[BLK_IDX_AVB_PARAMS] = {
.entry_packing = sja1105pqrs_avb_params_entry_packing,
.cmd_packing = sja1105pqrs_avb_params_cmd_packing,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
.access = (OP_READ | OP_WRITE),
.packed_size = SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD,
.addr = 0x8003,
},
[BLK_IDX_GENERAL_PARAMS] = {
.entry_packing = sja1105pqrs_general_params_entry_packing,
.cmd_packing = sja1105pqrs_general_params_cmd_packing,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
.access = (OP_READ | OP_WRITE),
.packed_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD,
.addr = 0x3B,
},
[BLK_IDX_RETAGGING] = {
.entry_packing = sja1105_retagging_entry_packing,
.cmd_packing = sja1105_retagging_cmd_packing,
.max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
.access = (OP_READ | OP_WRITE | OP_DEL),
.packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
.addr = 0x38,
},
[BLK_IDX_CBS] = {
.entry_packing = sja1105pqrs_cbs_entry_packing,
.cmd_packing = sja1105pqrs_cbs_cmd_packing,
.max_entry_count = SJA1105PQRS_MAX_CBS_COUNT,
.access = OP_WRITE,
.packed_size = SJA1105PQRS_SIZE_CBS_DYN_CMD,
.addr = 0x32,
},
};
/* SJA1110: Third generation */
const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1110_vl_lookup_entry_packing,
.cmd_packing = sja1110_vl_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
.max_entry_count = SJA1110_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0x124),
},
[BLK_IDX_VL_POLICING] = {
.entry_packing = sja1110_vl_policing_entry_packing,
.cmd_packing = sja1110_vl_policing_cmd_packing,
.access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
.max_entry_count = SJA1110_MAX_VL_POLICING_COUNT,
.packed_size = SJA1110_SIZE_VL_POLICING_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0x310),
},
[BLK_IDX_L2_LOOKUP] = {
.entry_packing = sja1110_dyn_l2_lookup_entry_packing,
.cmd_packing = sja1110_l2_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
.packed_size = SJA1110_SIZE_L2_LOOKUP_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0x8c),
},
[BLK_IDX_VLAN_LOOKUP] = {
.entry_packing = sja1110_vlan_lookup_entry_packing,
.cmd_packing = sja1110_vlan_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE | OP_DEL),
.max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
.packed_size = SJA1110_SIZE_VLAN_LOOKUP_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0xb4),
},
[BLK_IDX_L2_FORWARDING] = {
.entry_packing = sja1110_l2_forwarding_entry_packing,
.cmd_packing = sja1110_l2_forwarding_cmd_packing,
.max_entry_count = SJA1110_MAX_L2_FORWARDING_COUNT,
.access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
.packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0xa8),
},
[BLK_IDX_MAC_CONFIG] = {
.entry_packing = sja1110_mac_config_entry_packing,
.cmd_packing = sja1110_mac_config_cmd_packing,
.max_entry_count = SJA1110_MAX_MAC_CONFIG_COUNT,
.access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
.packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0x134),
},
[BLK_IDX_L2_LOOKUP_PARAMS] = {
.entry_packing = sja1110_l2_lookup_params_entry_packing,
.cmd_packing = sja1110_l2_lookup_params_cmd_packing,
.max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
.access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
.packed_size = SJA1110_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0x158),
},
[BLK_IDX_AVB_PARAMS] = {
.entry_packing = sja1105pqrs_avb_params_entry_packing,
.cmd_packing = sja1105pqrs_avb_params_cmd_packing,
.max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
.access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
.packed_size = SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0x2000C),
},
[BLK_IDX_GENERAL_PARAMS] = {
.entry_packing = sja1110_general_params_entry_packing,
.cmd_packing = sja1110_general_params_cmd_packing,
.max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
.access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
.packed_size = SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0xe8),
},
[BLK_IDX_RETAGGING] = {
.entry_packing = sja1110_retagging_entry_packing,
.cmd_packing = sja1110_retagging_cmd_packing,
.max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
.access = (OP_READ | OP_WRITE | OP_DEL),
.packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0xdc),
},
[BLK_IDX_CBS] = {
.entry_packing = sja1110_cbs_entry_packing,
.cmd_packing = sja1110_cbs_cmd_packing,
.max_entry_count = SJA1110_MAX_CBS_COUNT,
.access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
.packed_size = SJA1105PQRS_SIZE_CBS_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0xc4),
},
[BLK_IDX_XMII_PARAMS] = {
.entry_packing = sja1110_xmii_params_entry_packing,
.cmd_packing = sja1110_dummy_cmd_packing,
.max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
.access = (OP_READ | OP_VALID_ANYWAY),
.packed_size = SJA1110_SIZE_XMII_PARAMS_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0x3c),
},
[BLK_IDX_L2_POLICING] = {
.entry_packing = sja1110_l2_policing_entry_packing,
.cmd_packing = sja1110_l2_policing_cmd_packing,
.max_entry_count = SJA1110_MAX_L2_POLICING_COUNT,
.access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
.packed_size = SJA1110_SIZE_L2_POLICING_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0x2fc),
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {
.entry_packing = sja1110_l2_forwarding_params_entry_packing,
.cmd_packing = sja1110_dummy_cmd_packing,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
.access = (OP_READ | OP_VALID_ANYWAY),
.packed_size = SJA1110_SIZE_L2_FORWARDING_PARAMS_DYN_CMD,
.addr = SJA1110_SPI_ADDR(0x20000),
},
};
#define SJA1105_DYNAMIC_CONFIG_SLEEP_US 10
#define SJA1105_DYNAMIC_CONFIG_TIMEOUT_US 100000
static int
sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
const struct sja1105_dynamic_table_ops *ops,
void *entry, bool check_valident,
bool check_errors)
{
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
struct sja1105_dyn_cmd cmd = {};
int rc;
/* Read back the whole entry + command structure. */
rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
ops->packed_size);
if (rc)
return rc;
/* Unpack the command structure, and return it to the caller in case it
* needs to perform further checks on it (VALIDENT).
*/
ops->cmd_packing(packed_buf, &cmd, UNPACK);
/* Hardware hasn't cleared VALID => still working on it */
if (cmd.valid)
return -EAGAIN;
if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
return -ENOENT;
if (check_errors && cmd.errors)
return -EINVAL;
/* Don't dereference possibly NULL pointer - maybe caller
* only wanted to see whether the entry existed or not.
*/
if (entry)
ops->entry_packing(packed_buf, entry, UNPACK);
return 0;
}
/* Poll the dynamic config entry's control area until the hardware has
* cleared the VALID bit, which means we have confirmation that it has
* finished processing the command.
*/
static int
sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
const struct sja1105_dynamic_table_ops *ops,
void *entry, bool check_valident,
bool check_errors)
{
int err, rc;
err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
rc, rc != -EAGAIN,
SJA1105_DYNAMIC_CONFIG_SLEEP_US,
SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
false, priv, ops, entry, check_valident,
check_errors);
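/* err is -ETIMEDOUT if the switch never cleared the VALID bit within
 * the timeout; otherwise propagate the last poll result, which is 0
 * on success or -ENOENT/-EINVAL (or an SPI error) from
 * sja1105_dynamic_config_poll_valid().
 */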
return err < 0 ? err : rc;
}
/* Provides read access to the settings through the dynamic interface
* of the switch.
* @blk_idx is used as key to select from the sja1105_dynamic_table_ops.
* The selection is limited by the hardware with respect to which
* configuration blocks can be read through the dynamic interface.
* @index is used to retrieve a particular table entry. If negative,
* (and if the @blk_idx supports the searching operation) a search
* is performed based on the @entry parameter.
* @entry Type-casted to an unpacked structure that holds a table entry
* of the type specified in @blk_idx.
* Usually an output argument. If @index is negative, then this
* argument is used as input/output: it should be pre-populated
* with the element to search for. Entries which support the
* search operation will have an "index" field (not the @index
* argument to this function) and that is where the found index
* will be returned (or left unmodified - thus negative - if not
* found).
*/
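/* Illustrative sketch of the search operation described above (hypothetical
 * caller, not part of this file): on families whose L2 Lookup table has
 * OP_SEARCH, the FDB index of a known {MAC address, VLAN} key can be
 * retrieved by pre-populating the entry and passing a negative @index. The
 * mask fields request an exact match on the full key:
 *
 *	struct sja1105_l2_lookup_entry l2_lookup = {0};
 *	int rc;
 *
 *	l2_lookup.macaddr = ether_addr_to_u64(addr);
 *	l2_lookup.vlanid = vid;
 *	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
 *	l2_lookup.mask_vlanid = VLAN_VID_MASK;
 *
 *	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, -1,
 *					 &l2_lookup);
 *	if (!rc)
 *		found_index = l2_lookup.index;
 */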
int sja1105_dynamic_config_read(struct sja1105_private *priv,
enum sja1105_blk_idx blk_idx,
int index, void *entry)
{
const struct sja1105_dynamic_table_ops *ops;
struct sja1105_dyn_cmd cmd = {0};
/* SPI payload buffer */
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
int rc;
if (blk_idx >= BLK_IDX_MAX_DYN)
return -ERANGE;
ops = &priv->info->dyn_ops[blk_idx];
if (index >= 0 && index >= ops->max_entry_count)
return -ERANGE;
if (index < 0 && !(ops->access & OP_SEARCH))
return -EOPNOTSUPP;
if (!(ops->access & OP_READ))
return -EOPNOTSUPP;
if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
return -ERANGE;
if (!ops->cmd_packing)
return -EOPNOTSUPP;
if (!ops->entry_packing)
return -EOPNOTSUPP;
cmd.valid = true; /* Trigger action on table entry */
cmd.rdwrset = SPI_READ; /* Action is read */
if (index < 0) {
/* Avoid copying a signed negative number to an u64 */
cmd.index = 0;
cmd.search = true;
} else {
cmd.index = index;
cmd.search = false;
}
cmd.valident = true;
ops->cmd_packing(packed_buf, &cmd, PACK);
if (cmd.search)
ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: read config table entry */
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
if (rc < 0)
goto out;
rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
out:
mutex_unlock(&priv->dynamic_config_lock);
return rc;
}
int sja1105_dynamic_config_write(struct sja1105_private *priv,
enum sja1105_blk_idx blk_idx,
int index, void *entry, bool keep)
{
const struct sja1105_dynamic_table_ops *ops;
struct sja1105_dyn_cmd cmd = {0};
/* SPI payload buffer */
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
int rc;
if (blk_idx >= BLK_IDX_MAX_DYN)
return -ERANGE;
ops = &priv->info->dyn_ops[blk_idx];
if (index >= ops->max_entry_count)
return -ERANGE;
if (index < 0)
return -ERANGE;
if (!(ops->access & OP_WRITE))
return -EOPNOTSUPP;
if (!keep && !(ops->access & OP_DEL))
return -EOPNOTSUPP;
if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
return -ERANGE;
cmd.valident = keep; /* If false, deletes entry */
cmd.valid = true; /* Trigger action on table entry */
cmd.rdwrset = SPI_WRITE; /* Action is write */
cmd.index = index;
if (!ops->cmd_packing)
return -EOPNOTSUPP;
ops->cmd_packing(packed_buf, &cmd, PACK);
if (!ops->entry_packing)
return -EOPNOTSUPP;
/* Don't dereference potentially NULL pointer if just
* deleting a table entry is what was requested. For cases
* where 'index' field is physically part of entry structure,
* and needed here, we deal with that in the cmd_packing callback.
*/
if (keep)
ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: write config table entry */
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
if (rc < 0)
goto out;
rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
out:
mutex_unlock(&priv->dynamic_config_lock);
return rc;
}
static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
{
int i;
for (i = 0; i < 8; i++) {
if ((crc ^ byte) & (1 << 7)) {
crc <<= 1;
crc ^= poly;
} else {
crc <<= 1;
}
byte <<= 1;
}
return crc;
}
/* CRC8 algorithm with non-reversed input, non-reversed output,
* no input xor and no output xor. Code customized for receiving
* the SJA1105 E/T FDB keys (vlanid, macaddr) as input. CRC polynomial
* is also received as argument in the Koopman notation that the switch
* hardware stores it in.
*/
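/* Worked example: with the default L2 Lookup Parameters poly of 0x97
 * (Koopman notation for x^8 + x^5 + x^3 + x^2 + x + 1), the conversion
 * below yields poly = (u8)(1 + (0x97 << 1)) = 0x2f, i.e. the same
 * polynomial with the implicit x^8 term dropped, which is what
 * sja1105_crc8_add() expects.
 */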
u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid)
{
struct sja1105_l2_lookup_params_entry *l2_lookup_params =
priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS].entries;
u64 input, poly_koopman = l2_lookup_params->poly;
/* Convert polynomial from Koopman to 'normal' notation */
u8 poly = (u8)(1 + (poly_koopman << 1));
u8 crc = 0; /* seed */
int i;
input = ((u64)vid << 48) | ether_addr_to_u64(addr);
/* Mask the eight bytes starting from MSB one at a time */
for (i = 56; i >= 0; i -= 8) {
u8 byte = (input & (0xffull << i)) >> i;
crc = sja1105_crc8_add(crc, byte, poly);
}
return crc;
}
| linux-master | drivers/net/dsa/sja1105/sja1105_dynamic_config.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/spi/spi.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/pcs/pcs-xpcs.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/dsa/8021q.h>
#include "sja1105.h"
#include "sja1105_tas.h"
#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
/* Configure the optional reset pin and bring up switch */
static int sja1105_hw_reset(struct device *dev, unsigned int pulse_len,
unsigned int startup_delay)
{
struct gpio_desc *gpio;
gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
if (!gpio)
return 0;
gpiod_set_value_cansleep(gpio, 1);
/* Wait for minimum reset pulse length */
msleep(pulse_len);
gpiod_set_value_cansleep(gpio, 0);
/* Wait until chip is ready after reset */
msleep(startup_delay);
gpiod_put(gpio);
return 0;
}
static void
sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
int from, int to, bool allow)
{
if (allow)
l2_fwd[from].reach_port |= BIT(to);
else
l2_fwd[from].reach_port &= ~BIT(to);
}
static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd,
int from, int to)
{
return !!(l2_fwd[from].reach_port & BIT(to));
}
static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
{
struct sja1105_vlan_lookup_entry *vlan;
int count, i;
vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
for (i = 0; i < count; i++)
if (vlan[i].vlanid == vid)
return i;
/* Return an invalid entry index if not found */
return -1;
}
static int sja1105_drop_untagged(struct dsa_switch *ds, int port, bool drop)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_mac_config_entry *mac;
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
if (mac[port].drpuntag == drop)
return 0;
mac[port].drpuntag = drop;
return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
&mac[port], true);
}
static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
{
struct sja1105_mac_config_entry *mac;
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
if (mac[port].vlanid == pvid)
return 0;
mac[port].vlanid = pvid;
return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
&mac[port], true);
}
static int sja1105_commit_pvid(struct dsa_switch *ds, int port)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct sja1105_private *priv = ds->priv;
struct sja1105_vlan_lookup_entry *vlan;
bool drop_untagged = false;
int match, rc;
u16 pvid;
if (br && br_vlan_enabled(br))
pvid = priv->bridge_pvid[port];
else
pvid = priv->tag_8021q_pvid[port];
rc = sja1105_pvid_apply(priv, port, pvid);
if (rc)
return rc;
/* Only force dropping of untagged packets when the port is under a
* VLAN-aware bridge. When the tag_8021q pvid is used, we are
* deliberately removing the RX VLAN from the port's VMEMB_PORT list,
* to prevent DSA tag spoofing from the link partner. Untagged packets
* are the only ones that should be received with tag_8021q, so
* definitely don't drop them.
*/
if (pvid == priv->bridge_pvid[port]) {
vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
match = sja1105_is_vlan_configured(priv, pvid);
if (match < 0 || !(vlan[match].vmemb_port & BIT(port)))
drop_untagged = true;
}
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
drop_untagged = true;
return sja1105_drop_untagged(ds, port, drop_untagged);
}
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
struct sja1105_mac_config_entry default_mac = {
/* Enable all 8 priority queues on egress.
* Every queue i holds top[i] - base[i] frames.
* Sum of top[i] - base[i] is 511 (max hardware limit).
*/
.top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
.enabled = {true, true, true, true, true, true, true, true},
/* Keep standard IFG of 12 bytes on egress. */
.ifg = 0,
/* Always put the MAC speed in automatic mode, where it can be
* adjusted at runtime by PHYLINK.
*/
.speed = priv->info->port_speed[SJA1105_SPEED_AUTO],
/* No static correction for 1-step 1588 events */
.tp_delin = 0,
.tp_delout = 0,
/* Disable aging for critical TTEthernet traffic */
.maxage = 0xFF,
/* Internal VLAN (pvid) to apply to untagged ingress */
.vlanprio = 0,
.vlanid = 1,
.ing_mirr = false,
.egr_mirr = false,
/* Don't drop traffic with other EtherType than ETH_P_IP */
.drpnona664 = false,
/* Don't drop double-tagged traffic */
.drpdtag = false,
/* Don't drop untagged traffic */
.drpuntag = false,
/* Don't retag 802.1p (VID 0) traffic with the pvid */
.retag = false,
/* Disable learning and I/O on user ports by default -
* STP will enable it.
*/
.dyn_learn = false,
.egress = false,
.ingress = false,
};
struct sja1105_mac_config_entry *mac;
struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
struct dsa_port *dp;
table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
/* Discard previous MAC Configuration Table */
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = table->ops->max_entry_count;
mac = table->entries;
list_for_each_entry(dp, &ds->dst->ports, list) {
if (dp->ds != ds)
continue;
mac[dp->index] = default_mac;
/* Let sja1105_bridge_stp_state_set() keep address learning
* enabled for the DSA ports. CPU ports use software-assisted
* learning to ensure that only FDB entries belonging to the
* bridge are learned, and that they are learned towards all
* CPU ports in a cross-chip topology if multiple CPU ports
* exist.
*/
if (dsa_port_is_dsa(dp))
dp->learning = true;
/* Disallow untagged packets from being received on the
* CPU and DSA ports.
*/
if (dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))
mac[dp->index].drpuntag = true;
}
return 0;
}
static int sja1105_init_mii_settings(struct sja1105_private *priv)
{
struct device *dev = &priv->spidev->dev;
struct sja1105_xmii_params_entry *mii;
struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int i;
table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
/* Discard previous xMII Mode Parameters Table */
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
/* Override table based on PHYLINK DT bindings */
table->entry_count = table->ops->max_entry_count;
mii = table->entries;
for (i = 0; i < ds->num_ports; i++) {
sja1105_mii_role_t role = XMII_MAC;
if (dsa_is_unused_port(priv->ds, i))
continue;
switch (priv->phy_mode[i]) {
case PHY_INTERFACE_MODE_INTERNAL:
if (priv->info->internal_phy[i] == SJA1105_NO_PHY)
goto unsupported;
mii->xmii_mode[i] = XMII_MODE_MII;
if (priv->info->internal_phy[i] == SJA1105_PHY_BASE_TX)
mii->special[i] = true;
break;
case PHY_INTERFACE_MODE_REVMII:
role = XMII_PHY;
fallthrough;
case PHY_INTERFACE_MODE_MII:
if (!priv->info->supports_mii[i])
goto unsupported;
mii->xmii_mode[i] = XMII_MODE_MII;
break;
case PHY_INTERFACE_MODE_REVRMII:
role = XMII_PHY;
fallthrough;
case PHY_INTERFACE_MODE_RMII:
if (!priv->info->supports_rmii[i])
goto unsupported;
mii->xmii_mode[i] = XMII_MODE_RMII;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
if (!priv->info->supports_rgmii[i])
goto unsupported;
mii->xmii_mode[i] = XMII_MODE_RGMII;
break;
case PHY_INTERFACE_MODE_SGMII:
if (!priv->info->supports_sgmii[i])
goto unsupported;
mii->xmii_mode[i] = XMII_MODE_SGMII;
mii->special[i] = true;
break;
case PHY_INTERFACE_MODE_2500BASEX:
if (!priv->info->supports_2500basex[i])
goto unsupported;
mii->xmii_mode[i] = XMII_MODE_SGMII;
mii->special[i] = true;
break;
unsupported:
default:
dev_err(dev, "Unsupported PHY mode %s on port %d!\n",
phy_modes(priv->phy_mode[i]), i);
return -EINVAL;
}
mii->phy_mac[i] = role;
}
return 0;
}
static int sja1105_init_static_fdb(struct sja1105_private *priv)
{
struct sja1105_l2_lookup_entry *l2_lookup;
struct sja1105_table *table;
int port;
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
/* We only populate the FDB table through dynamic L2 Address Lookup
* entries, except for a special entry at the end which is a catch-all
* for unknown multicast and is used to control the flooding domain.
*/
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
if (!priv->info->can_limit_mcast_flood)
return 0;
table->entries = kcalloc(1, table->ops->unpacked_entry_size,
GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = 1;
l2_lookup = table->entries;
/* All L2 multicast addresses have an odd first octet */
l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST;
l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST;
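/* Using SJA1105_UNKNOWN_MULTICAST as both the value and the mask
* (presumably just the multicast bit, i.e. an odd first octet) makes
* this single entry match any multicast DMAC that is not claimed by a
* more specific FDB entry.
*/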
l2_lookup[0].lockeds = true;
l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1;
/* Flood multicast to every port by default */
for (port = 0; port < priv->ds->num_ports; port++)
if (!dsa_is_unused_port(priv->ds, port))
l2_lookup[0].destports |= BIT(port);
return 0;
}
static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
/* Learned FDB entries are forgotten after 300 seconds */
.maxage = SJA1105_AGEING_TIME_MS(300000),
/* All entries within a FDB bin are available for learning */
.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
/* And the P/Q/R/S equivalent setting: */
.start_dynspc = 0,
/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
.poly = 0x97,
/* Always use Independent VLAN Learning (IVL) */
.shared_learn = false,
/* Don't discard management traffic based on ENFPORT -
* we don't perform SMAC port enforcement anyway, so
* what we are setting here doesn't matter.
*/
.no_enf_hostprt = false,
/* Don't learn SMAC for mac_fltres1 and mac_fltres0.
* Maybe correlate with no_linklocal_learn from bridge driver?
*/
.no_mgmt_learn = true,
/* P/Q/R/S only */
.use_static = true,
/* Dynamically learned FDB entries can overwrite other (older)
* dynamic FDB entries
*/
.owr_dyn = true,
.drpnolearn = true,
};
struct dsa_switch *ds = priv->ds;
int port, num_used_ports = 0;
struct sja1105_table *table;
u64 max_fdb_entries;
for (port = 0; port < ds->num_ports; port++)
if (!dsa_is_unused_port(ds, port))
num_used_ports++;
max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / num_used_ports;
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
default_l2_lookup_params.maxaddrp[port] = max_fdb_entries;
}
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = table->ops->max_entry_count;
/* This table only has a single entry */
((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
default_l2_lookup_params;
return 0;
}
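/* Sanity example for the arithmetic above: with 1024 FDB entries
* (indices 0 to 1023, as described for the L2 Lookup table) and all
* 5 ports of an SJA1105 in use, each port may own at most
* 1024 / 5 = 204 learned addresses (integer division).
*/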
/* Set up a default VLAN for untagged traffic injected from the CPU
* using management routes (e.g. STP, PTP) as opposed to tag_8021q.
* All DT-defined ports are members of this VLAN, and there are no
* restrictions on forwarding (since the CPU selects the destination).
* Frames from this VLAN will always be transmitted as untagged, and
* neither the bridge nor the 8021q module can create this VLAN ID.
*/
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
struct sja1105_table *table;
struct sja1105_vlan_lookup_entry pvid = {
.type_entry = SJA1110_VLAN_D_TAG,
.ving_mirr = 0,
.vegr_mirr = 0,
.vmemb_port = 0,
.vlan_bc = 0,
.tag_port = 0,
.vlanid = SJA1105_DEFAULT_VLAN,
};
struct dsa_switch *ds = priv->ds;
int port;
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
table->entries = kzalloc(table->ops->unpacked_entry_size,
GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = 1;
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
pvid.vmemb_port |= BIT(port);
pvid.vlan_bc |= BIT(port);
pvid.tag_port &= ~BIT(port);
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
priv->tag_8021q_pvid[port] = SJA1105_DEFAULT_VLAN;
priv->bridge_pvid[port] = SJA1105_DEFAULT_VLAN;
}
}
((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
return 0;
}
static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
struct sja1105_l2_forwarding_entry *l2fwd;
struct dsa_switch *ds = priv->ds;
struct dsa_switch_tree *dst;
struct sja1105_table *table;
struct dsa_link *dl;
int port, tc;
int from, to;
table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = table->ops->max_entry_count;
l2fwd = table->entries;
/* The first ds->num_ports entries in the L2 Forwarding Table define the
* rules and the VLAN PCP to ingress queue mapping.
* Set up the ingress queue mapping first.
*/
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
for (tc = 0; tc < SJA1105_NUM_TC; tc++)
l2fwd[port].vlan_pmap[tc] = tc;
}
/* Then manage the forwarding domain for user ports. These can forward
* only to the always-on domain (CPU port and DSA links)
*/
for (from = 0; from < ds->num_ports; from++) {
if (!dsa_is_user_port(ds, from))
continue;
for (to = 0; to < ds->num_ports; to++) {
if (!dsa_is_cpu_port(ds, to) &&
!dsa_is_dsa_port(ds, to))
continue;
l2fwd[from].bc_domain |= BIT(to);
l2fwd[from].fl_domain |= BIT(to);
sja1105_port_allow_traffic(l2fwd, from, to, true);
}
}
/* Then manage the forwarding domain for DSA links and CPU ports (the
* always-on domain). These can send packets to any enabled port except
* themselves.
*/
for (from = 0; from < ds->num_ports; from++) {
if (!dsa_is_cpu_port(ds, from) && !dsa_is_dsa_port(ds, from))
continue;
for (to = 0; to < ds->num_ports; to++) {
if (dsa_is_unused_port(ds, to))
continue;
if (from == to)
continue;
l2fwd[from].bc_domain |= BIT(to);
l2fwd[from].fl_domain |= BIT(to);
sja1105_port_allow_traffic(l2fwd, from, to, true);
}
}
/* In odd topologies ("H" connections where there is a DSA link to
* another switch which also has its own CPU port), TX packets can loop
* back into the system (they are flooded from CPU port 1 to the DSA
* link, and from there to CPU port 2). Prevent this from happening by
* cutting RX from DSA links towards our CPU port, if the remote switch
* has its own CPU port and therefore doesn't need ours for network
* stack termination.
*/
dst = ds->dst;
list_for_each_entry(dl, &dst->rtable, list) {
if (dl->dp->ds != ds || dl->link_dp->cpu_dp == dl->dp->cpu_dp)
continue;
from = dl->dp->index;
to = dsa_upstream_port(ds, from);
dev_warn(ds->dev,
"H topology detected, cutting RX from DSA link %d to CPU port %d to prevent TX packet loops\n",
from, to);
sja1105_port_allow_traffic(l2fwd, from, to, false);
l2fwd[from].bc_domain &= ~BIT(to);
l2fwd[from].fl_domain &= ~BIT(to);
}
/* Finally, manage the egress flooding domain. All ports start up with
* flooding enabled, including the CPU port and DSA links.
*/
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
priv->ucast_egress_floods |= BIT(port);
priv->bcast_egress_floods |= BIT(port);
}
/* Next 8 entries define VLAN PCP mapping from ingress to egress.
* Create a one-to-one mapping.
*/
for (tc = 0; tc < SJA1105_NUM_TC; tc++) {
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
l2fwd[ds->num_ports + tc].vlan_pmap[port] = tc;
}
l2fwd[ds->num_ports + tc].type_egrpcp2outputq = true;
}
return 0;
}
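/* Resulting layout for a 5-port SJA1105: entries 0-4 hold the per-port
* forwarding rules and ingress queue mapping, and entries 5-12 (indexed
* as ds->num_ports + tc above) hold the per-TC egress PCP remapping.
*/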
static int sja1110_init_pcp_remapping(struct sja1105_private *priv)
{
struct sja1110_pcp_remapping_entry *pcp_remap;
struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int port, tc;
table = &priv->static_config.tables[BLK_IDX_PCP_REMAPPING];
/* Nothing to do for SJA1105 */
if (!table->ops->max_entry_count)
return 0;
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = table->ops->max_entry_count;
pcp_remap = table->entries;
/* Repeat the configuration done for vlan_pmap */
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
for (tc = 0; tc < SJA1105_NUM_TC; tc++)
pcp_remap[port].egrpcp[tc] = tc;
}
return 0;
}
static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
{
struct sja1105_l2_forwarding_params_entry *l2fwd_params;
struct sja1105_table *table;
table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = table->ops->max_entry_count;
/* This table only has a single entry */
l2fwd_params = table->entries;
/* Disallow dynamic reconfiguration of vlan_pmap */
l2fwd_params->max_dynp = 0;
/* Use a single memory partition for all ingress queues */
l2fwd_params->part_spc[0] = priv->info->max_frame_mem;
return 0;
}
void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
{
struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
struct sja1105_table *table;
table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
l2_fwd_params = table->entries;
l2_fwd_params->part_spc[0] = SJA1105_MAX_FRAME_MEMORY;
/* If we have any critical-traffic virtual links, we need to reserve
* some frame buffer memory for them. At the moment, hardcode the value
* at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
* remaining for best-effort traffic. TODO: figure out a more flexible
* way to perform the frame buffer partitioning.
*/
if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
return;
table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
vl_fwd_params = table->entries;
l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
}
/* SJA1110 TDMACONFIGIDX values:
*
* | 100 Mbps ports | 1Gbps ports | 2.5Gbps ports | Disabled ports
* -----+----------------+---------------+---------------+---------------
* 0 | 0, [5:10] | [1:2] | [3:4] | retag
* 1 |0, [5:10], retag| [1:2] | [3:4] | -
* 2 | 0, [5:10] | [1:3], retag | 4 | -
* 3 | 0, [5:10] |[1:2], 4, retag| 3 | -
* 4 | 0, 2, [5:10] | 1, retag | [3:4] | -
* 5 | 0, 1, [5:10] | 2, retag | [3:4] | -
* 14 | 0, [5:10] | [1:4], retag | - | -
* 15 | [5:10] | [0:4], retag | - | -
*/
static void sja1110_select_tdmaconfigidx(struct sja1105_private *priv)
{
struct sja1105_general_params_entry *general_params;
struct sja1105_table *table;
bool port_1_is_base_tx;
bool port_3_is_2500;
bool port_4_is_2500;
u64 tdmaconfigidx;
if (priv->info->device_id != SJA1110_DEVICE_ID)
return;
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
/* All the settings below are "as opposed to SGMII", which is the
* other pinmuxing option.
*/
port_1_is_base_tx = priv->phy_mode[1] == PHY_INTERFACE_MODE_INTERNAL;
port_3_is_2500 = priv->phy_mode[3] == PHY_INTERFACE_MODE_2500BASEX;
port_4_is_2500 = priv->phy_mode[4] == PHY_INTERFACE_MODE_2500BASEX;
if (port_1_is_base_tx)
/* Retagging port will operate at 1 Gbps */
tdmaconfigidx = 5;
else if (port_3_is_2500 && port_4_is_2500)
/* Retagging port will operate at 100 Mbps */
tdmaconfigidx = 1;
else if (port_3_is_2500)
/* Retagging port will operate at 1 Gbps */
tdmaconfigidx = 3;
else if (port_4_is_2500)
/* Retagging port will operate at 1 Gbps */
tdmaconfigidx = 2;
else
/* Retagging port will operate at 1 Gbps */
tdmaconfigidx = 14;
general_params->tdmaconfigidx = tdmaconfigidx;
}
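/* Example of the selection above for a hypothetical board: port 3 in
* 2500base-x, port 4 in sgmii and port 1 not using its internal base-tx
* PHY results in tdmaconfigidx 3 (port 3 at 2.5 Gbps, retagging port at
* 1 Gbps, per the table above).
*/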
static int sja1105_init_topology(struct sja1105_private *priv,
struct sja1105_general_params_entry *general_params)
{
struct dsa_switch *ds = priv->ds;
int port;
/* The host port is the destination for traffic matching mac_fltres1
* and mac_fltres0 on all ports except itself. Default to an invalid
* value.
*/
general_params->host_port = ds->num_ports;
/* Link-local traffic received on casc_port will be forwarded
* to host_port without embedding the source port and device ID
* info in the destination MAC address, and no RX timestamps will be
* taken either (presumably because it is a cascaded port and a
* downstream SJA switch already did that).
* To disable the feature, we need to do different things depending on
* switch generation. On SJA1105 we need to set an invalid port, while
* on SJA1110, which supports multiple cascaded ports, this field is a
* bitmask so it must be left zero.
*/
if (!priv->info->multiple_cascade_ports)
general_params->casc_port = ds->num_ports;
for (port = 0; port < ds->num_ports; port++) {
bool is_upstream = dsa_is_upstream_port(ds, port);
bool is_dsa_link = dsa_is_dsa_port(ds, port);
/* Upstream ports can be dedicated CPU ports or
* upstream-facing DSA links
*/
if (is_upstream) {
if (general_params->host_port == ds->num_ports) {
general_params->host_port = port;
} else {
dev_err(ds->dev,
"Port %llu is already a host port, configuring %d as one too is not supported\n",
general_params->host_port, port);
return -EINVAL;
}
}
/* Cascade ports are downstream-facing DSA links */
if (is_dsa_link && !is_upstream) {
if (priv->info->multiple_cascade_ports) {
general_params->casc_port |= BIT(port);
} else if (general_params->casc_port == ds->num_ports) {
general_params->casc_port = port;
} else {
dev_err(ds->dev,
"Port %llu is already a cascade port, configuring %d as one too is not supported\n",
general_params->casc_port, port);
return -EINVAL;
}
}
}
if (general_params->host_port == ds->num_ports) {
dev_err(ds->dev, "No host port configured\n");
return -EINVAL;
}
return 0;
}
static int sja1105_init_general_params(struct sja1105_private *priv)
{
struct sja1105_general_params_entry default_general_params = {
/* Allow dynamic changing of the mirror port */
.mirr_ptacu = true,
.switchid = priv->ds->index,
/* Priority queue for link-local management frames
* (both ingress to and egress from CPU - PTP, STP etc)
*/
.hostprio = 7,
.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
.incl_srcpt1 = true,
.send_meta1 = true,
.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
.incl_srcpt0 = true,
.send_meta0 = true,
/* Default to an invalid value */
.mirr_port = priv->ds->num_ports,
/* No TTEthernet */
.vllupformat = SJA1105_VL_FORMAT_PSFP,
.vlmarker = 0,
.vlmask = 0,
/* Only update correctionField for 1-step PTP (L2 transport) */
.ignore2stf = 0,
/* Forcefully disable VLAN filtering by telling
* the switch that VLAN has a different EtherType.
*/
.tpid = ETH_P_SJA1105,
.tpid2 = ETH_P_SJA1105,
/* Enable the TTEthernet engine on SJA1110 */
.tte_en = true,
/* Set up the EtherType for control packets on SJA1110 */
.header_type = ETH_P_SJA1110,
};
struct sja1105_general_params_entry *general_params;
struct sja1105_table *table;
int rc;
rc = sja1105_init_topology(priv, &default_general_params);
if (rc)
return rc;
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = table->ops->max_entry_count;
general_params = table->entries;
/* This table only has a single entry */
general_params[0] = default_general_params;
sja1110_select_tdmaconfigidx(priv);
return 0;
}
static int sja1105_init_avb_params(struct sja1105_private *priv)
{
struct sja1105_avb_params_entry *avb;
struct sja1105_table *table;
table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
/* Discard previous AVB Parameters Table */
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = table->ops->max_entry_count;
avb = table->entries;
/* Configure the MAC addresses for meta frames */
avb->destmeta = SJA1105_META_DMAC;
avb->srcmeta = SJA1105_META_SMAC;
/* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
* default. This is because there might be boards with a hardware
* layout where enabling the pin as output might cause an electrical
* clash. On E/T the pin is always an output, which the board designers
* probably already knew, so even if there are going to be electrical
* issues, there's nothing we can do.
*/
avb->cas_master = false;
return 0;
}
/* The L2 policing table is 2-stage. The table is looked up for each frame
* according to the ingress port, whether it was broadcast or not, and the
* classified traffic class (given by VLAN PCP). This portion of the lookup is
* fixed, and gives access to the SHARINDX, an indirection register pointing
* within the policing table itself, which is used to resolve the policer that
* will be used for this frame.
*
* Stage 1 Stage 2
* +------------+--------+ +---------------------------------+
* |Port 0 TC 0 |SHARINDX| | Policer 0: Rate, Burst, MTU |
* +------------+--------+ +---------------------------------+
* |Port 0 TC 1 |SHARINDX| | Policer 1: Rate, Burst, MTU |
* +------------+--------+ +---------------------------------+
* ... | Policer 2: Rate, Burst, MTU |
* +------------+--------+ +---------------------------------+
* |Port 0 TC 7 |SHARINDX| | Policer 3: Rate, Burst, MTU |
* +------------+--------+ +---------------------------------+
* |Port 1 TC 0 |SHARINDX| | Policer 4: Rate, Burst, MTU |
* +------------+--------+ +---------------------------------+
* ... | Policer 5: Rate, Burst, MTU |
* +------------+--------+ +---------------------------------+
* |Port 1 TC 7 |SHARINDX| | Policer 6: Rate, Burst, MTU |
* +------------+--------+ +---------------------------------+
* ... | Policer 7: Rate, Burst, MTU |
* +------------+--------+ +---------------------------------+
* |Port 4 TC 7 |SHARINDX| ...
* +------------+--------+
* |Port 0 BCAST|SHARINDX| ...
* +------------+--------+
* |Port 1 BCAST|SHARINDX| ...
* +------------+--------+
* ... ...
* +------------+--------+ +---------------------------------+
* |Port 4 BCAST|SHARINDX| | Policer 44: Rate, Burst, MTU |
* +------------+--------+ +---------------------------------+
*
* In this driver, we shall use policers 0-4 as statically allocated port
* (matchall) policers. So we need to make the SHARINDX for all lookups
* corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
* lookup) equal.
* The remaining policers (40) shall be dynamically allocated for flower
* policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
*/
#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
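/* Index arithmetic implied by the diagram above, for a 5-port SJA1105:
* the traffic class lookups of port 2 are entries 16..23 (port * 8 + tc)
* and its broadcast lookup is entry 42 (5 * 8 + 2); all of them get
* SHARINDX = 2, so policer 2 acts as the port policer.
* SJA1105_RATE_MBPS(1000) evaluates to 64000, which suggests the RATE
* field is programmed in steps of 1000/64 = 15.625 kbps (an inference
* from the macro, not stated here).
*/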
static int sja1105_init_l2_policing(struct sja1105_private *priv)
{
struct sja1105_l2_policing_entry *policing;
struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int port, tc;
table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
/* Discard previous L2 Policing Table */
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
table->entry_count = table->ops->max_entry_count;
policing = table->entries;
/* Setup shared indices for the matchall policers */
for (port = 0; port < ds->num_ports; port++) {
int mcast = (ds->num_ports * (SJA1105_NUM_TC + 1)) + port;
int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
for (tc = 0; tc < SJA1105_NUM_TC; tc++)
policing[port * SJA1105_NUM_TC + tc].sharindx = port;
policing[bcast].sharindx = port;
/* Only SJA1110 has multicast policers */
if (mcast < table->ops->max_entry_count)
policing[mcast].sharindx = port;
}
/* Setup the matchall policer parameters */
for (port = 0; port < ds->num_ports; port++) {
int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
mtu += VLAN_HLEN;
policing[port].smax = 65535; /* Burst size in bytes */
policing[port].rate = SJA1105_RATE_MBPS(1000);
policing[port].maxlen = mtu;
policing[port].partition = 0;
}
return 0;
}
static int sja1105_static_config_load(struct sja1105_private *priv)
{
int rc;
sja1105_static_config_free(&priv->static_config);
rc = sja1105_static_config_init(&priv->static_config,
priv->info->static_ops,
priv->info->device_id);
if (rc)
return rc;
/* Build static configuration */
rc = sja1105_init_mac_settings(priv);
if (rc < 0)
return rc;
rc = sja1105_init_mii_settings(priv);
if (rc < 0)
return rc;
rc = sja1105_init_static_fdb(priv);
if (rc < 0)
return rc;
rc = sja1105_init_static_vlan(priv);
if (rc < 0)
return rc;
rc = sja1105_init_l2_lookup_params(priv);
if (rc < 0)
return rc;
rc = sja1105_init_l2_forwarding(priv);
if (rc < 0)
return rc;
rc = sja1105_init_l2_forwarding_params(priv);
if (rc < 0)
return rc;
rc = sja1105_init_l2_policing(priv);
if (rc < 0)
return rc;
rc = sja1105_init_general_params(priv);
if (rc < 0)
return rc;
rc = sja1105_init_avb_params(priv);
if (rc < 0)
return rc;
rc = sja1110_init_pcp_remapping(priv);
if (rc < 0)
return rc;
/* Send initial configuration to hardware via SPI */
return sja1105_static_config_upload(priv);
}
/* This is the "new way" for a MAC driver to configure its RGMII delay lines,
* based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps"
* properties. It has the advantage of working with fixed links and with PHYs
* that apply RGMII delays too, and the MAC driver need not perform any
* special checks.
*
* Previously we were acting upon the "phy-mode" property when we were
* operating in fixed-link, basically acting as a PHY, but with a reversed
* interpretation: PHY_INTERFACE_MODE_RGMII_TXID means that the MAC should
* behave as if it is connected to a PHY which has applied RGMII delays in the
* TX direction. So if anything, RX delays should have been added by the MAC,
* but we were adding TX delays.
*
* If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall
* back to the legacy behavior and apply delays on fixed-link ports based on
* the reverse interpretation of the phy-mode. This is a deviation from the
* expected default behavior which is to simply apply no delays. To achieve
* that behavior with the new bindings, it is mandatory to specify
* "{rx,tx}-internal-delay-ps" with a value of 0.
*/
static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, int port,
struct device_node *port_dn)
{
phy_interface_t phy_mode = priv->phy_mode[port];
struct device *dev = &priv->spidev->dev;
int rx_delay = -1, tx_delay = -1;
if (!phy_interface_mode_is_rgmii(phy_mode))
return 0;
of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) {
dev_warn(dev,
"Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
"please update device tree to specify \"rx-internal-delay-ps\" and "
"\"tx-internal-delay-ps\"",
port);
if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
rx_delay = 2000;
if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
tx_delay = 2000;
}
if (rx_delay < 0)
rx_delay = 0;
if (tx_delay < 0)
tx_delay = 0;
if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) {
dev_err(dev, "Chip cannot apply RGMII delays\n");
return -EINVAL;
}
if ((rx_delay && rx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
(tx_delay && tx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
(rx_delay > SJA1105_RGMII_DELAY_MAX_PS) ||
(tx_delay > SJA1105_RGMII_DELAY_MAX_PS)) {
dev_err(dev,
"port %d RGMII delay values out of range, must be between %d and %d ps\n",
port, SJA1105_RGMII_DELAY_MIN_PS, SJA1105_RGMII_DELAY_MAX_PS);
return -ERANGE;
}
priv->rgmii_rx_delay_ps[port] = rx_delay;
priv->rgmii_tx_delay_ps[port] = tx_delay;
return 0;
}
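/* Concrete effect of the fallback above: a fixed-link port with
* phy-mode = "rgmii-id" and no "{rx,tx}-internal-delay-ps" properties
* ends up with rx_delay = tx_delay = 2000 ps, while specifying both
* properties as 0 keeps the MAC from adding any delay.
*/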
static int sja1105_parse_ports_node(struct sja1105_private *priv,
struct device_node *ports_node)
{
struct device *dev = &priv->spidev->dev;
struct device_node *child;
for_each_available_child_of_node(ports_node, child) {
struct device_node *phy_node;
phy_interface_t phy_mode;
u32 index;
int err;
/* Get switch port number from DT */
if (of_property_read_u32(child, "reg", &index) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
of_node_put(child);
return -ENODEV;
}
/* Get PHY mode from DT */
err = of_get_phy_mode(child, &phy_mode);
if (err) {
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
index);
of_node_put(child);
return -ENODEV;
}
phy_node = of_parse_phandle(child, "phy-handle", 0);
if (!phy_node) {
if (!of_phy_is_fixed_link(child)) {
dev_err(dev, "phy-handle or fixed-link "
"properties missing!\n");
of_node_put(child);
return -ENODEV;
}
/* phy-handle is missing, but fixed-link isn't.
* So it's a fixed link. Default to PHY role.
*/
priv->fixed_link[index] = true;
} else {
of_node_put(phy_node);
}
priv->phy_mode[index] = phy_mode;
err = sja1105_parse_rgmii_delays(priv, index, child);
if (err) {
of_node_put(child);
return err;
}
}
return 0;
}
static int sja1105_parse_dt(struct sja1105_private *priv)
{
struct device *dev = &priv->spidev->dev;
struct device_node *switch_node = dev->of_node;
struct device_node *ports_node;
int rc;
ports_node = of_get_child_by_name(switch_node, "ports");
if (!ports_node)
ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
if (!ports_node) {
dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
return -ENODEV;
}
rc = sja1105_parse_ports_node(priv, ports_node);
of_node_put(ports_node);
return rc;
}
/* Convert link speed from SJA1105 to ethtool encoding */
static int sja1105_port_speed_to_ethtool(struct sja1105_private *priv,
u64 speed)
{
if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS])
return SPEED_10;
if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS])
return SPEED_100;
if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS])
return SPEED_1000;
if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS])
return SPEED_2500;
return SPEED_UNKNOWN;
}
/* Set link speed in the MAC configuration for a specific port. */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
int speed_mbps)
{
struct sja1105_mac_config_entry *mac;
struct device *dev = priv->ds->dev;
u64 speed;
int rc;
/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
* tables. On E/T, MAC reconfig tables are not readable, only writable.
* We have to *know* what the MAC looks like. For the sake of keeping
* the code common, we'll use the static configuration tables as a
* reasonable approximation for both E/T and P/Q/R/S.
*/
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
switch (speed_mbps) {
case SPEED_UNKNOWN:
/* PHYLINK called sja1105_mac_config() to inform us about
* the state->interface, but AN has not completed and the
* speed is not yet valid. UM10944.pdf says that setting
* SJA1105_SPEED_AUTO at runtime disables the port, so that is
* ok for power consumption in case AN will never complete -
* otherwise PHYLINK should come back with a new update.
*/
speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
break;
case SPEED_10:
speed = priv->info->port_speed[SJA1105_SPEED_10MBPS];
break;
case SPEED_100:
speed = priv->info->port_speed[SJA1105_SPEED_100MBPS];
break;
case SPEED_1000:
speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
break;
case SPEED_2500:
speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
break;
default:
dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
return -EINVAL;
}
/* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
* table, since this will be used for the clocking setup, and we no
* longer need to store it in the static config (already told hardware
* we want auto during upload phase).
* Actually for the SGMII port, the MAC is fixed at 1 Gbps and
* we need to configure the PCS only (if even that).
*/
if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX)
mac[port].speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
else
mac[port].speed = speed;
/* Write to the dynamic reconfiguration tables */
rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
&mac[port], true);
if (rc < 0) {
dev_err(dev, "Failed to write MAC config: %d\n", rc);
return rc;
}
/* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
* gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
* RMII no change of the clock setup is required. Actually, changing
* the clock setup does interrupt the clock signal for a certain time
* which causes trouble for all PHYs relying on this signal.
*/
if (!phy_interface_mode_is_rgmii(priv->phy_mode[port]))
return 0;
return sja1105_clocking_setup_port(priv, port);
}
static struct phylink_pcs *
sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface)
{
struct sja1105_private *priv = ds->priv;
struct dw_xpcs *xpcs = priv->xpcs[port];
if (xpcs)
return &xpcs->pcs;
return NULL;
}
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
sja1105_inhibit_tx(ds->priv, BIT(port), true);
}
static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct sja1105_private *priv = ds->priv;
sja1105_adjust_port_config(priv, port, speed);
sja1105_inhibit_tx(priv, BIT(port), false);
}
static void sja1105_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_xmii_params_entry *mii;
phy_interface_t phy_mode;
phy_mode = priv->phy_mode[port];
if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
/* Changing the PHY mode on SERDES ports is possible and makes
* sense, because that is done through the XPCS. We allow
* changes between SGMII and 2500base-X.
*/
if (priv->info->supports_sgmii[port])
__set_bit(PHY_INTERFACE_MODE_SGMII,
config->supported_interfaces);
if (priv->info->supports_2500basex[port])
__set_bit(PHY_INTERFACE_MODE_2500BASEX,
config->supported_interfaces);
} else {
/* The SJA1105 MAC programming model is through the static
* config (the xMII Mode table cannot be dynamically
* reconfigured), and we have to program that early.
*/
__set_bit(phy_mode, config->supported_interfaces);
}
/* The MAC does not support pause frames, and also doesn't
* support half-duplex traffic modes.
*/
config->mac_capabilities = MAC_10FD | MAC_100FD;
mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
mii->xmii_mode[port] == XMII_MODE_SGMII)
config->mac_capabilities |= MAC_1000FD;
if (priv->info->supports_2500basex[port])
config->mac_capabilities |= MAC_2500FD;
}
static int
sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
const struct sja1105_l2_lookup_entry *requested)
{
struct sja1105_l2_lookup_entry *l2_lookup;
struct sja1105_table *table;
int i;
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
l2_lookup = table->entries;
for (i = 0; i < table->entry_count; i++)
if (l2_lookup[i].macaddr == requested->macaddr &&
l2_lookup[i].vlanid == requested->vlanid &&
l2_lookup[i].destports & BIT(port))
return i;
return -1;
}
/* We want FDB entries added statically through the bridge command to persist
* across switch resets, which are a common thing during normal SJA1105
* operation. So we have to back them up in the static configuration tables
* and hence apply them on next static config upload... yay!
*/
static int
sja1105_static_fdb_change(struct sja1105_private *priv, int port,
const struct sja1105_l2_lookup_entry *requested,
bool keep)
{
struct sja1105_l2_lookup_entry *l2_lookup;
struct sja1105_table *table;
int rc, match;
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
match = sja1105_find_static_fdb_entry(priv, port, requested);
if (match < 0) {
/* Can't delete a missing entry. */
if (!keep)
return 0;
/* No match => new entry */
rc = sja1105_table_resize(table, table->entry_count + 1);
if (rc)
return rc;
match = table->entry_count - 1;
}
/* Assign pointer after the resize (it may be new memory) */
l2_lookup = table->entries;
/* We have a match.
* If the job was to add this FDB entry, it's already done (mostly
* anyway, since the port forwarding mask may have changed, case in
* which we update it).
* Otherwise we have to delete it.
*/
if (keep) {
l2_lookup[match] = *requested;
return 0;
}
/* To remove, the strategy is to overwrite the element with
* the last one, and then reduce the array size by 1
*/
l2_lookup[match] = l2_lookup[table->entry_count - 1];
return sja1105_table_resize(table, table->entry_count - 1);
}
/* First-generation switches have a 4-way set associative TCAM that
* holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
* a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
* For the placement of a newly learnt FDB entry, the switch selects the bin
* based on a hash function, and the way within that bin incrementally.
*/
static int sja1105et_fdb_index(int bin, int way)
{
return bin * SJA1105ET_FDB_BIN_SIZE + way;
}
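/* With the 4-way associativity described above, bin 5 / way 2 maps to
* FDB index 22, and the last index, 1023, corresponds to bin 255 / way 3.
*/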
static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
const u8 *addr, u16 vid,
struct sja1105_l2_lookup_entry *match,
int *last_unused)
{
int way;
for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
struct sja1105_l2_lookup_entry l2_lookup = {0};
int index = sja1105et_fdb_index(bin, way);
/* Skip unused entries, optionally marking them
* into the return value
*/
if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
index, &l2_lookup)) {
if (last_unused)
*last_unused = way;
continue;
}
if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
l2_lookup.vlanid == vid) {
if (match)
*match = l2_lookup;
return way;
}
}
/* Return an invalid entry index if not found */
return -1;
}
int sja1105et_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int last_unused = -1;
int start, end, i;
int bin, way, rc;
bin = sja1105et_fdb_hash(priv, addr, vid);
way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
&l2_lookup, &last_unused);
if (way >= 0) {
/* We have an FDB entry. Is our port in the destination
* mask? If yes, we need to do nothing. If not, we need
* to rewrite the entry by adding this port to it.
*/
if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds)
return 0;
l2_lookup.destports |= BIT(port);
} else {
/* We don't have an FDB entry. We construct a new one and
* try to find a place for it within the FDB table.
*/
l2_lookup.macaddr = ether_addr_to_u64(addr);
l2_lookup.destports = BIT(port);
l2_lookup.vlanid = vid;
if (last_unused >= 0) {
way = last_unused;
} else {
/* Bin is full, need to evict somebody.
* Choose victim at random. If you get these messages
* often, you may need to consider changing the
* distribution function:
* static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
*/
get_random_bytes(&way, sizeof(u8));
way %= SJA1105ET_FDB_BIN_SIZE;
dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
bin, addr, way);
/* Evict the randomly chosen victim, now that "way" is known */
sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
sja1105et_fdb_index(bin, way),
NULL, false);
}
}
l2_lookup.lockeds = true;
l2_lookup.index = sja1105et_fdb_index(bin, way);
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
l2_lookup.index, &l2_lookup,
true);
if (rc < 0)
return rc;
/* Invalidate a dynamically learned entry if that exists */
start = sja1105et_fdb_index(bin, 0);
end = sja1105et_fdb_index(bin, way);
for (i = start; i < end; i++) {
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
i, &tmp);
if (rc == -ENOENT)
continue;
if (rc)
return rc;
if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid)
continue;
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
i, NULL, false);
if (rc)
return rc;
break;
}
return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}
int sja1105et_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct sja1105_l2_lookup_entry l2_lookup = {0};
struct sja1105_private *priv = ds->priv;
int index, bin, way, rc;
bool keep;
bin = sja1105et_fdb_hash(priv, addr, vid);
way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
&l2_lookup, NULL);
if (way < 0)
return 0;
index = sja1105et_fdb_index(bin, way);
/* We have an FDB entry. Is our port in the destination mask? If yes,
* we need to remove it. If the resulting port mask becomes empty, we
* need to completely evict the FDB entry.
* Otherwise we just write it back.
*/
l2_lookup.destports &= ~BIT(port);
if (l2_lookup.destports)
keep = true;
else
keep = false;
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
index, &l2_lookup, keep);
if (rc < 0)
return rc;
return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
}
int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
struct sja1105_private *priv = ds->priv;
int rc, i;
/* Search for an existing entry in the FDB table */
l2_lookup.macaddr = ether_addr_to_u64(addr);
l2_lookup.vlanid = vid;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.destports = BIT(port);
tmp = l2_lookup;
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
SJA1105_SEARCH, &tmp);
if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) {
/* Found a static entry and this port is already in the entry's
* port mask => job done
*/
if ((tmp.destports & BIT(port)) && tmp.lockeds)
return 0;
l2_lookup = tmp;
/* l2_lookup.index is populated by the switch in case it
* found something.
*/
l2_lookup.destports |= BIT(port);
goto skip_finding_an_index;
}
/* Not found, so try to find an unused spot in the FDB.
* This is slightly inefficient because the strategy is knock-knock at
* every possible position from 0 to 1023.
*/
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
i, NULL);
if (rc < 0)
break;
}
if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
dev_err(ds->dev, "FDB is full, cannot add entry.\n");
return -EINVAL;
}
l2_lookup.index = i;
skip_finding_an_index:
l2_lookup.lockeds = true;
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
l2_lookup.index, &l2_lookup,
true);
if (rc < 0)
return rc;
/* The switch learns dynamic entries and looks up the FDB left to
* right. It is possible that our addition was concurrent with the
* dynamic learning of the same address, so now that the static entry
* has been installed, we are certain that address learning for this
* particular address has been turned off, so the dynamic entry either
* is in the FDB at an index smaller than the static one, or isn't (it
* can also be at a larger index, but in that case it is inactive
* because the static FDB entry will match first, and the dynamic one
* will eventually age out). Search for a dynamically learned address
* prior to our static one and invalidate it.
*/
tmp = l2_lookup;
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
SJA1105_SEARCH, &tmp);
if (rc < 0) {
dev_err(ds->dev,
"port %d failed to read back entry for %pM vid %d: %pe\n",
port, addr, vid, ERR_PTR(rc));
return rc;
}
if (tmp.index < l2_lookup.index) {
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
tmp.index, NULL, false);
if (rc < 0)
return rc;
}
return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}
int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct sja1105_l2_lookup_entry l2_lookup = {0};
struct sja1105_private *priv = ds->priv;
bool keep;
int rc;
l2_lookup.macaddr = ether_addr_to_u64(addr);
l2_lookup.vlanid = vid;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.destports = BIT(port);
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
SJA1105_SEARCH, &l2_lookup);
if (rc < 0)
return 0;
l2_lookup.destports &= ~BIT(port);
/* Decide whether we remove just this port from the FDB entry,
* or if we remove it completely.
*/
if (l2_lookup.destports)
keep = true;
else
keep = false;
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
l2_lookup.index, &l2_lookup, keep);
if (rc < 0)
return rc;
return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
}
static int sja1105_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
int rc;
if (!vid) {
switch (db.type) {
case DSA_DB_PORT:
vid = dsa_tag_8021q_standalone_vid(db.dp);
break;
case DSA_DB_BRIDGE:
vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
break;
default:
return -EOPNOTSUPP;
}
}
mutex_lock(&priv->fdb_lock);
rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
mutex_unlock(&priv->fdb_lock);
return rc;
}
static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
if (!vid) {
switch (db.type) {
case DSA_DB_PORT:
vid = dsa_tag_8021q_standalone_vid(db.dp);
break;
case DSA_DB_BRIDGE:
vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
break;
default:
return -EOPNOTSUPP;
}
}
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
int rc;
mutex_lock(&priv->fdb_lock);
rc = __sja1105_fdb_del(ds, port, addr, vid, db);
mutex_unlock(&priv->fdb_lock);
return rc;
}
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int i;
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
struct sja1105_l2_lookup_entry l2_lookup = {0};
u8 macaddr[ETH_ALEN];
int rc;
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
i, &l2_lookup);
/* No fdb entry at i, not an issue */
if (rc == -ENOENT)
continue;
if (rc) {
dev_err(dev, "Failed to dump FDB: %d\n", rc);
return rc;
}
/* FDB dump callback is per port. This means we have to
* disregard a valid entry if it's not for this port, even if
* only to revisit it later. This is inefficient because the
* 1024-sized FDB table needs to be traversed 4 times through
* SPI during a 'bridge fdb show' command.
*/
if (!(l2_lookup.destports & BIT(port)))
continue;
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
* only wants to see unicast
*/
if (is_multicast_ether_addr(macaddr))
continue;
/* We need to hide the dsa_8021q VLANs from the user. */
if (vid_is_dsa_8021q(l2_lookup.vlanid))
l2_lookup.vlanid = 0;
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
if (rc)
return rc;
}
return 0;
}
static void sja1105_fast_age(struct dsa_switch *ds, int port)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
struct dsa_db db = {
.type = DSA_DB_BRIDGE,
.bridge = {
.dev = dsa_port_bridge_dev_get(dp),
.num = dsa_port_bridge_num_get(dp),
},
};
int i;
mutex_lock(&priv->fdb_lock);
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
struct sja1105_l2_lookup_entry l2_lookup = {0};
u8 macaddr[ETH_ALEN];
int rc;
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
i, &l2_lookup);
/* No fdb entry at i, not an issue */
if (rc == -ENOENT)
continue;
if (rc) {
dev_err(ds->dev, "Failed to read FDB: %pe\n",
ERR_PTR(rc));
break;
}
if (!(l2_lookup.destports & BIT(port)))
continue;
/* Don't delete static FDB entries */
if (l2_lookup.lockeds)
continue;
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
if (rc) {
dev_err(ds->dev,
"Failed to delete FDB entry %pM vid %lld: %pe\n",
macaddr, l2_lookup.vlanid, ERR_PTR(rc));
break;
}
}
mutex_unlock(&priv->fdb_lock);
}
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db);
}
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db);
}
/* Common function for unicast and broadcast flood configuration.
* Flooding is configured between each {ingress, egress} port pair, and since
* the bridge's semantics are those of "egress flooding", it means we must
* enable flooding towards this port from all ingress ports that are in the
* same forwarding domain.
*/
static int sja1105_manage_flood_domains(struct sja1105_private *priv)
{
struct sja1105_l2_forwarding_entry *l2_fwd;
struct dsa_switch *ds = priv->ds;
int from, to, rc;
l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
for (from = 0; from < ds->num_ports; from++) {
u64 fl_domain = 0, bc_domain = 0;
for (to = 0; to < priv->ds->num_ports; to++) {
if (!sja1105_can_forward(l2_fwd, from, to))
continue;
if (priv->ucast_egress_floods & BIT(to))
fl_domain |= BIT(to);
if (priv->bcast_egress_floods & BIT(to))
bc_domain |= BIT(to);
}
/* Nothing changed, nothing to do */
if (l2_fwd[from].fl_domain == fl_domain &&
l2_fwd[from].bc_domain == bc_domain)
continue;
l2_fwd[from].fl_domain = fl_domain;
l2_fwd[from].bc_domain = bc_domain;
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
from, &l2_fwd[from], true);
if (rc < 0)
return rc;
}
return 0;
}
static int sja1105_bridge_member(struct dsa_switch *ds, int port,
struct dsa_bridge bridge, bool member)
{
struct sja1105_l2_forwarding_entry *l2_fwd;
struct sja1105_private *priv = ds->priv;
int i, rc;
l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
for (i = 0; i < ds->num_ports; i++) {
/* Add this port to the forwarding matrix of the
* other ports in the same bridge, and vice versa.
*/
if (!dsa_is_user_port(ds, i))
continue;
/* For the ports already under the bridge, only one thing needs
* to be done, and that is to add this port to their
* reachability domain. So we can perform the SPI write for
* them immediately. However, for this port itself (the one
* that is new to the bridge), we need to add all other ports
* to its reachability domain. So we do that incrementally in
* this loop, and perform the SPI write only at the end, once
* the domain contains all other bridge ports.
*/
if (i == port)
continue;
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
sja1105_port_allow_traffic(l2_fwd, i, port, member);
sja1105_port_allow_traffic(l2_fwd, port, i, member);
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
i, &l2_fwd[i], true);
if (rc < 0)
return rc;
}
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
port, &l2_fwd[port], true);
if (rc)
return rc;
rc = sja1105_commit_pvid(ds, port);
if (rc)
return rc;
return sja1105_manage_flood_domains(priv);
}
static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
struct sja1105_mac_config_entry *mac;
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
switch (state) {
case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
/* From UM10944 description of DRPDTAG (why put this there?):
* "Management traffic flows to the port regardless of the state
* of the INGRESS flag". So BPDUs are still allowed to pass.
* At the moment no difference between DISABLED and BLOCKING.
*/
mac[port].ingress = false;
mac[port].egress = false;
mac[port].dyn_learn = false;
break;
case BR_STATE_LISTENING:
mac[port].ingress = true;
mac[port].egress = false;
mac[port].dyn_learn = false;
break;
case BR_STATE_LEARNING:
mac[port].ingress = true;
mac[port].egress = false;
mac[port].dyn_learn = dp->learning;
break;
case BR_STATE_FORWARDING:
mac[port].ingress = true;
mac[port].egress = true;
mac[port].dyn_learn = dp->learning;
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
return;
}
sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
&mac[port], true);
}
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
int rc;
rc = sja1105_bridge_member(ds, port, bridge, true);
if (rc)
return rc;
rc = dsa_tag_8021q_bridge_join(ds, port, bridge);
if (rc) {
sja1105_bridge_member(ds, port, bridge, false);
return rc;
}
*tx_fwd_offload = true;
return 0;
}
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
dsa_tag_8021q_bridge_leave(ds, port, bridge);
sja1105_bridge_member(ds, port, bridge, false);
}
#define BYTES_PER_KBIT (1000LL / 8)
/* Port 0 (the uC port) does not have CBS shapers */
#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
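/* BYTES_PER_KBIT is 125, i.e. 1 kbit/s corresponds to 125 bytes/s.
* On switches with a fixed CBS mapping, port 3 prio 5 would use shaper
* (3 - 1) * 8 + 5 = 21, assuming SJA1105_NUM_TC is 8 as elsewhere in
* this file.
*/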
static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
int port, int prio)
{
int i;
if (priv->info->fixed_cbs_mapping) {
i = SJA1110_FIXED_CBS(port, prio);
if (i >= 0 && i < priv->info->num_cbs_shapers)
return i;
return -1;
}
for (i = 0; i < priv->info->num_cbs_shapers; i++)
if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
return i;
return -1;
}
static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
{
int i;
if (priv->info->fixed_cbs_mapping)
return -1;
for (i = 0; i < priv->info->num_cbs_shapers; i++)
if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
return i;
return -1;
}
static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
int prio)
{
int i;
for (i = 0; i < priv->info->num_cbs_shapers; i++) {
struct sja1105_cbs_entry *cbs = &priv->cbs[i];
if (cbs->port == port && cbs->prio == prio) {
memset(cbs, 0, sizeof(*cbs));
return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
i, cbs, true);
}
}
return 0;
}
static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
struct tc_cbs_qopt_offload *offload)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_cbs_entry *cbs;
s64 port_transmit_rate_kbps;
int index;
if (!offload->enable)
return sja1105_delete_cbs_shaper(priv, port, offload->queue);
/* The user may be replacing an existing shaper */
index = sja1105_find_cbs_shaper(priv, port, offload->queue);
if (index < 0) {
/* That isn't the case - see if we can allocate a new one */
index = sja1105_find_unused_cbs_shaper(priv);
if (index < 0)
return -ENOSPC;
}
cbs = &priv->cbs[index];
cbs->port = port;
cbs->prio = offload->queue;
/* locredit and sendslope are negative by definition. In hardware,
* positive values must be provided, and the negative sign is implicit.
*/
cbs->credit_hi = offload->hicredit;
cbs->credit_lo = abs(offload->locredit);
/* User space is in kbits/sec, while the hardware in bytes/sec times
* link speed. Since the given offload->sendslope is good only for the
* current link speed anyway, and user space is likely to reprogram it
* when that changes, don't even bother to track the port's link speed,
* but deduce the port transmit rate from idleslope - sendslope.
*/
port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
port_transmit_rate_kbps);
cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
port_transmit_rate_kbps);
/* Convert the negative values from 64-bit 2's complement
* to 32-bit 2's complement (for the case of 0x80000000 whose
* negative is still negative).
*/
cbs->credit_lo &= GENMASK_ULL(31, 0);
cbs->send_slope &= GENMASK_ULL(31, 0);
return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
true);
}
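/* Worked example with hypothetical tc-cbs parameters on a 1 Gbps port:
* idleslope = 20000 kbit/s and sendslope = -980000 kbit/s give
* port_transmit_rate_kbps = 1000000, so idle_slope = 20000 * 125 /
* 1000000 = 2 and send_slope = 980000 * 125 / 1000000 = 122 (both
* truncated by the integer division).
*/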
static int sja1105_reload_cbs(struct sja1105_private *priv)
{
int rc = 0, i;
/* The credit based shapers are only allocated if
* CONFIG_NET_SCH_CBS is enabled.
*/
if (!priv->cbs)
return 0;
for (i = 0; i < priv->info->num_cbs_shapers; i++) {
struct sja1105_cbs_entry *cbs = &priv->cbs[i];
if (!cbs->idle_slope && !cbs->send_slope)
continue;
rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
true);
if (rc)
break;
}
return rc;
}
static const char * const sja1105_reset_reasons[] = {
[SJA1105_VLAN_FILTERING] = "VLAN filtering",
[SJA1105_AGEING_TIME] = "Ageing time",
[SJA1105_SCHEDULING] = "Time-aware scheduling",
[SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
[SJA1105_VIRTUAL_LINKS] = "Virtual links",
};
/* For situations where we need to change a setting at runtime that is only
* available through the static configuration, resetting the switch in order
* to upload the new static config is unavoidable. Back up the settings we
* modify at runtime (currently only MAC) and restore them after uploading,
* such that this operation is relatively seamless.
*/
int sja1105_static_config_reload(struct sja1105_private *priv,
enum sja1105_reset_reason reason)
{
struct ptp_system_timestamp ptp_sts_before;
struct ptp_system_timestamp ptp_sts_after;
int speed_mbps[SJA1105_MAX_NUM_PORTS];
u16 bmcr[SJA1105_MAX_NUM_PORTS] = {0};
struct sja1105_mac_config_entry *mac;
struct dsa_switch *ds = priv->ds;
s64 t1, t2, t3, t4;
s64 t12, t34;
int rc, i;
s64 now;
mutex_lock(&priv->fdb_lock);
mutex_lock(&priv->mgmt_lock);
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
/* Back up the dynamic link speed changed by sja1105_adjust_port_config
* in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
* switch wants to see in the static config in order to allow us to
* change it through the dynamic interface later.
*/
for (i = 0; i < ds->num_ports; i++) {
speed_mbps[i] = sja1105_port_speed_to_ethtool(priv,
mac[i].speed);
mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
if (priv->xpcs[i])
bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i,
MDIO_MMD_VEND2, MDIO_CTRL1);
}
/* No PTP operations can run right now */
mutex_lock(&priv->ptp_data.lock);
rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
if (rc < 0) {
mutex_unlock(&priv->ptp_data.lock);
goto out;
}
/* Reset switch and send updated static configuration */
rc = sja1105_static_config_upload(priv);
if (rc < 0) {
mutex_unlock(&priv->ptp_data.lock);
goto out;
}
rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
if (rc < 0) {
mutex_unlock(&priv->ptp_data.lock);
goto out;
}
t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
/* Mid point, corresponds to pre-reset PTPCLKVAL */
t12 = t1 + (t2 - t1) / 2;
/* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
t34 = t3 + (t4 - t3) / 2;
/* Advance PTPCLKVAL by the time it took since its readout */
now += (t34 - t12);
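/* Illustration: if the reset and static config upload took 1.5 s of
* system time, t34 - t12 is roughly 1.5e9 ns, so the restored clock is
* advanced by that amount and PTP time stays approximately continuous
* across the reset.
*/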
__sja1105_ptp_adjtime(ds, now);
mutex_unlock(&priv->ptp_data.lock);
dev_info(priv->ds->dev,
"Reset switch and programmed static config. Reason: %s\n",
sja1105_reset_reasons[reason]);
/* Configure the CGU (PLLs) for MII and RMII PHYs.
* For these interfaces there is no dynamic configuration
* needed, since PLLs have same settings at all speeds.
*/
if (priv->info->clocking_setup) {
rc = priv->info->clocking_setup(priv);
if (rc < 0)
goto out;
}
for (i = 0; i < ds->num_ports; i++) {
struct dw_xpcs *xpcs = priv->xpcs[i];
unsigned int neg_mode;
rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
if (rc < 0)
goto out;
if (!xpcs)
continue;
if (bmcr[i] & BMCR_ANENABLE)
neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
else
neg_mode = PHYLINK_PCS_NEG_OUTBAND;
rc = xpcs_do_config(xpcs, priv->phy_mode[i], NULL, neg_mode);
if (rc < 0)
goto out;
if (neg_mode == PHYLINK_PCS_NEG_OUTBAND) {
int speed = SPEED_UNKNOWN;
if (priv->phy_mode[i] == PHY_INTERFACE_MODE_2500BASEX)
speed = SPEED_2500;
else if (bmcr[i] & BMCR_SPEED1000)
speed = SPEED_1000;
else if (bmcr[i] & BMCR_SPEED100)
speed = SPEED_100;
else
speed = SPEED_10;
xpcs_link_up(&xpcs->pcs, neg_mode, priv->phy_mode[i],
speed, DUPLEX_FULL);
}
}
rc = sja1105_reload_cbs(priv);
if (rc < 0)
goto out;
out:
mutex_unlock(&priv->mgmt_lock);
mutex_unlock(&priv->fdb_lock);
return rc;
}
static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
struct sja1105_private *priv = ds->priv;
return priv->info->tag_proto;
}
/* The TPID setting belongs to the General Parameters table,
* which can only be partially reconfigured at runtime (and not the TPID).
* So a switch reset is required.
*/
int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct netlink_ext_ack *extack)
{
struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
struct sja1105_table *table;
struct sja1105_rule *rule;
u16 tpid, tpid2;
int rc;
list_for_each_entry(rule, &priv->flow_block.rules, list) {
if (rule->type == SJA1105_RULE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Cannot change VLAN filtering with active VL rules");
return -EBUSY;
}
}
if (enabled) {
/* Enable VLAN filtering. */
tpid = ETH_P_8021Q;
tpid2 = ETH_P_8021AD;
} else {
/* Disable VLAN filtering. */
tpid = ETH_P_SJA1105;
tpid2 = ETH_P_SJA1105;
}
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
general_params->tpid = tpid;
/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
general_params->tpid2 = tpid2;
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
rc = sja1105_commit_pvid(ds, port);
if (rc)
return rc;
}
rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
if (rc)
NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype");
return rc;
}
static int sja1105_vlan_add(struct sja1105_private *priv, int port, u16 vid,
u16 flags, bool allowed_ingress)
{
struct sja1105_vlan_lookup_entry *vlan;
struct sja1105_table *table;
int match, rc;
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
match = sja1105_is_vlan_configured(priv, vid);
if (match < 0) {
rc = sja1105_table_resize(table, table->entry_count + 1);
if (rc)
return rc;
match = table->entry_count - 1;
}
/* Assign pointer after the resize (it's new memory) */
vlan = table->entries;
vlan[match].type_entry = SJA1110_VLAN_D_TAG;
vlan[match].vlanid = vid;
vlan[match].vlan_bc |= BIT(port);
if (allowed_ingress)
vlan[match].vmemb_port |= BIT(port);
else
vlan[match].vmemb_port &= ~BIT(port);
if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
vlan[match].tag_port &= ~BIT(port);
else
vlan[match].tag_port |= BIT(port);
return sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
&vlan[match], true);
}
static int sja1105_vlan_del(struct sja1105_private *priv, int port, u16 vid)
{
struct sja1105_vlan_lookup_entry *vlan;
struct sja1105_table *table;
bool keep = true;
int match, rc;
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
match = sja1105_is_vlan_configured(priv, vid);
/* Can't delete a missing entry. */
if (match < 0)
return 0;
/* Re-fetch the entries pointer (it may have moved if the table was resized) */
vlan = table->entries;
vlan[match].vlanid = vid;
vlan[match].vlan_bc &= ~BIT(port);
vlan[match].vmemb_port &= ~BIT(port);
/* Also unset tag_port, just so we don't have a confusing bitmap
* (no practical purpose).
*/
vlan[match].tag_port &= ~BIT(port);
/* If there's no port left as member of this VLAN,
* it's time for it to go.
*/
if (!vlan[match].vmemb_port)
keep = false;
rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
&vlan[match], keep);
if (rc < 0)
return rc;
if (!keep)
return sja1105_table_delete_entry(table, match);
return 0;
}
static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
u16 flags = vlan->flags;
int rc;
/* Be sure to deny alterations to the configuration done by tag_8021q.
*/
if (vid_is_dsa_8021q(vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack,
"Range 3072-4095 reserved for dsa_8021q operation");
return -EBUSY;
}
/* Always install bridge VLANs as egress-tagged on CPU and DSA ports */
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
flags = 0;
rc = sja1105_vlan_add(priv, port, vlan->vid, flags, true);
if (rc)
return rc;
if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
priv->bridge_pvid[port] = vlan->vid;
return sja1105_commit_pvid(ds, port);
}
static int sja1105_bridge_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct sja1105_private *priv = ds->priv;
int rc;
rc = sja1105_vlan_del(priv, port, vlan->vid);
if (rc)
return rc;
/* In case the pvid was deleted, make sure that untagged packets will
* be dropped.
*/
return sja1105_commit_pvid(ds, port);
}
static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
u16 flags)
{
struct sja1105_private *priv = ds->priv;
bool allowed_ingress = true;
int rc;
/* Prevent attackers from trying to inject a DSA tag from
* the outside world.
*/
if (dsa_is_user_port(ds, port))
allowed_ingress = false;
rc = sja1105_vlan_add(priv, port, vid, flags, allowed_ingress);
if (rc)
return rc;
if (flags & BRIDGE_VLAN_INFO_PVID)
priv->tag_8021q_pvid[port] = vid;
return sja1105_commit_pvid(ds, port);
}
static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
struct sja1105_private *priv = ds->priv;
return sja1105_vlan_del(priv, port, vid);
}
static int sja1105_prechangeupper(struct dsa_switch *ds, int port,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack = info->info.extack;
struct net_device *upper = info->upper_dev;
struct dsa_switch_tree *dst = ds->dst;
struct dsa_port *dp;
if (is_vlan_dev(upper)) {
NL_SET_ERR_MSG_MOD(extack, "8021q uppers are not supported");
return -EBUSY;
}
if (netif_is_bridge_master(upper)) {
list_for_each_entry(dp, &dst->ports, list) {
struct net_device *br = dsa_port_bridge_dev_get(dp);
if (br && br != upper && br_vlan_enabled(br)) {
NL_SET_ERR_MSG_MOD(extack,
"Only one VLAN-aware bridge is supported");
return -EBUSY;
}
}
}
return 0;
}
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
struct sk_buff *skb, bool takets)
{
struct sja1105_mgmt_entry mgmt_route = {0};
struct sja1105_private *priv = ds->priv;
struct ethhdr *hdr;
int timeout = 10;
int rc;
hdr = eth_hdr(skb);
mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
mgmt_route.destports = BIT(port);
mgmt_route.enfport = 1;
mgmt_route.tsreg = 0;
mgmt_route.takets = takets;
rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
slot, &mgmt_route, true);
if (rc < 0) {
kfree_skb(skb);
return rc;
}
/* Transfer skb to the host port. */
dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
/* Wait until the switch has processed the frame */
do {
rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
slot, &mgmt_route);
if (rc < 0) {
dev_err_ratelimited(priv->ds->dev,
"failed to poll for mgmt route\n");
continue;
}
/* UM10944: The ENFPORT flag of the respective entry is
* cleared when a match is found. The host can use this
* flag as an acknowledgment.
*/
cpu_relax();
} while (mgmt_route.enfport && --timeout);
if (!timeout) {
/* Clean up the management route so that a follow-up
* frame may not match on it by mistake.
* This is only supported in hardware on P/Q/R/S - on E/T it is
* a no-op and we silently discard the -EOPNOTSUPP.
*/
sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
slot, &mgmt_route, false);
dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
}
return NETDEV_TX_OK;
}
#define work_to_xmit_work(w) \
container_of((w), struct sja1105_deferred_xmit_work, work)
/* Deferred work is unfortunately necessary because setting up the management
* route cannot be done from atomic context (the SPI transfer takes a
* sleepable lock on the bus).
*/
static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
struct sja1105_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
struct sk_buff *clone, *skb = xmit_work->skb;
struct dsa_switch *ds = xmit_work->dp->ds;
struct sja1105_private *priv = ds->priv;
int port = xmit_work->dp->index;
clone = SJA1105_SKB_CB(skb)->clone;
mutex_lock(&priv->mgmt_lock);
sja1105_mgmt_xmit(ds, port, 0, skb, !!clone);
/* The clone, if any, was made by dsa_skb_tx_timestamp */
if (clone)
sja1105_ptp_txtstamp_skb(ds, port, clone);
mutex_unlock(&priv->mgmt_lock);
kfree(xmit_work);
}
static int sja1105_connect_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_tagger_data *tagger_data;
if (proto != priv->info->tag_proto)
return -EPROTONOSUPPORT;
tagger_data = sja1105_tagger_data(ds);
tagger_data->xmit_work_fn = sja1105_port_deferred_xmit;
tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp;
return 0;
}
/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
* which cannot be reconfigured at runtime. So a switch reset is required.
*/
static int sja1105_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_private *priv = ds->priv;
struct sja1105_table *table;
unsigned int maxage;
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
l2_lookup_params = table->entries;
maxage = SJA1105_AGEING_TIME_MS(ageing_time);
if (l2_lookup_params->maxage == maxage)
return 0;
l2_lookup_params->maxage = maxage;
return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
}
static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct sja1105_l2_policing_entry *policing;
struct sja1105_private *priv = ds->priv;
new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
new_mtu += VLAN_HLEN;
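/* Worked example, assuming the usual header sizes (VLAN_ETH_HLEN = 18,
 * ETH_FCS_LEN = 4, VLAN_HLEN = 4): an MTU of 1500 maps to a policer MAXLEN
 * of 1522 on user ports, and 1526 on CPU/DSA ports which carry an extra tag.
 */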
policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
if (policing[port].maxlen == new_mtu)
return 0;
policing[port].maxlen = new_mtu;
return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
{
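/* 2043 bytes appears to be the largest frame length (MAXLEN) accepted by the
 * L2 policing table; subtracting the VLAN-tagged Ethernet header (18 bytes)
 * and the FCS (4 bytes) leaves a maximum MTU of 2021 bytes.
 */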
return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
}
static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_TAPRIO:
return sja1105_setup_tc_taprio(ds, port, type_data);
case TC_SETUP_QDISC_CBS:
return sja1105_setup_tc_cbs(ds, port, type_data);
default:
return -EOPNOTSUPP;
}
}
/* We have a single mirror (@to) port, but can configure ingress and egress
* mirroring on all other (@from) ports.
* We need to allow mirroring rules only as long as the @to port is always the
* same, and we need to unset the @to port from mirr_port only when there is no
* mirroring rule that references it.
*/
static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
bool ingress, bool enabled)
{
struct sja1105_general_params_entry *general_params;
struct sja1105_mac_config_entry *mac;
struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
bool already_enabled;
u64 new_mirr_port;
int rc;
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
already_enabled = (general_params->mirr_port != ds->num_ports);
if (already_enabled && enabled && general_params->mirr_port != to) {
dev_err(priv->ds->dev,
"Delete mirroring rules towards port %llu first\n",
general_params->mirr_port);
return -EBUSY;
}
new_mirr_port = to;
if (!enabled) {
bool keep = false;
int port;
/* Anybody still referencing mirr_port? */
for (port = 0; port < ds->num_ports; port++) {
if (mac[port].ing_mirr || mac[port].egr_mirr) {
keep = true;
break;
}
}
/* Unset already_enabled for next time */
if (!keep)
new_mirr_port = ds->num_ports;
}
if (new_mirr_port != general_params->mirr_port) {
general_params->mirr_port = new_mirr_port;
rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
0, general_params, true);
if (rc < 0)
return rc;
}
if (ingress)
mac[from].ing_mirr = enabled;
else
mac[from].egr_mirr = enabled;
return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
&mac[from], true);
}
static int sja1105_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
{
return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
ingress, true);
}
static void sja1105_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
mirror->ingress, false);
}
static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
struct dsa_mall_policer_tc_entry *policer)
{
struct sja1105_l2_policing_entry *policing;
struct sja1105_private *priv = ds->priv;
policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
/* In hardware, every 8 microseconds the credit level is incremented by
* the value of RATE bytes divided by 64, up to a maximum of SMAX
* bytes.
*/
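/* A short derivation, following the note above: per second the credit grows
 * by (RATE / 64) * (10^6 / 8) bytes, so RATE = rate_bytes_per_sec * 64 * 8 /
 * 10^6 = rate_bytes_per_sec * 512 / 10^6, which is what div_u64() computes
 * below.
 */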
policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
1000000);
policing[port].smax = policer->burst;
return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
{
struct sja1105_l2_policing_entry *policing;
struct sja1105_private *priv = ds->priv;
policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
policing[port].rate = SJA1105_RATE_MBPS(1000);
policing[port].smax = 65535;
sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
bool enabled)
{
struct sja1105_mac_config_entry *mac;
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
mac[port].dyn_learn = enabled;
return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
&mac[port], true);
}
static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
struct switchdev_brport_flags flags)
{
if (flags.mask & BR_FLOOD) {
if (flags.val & BR_FLOOD)
priv->ucast_egress_floods |= BIT(to);
else
priv->ucast_egress_floods &= ~BIT(to);
}
if (flags.mask & BR_BCAST_FLOOD) {
if (flags.val & BR_BCAST_FLOOD)
priv->bcast_egress_floods |= BIT(to);
else
priv->bcast_egress_floods &= ~BIT(to);
}
return sja1105_manage_flood_domains(priv);
}
static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct sja1105_l2_lookup_entry *l2_lookup;
struct sja1105_table *table;
int match, rc;
mutex_lock(&priv->fdb_lock);
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
l2_lookup = table->entries;
for (match = 0; match < table->entry_count; match++)
if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
break;
if (match == table->entry_count) {
NL_SET_ERR_MSG_MOD(extack,
"Could not find FDB entry for unknown multicast");
rc = -ENOSPC;
goto out;
}
if (flags.val & BR_MCAST_FLOOD)
l2_lookup[match].destports |= BIT(to);
else
l2_lookup[match].destports &= ~BIT(to);
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
l2_lookup[match].index,
&l2_lookup[match], true);
out:
mutex_unlock(&priv->fdb_lock);
return rc;
}
static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
BR_BCAST_FLOOD))
return -EINVAL;
if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD) &&
!priv->info->can_limit_mcast_flood) {
bool multicast = !!(flags.val & BR_MCAST_FLOOD);
bool unicast = !!(flags.val & BR_FLOOD);
if (unicast != multicast) {
NL_SET_ERR_MSG_MOD(extack,
"This chip cannot configure multicast flooding independently of unicast");
return -EINVAL;
}
}
return 0;
}
static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
int rc;
if (flags.mask & BR_LEARNING) {
bool learn_ena = !!(flags.val & BR_LEARNING);
rc = sja1105_port_set_learning(priv, port, learn_ena);
if (rc)
return rc;
}
if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
if (rc)
return rc;
}
/* For chips that can't offload BR_MCAST_FLOOD independently, there
* is nothing to do here, we ensured the configuration is in sync by
* offloading BR_FLOOD.
*/
if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
rc = sja1105_port_mcast_flood(priv, port, flags,
extack);
if (rc)
return rc;
}
return 0;
}
/* The programming model for the SJA1105 switch is "all-at-once" via static
* configuration tables. Some of these can be dynamically modified at runtime,
* but not the xMII mode parameters table.
* Furthermore, some PHYs may not have crystals for generating their clocks
* (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
* ref_clk pin. So port clocking needs to be initialized early, before
* connecting to PHYs is attempted, otherwise they won't respond through MDIO.
* Setting the correct PHY link speed does not matter at this point.
* But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
* bindings are not yet parsed by DSA core. We need to parse early so that we
* can populate the xMII mode parameters table.
*/
static int sja1105_setup(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
int rc;
if (priv->info->disable_microcontroller) {
rc = priv->info->disable_microcontroller(priv);
if (rc < 0) {
dev_err(ds->dev,
"Failed to disable microcontroller: %pe\n",
ERR_PTR(rc));
return rc;
}
}
/* Create and send configuration down to device */
rc = sja1105_static_config_load(priv);
if (rc < 0) {
dev_err(ds->dev, "Failed to load static config: %d\n", rc);
return rc;
}
/* Configure the CGU (PHY link modes and speeds) */
if (priv->info->clocking_setup) {
rc = priv->info->clocking_setup(priv);
if (rc < 0) {
dev_err(ds->dev,
"Failed to configure MII clocking: %pe\n",
ERR_PTR(rc));
goto out_static_config_free;
}
}
sja1105_tas_setup(ds);
sja1105_flower_setup(ds);
rc = sja1105_ptp_clock_register(ds);
if (rc < 0) {
dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
goto out_flower_teardown;
}
rc = sja1105_mdiobus_register(ds);
if (rc < 0) {
dev_err(ds->dev, "Failed to register MDIO bus: %pe\n",
ERR_PTR(rc));
goto out_ptp_clock_unregister;
}
rc = sja1105_devlink_setup(ds);
if (rc < 0)
goto out_mdiobus_unregister;
rtnl_lock();
rc = dsa_tag_8021q_register(ds, htons(ETH_P_8021Q));
rtnl_unlock();
if (rc)
goto out_devlink_teardown;
/* On SJA1105, VLAN filtering per se is always enabled in hardware.
* The only thing we can do to disable it is lie about what the 802.1Q
* EtherType is.
* So it will still try to apply VLAN filtering, but all ingress
* traffic (except frames received with EtherType of ETH_P_SJA1105)
* will be internally tagged with a distorted VLAN header where the
* TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
*/
ds->vlan_filtering_is_global = true;
ds->untag_bridge_pvid = true;
ds->fdb_isolation = true;
/* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
ds->max_num_bridges = 7;
/* Advertise the 8 egress queues */
ds->num_tx_queues = SJA1105_NUM_TC;
ds->mtu_enforcement_ingress = true;
ds->assisted_learning_on_cpu_port = true;
return 0;
out_devlink_teardown:
sja1105_devlink_teardown(ds);
out_mdiobus_unregister:
sja1105_mdiobus_unregister(ds);
out_ptp_clock_unregister:
sja1105_ptp_clock_unregister(ds);
out_flower_teardown:
sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
out_static_config_free:
sja1105_static_config_free(&priv->static_config);
return rc;
}
static void sja1105_teardown(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
rtnl_lock();
dsa_tag_8021q_unregister(ds);
rtnl_unlock();
sja1105_devlink_teardown(ds);
sja1105_mdiobus_unregister(ds);
sja1105_ptp_clock_unregister(ds);
sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
sja1105_static_config_free(&priv->static_config);
}
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.connect_tag_protocol = sja1105_connect_tag_protocol,
.setup = sja1105_setup,
.teardown = sja1105_teardown,
.set_ageing_time = sja1105_set_ageing_time,
.port_change_mtu = sja1105_change_mtu,
.port_max_mtu = sja1105_get_max_mtu,
.phylink_get_caps = sja1105_phylink_get_caps,
.phylink_mac_select_pcs = sja1105_mac_select_pcs,
.phylink_mac_link_up = sja1105_mac_link_up,
.phylink_mac_link_down = sja1105_mac_link_down,
.get_strings = sja1105_get_strings,
.get_ethtool_stats = sja1105_get_ethtool_stats,
.get_sset_count = sja1105_get_sset_count,
.get_ts_info = sja1105_get_ts_info,
.port_fdb_dump = sja1105_fdb_dump,
.port_fdb_add = sja1105_fdb_add,
.port_fdb_del = sja1105_fdb_del,
.port_fast_age = sja1105_fast_age,
.port_bridge_join = sja1105_bridge_join,
.port_bridge_leave = sja1105_bridge_leave,
.port_pre_bridge_flags = sja1105_port_pre_bridge_flags,
.port_bridge_flags = sja1105_port_bridge_flags,
.port_stp_state_set = sja1105_bridge_stp_state_set,
.port_vlan_filtering = sja1105_vlan_filtering,
.port_vlan_add = sja1105_bridge_vlan_add,
.port_vlan_del = sja1105_bridge_vlan_del,
.port_mdb_add = sja1105_mdb_add,
.port_mdb_del = sja1105_mdb_del,
.port_hwtstamp_get = sja1105_hwtstamp_get,
.port_hwtstamp_set = sja1105_hwtstamp_set,
.port_rxtstamp = sja1105_port_rxtstamp,
.port_txtstamp = sja1105_port_txtstamp,
.port_setup_tc = sja1105_port_setup_tc,
.port_mirror_add = sja1105_mirror_add,
.port_mirror_del = sja1105_mirror_del,
.port_policer_add = sja1105_port_policer_add,
.port_policer_del = sja1105_port_policer_del,
.cls_flower_add = sja1105_cls_flower_add,
.cls_flower_del = sja1105_cls_flower_del,
.cls_flower_stats = sja1105_cls_flower_stats,
.devlink_info_get = sja1105_devlink_info_get,
.tag_8021q_vlan_add = sja1105_dsa_8021q_vlan_add,
.tag_8021q_vlan_del = sja1105_dsa_8021q_vlan_del,
.port_prechangeupper = sja1105_prechangeupper,
};
static const struct of_device_id sja1105_dt_ids[];
static int sja1105_check_device_id(struct sja1105_private *priv)
{
const struct sja1105_regs *regs = priv->info->regs;
u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
struct device *dev = &priv->spidev->dev;
const struct of_device_id *match;
u32 device_id;
u64 part_no;
int rc;
rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
NULL);
if (rc < 0)
return rc;
rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
SJA1105_SIZE_DEVICE_ID);
if (rc < 0)
return rc;
sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
for (match = sja1105_dt_ids; match->compatible[0]; match++) {
const struct sja1105_info *info = match->data;
/* Is what's been probed in our match table at all? */
if (info->device_id != device_id || info->part_no != part_no)
continue;
/* But is it what's in the device tree? */
if (priv->info->device_id != device_id ||
priv->info->part_no != part_no) {
dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
priv->info->name, info->name);
/* It isn't. No problem, pick that up. */
priv->info = info;
}
return 0;
}
dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
device_id, part_no);
return -ENODEV;
}
static int sja1105_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct sja1105_private *priv;
size_t max_xfer, max_msg;
struct dsa_switch *ds;
int rc;
if (!dev->of_node) {
dev_err(dev, "No DTS bindings for SJA1105 driver\n");
return -EINVAL;
}
rc = sja1105_hw_reset(dev, 1, 1);
if (rc)
return rc;
priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/* Populate our driver private structure (priv) based on
* the device tree node that was probed (spi)
*/
priv->spidev = spi;
spi_set_drvdata(spi, priv);
/* Configure the SPI bus */
spi->bits_per_word = 8;
rc = spi_setup(spi);
if (rc < 0) {
dev_err(dev, "Could not init SPI\n");
return rc;
}
/* In sja1105_xfer, we send spi_messages composed of two spi_transfers:
* a small one for the message header and another one for the current
* chunk of the packed buffer.
* Check that the restrictions imposed by the SPI controller are
* respected: the chunk buffer is smaller than the max transfer size,
* and the total length of the chunk plus its message header is smaller
* than the max message size.
* We do that during probe time since the maximum transfer size is a
* runtime invariant.
*/
max_xfer = spi_max_transfer_size(spi);
max_msg = spi_max_message_size(spi);
/* We need to send at least one 64-bit word of SPI payload per message
* in order to be able to make useful progress.
*/
if (max_msg < SJA1105_SIZE_SPI_MSG_HEADER + 8) {
dev_err(dev, "SPI master cannot send large enough buffers, aborting\n");
return -EINVAL;
}
priv->max_xfer_len = SJA1105_SIZE_SPI_MSG_MAXLEN;
if (priv->max_xfer_len > max_xfer)
priv->max_xfer_len = max_xfer;
if (priv->max_xfer_len > max_msg - SJA1105_SIZE_SPI_MSG_HEADER)
priv->max_xfer_len = max_msg - SJA1105_SIZE_SPI_MSG_HEADER;
priv->info = of_device_get_match_data(dev);
/* Detect hardware device */
rc = sja1105_check_device_id(priv);
if (rc < 0) {
dev_err(dev, "Device ID check failed: %d\n", rc);
return rc;
}
dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
ds->dev = dev;
ds->num_ports = priv->info->num_ports;
ds->ops = &sja1105_switch_ops;
ds->priv = priv;
priv->ds = ds;
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->dynamic_config_lock);
mutex_init(&priv->mgmt_lock);
mutex_init(&priv->fdb_lock);
spin_lock_init(&priv->ts_id_lock);
rc = sja1105_parse_dt(priv);
if (rc < 0) {
dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
return rc;
}
if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
sizeof(struct sja1105_cbs_entry),
GFP_KERNEL);
if (!priv->cbs)
return -ENOMEM;
}
return dsa_register_switch(priv->ds);
}
static void sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
if (!priv)
return;
dsa_unregister_switch(priv->ds);
}
static void sja1105_shutdown(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
if (!priv)
return;
dsa_switch_shutdown(priv->ds);
spi_set_drvdata(spi, NULL);
}
static const struct of_device_id sja1105_dt_ids[] = {
{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
{ .compatible = "nxp,sja1110a", .data = &sja1110a_info },
{ .compatible = "nxp,sja1110b", .data = &sja1110b_info },
{ .compatible = "nxp,sja1110c", .data = &sja1110c_info },
{ .compatible = "nxp,sja1110d", .data = &sja1110d_info },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
static const struct spi_device_id sja1105_spi_ids[] = {
{ "sja1105e" },
{ "sja1105t" },
{ "sja1105p" },
{ "sja1105q" },
{ "sja1105r" },
{ "sja1105s" },
{ "sja1110a" },
{ "sja1110b" },
{ "sja1110c" },
{ "sja1110d" },
{ },
};
MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
static struct spi_driver sja1105_driver = {
.driver = {
.name = "sja1105",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(sja1105_dt_ids),
},
.id_table = sja1105_spi_ids,
.probe = sja1105_probe,
.remove = sja1105_remove,
.shutdown = sja1105_shutdown,
};
module_spi_driver(sja1105_driver);
MODULE_AUTHOR("Vladimir Oltean <[email protected]>");
MODULE_AUTHOR("Georg Waibel <[email protected]>");
MODULE_DESCRIPTION("SJA1105 Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/net/dsa/sja1105/sja1105_main.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
#include "sja1105.h"
enum sja1105_counter_index {
__SJA1105_COUNTER_UNUSED,
/* MAC */
N_RUNT,
N_SOFERR,
N_ALIGNERR,
N_MIIERR,
TYPEERR,
SIZEERR,
TCTIMEOUT,
PRIORERR,
NOMASTER,
MEMOV,
MEMERR,
INVTYP,
INTCYOV,
DOMERR,
PCFBAGDROP,
SPCPRIOR,
AGEPRIOR,
PORTDROP,
LENDROP,
BAGDROP,
POLICEERR,
DRPNONA664ERR,
SPCERR,
AGEDRP,
/* HL1 */
N_N664ERR,
N_VLANERR,
N_UNRELEASED,
N_SIZEERR,
N_CRCERR,
N_VLNOTFOUND,
N_CTPOLERR,
N_POLERR,
N_RXFRM,
N_RXBYTE,
N_TXFRM,
N_TXBYTE,
/* HL2 */
N_QFULL,
N_PART_DROP,
N_EGR_DISABLED,
N_NOT_REACH,
__MAX_SJA1105ET_PORT_COUNTER,
/* P/Q/R/S only */
/* ETHER */
N_DROPS_NOLEARN = __MAX_SJA1105ET_PORT_COUNTER,
N_DROPS_NOROUTE,
N_DROPS_ILL_DTAG,
N_DROPS_DTAG,
N_DROPS_SOTAG,
N_DROPS_SITAG,
N_DROPS_UTAG,
N_TX_BYTES_1024_2047,
N_TX_BYTES_512_1023,
N_TX_BYTES_256_511,
N_TX_BYTES_128_255,
N_TX_BYTES_65_127,
N_TX_BYTES_64,
N_TX_MCAST,
N_TX_BCAST,
N_RX_BYTES_1024_2047,
N_RX_BYTES_512_1023,
N_RX_BYTES_256_511,
N_RX_BYTES_128_255,
N_RX_BYTES_65_127,
N_RX_BYTES_64,
N_RX_MCAST,
N_RX_BCAST,
__MAX_SJA1105PQRS_PORT_COUNTER,
};
struct sja1105_port_counter {
enum sja1105_stats_area area;
const char name[ETH_GSTRING_LEN];
int offset;
int start;
int end;
bool is_64bit;
};
static const struct sja1105_port_counter sja1105_port_counters[] = {
/* MAC-Level Diagnostic Counters */
[N_RUNT] = {
.area = MAC,
.name = "n_runt",
.offset = 0,
.start = 31,
.end = 24,
},
[N_SOFERR] = {
.area = MAC,
.name = "n_soferr",
.offset = 0x0,
.start = 23,
.end = 16,
},
[N_ALIGNERR] = {
.area = MAC,
.name = "n_alignerr",
.offset = 0x0,
.start = 15,
.end = 8,
},
[N_MIIERR] = {
.area = MAC,
.name = "n_miierr",
.offset = 0x0,
.start = 7,
.end = 0,
},
/* MAC-Level Diagnostic Flags */
[TYPEERR] = {
.area = MAC,
.name = "typeerr",
.offset = 0x1,
.start = 27,
.end = 27,
},
[SIZEERR] = {
.area = MAC,
.name = "sizeerr",
.offset = 0x1,
.start = 26,
.end = 26,
},
[TCTIMEOUT] = {
.area = MAC,
.name = "tctimeout",
.offset = 0x1,
.start = 25,
.end = 25,
},
[PRIORERR] = {
.area = MAC,
.name = "priorerr",
.offset = 0x1,
.start = 24,
.end = 24,
},
[NOMASTER] = {
.area = MAC,
.name = "nomaster",
.offset = 0x1,
.start = 23,
.end = 23,
},
[MEMOV] = {
.area = MAC,
.name = "memov",
.offset = 0x1,
.start = 22,
.end = 22,
},
[MEMERR] = {
.area = MAC,
.name = "memerr",
.offset = 0x1,
.start = 21,
.end = 21,
},
[INVTYP] = {
.area = MAC,
.name = "invtyp",
.offset = 0x1,
.start = 19,
.end = 19,
},
[INTCYOV] = {
.area = MAC,
.name = "intcyov",
.offset = 0x1,
.start = 18,
.end = 18,
},
[DOMERR] = {
.area = MAC,
.name = "domerr",
.offset = 0x1,
.start = 17,
.end = 17,
},
[PCFBAGDROP] = {
.area = MAC,
.name = "pcfbagdrop",
.offset = 0x1,
.start = 16,
.end = 16,
},
[SPCPRIOR] = {
.area = MAC,
.name = "spcprior",
.offset = 0x1,
.start = 15,
.end = 12,
},
[AGEPRIOR] = {
.area = MAC,
.name = "ageprior",
.offset = 0x1,
.start = 11,
.end = 8,
},
[PORTDROP] = {
.area = MAC,
.name = "portdrop",
.offset = 0x1,
.start = 6,
.end = 6,
},
[LENDROP] = {
.area = MAC,
.name = "lendrop",
.offset = 0x1,
.start = 5,
.end = 5,
},
[BAGDROP] = {
.area = MAC,
.name = "bagdrop",
.offset = 0x1,
.start = 4,
.end = 4,
},
[POLICEERR] = {
.area = MAC,
.name = "policeerr",
.offset = 0x1,
.start = 3,
.end = 3,
},
[DRPNONA664ERR] = {
.area = MAC,
.name = "drpnona664err",
.offset = 0x1,
.start = 2,
.end = 2,
},
[SPCERR] = {
.area = MAC,
.name = "spcerr",
.offset = 0x1,
.start = 1,
.end = 1,
},
[AGEDRP] = {
.area = MAC,
.name = "agedrp",
.offset = 0x1,
.start = 0,
.end = 0,
},
/* High-Level Diagnostic Counters */
[N_N664ERR] = {
.area = HL1,
.name = "n_n664err",
.offset = 0xF,
.start = 31,
.end = 0,
},
[N_VLANERR] = {
.area = HL1,
.name = "n_vlanerr",
.offset = 0xE,
.start = 31,
.end = 0,
},
[N_UNRELEASED] = {
.area = HL1,
.name = "n_unreleased",
.offset = 0xD,
.start = 31,
.end = 0,
},
[N_SIZEERR] = {
.area = HL1,
.name = "n_sizeerr",
.offset = 0xC,
.start = 31,
.end = 0,
},
[N_CRCERR] = {
.area = HL1,
.name = "n_crcerr",
.offset = 0xB,
.start = 31,
.end = 0,
},
[N_VLNOTFOUND] = {
.area = HL1,
.name = "n_vlnotfound",
.offset = 0xA,
.start = 31,
.end = 0,
},
[N_CTPOLERR] = {
.area = HL1,
.name = "n_ctpolerr",
.offset = 0x9,
.start = 31,
.end = 0,
},
[N_POLERR] = {
.area = HL1,
.name = "n_polerr",
.offset = 0x8,
.start = 31,
.end = 0,
},
[N_RXFRM] = {
.area = HL1,
.name = "n_rxfrm",
.offset = 0x6,
.start = 31,
.end = 0,
.is_64bit = true,
},
[N_RXBYTE] = {
.area = HL1,
.name = "n_rxbyte",
.offset = 0x4,
.start = 31,
.end = 0,
.is_64bit = true,
},
[N_TXFRM] = {
.area = HL1,
.name = "n_txfrm",
.offset = 0x2,
.start = 31,
.end = 0,
.is_64bit = true,
},
[N_TXBYTE] = {
.area = HL1,
.name = "n_txbyte",
.offset = 0x0,
.start = 31,
.end = 0,
.is_64bit = true,
},
[N_QFULL] = {
.area = HL2,
.name = "n_qfull",
.offset = 0x3,
.start = 31,
.end = 0,
},
[N_PART_DROP] = {
.area = HL2,
.name = "n_part_drop",
.offset = 0x2,
.start = 31,
.end = 0,
},
[N_EGR_DISABLED] = {
.area = HL2,
.name = "n_egr_disabled",
.offset = 0x1,
.start = 31,
.end = 0,
},
[N_NOT_REACH] = {
.area = HL2,
.name = "n_not_reach",
.offset = 0x0,
.start = 31,
.end = 0,
},
/* Ether Stats */
[N_DROPS_NOLEARN] = {
.area = ETHER,
.name = "n_drops_nolearn",
.offset = 0x16,
.start = 31,
.end = 0,
},
[N_DROPS_NOROUTE] = {
.area = ETHER,
.name = "n_drops_noroute",
.offset = 0x15,
.start = 31,
.end = 0,
},
[N_DROPS_ILL_DTAG] = {
.area = ETHER,
.name = "n_drops_ill_dtag",
.offset = 0x14,
.start = 31,
.end = 0,
},
[N_DROPS_DTAG] = {
.area = ETHER,
.name = "n_drops_dtag",
.offset = 0x13,
.start = 31,
.end = 0,
},
[N_DROPS_SOTAG] = {
.area = ETHER,
.name = "n_drops_sotag",
.offset = 0x12,
.start = 31,
.end = 0,
},
[N_DROPS_SITAG] = {
.area = ETHER,
.name = "n_drops_sitag",
.offset = 0x11,
.start = 31,
.end = 0,
},
[N_DROPS_UTAG] = {
.area = ETHER,
.name = "n_drops_utag",
.offset = 0x10,
.start = 31,
.end = 0,
},
[N_TX_BYTES_1024_2047] = {
.area = ETHER,
.name = "n_tx_bytes_1024_2047",
.offset = 0x0F,
.start = 31,
.end = 0,
},
[N_TX_BYTES_512_1023] = {
.area = ETHER,
.name = "n_tx_bytes_512_1023",
.offset = 0x0E,
.start = 31,
.end = 0,
},
[N_TX_BYTES_256_511] = {
.area = ETHER,
.name = "n_tx_bytes_256_511",
.offset = 0x0D,
.start = 31,
.end = 0,
},
[N_TX_BYTES_128_255] = {
.area = ETHER,
.name = "n_tx_bytes_128_255",
.offset = 0x0C,
.start = 31,
.end = 0,
},
[N_TX_BYTES_65_127] = {
.area = ETHER,
.name = "n_tx_bytes_65_127",
.offset = 0x0B,
.start = 31,
.end = 0,
},
[N_TX_BYTES_64] = {
.area = ETHER,
.name = "n_tx_bytes_64",
.offset = 0x0A,
.start = 31,
.end = 0,
},
[N_TX_MCAST] = {
.area = ETHER,
.name = "n_tx_mcast",
.offset = 0x09,
.start = 31,
.end = 0,
},
[N_TX_BCAST] = {
.area = ETHER,
.name = "n_tx_bcast",
.offset = 0x08,
.start = 31,
.end = 0,
},
[N_RX_BYTES_1024_2047] = {
.area = ETHER,
.name = "n_rx_bytes_1024_2047",
.offset = 0x07,
.start = 31,
.end = 0,
},
[N_RX_BYTES_512_1023] = {
.area = ETHER,
.name = "n_rx_bytes_512_1023",
.offset = 0x06,
.start = 31,
.end = 0,
},
[N_RX_BYTES_256_511] = {
.area = ETHER,
.name = "n_rx_bytes_256_511",
.offset = 0x05,
.start = 31,
.end = 0,
},
[N_RX_BYTES_128_255] = {
.area = ETHER,
.name = "n_rx_bytes_128_255",
.offset = 0x04,
.start = 31,
.end = 0,
},
[N_RX_BYTES_65_127] = {
.area = ETHER,
.name = "n_rx_bytes_65_127",
.offset = 0x03,
.start = 31,
.end = 0,
},
[N_RX_BYTES_64] = {
.area = ETHER,
.name = "n_rx_bytes_64",
.offset = 0x02,
.start = 31,
.end = 0,
},
[N_RX_MCAST] = {
.area = ETHER,
.name = "n_rx_mcast",
.offset = 0x01,
.start = 31,
.end = 0,
},
[N_RX_BCAST] = {
.area = ETHER,
.name = "n_rx_bcast",
.offset = 0x00,
.start = 31,
.end = 0,
},
};
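/* As an illustration of how the table above is consumed: reading "n_runt" for
 * a port is a 4-byte SPI read at regs->stats[MAC][port] + 0x0, followed by
 * extracting bits 31:24 of that word with sja1105_unpack(), exactly as
 * sja1105_port_counter_read() below does for every descriptor.
 */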
static int sja1105_port_counter_read(struct sja1105_private *priv, int port,
enum sja1105_counter_index idx, u64 *ctr)
{
const struct sja1105_port_counter *c = &sja1105_port_counters[idx];
size_t size = c->is_64bit ? 8 : 4;
u8 buf[8] = {0};
u64 regs;
int rc;
regs = priv->info->regs->stats[c->area][port];
rc = sja1105_xfer_buf(priv, SPI_READ, regs + c->offset, buf, size);
if (rc)
return rc;
sja1105_unpack(buf, ctr, c->start, c->end, size);
return 0;
}
void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
struct sja1105_private *priv = ds->priv;
enum sja1105_counter_index max_ctr, i;
int rc, k = 0;
if (priv->info->device_id == SJA1105E_DEVICE_ID ||
priv->info->device_id == SJA1105T_DEVICE_ID)
max_ctr = __MAX_SJA1105ET_PORT_COUNTER;
else
max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
for (i = 0; i < max_ctr; i++) {
rc = sja1105_port_counter_read(priv, port, i, &data[k++]);
if (rc) {
dev_err(ds->dev,
"Failed to read port %d counters: %d\n",
port, rc);
break;
}
}
}
void sja1105_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
struct sja1105_private *priv = ds->priv;
enum sja1105_counter_index max_ctr, i;
char *p = data;
if (stringset != ETH_SS_STATS)
return;
if (priv->info->device_id == SJA1105E_DEVICE_ID ||
priv->info->device_id == SJA1105T_DEVICE_ID)
max_ctr = __MAX_SJA1105ET_PORT_COUNTER;
else
max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
for (i = 0; i < max_ctr; i++) {
strscpy(p, sja1105_port_counters[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
}
int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct sja1105_private *priv = ds->priv;
enum sja1105_counter_index max_ctr, i;
int sset_count = 0;
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
if (priv->info->device_id == SJA1105E_DEVICE_ID ||
priv->info->device_id == SJA1105T_DEVICE_ID)
max_ctr = __MAX_SJA1105ET_PORT_COUNTER;
else
max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
for (i = 0; i < max_ctr; i++) {
if (!strlen(sja1105_port_counters[i].name))
continue;
sset_count++;
}
return sset_count;
}
| linux-master | drivers/net/dsa/sja1105/sja1105_ethtool.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <[email protected]>
*/
#include <linux/spi/spi.h>
#include "sja1105.h"
/* The adjfine API clamps ppb between [-32,768,000, 32,768,000], and
* therefore scaled_ppm between [-2,147,483,648, 2,147,483,647].
* Set the maximum supported ppb to a round value smaller than the maximum.
*
* In percentage terms, this is a +/- 0.032x adjustment of the
* free-running counter (0.968x to 1.032x).
*/
#define SJA1105_MAX_ADJ_PPB 32000000
#define SJA1105_SIZE_PTP_CMD 4
/* PTPSYNCTS has no interrupt or update mechanism, because the intended
* hardware use case is for the timestamp to be collected synchronously,
* immediately after the CAS_MASTER SJA1105 switch has performed a CASSYNC
* one-shot toggle (no return to level) on the PTP_CLK pin. When used as a
* generic extts source, the PTPSYNCTS register needs polling and a comparison
* with the old value. The polling interval is configured as the Nyquist rate
* of a signal with 50% duty cycle and 1Hz frequency, which is sadly all that
* this hardware can do (but may be enough for some setups). Anything of higher
* frequency than 1 Hz will be lost, since there is no timestamp FIFO.
*/
#define SJA1105_EXTTS_INTERVAL (HZ / 6)
/* This range is actually +/- SJA1105_MAX_ADJ_PPB
* divided by 1000 (ppb -> ppm) and with a 16-bit
* "fractional" part (actually fixed point).
* |
* v
* Convert scaled_ppm from the +/- ((10^6) << 16) range
* into the +/- (1 << 31) range.
*
* This forgoes a "ppb" numeric representation (up to NSEC_PER_SEC)
* and defines the scaling factor between scaled_ppm and the actual
* frequency adjustments of the PHC.
*
* ptpclkrate = scaled_ppm * 2^31 / (10^6 * 2^16)
* simplifies to
* ptpclkrate = scaled_ppm * 2^9 / 5^6
*/
#define SJA1105_CC_MULT_NUM (1 << 9)
#define SJA1105_CC_MULT_DEM 15625
#define SJA1105_CC_MULT 0x80000000
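/* Quick sanity check of the simplification above: scaled_ppm has a 16-bit
 * fractional part, so 1 ppm == 65536. Plugging that in gives
 * 65536 * 512 / 15625 ~= 2147, and 2147 / 2^31 ~= 1e-6, i.e. a 1 ppm
 * adjustment relative to the nominal SJA1105_CC_MULT rate.
 */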
enum sja1105_ptp_clk_mode {
PTP_ADD_MODE = 1,
PTP_SET_MODE = 0,
};
#define extts_to_data(t) \
container_of((t), struct sja1105_ptp_data, extts_timer)
#define ptp_caps_to_data(d) \
container_of((d), struct sja1105_ptp_data, caps)
#define ptp_data_to_sja1105(d) \
container_of((d), struct sja1105_private, ptp_data)
int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
struct sja1105_private *priv = ds->priv;
struct hwtstamp_config config;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
priv->hwts_tx_en &= ~BIT(port);
break;
case HWTSTAMP_TX_ON:
priv->hwts_tx_en |= BIT(port);
break;
default:
return -ERANGE;
}
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
priv->hwts_rx_en &= ~BIT(port);
break;
default:
priv->hwts_rx_en |= BIT(port);
break;
}
if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
return -EFAULT;
return 0;
}
int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
struct sja1105_private *priv = ds->priv;
struct hwtstamp_config config;
config.flags = 0;
if (priv->hwts_tx_en & BIT(port))
config.tx_type = HWTSTAMP_TX_ON;
else
config.tx_type = HWTSTAMP_TX_OFF;
if (priv->hwts_rx_en & BIT(port))
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
else
config.rx_filter = HWTSTAMP_FILTER_NONE;
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *info)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
/* Called during cleanup */
if (!ptp_data->clock)
return -ENODEV;
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
info->phc_index = ptp_clock_index(ptp_data->clock);
return 0;
}
void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
enum packing_op op)
{
const int size = SJA1105_SIZE_PTP_CMD;
/* No need to keep this as part of the structure */
u64 valid = 1;
sja1105_packing(buf, &valid, 31, 31, size, op);
sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
sja1105_packing(buf, &cmd->startptpcp, 28, 28, size, op);
sja1105_packing(buf, &cmd->stopptpcp, 27, 27, size, op);
sja1105_packing(buf, &cmd->resptp, 2, 2, size, op);
sja1105_packing(buf, &cmd->corrclk4ts, 1, 1, size, op);
sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
}
void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
enum packing_op op)
{
const int size = SJA1105_SIZE_PTP_CMD;
/* No need to keep this as part of the structure */
u64 valid = 1;
sja1105_packing(buf, &valid, 31, 31, size, op);
sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
sja1105_packing(buf, &cmd->startptpcp, 28, 28, size, op);
sja1105_packing(buf, &cmd->stopptpcp, 27, 27, size, op);
sja1105_packing(buf, &cmd->resptp, 3, 3, size, op);
sja1105_packing(buf, &cmd->corrclk4ts, 2, 2, size, op);
sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
}
int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
sja1105_spi_rw_mode_t rw)
{
const struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
int rc;
if (rw == SPI_WRITE)
priv->info->ptp_cmd_packing(buf, cmd, PACK);
rc = sja1105_xfer_buf(priv, rw, regs->ptp_control, buf,
SJA1105_SIZE_PTP_CMD);
if (rw == SPI_READ)
priv->info->ptp_cmd_packing(buf, cmd, UNPACK);
return rc;
}
/* The switch returns partial timestamps (24 bits for SJA1105 E/T, which wrap
* around in 0.135 seconds, and 32 bits for P/Q/R/S, wrapping around in 34.35
* seconds).
*
* This receives the RX or TX MAC timestamps, provided by hardware as
* the lower bits of the cycle counter, sampled at the time the timestamp was
* collected.
*
* To reconstruct into a full 64-bit-wide timestamp, the cycle counter is
* read and the high-order bits are filled in.
*
* Must be called within one wraparound period of the partial timestamp since
* it was generated by the MAC.
*/
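/* Worked example, assuming the 24-bit partial timestamps of E/T: with
 * now = 0x1000010 and ts_partial = 0xfffff0, the low 24 bits of now
 * (0x000010) are <= ts_partial, so the counter wrapped after the timestamp
 * was taken; the reconstructed value is (0x1000000 | 0xfffff0) - 0x1000000
 * = 0xfffff0, i.e. slightly in the past of "now", as expected.
 */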
static u64 sja1105_tstamp_reconstruct(struct dsa_switch *ds, u64 now,
u64 ts_partial)
{
struct sja1105_private *priv = ds->priv;
u64 partial_tstamp_mask = CYCLECOUNTER_MASK(priv->info->ptp_ts_bits);
u64 ts_reconstructed;
ts_reconstructed = (now & ~partial_tstamp_mask) | ts_partial;
/* Check lower bits of current cycle counter against the timestamp.
* If the current cycle counter is lower than the partial timestamp,
* then wraparound surely occurred and must be accounted for.
*/
if ((now & partial_tstamp_mask) <= ts_partial)
ts_reconstructed -= (partial_tstamp_mask + 1);
return ts_reconstructed;
}
/* Reads the SPI interface for an egress timestamp generated by the switch
* for frames sent using management routes.
*
* SJA1105 E/T layout of the 4-byte SPI payload:
*
* 31 23 15 7 0
* | | | | |
* +-----+-----+-----+ ^
* ^ |
* | |
* 24-bit timestamp Update bit
*
*
* SJA1105 P/Q/R/S layout of the 8-byte SPI payload:
*
* 31 23 15 7 0 63 55 47 39 32
* | | | | | | | | | |
* ^ +-----+-----+-----+-----+
* | ^
* | |
* Update bit 32-bit timestamp
*
* Notice that the update bit is in the same place.
* To have common code for E/T and P/Q/R/S for reading the timestamp,
* we need to juggle with the offset and the bit indices.
*/
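/* Concretely (assuming ptpegr_ts_bytes is 4 on E/T and 8 on P/Q/R/S, per the
 * layouts above): on E/T, tstamp_bit_end = 0 + (32 - 24) = 8 and
 * tstamp_bit_start = 31, i.e. the 24-bit timestamp sits in bits 31:8 of the
 * first word; on P/Q/R/S, tstamp_bit_end = 32 and tstamp_bit_start = 63,
 * i.e. the full 32-bit timestamp occupies the second word.
 */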
static int sja1105_ptpegr_ts_poll(struct dsa_switch *ds, int port, u64 *ts)
{
struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
int tstamp_bit_start, tstamp_bit_end;
int timeout = 10;
u8 packed_buf[8];
u64 update;
int rc;
do {
rc = sja1105_xfer_buf(priv, SPI_READ, regs->ptpegr_ts[port],
packed_buf, priv->info->ptpegr_ts_bytes);
if (rc < 0)
return rc;
sja1105_unpack(packed_buf, &update, 0, 0,
priv->info->ptpegr_ts_bytes);
if (update)
break;
usleep_range(10, 50);
} while (--timeout);
if (!timeout)
return -ETIMEDOUT;
/* Point the end bit to the second 32-bit word on P/Q/R/S,
* no-op on E/T.
*/
tstamp_bit_end = (priv->info->ptpegr_ts_bytes - 4) * 8;
/* Shift the 24-bit timestamp on E/T to be collected from 31:8.
* No-op on P/Q/R/S.
*/
tstamp_bit_end += 32 - priv->info->ptp_ts_bits;
tstamp_bit_start = tstamp_bit_end + priv->info->ptp_ts_bits - 1;
*ts = 0;
sja1105_unpack(packed_buf, ts, tstamp_bit_start, tstamp_bit_end,
priv->info->ptpegr_ts_bytes);
return 0;
}
/* Caller must hold ptp_data->lock */
static int sja1105_ptpclkval_read(struct sja1105_private *priv, u64 *ticks,
struct ptp_system_timestamp *ptp_sts)
{
const struct sja1105_regs *regs = priv->info->regs;
return sja1105_xfer_u64(priv, SPI_READ, regs->ptpclkval, ticks,
ptp_sts);
}
/* Caller must hold ptp_data->lock */
static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks,
struct ptp_system_timestamp *ptp_sts)
{
const struct sja1105_regs *regs = priv->info->regs;
return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpclkval, &ticks,
ptp_sts);
}
static void sja1105_extts_poll(struct sja1105_private *priv)
{
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
const struct sja1105_regs *regs = priv->info->regs;
struct ptp_clock_event event;
u64 ptpsyncts = 0;
int rc;
rc = sja1105_xfer_u64(priv, SPI_READ, regs->ptpsyncts, &ptpsyncts,
NULL);
if (rc < 0)
dev_err_ratelimited(priv->ds->dev,
"Failed to read PTPSYNCTS: %d\n", rc);
if (ptpsyncts && ptp_data->ptpsyncts != ptpsyncts) {
event.index = 0;
event.type = PTP_CLOCK_EXTTS;
event.timestamp = ns_to_ktime(sja1105_ticks_to_ns(ptpsyncts));
ptp_clock_event(ptp_data->clock, &event);
ptp_data->ptpsyncts = ptpsyncts;
}
}
static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
{
struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
struct dsa_switch *ds = priv->ds;
struct sk_buff *skb;
mutex_lock(&ptp_data->lock);
while ((skb = skb_dequeue(&ptp_data->skb_rxtstamp_queue)) != NULL) {
struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
u64 ticks, ts;
int rc;
rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
if (rc < 0) {
dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
kfree_skb(skb);
continue;
}
*shwt = (struct skb_shared_hwtstamps) {0};
ts = SJA1105_SKB_CB(skb)->tstamp;
ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
netif_rx(skb);
}
if (ptp_data->extts_enabled)
sja1105_extts_poll(priv);
mutex_unlock(&ptp_data->lock);
/* Don't restart */
return -1;
}
bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
if (!(priv->hwts_rx_en & BIT(port)))
return false;
/* We need to read the full PTP clock to reconstruct the Rx
* timestamp. For that we need a sleepable context.
*/
skb_queue_tail(&ptp_data->skb_rxtstamp_queue, skb);
ptp_schedule_worker(ptp_data->clock, 0);
return true;
}
bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
u64 ts = SJA1105_SKB_CB(skb)->tstamp;
*shwt = (struct skb_shared_hwtstamps) {0};
shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
/* Don't defer */
return false;
}
/* Called from dsa_skb_defer_rx_timestamp */
bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
{
struct sja1105_private *priv = ds->priv;
return priv->info->rxtstamp(ds, port, skb);
}
void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
enum sja1110_meta_tstamp dir, u64 tstamp)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shwt = {0};
/* We don't care about RX timestamps on the CPU port */
if (dir == SJA1110_META_TSTAMP_RX)
return;
spin_lock(&ptp_data->skb_txtstamp_queue.lock);
skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) {
if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
continue;
__skb_unlink(skb, &ptp_data->skb_txtstamp_queue);
skb_match = skb;
break;
}
spin_unlock(&ptp_data->skb_txtstamp_queue.lock);
if (WARN_ON(!skb_match))
return;
shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
skb_complete_tx_timestamp(skb_match, &shwt);
}
/* In addition to cloning the skb which is done by the common
* sja1105_port_txtstamp, we need to generate a timestamp ID and save the
* packet to the TX timestamping queue.
*/
void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
u8 ts_id;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
spin_lock(&priv->ts_id_lock);
ts_id = priv->ts_id;
/* Deal automatically with 8-bit wraparound */
priv->ts_id++;
SJA1105_SKB_CB(clone)->ts_id = ts_id;
spin_unlock(&priv->ts_id_lock);
skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone);
}
/* Called from dsa_skb_tx_timestamp. This callback is just to clone
* the skb and have it available in SJA1105_SKB_CB in the .port_deferred_xmit
* callback, where we will timestamp it synchronously.
*/
void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sja1105_private *priv = ds->priv;
struct sk_buff *clone;
if (!(priv->hwts_tx_en & BIT(port)))
return;
clone = skb_clone_sk(skb);
if (!clone)
return;
SJA1105_SKB_CB(skb)->clone = clone;
if (priv->info->txtstamp)
priv->info->txtstamp(ds, port, skb);
}
static int sja1105_ptp_reset(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct sja1105_ptp_cmd cmd = ptp_data->cmd;
int rc;
mutex_lock(&ptp_data->lock);
cmd.resptp = 1;
dev_dbg(ds->dev, "Resetting PTP clock\n");
rc = sja1105_ptp_commit(ds, &cmd, SPI_WRITE);
sja1105_tas_clockstep(priv->ds);
mutex_unlock(&ptp_data->lock);
return rc;
}
/* Caller must hold ptp_data->lock */
int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
struct ptp_system_timestamp *ptp_sts)
{
struct sja1105_private *priv = ds->priv;
u64 ticks;
int rc;
rc = sja1105_ptpclkval_read(priv, &ticks, ptp_sts);
if (rc < 0) {
dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
return rc;
}
*ns = sja1105_ticks_to_ns(ticks);
return 0;
}
static int sja1105_ptp_gettimex(struct ptp_clock_info *ptp,
struct timespec64 *ts,
struct ptp_system_timestamp *ptp_sts)
{
struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
u64 now = 0;
int rc;
mutex_lock(&ptp_data->lock);
rc = __sja1105_ptp_gettimex(priv->ds, &now, ptp_sts);
*ts = ns_to_timespec64(now);
mutex_unlock(&ptp_data->lock);
return rc;
}
/* Caller must hold ptp_data->lock */
static int sja1105_ptp_mode_set(struct sja1105_private *priv,
enum sja1105_ptp_clk_mode mode)
{
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
if (ptp_data->cmd.ptpclkadd == mode)
return 0;
ptp_data->cmd.ptpclkadd = mode;
return sja1105_ptp_commit(priv->ds, &ptp_data->cmd, SPI_WRITE);
}
/* Write to PTPCLKVAL while PTPCLKADD is 0 */
int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
struct ptp_system_timestamp *ptp_sts)
{
struct sja1105_private *priv = ds->priv;
u64 ticks = ns_to_sja1105_ticks(ns);
int rc;
rc = sja1105_ptp_mode_set(priv, PTP_SET_MODE);
if (rc < 0) {
dev_err(priv->ds->dev, "Failed to put PTPCLK in set mode\n");
return rc;
}
rc = sja1105_ptpclkval_write(priv, ticks, ptp_sts);
sja1105_tas_clockstep(priv->ds);
return rc;
}
static int sja1105_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
u64 ns = timespec64_to_ns(ts);
int rc;
mutex_lock(&ptp_data->lock);
rc = __sja1105_ptp_settime(priv->ds, ns, NULL);
mutex_unlock(&ptp_data->lock);
return rc;
}
static int sja1105_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
const struct sja1105_regs *regs = priv->info->regs;
u32 clkrate32;
s64 clkrate;
int rc;
clkrate = (s64)scaled_ppm * SJA1105_CC_MULT_NUM;
clkrate = div_s64(clkrate, SJA1105_CC_MULT_DEM);
/* Take a +/- value and re-center it around 2^31. */
clkrate = SJA1105_CC_MULT + clkrate;
WARN_ON(abs(clkrate) >= GENMASK_ULL(31, 0));
clkrate32 = clkrate;
mutex_lock(&ptp_data->lock);
rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkrate, &clkrate32,
NULL);
sja1105_tas_adjfreq(priv->ds);
mutex_unlock(&ptp_data->lock);
return rc;
}
/* Write to PTPCLKVAL while PTPCLKADD is 1 */
int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta)
{
struct sja1105_private *priv = ds->priv;
s64 ticks = ns_to_sja1105_ticks(delta);
int rc;
rc = sja1105_ptp_mode_set(priv, PTP_ADD_MODE);
if (rc < 0) {
dev_err(priv->ds->dev, "Failed to put PTPCLK in add mode\n");
return rc;
}
rc = sja1105_ptpclkval_write(priv, ticks, NULL);
sja1105_tas_clockstep(priv->ds);
return rc;
}
static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
int rc;
mutex_lock(&ptp_data->lock);
rc = __sja1105_ptp_adjtime(priv->ds, delta);
mutex_unlock(&ptp_data->lock);
return rc;
}
static void sja1105_ptp_extts_setup_timer(struct sja1105_ptp_data *ptp_data)
{
unsigned long expires = ((jiffies / SJA1105_EXTTS_INTERVAL) + 1) *
SJA1105_EXTTS_INTERVAL;
mod_timer(&ptp_data->extts_timer, expires);
}
static void sja1105_ptp_extts_timer(struct timer_list *t)
{
struct sja1105_ptp_data *ptp_data = extts_to_data(t);
ptp_schedule_worker(ptp_data->clock, 0);
sja1105_ptp_extts_setup_timer(ptp_data);
}
static int sja1105_change_ptp_clk_pin_func(struct sja1105_private *priv,
enum ptp_pin_function func)
{
struct sja1105_avb_params_entry *avb;
enum ptp_pin_function old_func;
avb = priv->static_config.tables[BLK_IDX_AVB_PARAMS].entries;
if (priv->info->device_id == SJA1105E_DEVICE_ID ||
priv->info->device_id == SJA1105T_DEVICE_ID ||
avb->cas_master)
old_func = PTP_PF_PEROUT;
else
old_func = PTP_PF_EXTTS;
if (func == old_func)
return 0;
avb->cas_master = (func == PTP_PF_PEROUT);
return sja1105_dynamic_config_write(priv, BLK_IDX_AVB_PARAMS, 0, avb,
true);
}
/* The PTP_CLK pin may be configured to toggle with a 50% duty cycle and a
* frequency f:
*
* NSEC_PER_SEC
* f = ----------------------
* (PTPPINDUR * 8 ns) * 2
*/
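/* For example, with the 8 ns tick granularity above, a 1 Hz output has a
 * 0.5 s half-period, so PTPPINDUR = 500000000 ns / 8 ns = 62500000 ticks;
 * this matches the ns_to_sja1105_ticks(pin_duration / 2) conversion done in
 * sja1105_per_out_enable() below.
 */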
static int sja1105_per_out_enable(struct sja1105_private *priv,
struct ptp_perout_request *perout,
bool on)
{
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_ptp_cmd cmd = ptp_data->cmd;
int rc;
/* We only support one channel */
if (perout->index != 0)
return -EOPNOTSUPP;
/* Reject requests with unsupported flags */
if (perout->flags)
return -EOPNOTSUPP;
mutex_lock(&ptp_data->lock);
rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_PEROUT);
if (rc)
goto out;
if (on) {
struct timespec64 pin_duration_ts = {
.tv_sec = perout->period.sec,
.tv_nsec = perout->period.nsec,
};
struct timespec64 pin_start_ts = {
.tv_sec = perout->start.sec,
.tv_nsec = perout->start.nsec,
};
u64 pin_duration = timespec64_to_ns(&pin_duration_ts);
u64 pin_start = timespec64_to_ns(&pin_start_ts);
u32 pin_duration32;
u64 now;
/* ptppindur: 32 bit register which holds the interval between
* 2 edges on PTP_CLK. So check for truncation which happens
* at periods larger than around 68.7 seconds.
*/
pin_duration = ns_to_sja1105_ticks(pin_duration / 2);
if (pin_duration > U32_MAX) {
rc = -ERANGE;
goto out;
}
pin_duration32 = pin_duration;
/* ptppins: 64 bit register which needs to hold a PTP time
* larger than the current time, otherwise the startptpcp
* command won't do anything. So advance the current time
* by a number of periods in a way that won't alter the
* phase offset.
*/
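/* Rough sketch of what future_base_time() is assumed to do (it is defined
* elsewhere): push the start time forward by whole periods until it lies
* past the given reference, i.e. something like
*
*   n     = (ref - start) / period + 1;
*   start = start + n * period;    (only when start < ref)
*
* which keeps the requested phase while guaranteeing a future first edge.
*/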
rc = __sja1105_ptp_gettimex(priv->ds, &now, NULL);
if (rc < 0)
goto out;
pin_start = future_base_time(pin_start, pin_duration,
now + 1ull * NSEC_PER_SEC);
pin_start = ns_to_sja1105_ticks(pin_start);
rc = sja1105_xfer_u64(priv, SPI_WRITE, regs->ptppinst,
&pin_start, NULL);
if (rc < 0)
goto out;
rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptppindur,
&pin_duration32, NULL);
if (rc < 0)
goto out;
}
if (on)
cmd.startptpcp = true;
else
cmd.stopptpcp = true;
rc = sja1105_ptp_commit(priv->ds, &cmd, SPI_WRITE);
out:
mutex_unlock(&ptp_data->lock);
return rc;
}
static int sja1105_extts_enable(struct sja1105_private *priv,
struct ptp_extts_request *extts,
bool on)
{
int rc;
/* We only support one channel */
if (extts->index != 0)
return -EOPNOTSUPP;
/* Reject requests with unsupported flags */
if (extts->flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
PTP_FALLING_EDGE |
PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
/* We can only enable time stamping on both edges, sadly. */
if ((extts->flags & PTP_STRICT_FLAGS) &&
(extts->flags & PTP_ENABLE_FEATURE) &&
(extts->flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
return -EOPNOTSUPP;
rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_EXTTS);
if (rc)
return rc;
priv->ptp_data.extts_enabled = on;
if (on)
sja1105_ptp_extts_setup_timer(&priv->ptp_data);
else
del_timer_sync(&priv->ptp_data.extts_timer);
return 0;
}
static int sja1105_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *req, int on)
{
struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
int rc = -EOPNOTSUPP;
if (req->type == PTP_CLK_REQ_PEROUT)
rc = sja1105_per_out_enable(priv, &req->perout, on);
else if (req->type == PTP_CLK_REQ_EXTTS)
rc = sja1105_extts_enable(priv, &req->extts, on);
return rc;
}
static int sja1105_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
if (chan != 0 || pin != 0)
return -1;
switch (func) {
case PTP_PF_NONE:
case PTP_PF_PEROUT:
break;
case PTP_PF_EXTTS:
if (priv->info->device_id == SJA1105E_DEVICE_ID ||
priv->info->device_id == SJA1105T_DEVICE_ID)
return -1;
break;
default:
return -1;
}
return 0;
}
static struct ptp_pin_desc sja1105_ptp_pin = {
.name = "ptp_clk",
.index = 0,
.func = PTP_PF_NONE,
};
int sja1105_ptp_clock_register(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
ptp_data->caps = (struct ptp_clock_info) {
.owner = THIS_MODULE,
.name = "SJA1105 PHC",
.adjfine = sja1105_ptp_adjfine,
.adjtime = sja1105_ptp_adjtime,
.gettimex64 = sja1105_ptp_gettimex,
.settime64 = sja1105_ptp_settime,
.enable = sja1105_ptp_enable,
.verify = sja1105_ptp_verify_pin,
.do_aux_work = sja1105_rxtstamp_work,
.max_adj = SJA1105_MAX_ADJ_PPB,
.pin_config = &sja1105_ptp_pin,
.n_pins = 1,
.n_ext_ts = 1,
.n_per_out = 1,
};
/* Only used on SJA1105 */
skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
/* Only used on SJA1110 */
skb_queue_head_init(&ptp_data->skb_txtstamp_queue);
ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
if (IS_ERR_OR_NULL(ptp_data->clock))
return PTR_ERR(ptp_data->clock);
ptp_data->cmd.corrclk4ts = true;
ptp_data->cmd.ptpclkadd = PTP_SET_MODE;
timer_setup(&ptp_data->extts_timer, sja1105_ptp_extts_timer, 0);
return sja1105_ptp_reset(ds);
}
void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
if (IS_ERR_OR_NULL(ptp_data->clock))
return;
del_timer_sync(&ptp_data->extts_timer);
ptp_cancel_worker_sync(ptp_data->clock);
skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
ptp_data->clock = NULL;
}
void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port,
struct sk_buff *skb)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct skb_shared_hwtstamps shwt = {0};
u64 ticks, ts;
int rc;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
mutex_lock(&ptp_data->lock);
rc = sja1105_ptpegr_ts_poll(ds, port, &ts);
if (rc < 0) {
dev_err(ds->dev, "timed out polling for tstamp\n");
kfree_skb(skb);
goto out;
}
rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
if (rc < 0) {
dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
kfree_skb(skb);
goto out;
}
ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
skb_complete_tx_timestamp(skb, &shwt);
out:
mutex_unlock(&ptp_data->lock);
}
| linux-master | drivers/net/dsa/sja1105/sja1105_ptp.c |
/*
* B53 switch driver main logic
*
* Copyright (C) 2011-2013 Jonas Gorski <[email protected]>
* Copyright (C) 2016 Florian Fainelli <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/b53.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <net/dsa.h>
#include "b53_regs.h"
#include "b53_priv.h"
struct b53_mib_desc {
u8 size;
u8 offset;
const char *name;
};
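/* Usage sketch (this mirrors b53_get_ethtool_stats() further below): an
* entry with size == 8 is fetched with b53_read64(), anything else with
* b53_read32(), from B53_MIB_PAGE(port) at the entry's offset.
*/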
/* BCM5365 MIB counters */
static const struct b53_mib_desc b53_mibs_65[] = {
{ 8, 0x00, "TxOctets" },
{ 4, 0x08, "TxDropPkts" },
{ 4, 0x10, "TxBroadcastPkts" },
{ 4, 0x14, "TxMulticastPkts" },
{ 4, 0x18, "TxUnicastPkts" },
{ 4, 0x1c, "TxCollisions" },
{ 4, 0x20, "TxSingleCollision" },
{ 4, 0x24, "TxMultipleCollision" },
{ 4, 0x28, "TxDeferredTransmit" },
{ 4, 0x2c, "TxLateCollision" },
{ 4, 0x30, "TxExcessiveCollision" },
{ 4, 0x38, "TxPausePkts" },
{ 8, 0x44, "RxOctets" },
{ 4, 0x4c, "RxUndersizePkts" },
{ 4, 0x50, "RxPausePkts" },
{ 4, 0x54, "Pkts64Octets" },
{ 4, 0x58, "Pkts65to127Octets" },
{ 4, 0x5c, "Pkts128to255Octets" },
{ 4, 0x60, "Pkts256to511Octets" },
{ 4, 0x64, "Pkts512to1023Octets" },
{ 4, 0x68, "Pkts1024to1522Octets" },
{ 4, 0x6c, "RxOversizePkts" },
{ 4, 0x70, "RxJabbers" },
{ 4, 0x74, "RxAlignmentErrors" },
{ 4, 0x78, "RxFCSErrors" },
{ 8, 0x7c, "RxGoodOctets" },
{ 4, 0x84, "RxDropPkts" },
{ 4, 0x88, "RxUnicastPkts" },
{ 4, 0x8c, "RxMulticastPkts" },
{ 4, 0x90, "RxBroadcastPkts" },
{ 4, 0x94, "RxSAChanges" },
{ 4, 0x98, "RxFragments" },
};
#define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65)
/* BCM63xx MIB counters */
static const struct b53_mib_desc b53_mibs_63xx[] = {
{ 8, 0x00, "TxOctets" },
{ 4, 0x08, "TxDropPkts" },
{ 4, 0x0c, "TxQoSPkts" },
{ 4, 0x10, "TxBroadcastPkts" },
{ 4, 0x14, "TxMulticastPkts" },
{ 4, 0x18, "TxUnicastPkts" },
{ 4, 0x1c, "TxCollisions" },
{ 4, 0x20, "TxSingleCollision" },
{ 4, 0x24, "TxMultipleCollision" },
{ 4, 0x28, "TxDeferredTransmit" },
{ 4, 0x2c, "TxLateCollision" },
{ 4, 0x30, "TxExcessiveCollision" },
{ 4, 0x38, "TxPausePkts" },
{ 8, 0x3c, "TxQoSOctets" },
{ 8, 0x44, "RxOctets" },
{ 4, 0x4c, "RxUndersizePkts" },
{ 4, 0x50, "RxPausePkts" },
{ 4, 0x54, "Pkts64Octets" },
{ 4, 0x58, "Pkts65to127Octets" },
{ 4, 0x5c, "Pkts128to255Octets" },
{ 4, 0x60, "Pkts256to511Octets" },
{ 4, 0x64, "Pkts512to1023Octets" },
{ 4, 0x68, "Pkts1024to1522Octets" },
{ 4, 0x6c, "RxOversizePkts" },
{ 4, 0x70, "RxJabbers" },
{ 4, 0x74, "RxAlignmentErrors" },
{ 4, 0x78, "RxFCSErrors" },
{ 8, 0x7c, "RxGoodOctets" },
{ 4, 0x84, "RxDropPkts" },
{ 4, 0x88, "RxUnicastPkts" },
{ 4, 0x8c, "RxMulticastPkts" },
{ 4, 0x90, "RxBroadcastPkts" },
{ 4, 0x94, "RxSAChanges" },
{ 4, 0x98, "RxFragments" },
{ 4, 0xa0, "RxSymbolErrors" },
{ 4, 0xa4, "RxQoSPkts" },
{ 8, 0xa8, "RxQoSOctets" },
{ 4, 0xb0, "Pkts1523to2047Octets" },
{ 4, 0xb4, "Pkts2048to4095Octets" },
{ 4, 0xb8, "Pkts4096to8191Octets" },
{ 4, 0xbc, "Pkts8192to9728Octets" },
{ 4, 0xc0, "RxDiscarded" },
};
#define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx)
/* MIB counters */
static const struct b53_mib_desc b53_mibs[] = {
{ 8, 0x00, "TxOctets" },
{ 4, 0x08, "TxDropPkts" },
{ 4, 0x10, "TxBroadcastPkts" },
{ 4, 0x14, "TxMulticastPkts" },
{ 4, 0x18, "TxUnicastPkts" },
{ 4, 0x1c, "TxCollisions" },
{ 4, 0x20, "TxSingleCollision" },
{ 4, 0x24, "TxMultipleCollision" },
{ 4, 0x28, "TxDeferredTransmit" },
{ 4, 0x2c, "TxLateCollision" },
{ 4, 0x30, "TxExcessiveCollision" },
{ 4, 0x38, "TxPausePkts" },
{ 8, 0x50, "RxOctets" },
{ 4, 0x58, "RxUndersizePkts" },
{ 4, 0x5c, "RxPausePkts" },
{ 4, 0x60, "Pkts64Octets" },
{ 4, 0x64, "Pkts65to127Octets" },
{ 4, 0x68, "Pkts128to255Octets" },
{ 4, 0x6c, "Pkts256to511Octets" },
{ 4, 0x70, "Pkts512to1023Octets" },
{ 4, 0x74, "Pkts1024to1522Octets" },
{ 4, 0x78, "RxOversizePkts" },
{ 4, 0x7c, "RxJabbers" },
{ 4, 0x80, "RxAlignmentErrors" },
{ 4, 0x84, "RxFCSErrors" },
{ 8, 0x88, "RxGoodOctets" },
{ 4, 0x90, "RxDropPkts" },
{ 4, 0x94, "RxUnicastPkts" },
{ 4, 0x98, "RxMulticastPkts" },
{ 4, 0x9c, "RxBroadcastPkts" },
{ 4, 0xa0, "RxSAChanges" },
{ 4, 0xa4, "RxFragments" },
{ 4, 0xa8, "RxJumboPkts" },
{ 4, 0xac, "RxSymbolErrors" },
{ 4, 0xc0, "RxDiscarded" },
};
#define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs)
static const struct b53_mib_desc b53_mibs_58xx[] = {
{ 8, 0x00, "TxOctets" },
{ 4, 0x08, "TxDropPkts" },
{ 4, 0x0c, "TxQPKTQ0" },
{ 4, 0x10, "TxBroadcastPkts" },
{ 4, 0x14, "TxMulticastPkts" },
{ 4, 0x18, "TxUnicastPKts" },
{ 4, 0x1c, "TxCollisions" },
{ 4, 0x20, "TxSingleCollision" },
{ 4, 0x24, "TxMultipleCollision" },
{ 4, 0x28, "TxDeferredCollision" },
{ 4, 0x2c, "TxLateCollision" },
{ 4, 0x30, "TxExcessiveCollision" },
{ 4, 0x34, "TxFrameInDisc" },
{ 4, 0x38, "TxPausePkts" },
{ 4, 0x3c, "TxQPKTQ1" },
{ 4, 0x40, "TxQPKTQ2" },
{ 4, 0x44, "TxQPKTQ3" },
{ 4, 0x48, "TxQPKTQ4" },
{ 4, 0x4c, "TxQPKTQ5" },
{ 8, 0x50, "RxOctets" },
{ 4, 0x58, "RxUndersizePkts" },
{ 4, 0x5c, "RxPausePkts" },
{ 4, 0x60, "RxPkts64Octets" },
{ 4, 0x64, "RxPkts65to127Octets" },
{ 4, 0x68, "RxPkts128to255Octets" },
{ 4, 0x6c, "RxPkts256to511Octets" },
{ 4, 0x70, "RxPkts512to1023Octets" },
{ 4, 0x74, "RxPkts1024toMaxPktsOctets" },
{ 4, 0x78, "RxOversizePkts" },
{ 4, 0x7c, "RxJabbers" },
{ 4, 0x80, "RxAlignmentErrors" },
{ 4, 0x84, "RxFCSErrors" },
{ 8, 0x88, "RxGoodOctets" },
{ 4, 0x90, "RxDropPkts" },
{ 4, 0x94, "RxUnicastPkts" },
{ 4, 0x98, "RxMulticastPkts" },
{ 4, 0x9c, "RxBroadcastPkts" },
{ 4, 0xa0, "RxSAChanges" },
{ 4, 0xa4, "RxFragments" },
{ 4, 0xa8, "RxJumboPkt" },
{ 4, 0xac, "RxSymblErr" },
{ 4, 0xb0, "InRangeErrCount" },
{ 4, 0xb4, "OutRangeErrCount" },
{ 4, 0xb8, "EEELpiEvent" },
{ 4, 0xbc, "EEELpiDuration" },
{ 4, 0xc0, "RxDiscard" },
{ 4, 0xc8, "TxQPKTQ6" },
{ 4, 0xcc, "TxQPKTQ7" },
{ 4, 0xd0, "TxPkts64Octets" },
{ 4, 0xd4, "TxPkts65to127Octets" },
{ 4, 0xd8, "TxPkts128to255Octets" },
{ 4, 0xdc, "TxPkts256to511Ocets" },
{ 4, 0xe0, "TxPkts512to1023Ocets" },
{ 4, 0xe4, "TxPkts1024toMaxPktOcets" },
};
#define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
static int b53_do_vlan_op(struct b53_device *dev, u8 op)
{
unsigned int i;
b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
for (i = 0; i < 10; i++) {
u8 vta;
b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
if (!(vta & VTA_START_CMD))
return 0;
usleep_range(100, 200);
}
return -EIO;
}
static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
struct b53_vlan *vlan)
{
if (is5325(dev)) {
u32 entry = 0;
if (vlan->members) {
entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
VA_UNTAG_S_25) | vlan->members;
if (dev->core_rev >= 3)
entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
else
entry |= VA_VALID_25;
}
b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
VTA_RW_STATE_WR | VTA_RW_OP_EN);
} else if (is5365(dev)) {
u16 entry = 0;
if (vlan->members)
entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
VTA_RW_STATE_WR | VTA_RW_OP_EN);
} else {
b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
(vlan->untag << VTE_UNTAG_S) | vlan->members);
b53_do_vlan_op(dev, VTA_CMD_WRITE);
}
dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
vid, vlan->members, vlan->untag);
}
static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
struct b53_vlan *vlan)
{
if (is5325(dev)) {
u32 entry = 0;
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
VTA_RW_STATE_RD | VTA_RW_OP_EN);
b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
if (dev->core_rev >= 3)
vlan->valid = !!(entry & VA_VALID_25_R4);
else
vlan->valid = !!(entry & VA_VALID_25);
vlan->members = entry & VA_MEMBER_MASK;
vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
} else if (is5365(dev)) {
u16 entry = 0;
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
VTA_RW_STATE_WR | VTA_RW_OP_EN);
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
vlan->valid = !!(entry & VA_VALID_65);
vlan->members = entry & VA_MEMBER_MASK;
vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
} else {
u32 entry = 0;
b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
b53_do_vlan_op(dev, VTA_CMD_READ);
b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
vlan->members = entry & VTE_MEMBERS;
vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
vlan->valid = true;
}
}
static void b53_set_forwarding(struct b53_device *dev, int enable)
{
u8 mgmt;
b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
if (enable)
mgmt |= SM_SW_FWD_EN;
else
mgmt &= ~SM_SW_FWD_EN;
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
/* Include IMP port in dumb forwarding mode
*/
b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
mgmt |= B53_MII_DUMB_FWDG_EN;
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
/* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
* frames should be flooded or not.
*/
b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
}
static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
bool enable_filtering)
{
u8 mgmt, vc0, vc1, vc4 = 0, vc5;
b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
if (is5325(dev) || is5365(dev)) {
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
} else if (is63xx(dev)) {
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
} else {
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
}
if (enable) {
vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
if (enable_filtering) {
vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
vc5 |= VC5_DROP_VTABLE_MISS;
} else {
vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
vc5 &= ~VC5_DROP_VTABLE_MISS;
}
if (is5325(dev))
vc0 &= ~VC0_RESERVED_1;
if (is5325(dev) || is5365(dev))
vc1 |= VC1_RX_MCST_TAG_EN;
} else {
vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
vc4 &= ~VC4_ING_VID_CHECK_MASK;
vc5 &= ~VC5_DROP_VTABLE_MISS;
if (is5325(dev) || is5365(dev))
vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
else
vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
if (is5325(dev) || is5365(dev))
vc1 &= ~VC1_RX_MCST_TAG_EN;
}
if (!is5325(dev) && !is5365(dev))
vc5 &= ~VC5_VID_FFF_EN;
b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
if (is5325(dev) || is5365(dev)) {
/* enable the high 8 bit vid check on 5325 */
if (is5325(dev) && enable)
b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
VC3_HIGH_8BIT_EN);
else
b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
} else if (is63xx(dev)) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
} else {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
}
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
dev->vlan_enabled = enable;
dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n",
port, enable, enable_filtering);
}
static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
{
u32 port_mask = 0;
u16 max_size = JMS_MIN_SIZE;
if (is5325(dev) || is5365(dev))
return -EINVAL;
if (enable) {
port_mask = dev->enabled_ports;
max_size = JMS_MAX_SIZE;
if (allow_10_100)
port_mask |= JPM_10_100_JUMBO_EN;
}
b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
}
static int b53_flush_arl(struct b53_device *dev, u8 mask)
{
unsigned int i;
b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
for (i = 0; i < 10; i++) {
u8 fast_age_ctrl;
b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
&fast_age_ctrl);
if (!(fast_age_ctrl & FAST_AGE_DONE))
goto out;
msleep(1);
}
return -ETIMEDOUT;
out:
/* Only age dynamic entries (default behavior) */
b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
return 0;
}
static int b53_fast_age_port(struct b53_device *dev, int port)
{
b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
return b53_flush_arl(dev, FAST_AGE_PORT);
}
static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
{
b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
return b53_flush_arl(dev, FAST_AGE_VLAN);
}
void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
struct b53_device *dev = ds->priv;
unsigned int i;
u16 pvlan;
/* Enable the IMP port to be in the same VLAN as the other ports
* on a per-port basis such that we only have Port i and IMP in
* the same VLAN.
*/
b53_for_each_port(dev, i) {
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
pvlan |= BIT(cpu_port);
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
}
}
EXPORT_SYMBOL(b53_imp_vlan_setup);
static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
bool unicast)
{
u16 uc;
b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
if (unicast)
uc |= BIT(port);
else
uc &= ~BIT(port);
b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
}
static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
bool multicast)
{
u16 mc;
b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
if (multicast)
mc |= BIT(port);
else
mc &= ~BIT(port);
b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
if (multicast)
mc |= BIT(port);
else
mc &= ~BIT(port);
b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
}
static void b53_port_set_learning(struct b53_device *dev, int port,
bool learning)
{
u16 reg;
b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
if (learning)
reg &= ~BIT(port);
else
reg |= BIT(port);
b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
}
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
unsigned int cpu_port;
int ret = 0;
u16 pvlan;
if (!dsa_is_user_port(ds, port))
return 0;
cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
b53_port_set_ucast_flood(dev, port, true);
b53_port_set_mcast_flood(dev, port, true);
b53_port_set_learning(dev, port, false);
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
if (ret)
return ret;
/* Clear the Rx and Tx disable bits and set to no spanning tree */
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
/* Set this port, and only this one to be in the default VLAN,
* if member of a bridge, restore its membership prior to
* bringing down this port.
*/
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
pvlan &= ~0x1ff;
pvlan |= BIT(port);
pvlan |= dev->ports[port].vlan_ctl_mask;
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
b53_imp_vlan_setup(ds, cpu_port);
/* If EEE was enabled, restore it */
if (dev->ports[port].eee.eee_enabled)
b53_eee_enable_set(ds, port, true);
return 0;
}
EXPORT_SYMBOL(b53_enable_port);
void b53_disable_port(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
u8 reg;
/* Disable Tx/Rx for the port */
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
if (dev->ops->irq_disable)
dev->ops->irq_disable(dev, port);
}
EXPORT_SYMBOL(b53_disable_port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
u8 hdr_ctl, val;
u16 reg;
/* Resolve which bit controls the Broadcom tag */
switch (port) {
case 8:
val = BRCM_HDR_P8_EN;
break;
case 7:
val = BRCM_HDR_P7_EN;
break;
case 5:
val = BRCM_HDR_P5_EN;
break;
default:
val = 0;
break;
}
/* Enable management mode if tagging is requested */
b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
if (tag_en)
hdr_ctl |= SM_SW_FWD_MODE;
else
hdr_ctl &= ~SM_SW_FWD_MODE;
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);
/* Configure the appropriate IMP port */
b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
if (port == 8)
hdr_ctl |= GC_FRM_MGMT_PORT_MII;
else if (port == 5)
hdr_ctl |= GC_FRM_MGMT_PORT_M;
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
/* Enable Broadcom tags for IMP port */
b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
if (tag_en)
hdr_ctl |= val;
else
hdr_ctl &= ~val;
b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl);
/* Registers below are only accessible on newer devices */
if (!is58xx(dev))
return;
/* Enable reception Broadcom tag for CPU TX (switch RX) to
* allow us to tag outgoing frames
*/
b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg);
if (tag_en)
reg &= ~BIT(port);
else
reg |= BIT(port);
b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg);
/* Enable transmission of Broadcom tags from the switch (CPU RX) to
* allow delivering frames to the per-port net_devices
*/
b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg);
if (tag_en)
reg &= ~BIT(port);
else
reg |= BIT(port);
b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg);
}
EXPORT_SYMBOL(b53_brcm_hdr_setup);
static void b53_enable_cpu_port(struct b53_device *dev, int port)
{
u8 port_ctrl;
/* BCM5325 CPU port is at 8 */
if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25)
port = B53_CPU_PORT;
port_ctrl = PORT_CTRL_RX_BCST_EN |
PORT_CTRL_RX_MCST_EN |
PORT_CTRL_RX_UCST_EN;
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
b53_brcm_hdr_setup(dev->ds, port);
b53_port_set_ucast_flood(dev, port, true);
b53_port_set_mcast_flood(dev, port, true);
b53_port_set_learning(dev, port, false);
}
static void b53_enable_mib(struct b53_device *dev)
{
u8 gc;
b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
}
static u16 b53_default_pvid(struct b53_device *dev)
{
if (is5325(dev) || is5365(dev))
return 1;
else
return 0;
}
static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
}
int b53_configure_vlan(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
struct b53_vlan vl = { 0 };
struct b53_vlan *v;
int i, def_vid;
u16 vid;
def_vid = b53_default_pvid(dev);
/* clear all vlan entries */
if (is5325(dev) || is5365(dev)) {
for (i = def_vid; i < dev->num_vlans; i++)
b53_set_vlan_entry(dev, i, &vl);
} else {
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);
/* Create an untagged VLAN entry for the default PVID in case
* CONFIG_VLAN_8021Q is disabled and there are no calls to
* dsa_slave_vlan_rx_add_vid() to create the default VLAN
* entry. Do this only when the tagging protocol is not
* DSA_TAG_PROTO_NONE
*/
b53_for_each_port(dev, i) {
v = &dev->vlans[def_vid];
v->members |= BIT(i);
if (!b53_vlan_port_needs_forced_tagged(ds, i))
v->untag = v->members;
b53_write16(dev, B53_VLAN_PAGE,
B53_VLAN_PORT_DEF_TAG(i), def_vid);
}
/* Upon initial call we have not set-up any VLANs, but upon
* system resume, we need to restore all VLAN entries.
*/
for (vid = def_vid; vid < dev->num_vlans; vid++) {
v = &dev->vlans[vid];
if (!v->members)
continue;
b53_set_vlan_entry(dev, vid, v);
b53_fast_age_vlan(dev, vid);
}
return 0;
}
EXPORT_SYMBOL(b53_configure_vlan);
static void b53_switch_reset_gpio(struct b53_device *dev)
{
int gpio = dev->reset_gpio;
if (gpio < 0)
return;
/* Reset sequence: RESET low(50ms)->high(20ms)
*/
gpio_set_value(gpio, 0);
mdelay(50);
gpio_set_value(gpio, 1);
mdelay(20);
dev->current_page = 0xff;
}
static int b53_switch_reset(struct b53_device *dev)
{
unsigned int timeout = 1000;
u8 mgmt, reg;
b53_switch_reset_gpio(dev);
if (is539x(dev)) {
b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
}
/* This is specific to 58xx devices here, do not use is58xx() which
* covers the larger Starfighter 2 family, including 7445/7278 which
* still use this driver as a library and need to perform the reset
* earlier.
*/
if (dev->chip_id == BCM58XX_DEVICE_ID ||
dev->chip_id == BCM583XX_DEVICE_ID) {
b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
reg |= SW_RST | EN_SW_RST | EN_CH_RST;
b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
do {
b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
if (!(reg & SW_RST))
break;
usleep_range(1000, 2000);
} while (timeout-- > 0);
if (timeout == 0) {
dev_err(dev->dev,
"Timeout waiting for SW_RST to clear!\n");
return -ETIMEDOUT;
}
}
b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
if (!(mgmt & SM_SW_FWD_EN)) {
mgmt &= ~SM_SW_FWD_MODE;
mgmt |= SM_SW_FWD_EN;
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
if (!(mgmt & SM_SW_FWD_EN)) {
dev_err(dev->dev, "Failed to enable switch!\n");
return -EINVAL;
}
}
b53_enable_mib(dev);
return b53_flush_arl(dev, FAST_AGE_STATIC);
}
static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
struct b53_device *priv = ds->priv;
u16 value = 0;
int ret;
if (priv->ops->phy_read16)
ret = priv->ops->phy_read16(priv, addr, reg, &value);
else
ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
reg * 2, &value);
return ret ? ret : value;
}
static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
struct b53_device *priv = ds->priv;
if (priv->ops->phy_write16)
return priv->ops->phy_write16(priv, addr, reg, val);
return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
}
static int b53_reset_switch(struct b53_device *priv)
{
/* reset vlans */
memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
priv->serdes_lane = B53_INVALID_LANE;
return b53_switch_reset(priv);
}
static int b53_apply_config(struct b53_device *priv)
{
/* disable switching */
b53_set_forwarding(priv, 0);
b53_configure_vlan(priv->ds);
/* enable switching */
b53_set_forwarding(priv, 1);
return 0;
}
static void b53_reset_mib(struct b53_device *priv)
{
u8 gc;
b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
msleep(1);
b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
msleep(1);
}
static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
{
if (is5365(dev))
return b53_mibs_65;
else if (is63xx(dev))
return b53_mibs_63xx;
else if (is58xx(dev))
return b53_mibs_58xx;
else
return b53_mibs;
}
static unsigned int b53_get_mib_size(struct b53_device *dev)
{
if (is5365(dev))
return B53_MIBS_65_SIZE;
else if (is63xx(dev))
return B53_MIBS_63XX_SIZE;
else if (is58xx(dev))
return B53_MIBS_58XX_SIZE;
else
return B53_MIBS_SIZE;
}
static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
{
/* These ports typically do not have built-in PHYs */
switch (port) {
case B53_CPU_PORT_25:
case 7:
case B53_CPU_PORT:
return NULL;
}
return mdiobus_get_phy(ds->slave_mii_bus, port);
}
void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
{
struct b53_device *dev = ds->priv;
const struct b53_mib_desc *mibs = b53_get_mib(dev);
unsigned int mib_size = b53_get_mib_size(dev);
struct phy_device *phydev;
unsigned int i;
if (stringset == ETH_SS_STATS) {
for (i = 0; i < mib_size; i++)
strscpy(data + i * ETH_GSTRING_LEN,
mibs[i].name, ETH_GSTRING_LEN);
} else if (stringset == ETH_SS_PHY_STATS) {
phydev = b53_get_phy_device(ds, port);
if (!phydev)
return;
phy_ethtool_get_strings(phydev, data);
}
}
EXPORT_SYMBOL(b53_get_strings);
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
struct b53_device *dev = ds->priv;
const struct b53_mib_desc *mibs = b53_get_mib(dev);
unsigned int mib_size = b53_get_mib_size(dev);
const struct b53_mib_desc *s;
unsigned int i;
u64 val = 0;
if (is5365(dev) && port == 5)
port = 8;
mutex_lock(&dev->stats_mutex);
for (i = 0; i < mib_size; i++) {
s = &mibs[i];
if (s->size == 8) {
b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
} else {
u32 val32;
b53_read32(dev, B53_MIB_PAGE(port), s->offset,
&val32);
val = val32;
}
data[i] = (u64)val;
}
mutex_unlock(&dev->stats_mutex);
}
EXPORT_SYMBOL(b53_get_ethtool_stats);
void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
struct phy_device *phydev;
phydev = b53_get_phy_device(ds, port);
if (!phydev)
return;
phy_ethtool_get_stats(phydev, NULL, data);
}
EXPORT_SYMBOL(b53_get_ethtool_phy_stats);
int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct b53_device *dev = ds->priv;
struct phy_device *phydev;
if (sset == ETH_SS_STATS) {
return b53_get_mib_size(dev);
} else if (sset == ETH_SS_PHY_STATS) {
phydev = b53_get_phy_device(ds, port);
if (!phydev)
return 0;
return phy_ethtool_get_sset_count(phydev);
}
return 0;
}
EXPORT_SYMBOL(b53_get_sset_count);
enum b53_devlink_resource_id {
B53_DEVLINK_PARAM_ID_VLAN_TABLE,
};
static u64 b53_devlink_vlan_table_get(void *priv)
{
struct b53_device *dev = priv;
struct b53_vlan *vl;
unsigned int i;
u64 count = 0;
for (i = 0; i < dev->num_vlans; i++) {
vl = &dev->vlans[i];
if (vl->members)
count++;
}
return count;
}
int b53_setup_devlink_resources(struct dsa_switch *ds)
{
struct devlink_resource_size_params size_params;
struct b53_device *dev = ds->priv;
int err;
devlink_resource_size_params_init(&size_params, dev->num_vlans,
dev->num_vlans,
1, DEVLINK_RESOURCE_UNIT_ENTRY);
err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans,
B53_DEVLINK_PARAM_ID_VLAN_TABLE,
DEVLINK_RESOURCE_ID_PARENT_TOP,
&size_params);
if (err)
goto out;
dsa_devlink_resource_occ_get_register(ds,
B53_DEVLINK_PARAM_ID_VLAN_TABLE,
b53_devlink_vlan_table_get, dev);
return 0;
out:
dsa_devlink_resources_unregister(ds);
return err;
}
EXPORT_SYMBOL(b53_setup_devlink_resources);
static int b53_setup(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
unsigned int port;
int ret;
/* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set
* which forces the CPU port to be tagged in all VLANs.
*/
ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
ret = b53_reset_switch(dev);
if (ret) {
dev_err(ds->dev, "failed to reset switch\n");
return ret;
}
b53_reset_mib(dev);
ret = b53_apply_config(dev);
if (ret) {
dev_err(ds->dev, "failed to apply configuration\n");
return ret;
}
/* Configure IMP/CPU port, disable all other ports. Enabled
* ports will be configured with .port_enable
*/
for (port = 0; port < dev->num_ports; port++) {
if (dsa_is_cpu_port(ds, port))
b53_enable_cpu_port(dev, port);
else
b53_disable_port(ds, port);
}
return b53_setup_devlink_resources(ds);
}
static void b53_teardown(struct dsa_switch *ds)
{
dsa_devlink_resources_unregister(ds);
}
static void b53_force_link(struct b53_device *dev, int port, int link)
{
u8 reg, val, off;
/* Override the port settings */
if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
} else {
off = B53_GMII_PORT_OVERRIDE_CTRL(port);
val = GMII_PO_EN;
}
b53_read8(dev, B53_CTRL_PAGE, off, &reg);
reg |= val;
if (link)
reg |= PORT_OVERRIDE_LINK;
else
reg &= ~PORT_OVERRIDE_LINK;
b53_write8(dev, B53_CTRL_PAGE, off, reg);
}
static void b53_force_port_config(struct b53_device *dev, int port,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
u8 reg, val, off;
/* Override the port settings */
if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
} else {
off = B53_GMII_PORT_OVERRIDE_CTRL(port);
val = GMII_PO_EN;
}
b53_read8(dev, B53_CTRL_PAGE, off, &reg);
reg |= val;
if (duplex == DUPLEX_FULL)
reg |= PORT_OVERRIDE_FULL_DUPLEX;
else
reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
switch (speed) {
case 2000:
reg |= PORT_OVERRIDE_SPEED_2000M;
fallthrough;
case SPEED_1000:
reg |= PORT_OVERRIDE_SPEED_1000M;
break;
case SPEED_100:
reg |= PORT_OVERRIDE_SPEED_100M;
break;
case SPEED_10:
reg |= PORT_OVERRIDE_SPEED_10M;
break;
default:
dev_err(dev->dev, "unknown speed: %d\n", speed);
return;
}
if (rx_pause)
reg |= PORT_OVERRIDE_RX_FLOW;
if (tx_pause)
reg |= PORT_OVERRIDE_TX_FLOW;
b53_write8(dev, B53_CTRL_PAGE, off, reg);
}
static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
u8 rgmii_ctrl = 0, off;
if (port == dev->imp_port)
off = B53_RGMII_CTRL_IMP;
else
off = B53_RGMII_CTRL_P(port);
b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
switch (interface) {
case PHY_INTERFACE_MODE_RGMII_ID:
rgmii_ctrl |= (RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
break;
case PHY_INTERFACE_MODE_RGMII_RXID:
rgmii_ctrl &= ~(RGMII_CTRL_DLL_TXC);
rgmii_ctrl |= RGMII_CTRL_DLL_RXC;
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC);
rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
break;
case PHY_INTERFACE_MODE_RGMII:
default:
rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
break;
}
if (port != dev->imp_port) {
if (is63268(dev))
rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
}
b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
dev_dbg(ds->dev, "Configured port %d for %s\n", port,
phy_modes(interface));
}
static void b53_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct b53_device *dev = ds->priv;
struct ethtool_eee *p = &dev->ports[port].eee;
u8 rgmii_ctrl = 0, reg = 0, off;
bool tx_pause = false;
bool rx_pause = false;
if (!phy_is_pseudo_fixed_link(phydev))
return;
/* Enable flow control on BCM5301x's CPU port */
if (is5301x(dev) && dsa_is_cpu_port(ds, port))
tx_pause = rx_pause = true;
if (phydev->pause) {
if (phydev->asym_pause)
tx_pause = true;
rx_pause = true;
}
b53_force_port_config(dev, port, phydev->speed, phydev->duplex,
tx_pause, rx_pause);
b53_force_link(dev, port, phydev->link);
if (is63xx(dev) && port >= B53_63XX_RGMII0)
b53_adjust_63xx_rgmii(ds, port, phydev->interface);
if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
if (port == dev->imp_port)
off = B53_RGMII_CTRL_IMP;
else
off = B53_RGMII_CTRL_P(port);
/* Configure the port RGMII clock delay by DLL disabled and
* tx_clk aligned timing (restoring to reset defaults)
*/
b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
RGMII_CTRL_TIMING_SEL);
/* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
* sure that we enable the port TX clock internal delay to
* account for this internal delay that is inserted, otherwise
* the switch won't be able to receive correctly.
*
* PHY_INTERFACE_MODE_RGMII means that we are not introducing
* any delay on either transmission or reception, so the
* BCM53125 must also be configured accordingly to account for
* the lack of delay and introduce it itself.
*
* The BCM53125 switch has its RX clock and TX clock control
* swapped, hence the reason why we modify the TX clock path in
* the "RGMII" case
*/
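/* Net effect of the two checks below for 531x5, summarised for reference:
*   RGMII       -> RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC set
*   RGMII_TXID  -> RGMII_CTRL_DLL_TXC set
*   other RGMII modes -> neither DLL bit set
* RGMII_CTRL_TIMING_SEL is then set in every case.
*/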
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
dev_info(ds->dev, "Configured port %d for %s\n", port,
phy_modes(phydev->interface));
}
/* configure MII port if necessary */
if (is5325(dev)) {
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
&reg);
/* reverse mii needs to be enabled */
if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
reg | PORT_OVERRIDE_RV_MII_25);
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
&reg);
if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
dev_err(ds->dev,
"Failed to enable reverse MII mode\n");
return;
}
}
}
/* Re-negotiate EEE if it was enabled already */
p->eee_enabled = b53_eee_init(ds, port, phydev);
}
void b53_port_event(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
bool link;
u16 sts;
b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
link = !!(sts & BIT(port));
dsa_port_phylink_mac_change(ds, port, link);
}
EXPORT_SYMBOL(b53_port_event);
static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct b53_device *dev = ds->priv;
/* Internal ports need GMII for PHYLIB */
__set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
/* These switches appear to support MII and RevMII too, but beyond
* this, the code gives very few clues. FIXME: We probably need more
* interface modes here.
*
* According to b53_srab_mux_init(), ports 3..5 can support:
* SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting.
* However, the interface mode read from the MUX configuration is
* not passed back to DSA, so phylink uses NA.
* DT can specify RGMII for ports 0, 1.
* For MDIO, port 8 can be RGMII_TXID.
*/
__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100;
/* 5325/5365 are not capable of gigabit speeds, everything else is.
* Note: the original code also excluded Gigabit for MII, RevMII
* and 802.3z modes. MII and RevMII are not able to work above 100M,
* so will be excluded by the generic validator implementation.
* However, the exclusion of Gigabit for 802.3z just seems wrong.
*/
if (!(is5325(dev) || is5365(dev)))
config->mac_capabilities |= MAC_1000;
/* Get the implementation specific capabilities */
if (dev->ops->phylink_get_caps)
dev->ops->phylink_get_caps(dev, port, config);
}
static struct phylink_pcs *b53_phylink_mac_select_pcs(struct dsa_switch *ds,
int port,
phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
if (!dev->ops->phylink_mac_select_pcs)
return NULL;
return dev->ops->phylink_mac_select_pcs(dev, port, interface);
}
void b53_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
}
EXPORT_SYMBOL(b53_phylink_mac_config);
void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
if (mode == MLO_AN_PHY)
return;
if (mode == MLO_AN_FIXED) {
b53_force_link(dev, port, false);
return;
}
if (phy_interface_mode_is_8023z(interface) &&
dev->ops->serdes_link_set)
dev->ops->serdes_link_set(dev, port, mode, interface, false);
}
EXPORT_SYMBOL(b53_phylink_mac_link_down);
void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct b53_device *dev = ds->priv;
if (is63xx(dev) && port >= B53_63XX_RGMII0)
b53_adjust_63xx_rgmii(ds, port, interface);
if (mode == MLO_AN_PHY)
return;
if (mode == MLO_AN_FIXED) {
b53_force_port_config(dev, port, speed, duplex,
tx_pause, rx_pause);
b53_force_link(dev, port, true);
return;
}
if (phy_interface_mode_is_8023z(interface) &&
dev->ops->serdes_link_set)
dev->ops->serdes_link_set(dev, port, mode, interface, true);
}
EXPORT_SYMBOL(b53_phylink_mac_link_up);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering);
return 0;
}
EXPORT_SYMBOL(b53_vlan_filtering);
static int b53_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct b53_device *dev = ds->priv;
if ((is5325(dev) || is5365(dev)) && vlan->vid == 0)
return -EOPNOTSUPP;
/* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of
* receiving VLAN tagged frames at all; we can still allow the port to
* be configured for egress untagged.
*/
if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 &&
!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
return -EINVAL;
if (vlan->vid >= dev->num_vlans)
return -ERANGE;
b53_enable_vlan(dev, port, true, ds->vlan_filtering);
return 0;
}
int b53_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct b53_vlan *vl;
int err;
err = b53_vlan_prepare(ds, port, vlan);
if (err)
return err;
vl = &dev->vlans[vlan->vid];
b53_get_vlan_entry(dev, vlan->vid, vl);
if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
untagged = true;
vl->members |= BIT(port);
if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
vl->untag |= BIT(port);
else
vl->untag &= ~BIT(port);
b53_set_vlan_entry(dev, vlan->vid, vl);
b53_fast_age_vlan(dev, vlan->vid);
if (pvid && !dsa_is_cpu_port(ds, port)) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
vlan->vid);
b53_fast_age_vlan(dev, vlan->vid);
}
return 0;
}
EXPORT_SYMBOL(b53_vlan_add);
int b53_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct b53_vlan *vl;
u16 pvid;
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
vl = &dev->vlans[vlan->vid];
b53_get_vlan_entry(dev, vlan->vid, vl);
vl->members &= ~BIT(port);
if (pvid == vlan->vid)
pvid = b53_default_pvid(dev);
if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
vl->untag &= ~(BIT(port));
b53_set_vlan_entry(dev, vlan->vid, vl);
b53_fast_age_vlan(dev, vlan->vid);
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
b53_fast_age_vlan(dev, pvid);
return 0;
}
EXPORT_SYMBOL(b53_vlan_del);
/* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. */
static int b53_arl_op_wait(struct b53_device *dev)
{
unsigned int timeout = 10;
u8 reg;
do {
b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
if (!(reg & ARLTBL_START_DONE))
return 0;
usleep_range(1000, 2000);
} while (timeout--);
dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
return -ETIMEDOUT;
}
static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
{
u8 reg;
if (op > ARLTBL_RW)
return -EINVAL;
b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
reg |= ARLTBL_START_DONE;
if (op)
reg |= ARLTBL_RW;
else
reg &= ~ARLTBL_RW;
if (dev->vlan_enabled)
reg &= ~ARLTBL_IVL_SVL_SELECT;
else
reg |= ARLTBL_IVL_SVL_SELECT;
b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
return b53_arl_op_wait(dev);
}
static int b53_arl_read(struct b53_device *dev, u64 mac,
u16 vid, struct b53_arl_entry *ent, u8 *idx)
{
DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
unsigned int i;
int ret;
ret = b53_arl_op_wait(dev);
if (ret)
return ret;
bitmap_zero(free_bins, dev->num_arl_bins);
/* Read the bins */
for (i = 0; i < dev->num_arl_bins; i++) {
u64 mac_vid;
u32 fwd_entry;
b53_read64(dev, B53_ARLIO_PAGE,
B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
b53_read32(dev, B53_ARLIO_PAGE,
B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
b53_arl_to_entry(ent, mac_vid, fwd_entry);
if (!(fwd_entry & ARLTBL_VALID)) {
set_bit(i, free_bins);
continue;
}
if ((mac_vid & ARLTBL_MAC_MASK) != mac)
continue;
if (dev->vlan_enabled &&
((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
continue;
*idx = i;
return 0;
}
*idx = find_first_bit(free_bins, dev->num_arl_bins);
return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
}
static int b53_arl_op(struct b53_device *dev, int op, int port,
const unsigned char *addr, u16 vid, bool is_valid)
{
struct b53_arl_entry ent;
u32 fwd_entry;
u64 mac, mac_vid = 0;
u8 idx = 0;
int ret;
/* Convert the array into a 64-bit MAC */
mac = ether_addr_to_u64(addr);
/* Perform a read for the given MAC and VID */
b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
/* Issue a read operation for this MAC */
ret = b53_arl_rw_op(dev, 1);
if (ret)
return ret;
ret = b53_arl_read(dev, mac, vid, &ent, &idx);
/* If this is a read, just finish now */
if (op)
return ret;
switch (ret) {
case -ETIMEDOUT:
return ret;
case -ENOSPC:
dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
addr, vid);
return is_valid ? ret : 0;
case -ENOENT:
/* We could not find a matching MAC, so reset to a new entry */
dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
addr, vid, idx);
fwd_entry = 0;
break;
default:
dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
addr, vid, idx);
break;
}
/* For multicast address, the port is a bitmask and the validity
* is determined by having at least one port being still active
*/
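/* Example (illustrative): with ports 1 and 2 already present, ent.port is
* 0b0110; deleting port 1 leaves 0b0100 so the entry stays valid, and
* deleting port 2 afterwards clears the mask and invalidates the entry.
*/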
if (!is_multicast_ether_addr(addr)) {
ent.port = port;
ent.is_valid = is_valid;
} else {
if (is_valid)
ent.port |= BIT(port);
else
ent.port &= ~BIT(port);
ent.is_valid = !!(ent.port);
}
ent.vid = vid;
ent.is_static = true;
ent.is_age = false;
memcpy(ent.mac, addr, ETH_ALEN);
b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
b53_write64(dev, B53_ARLIO_PAGE,
B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
b53_write32(dev, B53_ARLIO_PAGE,
B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
return b53_arl_rw_op(dev, 0);
}
int b53_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
/* 5325 and 5365 require some more massaging, but could
* be supported eventually
*/
if (is5325(priv) || is5365(priv))
return -EOPNOTSUPP;
mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, addr, vid, true);
mutex_unlock(&priv->arl_mutex);
return ret;
}
EXPORT_SYMBOL(b53_fdb_add);
int b53_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, addr, vid, false);
mutex_unlock(&priv->arl_mutex);
return ret;
}
EXPORT_SYMBOL(b53_fdb_del);
static int b53_arl_search_wait(struct b53_device *dev)
{
unsigned int timeout = 1000;
u8 reg;
do {
b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
if (!(reg & ARL_SRCH_STDN))
return 0;
if (reg & ARL_SRCH_VLID)
return 0;
usleep_range(1000, 2000);
} while (timeout--);
return -ETIMEDOUT;
}
static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
struct b53_arl_entry *ent)
{
u64 mac_vid;
u32 fwd_entry;
b53_read64(dev, B53_ARLIO_PAGE,
B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
b53_read32(dev, B53_ARLIO_PAGE,
B53_ARL_SRCH_RSTL(idx), &fwd_entry);
b53_arl_to_entry(ent, mac_vid, fwd_entry);
}
static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
dsa_fdb_dump_cb_t *cb, void *data)
{
if (!ent->is_valid)
return 0;
if (port != ent->port)
return 0;
return cb(ent->mac, ent->vid, ent->is_static, data);
}
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct b53_device *priv = ds->priv;
struct b53_arl_entry results[2];
unsigned int count = 0;
int ret;
u8 reg;
mutex_lock(&priv->arl_mutex);
/* Start search operation */
reg = ARL_SRCH_STDN;
b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
do {
ret = b53_arl_search_wait(priv);
if (ret)
break;
b53_arl_search_rd(priv, 0, &results[0]);
ret = b53_fdb_copy(port, &results[0], cb, data);
if (ret)
break;
if (priv->num_arl_bins > 2) {
b53_arl_search_rd(priv, 1, &results[1]);
ret = b53_fdb_copy(port, &results[1], cb, data);
if (ret)
break;
if (!results[0].is_valid && !results[1].is_valid)
break;
}
} while (count++ < b53_max_arl_entries(priv) / 2);
mutex_unlock(&priv->arl_mutex);
return 0;
}
EXPORT_SYMBOL(b53_fdb_dump);
int b53_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
/* 5325 and 5365 require some more massaging, but could
* be supported eventually
*/
if (is5325(priv) || is5365(priv))
return -EOPNOTSUPP;
mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
mutex_unlock(&priv->arl_mutex);
return ret;
}
EXPORT_SYMBOL(b53_mdb_add);
int b53_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
mutex_unlock(&priv->arl_mutex);
if (ret)
dev_err(ds->dev, "failed to delete MDB entry\n");
return ret;
}
EXPORT_SYMBOL(b53_mdb_del);
int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
bool *tx_fwd_offload, struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
u16 pvlan, reg;
unsigned int i;
/* On 7278, port 7 which connects to the ASP should only receive
* traffic from matching CFP rules.
*/
if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
return -EINVAL;
/* Make this port leave the "join all VLANs" group since we will have
* proper VLAN entries from now on
*/
if (is58xx(dev)) {
b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
reg &= ~BIT(port);
if ((reg & BIT(cpu_port)) == BIT(cpu_port))
reg &= ~BIT(cpu_port);
b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
}
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
b53_for_each_port(dev, i) {
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Add this local port to the remote port VLAN control
* membership and update the remote port bitmask
*/
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
reg |= BIT(port);
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
dev->ports[i].vlan_ctl_mask = reg;
pvlan |= BIT(i);
}
/* Configure the local port VLAN control membership to include
* remote ports and update the local port bitmask
*/
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
return 0;
}
EXPORT_SYMBOL(b53_br_join);
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
{
struct b53_device *dev = ds->priv;
struct b53_vlan *vl = &dev->vlans[0];
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
b53_for_each_port(dev, i) {
/* Don't touch the remaining ports */
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
reg &= ~BIT(port);
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
dev->ports[port].vlan_ctl_mask = reg;
/* Prevent self removal to preserve isolation */
if (port != i)
pvlan &= ~BIT(i);
}
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
pvid = b53_default_pvid(dev);
/* Make this port join all VLANs without VLAN entries */
if (is58xx(dev)) {
b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
reg |= BIT(port);
if (!(reg & BIT(cpu_port)))
reg |= BIT(cpu_port);
b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
} else {
b53_get_vlan_entry(dev, pvid, vl);
vl->members |= BIT(port) | BIT(cpu_port);
vl->untag |= BIT(port) | BIT(cpu_port);
b53_set_vlan_entry(dev, pvid, vl);
}
}
EXPORT_SYMBOL(b53_br_leave);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
{
struct b53_device *dev = ds->priv;
u8 hw_state;
u8 reg;
switch (state) {
case BR_STATE_DISABLED:
hw_state = PORT_CTRL_DIS_STATE;
break;
case BR_STATE_LISTENING:
hw_state = PORT_CTRL_LISTEN_STATE;
break;
case BR_STATE_LEARNING:
hw_state = PORT_CTRL_LEARN_STATE;
break;
case BR_STATE_FORWARDING:
hw_state = PORT_CTRL_FWD_STATE;
break;
case BR_STATE_BLOCKING:
hw_state = PORT_CTRL_BLOCK_STATE;
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
return;
}
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
reg &= ~PORT_CTRL_STP_STATE_MASK;
reg |= hw_state;
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
}
EXPORT_SYMBOL(b53_br_set_stp_state);
void b53_br_fast_age(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
if (b53_fast_age_port(dev, port))
dev_err(ds->dev, "fast ageing failed\n");
}
EXPORT_SYMBOL(b53_br_fast_age);
int b53_br_flags_pre(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(b53_br_flags_pre);
int b53_br_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
if (flags.mask & BR_FLOOD)
b53_port_set_ucast_flood(ds->priv, port,
!!(flags.val & BR_FLOOD));
if (flags.mask & BR_MCAST_FLOOD)
b53_port_set_mcast_flood(ds->priv, port,
!!(flags.val & BR_MCAST_FLOOD));
if (flags.mask & BR_LEARNING)
b53_port_set_learning(ds->priv, port,
!!(flags.val & BR_LEARNING));
return 0;
}
EXPORT_SYMBOL(b53_br_flags);
static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
{
/* Broadcom switches will accept enabling Broadcom tags on the
* following ports: 5, 7 and 8; any other port is not supported
*/
switch (port) {
case B53_CPU_PORT_25:
case 7:
case B53_CPU_PORT:
return true;
}
return false;
}
static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
enum dsa_tag_protocol tag_protocol)
{
bool ret = b53_possible_cpu_port(ds, port);
if (!ret) {
dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
port);
return ret;
}
switch (tag_protocol) {
case DSA_TAG_PROTO_BRCM:
case DSA_TAG_PROTO_BRCM_PREPEND:
dev_warn(ds->dev,
"Port %d is stacked to Broadcom tag switch\n", port);
ret = false;
break;
default:
ret = true;
break;
}
return ret;
}
enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mprot)
{
struct b53_device *dev = ds->priv;
if (!b53_can_enable_brcm_tags(ds, port, mprot)) {
dev->tag_protocol = DSA_TAG_PROTO_NONE;
goto out;
}
/* Older models require a different 6 byte tag */
if (is5325(dev) || is5365(dev) || is63xx(dev)) {
dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
goto out;
}
/* Broadcom BCM58xx chips have a flow accelerator on Port 8
* which requires us to use the prepended Broadcom tag type
*/
if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
goto out;
}
dev->tag_protocol = DSA_TAG_PROTO_BRCM;
out:
return dev->tag_protocol;
}
EXPORT_SYMBOL(b53_get_tag_protocol);
int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
u16 reg, loc;
if (ingress)
loc = B53_IG_MIR_CTL;
else
loc = B53_EG_MIR_CTL;
b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
reg |= BIT(port);
b53_write16(dev, B53_MGMT_PAGE, loc, reg);
b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
reg &= ~CAP_PORT_MASK;
reg |= mirror->to_local_port;
reg |= MIRROR_EN;
b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
return 0;
}
EXPORT_SYMBOL(b53_mirror_add);
void b53_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
struct b53_device *dev = ds->priv;
bool loc_disable = false, other_loc_disable = false;
u16 reg, loc;
if (mirror->ingress)
loc = B53_IG_MIR_CTL;
else
loc = B53_EG_MIR_CTL;
/* Update the desired ingress/egress register */
b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
reg &= ~BIT(port);
if (!(reg & MIRROR_MASK))
loc_disable = true;
b53_write16(dev, B53_MGMT_PAGE, loc, reg);
/* Now look at the other one to know if we can disable mirroring
* entirely
*/
if (mirror->ingress)
b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
else
b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
if (!(reg & MIRROR_MASK))
other_loc_disable = true;
b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
/* Both no longer have ports, let's disable mirroring */
if (loc_disable && other_loc_disable) {
reg &= ~MIRROR_EN;
reg &= ~mirror->to_local_port;
}
b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
}
EXPORT_SYMBOL(b53_mirror_del);
void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
struct b53_device *dev = ds->priv;
u16 reg;
b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
if (enable)
reg |= BIT(port);
else
reg &= ~BIT(port);
b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
}
EXPORT_SYMBOL(b53_eee_enable_set);
/* Returns 0 if EEE was not enabled, or 1 otherwise
*/
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
int ret;
ret = phy_init_eee(phy, false);
if (ret)
return 0;
b53_eee_enable_set(ds, port, true);
return 1;
}
EXPORT_SYMBOL(b53_eee_init);
int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
struct b53_device *dev = ds->priv;
struct ethtool_eee *p = &dev->ports[port].eee;
u16 reg;
if (is5325(dev) || is5365(dev))
return -EOPNOTSUPP;
b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
e->eee_enabled = p->eee_enabled;
e->eee_active = !!(reg & BIT(port));
return 0;
}
EXPORT_SYMBOL(b53_get_mac_eee);
int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
struct b53_device *dev = ds->priv;
struct ethtool_eee *p = &dev->ports[port].eee;
if (is5325(dev) || is5365(dev))
return -EOPNOTSUPP;
p->eee_enabled = e->eee_enabled;
b53_eee_enable_set(ds, port, e->eee_enabled);
return 0;
}
EXPORT_SYMBOL(b53_set_mac_eee);
static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
{
struct b53_device *dev = ds->priv;
bool enable_jumbo;
bool allow_10_100;
if (is5325(dev) || is5365(dev))
return -EOPNOTSUPP;
enable_jumbo = (mtu >= JMS_MIN_SIZE);
allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
}
static int b53_get_max_mtu(struct dsa_switch *ds, int port)
{
return JMS_MAX_SIZE;
}
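/* Wire the helpers above into the DSA core. Every bus-specific front-end
 * (MMIO, SPI, MDIO, SRAB) reuses this single ops table, which is installed
 * on the dsa_switch in b53_switch_alloc().
 */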
static const struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = b53_setup,
.teardown = b53_teardown,
.get_strings = b53_get_strings,
.get_ethtool_stats = b53_get_ethtool_stats,
.get_sset_count = b53_get_sset_count,
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
.adjust_link = b53_adjust_link,
.phylink_get_caps = b53_phylink_get_caps,
.phylink_mac_select_pcs = b53_phylink_mac_select_pcs,
.phylink_mac_config = b53_phylink_mac_config,
.phylink_mac_link_down = b53_phylink_mac_link_down,
.phylink_mac_link_up = b53_phylink_mac_link_up,
.port_enable = b53_enable_port,
.port_disable = b53_disable_port,
.get_mac_eee = b53_get_mac_eee,
.set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
.port_bridge_flags = b53_br_flags,
.port_stp_state_set = b53_br_set_stp_state,
.port_fast_age = b53_br_fast_age,
.port_vlan_filtering = b53_vlan_filtering,
.port_vlan_add = b53_vlan_add,
.port_vlan_del = b53_vlan_del,
.port_fdb_dump = b53_fdb_dump,
.port_fdb_add = b53_fdb_add,
.port_fdb_del = b53_fdb_del,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
.port_mdb_add = b53_mdb_add,
.port_mdb_del = b53_mdb_del,
.port_max_mtu = b53_get_max_mtu,
.port_change_mtu = b53_change_mtu,
};
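/* Per-chip configuration: which ports exist, how large the VLAN and ARL
 * tables are, and where the duplex, jumbo and VLAN-table registers live for
 * each supported switch family. b53_switch_init() copies the matching entry
 * into struct b53_device at probe time.
 */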
struct b53_chip_data {
u32 chip_id;
const char *dev_name;
u16 vlans;
u16 enabled_ports;
u8 imp_port;
u8 cpu_port;
u8 vta_regs[3];
u8 arl_bins;
u16 arl_buckets;
u8 duplex_reg;
u8 jumbo_pm_reg;
u8 jumbo_size_reg;
};
#define B53_VTA_REGS \
{ B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
#define B53_VTA_REGS_9798 \
{ B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
#define B53_VTA_REGS_63XX \
{ B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }
static const struct b53_chip_data b53_switch_chips[] = {
{
.chip_id = BCM5325_DEVICE_ID,
.dev_name = "BCM5325",
.vlans = 16,
.enabled_ports = 0x3f,
.arl_bins = 2,
.arl_buckets = 1024,
.imp_port = 5,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
{
.chip_id = BCM5365_DEVICE_ID,
.dev_name = "BCM5365",
.vlans = 256,
.enabled_ports = 0x3f,
.arl_bins = 2,
.arl_buckets = 1024,
.imp_port = 5,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
{
.chip_id = BCM5389_DEVICE_ID,
.dev_name = "BCM5389",
.vlans = 4096,
.enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM5395_DEVICE_ID,
.dev_name = "BCM5395",
.vlans = 4096,
.enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM5397_DEVICE_ID,
.dev_name = "BCM5397",
.vlans = 4096,
.enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM5398_DEVICE_ID,
.dev_name = "BCM5398",
.vlans = 4096,
.enabled_ports = 0x17f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM53115_DEVICE_ID,
.dev_name = "BCM53115",
.vlans = 4096,
.enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.vta_regs = B53_VTA_REGS,
.imp_port = 8,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM53125_DEVICE_ID,
.dev_name = "BCM53125",
.vlans = 4096,
.enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM53128_DEVICE_ID,
.dev_name = "BCM53128",
.vlans = 4096,
.enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM63XX_DEVICE_ID,
.dev_name = "BCM63xx",
.vlans = 4096,
.enabled_ports = 0, /* pdata must provide them */
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS_63XX,
.duplex_reg = B53_DUPLEX_STAT_63XX,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
},
{
.chip_id = BCM63268_DEVICE_ID,
.dev_name = "BCM63268",
.vlans = 4096,
.enabled_ports = 0, /* pdata must provide them */
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS_63XX,
.duplex_reg = B53_DUPLEX_STAT_63XX,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
},
{
.chip_id = BCM53010_DEVICE_ID,
.dev_name = "BCM53010",
.vlans = 4096,
.enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM53011_DEVICE_ID,
.dev_name = "BCM53011",
.vlans = 4096,
.enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM53012_DEVICE_ID,
.dev_name = "BCM53012",
.vlans = 4096,
.enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM53018_DEVICE_ID,
.dev_name = "BCM53018",
.vlans = 4096,
.enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM53019_DEVICE_ID,
.dev_name = "BCM53019",
.vlans = 4096,
.enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM58XX_DEVICE_ID,
.dev_name = "BCM585xx/586xx/88312",
.vlans = 4096,
.enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM583XX_DEVICE_ID,
.dev_name = "BCM583xx/11360",
.vlans = 4096,
.enabled_ports = 0x103,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
/* Starfighter 2 */
{
.chip_id = BCM4908_DEVICE_ID,
.dev_name = "BCM4908",
.vlans = 4096,
.enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 256,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM7445_DEVICE_ID,
.dev_name = "BCM7445",
.vlans = 4096,
.enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM7278_DEVICE_ID,
.dev_name = "BCM7278",
.vlans = 4096,
.enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 256,
.imp_port = 8,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
.chip_id = BCM53134_DEVICE_ID,
.dev_name = "BCM53134",
.vlans = 4096,
.enabled_ports = 0x12f,
.imp_port = 8,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.arl_bins = 4,
.arl_buckets = 1024,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
};
static int b53_switch_init(struct b53_device *dev)
{
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
const struct b53_chip_data *chip = &b53_switch_chips[i];
if (chip->chip_id == dev->chip_id) {
if (!dev->enabled_ports)
dev->enabled_ports = chip->enabled_ports;
dev->name = chip->dev_name;
dev->duplex_reg = chip->duplex_reg;
dev->vta_regs[0] = chip->vta_regs[0];
dev->vta_regs[1] = chip->vta_regs[1];
dev->vta_regs[2] = chip->vta_regs[2];
dev->jumbo_pm_reg = chip->jumbo_pm_reg;
dev->imp_port = chip->imp_port;
dev->num_vlans = chip->vlans;
dev->num_arl_bins = chip->arl_bins;
dev->num_arl_buckets = chip->arl_buckets;
break;
}
}
/* check which BCM5325x version we have */
if (is5325(dev)) {
u8 vc4;
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
/* check reserved bits */
switch (vc4 & 3) {
case 1:
/* BCM5325E */
break;
case 3:
/* BCM5325F - do not use port 4 */
dev->enabled_ports &= ~BIT(4);
break;
default:
/* On the BCM47XX SoCs this is the supported internal switch.*/
#ifndef CONFIG_BCM47XX
/* BCM5325M */
return -EINVAL;
#else
break;
#endif
}
}
dev->num_ports = fls(dev->enabled_ports);
dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
/* Include non standard CPU port built-in PHYs to be probed */
if (is539x(dev) || is531x5(dev)) {
for (i = 0; i < dev->num_ports; i++) {
if (!(dev->ds->phys_mii_mask & BIT(i)) &&
!b53_possible_cpu_port(dev->ds, i))
dev->ds->phys_mii_mask |= BIT(i);
}
}
dev->ports = devm_kcalloc(dev->dev,
dev->num_ports, sizeof(struct b53_port),
GFP_KERNEL);
if (!dev->ports)
return -ENOMEM;
dev->vlans = devm_kcalloc(dev->dev,
dev->num_vlans, sizeof(struct b53_vlan),
GFP_KERNEL);
if (!dev->vlans)
return -ENOMEM;
dev->reset_gpio = b53_switch_get_reset_gpio(dev);
if (dev->reset_gpio >= 0) {
ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
GPIOF_OUT_INIT_HIGH, "robo_reset");
if (ret)
return ret;
}
return 0;
}
struct b53_device *b53_switch_alloc(struct device *base,
const struct b53_io_ops *ops,
void *priv)
{
struct dsa_switch *ds;
struct b53_device *dev;
ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
if (!ds)
return NULL;
ds->dev = base;
dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
ds->priv = dev;
dev->dev = base;
dev->ds = ds;
dev->priv = priv;
dev->ops = ops;
ds->ops = &b53_switch_ops;
dev->vlan_enabled = true;
/* Let DSA handle the case where multiple bridges span the same switch
* device and different VLAN awareness settings are requested, which
* would be breaking filtering semantics for any of the other bridge
* devices. (not hardware supported)
*/
ds->vlan_filtering_is_global = true;
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
mutex_init(&dev->arl_mutex);
return dev;
}
EXPORT_SYMBOL(b53_switch_alloc);
int b53_switch_detect(struct b53_device *dev)
{
u32 id32;
u16 tmp;
u8 id8;
int ret;
ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
if (ret)
return ret;
switch (id8) {
case 0:
/* BCM5325 and BCM5365 do not have this register so reads
* return 0. But the read operation did succeed, so assume this
* is one of them.
*
* Next check if we can write to the 5325's VTA register; for
* 5365 it is read only.
*/
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
if (tmp == 0xf)
dev->chip_id = BCM5325_DEVICE_ID;
else
dev->chip_id = BCM5365_DEVICE_ID;
break;
case BCM5389_DEVICE_ID:
case BCM5395_DEVICE_ID:
case BCM5397_DEVICE_ID:
case BCM5398_DEVICE_ID:
dev->chip_id = id8;
break;
default:
ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
if (ret)
return ret;
switch (id32) {
case BCM53115_DEVICE_ID:
case BCM53125_DEVICE_ID:
case BCM53128_DEVICE_ID:
case BCM53010_DEVICE_ID:
case BCM53011_DEVICE_ID:
case BCM53012_DEVICE_ID:
case BCM53018_DEVICE_ID:
case BCM53019_DEVICE_ID:
case BCM53134_DEVICE_ID:
dev->chip_id = id32;
break;
default:
dev_err(dev->dev,
"unsupported switch detected (BCM53%02x/BCM%x)\n",
id8, id32);
return -ENODEV;
}
}
if (dev->chip_id == BCM5325_DEVICE_ID)
return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
&dev->core_rev);
else
return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
&dev->core_rev);
}
EXPORT_SYMBOL(b53_switch_detect);
int b53_switch_register(struct b53_device *dev)
{
int ret;
if (dev->pdata) {
dev->chip_id = dev->pdata->chip_id;
dev->enabled_ports = dev->pdata->enabled_ports;
}
if (!dev->chip_id && b53_switch_detect(dev))
return -EINVAL;
ret = b53_switch_init(dev);
if (ret)
return ret;
dev_info(dev->dev, "found switch: %s, rev %i\n",
dev->name, dev->core_rev);
return dsa_register_switch(dev->ds);
}
EXPORT_SYMBOL(b53_switch_register);
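/* Illustrative only (not part of the driver): a minimal bus glue layer is
 * expected to pair the two exported entry points above, roughly:
 *
 *   dev = b53_switch_alloc(parent_dev, &my_io_ops, bus_private_data);
 *   if (!dev)
 *           return -ENOMEM;
 *   return b53_switch_register(dev);
 *
 * where my_io_ops is a hypothetical name for a bus-specific struct
 * b53_io_ops implementation, as demonstrated by the MMAP, SPI, MDIO and
 * SRAB front-ends below.
 */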
MODULE_AUTHOR("Jonas Gorski <[email protected]>");
MODULE_DESCRIPTION("B53 switch library");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/net/dsa/b53/b53_common.c |
/*
* B53 register access through memory mapped registers
*
* Copyright (C) 2012-2013 Jonas Gorski <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/platform_data/b53.h>
#include "b53_priv.h"
struct b53_mmap_priv {
void __iomem *regs;
};
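/* Memory-mapped accessors: a switch register is addressed as
 * regs + (page << 8) + reg, with an optional big-endian byte order
 * selected through platform data.
 */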
static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
{
struct b53_mmap_priv *priv = dev->priv;
void __iomem *regs = priv->regs;
*val = readb(regs + (page << 8) + reg);
return 0;
}
static int b53_mmap_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
{
struct b53_mmap_priv *priv = dev->priv;
void __iomem *regs = priv->regs;
if (WARN_ON(reg % 2))
return -EINVAL;
if (dev->pdata && dev->pdata->big_endian)
*val = ioread16be(regs + (page << 8) + reg);
else
*val = readw(regs + (page << 8) + reg);
return 0;
}
static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
{
struct b53_mmap_priv *priv = dev->priv;
void __iomem *regs = priv->regs;
if (WARN_ON(reg % 4))
return -EINVAL;
if (dev->pdata && dev->pdata->big_endian)
*val = ioread32be(regs + (page << 8) + reg);
else
*val = readl(regs + (page << 8) + reg);
return 0;
}
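/* 48-bit registers cannot be moved in a single naturally aligned access, so
 * they are read (and written further below) as a 16-bit half plus a 32-bit
 * half; which half comes first depends on whether the register offset is
 * 4-byte aligned.
 */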
static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
{
struct b53_mmap_priv *priv = dev->priv;
void __iomem *regs = priv->regs;
if (WARN_ON(reg % 2))
return -EINVAL;
if (reg % 4) {
u16 lo;
u32 hi;
if (dev->pdata && dev->pdata->big_endian) {
lo = ioread16be(regs + (page << 8) + reg);
hi = ioread32be(regs + (page << 8) + reg + 2);
} else {
lo = readw(regs + (page << 8) + reg);
hi = readl(regs + (page << 8) + reg + 2);
}
*val = ((u64)hi << 16) | lo;
} else {
u32 lo;
u16 hi;
if (dev->pdata && dev->pdata->big_endian) {
lo = ioread32be(regs + (page << 8) + reg);
hi = ioread16be(regs + (page << 8) + reg + 4);
} else {
lo = readl(regs + (page << 8) + reg);
hi = readw(regs + (page << 8) + reg + 4);
}
*val = ((u64)hi << 32) | lo;
}
return 0;
}
static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
{
struct b53_mmap_priv *priv = dev->priv;
void __iomem *regs = priv->regs;
u32 hi, lo;
if (WARN_ON(reg % 4))
return -EINVAL;
if (dev->pdata && dev->pdata->big_endian) {
lo = ioread32be(regs + (page << 8) + reg);
hi = ioread32be(regs + (page << 8) + reg + 4);
} else {
lo = readl(regs + (page << 8) + reg);
hi = readl(regs + (page << 8) + reg + 4);
}
*val = ((u64)hi << 32) | lo;
return 0;
}
static int b53_mmap_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
{
struct b53_mmap_priv *priv = dev->priv;
void __iomem *regs = priv->regs;
writeb(value, regs + (page << 8) + reg);
return 0;
}
static int b53_mmap_write16(struct b53_device *dev, u8 page, u8 reg,
u16 value)
{
struct b53_mmap_priv *priv = dev->priv;
void __iomem *regs = priv->regs;
if (WARN_ON(reg % 2))
return -EINVAL;
if (dev->pdata && dev->pdata->big_endian)
iowrite16be(value, regs + (page << 8) + reg);
else
writew(value, regs + (page << 8) + reg);
return 0;
}
static int b53_mmap_write32(struct b53_device *dev, u8 page, u8 reg,
u32 value)
{
struct b53_mmap_priv *priv = dev->priv;
void __iomem *regs = priv->regs;
if (WARN_ON(reg % 4))
return -EINVAL;
if (dev->pdata && dev->pdata->big_endian)
iowrite32be(value, regs + (page << 8) + reg);
else
writel(value, regs + (page << 8) + reg);
return 0;
}
static int b53_mmap_write48(struct b53_device *dev, u8 page, u8 reg,
u64 value)
{
if (WARN_ON(reg % 2))
return -EINVAL;
if (reg % 4) {
u32 hi = (u32)(value >> 16);
u16 lo = (u16)value;
b53_mmap_write16(dev, page, reg, lo);
b53_mmap_write32(dev, page, reg + 2, hi);
} else {
u16 hi = (u16)(value >> 32);
u32 lo = (u32)value;
b53_mmap_write32(dev, page, reg, lo);
b53_mmap_write16(dev, page, reg + 4, hi);
}
return 0;
}
static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
u64 value)
{
u32 hi, lo;
hi = upper_32_bits(value);
lo = lower_32_bits(value);
if (WARN_ON(reg % 4))
return -EINVAL;
b53_mmap_write32(dev, page, reg, lo);
b53_mmap_write32(dev, page, reg + 4, hi);
return 0;
}
static int b53_mmap_phy_read16(struct b53_device *dev, int addr, int reg,
u16 *value)
{
return -EIO;
}
static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
u16 value)
{
return -EIO;
}
static const struct b53_io_ops b53_mmap_ops = {
.read8 = b53_mmap_read8,
.read16 = b53_mmap_read16,
.read32 = b53_mmap_read32,
.read48 = b53_mmap_read48,
.read64 = b53_mmap_read64,
.write8 = b53_mmap_write8,
.write16 = b53_mmap_write16,
.write32 = b53_mmap_write32,
.write48 = b53_mmap_write48,
.write64 = b53_mmap_write64,
.phy_read16 = b53_mmap_phy_read16,
.phy_write16 = b53_mmap_phy_write16,
};
static int b53_mmap_probe_of(struct platform_device *pdev,
struct b53_platform_data **ppdata)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *of_ports, *of_port;
struct device *dev = &pdev->dev;
struct b53_platform_data *pdata;
void __iomem *mem;
mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem))
return PTR_ERR(mem);
pdata = devm_kzalloc(dev, sizeof(struct b53_platform_data),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->regs = mem;
pdata->chip_id = (u32)(unsigned long)device_get_match_data(dev);
pdata->big_endian = of_property_read_bool(np, "big-endian");
of_ports = of_get_child_by_name(np, "ports");
if (!of_ports) {
dev_err(dev, "no ports child node found\n");
return -EINVAL;
}
for_each_available_child_of_node(of_ports, of_port) {
u32 reg;
if (of_property_read_u32(of_port, "reg", &reg))
continue;
if (reg < B53_N_PORTS)
pdata->enabled_ports |= BIT(reg);
}
of_node_put(of_ports);
*ppdata = pdata;
return 0;
}
static int b53_mmap_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct b53_platform_data *pdata = pdev->dev.platform_data;
struct b53_mmap_priv *priv;
struct b53_device *dev;
int ret;
if (!pdata && np) {
ret = b53_mmap_probe_of(pdev, &pdata);
if (ret) {
dev_err(&pdev->dev, "OF probe error\n");
return ret;
}
}
if (!pdata)
return -EINVAL;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->regs = pdata->regs;
dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, priv);
if (!dev)
return -ENOMEM;
dev->pdata = pdata;
platform_set_drvdata(pdev, dev);
return b53_switch_register(dev);
}
static int b53_mmap_remove(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
if (dev)
b53_switch_remove(dev);
return 0;
}
static void b53_mmap_shutdown(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
if (dev)
b53_switch_shutdown(dev);
platform_set_drvdata(pdev, NULL);
}
static const struct of_device_id b53_mmap_of_table[] = {
{
.compatible = "brcm,bcm3384-switch",
.data = (void *)BCM63XX_DEVICE_ID,
}, {
.compatible = "brcm,bcm6318-switch",
.data = (void *)BCM63268_DEVICE_ID,
}, {
.compatible = "brcm,bcm6328-switch",
.data = (void *)BCM63XX_DEVICE_ID,
}, {
.compatible = "brcm,bcm6362-switch",
.data = (void *)BCM63XX_DEVICE_ID,
}, {
.compatible = "brcm,bcm6368-switch",
.data = (void *)BCM63XX_DEVICE_ID,
}, {
.compatible = "brcm,bcm63268-switch",
.data = (void *)BCM63268_DEVICE_ID,
}, {
.compatible = "brcm,bcm63xx-switch",
.data = (void *)BCM63XX_DEVICE_ID,
}, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
static struct platform_driver b53_mmap_driver = {
.probe = b53_mmap_probe,
.remove = b53_mmap_remove,
.shutdown = b53_mmap_shutdown,
.driver = {
.name = "b53-switch",
.of_match_table = b53_mmap_of_table,
},
};
module_platform_driver(b53_mmap_driver);
MODULE_AUTHOR("Jonas Gorski <[email protected]>");
MODULE_DESCRIPTION("B53 MMAP access driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/net/dsa/b53/b53_mmap.c |
/*
* B53 register access through SPI
*
* Copyright (C) 2011-2013 Jonas Gorski <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <asm/unaligned.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/b53.h>
#include "b53_priv.h"
#define B53_SPI_DATA 0xf0
#define B53_SPI_STATUS 0xfe
#define B53_SPI_CMD_SPIF BIT(7)
#define B53_SPI_CMD_RACK BIT(5)
#define B53_SPI_CMD_READ 0x00
#define B53_SPI_CMD_WRITE 0x01
#define B53_SPI_CMD_NORMAL 0x60
#define B53_SPI_CMD_FAST 0x10
#define B53_SPI_PAGE_SELECT 0xff
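/* SPI access protocol: every register access first polls the status byte
 * until the SPIF bit clears, then selects the page by writing it through
 * the B53_SPI_PAGE_SELECT offset. Reads additionally poll for RACK before
 * the data is fetched from the B53_SPI_DATA window; writes send the
 * register offset followed by the little-endian payload in one transfer.
 */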
static inline int b53_spi_read_reg(struct spi_device *spi, u8 reg, u8 *val,
unsigned int len)
{
u8 txbuf[2];
txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_READ;
txbuf[1] = reg;
return spi_write_then_read(spi, txbuf, 2, val, len);
}
static inline int b53_spi_clear_status(struct spi_device *spi)
{
unsigned int i;
u8 rxbuf;
int ret;
for (i = 0; i < 10; i++) {
ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1);
if (ret)
return ret;
if (!(rxbuf & B53_SPI_CMD_SPIF))
break;
mdelay(1);
}
if (i == 10)
return -EIO;
return 0;
}
static inline int b53_spi_set_page(struct spi_device *spi, u8 page)
{
u8 txbuf[3];
txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
txbuf[1] = B53_SPI_PAGE_SELECT;
txbuf[2] = page;
return spi_write(spi, txbuf, sizeof(txbuf));
}
static inline int b53_prepare_reg_access(struct spi_device *spi, u8 page)
{
int ret = b53_spi_clear_status(spi);
if (ret)
return ret;
return b53_spi_set_page(spi, page);
}
static int b53_spi_prepare_reg_read(struct spi_device *spi, u8 reg)
{
u8 rxbuf;
int retry_count;
int ret;
ret = b53_spi_read_reg(spi, reg, &rxbuf, 1);
if (ret)
return ret;
for (retry_count = 0; retry_count < 10; retry_count++) {
ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1);
if (ret)
return ret;
if (rxbuf & B53_SPI_CMD_RACK)
break;
mdelay(1);
}
if (retry_count == 10)
return -EIO;
return 0;
}
static int b53_spi_read(struct b53_device *dev, u8 page, u8 reg, u8 *data,
unsigned int len)
{
struct spi_device *spi = dev->priv;
int ret;
ret = b53_prepare_reg_access(spi, page);
if (ret)
return ret;
ret = b53_spi_prepare_reg_read(spi, reg);
if (ret)
return ret;
return b53_spi_read_reg(spi, B53_SPI_DATA, data, len);
}
static int b53_spi_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
{
return b53_spi_read(dev, page, reg, val, 1);
}
static int b53_spi_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
{
__le16 value;
int ret;
ret = b53_spi_read(dev, page, reg, (u8 *)&value, 2);
if (!ret)
*val = le16_to_cpu(value);
return ret;
}
static int b53_spi_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
{
__le32 value;
int ret;
ret = b53_spi_read(dev, page, reg, (u8 *)&value, 4);
if (!ret)
*val = le32_to_cpu(value);
return ret;
}
static int b53_spi_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
{
__le64 value;
int ret;
*val = 0;
ret = b53_spi_read(dev, page, reg, (u8 *)&value, 6);
if (!ret)
*val = le64_to_cpu(value);
return ret;
}
static int b53_spi_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
{
__le64 value;
int ret;
ret = b53_spi_read(dev, page, reg, (u8 *)&value, 8);
if (!ret)
*val = le64_to_cpu(value);
return ret;
}
static int b53_spi_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
{
struct spi_device *spi = dev->priv;
int ret;
u8 txbuf[3];
ret = b53_prepare_reg_access(spi, page);
if (ret)
return ret;
txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
txbuf[1] = reg;
txbuf[2] = value;
return spi_write(spi, txbuf, sizeof(txbuf));
}
static int b53_spi_write16(struct b53_device *dev, u8 page, u8 reg, u16 value)
{
struct spi_device *spi = dev->priv;
int ret;
u8 txbuf[4];
ret = b53_prepare_reg_access(spi, page);
if (ret)
return ret;
txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
txbuf[1] = reg;
put_unaligned_le16(value, &txbuf[2]);
return spi_write(spi, txbuf, sizeof(txbuf));
}
static int b53_spi_write32(struct b53_device *dev, u8 page, u8 reg, u32 value)
{
struct spi_device *spi = dev->priv;
int ret;
u8 txbuf[6];
ret = b53_prepare_reg_access(spi, page);
if (ret)
return ret;
txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
txbuf[1] = reg;
put_unaligned_le32(value, &txbuf[2]);
return spi_write(spi, txbuf, sizeof(txbuf));
}
static int b53_spi_write48(struct b53_device *dev, u8 page, u8 reg, u64 value)
{
struct spi_device *spi = dev->priv;
int ret;
u8 txbuf[10];
ret = b53_prepare_reg_access(spi, page);
if (ret)
return ret;
txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
txbuf[1] = reg;
put_unaligned_le64(value, &txbuf[2]);
return spi_write(spi, txbuf, sizeof(txbuf) - 2);
}
static int b53_spi_write64(struct b53_device *dev, u8 page, u8 reg, u64 value)
{
struct spi_device *spi = dev->priv;
int ret;
u8 txbuf[10];
ret = b53_prepare_reg_access(spi, page);
if (ret)
return ret;
txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
txbuf[1] = reg;
put_unaligned_le64(value, &txbuf[2]);
return spi_write(spi, txbuf, sizeof(txbuf));
}
static const struct b53_io_ops b53_spi_ops = {
.read8 = b53_spi_read8,
.read16 = b53_spi_read16,
.read32 = b53_spi_read32,
.read48 = b53_spi_read48,
.read64 = b53_spi_read64,
.write8 = b53_spi_write8,
.write16 = b53_spi_write16,
.write32 = b53_spi_write32,
.write48 = b53_spi_write48,
.write64 = b53_spi_write64,
};
static int b53_spi_probe(struct spi_device *spi)
{
struct b53_device *dev;
int ret;
dev = b53_switch_alloc(&spi->dev, &b53_spi_ops, spi);
if (!dev)
return -ENOMEM;
if (spi->dev.platform_data)
dev->pdata = spi->dev.platform_data;
ret = b53_switch_register(dev);
if (ret)
return ret;
spi_set_drvdata(spi, dev);
return 0;
}
static void b53_spi_remove(struct spi_device *spi)
{
struct b53_device *dev = spi_get_drvdata(spi);
if (dev)
b53_switch_remove(dev);
}
static void b53_spi_shutdown(struct spi_device *spi)
{
struct b53_device *dev = spi_get_drvdata(spi);
if (dev)
b53_switch_shutdown(dev);
spi_set_drvdata(spi, NULL);
}
static const struct of_device_id b53_spi_of_match[] = {
{ .compatible = "brcm,bcm5325" },
{ .compatible = "brcm,bcm5365" },
{ .compatible = "brcm,bcm5395" },
{ .compatible = "brcm,bcm5397" },
{ .compatible = "brcm,bcm5398" },
{ .compatible = "brcm,bcm53115" },
{ .compatible = "brcm,bcm53125" },
{ .compatible = "brcm,bcm53128" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, b53_spi_of_match);
static const struct spi_device_id b53_spi_ids[] = {
{ .name = "bcm5325" },
{ .name = "bcm5365" },
{ .name = "bcm5395" },
{ .name = "bcm5397" },
{ .name = "bcm5398" },
{ .name = "bcm53115" },
{ .name = "bcm53125" },
{ .name = "bcm53128" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, b53_spi_ids);
static struct spi_driver b53_spi_driver = {
.driver = {
.name = "b53-switch",
.of_match_table = b53_spi_of_match,
},
.probe = b53_spi_probe,
.remove = b53_spi_remove,
.shutdown = b53_spi_shutdown,
.id_table = b53_spi_ids,
};
module_spi_driver(b53_spi_driver);
MODULE_AUTHOR("Jonas Gorski <[email protected]>");
MODULE_DESCRIPTION("B53 SPI access driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/net/dsa/b53/b53_spi.c |
/*
* B53 register access through MII registers
*
* Copyright (C) 2011-2013 Jonas Gorski <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/phy.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/brcmphy.h>
#include <linux/rtnetlink.h>
#include <net/dsa.h>
#include "b53_priv.h"
/* MII registers */
#define REG_MII_PAGE 0x10 /* MII Page register */
#define REG_MII_ADDR 0x11 /* MII Address register */
#define REG_MII_DATA0 0x18 /* MII Data register 0 */
#define REG_MII_DATA1 0x19 /* MII Data register 1 */
#define REG_MII_DATA2 0x1a /* MII Data register 2 */
#define REG_MII_DATA3 0x1b /* MII Data register 3 */
#define REG_MII_PAGE_ENABLE BIT(0)
#define REG_MII_ADDR_WRITE BIT(0)
#define REG_MII_ADDR_READ BIT(1)
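/* Register access goes through the Broadcom pseudo-PHY: the page number is
 * latched via REG_MII_PAGE (with the enable bit set), the register offset
 * and read/write opcode go into REG_MII_ADDR, and the driver then polls
 * until both opcode bits clear before moving 16-bit words through the
 * REG_MII_DATA0..3 window.
 */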
static int b53_mdio_op(struct b53_device *dev, u8 page, u8 reg, u16 op)
{
int i;
u16 v;
int ret;
struct mii_bus *bus = dev->priv;
if (dev->current_page != page) {
/* set page number */
v = (page << 8) | REG_MII_PAGE_ENABLE;
ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
REG_MII_PAGE, v);
if (ret)
return ret;
dev->current_page = page;
}
/* set register address */
v = (reg << 8) | op;
ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_ADDR, v);
if (ret)
return ret;
/* check if operation completed */
for (i = 0; i < 5; ++i) {
v = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
REG_MII_ADDR);
if (!(v & (REG_MII_ADDR_WRITE | REG_MII_ADDR_READ)))
break;
usleep_range(10, 100);
}
if (WARN_ON(i == 5))
return -EIO;
return 0;
}
static int b53_mdio_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
{
struct mii_bus *bus = dev->priv;
int ret;
ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
if (ret)
return ret;
*val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
REG_MII_DATA0) & 0xff;
return 0;
}
static int b53_mdio_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
{
struct mii_bus *bus = dev->priv;
int ret;
ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
if (ret)
return ret;
*val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0);
return 0;
}
static int b53_mdio_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
{
struct mii_bus *bus = dev->priv;
int ret;
ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
if (ret)
return ret;
*val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0);
*val |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
REG_MII_DATA1) << 16;
return 0;
}
static int b53_mdio_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
{
struct mii_bus *bus = dev->priv;
u64 temp = 0;
int i;
int ret;
ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
if (ret)
return ret;
for (i = 2; i >= 0; i--) {
temp <<= 16;
temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
REG_MII_DATA0 + i);
}
*val = temp;
return 0;
}
static int b53_mdio_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
{
struct mii_bus *bus = dev->priv;
u64 temp = 0;
int i;
int ret;
ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
if (ret)
return ret;
for (i = 3; i >= 0; i--) {
temp <<= 16;
temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
REG_MII_DATA0 + i);
}
*val = temp;
return 0;
}
static int b53_mdio_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
{
struct mii_bus *bus = dev->priv;
int ret;
ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
REG_MII_DATA0, value);
if (ret)
return ret;
return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
}
static int b53_mdio_write16(struct b53_device *dev, u8 page, u8 reg,
u16 value)
{
struct mii_bus *bus = dev->priv;
int ret;
ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
REG_MII_DATA0, value);
if (ret)
return ret;
return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
}
static int b53_mdio_write32(struct b53_device *dev, u8 page, u8 reg,
u32 value)
{
struct mii_bus *bus = dev->priv;
unsigned int i;
u32 temp = value;
for (i = 0; i < 2; i++) {
int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
REG_MII_DATA0 + i,
temp & 0xffff);
if (ret)
return ret;
temp >>= 16;
}
return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
}
static int b53_mdio_write48(struct b53_device *dev, u8 page, u8 reg,
u64 value)
{
struct mii_bus *bus = dev->priv;
unsigned int i;
u64 temp = value;
for (i = 0; i < 3; i++) {
int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
REG_MII_DATA0 + i,
temp & 0xffff);
if (ret)
return ret;
temp >>= 16;
}
return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
}
static int b53_mdio_write64(struct b53_device *dev, u8 page, u8 reg,
u64 value)
{
struct mii_bus *bus = dev->priv;
unsigned int i;
u64 temp = value;
for (i = 0; i < 4; i++) {
int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
REG_MII_DATA0 + i,
temp & 0xffff);
if (ret)
return ret;
temp >>= 16;
}
return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
}
static int b53_mdio_phy_read16(struct b53_device *dev, int addr, int reg,
u16 *value)
{
struct mii_bus *bus = dev->priv;
*value = mdiobus_read_nested(bus, addr, reg);
return 0;
}
static int b53_mdio_phy_write16(struct b53_device *dev, int addr, int reg,
u16 value)
{
struct mii_bus *bus = dev->bus;
return mdiobus_write_nested(bus, addr, reg, value);
}
static const struct b53_io_ops b53_mdio_ops = {
.read8 = b53_mdio_read8,
.read16 = b53_mdio_read16,
.read32 = b53_mdio_read32,
.read48 = b53_mdio_read48,
.read64 = b53_mdio_read64,
.write8 = b53_mdio_write8,
.write16 = b53_mdio_write16,
.write32 = b53_mdio_write32,
.write48 = b53_mdio_write48,
.write64 = b53_mdio_write64,
.phy_read16 = b53_mdio_phy_read16,
.phy_write16 = b53_mdio_phy_write16,
};
#define B53_BRCM_OUI_1 0x0143bc00
#define B53_BRCM_OUI_2 0x03625c00
#define B53_BRCM_OUI_3 0x00406000
#define B53_BRCM_OUI_4 0x01410c00
#define B53_BRCM_OUI_5 0xae025000
static int b53_mdio_probe(struct mdio_device *mdiodev)
{
struct b53_device *dev;
u32 phy_id;
int ret;
/* allow the generic PHY driver to take over the non-management MDIO
* addresses
*/
if (mdiodev->addr != BRCM_PSEUDO_PHY_ADDR && mdiodev->addr != 0) {
dev_err(&mdiodev->dev, "leaving address %d to PHY\n",
mdiodev->addr);
return -ENODEV;
}
/* read the first port's id */
phy_id = mdiobus_read(mdiodev->bus, 0, 2) << 16;
phy_id |= mdiobus_read(mdiodev->bus, 0, 3);
/* BCM5325, BCM539x (OUI_1)
* BCM53125, BCM53128 (OUI_2)
* BCM5365 (OUI_3)
*/
if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
(phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
(phy_id & 0xfffffc00) != B53_BRCM_OUI_3 &&
(phy_id & 0xfffffc00) != B53_BRCM_OUI_4 &&
(phy_id & 0xfffffc00) != B53_BRCM_OUI_5) {
dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
return -ENODEV;
}
/* First probe will come from SWITCH_MDIO controller on the 7445D0
* switch, which will conflict with the 7445 integrated switch
* pseudo-phy (we end up programming both). In that case, we return
* -EPROBE_DEFER for the first time we get here, and wait until we come
* back with the slave MDIO bus which has the correct indirection
* layer setup
*/
if (of_machine_is_compatible("brcm,bcm7445d0") &&
strcmp(mdiodev->bus->name, "sf2 slave mii"))
return -EPROBE_DEFER;
dev = b53_switch_alloc(&mdiodev->dev, &b53_mdio_ops, mdiodev->bus);
if (!dev)
return -ENOMEM;
/* we don't use page 0xff, so force a page set */
dev->current_page = 0xff;
dev->bus = mdiodev->bus;
dev_set_drvdata(&mdiodev->dev, dev);
ret = b53_switch_register(dev);
if (ret) {
dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret);
return ret;
}
return ret;
}
static void b53_mdio_remove(struct mdio_device *mdiodev)
{
struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
if (!dev)
return;
b53_switch_remove(dev);
}
static void b53_mdio_shutdown(struct mdio_device *mdiodev)
{
struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
if (!dev)
return;
b53_switch_shutdown(dev);
dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id b53_of_match[] = {
{ .compatible = "brcm,bcm5325" },
{ .compatible = "brcm,bcm53115" },
{ .compatible = "brcm,bcm53125" },
{ .compatible = "brcm,bcm53128" },
{ .compatible = "brcm,bcm53134" },
{ .compatible = "brcm,bcm5365" },
{ .compatible = "brcm,bcm5389" },
{ .compatible = "brcm,bcm5395" },
{ .compatible = "brcm,bcm5397" },
{ .compatible = "brcm,bcm5398" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, b53_of_match);
static struct mdio_driver b53_mdio_driver = {
.probe = b53_mdio_probe,
.remove = b53_mdio_remove,
.shutdown = b53_mdio_shutdown,
.mdiodrv.driver = {
.name = "bcm53xx",
.of_match_table = b53_of_match,
},
};
mdio_module_driver(b53_mdio_driver);
MODULE_DESCRIPTION("B53 MDIO access driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/net/dsa/b53/b53_mdio.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Northstar Plus switch SerDes/SGMII PHY main logic
*
* Copyright (C) 2018 Florian Fainelli <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <net/dsa.h>
#include "b53_priv.h"
#include "b53_serdes.h"
#include "b53_regs.h"
static inline struct b53_pcs *pcs_to_b53_pcs(struct phylink_pcs *pcs)
{
return container_of(pcs, struct b53_pcs, pcs);
}
static void b53_serdes_write_blk(struct b53_device *dev, u8 offset, u16 block,
u16 value)
{
b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block);
b53_write16(dev, B53_SERDES_PAGE, offset, value);
}
static u16 b53_serdes_read_blk(struct b53_device *dev, u8 offset, u16 block)
{
u16 value;
b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block);
b53_read16(dev, B53_SERDES_PAGE, offset, &value);
return value;
}
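/* SerDes registers are banked: a block address is written first through
 * B53_SERDES_BLKADDR, then the 16-bit register at the given offset is
 * accessed. The lane-aware wrappers below additionally select lane 0 or 1
 * before touching a block.
 */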
static void b53_serdes_set_lane(struct b53_device *dev, u8 lane)
{
if (dev->serdes_lane == lane)
return;
WARN_ON(lane > 1);
b53_serdes_write_blk(dev, B53_SERDES_LANE,
SERDES_XGXSBLK0_BLOCKADDRESS, lane);
dev->serdes_lane = lane;
}
static void b53_serdes_write(struct b53_device *dev, u8 lane,
u8 offset, u16 block, u16 value)
{
b53_serdes_set_lane(dev, lane);
b53_serdes_write_blk(dev, offset, block, value);
}
static u16 b53_serdes_read(struct b53_device *dev, u8 lane,
u8 offset, u16 block)
{
b53_serdes_set_lane(dev, lane);
return b53_serdes_read_blk(dev, offset, block);
}
static int b53_serdes_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 reg;
reg = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
SERDES_DIGITAL_BLK);
if (interface == PHY_INTERFACE_MODE_1000BASEX)
reg |= FIBER_MODE_1000X;
else
reg &= ~FIBER_MODE_1000X;
b53_serdes_write(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
SERDES_DIGITAL_BLK, reg);
return 0;
}
static void b53_serdes_an_restart(struct phylink_pcs *pcs)
{
struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 reg;
reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
SERDES_MII_BLK);
reg |= BMCR_ANRESTART;
b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
SERDES_MII_BLK, reg);
}
static void b53_serdes_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 dig, bmsr;
dig = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_STATUS,
SERDES_DIGITAL_BLK);
bmsr = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMSR),
SERDES_MII_BLK);
switch ((dig >> SPEED_STATUS_SHIFT) & SPEED_STATUS_MASK) {
case SPEED_STATUS_10:
state->speed = SPEED_10;
break;
case SPEED_STATUS_100:
state->speed = SPEED_100;
break;
case SPEED_STATUS_1000:
state->speed = SPEED_1000;
break;
default:
case SPEED_STATUS_2500:
state->speed = SPEED_2500;
break;
}
state->duplex = dig & DUPLEX_STATUS ? DUPLEX_FULL : DUPLEX_HALF;
state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
state->link = !!(dig & LINK_STATUS);
if (dig & PAUSE_RESOLUTION_RX_SIDE)
state->pause |= MLO_PAUSE_RX;
if (dig & PAUSE_RESOLUTION_TX_SIDE)
state->pause |= MLO_PAUSE_TX;
}
void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
phy_interface_t interface, bool link_up)
{
u8 lane = b53_serdes_map_lane(dev, port);
u16 reg;
if (lane == B53_INVALID_LANE)
return;
reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
SERDES_MII_BLK);
if (link_up)
reg &= ~BMCR_PDOWN;
else
reg |= BMCR_PDOWN;
b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
SERDES_MII_BLK, reg);
}
EXPORT_SYMBOL(b53_serdes_link_set);
static const struct phylink_pcs_ops b53_pcs_ops = {
.pcs_get_state = b53_serdes_get_state,
.pcs_config = b53_serdes_config,
.pcs_an_restart = b53_serdes_an_restart,
};
void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
struct phylink_config *config)
{
u8 lane = b53_serdes_map_lane(dev, port);
if (lane == B53_INVALID_LANE)
return;
switch (lane) {
case 0:
/* It appears lane 0 supports 2500base-X and 1000base-X */
__set_bit(PHY_INTERFACE_MODE_2500BASEX,
config->supported_interfaces);
config->mac_capabilities |= MAC_2500FD;
fallthrough;
case 1:
/* It appears lane 1 only supports 1000base-X and SGMII */
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_SGMII,
config->supported_interfaces);
config->mac_capabilities |= MAC_1000FD;
break;
default:
break;
}
}
EXPORT_SYMBOL(b53_serdes_phylink_get_caps);
struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev,
int port,
phy_interface_t interface)
{
u8 lane = b53_serdes_map_lane(dev, port);
if (lane == B53_INVALID_LANE || lane >= B53_N_PCS ||
!dev->pcs[lane].dev)
return NULL;
if (!phy_interface_mode_is_8023z(interface) &&
interface != PHY_INTERFACE_MODE_SGMII)
return NULL;
return &dev->pcs[lane].pcs;
}
EXPORT_SYMBOL(b53_serdes_phylink_mac_select_pcs);
int b53_serdes_init(struct b53_device *dev, int port)
{
u8 lane = b53_serdes_map_lane(dev, port);
struct b53_pcs *pcs;
u16 id0, msb, lsb;
if (lane == B53_INVALID_LANE)
return -EINVAL;
id0 = b53_serdes_read(dev, lane, B53_SERDES_ID0, SERDES_ID0);
msb = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_PHYSID1),
SERDES_MII_BLK);
lsb = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_PHYSID2),
SERDES_MII_BLK);
if (id0 == 0 || id0 == 0xffff) {
dev_err(dev->dev, "SerDes not initialized, check settings\n");
return -ENODEV;
}
dev_info(dev->dev,
"SerDes lane %d, model: %d, rev %c%d (OUI: 0x%08x)\n",
lane, id0 & SERDES_ID0_MODEL_MASK,
(id0 >> SERDES_ID0_REV_LETTER_SHIFT) + 0x41,
(id0 >> SERDES_ID0_REV_NUM_SHIFT) & SERDES_ID0_REV_NUM_MASK,
(u32)msb << 16 | lsb);
pcs = &dev->pcs[lane];
pcs->dev = dev;
pcs->lane = lane;
pcs->pcs.ops = &b53_pcs_ops;
pcs->pcs.neg_mode = true;
return 0;
}
EXPORT_SYMBOL(b53_serdes_init);
MODULE_AUTHOR("Florian Fainelli <[email protected]>");
MODULE_DESCRIPTION("B53 Switch SerDes driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/net/dsa/b53/b53_serdes.c |
/*
* B53 register access through Switch Register Access Bridge Registers
*
* Copyright (C) 2013 Hauke Mehrtens <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/platform_data/b53.h>
#include <linux/of.h>
#include "b53_priv.h"
#include "b53_serdes.h"
/* command and status register of the SRAB */
#define B53_SRAB_CMDSTAT 0x2c
#define B53_SRAB_CMDSTAT_RST BIT(2)
#define B53_SRAB_CMDSTAT_WRITE BIT(1)
#define B53_SRAB_CMDSTAT_GORDYN BIT(0)
#define B53_SRAB_CMDSTAT_PAGE 24
#define B53_SRAB_CMDSTAT_REG 16
/* high order word of write data to switch register */
#define B53_SRAB_WD_H 0x30
/* low order word of write data to switch register */
#define B53_SRAB_WD_L 0x34
/* high order word of read data from switch register */
#define B53_SRAB_RD_H 0x38
/* low order word of read data from switch register */
#define B53_SRAB_RD_L 0x3c
/* command and status register of the SRAB */
#define B53_SRAB_CTRLS 0x40
#define B53_SRAB_CTRLS_HOST_INTR BIT(1)
#define B53_SRAB_CTRLS_RCAREQ BIT(3)
#define B53_SRAB_CTRLS_RCAGNT BIT(4)
#define B53_SRAB_CTRLS_SW_INIT_DONE BIT(6)
/* the register captures interrupt pulses from the switch */
#define B53_SRAB_INTR 0x44
#define B53_SRAB_INTR_P(x) BIT(x)
#define B53_SRAB_SWITCH_PHY BIT(8)
#define B53_SRAB_1588_SYNC BIT(9)
#define B53_SRAB_IMP1_SLEEP_TIMER BIT(10)
#define B53_SRAB_P7_SLEEP_TIMER BIT(11)
#define B53_SRAB_IMP0_SLEEP_TIMER BIT(12)
/* Port mux configuration registers */
#define B53_MUX_CONFIG_P5 0x00
#define MUX_CONFIG_SGMII 0
#define MUX_CONFIG_MII_LITE 1
#define MUX_CONFIG_RGMII 2
#define MUX_CONFIG_GMII 3
#define MUX_CONFIG_GPHY 4
#define MUX_CONFIG_INTERNAL 5
#define MUX_CONFIG_MASK 0x7
#define B53_MUX_CONFIG_P4 0x04
struct b53_srab_port_priv {
int irq;
bool irq_enabled;
struct b53_device *dev;
unsigned int num;
phy_interface_t mode;
};
struct b53_srab_priv {
void __iomem *regs;
void __iomem *mux_config;
struct b53_srab_port_priv port_intrs[B53_N_PORTS];
};
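/* SRAB indirect access sequence: request bus arbitration via RCAREQ and
 * wait for RCAGNT, program page/register/opcode into B53_SRAB_CMDSTAT,
 * poll until GORDYN clears, then move data through the WD/RD low and high
 * word registers. The grant is released again after every access.
 */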
static int b53_srab_request_grant(struct b53_device *dev)
{
struct b53_srab_priv *priv = dev->priv;
u8 __iomem *regs = priv->regs;
u32 ctrls;
int i;
ctrls = readl(regs + B53_SRAB_CTRLS);
ctrls |= B53_SRAB_CTRLS_RCAREQ;
writel(ctrls, regs + B53_SRAB_CTRLS);
for (i = 0; i < 20; i++) {
ctrls = readl(regs + B53_SRAB_CTRLS);
if (ctrls & B53_SRAB_CTRLS_RCAGNT)
break;
usleep_range(10, 100);
}
if (WARN_ON(i == 20))
return -EIO;
return 0;
}
static void b53_srab_release_grant(struct b53_device *dev)
{
struct b53_srab_priv *priv = dev->priv;
u8 __iomem *regs = priv->regs;
u32 ctrls;
ctrls = readl(regs + B53_SRAB_CTRLS);
ctrls &= ~B53_SRAB_CTRLS_RCAREQ;
writel(ctrls, regs + B53_SRAB_CTRLS);
}
static int b53_srab_op(struct b53_device *dev, u8 page, u8 reg, u32 op)
{
struct b53_srab_priv *priv = dev->priv;
u8 __iomem *regs = priv->regs;
int i;
u32 cmdstat;
/* set register address */
cmdstat = (page << B53_SRAB_CMDSTAT_PAGE) |
(reg << B53_SRAB_CMDSTAT_REG) |
B53_SRAB_CMDSTAT_GORDYN |
op;
writel(cmdstat, regs + B53_SRAB_CMDSTAT);
/* check if operation completed */
for (i = 0; i < 5; ++i) {
cmdstat = readl(regs + B53_SRAB_CMDSTAT);
if (!(cmdstat & B53_SRAB_CMDSTAT_GORDYN))
break;
usleep_range(10, 100);
}
if (WARN_ON(i == 5))
return -EIO;
return 0;
}
static int b53_srab_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
{
struct b53_srab_priv *priv = dev->priv;
u8 __iomem *regs = priv->regs;
int ret = 0;
ret = b53_srab_request_grant(dev);
if (ret)
goto err;
ret = b53_srab_op(dev, page, reg, 0);
if (ret)
goto err;
*val = readl(regs + B53_SRAB_RD_L) & 0xff;
err:
b53_srab_release_grant(dev);
return ret;
}
static int b53_srab_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
{
struct b53_srab_priv *priv = dev->priv;
u8 __iomem *regs = priv->regs;
int ret = 0;
ret = b53_srab_request_grant(dev);
if (ret)
goto err;
ret = b53_srab_op(dev, page, reg, 0);
if (ret)
goto err;
*val = readl(regs + B53_SRAB_RD_L) & 0xffff;
err:
b53_srab_release_grant(dev);
return ret;
}
static int b53_srab_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
{
struct b53_srab_priv *priv = dev->priv;
u8 __iomem *regs = priv->regs;
int ret = 0;
ret = b53_srab_request_grant(dev);
if (ret)
goto err;
ret = b53_srab_op(dev, page, reg, 0);
if (ret)
goto err;
*val = readl(regs + B53_SRAB_RD_L);
err:
b53_srab_release_grant(dev);
return ret;
}
static int b53_srab_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
{
struct b53_srab_priv *priv = dev->priv;
u8 __iomem *regs = priv->regs;
int ret = 0;
ret = b53_srab_request_grant(dev);
if (ret)
goto err;
ret = b53_srab_op(dev, page, reg, 0);
if (ret)
goto err;
*val = readl(regs + B53_SRAB_RD_L);
*val += ((u64)readl(regs + B53_SRAB_RD_H) & 0xffff) << 32;
err:
b53_srab_release_grant(dev);
return ret;
}
static int b53_srab_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
{
struct b53_srab_priv *priv = dev->priv;
u8 __iomem *regs = priv->regs;
int ret = 0;
ret = b53_srab_request_grant(dev);
if (ret)
goto err;
ret = b53_srab_op(dev, page, reg, 0);
if (ret)
goto err;
*val = readl(regs + B53_SRAB_RD_L);
*val += (u64)readl(regs + B53_SRAB_RD_H) << 32;
err:
b53_srab_release_grant(dev);
return ret;
}
static int b53_srab_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
{
struct b53_srab_priv *priv = dev->priv;
u8 __iomem *regs = priv->regs;
int ret = 0;
ret = b53_srab_request_grant(dev);
if (ret)
goto err;
writel(value, regs + B53_SRAB_WD_L);
ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
err:
b53_srab_release_grant(dev);
return ret;
}
static int b53_srab_write16(struct b53_device *dev, u8 page, u8 reg,
u16 value)
{
struct b53_srab_priv *priv = dev->priv;
u8 __iomem *regs = priv->regs;
int ret = 0;
ret = b53_srab_request_grant(dev);
if (ret)
goto err;
writel(value, regs + B53_SRAB_WD_L);
ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
err:
b53_srab_release_grant(dev);
return ret;
}
static int b53_srab_write32(struct b53_device *dev, u8 page, u8 reg,
u32 value)
{
struct b53_srab_priv *priv = dev->priv;
u8 __iomem *regs = priv->regs;
int ret = 0;
ret = b53_srab_request_grant(dev);
if (ret)
goto err;
writel(value, regs + B53_SRAB_WD_L);
ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
err:
b53_srab_release_grant(dev);
return ret;
}
static int b53_srab_write48(struct b53_device *dev, u8 page, u8 reg,
u64 value)
{
struct b53_srab_priv *priv = dev->priv;
u8 __iomem *regs = priv->regs;
int ret = 0;
ret = b53_srab_request_grant(dev);
if (ret)
goto err;
writel((u32)value, regs + B53_SRAB_WD_L);
writel((u16)(value >> 32), regs + B53_SRAB_WD_H);
ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
err:
b53_srab_release_grant(dev);
return ret;
}
static int b53_srab_write64(struct b53_device *dev, u8 page, u8 reg,
u64 value)
{
struct b53_srab_priv *priv = dev->priv;
u8 __iomem *regs = priv->regs;
int ret = 0;
ret = b53_srab_request_grant(dev);
if (ret)
goto err;
writel((u32)value, regs + B53_SRAB_WD_L);
writel((u32)(value >> 32), regs + B53_SRAB_WD_H);
ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
err:
b53_srab_release_grant(dev);
return ret;
}
static irqreturn_t b53_srab_port_thread(int irq, void *dev_id)
{
struct b53_srab_port_priv *port = dev_id;
struct b53_device *dev = port->dev;
if (port->mode == PHY_INTERFACE_MODE_SGMII)
b53_port_event(dev->ds, port->num);
return IRQ_HANDLED;
}
static irqreturn_t b53_srab_port_isr(int irq, void *dev_id)
{
struct b53_srab_port_priv *port = dev_id;
struct b53_device *dev = port->dev;
struct b53_srab_priv *priv = dev->priv;
/* Acknowledge the interrupt */
writel(BIT(port->num), priv->regs + B53_SRAB_INTR);
return IRQ_WAKE_THREAD;
}
#if IS_ENABLED(CONFIG_B53_SERDES)
static u8 b53_srab_serdes_map_lane(struct b53_device *dev, int port)
{
struct b53_srab_priv *priv = dev->priv;
struct b53_srab_port_priv *p = &priv->port_intrs[port];
if (p->mode != PHY_INTERFACE_MODE_SGMII)
return B53_INVALID_LANE;
switch (port) {
case 5:
return 0;
case 4:
return 1;
default:
return B53_INVALID_LANE;
}
}
#endif
static int b53_srab_irq_enable(struct b53_device *dev, int port)
{
struct b53_srab_priv *priv = dev->priv;
struct b53_srab_port_priv *p = &priv->port_intrs[port];
int ret = 0;
/* Interrupt is optional and was not specified, do not make
* this fatal
*/
if (p->irq == -ENXIO)
return ret;
ret = request_threaded_irq(p->irq, b53_srab_port_isr,
b53_srab_port_thread, 0,
dev_name(dev->dev), p);
if (!ret)
p->irq_enabled = true;
return ret;
}
static void b53_srab_irq_disable(struct b53_device *dev, int port)
{
struct b53_srab_priv *priv = dev->priv;
struct b53_srab_port_priv *p = &priv->port_intrs[port];
if (p->irq_enabled) {
free_irq(p->irq, p);
p->irq_enabled = false;
}
}
static void b53_srab_phylink_get_caps(struct b53_device *dev, int port,
struct phylink_config *config)
{
struct b53_srab_priv *priv = dev->priv;
struct b53_srab_port_priv *p = &priv->port_intrs[port];
switch (p->mode) {
case PHY_INTERFACE_MODE_SGMII:
#if IS_ENABLED(CONFIG_B53_SERDES)
/* If p->mode indicates SGMII mode, that essentially means we
 * are using a serdes. Ask the serdes for the capabilities.
 */
b53_serdes_phylink_get_caps(dev, port, config);
#endif
break;
case PHY_INTERFACE_MODE_NA:
break;
case PHY_INTERFACE_MODE_RGMII:
/* If we support RGMII, support all RGMII modes, since
* that dictates the PHY delay settings.
*/
phy_interface_set_rgmii(config->supported_interfaces);
break;
default:
/* Some other mode (e.g. MII, GMII etc) */
__set_bit(p->mode, config->supported_interfaces);
break;
}
}
static const struct b53_io_ops b53_srab_ops = {
.read8 = b53_srab_read8,
.read16 = b53_srab_read16,
.read32 = b53_srab_read32,
.read48 = b53_srab_read48,
.read64 = b53_srab_read64,
.write8 = b53_srab_write8,
.write16 = b53_srab_write16,
.write32 = b53_srab_write32,
.write48 = b53_srab_write48,
.write64 = b53_srab_write64,
.irq_enable = b53_srab_irq_enable,
.irq_disable = b53_srab_irq_disable,
.phylink_get_caps = b53_srab_phylink_get_caps,
#if IS_ENABLED(CONFIG_B53_SERDES)
.phylink_mac_select_pcs = b53_serdes_phylink_mac_select_pcs,
.serdes_map_lane = b53_srab_serdes_map_lane,
.serdes_link_set = b53_serdes_link_set,
#endif
};
static const struct of_device_id b53_srab_of_match[] = {
{ .compatible = "brcm,bcm53010-srab" },
{ .compatible = "brcm,bcm53011-srab" },
{ .compatible = "brcm,bcm53012-srab" },
{ .compatible = "brcm,bcm53018-srab" },
{ .compatible = "brcm,bcm53019-srab" },
{ .compatible = "brcm,bcm5301x-srab" },
{ .compatible = "brcm,bcm11360-srab", .data = (void *)BCM583XX_DEVICE_ID },
{ .compatible = "brcm,bcm58522-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm58525-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm58535-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm58622-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm58623-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm58625-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,cygnus-srab", .data = (void *)BCM583XX_DEVICE_ID },
{ .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,omega-srab", .data = (void *)BCM583XX_DEVICE_ID },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, b53_srab_of_match);
static void b53_srab_intr_set(struct b53_srab_priv *priv, bool set)
{
u32 reg;
reg = readl(priv->regs + B53_SRAB_CTRLS);
if (set)
reg |= B53_SRAB_CTRLS_HOST_INTR;
else
reg &= ~B53_SRAB_CTRLS_HOST_INTR;
writel(reg, priv->regs + B53_SRAB_CTRLS);
}
static void b53_srab_prepare_irq(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
struct b53_srab_priv *priv = dev->priv;
struct b53_srab_port_priv *port;
unsigned int i;
char *name;
/* Clear all pending interrupts */
writel(0xffffffff, priv->regs + B53_SRAB_INTR);
for (i = 0; i < B53_N_PORTS; i++) {
port = &priv->port_intrs[i];
/* There is no port 6 */
if (i == 6)
continue;
name = kasprintf(GFP_KERNEL, "link_state_p%d", i);
if (!name)
return;
port->num = i;
port->dev = dev;
port->irq = platform_get_irq_byname_optional(pdev, name);
kfree(name);
}
b53_srab_intr_set(priv, true);
}
static void b53_srab_mux_init(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
struct b53_srab_priv *priv = dev->priv;
struct b53_srab_port_priv *p;
unsigned int port;
u32 reg, off = 0;
int ret;
if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
return;
priv->mux_config = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->mux_config))
return;
/* Obtain the port mux configuration so we know which lanes
* actually map to SerDes lanes
*/
for (port = 5; port > 3; port--, off += 4) {
p = &priv->port_intrs[port];
reg = readl(priv->mux_config + B53_MUX_CONFIG_P5 + off);
switch (reg & MUX_CONFIG_MASK) {
case MUX_CONFIG_SGMII:
p->mode = PHY_INTERFACE_MODE_SGMII;
ret = b53_serdes_init(dev, port);
if (ret)
continue;
break;
case MUX_CONFIG_MII_LITE:
p->mode = PHY_INTERFACE_MODE_MII;
break;
case MUX_CONFIG_GMII:
p->mode = PHY_INTERFACE_MODE_GMII;
break;
case MUX_CONFIG_RGMII:
p->mode = PHY_INTERFACE_MODE_RGMII;
break;
case MUX_CONFIG_INTERNAL:
p->mode = PHY_INTERFACE_MODE_INTERNAL;
break;
default:
p->mode = PHY_INTERFACE_MODE_NA;
break;
}
if (p->mode != PHY_INTERFACE_MODE_NA)
dev_info(&pdev->dev, "Port %d mode: %s\n",
port, phy_modes(p->mode));
}
}
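/*
 * Illustrative sketch, not part of the driver: models the port-mux decode done
 * in b53_srab_mux_init() above, where the low bits of each per-port mux
 * register select the PHY interface mode. The EX_MUX_* values are placeholders
 * chosen for the example only; the real encodings come from the SRAB register
 * definitions.
 */
#include <stdio.h>

enum ex_mux {
	EX_MUX_SGMII,
	EX_MUX_MII_LITE,
	EX_MUX_GMII,
	EX_MUX_RGMII,
	EX_MUX_INTERNAL,
};

static const char *ex_mux_to_mode(unsigned int mux)
{
	switch (mux) {
	case EX_MUX_SGMII:	return "sgmii";
	case EX_MUX_MII_LITE:	return "mii";
	case EX_MUX_GMII:	return "gmii";
	case EX_MUX_RGMII:	return "rgmii";
	case EX_MUX_INTERNAL:	return "internal";
	default:		return "na";
	}
}

int main(void)
{
	/* Ports 5 and 4 are probed in that order, as in the loop above. */
	printf("port 5: %s\n", ex_mux_to_mode(EX_MUX_SGMII));
	printf("port 4: %s\n", ex_mux_to_mode(EX_MUX_RGMII));
	return 0;
}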
static int b53_srab_probe(struct platform_device *pdev)
{
struct b53_platform_data *pdata = pdev->dev.platform_data;
struct device_node *dn = pdev->dev.of_node;
const struct of_device_id *of_id = NULL;
struct b53_srab_priv *priv;
struct b53_device *dev;
if (dn)
of_id = of_match_node(b53_srab_of_match, dn);
if (of_id) {
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->chip_id = (u32)(unsigned long)of_id->data;
}
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv);
if (!dev)
return -ENOMEM;
if (pdata)
dev->pdata = pdata;
platform_set_drvdata(pdev, dev);
b53_srab_prepare_irq(pdev);
b53_srab_mux_init(pdev);
return b53_switch_register(dev);
}
static int b53_srab_remove(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
if (!dev)
return 0;
b53_srab_intr_set(dev->priv, false);
b53_switch_remove(dev);
return 0;
}
static void b53_srab_shutdown(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
if (!dev)
return;
b53_switch_shutdown(dev);
platform_set_drvdata(pdev, NULL);
}
static struct platform_driver b53_srab_driver = {
.probe = b53_srab_probe,
.remove = b53_srab_remove,
.shutdown = b53_srab_shutdown,
.driver = {
.name = "b53-srab-switch",
.of_match_table = b53_srab_of_match,
},
};
module_platform_driver(b53_srab_driver);
MODULE_AUTHOR("Hauke Mehrtens <[email protected]>");
MODULE_DESCRIPTION("B53 Switch Register Access Bridge Registers (SRAB) access driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/net/dsa/b53/b53_srab.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2009, Microsoft Corporation.
*
* Authors:
* Haiyang Zhang <[email protected]>
* Hank Janssen <[email protected]>
*/
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/nls.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/ucs2_string.h>
#include <linux/string.h>
#include "hyperv_net.h"
#include "netvsc_trace.h"
static void rndis_set_multicast(struct work_struct *w);
#define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
struct rndis_request {
struct list_head list_ent;
struct completion wait_event;
struct rndis_message response_msg;
/*
* The buffer for extended info after the RNDIS response message. It's
* referenced based on the data offset in the RNDIS message. Its size
* is enough for current needs, and should be sufficient for the near
* future.
*/
u8 response_ext[RNDIS_EXT_LEN];
/* Simplify allocation by having a netvsc packet inline */
struct hv_netvsc_packet pkt;
struct rndis_message request_msg;
/*
* The buffer for the extended info after the RNDIS request message.
* It is referenced and sized in a similar way as response_ext.
*/
u8 request_ext[RNDIS_EXT_LEN];
};
static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
static struct rndis_device *get_rndis_device(void)
{
struct rndis_device *device;
device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
if (!device)
return NULL;
spin_lock_init(&device->request_lock);
INIT_LIST_HEAD(&device->req_list);
INIT_WORK(&device->mcast_work, rndis_set_multicast);
device->state = RNDIS_DEV_UNINITIALIZED;
return device;
}
static struct rndis_request *get_rndis_request(struct rndis_device *dev,
u32 msg_type,
u32 msg_len)
{
struct rndis_request *request;
struct rndis_message *rndis_msg;
struct rndis_set_request *set;
unsigned long flags;
request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
if (!request)
return NULL;
init_completion(&request->wait_event);
rndis_msg = &request->request_msg;
rndis_msg->ndis_msg_type = msg_type;
rndis_msg->msg_len = msg_len;
request->pkt.q_idx = 0;
/*
* Set the request id. This field is always after the rndis header for
* request/response packet types so we just used the SetRequest as a
* template
*/
set = &rndis_msg->msg.set_req;
set->req_id = atomic_inc_return(&dev->new_req_id);
/* Add to the request list */
spin_lock_irqsave(&dev->request_lock, flags);
list_add_tail(&request->list_ent, &dev->req_list);
spin_unlock_irqrestore(&dev->request_lock, flags);
return request;
}
static void put_rndis_request(struct rndis_device *dev,
struct rndis_request *req)
{
unsigned long flags;
spin_lock_irqsave(&dev->request_lock, flags);
list_del(&req->list_ent);
spin_unlock_irqrestore(&dev->request_lock, flags);
kfree(req);
}
static void dump_rndis_message(struct net_device *netdev,
const struct rndis_message *rndis_msg,
const void *data)
{
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_packet)) {
const struct rndis_packet *pkt = data + RNDIS_HEADER_SIZE;
netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
"data offset %u data len %u, # oob %u, "
"oob offset %u, oob len %u, pkt offset %u, "
"pkt len %u\n",
rndis_msg->msg_len,
pkt->data_offset,
pkt->data_len,
pkt->num_oob_data_elements,
pkt->oob_data_offset,
pkt->oob_data_len,
pkt->per_pkt_info_offset,
pkt->per_pkt_info_len);
}
break;
case RNDIS_MSG_INIT_C:
if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
sizeof(struct rndis_initialize_complete)) {
const struct rndis_initialize_complete *init_complete =
data + RNDIS_HEADER_SIZE;
netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
"(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
"device flags %d, max xfer size 0x%x, max pkts %u, "
"pkt aligned %u)\n",
rndis_msg->msg_len,
init_complete->req_id,
init_complete->status,
init_complete->major_ver,
init_complete->minor_ver,
init_complete->dev_flags,
init_complete->max_xfer_size,
init_complete->max_pkt_per_msg,
init_complete->pkt_alignment_factor);
}
break;
case RNDIS_MSG_QUERY_C:
if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
sizeof(struct rndis_query_complete)) {
const struct rndis_query_complete *query_complete =
data + RNDIS_HEADER_SIZE;
netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
"(len %u, id 0x%x, status 0x%x, buf len %u, "
"buf offset %u)\n",
rndis_msg->msg_len,
query_complete->req_id,
query_complete->status,
query_complete->info_buflen,
query_complete->info_buf_offset);
}
break;
case RNDIS_MSG_SET_C:
if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_set_complete)) {
const struct rndis_set_complete *set_complete =
data + RNDIS_HEADER_SIZE;
netdev_dbg(netdev,
"RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
rndis_msg->msg_len,
set_complete->req_id,
set_complete->status);
}
break;
case RNDIS_MSG_INDICATE:
if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
sizeof(struct rndis_indicate_status)) {
const struct rndis_indicate_status *indicate_status =
data + RNDIS_HEADER_SIZE;
netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
"(len %u, status 0x%x, buf len %u, buf offset %u)\n",
rndis_msg->msg_len,
indicate_status->status,
indicate_status->status_buflen,
indicate_status->status_buf_offset);
}
break;
default:
netdev_dbg(netdev, "0x%x (len %u)\n",
rndis_msg->ndis_msg_type,
rndis_msg->msg_len);
break;
}
}
static int rndis_filter_send_request(struct rndis_device *dev,
struct rndis_request *req)
{
struct hv_netvsc_packet *packet;
struct hv_page_buffer page_buf[2];
struct hv_page_buffer *pb = page_buf;
int ret;
/* Setup the packet to send it */
packet = &req->pkt;
packet->total_data_buflen = req->request_msg.msg_len;
packet->page_buf_cnt = 1;
pb[0].pfn = virt_to_phys(&req->request_msg) >>
HV_HYP_PAGE_SHIFT;
pb[0].len = req->request_msg.msg_len;
pb[0].offset = offset_in_hvpage(&req->request_msg);
/* Add one page_buf when request_msg crosses a page boundary */
if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
packet->page_buf_cnt++;
pb[0].len = HV_HYP_PAGE_SIZE -
pb[0].offset;
pb[1].pfn = virt_to_phys((void *)&req->request_msg
+ pb[0].len) >> HV_HYP_PAGE_SHIFT;
pb[1].offset = 0;
pb[1].len = req->request_msg.msg_len -
pb[0].len;
}
trace_rndis_send(dev->ndev, 0, &req->request_msg);
rcu_read_lock_bh();
ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
rcu_read_unlock_bh();
return ret;
}
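/*
 * Illustrative sketch, not part of the driver: a user-space model of the
 * page-buffer split in rndis_filter_send_request() above for a request
 * message that straddles a 4 KiB hypervisor page. PAGE_SZ and struct
 * ex_page_buf are local stand-ins for HV_HYP_PAGE_SIZE and
 * struct hv_page_buffer.
 */
#include <stdio.h>

#define PAGE_SZ 4096u

struct ex_page_buf {
	unsigned int offset;
	unsigned int len;
};

/* Returns the number of page buffers needed; fills pb[0] and maybe pb[1]. */
static unsigned int ex_split(unsigned int offset_in_page, unsigned int msg_len,
			     struct ex_page_buf pb[2])
{
	pb[0].offset = offset_in_page;
	pb[0].len = msg_len;
	if (offset_in_page + msg_len <= PAGE_SZ)
		return 1;

	pb[0].len = PAGE_SZ - offset_in_page;
	pb[1].offset = 0;
	pb[1].len = msg_len - pb[0].len;
	return 2;
}

int main(void)
{
	struct ex_page_buf pb[2];
	unsigned int n = ex_split(4000, 200, pb);

	/* A 200-byte message at offset 4000 splits into (4000, 96) + (0, 104). */
	printf("n=%u pb0=(%u,%u) pb1=(%u,%u)\n", n,
	       pb[0].offset, pb[0].len, pb[1].offset, pb[1].len);
	return 0;
}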
static void rndis_set_link_state(struct rndis_device *rdev,
struct rndis_request *request)
{
u32 link_status;
struct rndis_query_complete *query_complete;
u32 msg_len = request->response_msg.msg_len;
/* Ensure the packet is big enough to access its fields */
if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete))
return;
query_complete = &request->response_msg.msg.query_complete;
if (query_complete->status == RNDIS_STATUS_SUCCESS &&
query_complete->info_buflen >= sizeof(u32) &&
query_complete->info_buf_offset >= sizeof(*query_complete) &&
msg_len - RNDIS_HEADER_SIZE >= query_complete->info_buf_offset &&
msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
>= query_complete->info_buflen) {
memcpy(&link_status, (void *)((unsigned long)query_complete +
query_complete->info_buf_offset), sizeof(u32));
rdev->link_state = link_status != 0;
}
}
static void rndis_filter_receive_response(struct net_device *ndev,
struct netvsc_device *nvdev,
struct rndis_message *resp,
void *data)
{
u32 *req_id = &resp->msg.init_complete.req_id;
struct rndis_device *dev = nvdev->extension;
struct rndis_request *request = NULL;
bool found = false;
unsigned long flags;
/* This should never happen; it means a control message
 * response was received after the device was removed.
 */
if (dev->state == RNDIS_DEV_UNINITIALIZED) {
netdev_err(ndev,
"got rndis message uninitialized\n");
return;
}
/* Ensure the packet is big enough to read req_id. Req_id is the 1st
* field in any request/response message, so the payload should have at
* least sizeof(u32) bytes
*/
if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
netdev_err(ndev, "rndis msg_len too small: %u\n",
resp->msg_len);
return;
}
/* Copy the request ID into nvchan->recv_buf */
*req_id = *(u32 *)(data + RNDIS_HEADER_SIZE);
spin_lock_irqsave(&dev->request_lock, flags);
list_for_each_entry(request, &dev->req_list, list_ent) {
/*
* All request/response message contains RequestId as the 1st
* field
*/
if (request->request_msg.msg.init_req.req_id == *req_id) {
found = true;
break;
}
}
spin_unlock_irqrestore(&dev->request_lock, flags);
if (found) {
if (resp->msg_len <=
sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
unsafe_memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
data + RNDIS_HEADER_SIZE + sizeof(*req_id),
resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id),
"request->response_msg is followed by a padding of RNDIS_EXT_LEN inside rndis_request");
if (request->request_msg.ndis_msg_type ==
RNDIS_MSG_QUERY && request->request_msg.msg.
query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
rndis_set_link_state(dev, request);
} else {
netdev_err(ndev,
"rndis response buffer overflow "
"detected (size %u max %zu)\n",
resp->msg_len,
sizeof(struct rndis_message));
if (resp->ndis_msg_type ==
RNDIS_MSG_RESET_C) {
/* does not have a request id field */
request->response_msg.msg.reset_complete.
status = RNDIS_STATUS_BUFFER_OVERFLOW;
} else {
request->response_msg.msg.
init_complete.status =
RNDIS_STATUS_BUFFER_OVERFLOW;
}
}
netvsc_dma_unmap(((struct net_device_context *)
netdev_priv(ndev))->device_ctx, &request->pkt);
complete(&request->wait_event);
} else {
netdev_err(ndev,
"no rndis request found for this response "
"(id 0x%x res type 0x%x)\n",
*req_id,
resp->ndis_msg_type);
}
}
/*
* Get the Per-Packet-Info with the specified type
* return NULL if not found.
*/
static inline void *rndis_get_ppi(struct net_device *ndev,
struct rndis_packet *rpkt,
u32 rpkt_len, u32 type, u8 internal,
u32 ppi_size, void *data)
{
struct rndis_per_packet_info *ppi;
int len;
if (rpkt->per_pkt_info_offset == 0)
return NULL;
/* Validate info_offset and info_len */
if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
rpkt->per_pkt_info_offset > rpkt_len) {
netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
rpkt->per_pkt_info_offset);
return NULL;
}
if (rpkt->per_pkt_info_len < sizeof(*ppi) ||
rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
rpkt->per_pkt_info_len);
return NULL;
}
ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
rpkt->per_pkt_info_offset);
/* Copy the PPIs into nvchan->recv_buf */
memcpy(ppi, data + RNDIS_HEADER_SIZE + rpkt->per_pkt_info_offset, rpkt->per_pkt_info_len);
len = rpkt->per_pkt_info_len;
while (len > 0) {
/* Validate ppi_offset and ppi_size */
if (ppi->size > len) {
netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
continue;
}
if (ppi->ppi_offset >= ppi->size) {
netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
continue;
}
if (ppi->type == type && ppi->internal == internal) {
/* ppi->size should be big enough to hold the returned object. */
if (ppi->size - ppi->ppi_offset < ppi_size) {
netdev_err(ndev, "Invalid ppi: size %u ppi_offset %u\n",
ppi->size, ppi->ppi_offset);
continue;
}
return (void *)((ulong)ppi + ppi->ppi_offset);
}
len -= ppi->size;
ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
}
return NULL;
}
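/*
 * Illustrative sketch, not part of the driver: a user-space model of the
 * per-packet-info walk in rndis_get_ppi() above. struct ex_ppi is a
 * simplification of struct rndis_per_packet_info; only the linear walk and
 * the size/offset checks are shown, and the sketch simply bails out on a
 * malformed record instead of logging.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct ex_ppi {
	uint32_t size;		/* total size of this PPI record */
	uint32_t type;		/* what kind of info it carries */
	uint32_t ppi_offset;	/* offset of the payload inside the record */
};

static const void *ex_find_ppi(const uint8_t *buf, size_t len, uint32_t type,
			       size_t want_size)
{
	while (len >= sizeof(struct ex_ppi)) {
		struct ex_ppi ppi;

		memcpy(&ppi, buf, sizeof(ppi));
		if (ppi.size < sizeof(ppi) || ppi.size > len)
			return NULL;	/* malformed record: give up */
		if (ppi.type == type &&
		    ppi.ppi_offset >= sizeof(ppi) &&
		    ppi.ppi_offset < ppi.size &&
		    ppi.size - ppi.ppi_offset >= want_size)
			return buf + ppi.ppi_offset;
		buf += ppi.size;
		len -= ppi.size;
	}
	return NULL;	/* requested PPI type not present */
}

/* Usage (hypothetical): ex_find_ppi(ppi_area, ppi_area_len, wanted_type, payload_size) */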
static inline
void rsc_add_data(struct netvsc_channel *nvchan,
const struct ndis_pkt_8021q_info *vlan,
const struct ndis_tcp_ip_checksum_info *csum_info,
const u32 *hash_info,
void *data, u32 len)
{
u32 cnt = nvchan->rsc.cnt;
if (cnt) {
nvchan->rsc.pktlen += len;
} else {
/* The data/values pointed by vlan, csum_info and hash_info are shared
* across the different 'fragments' of the RSC packet; store them into
* the packet itself.
*/
if (vlan != NULL) {
memcpy(&nvchan->rsc.vlan, vlan, sizeof(*vlan));
nvchan->rsc.ppi_flags |= NVSC_RSC_VLAN;
} else {
nvchan->rsc.ppi_flags &= ~NVSC_RSC_VLAN;
}
if (csum_info != NULL) {
memcpy(&nvchan->rsc.csum_info, csum_info, sizeof(*csum_info));
nvchan->rsc.ppi_flags |= NVSC_RSC_CSUM_INFO;
} else {
nvchan->rsc.ppi_flags &= ~NVSC_RSC_CSUM_INFO;
}
nvchan->rsc.pktlen = len;
if (hash_info != NULL) {
nvchan->rsc.hash_info = *hash_info;
nvchan->rsc.ppi_flags |= NVSC_RSC_HASH_INFO;
} else {
nvchan->rsc.ppi_flags &= ~NVSC_RSC_HASH_INFO;
}
}
nvchan->rsc.data[cnt] = data;
nvchan->rsc.len[cnt] = len;
nvchan->rsc.cnt++;
}
static int rndis_filter_receive_data(struct net_device *ndev,
struct netvsc_device *nvdev,
struct netvsc_channel *nvchan,
struct rndis_message *msg,
void *data, u32 data_buflen)
{
struct rndis_packet *rndis_pkt = &msg->msg.pkt;
const struct ndis_tcp_ip_checksum_info *csum_info;
const struct ndis_pkt_8021q_info *vlan;
const struct rndis_pktinfo_id *pktinfo_id;
const u32 *hash_info;
u32 data_offset, rpkt_len;
bool rsc_more = false;
int ret;
/* Ensure data_buflen is big enough to read header fields */
if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
data_buflen);
return NVSP_STAT_FAIL;
}
/* Copy the RNDIS packet into nvchan->recv_buf */
memcpy(rndis_pkt, data + RNDIS_HEADER_SIZE, sizeof(*rndis_pkt));
/* Validate rndis_pkt offset */
if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
netdev_err(ndev, "invalid rndis packet offset: %u\n",
rndis_pkt->data_offset);
return NVSP_STAT_FAIL;
}
/* Remove the rndis header and pass it back up the stack */
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
data_buflen -= data_offset;
/*
 * Make sure we got a valid RNDIS message; at this point total_data_buflen
 * should be the data packet size plus the trailer padding size.
 */
if (unlikely(data_buflen < rndis_pkt->data_len)) {
netdev_err(ndev, "rndis message buffer "
"overflow detected (got %u, min %u)"
"...dropping this message!\n",
data_buflen, rndis_pkt->data_len);
return NVSP_STAT_FAIL;
}
vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0, sizeof(*vlan),
data);
csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0,
sizeof(*csum_info), data);
hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0,
sizeof(*hash_info), data);
pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1,
sizeof(*pktinfo_id), data);
/* Identify RSC frags, drop erroneous packets */
if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG)
nvchan->rsc.cnt = 0;
else if (nvchan->rsc.cnt == 0)
goto drop;
rsc_more = true;
if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG)
rsc_more = false;
if (rsc_more && nvchan->rsc.is_last)
goto drop;
} else {
nvchan->rsc.cnt = 0;
}
if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX))
goto drop;
/* Put data into per channel structure.
* Also, remove the rndis trailer padding from rndis packet message
* rndis_pkt->data_len tell us the real data length, we only copy
* the data packet to the stack, without the rndis trailer padding
*/
rsc_add_data(nvchan, vlan, csum_info, hash_info,
data + data_offset, rndis_pkt->data_len);
if (rsc_more)
return NVSP_STAT_SUCCESS;
ret = netvsc_recv_callback(ndev, nvdev, nvchan);
nvchan->rsc.cnt = 0;
return ret;
drop:
return NVSP_STAT_FAIL;
}
int rndis_filter_receive(struct net_device *ndev,
struct netvsc_device *net_dev,
struct netvsc_channel *nvchan,
void *data, u32 buflen)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct rndis_message *rndis_msg = nvchan->recv_buf;
if (buflen < RNDIS_HEADER_SIZE) {
netdev_err(ndev, "Invalid rndis_msg (buflen: %u)\n", buflen);
return NVSP_STAT_FAIL;
}
/* Copy the RNDIS msg header into nvchan->recv_buf */
memcpy(rndis_msg, data, RNDIS_HEADER_SIZE);
/* Validate incoming rndis_message packet */
if (rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
buflen < rndis_msg->msg_len) {
netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
buflen, rndis_msg->msg_len);
return NVSP_STAT_FAIL;
}
if (netif_msg_rx_status(net_device_ctx))
dump_rndis_message(ndev, rndis_msg, data);
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
return rndis_filter_receive_data(ndev, net_dev, nvchan,
rndis_msg, data, buflen);
case RNDIS_MSG_INIT_C:
case RNDIS_MSG_QUERY_C:
case RNDIS_MSG_SET_C:
/* completion msgs */
rndis_filter_receive_response(ndev, net_dev, rndis_msg, data);
break;
case RNDIS_MSG_INDICATE:
/* notification msgs */
netvsc_linkstatus_callback(ndev, rndis_msg, data, buflen);
break;
default:
netdev_err(ndev,
"unhandled rndis message (type %u len %u)\n",
rndis_msg->ndis_msg_type,
rndis_msg->msg_len);
return NVSP_STAT_FAIL;
}
return NVSP_STAT_SUCCESS;
}
static int rndis_filter_query_device(struct rndis_device *dev,
struct netvsc_device *nvdev,
u32 oid, void *result, u32 *result_size)
{
struct rndis_request *request;
u32 inresult_size = *result_size;
struct rndis_query_request *query;
struct rndis_query_complete *query_complete;
u32 msg_len;
int ret = 0;
if (!result)
return -EINVAL;
*result_size = 0;
request = get_rndis_request(dev, RNDIS_MSG_QUERY,
RNDIS_MESSAGE_SIZE(struct rndis_query_request));
if (!request) {
ret = -ENOMEM;
goto cleanup;
}
/* Setup the rndis query */
query = &request->request_msg.msg.query_req;
query->oid = oid;
query->info_buf_offset = sizeof(struct rndis_query_request);
query->info_buflen = 0;
query->dev_vc_handle = 0;
if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
struct ndis_offload *hwcaps;
u32 nvsp_version = nvdev->nvsp_version;
u8 ndis_rev;
size_t size;
if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
size = NDIS_OFFLOAD_SIZE;
} else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
size = NDIS_OFFLOAD_SIZE_6_1;
} else {
ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
size = NDIS_OFFLOAD_SIZE_6_0;
}
request->request_msg.msg_len += size;
query->info_buflen = size;
hwcaps = (struct ndis_offload *)
((unsigned long)query + query->info_buf_offset);
hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
hwcaps->header.revision = ndis_rev;
hwcaps->header.size = size;
} else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
struct ndis_recv_scale_cap *cap;
request->request_msg.msg_len +=
sizeof(struct ndis_recv_scale_cap);
query->info_buflen = sizeof(struct ndis_recv_scale_cap);
cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
query->info_buf_offset);
cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
}
ret = rndis_filter_send_request(dev, request);
if (ret != 0)
goto cleanup;
wait_for_completion(&request->wait_event);
/* Copy the response back */
query_complete = &request->response_msg.msg.query_complete;
msg_len = request->response_msg.msg_len;
/* Ensure the packet is big enough to access its fields */
if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete)) {
ret = -1;
goto cleanup;
}
if (query_complete->info_buflen > inresult_size ||
query_complete->info_buf_offset < sizeof(*query_complete) ||
msg_len - RNDIS_HEADER_SIZE < query_complete->info_buf_offset ||
msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
< query_complete->info_buflen) {
ret = -1;
goto cleanup;
}
memcpy(result,
(void *)((unsigned long)query_complete +
query_complete->info_buf_offset),
query_complete->info_buflen);
*result_size = query_complete->info_buflen;
cleanup:
if (request)
put_rndis_request(dev, request);
return ret;
}
/* Get the hardware offload capabilities */
static int
rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
struct ndis_offload *caps)
{
u32 caps_len = sizeof(*caps);
int ret;
memset(caps, 0, sizeof(*caps));
ret = rndis_filter_query_device(dev, net_device,
OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
caps, &caps_len);
if (ret)
return ret;
if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
caps->header.type);
return -EINVAL;
}
if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
caps->header.revision);
return -EINVAL;
}
if (caps->header.size > caps_len ||
caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
netdev_warn(dev->ndev,
"invalid NDIS objsize %u, data size %u\n",
caps->header.size, caps_len);
return -EINVAL;
}
return 0;
}
static int rndis_filter_query_device_mac(struct rndis_device *dev,
struct netvsc_device *net_device)
{
u32 size = ETH_ALEN;
return rndis_filter_query_device(dev, net_device,
RNDIS_OID_802_3_PERMANENT_ADDRESS,
dev->hw_mac_adr, &size);
}
#define NWADR_STR "NetworkAddress"
#define NWADR_STRLEN 14
int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
const char *mac)
{
struct rndis_device *rdev = nvdev->extension;
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_config_parameter_info *cpi;
wchar_t *cfg_nwadr, *cfg_mac;
struct rndis_set_complete *set_complete;
char macstr[2*ETH_ALEN+1];
u32 extlen = sizeof(struct rndis_config_parameter_info) +
2*NWADR_STRLEN + 4*ETH_ALEN;
int ret;
request = get_rndis_request(rdev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
if (!request)
return -ENOMEM;
set = &request->request_msg.msg.set_req;
set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
set->info_buflen = extlen;
set->info_buf_offset = sizeof(struct rndis_set_request);
set->dev_vc_handle = 0;
cpi = (struct rndis_config_parameter_info *)((ulong)set +
set->info_buf_offset);
cpi->parameter_name_offset =
sizeof(struct rndis_config_parameter_info);
/* Multiply by 2 because host needs 2 bytes (utf16) for each char */
cpi->parameter_name_length = 2*NWADR_STRLEN;
cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
cpi->parameter_value_offset =
cpi->parameter_name_offset + cpi->parameter_name_length;
/* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */
cpi->parameter_value_length = 4*ETH_ALEN;
cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
cfg_nwadr, NWADR_STRLEN);
if (ret < 0)
goto cleanup;
snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
cfg_mac, 2*ETH_ALEN);
if (ret < 0)
goto cleanup;
ret = rndis_filter_send_request(rdev, request);
if (ret != 0)
goto cleanup;
wait_for_completion(&request->wait_event);
set_complete = &request->response_msg.msg.set_complete;
if (set_complete->status != RNDIS_STATUS_SUCCESS)
ret = -EIO;
cleanup:
put_rndis_request(rdev, request);
return ret;
}
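/*
 * Illustrative sketch, not part of the driver: the size arithmetic behind the
 * RNDIS config-parameter blob built in rndis_filter_set_device_mac() above.
 * EX_CPI_SIZE is an arbitrary stand-in for
 * sizeof(struct rndis_config_parameter_info); only the layout arithmetic is
 * shown.
 */
#include <stdio.h>

#define EX_ETH_ALEN	6
#define EX_NWADR_STRLEN	14	/* strlen("NetworkAddress") */
#define EX_CPI_SIZE	20	/* placeholder struct size for the example */

int main(void)
{
	unsigned int name_len  = 2 * EX_NWADR_STRLEN;	/* UTF-16: 2 bytes per char */
	unsigned int value_len = 4 * EX_ETH_ALEN;	/* MAC byte -> 2 hex chars -> 4 bytes */
	unsigned int name_off  = EX_CPI_SIZE;
	unsigned int value_off = name_off + name_len;
	unsigned int extlen    = EX_CPI_SIZE + name_len + value_len;

	printf("name@%u(+%u) value@%u(+%u) extlen=%u\n",
	       name_off, name_len, value_off, value_len, extlen);
	return 0;
}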
int
rndis_filter_set_offload_params(struct net_device *ndev,
struct netvsc_device *nvdev,
struct ndis_offload_params *req_offloads)
{
struct rndis_device *rdev = nvdev->extension;
struct rndis_request *request;
struct rndis_set_request *set;
struct ndis_offload_params *offload_params;
struct rndis_set_complete *set_complete;
u32 extlen = sizeof(struct ndis_offload_params);
int ret;
u32 vsp_version = nvdev->nvsp_version;
if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
extlen = VERSION_4_OFFLOAD_SIZE;
/* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
* UDP checksum offload.
*/
req_offloads->udp_ip_v4_csum = 0;
req_offloads->udp_ip_v6_csum = 0;
}
request = get_rndis_request(rdev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
if (!request)
return -ENOMEM;
set = &request->request_msg.msg.set_req;
set->oid = OID_TCP_OFFLOAD_PARAMETERS;
set->info_buflen = extlen;
set->info_buf_offset = sizeof(struct rndis_set_request);
set->dev_vc_handle = 0;
offload_params = (struct ndis_offload_params *)((ulong)set +
set->info_buf_offset);
*offload_params = *req_offloads;
offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
offload_params->header.size = extlen;
ret = rndis_filter_send_request(rdev, request);
if (ret != 0)
goto cleanup;
wait_for_completion(&request->wait_event);
set_complete = &request->response_msg.msg.set_complete;
if (set_complete->status != RNDIS_STATUS_SUCCESS) {
netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
set_complete->status);
ret = -EINVAL;
}
cleanup:
put_rndis_request(rdev, request);
return ret;
}
static int rndis_set_rss_param_msg(struct rndis_device *rdev,
const u8 *rss_key, u16 flag)
{
struct net_device *ndev = rdev->ndev;
struct net_device_context *ndc = netdev_priv(ndev);
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
u32 extlen = sizeof(struct ndis_recv_scale_param) +
4 * ndc->rx_table_sz + NETVSC_HASH_KEYLEN;
struct ndis_recv_scale_param *rssp;
u32 *itab;
u8 *keyp;
int i, ret;
request = get_rndis_request(
rdev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
if (!request)
return -ENOMEM;
set = &request->request_msg.msg.set_req;
set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
set->info_buflen = extlen;
set->info_buf_offset = sizeof(struct rndis_set_request);
set->dev_vc_handle = 0;
rssp = (struct ndis_recv_scale_param *)(set + 1);
rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
rssp->flag = flag;
rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
NDIS_HASH_TCP_IPV6;
rssp->indirect_tabsize = 4 * ndc->rx_table_sz;
rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
rssp->hashkey_size = NETVSC_HASH_KEYLEN;
rssp->hashkey_offset = rssp->indirect_taboffset +
rssp->indirect_tabsize;
/* Set indirection table entries */
itab = (u32 *)(rssp + 1);
for (i = 0; i < ndc->rx_table_sz; i++)
itab[i] = ndc->rx_table[i];
/* Set hash key values */
keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
ret = rndis_filter_send_request(rdev, request);
if (ret != 0)
goto cleanup;
wait_for_completion(&request->wait_event);
set_complete = &request->response_msg.msg.set_complete;
if (set_complete->status == RNDIS_STATUS_SUCCESS) {
if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
!(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
} else {
netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
set_complete->status);
ret = -EINVAL;
}
cleanup:
put_rndis_request(rdev, request);
return ret;
}
int rndis_filter_set_rss_param(struct rndis_device *rdev,
const u8 *rss_key)
{
/* Disable RSS before change */
rndis_set_rss_param_msg(rdev, rss_key,
NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
return rndis_set_rss_param_msg(rdev, rss_key, 0);
}
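/*
 * Illustrative sketch, not part of the driver: the offset layout of the RSS
 * parameter buffer built in rndis_set_rss_param_msg() above - the
 * ndis_recv_scale_param header, then the 32-bit indirection table, then the
 * 40-byte Toeplitz hash key. EX_RSSP_SIZE is a placeholder, not the real
 * sizeof(struct ndis_recv_scale_param).
 */
#include <stdio.h>

#define EX_HASH_KEYLEN	40	/* NETVSC_HASH_KEYLEN */
#define EX_RSSP_SIZE	40	/* placeholder header size for the example */

int main(void)
{
	unsigned int rx_table_sz = 128;			/* e.g. ITAB_NUM entries */
	unsigned int itab_size   = 4 * rx_table_sz;	/* one u32 per entry */
	unsigned int itab_off    = EX_RSSP_SIZE;
	unsigned int key_off     = itab_off + itab_size;
	unsigned int extlen      = EX_RSSP_SIZE + itab_size + EX_HASH_KEYLEN;

	printf("itab@%u key@%u extlen=%u\n", itab_off, key_off, extlen);
	return 0;
}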
static int rndis_filter_query_device_link_status(struct rndis_device *dev,
struct netvsc_device *net_device)
{
u32 size = sizeof(u32);
u32 link_status;
return rndis_filter_query_device(dev, net_device,
RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
&link_status, &size);
}
static int rndis_filter_query_link_speed(struct rndis_device *dev,
struct netvsc_device *net_device)
{
u32 size = sizeof(u32);
u32 link_speed;
struct net_device_context *ndc;
int ret;
ret = rndis_filter_query_device(dev, net_device,
RNDIS_OID_GEN_LINK_SPEED,
&link_speed, &size);
if (!ret) {
ndc = netdev_priv(dev->ndev);
/* The link speed reported from host is in 100bps unit, so
* we convert it to Mbps here.
*/
ndc->speed = link_speed / 10000;
}
return ret;
}
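/*
 * Illustrative sketch, not part of the driver: the unit conversion done in
 * rndis_filter_query_link_speed() above. The host reports link speed in
 * units of 100 bps, so dividing by 10000 yields Mbps.
 */
#include <stdio.h>

int main(void)
{
	unsigned int reported = 100000000;	/* 100,000,000 * 100 bps = 10 Gbps */
	unsigned int mbps = reported / 10000;

	printf("%u Mbps\n", mbps);		/* prints 10000 */
	return 0;
}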
static int rndis_filter_set_packet_filter(struct rndis_device *dev,
u32 new_filter)
{
struct rndis_request *request;
struct rndis_set_request *set;
int ret;
if (dev->filter == new_filter)
return 0;
request = get_rndis_request(dev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
sizeof(u32));
if (!request)
return -ENOMEM;
/* Setup the rndis set */
set = &request->request_msg.msg.set_req;
set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
set->info_buflen = sizeof(u32);
set->info_buf_offset = offsetof(typeof(*set), info_buf);
memcpy(set->info_buf, &new_filter, sizeof(u32));
ret = rndis_filter_send_request(dev, request);
if (ret == 0) {
wait_for_completion(&request->wait_event);
dev->filter = new_filter;
}
put_rndis_request(dev, request);
return ret;
}
static void rndis_set_multicast(struct work_struct *w)
{
struct rndis_device *rdev
= container_of(w, struct rndis_device, mcast_work);
u32 filter = NDIS_PACKET_TYPE_DIRECTED;
unsigned int flags = rdev->ndev->flags;
if (flags & IFF_PROMISC) {
filter = NDIS_PACKET_TYPE_PROMISCUOUS;
} else {
if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
if (flags & IFF_BROADCAST)
filter |= NDIS_PACKET_TYPE_BROADCAST;
}
rndis_filter_set_packet_filter(rdev, filter);
}
void rndis_filter_update(struct netvsc_device *nvdev)
{
struct rndis_device *rdev = nvdev->extension;
schedule_work(&rdev->mcast_work);
}
static int rndis_filter_init_device(struct rndis_device *dev,
struct netvsc_device *nvdev)
{
struct rndis_request *request;
struct rndis_initialize_request *init;
struct rndis_initialize_complete *init_complete;
u32 status;
int ret;
request = get_rndis_request(dev, RNDIS_MSG_INIT,
RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
if (!request) {
ret = -ENOMEM;
goto cleanup;
}
/* Setup the rndis set */
init = &request->request_msg.msg.init_req;
init->major_ver = RNDIS_MAJOR_VERSION;
init->minor_ver = RNDIS_MINOR_VERSION;
init->max_xfer_size = 0x4000;
dev->state = RNDIS_DEV_INITIALIZING;
ret = rndis_filter_send_request(dev, request);
if (ret != 0) {
dev->state = RNDIS_DEV_UNINITIALIZED;
goto cleanup;
}
wait_for_completion(&request->wait_event);
init_complete = &request->response_msg.msg.init_complete;
status = init_complete->status;
if (status == RNDIS_STATUS_SUCCESS) {
dev->state = RNDIS_DEV_INITIALIZED;
nvdev->max_pkt = init_complete->max_pkt_per_msg;
nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
ret = 0;
} else {
dev->state = RNDIS_DEV_UNINITIALIZED;
ret = -EINVAL;
}
cleanup:
if (request)
put_rndis_request(dev, request);
return ret;
}
static bool netvsc_device_idle(const struct netvsc_device *nvdev)
{
int i;
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
if (nvchan->mrc.first != nvchan->mrc.next)
return false;
if (atomic_read(&nvchan->queue_sends) > 0)
return false;
}
return true;
}
static void rndis_filter_halt_device(struct netvsc_device *nvdev,
struct rndis_device *dev)
{
struct rndis_request *request;
struct rndis_halt_request *halt;
/* Attempt to do a rndis device halt */
request = get_rndis_request(dev, RNDIS_MSG_HALT,
RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
if (!request)
goto cleanup;
/* Setup the rndis set */
halt = &request->request_msg.msg.halt_req;
halt->req_id = atomic_inc_return(&dev->new_req_id);
/* Ignore return since this msg is optional. */
rndis_filter_send_request(dev, request);
dev->state = RNDIS_DEV_UNINITIALIZED;
cleanup:
nvdev->destroy = true;
/* Force flag to be ordered before waiting */
wmb();
/* Wait for all send completions */
wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
if (request)
put_rndis_request(dev, request);
}
static int rndis_filter_open_device(struct rndis_device *dev)
{
int ret;
if (dev->state != RNDIS_DEV_INITIALIZED)
return 0;
ret = rndis_filter_set_packet_filter(dev,
NDIS_PACKET_TYPE_BROADCAST |
NDIS_PACKET_TYPE_ALL_MULTICAST |
NDIS_PACKET_TYPE_DIRECTED);
if (ret == 0)
dev->state = RNDIS_DEV_DATAINITIALIZED;
return ret;
}
static int rndis_filter_close_device(struct rndis_device *dev)
{
int ret;
if (dev->state != RNDIS_DEV_DATAINITIALIZED)
return 0;
/* Make sure rndis_set_multicast doesn't re-enable filter! */
cancel_work_sync(&dev->mcast_work);
ret = rndis_filter_set_packet_filter(dev, 0);
if (ret == -ENODEV)
ret = 0;
if (ret == 0)
dev->state = RNDIS_DEV_INITIALIZED;
return ret;
}
static void netvsc_sc_open(struct vmbus_channel *new_sc)
{
struct net_device *ndev =
hv_get_drvdata(new_sc->primary_channel->device_obj);
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct netvsc_device *nvscdev;
u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
struct netvsc_channel *nvchan;
int ret;
/* This is safe because this callback only happens when
* new device is being setup and waiting on the channel_init_wait.
*/
nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
if (!nvscdev || chn_index >= nvscdev->num_chn)
return;
nvchan = nvscdev->chan_table + chn_index;
/* Because the device uses NAPI, all the interrupt batching and
* control is done via Net softirq, not the channel handling
*/
set_channel_read_mode(new_sc, HV_CALL_ISR);
/* Set the channel before opening.*/
nvchan->channel = new_sc;
new_sc->next_request_id_callback = vmbus_next_request_id;
new_sc->request_addr_callback = vmbus_request_addr;
new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
ret = vmbus_open(new_sc, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);
if (ret == 0)
napi_enable(&nvchan->napi);
else
netdev_notice(ndev, "sub channel open failed: %d\n", ret);
if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
wake_up(&nvscdev->subchan_open);
}
/* Open sub-channels after completing the handling of the device probe.
* This breaks overlap of processing the host message for the
* new primary channel with the initialization of sub-channels.
*/
int rndis_set_subchannel(struct net_device *ndev,
struct netvsc_device *nvdev,
struct netvsc_device_info *dev_info)
{
struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct hv_device *hv_dev = ndev_ctx->device_ctx;
struct rndis_device *rdev = nvdev->extension;
int i, ret;
ASSERT_RTNL();
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
init_packet->msg.v5_msg.subchn_req.num_subchannels =
nvdev->num_chn - 1;
trace_nvsp_send(ndev, init_packet);
ret = vmbus_sendpacket(hv_dev->channel, init_packet,
sizeof(struct nvsp_message),
(unsigned long)init_packet,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret) {
netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
return ret;
}
wait_for_completion(&nvdev->channel_init_wait);
if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
netdev_err(ndev, "sub channel request failed\n");
return -EIO;
}
/* Check that number of allocated sub channel is within the expected range */
if (init_packet->msg.v5_msg.subchn_comp.num_subchannels > nvdev->num_chn - 1) {
netdev_err(ndev, "invalid number of allocated sub channel\n");
return -EINVAL;
}
nvdev->num_chn = 1 +
init_packet->msg.v5_msg.subchn_comp.num_subchannels;
/* wait for all sub channels to open */
wait_event(nvdev->subchan_open,
atomic_read(&nvdev->open_chn) == nvdev->num_chn);
for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
ndev_ctx->tx_table[i] = i % nvdev->num_chn;
/* ignore failures from setting rss parameters, still have channels */
if (dev_info)
rndis_filter_set_rss_param(rdev, dev_info->rss_key);
else
rndis_filter_set_rss_param(rdev, netvsc_hash_key);
netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
return 0;
}
static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
struct netvsc_device *nvdev)
{
struct net_device *net = rndis_device->ndev;
struct net_device_context *net_device_ctx = netdev_priv(net);
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
int ret;
/* Find HW offload capabilities */
ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
if (ret != 0)
return ret;
/* A value of zero means "no change"; now turn on what we want. */
memset(&offloads, 0, sizeof(struct ndis_offload_params));
/* Linux does not care about IP checksum, always does in kernel */
offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
/* Reset previously set hw_features flags */
net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
net_device_ctx->tx_checksum_mask = 0;
/* Compute tx offload settings based on hw capabilities */
net->hw_features |= NETIF_F_RXCSUM;
net->hw_features |= NETIF_F_SG;
net->hw_features |= NETIF_F_RXHASH;
if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
/* Can checksum TCP */
net->hw_features |= NETIF_F_IP_CSUM;
net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO;
if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
gso_max_size = hwcaps.lsov2.ip4_maxsz;
}
if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
}
}
if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
net->hw_features |= NETIF_F_IPV6_CSUM;
offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
(hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO6;
if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
gso_max_size = hwcaps.lsov2.ip6_maxsz;
}
if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
}
}
if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) {
net->hw_features |= NETIF_F_LRO;
if (net->features & NETIF_F_LRO) {
offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
} else {
offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
}
}
/* In case some hw_features disappeared we need to remove them from
* net->features list as they're no longer supported.
*/
net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
netif_set_tso_max_size(net, gso_max_size);
ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
return ret;
}
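/*
 * Illustrative sketch, not part of the driver: the feature-mask update at the
 * end of rndis_netdev_set_hwcaps() above. Bits outside the NETVSC-supported
 * set are left untouched; bits inside it survive only if they are still
 * present in hw_features. The bit values below are arbitrary examples.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t supported   = 0x0f;	/* pretend NETVSC_SUPPORTED_HW_FEATURES */
	uint64_t hw_features = 0x05;	/* what the host currently offers */
	uint64_t features    = 0x1f;	/* previously enabled features */

	features &= ~supported | hw_features;
	/* 0x1f & (~0x0f | 0x05) keeps bit 0x10 (outside the set) plus 0x05. */
	printf("features=%#llx\n", (unsigned long long)features);	/* 0x15 */
	return 0;
}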
static void rndis_get_friendly_name(struct net_device *net,
struct rndis_device *rndis_device,
struct netvsc_device *net_device)
{
ucs2_char_t wname[256];
unsigned long len;
u8 ifalias[256];
u32 size;
size = sizeof(wname);
if (rndis_filter_query_device(rndis_device, net_device,
RNDIS_OID_GEN_FRIENDLY_NAME,
wname, &size) != 0)
return; /* ignore if host does not support */
if (size == 0)
return; /* name not set */
/* Convert Windows Unicode string to UTF-8 */
len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias));
/* ignore the default value from host */
if (strcmp(ifalias, "Network Adapter") != 0)
dev_set_alias(net, ifalias, len);
}
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device_info *device_info)
{
struct net_device *net = hv_get_drvdata(dev);
struct net_device_context *ndc = netdev_priv(net);
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
u32 mtu, size;
u32 num_possible_rss_qs;
int i, ret;
rndis_device = get_rndis_device();
if (!rndis_device)
return ERR_PTR(-ENODEV);
/* Let the inner driver handle this first to create the netvsc channel
* NOTE! Once the channel is created, we may get a receive callback
* (RndisFilterOnReceive()) before this call is completed
*/
net_device = netvsc_device_add(dev, device_info);
if (IS_ERR(net_device)) {
kfree(rndis_device);
return net_device;
}
/* Initialize the rndis device */
net_device->max_chn = 1;
net_device->num_chn = 1;
net_device->extension = rndis_device;
rndis_device->ndev = net;
/* Send the rndis initialization message */
ret = rndis_filter_init_device(rndis_device, net_device);
if (ret != 0)
goto err_dev_remv;
/* Get the MTU from the host */
size = sizeof(u32);
ret = rndis_filter_query_device(rndis_device, net_device,
RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
&mtu, &size);
if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
net->mtu = mtu;
/* Get the mac address */
ret = rndis_filter_query_device_mac(rndis_device, net_device);
if (ret != 0)
goto err_dev_remv;
memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
/* Get friendly name as ifalias */
if (!net->ifalias)
rndis_get_friendly_name(net, rndis_device, net_device);
/* Query and set hardware capabilities */
ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
if (ret != 0)
goto err_dev_remv;
rndis_filter_query_device_link_status(rndis_device, net_device);
netdev_dbg(net, "Device MAC %pM link state %s\n",
rndis_device->hw_mac_adr,
rndis_device->link_state ? "down" : "up");
if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
goto out;
rndis_filter_query_link_speed(rndis_device, net_device);
/* vRSS setup */
memset(&rsscap, 0, rsscap_size);
ret = rndis_filter_query_device(rndis_device, net_device,
OID_GEN_RECEIVE_SCALE_CAPABILITIES,
&rsscap, &rsscap_size);
if (ret || rsscap.num_recv_que < 2)
goto out;
if (rsscap.num_indirect_tabent &&
rsscap.num_indirect_tabent <= ITAB_NUM_MAX)
ndc->rx_table_sz = rsscap.num_indirect_tabent;
else
ndc->rx_table_sz = ITAB_NUM;
ndc->rx_table = kcalloc(ndc->rx_table_sz, sizeof(u16), GFP_KERNEL);
if (!ndc->rx_table) {
ret = -ENOMEM;
goto err_dev_remv;
}
/* This guarantees that num_possible_rss_qs <= num_online_cpus */
num_possible_rss_qs = min_t(u32, num_online_cpus(),
rsscap.num_recv_que);
net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
/* We will use the given number of channels if available. */
net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
if (!netif_is_rxfh_configured(net)) {
for (i = 0; i < ndc->rx_table_sz; i++)
ndc->rx_table[i] = ethtool_rxfh_indir_default(
i, net_device->num_chn);
}
atomic_set(&net_device->open_chn, 1);
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
for (i = 1; i < net_device->num_chn; i++) {
ret = netvsc_alloc_recv_comp_ring(net_device, i);
if (ret) {
while (--i != 0)
vfree(net_device->chan_table[i].mrc.slots);
goto out;
}
}
for (i = 1; i < net_device->num_chn; i++)
netif_napi_add(net, &net_device->chan_table[i].napi,
netvsc_poll);
return net_device;
out:
/* setting up multiple channels failed */
net_device->max_chn = 1;
net_device->num_chn = 1;
return net_device;
err_dev_remv:
rndis_filter_device_remove(dev, net_device);
return ERR_PTR(ret);
}
void rndis_filter_device_remove(struct hv_device *dev,
struct netvsc_device *net_dev)
{
struct rndis_device *rndis_dev = net_dev->extension;
struct net_device *net = hv_get_drvdata(dev);
struct net_device_context *ndc;
ndc = netdev_priv(net);
/* Halt and release the rndis device */
rndis_filter_halt_device(net_dev, rndis_dev);
netvsc_device_remove(dev);
ndc->rx_table_sz = 0;
kfree(ndc->rx_table);
ndc->rx_table = NULL;
}
int rndis_filter_open(struct netvsc_device *nvdev)
{
if (!nvdev)
return -EINVAL;
return rndis_filter_open_device(nvdev->extension);
}
int rndis_filter_close(struct netvsc_device *nvdev)
{
if (!nvdev)
return -EINVAL;
return rndis_filter_close_device(nvdev->extension);
}
| linux-master | drivers/net/hyperv/rndis_filter.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2009, Microsoft Corporation.
*
* Authors:
* Haiyang Zhang <[email protected]>
* Hank Janssen <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
#include <linux/bpf.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include "hyperv_net.h"
#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
NETIF_MSG_TX_ERR;
static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static LIST_HEAD(netvsc_dev_list);
static void netvsc_change_rx_flags(struct net_device *net, int change)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
int inc;
if (!vf_netdev)
return;
if (change & IFF_PROMISC) {
inc = (net->flags & IFF_PROMISC) ? 1 : -1;
dev_set_promiscuity(vf_netdev, inc);
}
if (change & IFF_ALLMULTI) {
inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
dev_set_allmulti(vf_netdev, inc);
}
}
static void netvsc_set_rx_mode(struct net_device *net)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
struct net_device *vf_netdev;
struct netvsc_device *nvdev;
rcu_read_lock();
vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
if (vf_netdev) {
dev_uc_sync(vf_netdev, net);
dev_mc_sync(vf_netdev, net);
}
nvdev = rcu_dereference(ndev_ctx->nvdev);
if (nvdev)
rndis_filter_update(nvdev);
rcu_read_unlock();
}
static void netvsc_tx_enable(struct netvsc_device *nvscdev,
struct net_device *ndev)
{
nvscdev->tx_disable = false;
virt_wmb(); /* ensure queue wake up mechanism is on */
netif_tx_wake_all_queues(ndev);
}
static int netvsc_open(struct net_device *net)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
struct rndis_device *rdev;
int ret = 0;
netif_carrier_off(net);
/* Open up the device */
ret = rndis_filter_open(nvdev);
if (ret != 0) {
netdev_err(net, "unable to open device (ret %d).\n", ret);
return ret;
}
rdev = nvdev->extension;
if (!rdev->link_state) {
netif_carrier_on(net);
netvsc_tx_enable(nvdev, net);
}
if (vf_netdev) {
/* Setting synthetic device up transparently sets
* slave as up. If open fails, then the slave will
* still be offline (and not used).
*/
ret = dev_open(vf_netdev, NULL);
if (ret)
netdev_warn(net,
"unable to open slave: %s: %d\n",
vf_netdev->name, ret);
}
return 0;
}
static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
unsigned int retry = 0;
int i;
/* Ensure pending bytes in ring are read */
for (;;) {
u32 aread = 0;
for (i = 0; i < nvdev->num_chn; i++) {
struct vmbus_channel *chn
= nvdev->chan_table[i].channel;
if (!chn)
continue;
/* make sure receive not running now */
napi_synchronize(&nvdev->chan_table[i].napi);
aread = hv_get_bytes_to_read(&chn->inbound);
if (aread)
break;
aread = hv_get_bytes_to_read(&chn->outbound);
if (aread)
break;
}
if (aread == 0)
return 0;
if (++retry > RETRY_MAX)
return -ETIMEDOUT;
usleep_range(RETRY_US_LO, RETRY_US_HI);
}
}
static void netvsc_tx_disable(struct netvsc_device *nvscdev,
struct net_device *ndev)
{
if (nvscdev) {
nvscdev->tx_disable = true;
virt_wmb(); /* ensure txq will not wake up after stop */
}
netif_tx_disable(ndev);
}
static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct net_device *vf_netdev
= rtnl_dereference(net_device_ctx->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
int ret;
netvsc_tx_disable(nvdev, net);
/* No need to close rndis filter if it is removed already */
if (!nvdev)
return 0;
ret = rndis_filter_close(nvdev);
if (ret != 0) {
netdev_err(net, "unable to close device (ret %d).\n", ret);
return ret;
}
ret = netvsc_wait_until_empty(nvdev);
if (ret)
netdev_err(net, "Ring buffer not empty after closing rndis\n");
if (vf_netdev)
dev_close(vf_netdev);
return ret;
}
static inline void *init_ppi_data(struct rndis_message *msg,
u32 ppi_size, u32 pkt_type)
{
struct rndis_packet *rndis_pkt = &msg->msg.pkt;
struct rndis_per_packet_info *ppi;
rndis_pkt->data_offset += ppi_size;
ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
+ rndis_pkt->per_pkt_info_len;
ppi->size = ppi_size;
ppi->type = pkt_type;
ppi->internal = 0;
ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
rndis_pkt->per_pkt_info_len += ppi_size;
return ppi + 1;
}
static inline int netvsc_get_tx_queue(struct net_device *ndev,
struct sk_buff *skb, int old_idx)
{
const struct net_device_context *ndc = netdev_priv(ndev);
struct sock *sk = skb->sk;
int q_idx;
q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
(VRSS_SEND_TAB_SIZE - 1)];
/* If queue index changed record the new value */
if (q_idx != old_idx &&
sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
sk_tx_queue_set(sk, q_idx);
return q_idx;
}
/*
* Select queue for transmit.
*
* If a valid queue has already been assigned, then use that.
* Otherwise compute tx queue based on hash and the send table.
*
* This is basically similar to default (netdev_pick_tx) with the added step
* of using the host send_table when no other queue has been assigned.
*
* TODO support XPS - but get_xps_queue not exported
*/
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
int q_idx = sk_tx_queue_get(skb->sk);
if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
/* If forwarding a packet, we use the recorded queue when
* available for better cache locality.
*/
if (skb_rx_queue_recorded(skb))
q_idx = skb_get_rx_queue(skb);
else
q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
}
return q_idx;
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct net_device_context *ndc = netdev_priv(ndev);
struct net_device *vf_netdev;
u16 txq;
rcu_read_lock();
vf_netdev = rcu_dereference(ndc->vf_netdev);
if (vf_netdev) {
const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
if (vf_ops->ndo_select_queue)
txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
else
txq = netdev_pick_tx(vf_netdev, skb, NULL);
/* Record the queue selected by VF so that it can be
* used for common case where VF has more queues than
* the synthetic device.
*/
qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
} else {
txq = netvsc_pick_tx(ndev, skb);
}
rcu_read_unlock();
while (txq >= ndev->real_num_tx_queues)
txq -= ndev->real_num_tx_queues;
return txq;
}
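/* Worked example (illustrative only): if the VF's ndo_select_queue()
* picks txq 6 but the synthetic device only exposes 4 real tx queues,
* the subtraction loop above reduces 6 -> 2, i.e. it is a cheap modulo
* that keeps the returned queue index valid for the synthetic device.
*/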
static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
struct hv_page_buffer *pb)
{
int j = 0;
hvpfn += offset >> HV_HYP_PAGE_SHIFT;
offset = offset & ~HV_HYP_PAGE_MASK;
while (len > 0) {
unsigned long bytes;
bytes = HV_HYP_PAGE_SIZE - offset;
if (bytes > len)
bytes = len;
pb[j].pfn = hvpfn;
pb[j].offset = offset;
pb[j].len = bytes;
offset += bytes;
len -= bytes;
if (offset == HV_HYP_PAGE_SIZE && len) {
hvpfn++;
offset = 0;
j++;
}
}
return j + 1;
}
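/* Worked example (illustrative only, assuming 4 KiB Hyper-V pages):
* a 200-byte buffer starting at in-page offset 4000 straddles a page
* boundary, so fill_pg_buf() emits two entries:
*   pb[0] = { pfn,     .offset = 4000, .len =  96 }
*   pb[1] = { pfn + 1, .offset =    0, .len = 104 }
* and returns 2.
*/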
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
{
u32 slots_used = 0;
char *data = skb->data;
int frags = skb_shinfo(skb)->nr_frags;
int i;
/* The packet is laid out thus:
* 1. hdr: RNDIS header and PPI
* 2. skb linear data
* 3. skb fragment data
*/
slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
offset_in_hvpage(hdr),
len,
&pb[slots_used]);
packet->rmsg_size = len;
packet->rmsg_pgcnt = slots_used;
slots_used += fill_pg_buf(virt_to_hvpfn(data),
offset_in_hvpage(data),
skb_headlen(skb),
&pb[slots_used]);
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
skb_frag_off(frag),
skb_frag_size(frag),
&pb[slots_used]);
}
return slots_used;
}
static int count_skb_frag_slots(struct sk_buff *skb)
{
int i, frags = skb_shinfo(skb)->nr_frags;
int pages = 0;
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
unsigned long size = skb_frag_size(frag);
unsigned long offset = skb_frag_off(frag);
/* Whole unused pages before the fragment data are not counted;
* only the in-page offset matters here.
*/
offset &= ~HV_HYP_PAGE_MASK;
pages += HVPFN_UP(offset + size);
}
return pages;
}
static int netvsc_get_slots(struct sk_buff *skb)
{
char *data = skb->data;
unsigned int offset = offset_in_hvpage(data);
unsigned int len = skb_headlen(skb);
int slots;
int frag_slots;
slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
frag_slots = count_skb_frag_slots(skb);
return slots + frag_slots;
}
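/* Worked example (illustrative only, 4 KiB Hyper-V pages): linear data
* of 1400 bytes starting at in-page offset 3000 needs
* DIV_ROUND_UP(3000 + 1400, 4096) = 2 slots, plus whatever
* count_skb_frag_slots() reports for the fragments. netvsc_xmit() then
* adds 2 more slots for the rndis header before comparing against
* MAX_PAGE_BUFFER_COUNT.
*/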
static u32 net_checksum_info(struct sk_buff *skb)
{
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *ip = ip_hdr(skb);
if (ip->protocol == IPPROTO_TCP)
return TRANSPORT_INFO_IPV4_TCP;
else if (ip->protocol == IPPROTO_UDP)
return TRANSPORT_INFO_IPV4_UDP;
} else {
struct ipv6hdr *ip6 = ipv6_hdr(skb);
if (ip6->nexthdr == IPPROTO_TCP)
return TRANSPORT_INFO_IPV6_TCP;
else if (ip6->nexthdr == IPPROTO_UDP)
return TRANSPORT_INFO_IPV6_UDP;
}
return TRANSPORT_INFO_NOT_IP;
}
/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
struct sk_buff *skb)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
unsigned int len = skb->len;
int rc;
skb->dev = vf_netdev;
skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
rc = dev_queue_xmit(skb);
if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
struct netvsc_vf_pcpu_stats *pcpu_stats
= this_cpu_ptr(ndev_ctx->vf_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
pcpu_stats->tx_packets++;
pcpu_stats->tx_bytes += len;
u64_stats_update_end(&pcpu_stats->syncp);
} else {
this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
}
return rc;
}
static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_netvsc_packet *packet = NULL;
int ret;
unsigned int num_data_pgs;
struct rndis_message *rndis_msg;
struct net_device *vf_netdev;
u32 rndis_msg_size;
u32 hash;
struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
/* If VF is present and up then redirect packets to it.
* Skip the VF if it is marked down or has no carrier.
* If netpoll is in use, then the VF cannot be used either.
*/
vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
if (vf_netdev && netif_running(vf_netdev) &&
netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net) &&
net_device_ctx->data_path_is_vf)
return netvsc_vf_xmit(net, vf_netdev, skb);
/* We will need at most two pages to describe the rndis
* header. We can only transmit MAX_PAGE_BUFFER_COUNT number
* of pages in a single packet. If the skb is scattered across
* more pages, we try linearizing it.
*/
num_data_pgs = netvsc_get_slots(skb) + 2;
if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
++net_device_ctx->eth_stats.tx_scattered;
if (skb_linearize(skb))
goto no_memory;
num_data_pgs = netvsc_get_slots(skb) + 2;
if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
++net_device_ctx->eth_stats.tx_too_big;
goto drop;
}
}
/*
* Place the rndis header in the skb head room and
* the skb->cb will be used for hv_netvsc_packet
* structure.
*/
ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
if (ret)
goto no_memory;
/* Use the skb control buffer for building up the packet */
BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
sizeof_field(struct sk_buff, cb));
packet = (struct hv_netvsc_packet *)skb->cb;
packet->q_idx = skb_get_queue_mapping(skb);
packet->total_data_buflen = skb->len;
packet->total_bytes = skb->len;
packet->total_packets = 1;
rndis_msg = (struct rndis_message *)skb->head;
/* Add the rndis header */
rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
rndis_msg->msg_len = packet->total_data_buflen;
rndis_msg->msg.pkt = (struct rndis_packet) {
.data_offset = sizeof(struct rndis_packet),
.data_len = packet->total_data_buflen,
.per_pkt_info_offset = sizeof(struct rndis_packet),
};
rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
hash = skb_get_hash_raw(skb);
if (hash != 0 && net->real_num_tx_queues > 1) {
u32 *hash_info;
rndis_msg_size += NDIS_HASH_PPI_SIZE;
hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
NBL_HASH_VALUE);
*hash_info = hash;
}
/* When using AF_PACKET we need to drop the VLAN header from
* the frame and update the skb to allow the host OS
* to transmit the 802.1Q packet
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
u16 vlan_tci;
skb_reset_mac_header(skb);
if (eth_type_vlan(eth_hdr(skb)->h_proto)) {
if (unlikely(__skb_vlan_pop(skb, &vlan_tci) != 0)) {
++net_device_ctx->eth_stats.vlan_error;
goto drop;
}
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
/* Update the NDIS header pkt lengths */
packet->total_data_buflen -= VLAN_HLEN;
packet->total_bytes -= VLAN_HLEN;
rndis_msg->msg_len = packet->total_data_buflen;
rndis_msg->msg.pkt.data_len = packet->total_data_buflen;
}
}
if (skb_vlan_tag_present(skb)) {
struct ndis_pkt_8021q_info *vlan;
rndis_msg_size += NDIS_VLAN_PPI_SIZE;
vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
IEEE_8021Q_INFO);
vlan->value = 0;
vlan->vlanid = skb_vlan_tag_get_id(skb);
vlan->cfi = skb_vlan_tag_get_cfi(skb);
vlan->pri = skb_vlan_tag_get_prio(skb);
}
if (skb_is_gso(skb)) {
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
TCP_LARGESEND_PKTINFO);
lso_info->value = 0;
lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
if (skb->protocol == htons(ETH_P_IP)) {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
ip_hdr(skb)->tot_len = 0;
ip_hdr(skb)->check = 0;
tcp_hdr(skb)->check =
~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
} else {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
tcp_v6_gso_csum_prep(skb);
}
lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
struct ndis_tcp_ip_checksum_info *csum_info;
rndis_msg_size += NDIS_CSUM_PPI_SIZE;
csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
TCPIP_CHKSUM_PKTINFO);
csum_info->value = 0;
csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
if (skb->protocol == htons(ETH_P_IP)) {
csum_info->transmit.is_ipv4 = 1;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
csum_info->transmit.tcp_checksum = 1;
else
csum_info->transmit.udp_checksum = 1;
} else {
csum_info->transmit.is_ipv6 = 1;
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
csum_info->transmit.tcp_checksum = 1;
else
csum_info->transmit.udp_checksum = 1;
}
} else {
/* Can't do offload of this type of checksum */
if (skb_checksum_help(skb))
goto drop;
}
}
/* Start filling in the page buffers with the rndis hdr */
rndis_msg->msg_len += rndis_msg_size;
packet->total_data_buflen = rndis_msg->msg_len;
packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
skb, packet, pb);
/* timestamp packet in software */
skb_tx_timestamp(skb);
ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx);
if (likely(ret == 0))
return NETDEV_TX_OK;
if (ret == -EAGAIN) {
++net_device_ctx->eth_stats.tx_busy;
return NETDEV_TX_BUSY;
}
if (ret == -ENOSPC)
++net_device_ctx->eth_stats.tx_no_space;
drop:
dev_kfree_skb_any(skb);
net->stats.tx_dropped++;
return NETDEV_TX_OK;
no_memory:
++net_device_ctx->eth_stats.tx_no_memory;
goto drop;
}
static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
return netvsc_xmit(skb, ndev, false);
}
/*
* netvsc_linkstatus_callback - Link up/down notification
*/
void netvsc_linkstatus_callback(struct net_device *net,
struct rndis_message *resp,
void *data, u32 data_buflen)
{
struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
struct net_device_context *ndev_ctx = netdev_priv(net);
struct netvsc_reconfig *event;
unsigned long flags;
/* Ensure the packet is big enough to access its fields */
if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) {
netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n",
resp->msg_len);
return;
}
/* Copy the RNDIS indicate status into nvchan->recv_buf */
memcpy(indicate, data + RNDIS_HEADER_SIZE, sizeof(*indicate));
/* Update the physical link speed when changing to another vSwitch */
if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
u32 speed;
/* Validate status_buf_offset and status_buflen.
*
* Certain (pre-Fe) implementations of Hyper-V's vSwitch didn't account
* for the status buffer field in resp->msg_len; perform the validation
* using data_buflen (>= resp->msg_len).
*/
if (indicate->status_buflen < sizeof(speed) ||
indicate->status_buf_offset < sizeof(*indicate) ||
data_buflen - RNDIS_HEADER_SIZE < indicate->status_buf_offset ||
data_buflen - RNDIS_HEADER_SIZE - indicate->status_buf_offset
< indicate->status_buflen) {
netdev_err(net, "invalid rndis_indicate_status packet\n");
return;
}
speed = *(u32 *)(data + RNDIS_HEADER_SIZE + indicate->status_buf_offset) / 10000;
ndev_ctx->speed = speed;
return;
}
/* Handle these link change statuses below */
if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
return;
if (net->reg_state != NETREG_REGISTERED)
return;
event = kzalloc(sizeof(*event), GFP_ATOMIC);
if (!event)
return;
event->event = indicate->status;
spin_lock_irqsave(&ndev_ctx->lock, flags);
list_add_tail(&event->list, &ndev_ctx->reconfig_events);
spin_unlock_irqrestore(&ndev_ctx->lock, flags);
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
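/* Note (informational): the divide-by-10000 above converts the reported
* value to Mbps; with the NDIS convention of expressing link speed in
* 100 bps units, a 10 Gbps link would be indicated as 100,000,000 and
* stored as ndev_ctx->speed = 10000.
*/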
/* This function should only be called after skb_record_rx_queue() */
void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int rc;
skb->queue_mapping = skb_get_rx_queue(skb);
__skb_push(skb, ETH_HLEN);
rc = netvsc_xmit(skb, ndev, true);
if (dev_xmit_complete(rc))
return;
dev_kfree_skb_any(skb);
ndev->stats.tx_dropped++;
}
static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
struct iphdr *iph = (struct iphdr *)skb->data;
iph->check = 0;
iph->check = ip_fast_csum(iph, iph->ihl);
}
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct netvsc_channel *nvchan,
struct xdp_buff *xdp)
{
struct napi_struct *napi = &nvchan->napi;
const struct ndis_pkt_8021q_info *vlan = &nvchan->rsc.vlan;
const struct ndis_tcp_ip_checksum_info *csum_info =
&nvchan->rsc.csum_info;
const u32 *hash_info = &nvchan->rsc.hash_info;
u8 ppi_flags = nvchan->rsc.ppi_flags;
struct sk_buff *skb;
void *xbuf = xdp->data_hard_start;
int i;
if (xbuf) {
unsigned int hdroom = xdp->data - xdp->data_hard_start;
unsigned int xlen = xdp->data_end - xdp->data;
unsigned int frag_size = xdp->frame_sz;
skb = build_skb(xbuf, frag_size);
if (!skb) {
__free_page(virt_to_page(xbuf));
return NULL;
}
skb_reserve(skb, hdroom);
skb_put(skb, xlen);
skb->dev = napi->dev;
} else {
skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
if (!skb)
return NULL;
/* Copy to skb. This copy is needed here since the memory
* pointed by hv_netvsc_packet cannot be deallocated.
*/
for (i = 0; i < nvchan->rsc.cnt; i++)
skb_put_data(skb, nvchan->rsc.data[i],
nvchan->rsc.len[i]);
}
skb->protocol = eth_type_trans(skb, net);
/* skb is already created with CHECKSUM_NONE */
skb_checksum_none_assert(skb);
/* Incoming packets may have IP header checksum verified by the host.
* They may not have IP header checksum computed after coalescing.
* We compute it here if the flags are set, because on Linux, the IP
* checksum is always checked.
*/
if ((ppi_flags & NVSC_RSC_CSUM_INFO) && csum_info->receive.ip_checksum_value_invalid &&
csum_info->receive.ip_checksum_succeeded &&
skb->protocol == htons(ETH_P_IP)) {
/* Check that there is enough space to hold the IP header. */
if (skb_headlen(skb) < sizeof(struct iphdr)) {
kfree_skb(skb);
return NULL;
}
netvsc_comp_ipcsum(skb);
}
/* Do L4 checksum offload if enabled and present. */
if ((ppi_flags & NVSC_RSC_CSUM_INFO) && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
if ((ppi_flags & NVSC_RSC_HASH_INFO) && (net->features & NETIF_F_RXHASH))
skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);
if (ppi_flags & NVSC_RSC_VLAN) {
u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
(vlan->cfi ? VLAN_CFI_MASK : 0);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
vlan_tci);
}
return skb;
}
/*
* netvsc_recv_callback - Callback when we receive a packet from the
* "wire" on the specified device.
*/
int netvsc_recv_callback(struct net_device *net,
struct netvsc_device *net_device,
struct netvsc_channel *nvchan)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct vmbus_channel *channel = nvchan->channel;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct sk_buff *skb;
struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
struct xdp_buff xdp;
u32 act;
if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
act = netvsc_run_xdp(net, nvchan, &xdp);
if (act == XDP_REDIRECT)
return NVSP_STAT_SUCCESS;
if (act != XDP_PASS && act != XDP_TX) {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->xdp_drop++;
u64_stats_update_end(&rx_stats->syncp);
return NVSP_STAT_SUCCESS; /* consumed by XDP */
}
/* Allocate a skb - TODO direct I/O to pages? */
skb = netvsc_alloc_recv_skb(net, nvchan, &xdp);
if (unlikely(!skb)) {
++net_device_ctx->eth_stats.rx_no_memory;
return NVSP_STAT_FAIL;
}
skb_record_rx_queue(skb, q_idx);
/*
* Even if injecting the packet, record the statistics
* on the synthetic device because modifying the VF device
* statistics will not work correctly.
*/
u64_stats_update_begin(&rx_stats->syncp);
if (act == XDP_TX)
rx_stats->xdp_tx++;
rx_stats->packets++;
rx_stats->bytes += nvchan->rsc.pktlen;
if (skb->pkt_type == PACKET_BROADCAST)
++rx_stats->broadcast;
else if (skb->pkt_type == PACKET_MULTICAST)
++rx_stats->multicast;
u64_stats_update_end(&rx_stats->syncp);
if (act == XDP_TX) {
netvsc_xdp_xmit(skb, net);
return NVSP_STAT_SUCCESS;
}
napi_gro_receive(&nvchan->napi, skb);
return NVSP_STAT_SUCCESS;
}
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
static void netvsc_get_channels(struct net_device *net,
struct ethtool_channels *channel)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
if (nvdev) {
channel->max_combined = nvdev->max_chn;
channel->combined_count = nvdev->num_chn;
}
}
/* Alloc struct netvsc_device_info, and initialize it from either existing
* struct netvsc_device, or from default values.
*/
static
struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
{
struct netvsc_device_info *dev_info;
struct bpf_prog *prog;
dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
if (!dev_info)
return NULL;
if (nvdev) {
ASSERT_RTNL();
dev_info->num_chn = nvdev->num_chn;
dev_info->send_sections = nvdev->send_section_cnt;
dev_info->send_section_size = nvdev->send_section_size;
dev_info->recv_sections = nvdev->recv_section_cnt;
dev_info->recv_section_size = nvdev->recv_section_size;
memcpy(dev_info->rss_key, nvdev->extension->rss_key,
NETVSC_HASH_KEYLEN);
prog = netvsc_xdp_get(nvdev);
if (prog) {
bpf_prog_inc(prog);
dev_info->bprog = prog;
}
} else {
dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
dev_info->send_sections = NETVSC_DEFAULT_TX;
dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
dev_info->recv_sections = NETVSC_DEFAULT_RX;
dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
}
return dev_info;
}
/* Free struct netvsc_device_info */
static void netvsc_devinfo_put(struct netvsc_device_info *dev_info)
{
if (dev_info->bprog) {
ASSERT_RTNL();
bpf_prog_put(dev_info->bprog);
}
kfree(dev_info);
}
static int netvsc_detach(struct net_device *ndev,
struct netvsc_device *nvdev)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct hv_device *hdev = ndev_ctx->device_ctx;
int ret;
/* Don't continue trying to set up sub-channels */
if (cancel_work_sync(&nvdev->subchan_work))
nvdev->num_chn = 1;
netvsc_xdp_set(ndev, NULL, NULL, nvdev);
/* If device was up (receiving) then shutdown */
if (netif_running(ndev)) {
netvsc_tx_disable(nvdev, ndev);
ret = rndis_filter_close(nvdev);
if (ret) {
netdev_err(ndev,
"unable to close device (ret %d).\n", ret);
return ret;
}
ret = netvsc_wait_until_empty(nvdev);
if (ret) {
netdev_err(ndev,
"Ring buffer not empty after closing rndis\n");
return ret;
}
}
netif_device_detach(ndev);
rndis_filter_device_remove(hdev, nvdev);
return 0;
}
static int netvsc_attach(struct net_device *ndev,
struct netvsc_device_info *dev_info)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct hv_device *hdev = ndev_ctx->device_ctx;
struct netvsc_device *nvdev;
struct rndis_device *rdev;
struct bpf_prog *prog;
int ret = 0;
nvdev = rndis_filter_device_add(hdev, dev_info);
if (IS_ERR(nvdev))
return PTR_ERR(nvdev);
if (nvdev->num_chn > 1) {
ret = rndis_set_subchannel(ndev, nvdev, dev_info);
/* if unavailable, just proceed with one queue */
if (ret) {
nvdev->max_chn = 1;
nvdev->num_chn = 1;
}
}
prog = dev_info->bprog;
if (prog) {
bpf_prog_inc(prog);
ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
if (ret) {
bpf_prog_put(prog);
goto err1;
}
}
/* In any case device is now ready */
nvdev->tx_disable = false;
netif_device_attach(ndev);
/* Note: enable and attach happen when sub-channels are set up */
netif_carrier_off(ndev);
if (netif_running(ndev)) {
ret = rndis_filter_open(nvdev);
if (ret)
goto err2;
rdev = nvdev->extension;
if (!rdev->link_state)
netif_carrier_on(ndev);
}
return 0;
err2:
netif_device_detach(ndev);
err1:
rndis_filter_device_remove(hdev, nvdev);
return ret;
}
static int netvsc_set_channels(struct net_device *net,
struct ethtool_channels *channels)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
unsigned int orig, count = channels->combined_count;
struct netvsc_device_info *device_info;
int ret;
/* We do not support separate count for rx, tx, or other */
if (count == 0 ||
channels->rx_count || channels->tx_count || channels->other_count)
return -EINVAL;
if (!nvdev || nvdev->destroy)
return -ENODEV;
if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
return -EINVAL;
if (count > nvdev->max_chn)
return -EINVAL;
orig = nvdev->num_chn;
device_info = netvsc_devinfo_get(nvdev);
if (!device_info)
return -ENOMEM;
device_info->num_chn = count;
ret = netvsc_detach(net, nvdev);
if (ret)
goto out;
ret = netvsc_attach(net, device_info);
if (ret) {
device_info->num_chn = orig;
if (netvsc_attach(net, device_info))
netdev_err(net, "restoring channel setting failed\n");
}
out:
netvsc_devinfo_put(device_info);
return ret;
}
static void netvsc_init_settings(struct net_device *dev)
{
struct net_device_context *ndc = netdev_priv(dev);
ndc->l4_hash = HV_DEFAULT_L4HASH;
ndc->speed = SPEED_UNKNOWN;
ndc->duplex = DUPLEX_FULL;
dev->features = NETIF_F_LRO;
}
static int netvsc_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
struct net_device *vf_netdev;
vf_netdev = rtnl_dereference(ndc->vf_netdev);
if (vf_netdev)
return __ethtool_get_link_ksettings(vf_netdev, cmd);
cmd->base.speed = ndc->speed;
cmd->base.duplex = ndc->duplex;
cmd->base.port = PORT_OTHER;
return 0;
}
static int netvsc_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
if (vf_netdev) {
if (!vf_netdev->ethtool_ops->set_link_ksettings)
return -EOPNOTSUPP;
return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev,
cmd);
}
return ethtool_virtdev_set_link_ksettings(dev, cmd,
&ndc->speed, &ndc->duplex);
}
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
int orig_mtu = ndev->mtu;
struct netvsc_device_info *device_info;
int ret = 0;
if (!nvdev || nvdev->destroy)
return -ENODEV;
device_info = netvsc_devinfo_get(nvdev);
if (!device_info)
return -ENOMEM;
/* Change MTU of underlying VF netdev first. */
if (vf_netdev) {
ret = dev_set_mtu(vf_netdev, mtu);
if (ret)
goto out;
}
ret = netvsc_detach(ndev, nvdev);
if (ret)
goto rollback_vf;
ndev->mtu = mtu;
ret = netvsc_attach(ndev, device_info);
if (!ret)
goto out;
/* Attempt rollback to original MTU */
ndev->mtu = orig_mtu;
if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
if (vf_netdev)
dev_set_mtu(vf_netdev, orig_mtu);
out:
netvsc_devinfo_put(device_info);
return ret;
}
static void netvsc_get_vf_stats(struct net_device *net,
struct netvsc_vf_pcpu_stats *tot)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
int i;
memset(tot, 0, sizeof(*tot));
for_each_possible_cpu(i) {
const struct netvsc_vf_pcpu_stats *stats
= per_cpu_ptr(ndev_ctx->vf_stats, i);
u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
unsigned int start;
do {
start = u64_stats_fetch_begin(&stats->syncp);
rx_packets = stats->rx_packets;
tx_packets = stats->tx_packets;
rx_bytes = stats->rx_bytes;
tx_bytes = stats->tx_bytes;
} while (u64_stats_fetch_retry(&stats->syncp, start));
tot->rx_packets += rx_packets;
tot->tx_packets += tx_packets;
tot->rx_bytes += rx_bytes;
tot->tx_bytes += tx_bytes;
tot->tx_dropped += stats->tx_dropped;
}
}
static void netvsc_get_pcpu_stats(struct net_device *net,
struct netvsc_ethtool_pcpu_stats *pcpu_tot)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
int i;
/* fetch percpu stats of vf */
for_each_possible_cpu(i) {
const struct netvsc_vf_pcpu_stats *stats =
per_cpu_ptr(ndev_ctx->vf_stats, i);
struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
unsigned int start;
do {
start = u64_stats_fetch_begin(&stats->syncp);
this_tot->vf_rx_packets = stats->rx_packets;
this_tot->vf_tx_packets = stats->tx_packets;
this_tot->vf_rx_bytes = stats->rx_bytes;
this_tot->vf_tx_bytes = stats->tx_bytes;
} while (u64_stats_fetch_retry(&stats->syncp, start));
this_tot->rx_packets = this_tot->vf_rx_packets;
this_tot->tx_packets = this_tot->vf_tx_packets;
this_tot->rx_bytes = this_tot->vf_rx_bytes;
this_tot->tx_bytes = this_tot->vf_tx_bytes;
}
/* fetch percpu stats of netvsc */
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
const struct netvsc_stats_tx *tx_stats;
const struct netvsc_stats_rx *rx_stats;
struct netvsc_ethtool_pcpu_stats *this_tot =
&pcpu_tot[nvchan->channel->target_cpu];
u64 packets, bytes;
unsigned int start;
tx_stats = &nvchan->tx_stats;
do {
start = u64_stats_fetch_begin(&tx_stats->syncp);
packets = tx_stats->packets;
bytes = tx_stats->bytes;
} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
this_tot->tx_bytes += bytes;
this_tot->tx_packets += packets;
rx_stats = &nvchan->rx_stats;
do {
start = u64_stats_fetch_begin(&rx_stats->syncp);
packets = rx_stats->packets;
bytes = rx_stats->bytes;
} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
this_tot->rx_bytes += bytes;
this_tot->rx_packets += packets;
}
}
static void netvsc_get_stats64(struct net_device *net,
struct rtnl_link_stats64 *t)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
struct netvsc_device *nvdev;
struct netvsc_vf_pcpu_stats vf_tot;
int i;
rcu_read_lock();
nvdev = rcu_dereference(ndev_ctx->nvdev);
if (!nvdev)
goto out;
netdev_stats_to_stats64(t, &net->stats);
netvsc_get_vf_stats(net, &vf_tot);
t->rx_packets += vf_tot.rx_packets;
t->tx_packets += vf_tot.tx_packets;
t->rx_bytes += vf_tot.rx_bytes;
t->tx_bytes += vf_tot.tx_bytes;
t->tx_dropped += vf_tot.tx_dropped;
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
const struct netvsc_stats_tx *tx_stats;
const struct netvsc_stats_rx *rx_stats;
u64 packets, bytes, multicast;
unsigned int start;
tx_stats = &nvchan->tx_stats;
do {
start = u64_stats_fetch_begin(&tx_stats->syncp);
packets = tx_stats->packets;
bytes = tx_stats->bytes;
} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
t->tx_bytes += bytes;
t->tx_packets += packets;
rx_stats = &nvchan->rx_stats;
do {
start = u64_stats_fetch_begin(&rx_stats->syncp);
packets = rx_stats->packets;
bytes = rx_stats->bytes;
multicast = rx_stats->multicast + rx_stats->broadcast;
} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
t->rx_bytes += bytes;
t->rx_packets += packets;
t->multicast += multicast;
}
out:
rcu_read_unlock();
}
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
struct net_device_context *ndc = netdev_priv(ndev);
struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
struct sockaddr *addr = p;
int err;
err = eth_prepare_mac_addr_change(ndev, p);
if (err)
return err;
if (!nvdev)
return -ENODEV;
if (vf_netdev) {
err = dev_set_mac_address(vf_netdev, addr, NULL);
if (err)
return err;
}
err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
if (!err) {
eth_commit_mac_addr_change(ndev, p);
} else if (vf_netdev) {
/* rollback change on VF */
memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
dev_set_mac_address(vf_netdev, addr, NULL);
}
return err;
}
static const struct {
char name[ETH_GSTRING_LEN];
u16 offset;
} netvsc_stats[] = {
{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
{ "vlan_error", offsetof(struct netvsc_ethtool_stats, vlan_error) },
}, pcpu_stats[] = {
{ "cpu%u_rx_packets",
offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
{ "cpu%u_rx_bytes",
offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
{ "cpu%u_tx_packets",
offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
{ "cpu%u_tx_bytes",
offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
{ "cpu%u_vf_rx_packets",
offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
{ "cpu%u_vf_rx_bytes",
offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
{ "cpu%u_vf_tx_packets",
offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
{ "cpu%u_vf_tx_bytes",
offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
{ "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
{ "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};
#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats)
/* statistics per present CPU (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
/* 8 statistics per queue (rx/tx packets/bytes, XDP actions) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 8)
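/* Worked example (illustrative only): on a guest with 8 present CPUs
* and a netvsc device with 4 channels, netvsc_get_sset_count() returns
* 11 (global) + 5 (vf) + 4 * 8 (per queue) + 8 * 8 (per cpu) = 112
* counters for ETH_SS_STATS.
*/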
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
if (!nvdev)
return -ENODEV;
switch (string_set) {
case ETH_SS_STATS:
return NETVSC_GLOBAL_STATS_LEN
+ NETVSC_VF_STATS_LEN
+ NETVSC_QUEUE_STATS_LEN(nvdev)
+ NETVSC_PCPU_STATS_LEN;
default:
return -EINVAL;
}
}
static void netvsc_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
const void *nds = &ndc->eth_stats;
const struct netvsc_stats_tx *tx_stats;
const struct netvsc_stats_rx *rx_stats;
struct netvsc_vf_pcpu_stats sum;
struct netvsc_ethtool_pcpu_stats *pcpu_sum;
unsigned int start;
u64 packets, bytes;
u64 xdp_drop;
u64 xdp_redirect;
u64 xdp_tx;
u64 xdp_xmit;
int i, j, cpu;
if (!nvdev)
return;
for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
netvsc_get_vf_stats(dev, &sum);
for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
for (j = 0; j < nvdev->num_chn; j++) {
tx_stats = &nvdev->chan_table[j].tx_stats;
do {
start = u64_stats_fetch_begin(&tx_stats->syncp);
packets = tx_stats->packets;
bytes = tx_stats->bytes;
xdp_xmit = tx_stats->xdp_xmit;
} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
data[i++] = xdp_xmit;
rx_stats = &nvdev->chan_table[j].rx_stats;
do {
start = u64_stats_fetch_begin(&rx_stats->syncp);
packets = rx_stats->packets;
bytes = rx_stats->bytes;
xdp_drop = rx_stats->xdp_drop;
xdp_redirect = rx_stats->xdp_redirect;
xdp_tx = rx_stats->xdp_tx;
} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
data[i++] = xdp_drop;
data[i++] = xdp_redirect;
data[i++] = xdp_tx;
}
pcpu_sum = kvmalloc_array(num_possible_cpus(),
sizeof(struct netvsc_ethtool_pcpu_stats),
GFP_KERNEL);
if (!pcpu_sum)
return;
netvsc_get_pcpu_stats(dev, pcpu_sum);
for_each_present_cpu(cpu) {
struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
data[i++] = *(u64 *)((void *)this_sum
+ pcpu_stats[j].offset);
}
kvfree(pcpu_sum);
}
static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
u8 *p = data;
int i, cpu;
if (!nvdev)
return;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
ethtool_sprintf(&p, netvsc_stats[i].name);
for (i = 0; i < ARRAY_SIZE(vf_stats); i++)
ethtool_sprintf(&p, vf_stats[i].name);
for (i = 0; i < nvdev->num_chn; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
ethtool_sprintf(&p, "tx_queue_%u_xdp_xmit", i);
ethtool_sprintf(&p, "rx_queue_%u_packets", i);
ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i);
ethtool_sprintf(&p, "rx_queue_%u_xdp_redirect", i);
ethtool_sprintf(&p, "rx_queue_%u_xdp_tx", i);
}
for_each_present_cpu(cpu) {
for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++)
ethtool_sprintf(&p, pcpu_stats[i].name, cpu);
}
break;
}
}
static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
struct ethtool_rxnfc *info)
{
const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
info->data = RXH_IP_SRC | RXH_IP_DST;
switch (info->flow_type) {
case TCP_V4_FLOW:
if (ndc->l4_hash & HV_TCP4_L4HASH)
info->data |= l4_flag;
break;
case TCP_V6_FLOW:
if (ndc->l4_hash & HV_TCP6_L4HASH)
info->data |= l4_flag;
break;
case UDP_V4_FLOW:
if (ndc->l4_hash & HV_UDP4_L4HASH)
info->data |= l4_flag;
break;
case UDP_V6_FLOW:
if (ndc->l4_hash & HV_UDP6_L4HASH)
info->data |= l4_flag;
break;
case IPV4_FLOW:
case IPV6_FLOW:
break;
default:
info->data = 0;
break;
}
return 0;
}
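/* Worked example (illustrative only): with HV_TCP4_L4HASH set in
* ndc->l4_hash, a TCP_V4_FLOW query reports
* RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
* i.e. 4-tuple hashing over source/destination address and
* source/destination port; clearing the flag drops back to
* 2-tuple (address-only) hashing.
*/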
static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
if (!nvdev)
return -ENODEV;
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
info->data = nvdev->num_chn;
return 0;
case ETHTOOL_GRXFH:
return netvsc_get_rss_hash_opts(ndc, info);
}
return -EOPNOTSUPP;
}
static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
struct ethtool_rxnfc *info)
{
if (info->data == (RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
switch (info->flow_type) {
case TCP_V4_FLOW:
ndc->l4_hash |= HV_TCP4_L4HASH;
break;
case TCP_V6_FLOW:
ndc->l4_hash |= HV_TCP6_L4HASH;
break;
case UDP_V4_FLOW:
ndc->l4_hash |= HV_UDP4_L4HASH;
break;
case UDP_V6_FLOW:
ndc->l4_hash |= HV_UDP6_L4HASH;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
switch (info->flow_type) {
case TCP_V4_FLOW:
ndc->l4_hash &= ~HV_TCP4_L4HASH;
break;
case TCP_V6_FLOW:
ndc->l4_hash &= ~HV_TCP6_L4HASH;
break;
case UDP_V4_FLOW:
ndc->l4_hash &= ~HV_UDP4_L4HASH;
break;
case UDP_V6_FLOW:
ndc->l4_hash &= ~HV_UDP6_L4HASH;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
return -EOPNOTSUPP;
}
static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
struct net_device_context *ndc = netdev_priv(ndev);
if (info->cmd == ETHTOOL_SRXFH)
return netvsc_set_rss_hash_opts(ndc, info);
return -EOPNOTSUPP;
}
static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
return NETVSC_HASH_KEYLEN;
}
static u32 netvsc_rss_indir_size(struct net_device *dev)
{
struct net_device_context *ndc = netdev_priv(dev);
return ndc->rx_table_sz;
}
static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
struct rndis_device *rndis_dev;
int i;
if (!ndev)
return -ENODEV;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
rndis_dev = ndev->extension;
if (indir) {
for (i = 0; i < ndc->rx_table_sz; i++)
indir[i] = ndc->rx_table[i];
}
if (key)
memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
return 0;
}
static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
struct rndis_device *rndis_dev;
int i;
if (!ndev)
return -ENODEV;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
rndis_dev = ndev->extension;
if (indir) {
for (i = 0; i < ndc->rx_table_sz; i++)
if (indir[i] >= ndev->num_chn)
return -EINVAL;
for (i = 0; i < ndc->rx_table_sz; i++)
ndc->rx_table[i] = indir[i];
}
if (!key) {
if (!indir)
return 0;
key = rndis_dev->rss_key;
}
return rndis_filter_set_rss_param(rndis_dev, key);
}
/* Hyper-V RNDIS protocol does not have ring in the HW sense.
* It does have pre-allocated receive area which is divided into sections.
*/
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
struct ethtool_ringparam *ring)
{
u32 max_buf_size;
ring->rx_pending = nvdev->recv_section_cnt;
ring->tx_pending = nvdev->send_section_cnt;
if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
else
max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
/ nvdev->send_section_size;
}
static void netvsc_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
if (!nvdev)
return;
__netvsc_get_ringparam(nvdev, ring);
}
static int netvsc_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
struct netvsc_device_info *device_info;
struct ethtool_ringparam orig;
u32 new_tx, new_rx;
int ret = 0;
if (!nvdev || nvdev->destroy)
return -ENODEV;
memset(&orig, 0, sizeof(orig));
__netvsc_get_ringparam(nvdev, &orig);
new_tx = clamp_t(u32, ring->tx_pending,
NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
new_rx = clamp_t(u32, ring->rx_pending,
NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);
if (new_tx == orig.tx_pending &&
new_rx == orig.rx_pending)
return 0; /* no change */
device_info = netvsc_devinfo_get(nvdev);
if (!device_info)
return -ENOMEM;
device_info->send_sections = new_tx;
device_info->recv_sections = new_rx;
ret = netvsc_detach(ndev, nvdev);
if (ret)
goto out;
ret = netvsc_attach(ndev, device_info);
if (ret) {
device_info->send_sections = orig.tx_pending;
device_info->recv_sections = orig.rx_pending;
if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring ringparam failed");
}
out:
netvsc_devinfo_put(device_info);
return ret;
}
static netdev_features_t netvsc_fix_features(struct net_device *ndev,
netdev_features_t features)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
if (!nvdev || nvdev->destroy)
return features;
if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
features ^= NETIF_F_LRO;
netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
}
return features;
}
static int netvsc_set_features(struct net_device *ndev,
netdev_features_t features)
{
netdev_features_t change = features ^ ndev->features;
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
struct ndis_offload_params offloads;
int ret = 0;
if (!nvdev || nvdev->destroy)
return -ENODEV;
if (!(change & NETIF_F_LRO))
goto syncvf;
memset(&offloads, 0, sizeof(struct ndis_offload_params));
if (features & NETIF_F_LRO) {
offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
} else {
offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
}
ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
if (ret) {
features ^= NETIF_F_LRO;
ndev->features = features;
}
syncvf:
if (!vf_netdev)
return ret;
vf_netdev->wanted_features = features;
netdev_update_features(vf_netdev);
return ret;
}
static int netvsc_get_regs_len(struct net_device *netdev)
{
return VRSS_SEND_TAB_SIZE * sizeof(u32);
}
static void netvsc_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
struct net_device_context *ndc = netdev_priv(netdev);
u32 *regs_buff = p;
/* increase the version, if buffer format is changed. */
regs->version = 1;
memcpy(regs_buff, ndc->tx_table, VRSS_SEND_TAB_SIZE * sizeof(u32));
}
static u32 netvsc_get_msglevel(struct net_device *ndev)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
return ndev_ctx->msg_enable;
}
static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
ndev_ctx->msg_enable = val;
}
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_regs_len = netvsc_get_regs_len,
.get_regs = netvsc_get_regs,
.get_msglevel = netvsc_get_msglevel,
.set_msglevel = netvsc_set_msglevel,
.get_link = ethtool_op_get_link,
.get_ethtool_stats = netvsc_get_ethtool_stats,
.get_sset_count = netvsc_get_sset_count,
.get_strings = netvsc_get_strings,
.get_channels = netvsc_get_channels,
.set_channels = netvsc_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
.get_rxnfc = netvsc_get_rxnfc,
.set_rxnfc = netvsc_set_rxnfc,
.get_rxfh_key_size = netvsc_get_rxfh_key_size,
.get_rxfh_indir_size = netvsc_rss_indir_size,
.get_rxfh = netvsc_get_rxfh,
.set_rxfh = netvsc_set_rxfh,
.get_link_ksettings = netvsc_get_link_ksettings,
.set_link_ksettings = netvsc_set_link_ksettings,
.get_ringparam = netvsc_get_ringparam,
.set_ringparam = netvsc_set_ringparam,
};
static const struct net_device_ops device_ops = {
.ndo_open = netvsc_open,
.ndo_stop = netvsc_close,
.ndo_start_xmit = netvsc_start_xmit,
.ndo_change_rx_flags = netvsc_change_rx_flags,
.ndo_set_rx_mode = netvsc_set_rx_mode,
.ndo_fix_features = netvsc_fix_features,
.ndo_set_features = netvsc_set_features,
.ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = netvsc_set_mac_addr,
.ndo_select_queue = netvsc_select_queue,
.ndo_get_stats64 = netvsc_get_stats64,
.ndo_bpf = netvsc_bpf,
.ndo_xdp_xmit = netvsc_ndoxdp_xmit,
};
/*
* Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
* down/up sequence. For RNDIS_STATUS_MEDIA_CONNECT, when carrier is already
* present, notify network peers (gratuitous ARP) via __netdev_notify_peers().
*/
static void netvsc_link_change(struct work_struct *w)
{
struct net_device_context *ndev_ctx =
container_of(w, struct net_device_context, dwork.work);
struct hv_device *device_obj = ndev_ctx->device_ctx;
struct net_device *net = hv_get_drvdata(device_obj);
unsigned long flags, next_reconfig, delay;
struct netvsc_reconfig *event = NULL;
struct netvsc_device *net_device;
struct rndis_device *rdev;
bool reschedule = false;
/* if changes are happening, come back later */
if (!rtnl_trylock()) {
schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
return;
}
net_device = rtnl_dereference(ndev_ctx->nvdev);
if (!net_device)
goto out_unlock;
rdev = net_device->extension;
next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
if (time_is_after_jiffies(next_reconfig)) {
/* link_watch only sends one notification with current state
* per second, avoid doing reconfig more frequently. Handle
* wrap around.
*/
delay = next_reconfig - jiffies;
delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
schedule_delayed_work(&ndev_ctx->dwork, delay);
goto out_unlock;
}
ndev_ctx->last_reconfig = jiffies;
spin_lock_irqsave(&ndev_ctx->lock, flags);
if (!list_empty(&ndev_ctx->reconfig_events)) {
event = list_first_entry(&ndev_ctx->reconfig_events,
struct netvsc_reconfig, list);
list_del(&event->list);
reschedule = !list_empty(&ndev_ctx->reconfig_events);
}
spin_unlock_irqrestore(&ndev_ctx->lock, flags);
if (!event)
goto out_unlock;
switch (event->event) {
/* Only the following events are possible due to the check in
* netvsc_linkstatus_callback()
*/
case RNDIS_STATUS_MEDIA_CONNECT:
if (rdev->link_state) {
rdev->link_state = false;
netif_carrier_on(net);
netvsc_tx_enable(net_device, net);
} else {
__netdev_notify_peers(net);
}
kfree(event);
break;
case RNDIS_STATUS_MEDIA_DISCONNECT:
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
netvsc_tx_disable(net_device, net);
}
kfree(event);
break;
case RNDIS_STATUS_NETWORK_CHANGE:
/* Only makes sense if carrier is present */
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
netvsc_tx_disable(net_device, net);
event->event = RNDIS_STATUS_MEDIA_CONNECT;
spin_lock_irqsave(&ndev_ctx->lock, flags);
list_add(&event->list, &ndev_ctx->reconfig_events);
spin_unlock_irqrestore(&ndev_ctx->lock, flags);
reschedule = true;
}
break;
}
rtnl_unlock();
/* link_watch only sends one notification with current state per
* second, handle next reconfig event in 2 seconds.
*/
if (reschedule)
schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
return;
out_unlock:
rtnl_unlock();
}
static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
struct net_device_context *net_device_ctx;
struct net_device *dev;
dev = netdev_master_upper_dev_get(vf_netdev);
if (!dev || dev->netdev_ops != &device_ops)
return NULL; /* not a netvsc device */
net_device_ctx = netdev_priv(dev);
if (!rtnl_dereference(net_device_ctx->nvdev))
return NULL; /* device is removed */
return dev;
}
/* Called when VF is injecting data into network stack.
* Change the associated network device from VF to netvsc.
* note: already called with rcu_read_lock
*/
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct netvsc_vf_pcpu_stats *pcpu_stats
= this_cpu_ptr(ndev_ctx->vf_stats);
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return RX_HANDLER_CONSUMED;
*pskb = skb;
skb->dev = ndev;
u64_stats_update_begin(&pcpu_stats->syncp);
pcpu_stats->rx_packets++;
pcpu_stats->rx_bytes += skb->len;
u64_stats_update_end(&pcpu_stats->syncp);
return RX_HANDLER_ANOTHER;
}
static int netvsc_vf_join(struct net_device *vf_netdev,
struct net_device *ndev)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
int ret;
ret = netdev_rx_handler_register(vf_netdev,
netvsc_vf_handle_frame, ndev);
if (ret != 0) {
netdev_err(vf_netdev,
"can not register netvsc VF receive handler (err = %d)\n",
ret);
goto rx_handler_failed;
}
ret = netdev_master_upper_dev_link(vf_netdev, ndev,
NULL, NULL, NULL);
if (ret != 0) {
netdev_err(vf_netdev,
"can not set master device %s (err = %d)\n",
ndev->name, ret);
goto upper_link_failed;
}
/* set slave flag before open to prevent IPv6 addrconf */
vf_netdev->flags |= IFF_SLAVE;
schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
netdev_info(vf_netdev, "joined to %s\n", ndev->name);
return 0;
upper_link_failed:
netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
return ret;
}
static void __netvsc_vf_setup(struct net_device *ndev,
struct net_device *vf_netdev)
{
int ret;
/* Align MTU of VF with master */
ret = dev_set_mtu(vf_netdev, ndev->mtu);
if (ret)
netdev_warn(vf_netdev,
"unable to change mtu to %u\n", ndev->mtu);
/* set multicast etc flags on VF */
dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);
/* sync address list from ndev to VF */
netif_addr_lock_bh(ndev);
dev_uc_sync(vf_netdev, ndev);
dev_mc_sync(vf_netdev, ndev);
netif_addr_unlock_bh(ndev);
if (netif_running(ndev)) {
ret = dev_open(vf_netdev, NULL);
if (ret)
netdev_warn(vf_netdev,
"unable to open: %d\n", ret);
}
}
/* Setup VF as slave of the synthetic device.
* Runs in workqueue to avoid recursion in netlink callbacks.
*/
static void netvsc_vf_setup(struct work_struct *w)
{
struct net_device_context *ndev_ctx
= container_of(w, struct net_device_context, vf_takeover.work);
struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
struct net_device *vf_netdev;
if (!rtnl_trylock()) {
schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
return;
}
vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
if (vf_netdev)
__netvsc_vf_setup(ndev, vf_netdev);
rtnl_unlock();
}
/* Find netvsc by VF serial number.
* The PCI hyperv controller records the serial number as the slot kobj name.
*/
static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
struct device *parent = vf_netdev->dev.parent;
struct net_device_context *ndev_ctx;
struct net_device *ndev;
struct pci_dev *pdev;
u32 serial;
if (!parent || !dev_is_pci(parent))
return NULL; /* not a PCI device */
pdev = to_pci_dev(parent);
if (!pdev->slot) {
netdev_notice(vf_netdev, "no PCI slot information\n");
return NULL;
}
if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
pci_slot_name(pdev->slot));
return NULL;
}
list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
if (!ndev_ctx->vf_alloc)
continue;
if (ndev_ctx->vf_serial != serial)
continue;
ndev = hv_get_drvdata(ndev_ctx->device_ctx);
if (ndev->addr_len != vf_netdev->addr_len ||
memcmp(ndev->perm_addr, vf_netdev->perm_addr,
ndev->addr_len) != 0)
continue;
return ndev;
}
/* Fallback path: match the synthetic NIC to the
* VF with the help of the MAC address
*/
list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
ndev = hv_get_drvdata(ndev_ctx->device_ctx);
if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
netdev_notice(vf_netdev,
"falling back to mac addr based matching\n");
return ndev;
}
}
netdev_notice(vf_netdev,
"no netdev found for vf serial:%u\n", serial);
return NULL;
}
static int netvsc_register_vf(struct net_device *vf_netdev)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
struct bpf_prog *prog;
struct net_device *ndev;
int ret;
if (vf_netdev->addr_len != ETH_ALEN)
return NOTIFY_DONE;
ndev = get_netvsc_byslot(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
return NOTIFY_DONE;
/* if the synthetic interface is in a different namespace,
* then move the VF to that namespace; join will be
* done again in that context.
*/
if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
ret = dev_change_net_namespace(vf_netdev,
dev_net(ndev), "eth%d");
if (ret)
netdev_err(vf_netdev,
"could not move to same namespace as %s: %d\n",
ndev->name, ret);
else
netdev_info(vf_netdev,
"VF moved to namespace with: %s\n",
ndev->name);
return NOTIFY_DONE;
}
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
if (netvsc_vf_join(vf_netdev, ndev) != 0)
return NOTIFY_DONE;
dev_hold(vf_netdev);
rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
if (ndev->needed_headroom < vf_netdev->needed_headroom)
ndev->needed_headroom = vf_netdev->needed_headroom;
vf_netdev->wanted_features = ndev->features;
netdev_update_features(vf_netdev);
prog = netvsc_xdp_get(netvsc_dev);
netvsc_vf_setxdp(vf_netdev, prog);
return NOTIFY_OK;
}
/* Change the data path when VF UP/DOWN/CHANGE are detected.
*
* Typically a UP or DOWN event is followed by a CHANGE event, so
* net_device_ctx->data_path_is_vf is used to cache the current data path
* to avoid the duplicate call of netvsc_switch_datapath() and the duplicate
* message.
*
* During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
* interface, there is only the CHANGE event and no UP or DOWN event.
*/
static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
struct net_device *ndev;
bool vf_is_up = false;
int ret;
if (event != NETDEV_GOING_DOWN)
vf_is_up = netif_running(vf_netdev);
ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
if (!netvsc_dev)
return NOTIFY_DONE;
if (net_device_ctx->data_path_is_vf == vf_is_up)
return NOTIFY_OK;
if (vf_is_up && !net_device_ctx->vf_alloc) {
netdev_info(ndev, "Waiting for the VF association from host\n");
wait_for_completion(&net_device_ctx->vf_add);
}
ret = netvsc_switch_datapath(ndev, vf_is_up);
if (ret) {
netdev_err(ndev,
"Data path failed to switch %s VF: %s, err: %d\n",
vf_is_up ? "to" : "from", vf_netdev->name, ret);
return NOTIFY_DONE;
} else {
netdev_info(ndev, "Data path switched %s VF: %s\n",
vf_is_up ? "to" : "from", vf_netdev->name);
}
return NOTIFY_OK;
}
static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
struct net_device *ndev;
struct net_device_context *net_device_ctx;
ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
netvsc_vf_setxdp(vf_netdev, NULL);
reinit_completion(&net_device_ctx->vf_add);
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
dev_put(vf_netdev);
ndev->needed_headroom = RNDIS_AND_PPI_SIZE;
return NOTIFY_OK;
}
static int netvsc_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
struct net_device *net = NULL;
struct net_device_context *net_device_ctx;
struct netvsc_device_info *device_info = NULL;
struct netvsc_device *nvdev;
int ret = -ENOMEM;
net = alloc_etherdev_mq(sizeof(struct net_device_context),
VRSS_CHANNEL_MAX);
if (!net)
goto no_net;
netif_carrier_off(net);
netvsc_init_settings(net);
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
if (netif_msg_probe(net_device_ctx))
netdev_dbg(net, "netvsc msg_enable: %d\n",
net_device_ctx->msg_enable);
hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
init_completion(&net_device_ctx->vf_add);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
net_device_ctx->vf_stats
= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
if (!net_device_ctx->vf_stats)
goto no_stats;
net->netdev_ops = &device_ops;
net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
dma_set_min_align_mask(&dev->device, HV_HYP_PAGE_SIZE - 1);
/* We always need headroom for rndis header */
net->needed_headroom = RNDIS_AND_PPI_SIZE;
/* Initialize the number of queues to be 1, we may change it if more
* channels are offered later.
*/
netif_set_real_num_tx_queues(net, 1);
netif_set_real_num_rx_queues(net, 1);
/* Notify the netvsc driver of the new device */
device_info = netvsc_devinfo_get(NULL);
if (!device_info) {
ret = -ENOMEM;
goto devinfo_failed;
}
nvdev = rndis_filter_device_add(dev, device_info);
if (IS_ERR(nvdev)) {
ret = PTR_ERR(nvdev);
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
goto rndis_failed;
}
eth_hw_addr_set(net, device_info->mac_adr);
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
* all subchannels to show up, but that may not happen because
* netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
* -> ... -> device_add() -> ... -> __device_attach() can't get
* the device lock, so all the subchannels can't be processed --
* finally netvsc_subchan_work() hangs forever.
*/
rtnl_lock();
if (nvdev->num_chn > 1)
schedule_work(&nvdev->subchan_work);
/* hw_features computed in rndis_netdev_set_hwcaps() */
net->features = net->hw_features |
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
net->vlan_features = net->features;
netdev_lockdep_set_classes(net);
net->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT;
/* MTU range: 68 - 1500 or 65521 */
net->min_mtu = NETVSC_MTU_MIN;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
net->max_mtu = NETVSC_MTU - ETH_HLEN;
else
net->max_mtu = ETH_DATA_LEN;
nvdev->tx_disable = false;
ret = register_netdevice(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
goto register_failed;
}
list_add(&net_device_ctx->list, &netvsc_dev_list);
rtnl_unlock();
netvsc_devinfo_put(device_info);
return 0;
register_failed:
rtnl_unlock();
rndis_filter_device_remove(dev, nvdev);
rndis_failed:
netvsc_devinfo_put(device_info);
devinfo_failed:
free_percpu(net_device_ctx->vf_stats);
no_stats:
hv_set_drvdata(dev, NULL);
free_netdev(net);
no_net:
return ret;
}
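/* netvsc_remove - Tear down in roughly the reverse order of probe:
 * detach any slave VF, remove the RNDIS filter device, unregister
 * the netdev and free its resources.
 */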
static void netvsc_remove(struct hv_device *dev)
{
struct net_device_context *ndev_ctx;
struct net_device *vf_netdev, *net;
struct netvsc_device *nvdev;
net = hv_get_drvdata(dev);
if (net == NULL) {
dev_err(&dev->device, "No net device to remove\n");
return;
}
ndev_ctx = netdev_priv(net);
cancel_delayed_work_sync(&ndev_ctx->dwork);
rtnl_lock();
nvdev = rtnl_dereference(ndev_ctx->nvdev);
if (nvdev) {
cancel_work_sync(&nvdev->subchan_work);
netvsc_xdp_set(net, NULL, NULL, nvdev);
}
/*
* Call to the vsc driver to let it know that the device is being
* removed. Also blocks mtu and channel changes.
*/
vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
if (vf_netdev)
netvsc_unregister_vf(vf_netdev);
if (nvdev)
rndis_filter_device_remove(dev, nvdev);
unregister_netdevice(net);
list_del(&ndev_ctx->list);
rtnl_unlock();
hv_set_drvdata(dev, NULL);
free_percpu(ndev_ctx->vf_stats);
free_netdev(net);
}
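/* netvsc_suspend - Save the current device configuration and detach
 * the netvsc device so it can be re-attached in netvsc_resume().
 */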
static int netvsc_suspend(struct hv_device *dev)
{
struct net_device_context *ndev_ctx;
struct netvsc_device *nvdev;
struct net_device *net;
int ret;
net = hv_get_drvdata(dev);
ndev_ctx = netdev_priv(net);
cancel_delayed_work_sync(&ndev_ctx->dwork);
rtnl_lock();
nvdev = rtnl_dereference(ndev_ctx->nvdev);
if (nvdev == NULL) {
ret = -ENODEV;
goto out;
}
/* Save the current config info */
ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
if (!ndev_ctx->saved_netvsc_dev_info) {
ret = -ENOMEM;
goto out;
}
ret = netvsc_detach(net, nvdev);
out:
rtnl_unlock();
return ret;
}
static int netvsc_resume(struct hv_device *dev)
{
struct net_device *net = hv_get_drvdata(dev);
struct net_device_context *net_device_ctx;
struct netvsc_device_info *device_info;
int ret;
rtnl_lock();
net_device_ctx = netdev_priv(net);
/* Reset the data path to the netvsc NIC before re-opening the vmbus
* channel. Later netvsc_netdev_event() will switch the data path to
* the VF upon the UP or CHANGE event.
*/
net_device_ctx->data_path_is_vf = false;
device_info = net_device_ctx->saved_netvsc_dev_info;
ret = netvsc_attach(net, device_info);
netvsc_devinfo_put(device_info);
net_device_ctx->saved_netvsc_dev_info = NULL;
rtnl_unlock();
return ret;
}
static const struct hv_vmbus_device_id id_table[] = {
/* Network guid */
{ HV_NIC_GUID, },
{ },
};
MODULE_DEVICE_TABLE(vmbus, id_table);
/* The one and only one */
static struct hv_driver netvsc_drv = {
.name = KBUILD_MODNAME,
.id_table = id_table,
.probe = netvsc_probe,
.remove = netvsc_remove,
.suspend = netvsc_suspend,
.resume = netvsc_resume,
.driver = {
.probe_type = PROBE_FORCE_SYNCHRONOUS,
},
};
/*
* On Hyper-V, every VF interface is matched with a corresponding
* synthetic interface. The synthetic interface is presented first
* to the guest. When the corresponding VF instance is registered,
* we will take care of switching the data path.
*/
static int netvsc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
/* Skip our own events */
if (event_dev->netdev_ops == &device_ops)
return NOTIFY_DONE;
/* Avoid non-Ethernet type devices */
if (event_dev->type != ARPHRD_ETHER)
return NOTIFY_DONE;
/* Avoid Vlan dev with same MAC registering as VF */
if (is_vlan_dev(event_dev))
return NOTIFY_DONE;
/* Avoid Bonding master dev with same MAC registering as VF */
if (netif_is_bond_master(event_dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_REGISTER:
return netvsc_register_vf(event_dev);
case NETDEV_UNREGISTER:
return netvsc_unregister_vf(event_dev);
case NETDEV_UP:
case NETDEV_DOWN:
case NETDEV_CHANGE:
case NETDEV_GOING_DOWN:
return netvsc_vf_changed(event_dev, event);
default:
return NOTIFY_DONE;
}
}
static struct notifier_block netvsc_netdev_notifier = {
.notifier_call = netvsc_netdev_event,
};
static void __exit netvsc_drv_exit(void)
{
unregister_netdevice_notifier(&netvsc_netdev_notifier);
vmbus_driver_unregister(&netvsc_drv);
}
static int __init netvsc_drv_init(void)
{
int ret;
if (ring_size < RING_SIZE_MIN) {
ring_size = RING_SIZE_MIN;
pr_info("Increased ring_size to %u (min allowed)\n",
ring_size);
}
netvsc_ring_bytes = ring_size * PAGE_SIZE;
ret = vmbus_driver_register(&netvsc_drv);
if (ret)
return ret;
register_netdevice_notifier(&netvsc_netdev_notifier);
return 0;
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);
| linux-master | drivers/net/hyperv/netvsc_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019, Microsoft Corporation.
*
* Author:
* Haiyang Zhang <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/netpoll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/kernel.h>
#include <net/xdp.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include "hyperv_net.h"
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
struct xdp_buff *xdp)
{
struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
void *data = nvchan->rsc.data[0];
u32 len = nvchan->rsc.len[0];
struct page *page = NULL;
struct bpf_prog *prog;
u32 act = XDP_PASS;
bool drop = true;
xdp->data_hard_start = NULL;
rcu_read_lock();
prog = rcu_dereference(nvchan->bpf_prog);
if (!prog)
goto out;
/* Ensure that the below memcpy() won't overflow the page buffer. */
if (len > ndev->mtu + ETH_HLEN) {
act = XDP_DROP;
goto out;
}
/* allocate page buffer for data */
page = alloc_page(GFP_ATOMIC);
if (!page) {
act = XDP_DROP;
goto out;
}
xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);
memcpy(xdp->data, data, len);
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
case XDP_TX:
drop = false;
break;
case XDP_DROP:
break;
case XDP_REDIRECT:
if (!xdp_do_redirect(ndev, xdp, prog)) {
nvchan->xdp_flush = true;
drop = false;
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->xdp_redirect++;
rx_stats->packets++;
rx_stats->bytes += nvchan->rsc.pktlen;
u64_stats_update_end(&rx_stats->syncp);
break;
} else {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->xdp_drop++;
u64_stats_update_end(&rx_stats->syncp);
}
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(ndev, prog, act);
break;
default:
bpf_warn_invalid_xdp_action(ndev, prog, act);
}
out:
rcu_read_unlock();
if (page && drop) {
__free_page(page);
xdp->data_hard_start = NULL;
}
return act;
}
unsigned int netvsc_xdp_fraglen(unsigned int len)
{
return SKB_DATA_ALIGN(len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev)
{
return rtnl_dereference(nvdev->chan_table[0].bpf_prog);
}
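/* Install (or, when prog is NULL, remove) an XDP program on all
 * channels of the synthetic device. The program is rejected if the
 * MTU would not fit in a single page or if LRO is enabled.
 */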
int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack,
struct netvsc_device *nvdev)
{
struct bpf_prog *old_prog;
int buf_max, i;
old_prog = netvsc_xdp_get(nvdev);
if (!old_prog && !prog)
return 0;
buf_max = NETVSC_XDP_HDRM + netvsc_xdp_fraglen(dev->mtu + ETH_HLEN);
if (prog && buf_max > PAGE_SIZE) {
netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n",
dev->mtu, buf_max);
NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
return -EOPNOTSUPP;
}
if (prog && (dev->features & NETIF_F_LRO)) {
netdev_err(dev, "XDP: not support LRO\n");
NL_SET_ERR_MSG_MOD(extack, "XDP: not support LRO");
return -EOPNOTSUPP;
}
if (prog)
bpf_prog_add(prog, nvdev->num_chn - 1);
for (i = 0; i < nvdev->num_chn; i++)
rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);
if (old_prog)
for (i = 0; i < nvdev->num_chn; i++)
bpf_prog_put(old_prog);
return 0;
}
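/* Propagate the XDP program to the companion VF netdev, if one is
 * present and implements ndo_bpf. A reference is taken on the program
 * and dropped again if the VF rejects it.
 */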
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
{
struct netdev_bpf xdp;
int ret;
ASSERT_RTNL();
if (!vf_netdev)
return 0;
if (!vf_netdev->netdev_ops->ndo_bpf)
return 0;
memset(&xdp, 0, sizeof(xdp));
if (prog)
bpf_prog_inc(prog);
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);
if (ret && prog)
bpf_prog_put(prog);
return ret;
}
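/* ndo_bpf handler: install the program on the synthetic device first,
 * then mirror it to the VF; roll back the synthetic side if the VF
 * setup fails.
 */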
int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
struct net_device_context *ndevctx = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
struct netlink_ext_ack *extack = bpf->extack;
int ret;
if (!nvdev || nvdev->destroy) {
return -ENODEV;
}
switch (bpf->command) {
case XDP_SETUP_PROG:
ret = netvsc_xdp_set(dev, bpf->prog, extack, nvdev);
if (ret)
return ret;
ret = netvsc_vf_setxdp(vf_netdev, bpf->prog);
if (ret) {
netdev_err(dev, "vf_setxdp failed:%d\n", ret);
NL_SET_ERR_MSG_MOD(extack, "vf_setxdp failed");
netvsc_xdp_set(dev, NULL, extack, nvdev);
}
return ret;
default:
return -EINVAL;
}
}
static int netvsc_ndoxdp_xmit_fm(struct net_device *ndev,
struct xdp_frame *frame, u16 q_idx)
{
struct sk_buff *skb;
skb = xdp_build_skb_from_frame(frame, ndev);
if (unlikely(!skb))
return -ENOMEM;
netvsc_get_hash(skb, netdev_priv(ndev));
skb_record_rx_queue(skb, q_idx);
netvsc_xdp_xmit(skb, ndev);
return 0;
}
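/* ndo_xdp_xmit handler: hand frames to the VF when it is up and
 * currently owns the data path; otherwise build skbs from the frames
 * and send them on the synthetic device.
 */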
int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
struct xdp_frame **frames, u32 flags)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
const struct net_device_ops *vf_ops;
struct netvsc_stats_tx *tx_stats;
struct netvsc_device *nvsc_dev;
struct net_device *vf_netdev;
int i, count = 0;
u16 q_idx;
/* Don't transmit if netvsc_device is gone */
nvsc_dev = rcu_dereference_bh(ndev_ctx->nvdev);
if (unlikely(!nvsc_dev || nvsc_dev->destroy))
return 0;
/* If the VF is present and up then redirect packets to it.
 * Skip the VF if it is marked down or has no carrier.
 * If netpoll is in use, then the VF cannot be used either.
 */
vf_netdev = rcu_dereference_bh(ndev_ctx->vf_netdev);
if (vf_netdev && netif_running(vf_netdev) &&
netif_carrier_ok(vf_netdev) && !netpoll_tx_running(ndev) &&
vf_netdev->netdev_ops->ndo_xdp_xmit &&
ndev_ctx->data_path_is_vf) {
vf_ops = vf_netdev->netdev_ops;
return vf_ops->ndo_xdp_xmit(vf_netdev, n, frames, flags);
}
q_idx = smp_processor_id() % ndev->real_num_tx_queues;
for (i = 0; i < n; i++) {
if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx))
break;
count++;
}
tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats;
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->xdp_xmit += count;
u64_stats_update_end(&tx_stats->syncp);
return count;
}
| linux-master | drivers/net/hyperv/netvsc_bpf.c |
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/netdevice.h>
#include "hyperv_net.h"
#define CREATE_TRACE_POINTS
#include "netvsc_trace.h"
| linux-master | drivers/net/hyperv/netvsc_trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2009, Microsoft Corporation.
*
* Authors:
* Haiyang Zhang <[email protected]>
* Hank Janssen <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <linux/filter.h>
#include <asm/sync_bitops.h>
#include <asm/mshyperv.h>
#include "hyperv_net.h"
#include "netvsc_trace.h"
/*
* Switch the data path from the synthetic interface to the VF
* interface.
*/
int netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct hv_device *dev = net_device_ctx->device_ctx;
struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
int ret, retry = 0;
/* Block sending traffic to VF if it's about to be gone */
if (!vf)
net_device_ctx->data_path_is_vf = vf;
memset(init_pkt, 0, sizeof(struct nvsp_message));
init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
if (vf)
init_pkt->msg.v4_msg.active_dp.active_datapath =
NVSP_DATAPATH_VF;
else
init_pkt->msg.v4_msg.active_dp.active_datapath =
NVSP_DATAPATH_SYNTHETIC;
again:
trace_nvsp_send(ndev, init_pkt);
ret = vmbus_sendpacket(dev->channel, init_pkt,
sizeof(struct nvsp_message),
(unsigned long)init_pkt, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
/* If failed to switch to/from VF, let data_path_is_vf stay false,
* so we use synthetic path to send data.
*/
if (ret) {
if (ret != -EAGAIN) {
netdev_err(ndev,
"Unable to send sw datapath msg, err: %d\n",
ret);
return ret;
}
if (retry++ < RETRY_MAX) {
usleep_range(RETRY_US_LO, RETRY_US_HI);
goto again;
} else {
netdev_err(
ndev,
"Retry failed to send sw datapath msg, err: %d\n",
ret);
return ret;
}
}
wait_for_completion(&nv_dev->channel_init_wait);
net_device_ctx->data_path_is_vf = vf;
return 0;
}
/* Worker to setup sub channels on initial setup
* Initial hotplug event occurs in softirq context
* and can't wait for channels.
*/
static void netvsc_subchan_work(struct work_struct *w)
{
struct netvsc_device *nvdev =
container_of(w, struct netvsc_device, subchan_work);
struct rndis_device *rdev;
int i, ret;
/* Avoid deadlock with device removal already under RTNL */
if (!rtnl_trylock()) {
schedule_work(w);
return;
}
rdev = nvdev->extension;
if (rdev) {
ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
if (ret == 0) {
netif_device_attach(rdev->ndev);
} else {
/* fallback to only primary channel */
for (i = 1; i < nvdev->num_chn; i++)
netif_napi_del(&nvdev->chan_table[i].napi);
nvdev->max_chn = 1;
nvdev->num_chn = 1;
}
}
rtnl_unlock();
}
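/* Allocate a netvsc_device and initialize the waitqueues, completion
 * and work item used during channel setup. TX starts disabled until
 * the caller enables it once setup completes.
 */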
static struct netvsc_device *alloc_net_device(void)
{
struct netvsc_device *net_device;
net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
if (!net_device)
return NULL;
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
net_device->tx_disable = true;
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
init_completion(&net_device->channel_init_wait);
init_waitqueue_head(&net_device->subchan_open);
INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
return net_device;
}
static void free_netvsc_device(struct rcu_head *head)
{
struct netvsc_device *nvdev
= container_of(head, struct netvsc_device, rcu);
int i;
kfree(nvdev->extension);
vfree(nvdev->recv_buf);
vfree(nvdev->send_buf);
bitmap_free(nvdev->send_section_map);
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
kfree(nvdev->chan_table[i].recv_buf);
vfree(nvdev->chan_table[i].mrc.slots);
}
kfree(nvdev);
}
static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
call_rcu(&nvdev->rcu, free_netvsc_device);
}
static void netvsc_revoke_recv_buf(struct hv_device *device,
struct netvsc_device *net_device,
struct net_device *ndev)
{
struct nvsp_message *revoke_packet;
int ret;
/*
 * If we got a section count, it means we received a
 * SendReceiveBufferComplete msg (i.e. we sent a
 * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
 * to send a revoke msg here.
 */
if (net_device->recv_section_cnt) {
/* Send the revoke receive buffer */
revoke_packet = &net_device->revoke_packet;
memset(revoke_packet, 0, sizeof(struct nvsp_message));
revoke_packet->hdr.msg_type =
NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
revoke_packet->msg.v1_msg.
revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
trace_nvsp_send(ndev, revoke_packet);
ret = vmbus_sendpacket(device->channel,
revoke_packet,
sizeof(struct nvsp_message),
VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
/* If the failure is because the channel is rescinded,
 * ignore the failure since we cannot send on a rescinded
 * channel. This allows us to properly clean up
 * even when the channel is rescinded.
 */
if (device->channel->rescind)
ret = 0;
/*
* If we failed here, we might as well return and
* have a leak rather than continue and a bugchk
*/
if (ret != 0) {
netdev_err(ndev, "unable to send "
"revoke receive buffer to netvsp\n");
return;
}
net_device->recv_section_cnt = 0;
}
}
static void netvsc_revoke_send_buf(struct hv_device *device,
struct netvsc_device *net_device,
struct net_device *ndev)
{
struct nvsp_message *revoke_packet;
int ret;
/* Deal with the send buffer we may have set up.
 * If we got a send section size, it means we received a
 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg); therefore, we need
 * to send a revoke msg here.
 */
if (net_device->send_section_cnt) {
/* Send the revoke send buffer */
revoke_packet = &net_device->revoke_packet;
memset(revoke_packet, 0, sizeof(struct nvsp_message));
revoke_packet->hdr.msg_type =
NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
revoke_packet->msg.v1_msg.revoke_send_buf.id =
NETVSC_SEND_BUFFER_ID;
trace_nvsp_send(ndev, revoke_packet);
ret = vmbus_sendpacket(device->channel,
revoke_packet,
sizeof(struct nvsp_message),
VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
/* If the failure is because the channel is rescinded,
 * ignore the failure since we cannot send on a rescinded
 * channel. This allows us to properly clean up
 * even when the channel is rescinded.
 */
if (device->channel->rescind)
ret = 0;
/* If we failed here, we might as well return and
* have a leak rather than continue and a bugchk
*/
if (ret != 0) {
netdev_err(ndev, "unable to send "
"revoke send buffer to netvsp\n");
return;
}
net_device->send_section_cnt = 0;
}
}
static void netvsc_teardown_recv_gpadl(struct hv_device *device,
struct netvsc_device *net_device,
struct net_device *ndev)
{
int ret;
if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
&net_device->recv_buf_gpadl_handle);
/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
*/
if (ret != 0) {
netdev_err(ndev,
"unable to teardown receive buffer's gpadl\n");
return;
}
}
}
static void netvsc_teardown_send_gpadl(struct hv_device *device,
struct netvsc_device *net_device,
struct net_device *ndev)
{
int ret;
if (net_device->send_buf_gpadl_handle.gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
&net_device->send_buf_gpadl_handle);
/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
*/
if (ret != 0) {
netdev_err(ndev,
"unable to teardown send buffer's gpadl\n");
return;
}
}
}
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
int node = cpu_to_node(nvchan->channel->target_cpu);
size_t size;
size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
nvchan->mrc.slots = vzalloc_node(size, node);
if (!nvchan->mrc.slots)
nvchan->mrc.slots = vzalloc(size);
return nvchan->mrc.slots ? 0 : -ENOMEM;
}
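/* Allocate the receive and send buffers, establish their GPADL
 * handles on the channel and announce them to the NetVSP, then parse
 * and validate the section sizes/counts returned by the host.
 */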
static int netvsc_init_buf(struct hv_device *device,
struct netvsc_device *net_device,
const struct netvsc_device_info *device_info)
{
struct nvsp_1_message_send_receive_buffer_complete *resp;
struct net_device *ndev = hv_get_drvdata(device);
struct nvsp_message *init_packet;
unsigned int buf_size;
int i, ret = 0;
/* Get receive buffer area. */
buf_size = device_info->recv_sections * device_info->recv_section_size;
buf_size = roundup(buf_size, PAGE_SIZE);
/* Legacy hosts only allow smaller receive buffer */
if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
buf_size = min_t(unsigned int, buf_size,
NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
net_device->recv_buf = vzalloc(buf_size);
if (!net_device->recv_buf) {
netdev_err(ndev,
"unable to allocate receive buffer of size %u\n",
buf_size);
ret = -ENOMEM;
goto cleanup;
}
net_device->recv_buf_size = buf_size;
/*
* Establish the gpadl handle for this buffer on this
* channel. Note: This call uses the vmbus connection rather
* than the channel to establish the gpadl handle.
*/
ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
buf_size,
&net_device->recv_buf_gpadl_handle);
if (ret != 0) {
netdev_err(ndev,
"unable to establish receive buffer's gpadl\n");
goto cleanup;
}
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
init_packet->msg.v1_msg.send_recv_buf.
gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
init_packet->msg.v1_msg.
send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
trace_nvsp_send(ndev, init_packet);
/* Send the gpadl notification request */
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
(unsigned long)init_packet,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0) {
netdev_err(ndev,
"unable to send receive buffer's gpadl to netvsp\n");
goto cleanup;
}
wait_for_completion(&net_device->channel_init_wait);
/* Check the response */
resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
if (resp->status != NVSP_STAT_SUCCESS) {
netdev_err(ndev,
"Unable to complete receive buffer initialization with NetVsp - status %d\n",
resp->status);
ret = -EINVAL;
goto cleanup;
}
/* Parse the response */
netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
resp->num_sections, resp->sections[0].sub_alloc_size,
resp->sections[0].num_sub_allocs);
/* There should only be one section for the entire receive buffer */
if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
ret = -EINVAL;
goto cleanup;
}
net_device->recv_section_size = resp->sections[0].sub_alloc_size;
net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
/* Ensure buffer will not overflow */
if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
(u64)net_device->recv_section_cnt > (u64)buf_size) {
netdev_err(ndev, "invalid recv_section_size %u\n",
net_device->recv_section_size);
ret = -EINVAL;
goto cleanup;
}
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
struct netvsc_channel *nvchan = &net_device->chan_table[i];
nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
if (nvchan->recv_buf == NULL) {
ret = -ENOMEM;
goto cleanup;
}
}
/* Setup receive completion ring.
* Add 1 to the recv_section_cnt because at least one entry in a
* ring buffer has to be empty.
*/
net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
ret = netvsc_alloc_recv_comp_ring(net_device, 0);
if (ret)
goto cleanup;
/* Now setup the send buffer. */
buf_size = device_info->send_sections * device_info->send_section_size;
buf_size = round_up(buf_size, PAGE_SIZE);
net_device->send_buf = vzalloc(buf_size);
if (!net_device->send_buf) {
netdev_err(ndev, "unable to allocate send buffer of size %u\n",
buf_size);
ret = -ENOMEM;
goto cleanup;
}
net_device->send_buf_size = buf_size;
/* Establish the gpadl handle for this buffer on this
* channel. Note: This call uses the vmbus connection rather
* than the channel to establish the gpadl handle.
*/
ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
buf_size,
&net_device->send_buf_gpadl_handle);
if (ret != 0) {
netdev_err(ndev,
"unable to establish send buffer's gpadl\n");
goto cleanup;
}
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
net_device->send_buf_gpadl_handle.gpadl_handle;
init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
trace_nvsp_send(ndev, init_packet);
/* Send the gpadl notification request */
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
(unsigned long)init_packet,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0) {
netdev_err(ndev,
"unable to send send buffer's gpadl to netvsp\n");
goto cleanup;
}
wait_for_completion(&net_device->channel_init_wait);
/* Check the response */
if (init_packet->msg.v1_msg.
send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
netdev_err(ndev, "Unable to complete send buffer "
"initialization with NetVsp - status %d\n",
init_packet->msg.v1_msg.
send_send_buf_complete.status);
ret = -EINVAL;
goto cleanup;
}
/* Parse the response */
net_device->send_section_size = init_packet->msg.
v1_msg.send_send_buf_complete.section_size;
if (net_device->send_section_size < NETVSC_MTU_MIN) {
netdev_err(ndev, "invalid send_section_size %u\n",
net_device->send_section_size);
ret = -EINVAL;
goto cleanup;
}
/* Section count is simply the size divided by the section size. */
net_device->send_section_cnt = buf_size / net_device->send_section_size;
netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
net_device->send_section_size, net_device->send_section_cnt);
/* Setup state for managing the send buffer. */
net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
GFP_KERNEL);
if (!net_device->send_section_map) {
ret = -ENOMEM;
goto cleanup;
}
goto exit;
cleanup:
netvsc_revoke_recv_buf(device, net_device, ndev);
netvsc_revoke_send_buf(device, net_device, ndev);
netvsc_teardown_recv_gpadl(device, net_device, ndev);
netvsc_teardown_send_gpadl(device, net_device, ndev);
exit:
return ret;
}
/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
struct netvsc_device *net_device,
struct nvsp_message *init_packet,
u32 nvsp_ver)
{
struct net_device *ndev = hv_get_drvdata(device);
int ret;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
trace_nvsp_send(ndev, init_packet);
/* Send the init request */
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
(unsigned long)init_packet,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
return ret;
wait_for_completion(&net_device->channel_init_wait);
if (init_packet->msg.init_msg.init_complete.status !=
NVSP_STAT_SUCCESS)
return -EINVAL;
if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
return 0;
/* NVSPv2 or later: Send NDIS config */
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
if (hv_is_isolation_supported())
netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
else
init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
/* Teaming bit is needed to receive link speed updates */
init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
}
if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;
trace_nvsp_send(ndev, init_packet);
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
return ret;
}
static int netvsc_connect_vsp(struct hv_device *device,
struct netvsc_device *net_device,
const struct netvsc_device_info *device_info)
{
struct net_device *ndev = hv_get_drvdata(device);
static const u32 ver_list[] = {
NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
};
struct nvsp_message *init_packet;
int ndis_version, i, ret;
init_packet = &net_device->channel_init_pkt;
/* Negotiate the latest NVSP protocol supported */
for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
if (negotiate_nvsp_ver(device, net_device, init_packet,
ver_list[i]) == 0) {
net_device->nvsp_version = ver_list[i];
break;
}
if (i < 0) {
ret = -EPROTO;
goto cleanup;
}
if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
ret = -EPROTO;
goto cleanup;
}
pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
/* Send the ndis version */
memset(init_packet, 0, sizeof(struct nvsp_message));
if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
ndis_version = 0x00060001;
else
ndis_version = 0x0006001e;
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
init_packet->msg.v1_msg.
send_ndis_ver.ndis_major_ver =
(ndis_version & 0xFFFF0000) >> 16;
init_packet->msg.v1_msg.
send_ndis_ver.ndis_minor_ver =
ndis_version & 0xFFFF;
trace_nvsp_send(ndev, init_packet);
/* Send the init request */
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
if (ret != 0)
goto cleanup;
ret = netvsc_init_buf(device, net_device, device_info);
cleanup:
return ret;
}
/*
* netvsc_device_remove - Callback when the root bus device is removed
*/
void netvsc_device_remove(struct hv_device *device)
{
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct netvsc_device *net_device
= rtnl_dereference(net_device_ctx->nvdev);
int i;
/*
* Revoke receive buffer. If host is pre-Win2016 then tear down
* receive buffer GPADL. Do the same for send buffer.
*/
netvsc_revoke_recv_buf(device, net_device, ndev);
if (vmbus_proto_version < VERSION_WIN10)
netvsc_teardown_recv_gpadl(device, net_device, ndev);
netvsc_revoke_send_buf(device, net_device, ndev);
if (vmbus_proto_version < VERSION_WIN10)
netvsc_teardown_send_gpadl(device, net_device, ndev);
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
/* Disable NAPI and disassociate its context from the device. */
for (i = 0; i < net_device->num_chn; i++) {
/* See also vmbus_reset_channel_cb(). */
napi_disable(&net_device->chan_table[i].napi);
netif_napi_del(&net_device->chan_table[i].napi);
}
/*
* At this point, no one should be accessing net_device
* except in here
*/
netdev_dbg(ndev, "net device safe to remove\n");
/* Now, we can close the channel safely */
vmbus_close(device->channel);
/*
* If host is Win2016 or higher then we do the GPADL tear down
* here after VMBus is closed.
*/
if (vmbus_proto_version >= VERSION_WIN10) {
netvsc_teardown_recv_gpadl(device, net_device, ndev);
netvsc_teardown_send_gpadl(device, net_device, ndev);
}
/* Release all resources */
free_netvsc_device_rcu(net_device);
}
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
u32 index)
{
sync_change_bit(index, net_device->send_section_map);
}
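/* Completion handler for a previously sent RNDIS data packet: release
 * the send-buffer slot, update per-queue TX statistics, free the skb
 * and wake the transmit queue if the ring has drained enough.
 */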
static void netvsc_send_tx_complete(struct net_device *ndev,
struct netvsc_device *net_device,
struct vmbus_channel *channel,
const struct vmpacket_descriptor *desc,
int budget)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct sk_buff *skb;
u16 q_idx = 0;
int queue_sends;
u64 cmd_rqst;
cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
if (cmd_rqst == VMBUS_RQST_ERROR) {
netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
return;
}
skb = (struct sk_buff *)(unsigned long)cmd_rqst;
/* Notify the layer above us */
if (likely(skb)) {
struct hv_netvsc_packet *packet
= (struct hv_netvsc_packet *)skb->cb;
u32 send_index = packet->send_buf_index;
struct netvsc_stats_tx *tx_stats;
if (send_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, send_index);
q_idx = packet->q_idx;
tx_stats = &net_device->chan_table[q_idx].tx_stats;
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->packets += packet->total_packets;
tx_stats->bytes += packet->total_bytes;
u64_stats_update_end(&tx_stats->syncp);
netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
napi_consume_skb(skb, budget);
}
queue_sends =
atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
if (unlikely(net_device->destroy)) {
if (queue_sends == 0)
wake_up(&net_device->wait_drain);
} else {
struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
(hv_get_avail_to_write_percent(&channel->outbound) >
RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
netif_tx_wake_queue(txq);
ndev_ctx->eth_stats.wake_queue++;
}
}
}
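/* Handle a VM_PKT_COMP packet from the host: either a completion for
 * a control message (copied back and signalled via channel_init_wait)
 * or an RNDIS packet completion, which is passed on to
 * netvsc_send_tx_complete().
 */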
static void netvsc_send_completion(struct net_device *ndev,
struct netvsc_device *net_device,
struct vmbus_channel *incoming_channel,
const struct vmpacket_descriptor *desc,
int budget)
{
const struct nvsp_message *nvsp_packet;
u32 msglen = hv_pkt_datalen(desc);
struct nvsp_message *pkt_rqst;
u64 cmd_rqst;
u32 status;
/* First check if this is a VMBUS completion without data payload */
if (!msglen) {
cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
desc->trans_id);
if (cmd_rqst == VMBUS_RQST_ERROR) {
netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
return;
}
pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
switch (pkt_rqst->hdr.msg_type) {
case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
complete(&net_device->channel_init_wait);
break;
default:
netdev_err(ndev, "Unexpected VMBUS completion!!\n");
}
return;
}
/* Ensure packet is big enough to read header fields */
if (msglen < sizeof(struct nvsp_message_header)) {
netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
return;
}
nvsp_packet = hv_pkt_data(desc);
switch (nvsp_packet->hdr.msg_type) {
case NVSP_MSG_TYPE_INIT_COMPLETE:
if (msglen < sizeof(struct nvsp_message_header) +
sizeof(struct nvsp_message_init_complete)) {
netdev_err(ndev, "nvsp_msg length too small: %u\n",
msglen);
return;
}
fallthrough;
case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
if (msglen < sizeof(struct nvsp_message_header) +
sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
msglen);
return;
}
fallthrough;
case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
if (msglen < sizeof(struct nvsp_message_header) +
sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
msglen);
return;
}
fallthrough;
case NVSP_MSG5_TYPE_SUBCHANNEL:
if (msglen < sizeof(struct nvsp_message_header) +
sizeof(struct nvsp_5_subchannel_complete)) {
netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
msglen);
return;
}
/* Copy the response back */
memcpy(&net_device->channel_init_pkt, nvsp_packet,
sizeof(struct nvsp_message));
complete(&net_device->channel_init_wait);
break;
case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
if (msglen < sizeof(struct nvsp_message_header) +
sizeof(struct nvsp_1_message_send_rndis_packet_complete)) {
if (net_ratelimit())
netdev_err(ndev, "nvsp_rndis_pkt_complete length too small: %u\n",
msglen);
return;
}
/* If status indicates an error, output a message so we know
* there's a problem. But process the completion anyway so the
* resources are released.
*/
status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
if (status != NVSP_STAT_SUCCESS && net_ratelimit())
netdev_err(ndev, "nvsp_rndis_pkt_complete error status: %x\n",
status);
netvsc_send_tx_complete(ndev, net_device, incoming_channel,
desc, budget);
break;
default:
netdev_err(ndev,
"Unknown send completion type %d received!!\n",
nvsp_packet->hdr.msg_type);
}
}
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
unsigned long *map_addr = net_device->send_section_map;
unsigned int i;
for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
if (sync_test_and_set_bit(i, map_addr) == 0)
return i;
}
return NETVSC_INVALID_INDEX;
}
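/* Copy the packet (or just its RNDIS header when cp_partial is set)
 * into the chosen send-buffer section, padding to the packet
 * alignment when more data is expected in the same batch.
 */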
static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
unsigned int section_index,
u32 pend_size,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer *pb,
bool xmit_more)
{
char *start = net_device->send_buf;
char *dest = start + (section_index * net_device->send_section_size)
+ pend_size;
int i;
u32 padding = 0;
u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
packet->page_buf_cnt;
u32 remain;
/* Add padding */
remain = packet->total_data_buflen & (net_device->pkt_align - 1);
if (xmit_more && remain) {
padding = net_device->pkt_align - remain;
rndis_msg->msg_len += padding;
packet->total_data_buflen += padding;
}
for (i = 0; i < page_count; i++) {
char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
u32 offset = pb[i].offset;
u32 len = pb[i].len;
memcpy(dest, (src + offset), len);
dest += len;
}
if (padding)
memset(dest, 0, padding);
}
void netvsc_dma_unmap(struct hv_device *hv_dev,
struct hv_netvsc_packet *packet)
{
int i;
if (!hv_is_isolation_supported())
return;
if (!packet->dma_range)
return;
for (i = 0; i < packet->page_buf_cnt; i++)
dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
packet->dma_range[i].mapping_size,
DMA_TO_DEVICE);
kfree(packet->dma_range);
}
/* netvsc_dma_map - Map swiotlb bounce buffer with data page of
* packet sent by vmbus_sendpacket_pagebuffer() in the Isolation
* VM.
*
* In isolation VM, netvsc send buffer has been marked visible to
* host and so the data copied to send buffer doesn't need to use
* bounce buffer. The data pages handled by vmbus_sendpacket_pagebuffer()
* may not be copied to send buffer and so these pages need to be
* mapped with swiotlb bounce buffer. netvsc_dma_map() is to do
* that. The pfns in the struct hv_page_buffer need to be converted
* to bounce buffer's pfn. The loop here is necessary because the
* entries in the page buffer array are not necessarily full
* pages of data. Each entry in the array has a separate offset and
* len that may be non-zero, even for entries in the middle of the
* array. And the entries are not physically contiguous. So each
* entry must be individually mapped rather than as a contiguous unit.
* So dma_map_sg() is not used here.
*/
static int netvsc_dma_map(struct hv_device *hv_dev,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
{
u32 page_count = packet->page_buf_cnt;
dma_addr_t dma;
int i;
if (!hv_is_isolation_supported())
return 0;
packet->dma_range = kcalloc(page_count,
sizeof(*packet->dma_range),
GFP_ATOMIC);
if (!packet->dma_range)
return -ENOMEM;
for (i = 0; i < page_count; i++) {
char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
+ pb[i].offset);
u32 len = pb[i].len;
dma = dma_map_single(&hv_dev->device, src, len,
DMA_TO_DEVICE);
if (dma_mapping_error(&hv_dev->device, dma)) {
kfree(packet->dma_range);
return -ENOMEM;
}
/* pb[].offset and pb[].len are not changed during dma mapping
* and so not reassign.
*/
packet->dma_range[i].dma = dma;
packet->dma_range[i].mapping_size = len;
pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
}
return 0;
}
static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
struct netvsc_device *net_device,
struct hv_page_buffer *pb,
struct sk_buff *skb)
{
struct nvsp_message nvmsg;
struct nvsp_1_message_send_rndis_packet *rpkt =
&nvmsg.msg.v1_msg.send_rndis_pkt;
struct netvsc_channel * const nvchan =
&net_device->chan_table[packet->q_idx];
struct vmbus_channel *out_channel = nvchan->channel;
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
u64 req_id;
int ret;
u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
memset(&nvmsg, 0, sizeof(struct nvsp_message));
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (skb)
rpkt->channel_type = 0; /* 0 is RMC_DATA */
else
rpkt->channel_type = 1; /* 1 is RMC_CONTROL */
rpkt->send_buf_section_index = packet->send_buf_index;
if (packet->send_buf_index == NETVSC_INVALID_INDEX)
rpkt->send_buf_section_size = 0;
else
rpkt->send_buf_section_size = packet->total_data_buflen;
req_id = (ulong)skb;
if (out_channel->rescind)
return -ENODEV;
trace_nvsp_send_pkt(ndev, out_channel, rpkt);
packet->dma_range = NULL;
if (packet->page_buf_cnt) {
if (packet->cp_partial)
pb += packet->rmsg_pgcnt;
ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
if (ret) {
ret = -EAGAIN;
goto exit;
}
ret = vmbus_sendpacket_pagebuffer(out_channel,
pb, packet->page_buf_cnt,
&nvmsg, sizeof(nvmsg),
req_id);
if (ret)
netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
} else {
ret = vmbus_sendpacket(out_channel,
&nvmsg, sizeof(nvmsg),
req_id, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
exit:
if (ret == 0) {
atomic_inc_return(&nvchan->queue_sends);
if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
netif_tx_stop_queue(txq);
ndev_ctx->eth_stats.stop_queue++;
}
} else if (ret == -EAGAIN) {
netif_tx_stop_queue(txq);
ndev_ctx->eth_stats.stop_queue++;
} else {
netdev_err(ndev,
"Unable to send packet pages %u len %u, ret %d\n",
packet->page_buf_cnt, packet->total_data_buflen,
ret);
}
if (netif_tx_queue_stopped(txq) &&
atomic_read(&nvchan->queue_sends) < 1 &&
!net_device->tx_disable) {
netif_tx_wake_queue(txq);
ndev_ctx->eth_stats.wake_queue++;
if (ret == -EAGAIN)
ret = -ENOSPC;
}
return ret;
}
/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
struct sk_buff **msd_skb,
struct multi_send_data *msdp)
{
*msd_skb = msdp->skb;
*msd_send = msdp->pkt;
msdp->skb = NULL;
msdp->pkt = NULL;
msdp->count = 0;
}
/* RCU already held by caller */
/* Batching/bouncing logic is designed to attempt to optimize
* performance.
*
* For small, non-LSO packets we copy the packet to a send buffer
* which is pre-registered with the Hyper-V side. This enables the
* hypervisor to avoid remapping the aperture to access the packet
* descriptor and data.
*
* If we already started using a buffer and the netdev is transmitting
* a burst of packets, keep on copying into the buffer until it is
* full or we are done collecting a burst. If there is an existing
* buffer with space for the RNDIS descriptor but not the packet, copy
* the RNDIS descriptor to the buffer, keeping the packet in place.
*
* If we do batching and send more than one packet using a single
* NetVSC message, free the SKBs of the packets copied, except for the
* last packet. This is done to streamline the handling of the case
* where the last packet only had the RNDIS descriptor copied to the
* send buffer, with the data pointers included in the NetVSC message.
*/
int netvsc_send(struct net_device *ndev,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer *pb,
struct sk_buff *skb,
bool xdp_tx)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct netvsc_device *net_device
= rcu_dereference_bh(ndev_ctx->nvdev);
struct hv_device *device = ndev_ctx->device_ctx;
int ret = 0;
struct netvsc_channel *nvchan;
u32 pktlen = packet->total_data_buflen, msd_len = 0;
unsigned int section_index = NETVSC_INVALID_INDEX;
struct multi_send_data *msdp;
struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
struct sk_buff *msd_skb = NULL;
bool try_batch, xmit_more;
/* If device is rescinded, return error and packet will get dropped. */
if (unlikely(!net_device || net_device->destroy))
return -ENODEV;
nvchan = &net_device->chan_table[packet->q_idx];
packet->send_buf_index = NETVSC_INVALID_INDEX;
packet->cp_partial = false;
/* Send a control message or XDP packet directly without accessing
* msd (Multi-Send Data) field which may be changed during data packet
* processing.
*/
if (!skb || xdp_tx)
return netvsc_send_pkt(device, packet, net_device, pb, skb);
/* batch packets in send buffer if possible */
msdp = &nvchan->msd;
if (msdp->pkt)
msd_len = msdp->pkt->total_data_buflen;
try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
if (try_batch && msd_len + pktlen + net_device->pkt_align <
net_device->send_section_size) {
section_index = msdp->pkt->send_buf_index;
} else if (try_batch && msd_len + packet->rmsg_size <
net_device->send_section_size) {
section_index = msdp->pkt->send_buf_index;
packet->cp_partial = true;
} else if (pktlen + net_device->pkt_align <
net_device->send_section_size) {
section_index = netvsc_get_next_send_section(net_device);
if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
++ndev_ctx->eth_stats.tx_send_full;
} else {
move_pkt_msd(&msd_send, &msd_skb, msdp);
msd_len = 0;
}
}
/* Keep aggregating only if the stack says more data is coming,
 * we are not doing a mixed-mode (partial copy) send, and the
 * queue is not flow blocked.
 */
xmit_more = netdev_xmit_more() &&
!packet->cp_partial &&
!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
if (section_index != NETVSC_INVALID_INDEX) {
netvsc_copy_to_send_buf(net_device,
section_index, msd_len,
packet, rndis_msg, pb, xmit_more);
packet->send_buf_index = section_index;
if (packet->cp_partial) {
packet->page_buf_cnt -= packet->rmsg_pgcnt;
packet->total_data_buflen = msd_len + packet->rmsg_size;
} else {
packet->page_buf_cnt = 0;
packet->total_data_buflen += msd_len;
}
if (msdp->pkt) {
packet->total_packets += msdp->pkt->total_packets;
packet->total_bytes += msdp->pkt->total_bytes;
}
if (msdp->skb)
dev_consume_skb_any(msdp->skb);
if (xmit_more) {
msdp->skb = skb;
msdp->pkt = packet;
msdp->count++;
} else {
cur_send = packet;
msdp->skb = NULL;
msdp->pkt = NULL;
msdp->count = 0;
}
} else {
move_pkt_msd(&msd_send, &msd_skb, msdp);
cur_send = packet;
}
if (msd_send) {
int m_ret = netvsc_send_pkt(device, msd_send, net_device,
NULL, msd_skb);
if (m_ret != 0) {
netvsc_free_send_slot(net_device,
msd_send->send_buf_index);
dev_kfree_skb_any(msd_skb);
}
}
if (cur_send)
ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, section_index);
return ret;
}
/* Send pending recv completions */
static int send_recv_completions(struct net_device *ndev,
struct netvsc_device *nvdev,
struct netvsc_channel *nvchan)
{
struct multi_recv_comp *mrc = &nvchan->mrc;
struct recv_comp_msg {
struct nvsp_message_header hdr;
u32 status;
} __packed;
struct recv_comp_msg msg = {
.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
};
int ret;
while (mrc->first != mrc->next) {
const struct recv_comp_data *rcd
= mrc->slots + mrc->first;
msg.status = rcd->status;
ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
rcd->tid, VM_PKT_COMP, 0);
if (unlikely(ret)) {
struct net_device_context *ndev_ctx = netdev_priv(ndev);
++ndev_ctx->eth_stats.rx_comp_busy;
return ret;
}
if (++mrc->first == nvdev->recv_completion_cnt)
mrc->first = 0;
}
/* receive completion ring has been emptied */
if (unlikely(nvdev->destroy))
wake_up(&nvdev->wait_drain);
return 0;
}
/* Count how many receive completions are outstanding */
static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
const struct multi_recv_comp *mrc,
u32 *filled, u32 *avail)
{
u32 count = nvdev->recv_completion_cnt;
if (mrc->next >= mrc->first)
*filled = mrc->next - mrc->first;
else
*filled = (count - mrc->first) + mrc->next;
*avail = count - *filled - 1;
}
/* Add receive complete to ring to send to host. */
static void enq_receive_complete(struct net_device *ndev,
struct netvsc_device *nvdev, u16 q_idx,
u64 tid, u32 status)
{
struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
struct multi_recv_comp *mrc = &nvchan->mrc;
struct recv_comp_data *rcd;
u32 filled, avail;
recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
if (unlikely(filled > NAPI_POLL_WEIGHT)) {
send_recv_completions(ndev, nvdev, nvchan);
recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
}
if (unlikely(!avail)) {
netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
q_idx, tid);
return;
}
rcd = mrc->slots + mrc->next;
rcd->tid = tid;
rcd->status = status;
if (++mrc->next == nvdev->recv_completion_cnt)
mrc->next = 0;
}
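/* Handle a VM_PKT_DATA_USING_XFER_PAGES packet: validate the transfer
 * page header, pass each contained RNDIS packet up through the RNDIS
 * filter, and queue a receive completion for the whole transfer.
 */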
static int netvsc_receive(struct net_device *ndev,
struct netvsc_device *net_device,
struct netvsc_channel *nvchan,
const struct vmpacket_descriptor *desc)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct vmbus_channel *channel = nvchan->channel;
const struct vmtransfer_page_packet_header *vmxferpage_packet
= container_of(desc, const struct vmtransfer_page_packet_header, d);
const struct nvsp_message *nvsp = hv_pkt_data(desc);
u32 msglen = hv_pkt_datalen(desc);
u16 q_idx = channel->offermsg.offer.sub_channel_index;
char *recv_buf = net_device->recv_buf;
u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
/* Ensure packet is big enough to read header fields */
if (msglen < sizeof(struct nvsp_message_header)) {
netif_err(net_device_ctx, rx_err, ndev,
"invalid nvsp header, length too small: %u\n",
msglen);
return 0;
}
/* Make sure this is a valid nvsp packet */
if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
netif_err(net_device_ctx, rx_err, ndev,
"Unknown nvsp packet type received %u\n",
nvsp->hdr.msg_type);
return 0;
}
/* Validate xfer page pkt header */
if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
netif_err(net_device_ctx, rx_err, ndev,
"Invalid xfer page pkt, offset too small: %u\n",
desc->offset8 << 3);
return 0;
}
if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
netif_err(net_device_ctx, rx_err, ndev,
"Invalid xfer page set id - expecting %x got %x\n",
NETVSC_RECEIVE_BUFFER_ID,
vmxferpage_packet->xfer_pageset_id);
return 0;
}
count = vmxferpage_packet->range_cnt;
/* Check count for a valid value */
if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
netif_err(net_device_ctx, rx_err, ndev,
"Range count is not valid: %d\n",
count);
return 0;
}
/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
for (i = 0; i < count; i++) {
u32 offset = vmxferpage_packet->ranges[i].byte_offset;
u32 buflen = vmxferpage_packet->ranges[i].byte_count;
void *data;
int ret;
if (unlikely(offset > net_device->recv_buf_size ||
buflen > net_device->recv_buf_size - offset)) {
nvchan->rsc.cnt = 0;
status = NVSP_STAT_FAIL;
netif_err(net_device_ctx, rx_err, ndev,
"Packet offset:%u + len:%u too big\n",
offset, buflen);
continue;
}
/* We're going to copy (sections of) the packet into nvchan->recv_buf;
* make sure that nvchan->recv_buf is large enough to hold the packet.
*/
if (unlikely(buflen > net_device->recv_section_size)) {
nvchan->rsc.cnt = 0;
status = NVSP_STAT_FAIL;
netif_err(net_device_ctx, rx_err, ndev,
"Packet too big: buflen=%u recv_section_size=%u\n",
buflen, net_device->recv_section_size);
continue;
}
data = recv_buf + offset;
nvchan->rsc.is_last = (i == count - 1);
trace_rndis_recv(ndev, q_idx, data);
/* Pass it to the upper layer */
ret = rndis_filter_receive(ndev, net_device,
nvchan, data, buflen);
if (unlikely(ret != NVSP_STAT_SUCCESS)) {
/* Drop incomplete packet */
nvchan->rsc.cnt = 0;
status = NVSP_STAT_FAIL;
}
}
enq_receive_complete(ndev, net_device, q_idx,
vmxferpage_packet->d.trans_id, status);
return count;
}
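/* Parse the NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE message and copy
 * the host-provided send indirection table, compensating for a host
 * bug in the offset field on NVSP versions up to 6.
 */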
static void netvsc_send_table(struct net_device *ndev,
struct netvsc_device *nvscdev,
const struct nvsp_message *nvmsg,
u32 msglen)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
u32 count, offset, *tab;
int i;
/* Ensure packet is big enough to read send_table fields */
if (msglen < sizeof(struct nvsp_message_header) +
sizeof(struct nvsp_5_send_indirect_table)) {
netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
return;
}
count = nvmsg->msg.v5_msg.send_table.count;
offset = nvmsg->msg.v5_msg.send_table.offset;
if (count != VRSS_SEND_TAB_SIZE) {
netdev_err(ndev, "Received wrong send-table size:%u\n", count);
return;
}
/* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
* wrong due to a host bug. So fix the offset here.
*/
if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
msglen >= sizeof(struct nvsp_message_header) +
sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
offset = sizeof(struct nvsp_message_header) +
sizeof(union nvsp_6_message_uber);
/* Boundary check for all versions */
if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
netdev_err(ndev, "Received send-table offset too big:%u\n",
offset);
return;
}
tab = (void *)nvmsg + offset;
for (i = 0; i < count; i++)
net_device_ctx->tx_table[i] = tab[i];
}
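/* Handle the VF association message from the host: record the VF
 * serial number and allocation state, and signal vf_add so anyone
 * waiting for the VF announcement can proceed.
 */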
static void netvsc_send_vf(struct net_device *ndev,
const struct nvsp_message *nvmsg,
u32 msglen)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
/* Ensure packet is big enough to read its fields */
if (msglen < sizeof(struct nvsp_message_header) +
sizeof(struct nvsp_4_send_vf_association)) {
netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
return;
}
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
if (net_device_ctx->vf_alloc)
complete(&net_device_ctx->vf_add);
netdev_info(ndev, "VF slot %u %s\n",
net_device_ctx->vf_serial,
net_device_ctx->vf_alloc ? "added" : "removed");
}
static void netvsc_receive_inband(struct net_device *ndev,
struct netvsc_device *nvscdev,
const struct vmpacket_descriptor *desc)
{
const struct nvsp_message *nvmsg = hv_pkt_data(desc);
u32 msglen = hv_pkt_datalen(desc);
/* Ensure packet is big enough to read header fields */
if (msglen < sizeof(struct nvsp_message_header)) {
netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
return;
}
switch (nvmsg->hdr.msg_type) {
case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
break;
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
if (hv_is_isolation_supported())
netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
else
netvsc_send_vf(ndev, nvmsg, msglen);
break;
}
}
static int netvsc_process_raw_pkt(struct hv_device *device,
struct netvsc_channel *nvchan,
struct netvsc_device *net_device,
struct net_device *ndev,
const struct vmpacket_descriptor *desc,
int budget)
{
struct vmbus_channel *channel = nvchan->channel;
const struct nvsp_message *nvmsg = hv_pkt_data(desc);
trace_nvsp_recv(ndev, channel, nvmsg);
switch (desc->type) {
case VM_PKT_COMP:
netvsc_send_completion(ndev, net_device, channel, desc, budget);
break;
case VM_PKT_DATA_USING_XFER_PAGES:
return netvsc_receive(ndev, net_device, nvchan, desc);
case VM_PKT_DATA_INBAND:
netvsc_receive_inband(ndev, net_device, desc);
break;
default:
netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
desc->type, desc->trans_id);
break;
}
return 0;
}
static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
struct vmbus_channel *primary = channel->primary_channel;
return primary ? primary->device_obj : channel->device_obj;
}
/* Network processing softirq
* Process data in incoming ring buffer from host
* Stops when ring is empty or budget is met or exceeded.
*/
int netvsc_poll(struct napi_struct *napi, int budget)
{
struct netvsc_channel *nvchan
= container_of(napi, struct netvsc_channel, napi);
struct netvsc_device *net_device = nvchan->net_device;
struct vmbus_channel *channel = nvchan->channel;
struct hv_device *device = netvsc_channel_to_device(channel);
struct net_device *ndev = hv_get_drvdata(device);
int work_done = 0;
int ret;
/* If starting a new interval */
if (!nvchan->desc)
nvchan->desc = hv_pkt_iter_first(channel);
nvchan->xdp_flush = false;
while (nvchan->desc && work_done < budget) {
work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
ndev, nvchan->desc, budget);
nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
}
if (nvchan->xdp_flush)
xdp_do_flush();
/* Send any pending receive completions */
ret = send_recv_completions(ndev, net_device, nvchan);
/* If it did not exhaust NAPI budget this time
* and not doing busy poll
* then re-enable host interrupts
* and reschedule if ring is not empty
* or sending receive completion failed.
*/
if (work_done < budget &&
napi_complete_done(napi, work_done) &&
(ret || hv_end_read(&channel->inbound)) &&
napi_schedule_prep(napi)) {
hv_begin_read(&channel->inbound);
__napi_schedule(napi);
}
/* Driver may overshoot since multiple packets per descriptor */
return min(work_done, budget);
}
/* Call back when data is available in host ring buffer.
* Processing is deferred until network softirq (NAPI)
*/
void netvsc_channel_cb(void *context)
{
struct netvsc_channel *nvchan = context;
struct vmbus_channel *channel = nvchan->channel;
struct hv_ring_buffer_info *rbi = &channel->inbound;
/* preload first vmpacket descriptor */
prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
if (napi_schedule_prep(&nvchan->napi)) {
/* disable interrupts from host */
hv_begin_read(rbi);
__napi_schedule_irqoff(&nvchan->napi);
}
}
/*
* netvsc_device_add - Callback when the device belonging to this
* driver is added
*/
struct netvsc_device *netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *device_info)
{
int i, ret = 0;
struct netvsc_device *net_device;
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
net_device = alloc_net_device();
if (!net_device)
return ERR_PTR(-ENOMEM);
for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
net_device_ctx->tx_table[i] = 0;
/* Because the device uses NAPI, all the interrupt batching and
* control is done via Net softirq, not the channel handling
*/
set_channel_read_mode(device->channel, HV_CALL_ISR);
/* If we're reopening the device we may have multiple queues, fill the
* chn_table with the default channel to use it before subchannels are
* opened.
* Initialize the channel state before we open;
* we can be interrupted as soon as we open the channel.
*/
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
struct netvsc_channel *nvchan = &net_device->chan_table[i];
nvchan->channel = device->channel;
nvchan->net_device = net_device;
u64_stats_init(&nvchan->tx_stats.syncp);
u64_stats_init(&nvchan->rx_stats.syncp);
ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
if (ret) {
netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
goto cleanup2;
}
ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL);
if (ret) {
netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
goto cleanup2;
}
}
/* Enable NAPI handler before init callbacks */
netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
/* Open the channel */
device->channel->next_request_id_callback = vmbus_next_request_id;
device->channel->request_addr_callback = vmbus_request_addr;
device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;
ret = vmbus_open(device->channel, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, net_device->chan_table);
if (ret != 0) {
netdev_err(ndev, "unable to open channel: %d\n", ret);
goto cleanup;
}
/* Channel is opened */
netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
napi_enable(&net_device->chan_table[0].napi);
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device, net_device, device_info);
if (ret != 0) {
netdev_err(ndev,
"unable to connect to NetVSP - %d\n", ret);
goto close;
}
/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
* populated.
*/
rcu_assign_pointer(net_device_ctx->nvdev, net_device);
return net_device;
close:
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
napi_disable(&net_device->chan_table[0].napi);
/* Now, we can close the channel safely */
vmbus_close(device->channel);
cleanup:
netif_napi_del(&net_device->chan_table[0].napi);
cleanup2:
free_netvsc_device(&net_device->rcu);
return ERR_PTR(ret);
}
| linux-master | drivers/net/hyperv/netvsc.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP)
* Implements DMTF specification
* "DSP0237 Management Component Transport Protocol (MCTP) SMBus/I2C
* Transport Binding"
* https://www.dmtf.org/sites/default/files/standards/documents/DSP0237_1.2.0.pdf
*
 * A netdev is created for each I2C bus that handles MCTP. In the case of an
 * I2C mux topology, a single I2C client is attached to the root of the mux
 * tree and shared between all mux I2C busses underneath. For non-mux cases an
 * I2C client is attached per netdev.
*
* mctp-i2c-controller.yml devicetree binding has further details.
*
* Copyright (c) 2022 Code Construct
* Copyright (c) 2022 Google
*/
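/*
 * A hypothetical devicetree fragment sketching the topology described above
 * (node names, addresses and the mux layout are purely illustrative; the
 * mctp-i2c-controller binding document is authoritative):
 *
 *	&i2c0 {					// root adapter, client lives here
 *		mctp@30 {
 *			compatible = "mctp-i2c-controller";
 *			reg = <(0x30 | I2C_OWN_SLAVE_ADDRESS)>;
 *		};
 *	};
 *	&i2c_mux_bus0 {				// bus behind the mux
 *		mctp-controller;		// gets its own mctpi2cN netdev
 *	};
 */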
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/if_arp.h>
#include <net/mctp.h>
#include <net/mctpdevice.h>
/* byte_count is limited to u8 */
#define MCTP_I2C_MAXBLOCK 255
/* One byte is taken by source_slave */
#define MCTP_I2C_MAXMTU (MCTP_I2C_MAXBLOCK - 1)
#define MCTP_I2C_MINMTU (64 + 4)
/* Allow space for dest_address, command, byte_count, data, PEC */
#define MCTP_I2C_BUFSZ (3 + MCTP_I2C_MAXBLOCK + 1)
#define MCTP_I2C_MINLEN 8
#define MCTP_I2C_COMMANDCODE 0x0f
#define MCTP_I2C_TX_WORK_LEN 100
/* Sufficient for 64kB at min mtu */
#define MCTP_I2C_TX_QUEUE_LEN 1100
#define MCTP_I2C_OF_PROP "mctp-controller"
enum {
MCTP_I2C_FLOW_STATE_NEW = 0,
MCTP_I2C_FLOW_STATE_ACTIVE,
MCTP_I2C_FLOW_STATE_INVALID,
};
/* List of all struct mctp_i2c_client
* Lock protects driver_clients and also prevents adding/removing adapters
* during mctp_i2c_client probe/remove.
*/
static DEFINE_MUTEX(driver_clients_lock);
static LIST_HEAD(driver_clients);
struct mctp_i2c_client;
/* The netdev structure. One of these per I2C adapter. */
struct mctp_i2c_dev {
struct net_device *ndev;
struct i2c_adapter *adapter;
struct mctp_i2c_client *client;
struct list_head list; /* For mctp_i2c_client.devs */
size_t rx_pos;
u8 rx_buffer[MCTP_I2C_BUFSZ];
struct completion rx_done;
struct task_struct *tx_thread;
wait_queue_head_t tx_wq;
struct sk_buff_head tx_queue;
u8 tx_scratch[MCTP_I2C_BUFSZ];
/* A fake entry in our tx queue to perform an unlock operation */
struct sk_buff unlock_marker;
/* Spinlock protects i2c_lock_count, release_count, allow_rx */
spinlock_t lock;
int i2c_lock_count;
int release_count;
/* Indicates that the netif is ready to receive incoming packets */
bool allow_rx;
};
/* The i2c client structure. One per hardware i2c bus at the top of the
* mux tree, shared by multiple netdevs
*/
struct mctp_i2c_client {
struct i2c_client *client;
u8 lladdr;
struct mctp_i2c_dev *sel;
struct list_head devs;
spinlock_t sel_lock; /* Protects sel and devs */
struct list_head list; /* For driver_clients */
};
/* Header on the wire. */
struct mctp_i2c_hdr {
u8 dest_slave;
u8 command;
/* Count of bytes following byte_count, excluding PEC */
u8 byte_count;
u8 source_slave;
};
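/*
 * Worked example of the on-wire layout (illustrative values, derived from
 * mctp_i2c_header_create()/mctp_i2c_xmit() below rather than quoted from
 * DSP0237): a 4-byte MCTP packet from 7-bit address 0x10 to 0x20 is sent as
 *
 *	dest_slave	0x40	(0x20 << 1, write)
 *	command		0x0f	(MCTP_I2C_COMMANDCODE)
 *	byte_count	0x05	(source_slave plus 4 packet bytes, PEC excluded)
 *	source_slave	0x21	((0x10 << 1) | 1)
 *	data[4]		the MCTP packet
 *	PEC		i2c_smbus_pec() over everything above, dest_slave included
 */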
static int mctp_i2c_recv(struct mctp_i2c_dev *midev);
static int mctp_i2c_slave_cb(struct i2c_client *client,
enum i2c_slave_event event, u8 *val);
static void mctp_i2c_ndo_uninit(struct net_device *dev);
static int mctp_i2c_ndo_open(struct net_device *dev);
static struct i2c_adapter *mux_root_adapter(struct i2c_adapter *adap)
{
#if IS_ENABLED(CONFIG_I2C_MUX)
return i2c_root_adapter(&adap->dev);
#else
/* In non-mux config all i2c adapters are root adapters */
return adap;
#endif
}
/* Creates a new i2c slave device attached to the root adapter.
* Sets up the slave callback.
* Must be called with a client on a root adapter.
*/
static struct mctp_i2c_client *mctp_i2c_new_client(struct i2c_client *client)
{
struct mctp_i2c_client *mcli = NULL;
struct i2c_adapter *root = NULL;
int rc;
if (client->flags & I2C_CLIENT_TEN) {
dev_err(&client->dev, "failed, MCTP requires a 7-bit I2C address, addr=0x%x\n",
client->addr);
rc = -EINVAL;
goto err;
}
root = mux_root_adapter(client->adapter);
if (!root) {
dev_err(&client->dev, "failed to find root adapter\n");
rc = -ENOENT;
goto err;
}
if (root != client->adapter) {
dev_err(&client->dev,
"A mctp-i2c-controller client cannot be placed on an I2C mux adapter.\n"
" It should be placed on the mux tree root adapter\n"
" then set mctp-controller property on adapters to attach\n");
rc = -EINVAL;
goto err;
}
mcli = kzalloc(sizeof(*mcli), GFP_KERNEL);
if (!mcli) {
rc = -ENOMEM;
goto err;
}
spin_lock_init(&mcli->sel_lock);
INIT_LIST_HEAD(&mcli->devs);
INIT_LIST_HEAD(&mcli->list);
mcli->lladdr = client->addr & 0xff;
mcli->client = client;
i2c_set_clientdata(client, mcli);
rc = i2c_slave_register(mcli->client, mctp_i2c_slave_cb);
if (rc < 0) {
dev_err(&client->dev, "i2c register failed %d\n", rc);
mcli->client = NULL;
i2c_set_clientdata(client, NULL);
goto err;
}
return mcli;
err:
if (mcli) {
if (mcli->client)
i2c_unregister_device(mcli->client);
kfree(mcli);
}
return ERR_PTR(rc);
}
static void mctp_i2c_free_client(struct mctp_i2c_client *mcli)
{
int rc;
WARN_ON(!mutex_is_locked(&driver_clients_lock));
WARN_ON(!list_empty(&mcli->devs));
WARN_ON(mcli->sel); /* sanity check, no locking */
rc = i2c_slave_unregister(mcli->client);
/* Leak if it fails, we can't propagate errors upwards */
if (rc < 0)
dev_err(&mcli->client->dev, "i2c unregister failed %d\n", rc);
else
kfree(mcli);
}
/* Switch the mctp i2c device to receive responses.
* Call with sel_lock held
*/
static void __mctp_i2c_device_select(struct mctp_i2c_client *mcli,
struct mctp_i2c_dev *midev)
{
assert_spin_locked(&mcli->sel_lock);
if (midev)
dev_hold(midev->ndev);
if (mcli->sel)
dev_put(mcli->sel->ndev);
mcli->sel = midev;
}
/* Switch the mctp i2c device to receive responses */
static void mctp_i2c_device_select(struct mctp_i2c_client *mcli,
struct mctp_i2c_dev *midev)
{
unsigned long flags;
spin_lock_irqsave(&mcli->sel_lock, flags);
__mctp_i2c_device_select(mcli, midev);
spin_unlock_irqrestore(&mcli->sel_lock, flags);
}
static int mctp_i2c_slave_cb(struct i2c_client *client,
enum i2c_slave_event event, u8 *val)
{
struct mctp_i2c_client *mcli = i2c_get_clientdata(client);
struct mctp_i2c_dev *midev = NULL;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&mcli->sel_lock, flags);
midev = mcli->sel;
if (midev)
dev_hold(midev->ndev);
spin_unlock_irqrestore(&mcli->sel_lock, flags);
if (!midev)
return 0;
switch (event) {
case I2C_SLAVE_WRITE_RECEIVED:
if (midev->rx_pos < MCTP_I2C_BUFSZ) {
midev->rx_buffer[midev->rx_pos] = *val;
midev->rx_pos++;
} else {
midev->ndev->stats.rx_over_errors++;
}
break;
case I2C_SLAVE_WRITE_REQUESTED:
/* dest_slave as first byte */
midev->rx_buffer[0] = mcli->lladdr << 1;
midev->rx_pos = 1;
break;
case I2C_SLAVE_STOP:
rc = mctp_i2c_recv(midev);
break;
default:
break;
}
dev_put(midev->ndev);
return rc;
}
/* Processes incoming data that has been accumulated by the slave cb */
static int mctp_i2c_recv(struct mctp_i2c_dev *midev)
{
struct net_device *ndev = midev->ndev;
struct mctp_i2c_hdr *hdr;
struct mctp_skb_cb *cb;
struct sk_buff *skb;
unsigned long flags;
u8 pec, calc_pec;
size_t recvlen;
int status;
/* + 1 for the PEC */
if (midev->rx_pos < MCTP_I2C_MINLEN + 1) {
ndev->stats.rx_length_errors++;
return -EINVAL;
}
/* recvlen excludes PEC */
recvlen = midev->rx_pos - 1;
hdr = (void *)midev->rx_buffer;
if (hdr->command != MCTP_I2C_COMMANDCODE) {
ndev->stats.rx_dropped++;
return -EINVAL;
}
if (hdr->byte_count + offsetof(struct mctp_i2c_hdr, source_slave) != recvlen) {
ndev->stats.rx_length_errors++;
return -EINVAL;
}
pec = midev->rx_buffer[midev->rx_pos - 1];
calc_pec = i2c_smbus_pec(0, midev->rx_buffer, recvlen);
if (pec != calc_pec) {
ndev->stats.rx_crc_errors++;
return -EINVAL;
}
skb = netdev_alloc_skb(ndev, recvlen);
if (!skb) {
ndev->stats.rx_dropped++;
return -ENOMEM;
}
skb->protocol = htons(ETH_P_MCTP);
skb_put_data(skb, midev->rx_buffer, recvlen);
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(struct mctp_i2c_hdr));
skb_reset_network_header(skb);
cb = __mctp_cb(skb);
cb->halen = 1;
cb->haddr[0] = hdr->source_slave >> 1;
/* We need to ensure that the netif is not used once netdev
* unregister occurs
*/
spin_lock_irqsave(&midev->lock, flags);
if (midev->allow_rx) {
reinit_completion(&midev->rx_done);
spin_unlock_irqrestore(&midev->lock, flags);
status = netif_rx(skb);
complete(&midev->rx_done);
} else {
status = NET_RX_DROP;
spin_unlock_irqrestore(&midev->lock, flags);
}
if (status == NET_RX_SUCCESS) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += recvlen;
} else {
ndev->stats.rx_dropped++;
}
return 0;
}
enum mctp_i2c_flow_state {
MCTP_I2C_TX_FLOW_INVALID,
MCTP_I2C_TX_FLOW_NONE,
MCTP_I2C_TX_FLOW_NEW,
MCTP_I2C_TX_FLOW_EXISTING,
};
static enum mctp_i2c_flow_state
mctp_i2c_get_tx_flow_state(struct mctp_i2c_dev *midev, struct sk_buff *skb)
{
enum mctp_i2c_flow_state state;
struct mctp_sk_key *key;
struct mctp_flow *flow;
unsigned long flags;
flow = skb_ext_find(skb, SKB_EXT_MCTP);
if (!flow)
return MCTP_I2C_TX_FLOW_NONE;
key = flow->key;
if (!key)
return MCTP_I2C_TX_FLOW_NONE;
spin_lock_irqsave(&key->lock, flags);
/* If the key is present but invalid, we're unlikely to be able
* to handle the flow at all; just drop now
*/
if (!key->valid) {
state = MCTP_I2C_TX_FLOW_INVALID;
} else {
switch (key->dev_flow_state) {
case MCTP_I2C_FLOW_STATE_NEW:
key->dev_flow_state = MCTP_I2C_FLOW_STATE_ACTIVE;
state = MCTP_I2C_TX_FLOW_NEW;
break;
case MCTP_I2C_FLOW_STATE_ACTIVE:
state = MCTP_I2C_TX_FLOW_EXISTING;
break;
default:
state = MCTP_I2C_TX_FLOW_INVALID;
}
}
spin_unlock_irqrestore(&key->lock, flags);
return state;
}
/* We're not contending with ourselves here; we only need to exclude other
* i2c clients from using the bus. refcounts are simply to prevent
* recursive locking.
*/
static void mctp_i2c_lock_nest(struct mctp_i2c_dev *midev)
{
unsigned long flags;
bool lock;
spin_lock_irqsave(&midev->lock, flags);
lock = midev->i2c_lock_count == 0;
midev->i2c_lock_count++;
spin_unlock_irqrestore(&midev->lock, flags);
if (lock)
i2c_lock_bus(midev->adapter, I2C_LOCK_SEGMENT);
}
static void mctp_i2c_unlock_nest(struct mctp_i2c_dev *midev)
{
unsigned long flags;
bool unlock;
spin_lock_irqsave(&midev->lock, flags);
if (!WARN_ONCE(midev->i2c_lock_count == 0, "lock count underflow!"))
midev->i2c_lock_count--;
unlock = midev->i2c_lock_count == 0;
spin_unlock_irqrestore(&midev->lock, flags);
if (unlock)
i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
}
/* Unlocks the bus if was previously locked, used for cleanup */
static void mctp_i2c_unlock_reset(struct mctp_i2c_dev *midev)
{
unsigned long flags;
bool unlock;
spin_lock_irqsave(&midev->lock, flags);
unlock = midev->i2c_lock_count > 0;
midev->i2c_lock_count = 0;
spin_unlock_irqrestore(&midev->lock, flags);
if (unlock)
i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
}
static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
{
struct net_device_stats *stats = &midev->ndev->stats;
enum mctp_i2c_flow_state fs;
struct mctp_i2c_hdr *hdr;
struct i2c_msg msg = {0};
u8 *pecp;
int rc;
fs = mctp_i2c_get_tx_flow_state(midev, skb);
hdr = (void *)skb_mac_header(skb);
/* Sanity check that packet contents matches skb length,
* and can't exceed MCTP_I2C_BUFSZ
*/
if (skb->len != hdr->byte_count + 3) {
dev_warn_ratelimited(&midev->adapter->dev,
"Bad tx length %d vs skb %u\n",
hdr->byte_count + 3, skb->len);
return;
}
if (skb_tailroom(skb) >= 1) {
/* Linear case with space, we can just append the PEC */
skb_put(skb, 1);
} else {
/* Otherwise need to copy the buffer */
skb_copy_bits(skb, 0, midev->tx_scratch, skb->len);
hdr = (void *)midev->tx_scratch;
}
pecp = (void *)&hdr->source_slave + hdr->byte_count;
*pecp = i2c_smbus_pec(0, (u8 *)hdr, hdr->byte_count + 3);
msg.buf = (void *)&hdr->command;
/* command, bytecount, data, pec */
msg.len = 2 + hdr->byte_count + 1;
msg.addr = hdr->dest_slave >> 1;
switch (fs) {
case MCTP_I2C_TX_FLOW_NONE:
/* no flow: full lock & unlock */
mctp_i2c_lock_nest(midev);
mctp_i2c_device_select(midev->client, midev);
rc = __i2c_transfer(midev->adapter, &msg, 1);
mctp_i2c_unlock_nest(midev);
break;
case MCTP_I2C_TX_FLOW_NEW:
/* new flow: lock, tx, but don't unlock; that will happen
* on flow release
*/
mctp_i2c_lock_nest(midev);
mctp_i2c_device_select(midev->client, midev);
fallthrough;
case MCTP_I2C_TX_FLOW_EXISTING:
/* existing flow: we already have the lock; just tx */
rc = __i2c_transfer(midev->adapter, &msg, 1);
break;
case MCTP_I2C_TX_FLOW_INVALID:
return;
}
if (rc < 0) {
dev_warn_ratelimited(&midev->adapter->dev,
"__i2c_transfer failed %d\n", rc);
stats->tx_errors++;
} else {
stats->tx_bytes += skb->len;
stats->tx_packets++;
}
}
static void mctp_i2c_flow_release(struct mctp_i2c_dev *midev)
{
unsigned long flags;
bool unlock;
spin_lock_irqsave(&midev->lock, flags);
if (midev->release_count > midev->i2c_lock_count) {
WARN_ONCE(1, "release count overflow");
midev->release_count = midev->i2c_lock_count;
}
midev->i2c_lock_count -= midev->release_count;
unlock = midev->i2c_lock_count == 0 && midev->release_count > 0;
midev->release_count = 0;
spin_unlock_irqrestore(&midev->lock, flags);
if (unlock)
i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
}
static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned int len)
{
struct mctp_i2c_hdr *hdr;
struct mctp_hdr *mhdr;
u8 lldst, llsrc;
if (len > MCTP_I2C_MAXMTU)
return -EMSGSIZE;
lldst = *((u8 *)daddr);
llsrc = *((u8 *)saddr);
skb_push(skb, sizeof(struct mctp_i2c_hdr));
skb_reset_mac_header(skb);
hdr = (void *)skb_mac_header(skb);
mhdr = mctp_hdr(skb);
hdr->dest_slave = (lldst << 1) & 0xff;
hdr->command = MCTP_I2C_COMMANDCODE;
hdr->byte_count = len + 1;
hdr->source_slave = ((llsrc << 1) & 0xff) | 0x01;
mhdr->ver = 0x01;
return sizeof(struct mctp_i2c_hdr);
}
static int mctp_i2c_tx_thread(void *data)
{
struct mctp_i2c_dev *midev = data;
struct sk_buff *skb;
unsigned long flags;
for (;;) {
if (kthread_should_stop())
break;
spin_lock_irqsave(&midev->tx_queue.lock, flags);
skb = __skb_dequeue(&midev->tx_queue);
if (netif_queue_stopped(midev->ndev))
netif_wake_queue(midev->ndev);
spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
if (skb == &midev->unlock_marker) {
mctp_i2c_flow_release(midev);
} else if (skb) {
mctp_i2c_xmit(midev, skb);
kfree_skb(skb);
} else {
wait_event_idle(midev->tx_wq,
!skb_queue_empty(&midev->tx_queue) ||
kthread_should_stop());
}
}
return 0;
}
static netdev_tx_t mctp_i2c_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct mctp_i2c_dev *midev = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&midev->tx_queue.lock, flags);
if (skb_queue_len(&midev->tx_queue) >= MCTP_I2C_TX_WORK_LEN) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
__skb_queue_tail(&midev->tx_queue, skb);
if (skb_queue_len(&midev->tx_queue) == MCTP_I2C_TX_WORK_LEN)
netif_stop_queue(dev);
spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
wake_up(&midev->tx_wq);
return NETDEV_TX_OK;
}
static void mctp_i2c_release_flow(struct mctp_dev *mdev,
struct mctp_sk_key *key)
{
struct mctp_i2c_dev *midev = netdev_priv(mdev->dev);
bool queue_release = false;
unsigned long flags;
spin_lock_irqsave(&midev->lock, flags);
/* if we have seen the flow/key previously, we need to pair the
* original lock with a release
*/
if (key->dev_flow_state == MCTP_I2C_FLOW_STATE_ACTIVE) {
midev->release_count++;
queue_release = true;
}
key->dev_flow_state = MCTP_I2C_FLOW_STATE_INVALID;
spin_unlock_irqrestore(&midev->lock, flags);
if (queue_release) {
/* Ensure we have a release operation queued, through the fake
* marker skb
*/
spin_lock(&midev->tx_queue.lock);
if (!midev->unlock_marker.next)
__skb_queue_tail(&midev->tx_queue,
&midev->unlock_marker);
spin_unlock(&midev->tx_queue.lock);
wake_up(&midev->tx_wq);
}
}
static const struct net_device_ops mctp_i2c_ops = {
.ndo_start_xmit = mctp_i2c_start_xmit,
.ndo_uninit = mctp_i2c_ndo_uninit,
.ndo_open = mctp_i2c_ndo_open,
};
static const struct header_ops mctp_i2c_headops = {
.create = mctp_i2c_header_create,
};
static const struct mctp_netdev_ops mctp_i2c_mctp_ops = {
.release_flow = mctp_i2c_release_flow,
};
static void mctp_i2c_net_setup(struct net_device *dev)
{
dev->type = ARPHRD_MCTP;
dev->mtu = MCTP_I2C_MAXMTU;
dev->min_mtu = MCTP_I2C_MINMTU;
dev->max_mtu = MCTP_I2C_MAXMTU;
dev->tx_queue_len = MCTP_I2C_TX_QUEUE_LEN;
dev->hard_header_len = sizeof(struct mctp_i2c_hdr);
dev->addr_len = 1;
dev->netdev_ops = &mctp_i2c_ops;
dev->header_ops = &mctp_i2c_headops;
}
/* Populates the mctp_i2c_dev priv struct for a netdev.
* Returns an error pointer on failure.
*/
static struct mctp_i2c_dev *mctp_i2c_midev_init(struct net_device *dev,
struct mctp_i2c_client *mcli,
struct i2c_adapter *adap)
{
struct mctp_i2c_dev *midev = netdev_priv(dev);
unsigned long flags;
midev->tx_thread = kthread_create(mctp_i2c_tx_thread, midev,
"%s/tx", dev->name);
if (IS_ERR(midev->tx_thread))
return ERR_CAST(midev->tx_thread);
midev->ndev = dev;
get_device(&adap->dev);
midev->adapter = adap;
get_device(&mcli->client->dev);
midev->client = mcli;
INIT_LIST_HEAD(&midev->list);
spin_lock_init(&midev->lock);
midev->i2c_lock_count = 0;
midev->release_count = 0;
init_completion(&midev->rx_done);
complete(&midev->rx_done);
init_waitqueue_head(&midev->tx_wq);
skb_queue_head_init(&midev->tx_queue);
/* Add to the parent mcli */
spin_lock_irqsave(&mcli->sel_lock, flags);
list_add(&midev->list, &mcli->devs);
/* Select a device by default */
if (!mcli->sel)
__mctp_i2c_device_select(mcli, midev);
spin_unlock_irqrestore(&mcli->sel_lock, flags);
/* Start the worker thread */
wake_up_process(midev->tx_thread);
return midev;
}
/* Counterpart of mctp_i2c_midev_init */
static void mctp_i2c_midev_free(struct mctp_i2c_dev *midev)
{
struct mctp_i2c_client *mcli = midev->client;
unsigned long flags;
if (midev->tx_thread) {
kthread_stop(midev->tx_thread);
midev->tx_thread = NULL;
}
/* Unconditionally unlock on close */
mctp_i2c_unlock_reset(midev);
/* Remove the netdev from the parent i2c client. */
spin_lock_irqsave(&mcli->sel_lock, flags);
list_del(&midev->list);
if (mcli->sel == midev) {
struct mctp_i2c_dev *first;
first = list_first_entry_or_null(&mcli->devs, struct mctp_i2c_dev, list);
__mctp_i2c_device_select(mcli, first);
}
spin_unlock_irqrestore(&mcli->sel_lock, flags);
skb_queue_purge(&midev->tx_queue);
put_device(&midev->adapter->dev);
put_device(&mcli->client->dev);
}
/* Stops, unregisters, and frees midev */
static void mctp_i2c_unregister(struct mctp_i2c_dev *midev)
{
unsigned long flags;
/* Stop tx thread prior to unregister, it uses netif_() functions */
kthread_stop(midev->tx_thread);
midev->tx_thread = NULL;
/* Prevent any new rx in mctp_i2c_recv(), let any pending work finish */
spin_lock_irqsave(&midev->lock, flags);
midev->allow_rx = false;
spin_unlock_irqrestore(&midev->lock, flags);
wait_for_completion(&midev->rx_done);
mctp_unregister_netdev(midev->ndev);
/* midev has been freed now by mctp_i2c_ndo_uninit callback */
free_netdev(midev->ndev);
}
static void mctp_i2c_ndo_uninit(struct net_device *dev)
{
struct mctp_i2c_dev *midev = netdev_priv(dev);
/* Perform cleanup here to ensure that mcli->sel isn't holding
* a reference that would prevent unregister_netdevice()
* from completing.
*/
mctp_i2c_midev_free(midev);
}
static int mctp_i2c_ndo_open(struct net_device *dev)
{
struct mctp_i2c_dev *midev = netdev_priv(dev);
unsigned long flags;
/* i2c rx handler can only pass packets once the netdev is registered */
spin_lock_irqsave(&midev->lock, flags);
midev->allow_rx = true;
spin_unlock_irqrestore(&midev->lock, flags);
return 0;
}
static int mctp_i2c_add_netdev(struct mctp_i2c_client *mcli,
struct i2c_adapter *adap)
{
struct mctp_i2c_dev *midev = NULL;
struct net_device *ndev = NULL;
struct i2c_adapter *root;
unsigned long flags;
char namebuf[30];
int rc;
root = mux_root_adapter(adap);
if (root != mcli->client->adapter) {
dev_err(&mcli->client->dev,
"I2C adapter %s is not a child bus of %s\n",
mcli->client->adapter->name, root->name);
return -EINVAL;
}
WARN_ON(!mutex_is_locked(&driver_clients_lock));
snprintf(namebuf, sizeof(namebuf), "mctpi2c%d", adap->nr);
ndev = alloc_netdev(sizeof(*midev), namebuf, NET_NAME_ENUM, mctp_i2c_net_setup);
if (!ndev) {
dev_err(&mcli->client->dev, "alloc netdev failed\n");
rc = -ENOMEM;
goto err;
}
dev_net_set(ndev, current->nsproxy->net_ns);
SET_NETDEV_DEV(ndev, &adap->dev);
dev_addr_set(ndev, &mcli->lladdr);
midev = mctp_i2c_midev_init(ndev, mcli, adap);
if (IS_ERR(midev)) {
rc = PTR_ERR(midev);
midev = NULL;
goto err;
}
rc = mctp_register_netdev(ndev, &mctp_i2c_mctp_ops);
if (rc < 0) {
dev_err(&mcli->client->dev,
"register netdev \"%s\" failed %d\n",
ndev->name, rc);
goto err;
}
spin_lock_irqsave(&midev->lock, flags);
midev->allow_rx = false;
spin_unlock_irqrestore(&midev->lock, flags);
return 0;
err:
if (midev)
mctp_i2c_midev_free(midev);
if (ndev)
free_netdev(ndev);
return rc;
}
/* Removes any netdev for adap. mcli is the parent root i2c client */
static void mctp_i2c_remove_netdev(struct mctp_i2c_client *mcli,
struct i2c_adapter *adap)
{
struct mctp_i2c_dev *midev = NULL, *m = NULL;
unsigned long flags;
WARN_ON(!mutex_is_locked(&driver_clients_lock));
spin_lock_irqsave(&mcli->sel_lock, flags);
/* List size is limited by number of MCTP netdevs on a single hardware bus */
list_for_each_entry(m, &mcli->devs, list)
if (m->adapter == adap) {
midev = m;
break;
}
spin_unlock_irqrestore(&mcli->sel_lock, flags);
if (midev)
mctp_i2c_unregister(midev);
}
/* Determines whether a device is an i2c adapter.
* Optionally returns the root i2c_adapter
*/
static struct i2c_adapter *mctp_i2c_get_adapter(struct device *dev,
struct i2c_adapter **ret_root)
{
struct i2c_adapter *root, *adap;
if (dev->type != &i2c_adapter_type)
return NULL;
adap = to_i2c_adapter(dev);
root = mux_root_adapter(adap);
WARN_ONCE(!root, "MCTP I2C failed to find root adapter for %s\n",
dev_name(dev));
if (!root)
return NULL;
if (ret_root)
*ret_root = root;
return adap;
}
/* Determines whether a device is an i2c adapter with the "mctp-controller"
* devicetree property set. If adap is not an OF node, returns match_no_of
*/
static bool mctp_i2c_adapter_match(struct i2c_adapter *adap, bool match_no_of)
{
if (!adap->dev.of_node)
return match_no_of;
return of_property_read_bool(adap->dev.of_node, MCTP_I2C_OF_PROP);
}
/* Called for each existing i2c device (adapter or client) when a
* new mctp-i2c client is probed.
*/
static int mctp_i2c_client_try_attach(struct device *dev, void *data)
{
struct i2c_adapter *adap = NULL, *root = NULL;
struct mctp_i2c_client *mcli = data;
adap = mctp_i2c_get_adapter(dev, &root);
if (!adap)
return 0;
if (mcli->client->adapter != root)
return 0;
/* Must either have mctp-controller property on the adapter, or
* be a root adapter if it's non-devicetree
*/
if (!mctp_i2c_adapter_match(adap, adap == root))
return 0;
return mctp_i2c_add_netdev(mcli, adap);
}
static void mctp_i2c_notify_add(struct device *dev)
{
struct mctp_i2c_client *mcli = NULL, *m = NULL;
struct i2c_adapter *root = NULL, *adap = NULL;
int rc;
adap = mctp_i2c_get_adapter(dev, &root);
if (!adap)
return;
/* Check for mctp-controller property on the adapter */
if (!mctp_i2c_adapter_match(adap, false))
return;
/* Find an existing mcli for adap's root */
mutex_lock(&driver_clients_lock);
list_for_each_entry(m, &driver_clients, list) {
if (m->client->adapter == root) {
mcli = m;
break;
}
}
if (mcli) {
rc = mctp_i2c_add_netdev(mcli, adap);
if (rc < 0)
dev_warn(dev, "Failed adding mctp-i2c net device\n");
}
mutex_unlock(&driver_clients_lock);
}
static void mctp_i2c_notify_del(struct device *dev)
{
struct i2c_adapter *root = NULL, *adap = NULL;
struct mctp_i2c_client *mcli = NULL;
adap = mctp_i2c_get_adapter(dev, &root);
if (!adap)
return;
mutex_lock(&driver_clients_lock);
list_for_each_entry(mcli, &driver_clients, list) {
if (mcli->client->adapter == root) {
mctp_i2c_remove_netdev(mcli, adap);
break;
}
}
mutex_unlock(&driver_clients_lock);
}
static int mctp_i2c_probe(struct i2c_client *client)
{
struct mctp_i2c_client *mcli = NULL;
int rc;
mutex_lock(&driver_clients_lock);
mcli = mctp_i2c_new_client(client);
if (IS_ERR(mcli)) {
rc = PTR_ERR(mcli);
mcli = NULL;
goto out;
} else {
list_add(&mcli->list, &driver_clients);
}
/* Add a netdev for adapters that have a 'mctp-controller' property */
i2c_for_each_dev(mcli, mctp_i2c_client_try_attach);
rc = 0;
out:
mutex_unlock(&driver_clients_lock);
return rc;
}
static void mctp_i2c_remove(struct i2c_client *client)
{
struct mctp_i2c_client *mcli = i2c_get_clientdata(client);
struct mctp_i2c_dev *midev = NULL, *tmp = NULL;
mutex_lock(&driver_clients_lock);
list_del(&mcli->list);
/* Remove all child adapter netdevs */
list_for_each_entry_safe(midev, tmp, &mcli->devs, list)
mctp_i2c_unregister(midev);
mctp_i2c_free_client(mcli);
mutex_unlock(&driver_clients_lock);
}
/* We look for a 'mctp-controller' property on I2C busses as they are
* added/deleted, creating/removing netdevs as required.
*/
static int mctp_i2c_notifier_call(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
mctp_i2c_notify_add(dev);
break;
case BUS_NOTIFY_DEL_DEVICE:
mctp_i2c_notify_del(dev);
break;
}
return NOTIFY_DONE;
}
static struct notifier_block mctp_i2c_notifier = {
.notifier_call = mctp_i2c_notifier_call,
};
static const struct i2c_device_id mctp_i2c_id[] = {
{ "mctp-i2c-interface", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, mctp_i2c_id);
static const struct of_device_id mctp_i2c_of_match[] = {
{ .compatible = "mctp-i2c-controller" },
{},
};
MODULE_DEVICE_TABLE(of, mctp_i2c_of_match);
static struct i2c_driver mctp_i2c_driver = {
.driver = {
.name = "mctp-i2c-interface",
.of_match_table = mctp_i2c_of_match,
},
.probe = mctp_i2c_probe,
.remove = mctp_i2c_remove,
.id_table = mctp_i2c_id,
};
static __init int mctp_i2c_mod_init(void)
{
int rc;
pr_info("MCTP I2C interface driver\n");
rc = i2c_add_driver(&mctp_i2c_driver);
if (rc < 0)
return rc;
rc = bus_register_notifier(&i2c_bus_type, &mctp_i2c_notifier);
if (rc < 0) {
i2c_del_driver(&mctp_i2c_driver);
return rc;
}
return 0;
}
static __exit void mctp_i2c_mod_exit(void)
{
int rc;
rc = bus_unregister_notifier(&i2c_bus_type, &mctp_i2c_notifier);
if (rc < 0)
pr_warn("MCTP I2C could not unregister notifier, %d\n", rc);
i2c_del_driver(&mctp_i2c_driver);
}
module_init(mctp_i2c_mod_init);
module_exit(mctp_i2c_mod_exit);
MODULE_DESCRIPTION("MCTP I2C device");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Matt Johnston <[email protected]>");
| linux-master | drivers/net/mctp/mctp-i2c.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Management Component Transport Protocol (MCTP) - serial transport
 * binding. This driver is an implementation of the DMTF specification
* "DSP0253 - Management Component Transport Protocol (MCTP) Serial Transport
* Binding", available at:
*
* https://www.dmtf.org/sites/default/files/standards/documents/DSP0253_1.0.0.pdf
*
* This driver provides DSP0253-type MCTP-over-serial transport using a Linux
* tty device, by setting the N_MCTP line discipline on the tty.
*
* Copyright (c) 2021 Code Construct
*/
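/*
 * Userspace sketch of attaching the line discipline (assumes N_MCTP is
 * present in the installed kernel headers; the ldattach(8) utility can
 * achieve the same thing):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/tty.h>		// N_MCTP
 *
 *	int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);
 *	int ldisc = N_MCTP;
 *	ioctl(fd, TIOCSETD, &ldisc);	// creates an mctpserialN netdev
 */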
#include <linux/idr.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/workqueue.h>
#include <linux/crc-ccitt.h>
#include <linux/mctp.h>
#include <net/mctp.h>
#include <net/pkt_sched.h>
#define MCTP_SERIAL_MTU 68 /* base mtu (64) + mctp header */
#define MCTP_SERIAL_FRAME_MTU (MCTP_SERIAL_MTU + 6) /* + serial framing */
#define MCTP_SERIAL_VERSION 0x1 /* DSP0253 defines a single version: 1 */
#define BUFSIZE MCTP_SERIAL_FRAME_MTU
#define BYTE_FRAME 0x7e
#define BYTE_ESC 0x7d
#define FCS_INIT 0xffff
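/*
 * Frame layout as implemented by the tx/rx state machines below (a summary
 * of this driver's behaviour, not quoted from DSP0253):
 *
 *	0x7e | 0x01 | byte count | MCTP packet, escaped | FCS hi | FCS lo | 0x7e
 *
 * FCS is the 16-bit CRC-CCITT over the version, byte count and *unescaped*
 * packet bytes. Escaping replaces a 0x7e or 0x7d data byte with 0x7d
 * followed by the byte with bit 5 cleared (c & ~0x20); the receiver
 * restores it with c | 0x20.
 */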
static DEFINE_IDA(mctp_serial_ida);
enum mctp_serial_state {
STATE_IDLE,
STATE_START,
STATE_HEADER,
STATE_DATA,
STATE_ESCAPE,
STATE_TRAILER,
STATE_DONE,
STATE_ERR,
};
struct mctp_serial {
struct net_device *netdev;
struct tty_struct *tty;
int idx;
/* protects our rx & tx state machines; held during both paths */
spinlock_t lock;
struct work_struct tx_work;
enum mctp_serial_state txstate, rxstate;
u16 txfcs, rxfcs, rxfcs_rcvd;
unsigned int txlen, rxlen;
unsigned int txpos, rxpos;
unsigned char txbuf[BUFSIZE],
rxbuf[BUFSIZE];
};
static bool needs_escape(unsigned char c)
{
return c == BYTE_ESC || c == BYTE_FRAME;
}
static int next_chunk_len(struct mctp_serial *dev)
{
int i;
/* either we have no bytes to send ... */
if (dev->txpos == dev->txlen)
return 0;
/* ... or the next byte to send is an escaped byte; requiring a
* single-byte chunk...
*/
if (needs_escape(dev->txbuf[dev->txpos]))
return 1;
/* ... or we have one or more bytes up to the next escape - this chunk
* will be those non-escaped bytes, and does not include the escaped
* byte.
*/
	for (i = 1; i + dev->txpos < dev->txlen; i++) {
		if (needs_escape(dev->txbuf[dev->txpos + i]))
			break;
	}
return i;
}
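/*
 * Illustrative chunking (example values only): with
 * txbuf = { 0x01, 0x7e, 0x02, 0x03 } and txlen = 4, successive calls return
 * 1 (just 0x01, stopping short of the frame byte), 1 (the 0x7e itself, which
 * STATE_DATA then emits as an escaped pair), and finally 2 (0x02 0x03).
 */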
static int write_chunk(struct mctp_serial *dev, unsigned char *buf, int len)
{
return dev->tty->ops->write(dev->tty, buf, len);
}
static void mctp_serial_tx_work(struct work_struct *work)
{
struct mctp_serial *dev = container_of(work, struct mctp_serial,
tx_work);
unsigned char c, buf[3];
unsigned long flags;
int len, txlen;
spin_lock_irqsave(&dev->lock, flags);
/* txstate represents the next thing to send */
switch (dev->txstate) {
case STATE_START:
dev->txpos = 0;
fallthrough;
case STATE_HEADER:
buf[0] = BYTE_FRAME;
buf[1] = MCTP_SERIAL_VERSION;
buf[2] = dev->txlen;
if (!dev->txpos)
dev->txfcs = crc_ccitt(FCS_INIT, buf + 1, 2);
txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos);
if (txlen <= 0) {
dev->txstate = STATE_ERR;
} else {
dev->txpos += txlen;
if (dev->txpos == 3) {
dev->txstate = STATE_DATA;
dev->txpos = 0;
}
}
break;
case STATE_ESCAPE:
buf[0] = dev->txbuf[dev->txpos] & ~0x20;
txlen = write_chunk(dev, buf, 1);
if (txlen <= 0) {
dev->txstate = STATE_ERR;
} else {
dev->txpos += txlen;
if (dev->txpos == dev->txlen) {
dev->txstate = STATE_TRAILER;
dev->txpos = 0;
}
}
break;
case STATE_DATA:
len = next_chunk_len(dev);
if (len) {
c = dev->txbuf[dev->txpos];
if (len == 1 && needs_escape(c)) {
buf[0] = BYTE_ESC;
buf[1] = c & ~0x20;
dev->txfcs = crc_ccitt_byte(dev->txfcs, c);
txlen = write_chunk(dev, buf, 2);
if (txlen == 2)
dev->txpos++;
else if (txlen == 1)
dev->txstate = STATE_ESCAPE;
else
dev->txstate = STATE_ERR;
} else {
txlen = write_chunk(dev,
dev->txbuf + dev->txpos,
len);
if (txlen <= 0) {
dev->txstate = STATE_ERR;
} else {
dev->txfcs = crc_ccitt(dev->txfcs,
dev->txbuf +
dev->txpos,
txlen);
dev->txpos += txlen;
}
}
if (dev->txstate == STATE_DATA &&
dev->txpos == dev->txlen) {
dev->txstate = STATE_TRAILER;
dev->txpos = 0;
}
break;
}
dev->txstate = STATE_TRAILER;
dev->txpos = 0;
fallthrough;
case STATE_TRAILER:
buf[0] = dev->txfcs >> 8;
buf[1] = dev->txfcs & 0xff;
buf[2] = BYTE_FRAME;
txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos);
if (txlen <= 0) {
dev->txstate = STATE_ERR;
} else {
dev->txpos += txlen;
if (dev->txpos == 3) {
dev->txstate = STATE_DONE;
dev->txpos = 0;
}
}
break;
default:
netdev_err_once(dev->netdev, "invalid tx state %d\n",
dev->txstate);
}
if (dev->txstate == STATE_DONE) {
dev->netdev->stats.tx_packets++;
dev->netdev->stats.tx_bytes += dev->txlen;
dev->txlen = 0;
dev->txpos = 0;
clear_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags);
dev->txstate = STATE_IDLE;
spin_unlock_irqrestore(&dev->lock, flags);
netif_wake_queue(dev->netdev);
} else {
spin_unlock_irqrestore(&dev->lock, flags);
}
}
static netdev_tx_t mctp_serial_tx(struct sk_buff *skb, struct net_device *ndev)
{
struct mctp_serial *dev = netdev_priv(ndev);
unsigned long flags;
WARN_ON(dev->txstate != STATE_IDLE);
if (skb->len > MCTP_SERIAL_MTU) {
dev->netdev->stats.tx_dropped++;
goto out;
}
spin_lock_irqsave(&dev->lock, flags);
netif_stop_queue(dev->netdev);
skb_copy_bits(skb, 0, dev->txbuf, skb->len);
dev->txpos = 0;
dev->txlen = skb->len;
dev->txstate = STATE_START;
spin_unlock_irqrestore(&dev->lock, flags);
set_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags);
schedule_work(&dev->tx_work);
out:
kfree_skb(skb);
return NETDEV_TX_OK;
}
static void mctp_serial_tty_write_wakeup(struct tty_struct *tty)
{
struct mctp_serial *dev = tty->disc_data;
schedule_work(&dev->tx_work);
}
static void mctp_serial_rx(struct mctp_serial *dev)
{
struct mctp_skb_cb *cb;
struct sk_buff *skb;
if (dev->rxfcs != dev->rxfcs_rcvd) {
dev->netdev->stats.rx_dropped++;
dev->netdev->stats.rx_crc_errors++;
return;
}
skb = netdev_alloc_skb(dev->netdev, dev->rxlen);
if (!skb) {
dev->netdev->stats.rx_dropped++;
return;
}
skb->protocol = htons(ETH_P_MCTP);
skb_put_data(skb, dev->rxbuf, dev->rxlen);
skb_reset_network_header(skb);
cb = __mctp_cb(skb);
cb->halen = 0;
netif_rx(skb);
dev->netdev->stats.rx_packets++;
dev->netdev->stats.rx_bytes += dev->rxlen;
}
static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c)
{
switch (dev->rxpos) {
case 0:
if (c == BYTE_FRAME)
dev->rxpos++;
else
dev->rxstate = STATE_ERR;
break;
case 1:
if (c == MCTP_SERIAL_VERSION) {
dev->rxpos++;
dev->rxfcs = crc_ccitt_byte(FCS_INIT, c);
} else {
dev->rxstate = STATE_ERR;
}
break;
case 2:
if (c > MCTP_SERIAL_FRAME_MTU) {
dev->rxstate = STATE_ERR;
} else {
dev->rxlen = c;
dev->rxpos = 0;
dev->rxstate = STATE_DATA;
dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c);
}
break;
}
}
static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c)
{
switch (dev->rxpos) {
case 0:
dev->rxfcs_rcvd = c << 8;
dev->rxpos++;
break;
case 1:
dev->rxfcs_rcvd |= c;
dev->rxpos++;
break;
case 2:
if (c != BYTE_FRAME) {
dev->rxstate = STATE_ERR;
} else {
mctp_serial_rx(dev);
dev->rxlen = 0;
dev->rxpos = 0;
dev->rxstate = STATE_IDLE;
}
break;
}
}
static void mctp_serial_push(struct mctp_serial *dev, unsigned char c)
{
switch (dev->rxstate) {
case STATE_IDLE:
dev->rxstate = STATE_HEADER;
fallthrough;
case STATE_HEADER:
mctp_serial_push_header(dev, c);
break;
case STATE_ESCAPE:
c |= 0x20;
fallthrough;
case STATE_DATA:
if (dev->rxstate != STATE_ESCAPE && c == BYTE_ESC) {
dev->rxstate = STATE_ESCAPE;
} else {
dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c);
dev->rxbuf[dev->rxpos] = c;
dev->rxpos++;
dev->rxstate = STATE_DATA;
if (dev->rxpos == dev->rxlen) {
dev->rxpos = 0;
dev->rxstate = STATE_TRAILER;
}
}
break;
case STATE_TRAILER:
mctp_serial_push_trailer(dev, c);
break;
case STATE_ERR:
if (c == BYTE_FRAME)
dev->rxstate = STATE_IDLE;
break;
default:
netdev_err_once(dev->netdev, "invalid rx state %d\n",
dev->rxstate);
}
}
static void mctp_serial_tty_receive_buf(struct tty_struct *tty, const u8 *c,
const u8 *f, size_t len)
{
struct mctp_serial *dev = tty->disc_data;
int i;
if (!netif_running(dev->netdev))
return;
/* we don't (currently) use the flag bytes, just data. */
for (i = 0; i < len; i++)
mctp_serial_push(dev, c[i]);
}
static void mctp_serial_uninit(struct net_device *ndev)
{
struct mctp_serial *dev = netdev_priv(ndev);
cancel_work_sync(&dev->tx_work);
}
static const struct net_device_ops mctp_serial_netdev_ops = {
.ndo_start_xmit = mctp_serial_tx,
.ndo_uninit = mctp_serial_uninit,
};
static void mctp_serial_setup(struct net_device *ndev)
{
ndev->type = ARPHRD_MCTP;
/* we limit at the fixed MTU, which is also the MCTP-standard
* baseline MTU, so is also our minimum
*/
ndev->mtu = MCTP_SERIAL_MTU;
ndev->max_mtu = MCTP_SERIAL_MTU;
ndev->min_mtu = MCTP_SERIAL_MTU;
ndev->hard_header_len = 0;
ndev->addr_len = 0;
ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
ndev->flags = IFF_NOARP;
ndev->netdev_ops = &mctp_serial_netdev_ops;
ndev->needs_free_netdev = true;
}
static int mctp_serial_open(struct tty_struct *tty)
{
struct mctp_serial *dev;
struct net_device *ndev;
char name[32];
int idx, rc;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (!tty->ops->write)
return -EOPNOTSUPP;
idx = ida_alloc(&mctp_serial_ida, GFP_KERNEL);
if (idx < 0)
return idx;
snprintf(name, sizeof(name), "mctpserial%d", idx);
ndev = alloc_netdev(sizeof(*dev), name, NET_NAME_ENUM,
mctp_serial_setup);
if (!ndev) {
rc = -ENOMEM;
goto free_ida;
}
dev = netdev_priv(ndev);
dev->idx = idx;
dev->tty = tty;
dev->netdev = ndev;
dev->txstate = STATE_IDLE;
dev->rxstate = STATE_IDLE;
spin_lock_init(&dev->lock);
INIT_WORK(&dev->tx_work, mctp_serial_tx_work);
rc = register_netdev(ndev);
if (rc)
goto free_netdev;
tty->receive_room = 64 * 1024;
tty->disc_data = dev;
return 0;
free_netdev:
free_netdev(ndev);
free_ida:
ida_free(&mctp_serial_ida, idx);
return rc;
}
static void mctp_serial_close(struct tty_struct *tty)
{
struct mctp_serial *dev = tty->disc_data;
int idx = dev->idx;
unregister_netdev(dev->netdev);
ida_free(&mctp_serial_ida, idx);
}
static struct tty_ldisc_ops mctp_ldisc = {
.owner = THIS_MODULE,
.num = N_MCTP,
.name = "mctp",
.open = mctp_serial_open,
.close = mctp_serial_close,
.receive_buf = mctp_serial_tty_receive_buf,
.write_wakeup = mctp_serial_tty_write_wakeup,
};
static int __init mctp_serial_init(void)
{
return tty_register_ldisc(&mctp_ldisc);
}
static void __exit mctp_serial_exit(void)
{
tty_unregister_ldisc(&mctp_ldisc);
}
module_init(mctp_serial_init);
module_exit(mctp_serial_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jeremy Kerr <[email protected]>");
MODULE_DESCRIPTION("MCTP Serial transport");
| linux-master | drivers/net/mctp/mctp-serial.c |