// SPDX-License-Identifier: GPL-2.0-only
/*
*
* general timer device for use in ISDN stacks
*
* Author Karsten Keil <[email protected]>
*
* Copyright 2008 by Karsten Keil <[email protected]>
*/
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mISDNif.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include "core.h"
static DEFINE_MUTEX(mISDN_mutex);
static u_int *debug;
struct mISDNtimerdev {
int next_id;
struct list_head pending;
struct list_head expired;
wait_queue_head_t wait;
u_int work;
spinlock_t lock; /* protect lists */
};
struct mISDNtimer {
struct list_head list;
struct mISDNtimerdev *dev;
struct timer_list tl;
int id;
};
static int
mISDN_open(struct inode *ino, struct file *filep)
{
struct mISDNtimerdev *dev;
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p,%p)\n", __func__, ino, filep);
dev = kmalloc(sizeof(struct mISDNtimerdev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->next_id = 1;
INIT_LIST_HEAD(&dev->pending);
INIT_LIST_HEAD(&dev->expired);
spin_lock_init(&dev->lock);
dev->work = 0;
init_waitqueue_head(&dev->wait);
filep->private_data = dev;
return nonseekable_open(ino, filep);
}
static int
mISDN_close(struct inode *ino, struct file *filep)
{
struct mISDNtimerdev *dev = filep->private_data;
struct list_head *list = &dev->pending;
struct mISDNtimer *timer, *next;
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p,%p)\n", __func__, ino, filep);
spin_lock_irq(&dev->lock);
while (!list_empty(list)) {
timer = list_first_entry(list, struct mISDNtimer, list);
spin_unlock_irq(&dev->lock);
timer_shutdown_sync(&timer->tl);
spin_lock_irq(&dev->lock);
/* it might have been moved to ->expired */
list_del(&timer->list);
kfree(timer);
}
spin_unlock_irq(&dev->lock);
list_for_each_entry_safe(timer, next, &dev->expired, list) {
kfree(timer);
}
kfree(dev);
return 0;
}
static ssize_t
mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
{
struct mISDNtimerdev *dev = filep->private_data;
struct list_head *list = &dev->expired;
struct mISDNtimer *timer;
int ret = 0;
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__,
filep, buf, (int)count, off);
if (count < sizeof(int))
return -ENOSPC;
spin_lock_irq(&dev->lock);
while (list_empty(list) && (dev->work == 0)) {
spin_unlock_irq(&dev->lock);
if (filep->f_flags & O_NONBLOCK)
return -EAGAIN;
wait_event_interruptible(dev->wait, (dev->work ||
!list_empty(list)));
if (signal_pending(current))
return -ERESTARTSYS;
spin_lock_irq(&dev->lock);
}
if (dev->work)
dev->work = 0;
if (!list_empty(list)) {
timer = list_first_entry(list, struct mISDNtimer, list);
list_del(&timer->list);
spin_unlock_irq(&dev->lock);
if (put_user(timer->id, (int __user *)buf))
ret = -EFAULT;
else
ret = sizeof(int);
kfree(timer);
} else {
spin_unlock_irq(&dev->lock);
}
return ret;
}
static __poll_t
mISDN_poll(struct file *filep, poll_table *wait)
{
struct mISDNtimerdev *dev = filep->private_data;
__poll_t mask = EPOLLERR;
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait);
if (dev) {
poll_wait(filep, &dev->wait, wait);
mask = 0;
if (dev->work || !list_empty(&dev->expired))
mask |= (EPOLLIN | EPOLLRDNORM);
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s work(%d) empty(%d)\n", __func__,
dev->work, list_empty(&dev->expired));
}
return mask;
}
static void
dev_expire_timer(struct timer_list *t)
{
struct mISDNtimer *timer = from_timer(timer, t, tl);
u_long flags;
spin_lock_irqsave(&timer->dev->lock, flags);
if (timer->id >= 0)
list_move_tail(&timer->list, &timer->dev->expired);
wake_up_interruptible(&timer->dev->wait);
spin_unlock_irqrestore(&timer->dev->lock, flags);
}
static int
misdn_add_timer(struct mISDNtimerdev *dev, int timeout)
{
int id;
struct mISDNtimer *timer;
if (!timeout) {
dev->work = 1;
wake_up_interruptible(&dev->wait);
id = 0;
} else {
timer = kzalloc(sizeof(struct mISDNtimer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
timer->dev = dev;
timer_setup(&timer->tl, dev_expire_timer, 0);
spin_lock_irq(&dev->lock);
id = timer->id = dev->next_id++;
if (dev->next_id < 0)
dev->next_id = 1;
list_add_tail(&timer->list, &dev->pending);
timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000);
add_timer(&timer->tl);
spin_unlock_irq(&dev->lock);
}
return id;
}
static int
misdn_del_timer(struct mISDNtimerdev *dev, int id)
{
struct mISDNtimer *timer;
spin_lock_irq(&dev->lock);
list_for_each_entry(timer, &dev->pending, list) {
if (timer->id == id) {
list_del_init(&timer->list);
timer->id = -1;
spin_unlock_irq(&dev->lock);
timer_shutdown_sync(&timer->tl);
kfree(timer);
return id;
}
}
spin_unlock_irq(&dev->lock);
return 0;
}
static long
mISDN_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct mISDNtimerdev *dev = filep->private_data;
int id, tout, ret = 0;
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %x, %lx)\n", __func__,
filep, cmd, arg);
mutex_lock(&mISDN_mutex);
switch (cmd) {
case IMADDTIMER:
if (get_user(tout, (int __user *)arg)) {
ret = -EFAULT;
break;
}
id = misdn_add_timer(dev, tout);
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s add %d id %d\n", __func__,
tout, id);
if (id < 0) {
ret = id;
break;
}
if (put_user(id, (int __user *)arg))
ret = -EFAULT;
break;
case IMDELTIMER:
if (get_user(id, (int __user *)arg)) {
ret = -EFAULT;
break;
}
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s del id %d\n", __func__, id);
id = misdn_del_timer(dev, id);
if (put_user(id, (int __user *)arg))
ret = -EFAULT;
break;
default:
ret = -EINVAL;
}
mutex_unlock(&mISDN_mutex);
return ret;
}
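/*
 * Illustrative userspace sketch, not part of this driver: how an application
 * might use the ioctl and read interface implemented above.  The device node
 * path and the userspace header location are assumptions (the misc device is
 * registered below under the name "mISDNtimer"; IMADDTIMER comes from the
 * mISDNif.h copy shipped with the mISDNuser package).
 */
#if 0	/* example only, never compiled as part of the kernel */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mISDNif.h>

static int example_wait_timer(int timeout_ms)
{
	int fd, id, expired;

	fd = open("/dev/mISDNtimer", O_RDWR);
	if (fd < 0)
		return -1;
	id = timeout_ms;			/* IMADDTIMER reads the timeout ... */
	if (ioctl(fd, IMADDTIMER, &id) < 0) {	/* ... and writes back the timer id */
		close(fd);
		return -1;
	}
	if (read(fd, &expired, sizeof(expired)) != sizeof(expired))
		expired = -1;			/* blocks until a timer expires */
	close(fd);
	return expired;				/* equals id when this timer fired */
}
#endif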
static const struct file_operations mISDN_fops = {
.owner = THIS_MODULE,
.read = mISDN_read,
.poll = mISDN_poll,
.unlocked_ioctl = mISDN_ioctl,
.open = mISDN_open,
.release = mISDN_close,
.llseek = no_llseek,
};
static struct miscdevice mISDNtimer = {
.minor = MISC_DYNAMIC_MINOR,
.name = "mISDNtimer",
.fops = &mISDN_fops,
};
int
mISDN_inittimer(u_int *deb)
{
int err;
debug = deb;
err = misc_register(&mISDNtimer);
if (err)
printk(KERN_WARNING "mISDN: Could not register timer device\n");
return err;
}
void mISDN_timer_cleanup(void)
{
misc_deregister(&mISDNtimer);
}
/* end of file: drivers/isdn/mISDN/timerdev.c (repo: linux-master) */
/*
* Blowfish encryption/decryption for mISDN_dsp.
*
* Copyright Andreas Eversberg ([email protected])
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
#include "core.h"
#include "dsp.h"
/*
* how to encode a sample stream into 64-bit blocks that will be encrypted
*
* first of all, data is collected until a block of 9 samples is received.
* of course, a packet may contain many more than 9 samples, but it may not
* be exactly a multiple of 9 samples. if there is a remainder, the next
* received data will complete the block.
*
* the block is then converted to 9 uLAW samples without the least significant
* bit. the result is a 7-bit encoded sample.
*
* the samples will be reorganised to form 8 bytes of data:
* (5(6) means: encoded sample no. 5, bit 6)
*
* 0(6) 0(5) 0(4) 0(3) 0(2) 0(1) 0(0) 1(6)
* 1(5) 1(4) 1(3) 1(2) 1(1) 1(0) 2(6) 2(5)
* 2(4) 2(3) 2(2) 2(1) 2(0) 3(6) 3(5) 3(4)
* 3(3) 3(2) 3(1) 3(0) 4(6) 4(5) 4(4) 4(3)
* 4(2) 4(1) 4(0) 5(6) 5(5) 5(4) 5(3) 5(2)
* 5(1) 5(0) 6(6) 6(5) 6(4) 6(3) 6(2) 6(1)
* 6(0) 7(6) 7(5) 7(4) 7(3) 7(2) 7(1) 7(0)
* 8(6) 8(5) 8(4) 8(3) 8(2) 8(1) 8(0)
*
* the missing bit 0 of the last byte is filled with some
* random noise, to fill all 8 bytes.
*
* the 8 bytes will be encrypted using blowfish.
*
* the result will be converted into 9 bytes. bit 7 is used for the
* checksum (CS), for sync (0, 1) and for the last bit:
* (5(6) means: crypted byte 5, bit 6)
*
* 1 0(7) 0(6) 0(5) 0(4) 0(3) 0(2) 0(1)
* 0 0(0) 1(7) 1(6) 1(5) 1(4) 1(3) 1(2)
* 0 1(1) 1(0) 2(7) 2(6) 2(5) 2(4) 2(3)
* 0 2(2) 2(1) 2(0) 3(7) 3(6) 3(5) 3(4)
* 0 3(3) 3(2) 3(1) 3(0) 4(7) 4(6) 4(5)
* CS 4(4) 4(3) 4(2) 4(1) 4(0) 5(7) 5(6)
* CS 5(5) 5(4) 5(3) 5(2) 5(1) 5(0) 6(7)
* CS 6(6) 6(5) 6(4) 6(3) 6(2) 6(1) 6(0)
* 7(7) 7(6) 7(5) 7(4) 7(3) 7(2) 7(1) 7(0)
*
* the checksum is used to detect transmission errors and frame drops.
*
* synchronisation of the received block is done by shifting the upper bit of
* each byte (bit 7) into a shift register. if the register holds the five-bit
* pattern (10000), sync has been found. only then is the current block of 9
* received bytes decrypted. before that, the checksum is calculated; if it is
* incorrect, the block is dropped.
* this will avoid loud noise due to corrupt encrypted data.
*
* if the last block is corrupt, the current decoded block is repeated
* until a valid block has been received.
*/
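/*
 * Illustrative sketch, not part of the driver (the helper name is made up):
 * the 3-bit checksum described above is an XOR fold of both 32-bit crypted
 * words with a period of three bits.  It mirrors the expression used in
 * dsp_bf_encrypt() and dsp_bf_decrypt() below; only the low three bits are
 * transmitted in the CS positions.
 */
static inline u8 example_bf_checksum(u32 yl, u32 yr)
{
	u32 cs;

	cs = yl ^ (yl >> 3) ^ (yl >> 6) ^ (yl >> 9) ^ (yl >> 12) ^ (yl >> 15)
		^ (yl >> 18) ^ (yl >> 21) ^ (yl >> 24) ^ (yl >> 27) ^ (yl >> 30)
		^ (yr << 2) ^ (yr >> 1) ^ (yr >> 4) ^ (yr >> 7) ^ (yr >> 10)
		^ (yr >> 13) ^ (yr >> 16) ^ (yr >> 19) ^ (yr >> 22) ^ (yr >> 25)
		^ (yr >> 28) ^ (yr >> 31);

	return cs & 0x7;
}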
/*
* some blowfish parts are taken from the
* crypto-api for faster implementation
*/
struct bf_ctx {
u32 p[18];
u32 s[1024];
};
static const u32 bf_pbox[16 + 2] = {
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344,
0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89,
0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917,
0x9216d5d9, 0x8979fb1b,
};
static const u32 bf_sbox[256 * 4] = {
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7,
0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99,
0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e,
0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee,
0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef,
0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e,
0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440,
0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce,
0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e,
0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677,
0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032,
0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88,
0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e,
0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0,
0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98,
0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88,
0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6,
0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d,
0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7,
0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba,
0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f,
0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09,
0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb,
0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279,
0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab,
0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82,
0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573,
0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0,
0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790,
0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8,
0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0,
0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7,
0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad,
0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1,
0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9,
0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477,
0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49,
0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af,
0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5,
0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41,
0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400,
0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915,
0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623,
0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266,
0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e,
0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6,
0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e,
0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1,
0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8,
0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff,
0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701,
0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7,
0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331,
0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf,
0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e,
0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87,
0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2,
0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16,
0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b,
0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509,
0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3,
0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f,
0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4,
0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960,
0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28,
0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802,
0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510,
0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf,
0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e,
0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50,
0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8,
0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281,
0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696,
0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128,
0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0,
0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0,
0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250,
0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3,
0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00,
0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061,
0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e,
0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735,
0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9,
0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340,
0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934,
0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068,
0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840,
0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45,
0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a,
0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb,
0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6,
0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42,
0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2,
0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb,
0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b,
0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33,
0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3,
0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc,
0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564,
0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b,
0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922,
0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728,
0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e,
0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37,
0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804,
0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b,
0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb,
0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d,
0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350,
0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9,
0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe,
0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d,
0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f,
0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61,
0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9,
0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2,
0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e,
0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633,
0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169,
0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52,
0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5,
0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62,
0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76,
0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24,
0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4,
0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c,
0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b,
0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe,
0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4,
0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8,
0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304,
0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22,
0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6,
0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9,
0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593,
0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51,
0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c,
0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b,
0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c,
0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd,
0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319,
0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb,
0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991,
0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32,
0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166,
0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae,
0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5,
0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47,
0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d,
0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84,
0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8,
0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd,
0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7,
0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38,
0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c,
0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525,
0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442,
0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964,
0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8,
0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d,
0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299,
0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02,
0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614,
0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a,
0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b,
0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0,
0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e,
0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9,
0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
};
/*
* Round loop unrolling macros, S is a pointer to an S-box array
* organized as four 256-entry u32 boxes laid out consecutively.
*/
#define GET32_3(x) (((x) & 0xff))
#define GET32_2(x) (((x) >> (8)) & (0xff))
#define GET32_1(x) (((x) >> (16)) & (0xff))
#define GET32_0(x) (((x) >> (24)) & (0xff))
#define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \
S[512 + GET32_2(x)]) + S[768 + GET32_3(x)])
#define EROUND(a, b, n) do { b ^= P[n]; a ^= bf_F(b); } while (0)
#define DROUND(a, b, n) do { a ^= bf_F(b); b ^= P[n]; } while (0)
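/*
 * Illustrative expansion, not part of the driver (the function name is made
 * up): bf_F() is the Blowfish round function.  It splits its 32-bit input
 * into four bytes and combines one entry from each of the four 256-entry
 * S-boxes stored back to back in S[].
 */
static inline u32 example_bf_round_function(const u32 *S, u32 x)
{
	u32 a = (x >> 24) & 0xff;	/* GET32_0(x) */
	u32 b = (x >> 16) & 0xff;	/* GET32_1(x) */
	u32 c = (x >> 8) & 0xff;	/* GET32_2(x) */
	u32 d = x & 0xff;		/* GET32_3(x) */

	return ((S[a] + S[256 + b]) ^ S[512 + c]) + S[768 + d];
}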
/*
* encrypt isdn data frame
* every block with 9 samples is encrypted
*/
void
dsp_bf_encrypt(struct dsp *dsp, u8 *data, int len)
{
int i = 0, j = dsp->bf_crypt_pos;
u8 *bf_data_in = dsp->bf_data_in;
u8 *bf_crypt_out = dsp->bf_crypt_out;
u32 *P = dsp->bf_p;
u32 *S = dsp->bf_s;
u32 yl, yr;
u32 cs;
u8 nibble;
while (i < len) {
/* collect a block of 9 samples */
if (j < 9) {
bf_data_in[j] = *data;
*data++ = bf_crypt_out[j++];
i++;
continue;
}
j = 0;
/* transcode 9 samples xlaw to 8 bytes */
yl = dsp_audio_law2seven[bf_data_in[0]];
yl = (yl << 7) | dsp_audio_law2seven[bf_data_in[1]];
yl = (yl << 7) | dsp_audio_law2seven[bf_data_in[2]];
yl = (yl << 7) | dsp_audio_law2seven[bf_data_in[3]];
nibble = dsp_audio_law2seven[bf_data_in[4]];
yr = nibble;
yl = (yl << 4) | (nibble >> 3);
yr = (yr << 7) | dsp_audio_law2seven[bf_data_in[5]];
yr = (yr << 7) | dsp_audio_law2seven[bf_data_in[6]];
yr = (yr << 7) | dsp_audio_law2seven[bf_data_in[7]];
yr = (yr << 7) | dsp_audio_law2seven[bf_data_in[8]];
yr = (yr << 1) | (bf_data_in[0] & 1);
/* fill unused bit with random noise of audio input */
/* encrypt */
EROUND(yr, yl, 0);
EROUND(yl, yr, 1);
EROUND(yr, yl, 2);
EROUND(yl, yr, 3);
EROUND(yr, yl, 4);
EROUND(yl, yr, 5);
EROUND(yr, yl, 6);
EROUND(yl, yr, 7);
EROUND(yr, yl, 8);
EROUND(yl, yr, 9);
EROUND(yr, yl, 10);
EROUND(yl, yr, 11);
EROUND(yr, yl, 12);
EROUND(yl, yr, 13);
EROUND(yr, yl, 14);
EROUND(yl, yr, 15);
yl ^= P[16];
yr ^= P[17];
/* calculate 3-bit checksum */
cs = yl ^ (yl >> 3) ^ (yl >> 6) ^ (yl >> 9) ^ (yl >> 12) ^ (yl >> 15)
^ (yl >> 18) ^ (yl >> 21) ^ (yl >> 24) ^ (yl >> 27) ^ (yl >> 30)
^ (yr << 2) ^ (yr >> 1) ^ (yr >> 4) ^ (yr >> 7) ^ (yr >> 10)
^ (yr >> 13) ^ (yr >> 16) ^ (yr >> 19) ^ (yr >> 22) ^ (yr >> 25)
^ (yr >> 28) ^ (yr >> 31);
/*
* transcode 8 crypted bytes to 9 data bytes with sync
* and checksum information
*/
bf_crypt_out[0] = (yl >> 25) | 0x80;
bf_crypt_out[1] = (yl >> 18) & 0x7f;
bf_crypt_out[2] = (yl >> 11) & 0x7f;
bf_crypt_out[3] = (yl >> 4) & 0x7f;
bf_crypt_out[4] = ((yl << 3) & 0x78) | ((yr >> 29) & 0x07);
bf_crypt_out[5] = ((yr >> 22) & 0x7f) | ((cs << 5) & 0x80);
bf_crypt_out[6] = ((yr >> 15) & 0x7f) | ((cs << 6) & 0x80);
bf_crypt_out[7] = ((yr >> 8) & 0x7f) | (cs << 7);
bf_crypt_out[8] = yr;
}
/* write current count */
dsp->bf_crypt_pos = j;
}
/*
* decrypt isdn data frame
* every block with 9 bytes is decrypted
*/
void
dsp_bf_decrypt(struct dsp *dsp, u8 *data, int len)
{
int i = 0;
u8 j = dsp->bf_decrypt_in_pos;
u8 k = dsp->bf_decrypt_out_pos;
u8 *bf_crypt_inring = dsp->bf_crypt_inring;
u8 *bf_data_out = dsp->bf_data_out;
u16 sync = dsp->bf_sync;
u32 *P = dsp->bf_p;
u32 *S = dsp->bf_s;
u32 yl, yr;
u8 nibble;
u8 cs, cs0, cs1, cs2;
while (i < len) {
/*
* shift upper bit and rotate data to buffer ring
* send current decrypted data
*/
sync = (sync << 1) | ((*data) >> 7);
bf_crypt_inring[j++ & 15] = *data;
*data++ = bf_data_out[k++];
i++;
if (k == 9)
k = 0; /* repeat if no sync has been found */
/* check if not in sync */
if ((sync & 0x1f0) != 0x100)
continue;
j -= 9;
/* transcode receive data to 64 bit block of encrypted data */
yl = bf_crypt_inring[j++ & 15];
yl = (yl << 7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
yl = (yl << 7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
yl = (yl << 7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
nibble = bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
yr = nibble;
yl = (yl << 4) | (nibble >> 3);
cs2 = bf_crypt_inring[j++ & 15];
yr = (yr << 7) | (cs2 & 0x7f);
cs1 = bf_crypt_inring[j++ & 15];
yr = (yr << 7) | (cs1 & 0x7f);
cs0 = bf_crypt_inring[j++ & 15];
yr = (yr << 7) | (cs0 & 0x7f);
yr = (yr << 8) | bf_crypt_inring[j++ & 15];
/* calculate 3-bit checksum */
cs = yl ^ (yl >> 3) ^ (yl >> 6) ^ (yl >> 9) ^ (yl >> 12) ^ (yl >> 15)
^ (yl >> 18) ^ (yl >> 21) ^ (yl >> 24) ^ (yl >> 27) ^ (yl >> 30)
^ (yr << 2) ^ (yr >> 1) ^ (yr >> 4) ^ (yr >> 7) ^ (yr >> 10)
^ (yr >> 13) ^ (yr >> 16) ^ (yr >> 19) ^ (yr >> 22) ^ (yr >> 25)
^ (yr >> 28) ^ (yr >> 31);
/* check if frame is valid */
if ((cs & 0x7) != (((cs2 >> 5) & 4) | ((cs1 >> 6) & 2) | (cs0 >> 7))) {
if (dsp_debug & DEBUG_DSP_BLOWFISH)
printk(KERN_DEBUG
"DSP BLOWFISH: received corrupt frame, "
"checksum is not correct\n");
continue;
}
/* decrypt */
yr ^= P[17];
yl ^= P[16];
DROUND(yl, yr, 15);
DROUND(yr, yl, 14);
DROUND(yl, yr, 13);
DROUND(yr, yl, 12);
DROUND(yl, yr, 11);
DROUND(yr, yl, 10);
DROUND(yl, yr, 9);
DROUND(yr, yl, 8);
DROUND(yl, yr, 7);
DROUND(yr, yl, 6);
DROUND(yl, yr, 5);
DROUND(yr, yl, 4);
DROUND(yl, yr, 3);
DROUND(yr, yl, 2);
DROUND(yl, yr, 1);
DROUND(yr, yl, 0);
/* transcode 8 crypted bytes to 9 sample bytes */
bf_data_out[0] = dsp_audio_seven2law[(yl >> 25) & 0x7f];
bf_data_out[1] = dsp_audio_seven2law[(yl >> 18) & 0x7f];
bf_data_out[2] = dsp_audio_seven2law[(yl >> 11) & 0x7f];
bf_data_out[3] = dsp_audio_seven2law[(yl >> 4) & 0x7f];
bf_data_out[4] = dsp_audio_seven2law[((yl << 3) & 0x78) |
((yr >> 29) & 0x07)];
bf_data_out[5] = dsp_audio_seven2law[(yr >> 22) & 0x7f];
bf_data_out[6] = dsp_audio_seven2law[(yr >> 15) & 0x7f];
bf_data_out[7] = dsp_audio_seven2law[(yr >> 8) & 0x7f];
bf_data_out[8] = dsp_audio_seven2law[(yr >> 1) & 0x7f];
k = 0; /* start with new decoded frame */
}
/* write current count and sync */
dsp->bf_decrypt_in_pos = j;
dsp->bf_decrypt_out_pos = k;
dsp->bf_sync = sync;
}
/* used to encrypt S and P boxes */
static inline void
encrypt_block(const u32 *P, const u32 *S, u32 *dst, u32 *src)
{
u32 yl = src[0];
u32 yr = src[1];
EROUND(yr, yl, 0);
EROUND(yl, yr, 1);
EROUND(yr, yl, 2);
EROUND(yl, yr, 3);
EROUND(yr, yl, 4);
EROUND(yl, yr, 5);
EROUND(yr, yl, 6);
EROUND(yl, yr, 7);
EROUND(yr, yl, 8);
EROUND(yl, yr, 9);
EROUND(yr, yl, 10);
EROUND(yl, yr, 11);
EROUND(yr, yl, 12);
EROUND(yl, yr, 13);
EROUND(yr, yl, 14);
EROUND(yl, yr, 15);
yl ^= P[16];
yr ^= P[17];
dst[0] = yr;
dst[1] = yl;
}
/*
* initialize the dsp for encryption and decryption using the same key
* Calculates the blowfish S and P boxes for encryption and decryption.
* keylen must be in the range of 4 to 56 bytes.
* Returns 0 on success.
*/
int
dsp_bf_init(struct dsp *dsp, const u8 *key, uint keylen)
{
short i, j, count;
u32 data[2], temp;
u32 *P = (u32 *)dsp->bf_p;
u32 *S = (u32 *)dsp->bf_s;
if (keylen < 4 || keylen > 56)
return 1;
/* Set dsp states */
i = 0;
while (i < 9) {
dsp->bf_crypt_out[i] = 0xff;
dsp->bf_data_out[i] = dsp_silence;
i++;
}
dsp->bf_crypt_pos = 0;
dsp->bf_decrypt_in_pos = 0;
dsp->bf_decrypt_out_pos = 0;
dsp->bf_sync = 0x1ff;
dsp->bf_enable = 1;
/* Copy the initialization s-boxes */
for (i = 0, count = 0; i < 256; i++)
for (j = 0; j < 4; j++, count++)
S[count] = bf_sbox[count];
/* Set the p-boxes */
for (i = 0; i < 16 + 2; i++)
P[i] = bf_pbox[i];
/* Actual subkey generation */
for (j = 0, i = 0; i < 16 + 2; i++) {
temp = (((u32)key[j] << 24) |
((u32)key[(j + 1) % keylen] << 16) |
((u32)key[(j + 2) % keylen] << 8) |
((u32)key[(j + 3) % keylen]));
P[i] = P[i] ^ temp;
j = (j + 4) % keylen;
}
data[0] = 0x00000000;
data[1] = 0x00000000;
for (i = 0; i < 16 + 2; i += 2) {
encrypt_block(P, S, data, data);
P[i] = data[0];
P[i + 1] = data[1];
}
for (i = 0; i < 4; i++) {
for (j = 0, count = i * 256; j < 256; j += 2, count += 2) {
encrypt_block(P, S, data, data);
S[count] = data[0];
S[count + 1] = data[1];
}
}
return 0;
}
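/*
 * Illustrative sketch, not part of the driver (the function name and key are
 * made up; "dsp" is assumed to be a valid struct dsp as used throughout this
 * file): enabling blowfish encryption with a key and turning it off again.
 */
static inline int example_enable_blowfish(struct dsp *dsp)
{
	static const u8 key[] = "0123456789abcdef";	/* 16-byte example key */

	if (dsp_bf_init(dsp, key, sizeof(key) - 1))
		return -EINVAL;		/* keylen outside the 4..56 byte range */
	/* ... encrypted audio runs via dsp_bf_encrypt()/dsp_bf_decrypt() ... */
	dsp_bf_cleanup(dsp);		/* turn encryption off */
	return 0;
}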
/*
* turn encryption off
*/
void
dsp_bf_cleanup(struct dsp *dsp)
{
dsp->bf_enable = 0;
}
/* end of file: drivers/isdn/mISDN/dsp_blowfish.c (repo: linux-master) */
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Author Karsten Keil <[email protected]>
*
* Copyright 2008 by Karsten Keil <[email protected]>
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mISDNhw.h>
#include "core.h"
#include "layer1.h"
#include "fsm.h"
static u_int *debug;
struct layer1 {
u_long Flags;
struct FsmInst l1m;
struct FsmTimer timer3;
struct FsmTimer timerX;
int delay;
int t3_value;
struct dchannel *dch;
dchannel_l1callback *dcb;
};
#define TIMER3_DEFAULT_VALUE 7000
static
struct Fsm l1fsm_s = {NULL, 0, 0, NULL, NULL};
enum {
ST_L1_F2,
ST_L1_F3,
ST_L1_F4,
ST_L1_F5,
ST_L1_F6,
ST_L1_F7,
ST_L1_F8,
};
#define L1S_STATE_COUNT (ST_L1_F8 + 1)
static char *strL1SState[] =
{
"ST_L1_F2",
"ST_L1_F3",
"ST_L1_F4",
"ST_L1_F5",
"ST_L1_F6",
"ST_L1_F7",
"ST_L1_F8",
};
enum {
EV_PH_ACTIVATE,
EV_PH_DEACTIVATE,
EV_RESET_IND,
EV_DEACT_CNF,
EV_DEACT_IND,
EV_POWER_UP,
EV_ANYSIG_IND,
EV_INFO2_IND,
EV_INFO4_IND,
EV_TIMER_DEACT,
EV_TIMER_ACT,
EV_TIMER3,
};
#define L1_EVENT_COUNT (EV_TIMER3 + 1)
static char *strL1Event[] =
{
"EV_PH_ACTIVATE",
"EV_PH_DEACTIVATE",
"EV_RESET_IND",
"EV_DEACT_CNF",
"EV_DEACT_IND",
"EV_POWER_UP",
"EV_ANYSIG_IND",
"EV_INFO2_IND",
"EV_INFO4_IND",
"EV_TIMER_DEACT",
"EV_TIMER_ACT",
"EV_TIMER3",
};
static void
l1m_debug(struct FsmInst *fi, char *fmt, ...)
{
struct layer1 *l1 = fi->userdata;
struct va_format vaf;
va_list va;
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
printk(KERN_DEBUG "%s: %pV\n", dev_name(&l1->dch->dev.dev), &vaf);
va_end(va);
}
static void
l1_reset(struct FsmInst *fi, int event, void *arg)
{
mISDN_FsmChangeState(fi, ST_L1_F3);
}
static void
l1_deact_cnf(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
mISDN_FsmChangeState(fi, ST_L1_F3);
if (test_bit(FLG_L1_ACTIVATING, &l1->Flags))
l1->dcb(l1->dch, HW_POWERUP_REQ);
}
static void
l1_deact_req_s(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
mISDN_FsmChangeState(fi, ST_L1_F3);
mISDN_FsmRestartTimer(&l1->timerX, 550, EV_TIMER_DEACT, NULL, 2);
test_and_set_bit(FLG_L1_DEACTTIMER, &l1->Flags);
}
static void
l1_power_up_s(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
if (test_bit(FLG_L1_ACTIVATING, &l1->Flags)) {
mISDN_FsmChangeState(fi, ST_L1_F4);
l1->dcb(l1->dch, INFO3_P8);
} else
mISDN_FsmChangeState(fi, ST_L1_F3);
}
static void
l1_go_F5(struct FsmInst *fi, int event, void *arg)
{
mISDN_FsmChangeState(fi, ST_L1_F5);
}
static void
l1_go_F8(struct FsmInst *fi, int event, void *arg)
{
mISDN_FsmChangeState(fi, ST_L1_F8);
}
static void
l1_info2_ind(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
mISDN_FsmChangeState(fi, ST_L1_F6);
l1->dcb(l1->dch, INFO3_P8);
}
static void
l1_info4_ind(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
mISDN_FsmChangeState(fi, ST_L1_F7);
l1->dcb(l1->dch, INFO3_P8);
if (test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags))
mISDN_FsmDelTimer(&l1->timerX, 4);
if (!test_bit(FLG_L1_ACTIVATED, &l1->Flags)) {
if (test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags))
mISDN_FsmDelTimer(&l1->timer3, 3);
mISDN_FsmRestartTimer(&l1->timerX, 110, EV_TIMER_ACT, NULL, 2);
test_and_set_bit(FLG_L1_ACTTIMER, &l1->Flags);
}
}
static void
l1_timer3(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags);
if (test_and_clear_bit(FLG_L1_ACTIVATING, &l1->Flags)) {
if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags))
l1->dcb(l1->dch, HW_D_NOBLOCKED);
l1->dcb(l1->dch, PH_DEACTIVATE_IND);
}
if (l1->l1m.state != ST_L1_F6) {
mISDN_FsmChangeState(fi, ST_L1_F3);
/* do not force anything here, we need to send INFO 0 */
}
}
static void
l1_timer_act(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
test_and_clear_bit(FLG_L1_ACTTIMER, &l1->Flags);
test_and_set_bit(FLG_L1_ACTIVATED, &l1->Flags);
l1->dcb(l1->dch, PH_ACTIVATE_IND);
}
static void
l1_timer_deact(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags);
test_and_clear_bit(FLG_L1_ACTIVATED, &l1->Flags);
if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags))
l1->dcb(l1->dch, HW_D_NOBLOCKED);
l1->dcb(l1->dch, PH_DEACTIVATE_IND);
l1->dcb(l1->dch, HW_DEACT_REQ);
}
static void
l1_activate_s(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
mISDN_FsmRestartTimer(&l1->timer3, l1->t3_value, EV_TIMER3, NULL, 2);
test_and_set_bit(FLG_L1_T3RUN, &l1->Flags);
/* Tell HW to send INFO 1 */
l1->dcb(l1->dch, HW_RESET_REQ);
}
static void
l1_activate_no(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
if ((!test_bit(FLG_L1_DEACTTIMER, &l1->Flags)) &&
(!test_bit(FLG_L1_T3RUN, &l1->Flags))) {
test_and_clear_bit(FLG_L1_ACTIVATING, &l1->Flags);
if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags))
l1->dcb(l1->dch, HW_D_NOBLOCKED);
l1->dcb(l1->dch, PH_DEACTIVATE_IND);
}
}
static struct FsmNode L1SFnList[] =
{
{ST_L1_F3, EV_PH_ACTIVATE, l1_activate_s},
{ST_L1_F6, EV_PH_ACTIVATE, l1_activate_no},
{ST_L1_F8, EV_PH_ACTIVATE, l1_activate_no},
{ST_L1_F3, EV_RESET_IND, l1_reset},
{ST_L1_F4, EV_RESET_IND, l1_reset},
{ST_L1_F5, EV_RESET_IND, l1_reset},
{ST_L1_F6, EV_RESET_IND, l1_reset},
{ST_L1_F7, EV_RESET_IND, l1_reset},
{ST_L1_F8, EV_RESET_IND, l1_reset},
{ST_L1_F3, EV_DEACT_CNF, l1_deact_cnf},
{ST_L1_F4, EV_DEACT_CNF, l1_deact_cnf},
{ST_L1_F5, EV_DEACT_CNF, l1_deact_cnf},
{ST_L1_F6, EV_DEACT_CNF, l1_deact_cnf},
{ST_L1_F7, EV_DEACT_CNF, l1_deact_cnf},
{ST_L1_F8, EV_DEACT_CNF, l1_deact_cnf},
{ST_L1_F6, EV_DEACT_IND, l1_deact_req_s},
{ST_L1_F7, EV_DEACT_IND, l1_deact_req_s},
{ST_L1_F8, EV_DEACT_IND, l1_deact_req_s},
{ST_L1_F3, EV_POWER_UP, l1_power_up_s},
{ST_L1_F4, EV_ANYSIG_IND, l1_go_F5},
{ST_L1_F6, EV_ANYSIG_IND, l1_go_F8},
{ST_L1_F7, EV_ANYSIG_IND, l1_go_F8},
{ST_L1_F3, EV_INFO2_IND, l1_info2_ind},
{ST_L1_F4, EV_INFO2_IND, l1_info2_ind},
{ST_L1_F5, EV_INFO2_IND, l1_info2_ind},
{ST_L1_F7, EV_INFO2_IND, l1_info2_ind},
{ST_L1_F8, EV_INFO2_IND, l1_info2_ind},
{ST_L1_F3, EV_INFO4_IND, l1_info4_ind},
{ST_L1_F4, EV_INFO4_IND, l1_info4_ind},
{ST_L1_F5, EV_INFO4_IND, l1_info4_ind},
{ST_L1_F6, EV_INFO4_IND, l1_info4_ind},
{ST_L1_F8, EV_INFO4_IND, l1_info4_ind},
{ST_L1_F3, EV_TIMER3, l1_timer3},
{ST_L1_F4, EV_TIMER3, l1_timer3},
{ST_L1_F5, EV_TIMER3, l1_timer3},
{ST_L1_F6, EV_TIMER3, l1_timer3},
{ST_L1_F8, EV_TIMER3, l1_timer3},
{ST_L1_F7, EV_TIMER_ACT, l1_timer_act},
{ST_L1_F3, EV_TIMER_DEACT, l1_timer_deact},
{ST_L1_F4, EV_TIMER_DEACT, l1_timer_deact},
{ST_L1_F5, EV_TIMER_DEACT, l1_timer_deact},
{ST_L1_F6, EV_TIMER_DEACT, l1_timer_deact},
{ST_L1_F7, EV_TIMER_DEACT, l1_timer_deact},
{ST_L1_F8, EV_TIMER_DEACT, l1_timer_deact},
};
static void
release_l1(struct layer1 *l1) {
mISDN_FsmDelTimer(&l1->timerX, 0);
mISDN_FsmDelTimer(&l1->timer3, 0);
if (l1->dch)
l1->dch->l1 = NULL;
module_put(THIS_MODULE);
kfree(l1);
}
int
l1_event(struct layer1 *l1, u_int event)
{
int err = 0;
if (!l1)
return -EINVAL;
switch (event) {
case HW_RESET_IND:
mISDN_FsmEvent(&l1->l1m, EV_RESET_IND, NULL);
break;
case HW_DEACT_IND:
mISDN_FsmEvent(&l1->l1m, EV_DEACT_IND, NULL);
break;
case HW_POWERUP_IND:
mISDN_FsmEvent(&l1->l1m, EV_POWER_UP, NULL);
break;
case HW_DEACT_CNF:
mISDN_FsmEvent(&l1->l1m, EV_DEACT_CNF, NULL);
break;
case ANYSIGNAL:
mISDN_FsmEvent(&l1->l1m, EV_ANYSIG_IND, NULL);
break;
case LOSTFRAMING:
mISDN_FsmEvent(&l1->l1m, EV_ANYSIG_IND, NULL);
break;
case INFO2:
mISDN_FsmEvent(&l1->l1m, EV_INFO2_IND, NULL);
break;
case INFO4_P8:
mISDN_FsmEvent(&l1->l1m, EV_INFO4_IND, NULL);
break;
case INFO4_P10:
mISDN_FsmEvent(&l1->l1m, EV_INFO4_IND, NULL);
break;
case PH_ACTIVATE_REQ:
if (test_bit(FLG_L1_ACTIVATED, &l1->Flags))
l1->dcb(l1->dch, PH_ACTIVATE_IND);
else {
test_and_set_bit(FLG_L1_ACTIVATING, &l1->Flags);
mISDN_FsmEvent(&l1->l1m, EV_PH_ACTIVATE, NULL);
}
break;
case CLOSE_CHANNEL:
release_l1(l1);
break;
default:
if ((event & ~HW_TIMER3_VMASK) == HW_TIMER3_VALUE) {
int val = event & HW_TIMER3_VMASK;
if (val < 5)
val = 5;
if (val > 30)
val = 30;
l1->t3_value = val;
break;
}
if (*debug & DEBUG_L1)
printk(KERN_DEBUG "%s %x unhandled\n",
__func__, event);
err = -EINVAL;
}
return err;
}
EXPORT_SYMBOL(l1_event);
int
create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
struct layer1 *nl1;
nl1 = kzalloc(sizeof(struct layer1), GFP_ATOMIC);
if (!nl1) {
printk(KERN_ERR "kmalloc struct layer1 failed\n");
return -ENOMEM;
}
nl1->l1m.fsm = &l1fsm_s;
nl1->l1m.state = ST_L1_F3;
nl1->Flags = 0;
nl1->t3_value = TIMER3_DEFAULT_VALUE;
nl1->l1m.debug = *debug & DEBUG_L1_FSM;
nl1->l1m.userdata = nl1;
nl1->l1m.userint = 0;
nl1->l1m.printdebug = l1m_debug;
nl1->dch = dch;
nl1->dcb = dcb;
mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer3);
mISDN_FsmInitTimer(&nl1->l1m, &nl1->timerX);
__module_get(THIS_MODULE);
dch->l1 = nl1;
return 0;
}
EXPORT_SYMBOL(create_l1);
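/*
 * Illustrative sketch, not part of this file (names are made up; the callback
 * signature is assumed to follow dchannel_l1callback from mISDNhw.h): roughly
 * how a card driver attaches this layer-1 state machine to its D-channel via
 * create_l1() above and feeds hardware indications into it with l1_event().
 */
#if 0	/* example only, never compiled as part of the kernel */
static int example_l1_callback(struct dchannel *dch, u_int cmd)
{
	/* a real driver programs its transceiver here, e.g. on HW_RESET_REQ
	 * or HW_POWERUP_REQ, and forwards PH_ACTIVATE_IND towards layer 2 */
	return 0;
}

static int example_attach_l1(struct dchannel *dch)
{
	int err = create_l1(dch, example_l1_callback);

	if (err)
		return err;
	/* hardware interrupts are then translated into FSM events: */
	l1_event(dch->l1, HW_POWERUP_IND);
	return l1_event(dch->l1, PH_ACTIVATE_REQ);
}
#endif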
int
Isdnl1_Init(u_int *deb)
{
debug = deb;
l1fsm_s.state_count = L1S_STATE_COUNT;
l1fsm_s.event_count = L1_EVENT_COUNT;
l1fsm_s.strEvent = strL1Event;
l1fsm_s.strState = strL1SState;
return mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
}
void
Isdnl1_cleanup(void)
{
mISDN_FsmFree(&l1fsm_s);
}
/* end of file: drivers/isdn/mISDN/layer1.c (repo: linux-master) */
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Author Karsten Keil <[email protected]>
*
* Copyright 2008 by Karsten Keil <[email protected]>
*/
#include <linux/mISDNif.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "core.h"
static u_int *debug;
static struct proto mISDN_proto = {
.name = "misdn",
.owner = THIS_MODULE,
.obj_size = sizeof(struct mISDN_sock)
};
#define _pms(sk) ((struct mISDN_sock *)sk)
static struct mISDN_sock_list data_sockets = {
.lock = __RW_LOCK_UNLOCKED(data_sockets.lock)
};
static struct mISDN_sock_list base_sockets = {
.lock = __RW_LOCK_UNLOCKED(base_sockets.lock)
};
#define L2_HEADER_LEN 4
static inline struct sk_buff *
_l2_alloc_skb(unsigned int len, gfp_t gfp_mask)
{
struct sk_buff *skb;
skb = alloc_skb(len + L2_HEADER_LEN, gfp_mask);
if (likely(skb))
skb_reserve(skb, L2_HEADER_LEN);
return skb;
}
static void
mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk)
{
write_lock_bh(&l->lock);
sk_add_node(sk, &l->head);
write_unlock_bh(&l->lock);
}
static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk)
{
write_lock_bh(&l->lock);
sk_del_node_init(sk);
write_unlock_bh(&l->lock);
}
static int
mISDN_send(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct mISDN_sock *msk;
int err;
msk = container_of(ch, struct mISDN_sock, ch);
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s len %d %p\n", __func__, skb->len, skb);
if (msk->sk.sk_state == MISDN_CLOSED)
return -EUNATCH;
__net_timestamp(skb);
err = sock_queue_rcv_skb(&msk->sk, skb);
if (err)
printk(KERN_WARNING "%s: error %d\n", __func__, err);
return err;
}
static int
mISDN_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct mISDN_sock *msk;
msk = container_of(ch, struct mISDN_sock, ch);
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s(%p, %x, %p)\n", __func__, ch, cmd, arg);
switch (cmd) {
case CLOSE_CHANNEL:
msk->sk.sk_state = MISDN_CLOSED;
break;
}
return 0;
}
static inline void
mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
struct __kernel_old_timeval tv;
if (_pms(sk)->cmask & MISDN_TIME_STAMP) {
skb_get_timestamp(skb, &tv);
put_cmsg(msg, SOL_MISDN, MISDN_TIME_STAMP, sizeof(tv), &tv);
}
}
static int
mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct sk_buff *skb;
struct sock *sk = sock->sk;
int copied, err;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s: len %d, flags %x ch.nr %d, proto %x\n",
__func__, (int)len, flags, _pms(sk)->ch.nr,
sk->sk_protocol);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
if (sk->sk_state == MISDN_CLOSED)
return 0;
skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
return err;
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_mISDN *, maddr, msg->msg_name);
maddr->family = AF_ISDN;
maddr->dev = _pms(sk)->dev->id;
if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
(sk->sk_protocol == ISDN_P_LAPD_NT)) {
maddr->channel = (mISDN_HEAD_ID(skb) >> 16) & 0xff;
maddr->tei = (mISDN_HEAD_ID(skb) >> 8) & 0xff;
maddr->sapi = mISDN_HEAD_ID(skb) & 0xff;
} else {
maddr->channel = _pms(sk)->ch.nr;
maddr->sapi = _pms(sk)->ch.addr & 0xFF;
maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
}
msg->msg_namelen = sizeof(*maddr);
}
copied = skb->len + MISDN_HEADER_LEN;
if (len < copied) {
if (flags & MSG_PEEK)
refcount_dec(&skb->users);
else
skb_queue_head(&sk->sk_receive_queue, skb);
return -ENOSPC;
}
memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb),
MISDN_HEADER_LEN);
err = skb_copy_datagram_msg(skb, 0, msg, copied);
mISDN_sock_cmsg(sk, msg, skb);
skb_free_datagram(sk, skb);
return err ? : copied;
}
static int
mISDN_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int err = -ENOMEM;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s: len %d flags %x ch %d proto %x\n",
__func__, (int)len, msg->msg_flags, _pms(sk)->ch.nr,
sk->sk_protocol);
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE))
return -EINVAL;
if (len < MISDN_HEADER_LEN)
return -EINVAL;
if (sk->sk_state != MISDN_BOUND)
return -EBADFD;
lock_sock(sk);
skb = _l2_alloc_skb(len, GFP_KERNEL);
if (!skb)
goto done;
if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
err = -EFAULT;
goto done;
}
memcpy(mISDN_HEAD_P(skb), skb->data, MISDN_HEADER_LEN);
skb_pull(skb, MISDN_HEADER_LEN);
if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
/* if we have an address, we use it */
DECLARE_SOCKADDR(struct sockaddr_mISDN *, maddr, msg->msg_name);
mISDN_HEAD_ID(skb) = maddr->channel;
} else { /* use default for L2 messages */
if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
(sk->sk_protocol == ISDN_P_LAPD_NT))
mISDN_HEAD_ID(skb) = _pms(sk)->ch.nr;
}
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s: ID:%x\n",
__func__, mISDN_HEAD_ID(skb));
err = -ENODEV;
if (!_pms(sk)->ch.peer)
goto done;
err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb);
if (err)
goto done;
else {
skb = NULL;
err = len;
}
done:
kfree_skb(skb);
release_sock(sk);
return err;
}
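/*
 * Illustrative userspace sketch, not part of this driver: every datagram on a
 * bound mISDN data socket starts with struct mISDNhead (prim and id), which
 * the sendmsg/recvmsg code above moves between the buffer and the message
 * header.  The userspace header location is an assumption (mISDNuser ships a
 * copy of mISDNif.h), and PH_DATA_REQ is only appropriate for a socket bound
 * to a raw B-channel protocol, which this sketch assumes.
 */
#if 0	/* example only, never compiled as part of the kernel */
#include <string.h>
#include <sys/socket.h>
#include <mISDNif.h>

static int example_send_frame(int sk, const void *payload, size_t plen)
{
	unsigned char buf[MISDN_HEADER_LEN + 2048];
	struct mISDNhead *hh = (struct mISDNhead *)buf;

	if (plen > sizeof(buf) - MISDN_HEADER_LEN)
		return -1;
	hh->prim = PH_DATA_REQ;	/* assumption: socket bound as ISDN_P_B_RAW */
	hh->id = 0;
	memcpy(buf + MISDN_HEADER_LEN, payload, plen);
	return send(sk, buf, MISDN_HEADER_LEN + plen, 0);
}
#endif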
static int
data_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
if (!sk)
return 0;
switch (sk->sk_protocol) {
case ISDN_P_TE_S0:
case ISDN_P_NT_S0:
case ISDN_P_TE_E1:
case ISDN_P_NT_E1:
if (sk->sk_state == MISDN_BOUND)
delete_channel(&_pms(sk)->ch);
else
mISDN_sock_unlink(&data_sockets, sk);
break;
case ISDN_P_LAPD_TE:
case ISDN_P_LAPD_NT:
case ISDN_P_B_RAW:
case ISDN_P_B_HDLC:
case ISDN_P_B_X75SLP:
case ISDN_P_B_L2DTMF:
case ISDN_P_B_L2DSP:
case ISDN_P_B_L2DSPHDLC:
delete_channel(&_pms(sk)->ch);
mISDN_sock_unlink(&data_sockets, sk);
break;
}
lock_sock(sk);
sock_orphan(sk);
skb_queue_purge(&sk->sk_receive_queue);
release_sock(sk);
sock_put(sk);
return 0;
}
static int
data_sock_ioctl_bound(struct sock *sk, unsigned int cmd, void __user *p)
{
struct mISDN_ctrl_req cq;
int err = -EINVAL, val[2];
struct mISDNchannel *bchan, *next;
lock_sock(sk);
if (!_pms(sk)->dev) {
err = -ENODEV;
goto done;
}
switch (cmd) {
case IMCTRLREQ:
if (copy_from_user(&cq, p, sizeof(cq))) {
err = -EFAULT;
break;
}
if ((sk->sk_protocol & ~ISDN_P_B_MASK) == ISDN_P_B_START) {
list_for_each_entry_safe(bchan, next,
&_pms(sk)->dev->bchannels, list) {
if (bchan->nr == cq.channel) {
err = bchan->ctrl(bchan,
CONTROL_CHANNEL, &cq);
break;
}
}
} else
err = _pms(sk)->dev->D.ctrl(&_pms(sk)->dev->D,
CONTROL_CHANNEL, &cq);
if (err)
break;
if (copy_to_user(p, &cq, sizeof(cq)))
err = -EFAULT;
break;
case IMCLEAR_L2:
if (sk->sk_protocol != ISDN_P_LAPD_NT) {
err = -EINVAL;
break;
}
val[0] = cmd;
if (get_user(val[1], (int __user *)p)) {
err = -EFAULT;
break;
}
err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr,
CONTROL_CHANNEL, val);
break;
case IMHOLD_L1:
if (sk->sk_protocol != ISDN_P_LAPD_NT
&& sk->sk_protocol != ISDN_P_LAPD_TE) {
err = -EINVAL;
break;
}
val[0] = cmd;
if (get_user(val[1], (int __user *)p)) {
err = -EFAULT;
break;
}
err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr,
CONTROL_CHANNEL, val);
break;
default:
err = -EINVAL;
break;
}
done:
release_sock(sk);
return err;
}
static int
data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
int err = 0, id;
struct sock *sk = sock->sk;
struct mISDNdevice *dev;
struct mISDNversion ver;
switch (cmd) {
case IMGETVERSION:
ver.major = MISDN_MAJOR_VERSION;
ver.minor = MISDN_MINOR_VERSION;
ver.release = MISDN_RELEASE;
if (copy_to_user((void __user *)arg, &ver, sizeof(ver)))
err = -EFAULT;
break;
case IMGETCOUNT:
id = get_mdevice_count();
if (put_user(id, (int __user *)arg))
err = -EFAULT;
break;
case IMGETDEVINFO:
if (get_user(id, (int __user *)arg)) {
err = -EFAULT;
break;
}
dev = get_mdevice(id);
if (dev) {
struct mISDN_devinfo di;
memset(&di, 0, sizeof(di));
di.id = dev->id;
di.Dprotocols = dev->Dprotocols;
di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
di.protocol = dev->D.protocol;
memcpy(di.channelmap, dev->channelmap,
sizeof(di.channelmap));
di.nrbchan = dev->nrbchan;
strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
if (copy_to_user((void __user *)arg, &di, sizeof(di)))
err = -EFAULT;
} else
err = -ENODEV;
break;
default:
if (sk->sk_state == MISDN_BOUND)
err = data_sock_ioctl_bound(sk, cmd,
(void __user *)arg);
else
err = -ENOTCONN;
}
return err;
}
static int data_sock_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int len)
{
struct sock *sk = sock->sk;
int err = 0, opt = 0;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s(%p, %d, %x, optval, %d)\n", __func__, sock,
level, optname, len);
lock_sock(sk);
switch (optname) {
case MISDN_TIME_STAMP:
if (copy_from_sockptr(&opt, optval, sizeof(int))) {
err = -EFAULT;
break;
}
if (opt)
_pms(sk)->cmask |= MISDN_TIME_STAMP;
else
_pms(sk)->cmask &= ~MISDN_TIME_STAMP;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int data_sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int len, opt;
if (get_user(len, optlen))
return -EFAULT;
if (len != sizeof(char))
return -EINVAL;
switch (optname) {
case MISDN_TIME_STAMP:
if (_pms(sk)->cmask & MISDN_TIME_STAMP)
opt = 1;
else
opt = 0;
if (put_user(opt, optval))
return -EFAULT;
break;
default:
return -ENOPROTOOPT;
}
return 0;
}
static int
data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
struct sock *sk = sock->sk;
struct sock *csk;
int err = 0;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
if (addr_len != sizeof(struct sockaddr_mISDN))
return -EINVAL;
if (!maddr || maddr->family != AF_ISDN)
return -EINVAL;
lock_sock(sk);
if (_pms(sk)->dev) {
err = -EALREADY;
goto done;
}
_pms(sk)->dev = get_mdevice(maddr->dev);
if (!_pms(sk)->dev) {
err = -ENODEV;
goto done;
}
if (sk->sk_protocol < ISDN_P_B_START) {
read_lock_bh(&data_sockets.lock);
sk_for_each(csk, &data_sockets.head) {
if (sk == csk)
continue;
if (_pms(csk)->dev != _pms(sk)->dev)
continue;
if (csk->sk_protocol >= ISDN_P_B_START)
continue;
if (IS_ISDN_P_TE(csk->sk_protocol)
== IS_ISDN_P_TE(sk->sk_protocol))
continue;
read_unlock_bh(&data_sockets.lock);
err = -EBUSY;
goto done;
}
read_unlock_bh(&data_sockets.lock);
}
_pms(sk)->ch.send = mISDN_send;
_pms(sk)->ch.ctrl = mISDN_ctrl;
switch (sk->sk_protocol) {
case ISDN_P_TE_S0:
case ISDN_P_NT_S0:
case ISDN_P_TE_E1:
case ISDN_P_NT_E1:
mISDN_sock_unlink(&data_sockets, sk);
err = connect_layer1(_pms(sk)->dev, &_pms(sk)->ch,
sk->sk_protocol, maddr);
if (err)
mISDN_sock_link(&data_sockets, sk);
break;
case ISDN_P_LAPD_TE:
case ISDN_P_LAPD_NT:
err = create_l2entity(_pms(sk)->dev, &_pms(sk)->ch,
sk->sk_protocol, maddr);
break;
case ISDN_P_B_RAW:
case ISDN_P_B_HDLC:
case ISDN_P_B_X75SLP:
case ISDN_P_B_L2DTMF:
case ISDN_P_B_L2DSP:
case ISDN_P_B_L2DSPHDLC:
err = connect_Bstack(_pms(sk)->dev, &_pms(sk)->ch,
sk->sk_protocol, maddr);
break;
default:
err = -EPROTONOSUPPORT;
}
if (err)
goto done;
sk->sk_state = MISDN_BOUND;
_pms(sk)->ch.protocol = sk->sk_protocol;
done:
release_sock(sk);
return err;
}
static int
data_sock_getname(struct socket *sock, struct sockaddr *addr,
int peer)
{
struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
struct sock *sk = sock->sk;
if (!_pms(sk)->dev)
return -EBADFD;
lock_sock(sk);
maddr->family = AF_ISDN;
maddr->dev = _pms(sk)->dev->id;
maddr->channel = _pms(sk)->ch.nr;
maddr->sapi = _pms(sk)->ch.addr & 0xff;
maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xff;
release_sock(sk);
return sizeof(*maddr);
}
static const struct proto_ops data_sock_ops = {
.family = PF_ISDN,
.owner = THIS_MODULE,
.release = data_sock_release,
.ioctl = data_sock_ioctl,
.bind = data_sock_bind,
.getname = data_sock_getname,
.sendmsg = mISDN_sock_sendmsg,
.recvmsg = mISDN_sock_recvmsg,
.poll = datagram_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = data_sock_setsockopt,
.getsockopt = data_sock_getsockopt,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.mmap = sock_no_mmap
};
static int
data_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
{
struct sock *sk;
if (sock->type != SOCK_DGRAM)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sock->ops = &data_sock_ops;
sock->state = SS_UNCONNECTED;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = protocol;
sk->sk_state = MISDN_OPEN;
mISDN_sock_link(&data_sockets, sk);
return 0;
}
static int
base_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
if (!sk)
return 0;
mISDN_sock_unlink(&base_sockets, sk);
sock_orphan(sk);
sock_put(sk);
return 0;
}
static int
base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
int err = 0, id;
struct mISDNdevice *dev;
struct mISDNversion ver;
switch (cmd) {
case IMGETVERSION:
ver.major = MISDN_MAJOR_VERSION;
ver.minor = MISDN_MINOR_VERSION;
ver.release = MISDN_RELEASE;
if (copy_to_user((void __user *)arg, &ver, sizeof(ver)))
err = -EFAULT;
break;
case IMGETCOUNT:
id = get_mdevice_count();
if (put_user(id, (int __user *)arg))
err = -EFAULT;
break;
case IMGETDEVINFO:
if (get_user(id, (int __user *)arg)) {
err = -EFAULT;
break;
}
dev = get_mdevice(id);
if (dev) {
struct mISDN_devinfo di;
memset(&di, 0, sizeof(di));
di.id = dev->id;
di.Dprotocols = dev->Dprotocols;
di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
di.protocol = dev->D.protocol;
memcpy(di.channelmap, dev->channelmap,
sizeof(di.channelmap));
di.nrbchan = dev->nrbchan;
strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
if (copy_to_user((void __user *)arg, &di, sizeof(di)))
err = -EFAULT;
} else
err = -ENODEV;
break;
case IMSETDEVNAME:
{
struct mISDN_devrename dn;
if (copy_from_user(&dn, (void __user *)arg,
sizeof(dn))) {
err = -EFAULT;
break;
}
dn.name[sizeof(dn.name) - 1] = '\0';
dev = get_mdevice(dn.id);
if (dev)
err = device_rename(&dev->dev, dn.name);
else
err = -ENODEV;
}
break;
default:
err = -EINVAL;
}
return err;
}
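/*
 * Illustrative userspace sketch, not part of this driver: enumerating mISDN
 * devices through a base socket with the IMGETCOUNT/IMGETDEVINFO ioctls
 * handled above.  IMGETDEVINFO reads the requested id from the first int of
 * the passed structure and fills in the rest.  Opening the base socket needs
 * CAP_NET_RAW; the userspace header location is an assumption (mISDNuser
 * ships a copy of mISDNif.h).
 */
#if 0	/* example only, never compiled as part of the kernel */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <mISDNif.h>

static void example_list_devices(void)
{
	struct mISDN_devinfo di;
	int sk, i, count;

	sk = socket(PF_ISDN, SOCK_RAW, ISDN_P_BASE);
	if (sk < 0)
		return;
	if (ioctl(sk, IMGETCOUNT, &count) == 0) {
		for (i = 0; i < count; i++) {
			di.id = i;	/* ids are assumed to run 0..count-1 */
			if (ioctl(sk, IMGETDEVINFO, &di) == 0)
				printf("%s: %u B-channels\n", di.name, di.nrbchan);
		}
	}
	close(sk);
}
#endif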
static int
base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
struct sock *sk = sock->sk;
int err = 0;
if (addr_len < sizeof(struct sockaddr_mISDN))
return -EINVAL;
if (!maddr || maddr->family != AF_ISDN)
return -EINVAL;
lock_sock(sk);
if (_pms(sk)->dev) {
err = -EALREADY;
goto done;
}
_pms(sk)->dev = get_mdevice(maddr->dev);
if (!_pms(sk)->dev) {
err = -ENODEV;
goto done;
}
sk->sk_state = MISDN_BOUND;
done:
release_sock(sk);
return err;
}
static const struct proto_ops base_sock_ops = {
.family = PF_ISDN,
.owner = THIS_MODULE,
.release = base_sock_release,
.ioctl = base_sock_ioctl,
.bind = base_sock_bind,
.getname = sock_no_getname,
.sendmsg = sock_no_sendmsg,
.recvmsg = sock_no_recvmsg,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.mmap = sock_no_mmap
};
static int
base_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
{
struct sock *sk;
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
if (!capable(CAP_NET_RAW))
return -EPERM;
sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sock->ops = &base_sock_ops;
sock->state = SS_UNCONNECTED;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = protocol;
sk->sk_state = MISDN_OPEN;
mISDN_sock_link(&base_sockets, sk);
return 0;
}
static int
mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern)
{
int err = -EPROTONOSUPPORT;
switch (proto) {
case ISDN_P_BASE:
err = base_sock_create(net, sock, proto, kern);
break;
case ISDN_P_TE_S0:
case ISDN_P_NT_S0:
case ISDN_P_TE_E1:
case ISDN_P_NT_E1:
case ISDN_P_LAPD_TE:
case ISDN_P_LAPD_NT:
case ISDN_P_B_RAW:
case ISDN_P_B_HDLC:
case ISDN_P_B_X75SLP:
case ISDN_P_B_L2DTMF:
case ISDN_P_B_L2DSP:
case ISDN_P_B_L2DSPHDLC:
err = data_sock_create(net, sock, proto, kern);
break;
default:
return err;
}
return err;
}
static const struct net_proto_family mISDN_sock_family_ops = {
.owner = THIS_MODULE,
.family = PF_ISDN,
.create = mISDN_sock_create,
};
int
misdn_sock_init(u_int *deb)
{
int err;
debug = deb;
err = sock_register(&mISDN_sock_family_ops);
if (err)
printk(KERN_ERR "%s: error(%d)\n", __func__, err);
return err;
}
void
misdn_sock_cleanup(void)
{
sock_unregister(PF_ISDN);
}
/* end of file: drivers/isdn/mISDN/socket.c (repo: linux-master) */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 by Andreas Eversberg <[email protected]>
*
* Quick API description:
*
* A clock source registers using mISDN_register_clock:
* name = text string to name clock source
* priority = value to prioritize clock sources (0 = default)
* ctl = callback function to enable/disable clock source
* priv = private pointer of clock source
* return = pointer to clock source structure;
*
* Note: Callback 'ctl' can be called before mISDN_register_clock returns!
* Also it can be called during mISDN_unregister_clock.
*
* A clock source calls mISDN_clock_update with the number of samples elapsed,
* if enabled. If the call is delayed, the timestamp argument must be set to
* the time of the actual event.
*
* A clock source unregisters using mISDN_unregister_clock.
*
* To get the current clock, call mISDN_clock_get. The returned 16-bit value
* is a free-running sample counter; the time elapsed since the last clock
* event is added.
*/
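/*
 * Illustrative sketch, not part of this file (names are made up; the callback
 * signature follows clockctl_func_t from mISDNif.h): how a card driver with a
 * periodic interrupt would plug into the API described above, using the
 * functions defined further down in this file.
 */
#if 0	/* example only, shown next to the API description it illustrates */
static struct mISDNclock *example_clock;

static void example_clock_ctl(void *priv, int enable)
{
	/* a real driver starts/stops its hardware timer interrupt here */
}

static int example_clock_setup(void *card)
{
	example_clock = mISDN_register_clock("example-card", 0,
					     example_clock_ctl, card);
	return example_clock ? 0 : -ENOMEM;
}

static void example_clock_irq(void)
{
	/* called every millisecond, i.e. 8 samples at 8 kHz */
	if (example_clock)
		mISDN_clock_update(example_clock, 8, NULL);
}
#endif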
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/mISDNif.h>
#include <linux/export.h>
#include "core.h"
static u_int *debug;
static LIST_HEAD(iclock_list);
static DEFINE_RWLOCK(iclock_lock);
static u16 iclock_count; /* counter of last clock */
static ktime_t iclock_timestamp; /* time stamp of last clock */
static int iclock_timestamp_valid; /* already received one timestamp */
static struct mISDNclock *iclock_current;
void
mISDN_init_clock(u_int *dp)
{
debug = dp;
iclock_timestamp = ktime_get();
}
static void
select_iclock(void)
{
struct mISDNclock *iclock, *bestclock = NULL, *lastclock = NULL;
int pri = -128;
list_for_each_entry(iclock, &iclock_list, list) {
if (iclock->pri > pri) {
pri = iclock->pri;
bestclock = iclock;
}
if (iclock_current == iclock)
lastclock = iclock;
}
if (lastclock && bestclock != lastclock) {
		/* last used clock source still exists but is being replaced, disable it */
if (*debug & DEBUG_CLOCK)
printk(KERN_DEBUG "Old clock source '%s' disable.\n",
lastclock->name);
lastclock->ctl(lastclock->priv, 0);
}
if (bestclock && bestclock != iclock_current) {
/* new clock source selected, enable */
if (*debug & DEBUG_CLOCK)
printk(KERN_DEBUG "New clock source '%s' enable.\n",
bestclock->name);
bestclock->ctl(bestclock->priv, 1);
}
if (bestclock != iclock_current) {
		/* no clock received from the new source yet */
iclock_timestamp_valid = 0;
}
iclock_current = bestclock;
}
struct mISDNclock
*mISDN_register_clock(char *name, int pri, clockctl_func_t *ctl, void *priv)
{
u_long flags;
struct mISDNclock *iclock;
if (*debug & (DEBUG_CORE | DEBUG_CLOCK))
printk(KERN_DEBUG "%s: %s %d\n", __func__, name, pri);
iclock = kzalloc(sizeof(struct mISDNclock), GFP_ATOMIC);
if (!iclock) {
printk(KERN_ERR "%s: No memory for clock entry.\n", __func__);
return NULL;
}
strncpy(iclock->name, name, sizeof(iclock->name) - 1);
iclock->pri = pri;
iclock->priv = priv;
iclock->ctl = ctl;
write_lock_irqsave(&iclock_lock, flags);
list_add_tail(&iclock->list, &iclock_list);
select_iclock();
write_unlock_irqrestore(&iclock_lock, flags);
return iclock;
}
EXPORT_SYMBOL(mISDN_register_clock);
void
mISDN_unregister_clock(struct mISDNclock *iclock)
{
u_long flags;
if (*debug & (DEBUG_CORE | DEBUG_CLOCK))
printk(KERN_DEBUG "%s: %s %d\n", __func__, iclock->name,
iclock->pri);
write_lock_irqsave(&iclock_lock, flags);
if (iclock_current == iclock) {
if (*debug & DEBUG_CLOCK)
printk(KERN_DEBUG
"Current clock source '%s' unregisters.\n",
iclock->name);
iclock->ctl(iclock->priv, 0);
}
list_del(&iclock->list);
select_iclock();
write_unlock_irqrestore(&iclock_lock, flags);
}
EXPORT_SYMBOL(mISDN_unregister_clock);
void
mISDN_clock_update(struct mISDNclock *iclock, int samples, ktime_t *timestamp)
{
u_long flags;
ktime_t timestamp_now;
u16 delta;
write_lock_irqsave(&iclock_lock, flags);
if (iclock_current != iclock) {
printk(KERN_ERR "%s: '%s' sends us clock updates, but we do "
"listen to '%s'. This is a bug!\n", __func__,
iclock->name,
iclock_current ? iclock_current->name : "nothing");
iclock->ctl(iclock->priv, 0);
write_unlock_irqrestore(&iclock_lock, flags);
return;
}
if (iclock_timestamp_valid) {
/* increment sample counter by given samples */
iclock_count += samples;
		if (timestamp) { /* timestamp must be set if the call was delayed */
iclock_timestamp = *timestamp;
} else {
iclock_timestamp = ktime_get();
}
} else {
/* calc elapsed time by system clock */
		if (timestamp) { /* timestamp must be set if the call was delayed */
timestamp_now = *timestamp;
} else {
timestamp_now = ktime_get();
}
delta = ktime_divns(ktime_sub(timestamp_now, iclock_timestamp),
(NSEC_PER_SEC / 8000));
/* add elapsed time to counter and set new timestamp */
iclock_count += delta;
iclock_timestamp = timestamp_now;
iclock_timestamp_valid = 1;
if (*debug & DEBUG_CLOCK)
printk("Received first clock from source '%s'.\n",
iclock_current ? iclock_current->name : "nothing");
}
write_unlock_irqrestore(&iclock_lock, flags);
}
EXPORT_SYMBOL(mISDN_clock_update);
unsigned short
mISDN_clock_get(void)
{
u_long flags;
ktime_t timestamp_now;
u16 delta;
u16 count;
read_lock_irqsave(&iclock_lock, flags);
/* calc elapsed time by system clock */
timestamp_now = ktime_get();
delta = ktime_divns(ktime_sub(timestamp_now, iclock_timestamp),
(NSEC_PER_SEC / 8000));
/* add elapsed time to counter */
count = iclock_count + delta;
read_unlock_irqrestore(&iclock_lock, flags);
return count;
}
EXPORT_SYMBOL(mISDN_clock_get);
| linux-master | drivers/isdn/mISDN/clock.c |
/*
* DTMF decoder.
*
* Copyright by Andreas Eversberg ([email protected])
* based on different decoders such as ISDN4Linux
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
#include "core.h"
#include "dsp.h"
#define NCOEFF 8 /* number of frequencies to be analyzed */
/* For DTMF recognition:
* 2 * cos(2 * PI * k / N) precalculated for all k
*/
static u64 cos2pik[NCOEFF] =
{
/* k << 15 (source: hfc-4s/8s documentation (www.colognechip.de)) */
55960, 53912, 51402, 48438, 38146, 32650, 26170, 18630
};
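/* The first four entries are the DTMF low-group frequencies (697, 770, 852,
 * 941 Hz), the last four the high group (1209, 1336, 1477, 1633 Hz).
 */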
/* digit matrix */
static char dtmf_matrix[4][4] =
{
{'1', '2', '3', 'A'},
{'4', '5', '6', 'B'},
{'7', '8', '9', 'C'},
{'*', '0', '#', 'D'}
};
/* dtmf detection using goertzel algorithm
* init function
*/
void dsp_dtmf_goertzel_init(struct dsp *dsp)
{
dsp->dtmf.size = 0;
dsp->dtmf.lastwhat = '\0';
dsp->dtmf.lastdigit = '\0';
dsp->dtmf.count = 0;
}
/* check for hardware or software features
*/
void dsp_dtmf_hardware(struct dsp *dsp)
{
int hardware = 1;
if (!dsp->dtmf.enable)
return;
if (!dsp->features.hfc_dtmf)
hardware = 0;
/* check for volume change */
if (dsp->tx_volume) {
if (dsp_debug & DEBUG_DSP_DTMF)
printk(KERN_DEBUG "%s dsp %s cannot do hardware DTMF, "
"because tx_volume is changed\n",
__func__, dsp->name);
hardware = 0;
}
if (dsp->rx_volume) {
if (dsp_debug & DEBUG_DSP_DTMF)
printk(KERN_DEBUG "%s dsp %s cannot do hardware DTMF, "
"because rx_volume is changed\n",
__func__, dsp->name);
hardware = 0;
}
/* check if encryption is enabled */
if (dsp->bf_enable) {
if (dsp_debug & DEBUG_DSP_DTMF)
printk(KERN_DEBUG "%s dsp %s cannot do hardware DTMF, "
"because encryption is enabled\n",
__func__, dsp->name);
hardware = 0;
}
/* check if pipeline exists */
if (dsp->pipeline.inuse) {
if (dsp_debug & DEBUG_DSP_DTMF)
printk(KERN_DEBUG "%s dsp %s cannot do hardware DTMF, "
"because pipeline exists.\n",
__func__, dsp->name);
hardware = 0;
}
dsp->dtmf.hardware = hardware;
dsp->dtmf.software = !hardware;
}
/*************************************************************
* calculate the coefficients of the given sample and decode *
*************************************************************/
/* the given sample is decoded. if the sample is not long enough for a
* complete frame, the decoding is finished and continued with the next
* call of this function.
*
 * The algorithm is very good for detection with a minimum of errors. I
 * tested it a lot. It even works with very short tones (40 ms). The only
 * disadvantage is that it does not work well when the two tones have
 * different volumes, which can happen with acoustically coupled dialers.
 * It sometimes detects tones during speech, which is normal for decoders.
 * Use tone sequences to give commands during calls.
*
 * dsp - the dsp instance holding the current dtmf state
 * data and len - the sample buffer and its length in bytes
* fmt - 0 = alaw, 1 = ulaw, 2 = coefficients from HFC DTMF hw-decoder
*/
u8
*dsp_dtmf_goertzel_decode(struct dsp *dsp, u8 *data, int len, int fmt)
{
u8 what;
int size;
signed short *buf;
s32 sk, sk1, sk2;
int k, n, i;
s32 *hfccoeff;
s32 result[NCOEFF], tresh, treshl;
int lowgroup, highgroup;
s64 cos2pik_;
dsp->dtmf.digits[0] = '\0';
	/* Note: The function will loop until the buffer no longer has enough
	 * samples left to decode a full frame.
*/
again:
/* convert samples */
size = dsp->dtmf.size;
buf = dsp->dtmf.buffer;
switch (fmt) {
case 0: /* alaw */
case 1: /* ulaw */
while (size < DSP_DTMF_NPOINTS && len) {
buf[size++] = dsp_audio_law_to_s32[*data++];
len--;
}
break;
case 2: /* HFC coefficients */
default:
if (len < 64) {
if (len > 0)
printk(KERN_ERR "%s: coefficients have invalid "
"size. (is=%d < must=%d)\n",
__func__, len, 64);
return dsp->dtmf.digits;
}
hfccoeff = (s32 *)data;
for (k = 0; k < NCOEFF; k++) {
sk2 = (*hfccoeff++) >> 4;
sk = (*hfccoeff++) >> 4;
if (sk > 32767 || sk < -32767 || sk2 > 32767
|| sk2 < -32767)
printk(KERN_WARNING
"DTMF-Detection overflow\n");
/* compute |X(k)|**2 */
result[k] =
(sk * sk) -
(((cos2pik[k] * sk) >> 15) * sk2) +
(sk2 * sk2);
}
data += 64;
len -= 64;
goto coefficients;
break;
}
dsp->dtmf.size = size;
if (size < DSP_DTMF_NPOINTS)
return dsp->dtmf.digits;
dsp->dtmf.size = 0;
/* now we have a full buffer of signed long samples - we do goertzel */
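	/* Goertzel recurrence: s[n] = x[n] + 2*cos(w_k)*s[n-1] - s[n-2], with
	 * 2*cos(w_k) taken from cos2pik[] in Q15 format (hence the >> 15).
	 * The squared magnitude below is s1^2 + s2^2 - 2*cos(w_k)*s1*s2; the
	 * >> 8 only rescales to avoid 32-bit overflow.
	 */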
for (k = 0; k < NCOEFF; k++) {
sk = 0;
sk1 = 0;
sk2 = 0;
buf = dsp->dtmf.buffer;
cos2pik_ = cos2pik[k];
for (n = 0; n < DSP_DTMF_NPOINTS; n++) {
sk = ((cos2pik_ * sk1) >> 15) - sk2 + (*buf++);
sk2 = sk1;
sk1 = sk;
}
sk >>= 8;
sk2 >>= 8;
if (sk > 32767 || sk < -32767 || sk2 > 32767 || sk2 < -32767)
printk(KERN_WARNING "DTMF-Detection overflow\n");
/* compute |X(k)|**2 */
result[k] =
(sk * sk) -
(((cos2pik[k] * sk) >> 15) * sk2) +
(sk2 * sk2);
}
/* our (squared) coefficients have been calculated, we need to process
* them.
*/
coefficients:
tresh = 0;
for (i = 0; i < NCOEFF; i++) {
if (result[i] < 0)
result[i] = 0;
if (result[i] > dsp->dtmf.treshold) {
if (result[i] > tresh)
tresh = result[i];
}
}
if (tresh == 0) {
what = 0;
goto storedigit;
}
if (dsp_debug & DEBUG_DSP_DTMFCOEFF) {
s32 tresh_100 = tresh/100;
if (tresh_100 == 0) {
tresh_100 = 1;
printk(KERN_DEBUG
"tresh(%d) too small set tresh/100 to 1\n",
tresh);
}
printk(KERN_DEBUG "a %3d %3d %3d %3d %3d %3d %3d %3d"
" tr:%3d r %3d %3d %3d %3d %3d %3d %3d %3d\n",
result[0] / 10000, result[1] / 10000, result[2] / 10000,
result[3] / 10000, result[4] / 10000, result[5] / 10000,
result[6] / 10000, result[7] / 10000, tresh / 10000,
result[0] / (tresh_100), result[1] / (tresh_100),
result[2] / (tresh_100), result[3] / (tresh_100),
result[4] / (tresh_100), result[5] / (tresh_100),
result[6] / (tresh_100), result[7] / (tresh_100));
}
/* calc digit (lowgroup/highgroup) */
lowgroup = -1;
highgroup = -1;
treshl = tresh >> 3; /* tones which are not on, must be below 9 dB */
tresh = tresh >> 2; /* touchtones must match within 6 dB */
for (i = 0; i < NCOEFF; i++) {
if (result[i] < treshl)
continue; /* ignore */
if (result[i] < tresh) {
lowgroup = -1;
highgroup = -1;
break; /* noise in between */
}
/* good level found. This is allowed only one time per group */
if (i < NCOEFF / 2) {
/* lowgroup */
if (lowgroup >= 0) {
/* Bad. Another tone found. */
lowgroup = -1;
break;
} else
lowgroup = i;
} else {
/* higroup */
if (highgroup >= 0) {
/* Bad. Another tone found. */
highgroup = -1;
break;
} else
highgroup = i - (NCOEFF / 2);
}
}
/* get digit or null */
what = 0;
if (lowgroup >= 0 && highgroup >= 0)
what = dtmf_matrix[lowgroup][highgroup];
storedigit:
if (what && (dsp_debug & DEBUG_DSP_DTMF))
printk(KERN_DEBUG "DTMF what: %c\n", what);
if (dsp->dtmf.lastwhat != what)
dsp->dtmf.count = 0;
/* the tone (or no tone) must remain 3 times without change */
if (dsp->dtmf.count == 2) {
if (dsp->dtmf.lastdigit != what) {
dsp->dtmf.lastdigit = what;
if (what) {
if (dsp_debug & DEBUG_DSP_DTMF)
printk(KERN_DEBUG "DTMF digit: %c\n",
what);
if ((strlen(dsp->dtmf.digits) + 1)
< sizeof(dsp->dtmf.digits)) {
dsp->dtmf.digits[strlen(
dsp->dtmf.digits) + 1] = '\0';
dsp->dtmf.digits[strlen(
dsp->dtmf.digits)] = what;
}
}
}
} else
dsp->dtmf.count++;
dsp->dtmf.lastwhat = what;
goto again;
}
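/*
 * Illustrative sketch (not part of the original code, never built): a caller
 * inside the dsp module could feed received a-law data to the decoder and
 * walk the returned digit string; the function and buffer names below are
 * made up for the example.
 */
#if 0
static void example_feed_dtmf(struct dsp *dsp, u8 *data, int len)
{
	u8 *digit;

	/* fmt 0 = a-law samples, see the description above */
	digit = dsp_dtmf_goertzel_decode(dsp, data, len, 0);
	while (*digit) {
		printk(KERN_DEBUG "detected DTMF digit '%c'\n", *digit);
		digit++;
	}
}
#endif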
| linux-master | drivers/isdn/mISDN/dsp_dtmf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 by Karsten Keil <[email protected]>
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/mISDNif.h>
#include "core.h"
static u_int debug;
MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL");
module_param(debug, uint, S_IRUGO | S_IWUSR);
static u64 device_ids;
#define MAX_DEVICE_ID 63
static LIST_HEAD(Bprotocols);
static DEFINE_RWLOCK(bp_lock);
static void mISDN_dev_release(struct device *dev)
{
/* nothing to do: the device is part of its parent's data structure */
}
static ssize_t id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
if (!mdev)
return -ENODEV;
return sprintf(buf, "%d\n", mdev->id);
}
static DEVICE_ATTR_RO(id);
static ssize_t nrbchan_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
if (!mdev)
return -ENODEV;
return sprintf(buf, "%d\n", mdev->nrbchan);
}
static DEVICE_ATTR_RO(nrbchan);
static ssize_t d_protocols_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
if (!mdev)
return -ENODEV;
return sprintf(buf, "%d\n", mdev->Dprotocols);
}
static DEVICE_ATTR_RO(d_protocols);
static ssize_t b_protocols_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
if (!mdev)
return -ENODEV;
return sprintf(buf, "%d\n", mdev->Bprotocols | get_all_Bprotocols());
}
static DEVICE_ATTR_RO(b_protocols);
static ssize_t protocol_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
if (!mdev)
return -ENODEV;
return sprintf(buf, "%d\n", mdev->D.protocol);
}
static DEVICE_ATTR_RO(protocol);
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
strcpy(buf, dev_name(dev));
return strlen(buf);
}
static DEVICE_ATTR_RO(name);
#if 0 /* hangs */
static ssize_t name_set(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int err = 0;
char *out = kmalloc(count + 1, GFP_KERNEL);
if (!out)
return -ENOMEM;
memcpy(out, buf, count);
if (count && out[count - 1] == '\n')
out[--count] = 0;
if (count)
err = device_rename(dev, out);
kfree(out);
return (err < 0) ? err : count;
}
static DEVICE_ATTR_RW(name);
#endif
static ssize_t channelmap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
char *bp = buf;
int i;
for (i = 0; i <= mdev->nrbchan; i++)
*bp++ = test_channelmap(i, mdev->channelmap) ? '1' : '0';
return bp - buf;
}
static DEVICE_ATTR_RO(channelmap);
static struct attribute *mISDN_attrs[] = {
&dev_attr_id.attr,
&dev_attr_d_protocols.attr,
&dev_attr_b_protocols.attr,
&dev_attr_protocol.attr,
&dev_attr_channelmap.attr,
&dev_attr_nrbchan.attr,
&dev_attr_name.attr,
NULL,
};
ATTRIBUTE_GROUPS(mISDN);
static int mISDN_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct mISDNdevice *mdev = dev_to_mISDN(dev);
if (!mdev)
return 0;
if (add_uevent_var(env, "nchans=%d", mdev->nrbchan))
return -ENOMEM;
return 0;
}
static struct class mISDN_class = {
.name = "mISDN",
.dev_uevent = mISDN_uevent,
.dev_groups = mISDN_groups,
.dev_release = mISDN_dev_release,
};
static int
_get_mdevice(struct device *dev, const void *id)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
if (!mdev)
return 0;
if (mdev->id != *(const u_int *)id)
return 0;
return 1;
}
struct mISDNdevice
*get_mdevice(u_int id)
{
return dev_to_mISDN(class_find_device(&mISDN_class, NULL, &id,
_get_mdevice));
}
static int
_get_mdevice_count(struct device *dev, void *cnt)
{
*(int *)cnt += 1;
return 0;
}
int
get_mdevice_count(void)
{
int cnt = 0;
class_for_each_device(&mISDN_class, NULL, &cnt, _get_mdevice_count);
return cnt;
}
static int
get_free_devid(void)
{
u_int i;
for (i = 0; i <= MAX_DEVICE_ID; i++)
if (!test_and_set_bit(i, (u_long *)&device_ids))
break;
if (i > MAX_DEVICE_ID)
return -EBUSY;
return i;
}
int
mISDN_register_device(struct mISDNdevice *dev,
struct device *parent, char *name)
{
int err;
err = get_free_devid();
if (err < 0)
return err;
dev->id = err;
device_initialize(&dev->dev);
if (name && name[0])
dev_set_name(&dev->dev, "%s", name);
else
dev_set_name(&dev->dev, "mISDN%d", dev->id);
if (debug & DEBUG_CORE)
printk(KERN_DEBUG "mISDN_register %s %d\n",
dev_name(&dev->dev), dev->id);
dev->dev.class = &mISDN_class;
err = create_stack(dev);
if (err)
goto error1;
dev->dev.platform_data = dev;
dev->dev.parent = parent;
dev_set_drvdata(&dev->dev, dev);
err = device_add(&dev->dev);
if (err)
goto error3;
return 0;
error3:
delete_stack(dev);
error1:
put_device(&dev->dev);
return err;
}
EXPORT_SYMBOL(mISDN_register_device);
void
mISDN_unregister_device(struct mISDNdevice *dev) {
if (debug & DEBUG_CORE)
printk(KERN_DEBUG "mISDN_unregister %s %d\n",
dev_name(&dev->dev), dev->id);
/* sysfs_remove_link(&dev->dev.kobj, "device"); */
device_del(&dev->dev);
dev_set_drvdata(&dev->dev, NULL);
test_and_clear_bit(dev->id, (u_long *)&device_ids);
delete_stack(dev);
put_device(&dev->dev);
}
EXPORT_SYMBOL(mISDN_unregister_device);
u_int
get_all_Bprotocols(void)
{
struct Bprotocol *bp;
u_int m = 0;
read_lock(&bp_lock);
list_for_each_entry(bp, &Bprotocols, list)
m |= bp->Bprotocols;
read_unlock(&bp_lock);
return m;
}
struct Bprotocol *
get_Bprotocol4mask(u_int m)
{
struct Bprotocol *bp;
read_lock(&bp_lock);
list_for_each_entry(bp, &Bprotocols, list)
if (bp->Bprotocols & m) {
read_unlock(&bp_lock);
return bp;
}
read_unlock(&bp_lock);
return NULL;
}
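/*
 * B-protocol ids (ISDN_P_B_*) map to single bits of a protocol mask;
 * convert the id to its mask bit and look the protocol up by mask.
 */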
struct Bprotocol *
get_Bprotocol4id(u_int id)
{
u_int m;
if (id < ISDN_P_B_START || id > 63) {
printk(KERN_WARNING "%s id not in range %d\n",
__func__, id);
return NULL;
}
m = 1 << (id & ISDN_P_B_MASK);
return get_Bprotocol4mask(m);
}
int
mISDN_register_Bprotocol(struct Bprotocol *bp)
{
u_long flags;
struct Bprotocol *old;
if (debug & DEBUG_CORE)
printk(KERN_DEBUG "%s: %s/%x\n", __func__,
bp->name, bp->Bprotocols);
old = get_Bprotocol4mask(bp->Bprotocols);
if (old) {
printk(KERN_WARNING
"register duplicate protocol old %s/%x new %s/%x\n",
old->name, old->Bprotocols, bp->name, bp->Bprotocols);
return -EBUSY;
}
write_lock_irqsave(&bp_lock, flags);
list_add_tail(&bp->list, &Bprotocols);
write_unlock_irqrestore(&bp_lock, flags);
return 0;
}
EXPORT_SYMBOL(mISDN_register_Bprotocol);
void
mISDN_unregister_Bprotocol(struct Bprotocol *bp)
{
u_long flags;
if (debug & DEBUG_CORE)
printk(KERN_DEBUG "%s: %s/%x\n", __func__, bp->name,
bp->Bprotocols);
write_lock_irqsave(&bp_lock, flags);
list_del(&bp->list);
write_unlock_irqrestore(&bp_lock, flags);
}
EXPORT_SYMBOL(mISDN_unregister_Bprotocol);
static const char *msg_no_channel = "<no channel>";
static const char *msg_no_stack = "<no stack>";
static const char *msg_no_stackdev = "<no stack device>";
const char *mISDNDevName4ch(struct mISDNchannel *ch)
{
if (!ch)
return msg_no_channel;
if (!ch->st)
return msg_no_stack;
if (!ch->st->dev)
return msg_no_stackdev;
return dev_name(&ch->st->dev->dev);
};
EXPORT_SYMBOL(mISDNDevName4ch);
static int
mISDNInit(void)
{
int err;
printk(KERN_INFO "Modular ISDN core version %d.%d.%d\n",
MISDN_MAJOR_VERSION, MISDN_MINOR_VERSION, MISDN_RELEASE);
mISDN_init_clock(&debug);
mISDN_initstack(&debug);
err = class_register(&mISDN_class);
if (err)
goto error1;
err = mISDN_inittimer(&debug);
if (err)
goto error2;
err = Isdnl1_Init(&debug);
if (err)
goto error3;
err = Isdnl2_Init(&debug);
if (err)
goto error4;
err = misdn_sock_init(&debug);
if (err)
goto error5;
return 0;
error5:
Isdnl2_cleanup();
error4:
Isdnl1_cleanup();
error3:
mISDN_timer_cleanup();
error2:
class_unregister(&mISDN_class);
error1:
return err;
}
static void mISDN_cleanup(void)
{
misdn_sock_cleanup();
Isdnl2_cleanup();
Isdnl1_cleanup();
mISDN_timer_cleanup();
class_unregister(&mISDN_class);
printk(KERN_DEBUG "mISDNcore unloaded\n");
}
module_init(mISDNInit);
module_exit(mISDN_cleanup);
| linux-master | drivers/isdn/mISDN/core.c |
/*
* Audio support data for ISDN4Linux.
*
* Copyright Andreas Eversberg ([email protected])
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/gfp.h>
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
#include "core.h"
#include "dsp.h"
#define DATA_S sample_silence
#define SIZE_S (&sizeof_silence)
#define DATA_GA sample_german_all
#define SIZE_GA (&sizeof_german_all)
#define DATA_GO sample_german_old
#define SIZE_GO (&sizeof_german_old)
#define DATA_DT sample_american_dialtone
#define SIZE_DT (&sizeof_american_dialtone)
#define DATA_RI sample_american_ringing
#define SIZE_RI (&sizeof_american_ringing)
#define DATA_BU sample_american_busy
#define SIZE_BU (&sizeof_american_busy)
#define DATA_S1 sample_special1
#define SIZE_S1 (&sizeof_special1)
#define DATA_S2 sample_special2
#define SIZE_S2 (&sizeof_special2)
#define DATA_S3 sample_special3
#define SIZE_S3 (&sizeof_special3)
/***************/
/* tones loops */
/***************/
/* all tones are alaw encoded */
/* the last sample+1 is in phase with the first sample. the error is low */
static u8 sample_german_all[] = {
0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
0xdc, 0xfc, 0x6c,
0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
0xdc, 0xfc, 0x6c,
0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
0xdc, 0xfc, 0x6c,
0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
0xdc, 0xfc, 0x6c,
};
static u32 sizeof_german_all = sizeof(sample_german_all);
static u8 sample_german_old[] = {
0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
0x8c,
0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
0x8c,
0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
0x8c,
0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
0x8c,
};
static u32 sizeof_german_old = sizeof(sample_german_old);
static u8 sample_american_dialtone[] = {
0x2a, 0x18, 0x90, 0x6c, 0x4c, 0xbc, 0x4c, 0x6c,
0x10, 0x58, 0x32, 0xb9, 0x31, 0x2d, 0x8d, 0x0d,
0x8d, 0x2d, 0x31, 0x99, 0x0f, 0x28, 0x60, 0xf0,
0xd0, 0x50, 0xd0, 0x30, 0x60, 0x08, 0x8e, 0x67,
0x09, 0x19, 0x21, 0xe1, 0xd9, 0xb9, 0x29, 0x67,
0x83, 0x02, 0xce, 0xbe, 0xee, 0x1a, 0x1b, 0xef,
0xbf, 0xcf, 0x03, 0x82, 0x66, 0x28, 0xb8, 0xd8,
0xe0, 0x20, 0x18, 0x08, 0x66, 0x8f, 0x09, 0x61,
0x31, 0xd1, 0x51, 0xd1, 0xf1, 0x61, 0x29, 0x0e,
0x98, 0x30, 0x2c, 0x8c, 0x0c, 0x8c, 0x2c, 0x30,
0xb8, 0x33, 0x59, 0x11, 0x6d, 0x4d, 0xbd, 0x4d,
0x6d, 0x91, 0x19,
};
static u32 sizeof_american_dialtone = sizeof(sample_american_dialtone);
static u8 sample_american_ringing[] = {
0x2a, 0xe0, 0xac, 0x0c, 0xbc, 0x4c, 0x8c, 0x90,
0x48, 0xc7, 0xc1, 0xed, 0xcd, 0x4d, 0xcd, 0xed,
0xc1, 0xb7, 0x08, 0x30, 0xec, 0xcc, 0xcc, 0x8c,
0x10, 0x58, 0x1a, 0x99, 0x71, 0xed, 0x8d, 0x8d,
0x2d, 0x41, 0x89, 0x9e, 0x20, 0x70, 0x2c, 0xec,
0x2c, 0x70, 0x20, 0x86, 0x77, 0xe1, 0x31, 0x11,
0xd1, 0xf1, 0x81, 0x09, 0xa3, 0x56, 0x58, 0x00,
0x40, 0xc0, 0x60, 0x38, 0x46, 0x43, 0x57, 0x39,
0xd9, 0x59, 0x99, 0xc9, 0x77, 0x2f, 0x2e, 0xc6,
0xd6, 0x28, 0xd6, 0x36, 0x26, 0x2e, 0x8a, 0xa3,
0x43, 0x63, 0x4b, 0x4a, 0x62, 0x42, 0xa2, 0x8b,
0x2f, 0x27, 0x37, 0xd7, 0x29, 0xd7, 0xc7, 0x2f,
0x2e, 0x76, 0xc8, 0x98, 0x58, 0xd8, 0x38, 0x56,
0x42, 0x47, 0x39, 0x61, 0xc1, 0x41, 0x01, 0x59,
0x57, 0xa2, 0x08, 0x80, 0xf0, 0xd0, 0x10, 0x30,
0xe0, 0x76, 0x87, 0x21, 0x71, 0x2d, 0xed, 0x2d,
0x71, 0x21, 0x9f, 0x88, 0x40, 0x2c, 0x8c, 0x8c,
0xec, 0x70, 0x98, 0x1b, 0x59, 0x11, 0x8d, 0xcd,
0xcd, 0xed, 0x31, 0x09, 0xb6, 0xc0, 0xec, 0xcc,
0x4c, 0xcc, 0xec, 0xc0, 0xc6, 0x49, 0x91, 0x8d,
0x4d, 0xbd, 0x0d, 0xad, 0xe1,
};
static u32 sizeof_american_ringing = sizeof(sample_american_ringing);
static u8 sample_american_busy[] = {
0x2a, 0x00, 0x6c, 0x4c, 0x4c, 0x6c, 0xb0, 0x66,
0x99, 0x11, 0x6d, 0x8d, 0x2d, 0x41, 0xd7, 0x96,
0x60, 0xf0, 0x70, 0x40, 0x58, 0xf6, 0x53, 0x57,
0x09, 0x89, 0xd7, 0x5f, 0xe3, 0x2a, 0xe3, 0x5f,
0xd7, 0x89, 0x09, 0x57, 0x53, 0xf6, 0x58, 0x40,
0x70, 0xf0, 0x60, 0x96, 0xd7, 0x41, 0x2d, 0x8d,
0x6d, 0x11, 0x99, 0x66, 0xb0, 0x6c, 0x4c, 0x4c,
0x6c, 0x00, 0x2a, 0x01, 0x6d, 0x4d, 0x4d, 0x6d,
0xb1, 0x67, 0x98, 0x10, 0x6c, 0x8c, 0x2c, 0x40,
0xd6, 0x97, 0x61, 0xf1, 0x71, 0x41, 0x59, 0xf7,
0x52, 0x56, 0x08, 0x88, 0xd6, 0x5e, 0xe2, 0x2a,
0xe2, 0x5e, 0xd6, 0x88, 0x08, 0x56, 0x52, 0xf7,
0x59, 0x41, 0x71, 0xf1, 0x61, 0x97, 0xd6, 0x40,
0x2c, 0x8c, 0x6c, 0x10, 0x98, 0x67, 0xb1, 0x6d,
0x4d, 0x4d, 0x6d, 0x01,
};
static u32 sizeof_american_busy = sizeof(sample_american_busy);
static u8 sample_special1[] = {
0x2a, 0x2c, 0xbc, 0x6c, 0xd6, 0x71, 0xbd, 0x0d,
0xd9, 0x80, 0xcc, 0x4c, 0x40, 0x39, 0x0d, 0xbd,
0x11, 0x86, 0xec, 0xbc, 0xec, 0x0e, 0x51, 0xbd,
0x8d, 0x89, 0x30, 0x4c, 0xcc, 0xe0, 0xe1, 0xcd,
0x4d, 0x31, 0x88, 0x8c, 0xbc, 0x50, 0x0f, 0xed,
0xbd, 0xed, 0x87, 0x10, 0xbc, 0x0c, 0x38, 0x41,
0x4d, 0xcd, 0x81, 0xd8, 0x0c, 0xbc, 0x70, 0xd7,
0x6d, 0xbd, 0x2d,
};
static u32 sizeof_special1 = sizeof(sample_special1);
static u8 sample_special2[] = {
0x2a, 0xcc, 0x8c, 0xd7, 0x4d, 0x2d, 0x18, 0xbc,
0x10, 0xc1, 0xbd, 0xc1, 0x10, 0xbc, 0x18, 0x2d,
0x4d, 0xd7, 0x8c, 0xcc, 0x2a, 0xcd, 0x8d, 0xd6,
0x4c, 0x2c, 0x19, 0xbd, 0x11, 0xc0, 0xbc, 0xc0,
0x11, 0xbd, 0x19, 0x2c, 0x4c, 0xd6, 0x8d, 0xcd,
0x2a, 0xcc, 0x8c, 0xd7, 0x4d, 0x2d, 0x18, 0xbc,
0x10, 0xc1, 0xbd, 0xc1, 0x10, 0xbc, 0x18, 0x2d,
0x4d, 0xd7, 0x8c, 0xcc, 0x2a, 0xcd, 0x8d, 0xd6,
0x4c, 0x2c, 0x19, 0xbd, 0x11, 0xc0, 0xbc, 0xc0,
0x11, 0xbd, 0x19, 0x2c, 0x4c, 0xd6, 0x8d, 0xcd,
};
static u32 sizeof_special2 = sizeof(sample_special2);
static u8 sample_special3[] = {
0x2a, 0xbc, 0x18, 0xcd, 0x11, 0x2c, 0x8c, 0xc1,
0x4d, 0xd6, 0xbc, 0xd6, 0x4d, 0xc1, 0x8c, 0x2c,
0x11, 0xcd, 0x18, 0xbc, 0x2a, 0xbd, 0x19, 0xcc,
0x10, 0x2d, 0x8d, 0xc0, 0x4c, 0xd7, 0xbd, 0xd7,
0x4c, 0xc0, 0x8d, 0x2d, 0x10, 0xcc, 0x19, 0xbd,
0x2a, 0xbc, 0x18, 0xcd, 0x11, 0x2c, 0x8c, 0xc1,
0x4d, 0xd6, 0xbc, 0xd6, 0x4d, 0xc1, 0x8c, 0x2c,
0x11, 0xcd, 0x18, 0xbc, 0x2a, 0xbd, 0x19, 0xcc,
0x10, 0x2d, 0x8d, 0xc0, 0x4c, 0xd7, 0xbd, 0xd7,
0x4c, 0xc0, 0x8d, 0x2d, 0x10, 0xcc, 0x19, 0xbd,
};
static u32 sizeof_special3 = sizeof(sample_special3);
static u8 sample_silence[] = {
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
};
static u32 sizeof_silence = sizeof(sample_silence);
struct tones_samples {
u32 *len;
u8 *data;
};
static struct
tones_samples samples[] = {
{&sizeof_german_all, sample_german_all},
{&sizeof_german_old, sample_german_old},
{&sizeof_american_dialtone, sample_american_dialtone},
{&sizeof_american_ringing, sample_american_ringing},
{&sizeof_american_busy, sample_american_busy},
{&sizeof_special1, sample_special1},
{&sizeof_special2, sample_special2},
{&sizeof_special3, sample_special3},
{NULL, NULL},
};
/***********************************
* generate ulaw from alaw samples *
***********************************/
void
dsp_audio_generate_ulaw_samples(void)
{
int i, j;
i = 0;
while (samples[i].len) {
j = 0;
while (j < (*samples[i].len)) {
samples[i].data[j] =
dsp_audio_alaw_to_ulaw[samples[i].data[j]];
j++;
}
i++;
}
}
/****************************
* tone sequence definition *
****************************/
static struct pattern {
int tone;
u8 *data[10];
u32 *siz[10];
u32 seq[10];
} pattern[] = {
{TONE_GERMAN_DIALTONE,
{DATA_GA, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{1900, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDDIALTONE,
{DATA_GO, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{1998, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_DIALTONE,
{DATA_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_DIALPBX,
{DATA_GA, DATA_S, DATA_GA, DATA_S, DATA_GA, DATA_S, NULL, NULL, NULL,
NULL},
{SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, NULL, NULL, NULL,
NULL},
{2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
{TONE_GERMAN_OLDDIALPBX,
{DATA_GO, DATA_S, DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL,
NULL},
{SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL,
NULL},
{2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
{TONE_AMERICAN_DIALPBX,
{DATA_DT, DATA_S, DATA_DT, DATA_S, DATA_DT, DATA_S, NULL, NULL, NULL,
NULL},
{SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, NULL, NULL, NULL,
NULL},
{2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
{TONE_GERMAN_RINGING,
{DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 32000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDRINGING,
{DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 40000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_RINGING,
{DATA_RI, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_RI, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 32000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_RINGPBX,
{DATA_GA, DATA_S, DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDRINGPBX,
{DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_RINGPBX,
{DATA_RI, DATA_S, DATA_RI, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_RI, SIZE_S, SIZE_RI, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_BUSY,
{DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDBUSY,
{DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{1000, 5000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_BUSY,
{DATA_BU, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_BU, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_HANGUP,
{DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDHANGUP,
{DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{1000, 5000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_HANGUP,
{DATA_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_SPECIAL_INFO,
{DATA_S1, DATA_S2, DATA_S3, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_S1, SIZE_S2, SIZE_S3, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{2666, 2666, 2666, 8002, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_GASSENBESETZT,
{DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{2000, 2000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_AUFSCHALTTON,
{DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{1000, 5000, 1000, 17000, 0, 0, 0, 0, 0, 0} },
{0,
{NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
/******************
* copy tone data *
******************/
/* Copies the samples of the current tone pattern into the given buffer.
 * The count is advanced and may restart from 0 at each pattern period.
 * The trick is to precalculate the pointers and lengths so that only one
 * memcpy per call is needed, or two if the tone sequence changes.
 *
 * dsp  - the dsp instance holding the current tone state
 * data - the destination buffer
 * len  - the number of bytes to copy
 *
 * If no tone is set, silence is copied instead.
 */
void dsp_tone_copy(struct dsp *dsp, u8 *data, int len)
{
int index, count, start, num;
struct pattern *pat;
struct dsp_tone *tone = &dsp->tone;
/* if we have no tone, we copy silence */
if (!tone->tone) {
memset(data, dsp_silence, len);
return;
}
/* process pattern */
pat = (struct pattern *)tone->pattern;
/* points to the current pattern */
index = tone->index; /* gives current sequence index */
count = tone->count; /* gives current sample */
/* copy sample */
while (len) {
/* find sample to start with */
while (42) {
/* wrap around */
if (!pat->seq[index]) {
count = 0;
index = 0;
}
/* check if we are currently playing this tone */
if (count < pat->seq[index])
break;
if (dsp_debug & DEBUG_DSP_TONE)
printk(KERN_DEBUG "%s: reaching next sequence "
"(index=%d)\n", __func__, index);
count -= pat->seq[index];
index++;
}
/* calculate start and number of samples */
start = count % (*(pat->siz[index]));
num = len;
if (num + count > pat->seq[index])
num = pat->seq[index] - count;
if (num + start > (*(pat->siz[index])))
num = (*(pat->siz[index])) - start;
/* copy memory */
memcpy(data, pat->data[index] + start, num);
/* reduce length */
data += num;
count += num;
len -= num;
}
tone->index = index;
tone->count = count;
return;
}
/*******************************
* send HW message to hfc card *
*******************************/
static void
dsp_tone_hw_message(struct dsp *dsp, u8 *sample, int len)
{
struct sk_buff *nskb;
/* unlocking is not required, because we don't expect a response */
nskb = _alloc_mISDN_skb(PH_CONTROL_REQ,
(len) ? HFC_SPL_LOOP_ON : HFC_SPL_LOOP_OFF, len, sample,
GFP_ATOMIC);
if (nskb) {
if (dsp->ch.peer) {
if (dsp->ch.recv(dsp->ch.peer, nskb))
dev_kfree_skb(nskb);
} else
dev_kfree_skb(nskb);
}
}
/*****************
* timer expires *
*****************/
void
dsp_tone_timeout(struct timer_list *t)
{
struct dsp *dsp = from_timer(dsp, t, tone.tl);
struct dsp_tone *tone = &dsp->tone;
struct pattern *pat = (struct pattern *)tone->pattern;
int index = tone->index;
if (!tone->tone)
return;
index++;
if (!pat->seq[index])
index = 0;
tone->index = index;
/* set next tone */
if (pat->data[index] == DATA_S)
dsp_tone_hw_message(dsp, NULL, 0);
else
dsp_tone_hw_message(dsp, pat->data[index], *(pat->siz[index]));
/* set timer */
tone->tl.expires = jiffies + (pat->seq[index] * HZ) / 8000;
add_timer(&tone->tl);
}
/********************
* set/release tone *
********************/
/*
 * tones are realized by streaming or by special loop commands if supported
* by hardware. when hardware is used, the patterns will be controlled by
* timers.
*/
int
dsp_tone(struct dsp *dsp, int tone)
{
struct pattern *pat;
int i;
struct dsp_tone *tonet = &dsp->tone;
tonet->software = 0;
tonet->hardware = 0;
/* we turn off the tone */
if (!tone) {
if (dsp->features.hfc_loops && timer_pending(&tonet->tl))
del_timer(&tonet->tl);
if (dsp->features.hfc_loops)
dsp_tone_hw_message(dsp, NULL, 0);
tonet->tone = 0;
return 0;
}
pat = NULL;
i = 0;
while (pattern[i].tone) {
if (pattern[i].tone == tone) {
pat = &pattern[i];
break;
}
i++;
}
if (!pat) {
printk(KERN_WARNING "dsp: given tone 0x%x is invalid\n", tone);
return -EINVAL;
}
if (dsp_debug & DEBUG_DSP_TONE)
printk(KERN_DEBUG "%s: now starting tone %d (index=%d)\n",
__func__, tone, 0);
tonet->tone = tone;
tonet->pattern = pat;
tonet->index = 0;
tonet->count = 0;
if (dsp->features.hfc_loops) {
tonet->hardware = 1;
/* set first tone */
dsp_tone_hw_message(dsp, pat->data[0], *(pat->siz[0]));
/* set timer */
if (timer_pending(&tonet->tl))
del_timer(&tonet->tl);
tonet->tl.expires = jiffies + (pat->seq[0] * HZ) / 8000;
add_timer(&tonet->tl);
} else {
tonet->software = 1;
}
return 0;
}
| linux-master | drivers/isdn/mISDN/dsp_tones.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* l1oip_codec.c generic codec using lookup table
* -> conversion from a-Law to u-Law
* -> conversion from u-Law to a-Law
 * -> compression by reducing the sample resolution to 4 bits
*
* NOTE: It is not compatible with any standard codec like ADPCM.
*
* Author Andreas Eversberg ([email protected])
*
*/
/*
How the codec works:
--------------------
The volume is increased to increase the dynamic range of the audio signal.
Each sample is converted to a-LAW with only 16 steps of level resolution.
A pair of two samples are stored in one byte.
The first byte is stored in the upper bits, the second byte is stored in the
lower bits.
To speed up compression and decompression, two lookup tables are formed:
- 16 bits index for two samples (law encoded) with 8 bit compressed result.
- 8 bits index for one compressed data with 16 bits decompressed result.
NOTE: The bytes are handled as they are law-encoded.
*/
#include <linux/vmalloc.h>
#include <linux/mISDNif.h>
#include <linux/in.h>
#include "core.h"
#include "l1oip.h"
/* codec lookup tables; avoid per-sample calculations, they would run slower. */
static u8 *table_com;
static u16 *table_dec;
/* alaw -> ulaw */
static u8 alaw_to_ulaw[256] =
{
0xab, 0x2b, 0xe3, 0x63, 0x8b, 0x0b, 0xc9, 0x49,
0xba, 0x3a, 0xf6, 0x76, 0x9b, 0x1b, 0xd7, 0x57,
0xa3, 0x23, 0xdd, 0x5d, 0x83, 0x03, 0xc1, 0x41,
0xb2, 0x32, 0xeb, 0x6b, 0x93, 0x13, 0xcf, 0x4f,
0xaf, 0x2f, 0xe7, 0x67, 0x8f, 0x0f, 0xcd, 0x4d,
0xbe, 0x3e, 0xfe, 0x7e, 0x9f, 0x1f, 0xdb, 0x5b,
0xa7, 0x27, 0xdf, 0x5f, 0x87, 0x07, 0xc5, 0x45,
0xb6, 0x36, 0xef, 0x6f, 0x97, 0x17, 0xd3, 0x53,
0xa9, 0x29, 0xe1, 0x61, 0x89, 0x09, 0xc7, 0x47,
0xb8, 0x38, 0xf2, 0x72, 0x99, 0x19, 0xd5, 0x55,
0xa1, 0x21, 0xdc, 0x5c, 0x81, 0x01, 0xbf, 0x3f,
0xb0, 0x30, 0xe9, 0x69, 0x91, 0x11, 0xce, 0x4e,
0xad, 0x2d, 0xe5, 0x65, 0x8d, 0x0d, 0xcb, 0x4b,
0xbc, 0x3c, 0xfa, 0x7a, 0x9d, 0x1d, 0xd9, 0x59,
0xa5, 0x25, 0xde, 0x5e, 0x85, 0x05, 0xc3, 0x43,
0xb4, 0x34, 0xed, 0x6d, 0x95, 0x15, 0xd1, 0x51,
0xac, 0x2c, 0xe4, 0x64, 0x8c, 0x0c, 0xca, 0x4a,
0xbb, 0x3b, 0xf8, 0x78, 0x9c, 0x1c, 0xd8, 0x58,
0xa4, 0x24, 0xde, 0x5e, 0x84, 0x04, 0xc2, 0x42,
0xb3, 0x33, 0xec, 0x6c, 0x94, 0x14, 0xd0, 0x50,
0xb0, 0x30, 0xe8, 0x68, 0x90, 0x10, 0xce, 0x4e,
0xbf, 0x3f, 0xfe, 0x7e, 0xa0, 0x20, 0xdc, 0x5c,
0xa8, 0x28, 0xe0, 0x60, 0x88, 0x08, 0xc6, 0x46,
0xb7, 0x37, 0xf0, 0x70, 0x98, 0x18, 0xd4, 0x54,
0xaa, 0x2a, 0xe2, 0x62, 0x8a, 0x0a, 0xc8, 0x48,
0xb9, 0x39, 0xf4, 0x74, 0x9a, 0x1a, 0xd6, 0x56,
0xa2, 0x22, 0xdd, 0x5d, 0x82, 0x02, 0xc0, 0x40,
0xb1, 0x31, 0xea, 0x6a, 0x92, 0x12, 0xcf, 0x4f,
0xae, 0x2e, 0xe6, 0x66, 0x8e, 0x0e, 0xcc, 0x4c,
0xbd, 0x3d, 0xfc, 0x7c, 0x9e, 0x1e, 0xda, 0x5a,
0xa6, 0x26, 0xdf, 0x5f, 0x86, 0x06, 0xc4, 0x44,
0xb5, 0x35, 0xee, 0x6e, 0x96, 0x16, 0xd2, 0x52
};
/* ulaw -> alaw */
static u8 ulaw_to_alaw[256] =
{
0xab, 0x55, 0xd5, 0x15, 0x95, 0x75, 0xf5, 0x35,
0xb5, 0x45, 0xc5, 0x05, 0x85, 0x65, 0xe5, 0x25,
0xa5, 0x5d, 0xdd, 0x1d, 0x9d, 0x7d, 0xfd, 0x3d,
0xbd, 0x4d, 0xcd, 0x0d, 0x8d, 0x6d, 0xed, 0x2d,
0xad, 0x51, 0xd1, 0x11, 0x91, 0x71, 0xf1, 0x31,
0xb1, 0x41, 0xc1, 0x01, 0x81, 0x61, 0xe1, 0x21,
0x59, 0xd9, 0x19, 0x99, 0x79, 0xf9, 0x39, 0xb9,
0x49, 0xc9, 0x09, 0x89, 0x69, 0xe9, 0x29, 0xa9,
0xd7, 0x17, 0x97, 0x77, 0xf7, 0x37, 0xb7, 0x47,
0xc7, 0x07, 0x87, 0x67, 0xe7, 0x27, 0xa7, 0xdf,
0x9f, 0x7f, 0xff, 0x3f, 0xbf, 0x4f, 0xcf, 0x0f,
0x8f, 0x6f, 0xef, 0x2f, 0x53, 0x13, 0x73, 0x33,
0xb3, 0x43, 0xc3, 0x03, 0x83, 0x63, 0xe3, 0x23,
0xa3, 0x5b, 0xdb, 0x1b, 0x9b, 0x7b, 0xfb, 0x3b,
0xbb, 0xbb, 0x4b, 0x4b, 0xcb, 0xcb, 0x0b, 0x0b,
0x8b, 0x8b, 0x6b, 0x6b, 0xeb, 0xeb, 0x2b, 0x2b,
0xab, 0x54, 0xd4, 0x14, 0x94, 0x74, 0xf4, 0x34,
0xb4, 0x44, 0xc4, 0x04, 0x84, 0x64, 0xe4, 0x24,
0xa4, 0x5c, 0xdc, 0x1c, 0x9c, 0x7c, 0xfc, 0x3c,
0xbc, 0x4c, 0xcc, 0x0c, 0x8c, 0x6c, 0xec, 0x2c,
0xac, 0x50, 0xd0, 0x10, 0x90, 0x70, 0xf0, 0x30,
0xb0, 0x40, 0xc0, 0x00, 0x80, 0x60, 0xe0, 0x20,
0x58, 0xd8, 0x18, 0x98, 0x78, 0xf8, 0x38, 0xb8,
0x48, 0xc8, 0x08, 0x88, 0x68, 0xe8, 0x28, 0xa8,
0xd6, 0x16, 0x96, 0x76, 0xf6, 0x36, 0xb6, 0x46,
0xc6, 0x06, 0x86, 0x66, 0xe6, 0x26, 0xa6, 0xde,
0x9e, 0x7e, 0xfe, 0x3e, 0xbe, 0x4e, 0xce, 0x0e,
0x8e, 0x6e, 0xee, 0x2e, 0x52, 0x12, 0x72, 0x32,
0xb2, 0x42, 0xc2, 0x02, 0x82, 0x62, 0xe2, 0x22,
0xa2, 0x5a, 0xda, 0x1a, 0x9a, 0x7a, 0xfa, 0x3a,
0xba, 0xba, 0x4a, 0x4a, 0xca, 0xca, 0x0a, 0x0a,
0x8a, 0x8a, 0x6a, 0x6a, 0xea, 0xea, 0x2a, 0x2a
};
/* alaw -> 4bit compression */
static u8 alaw_to_4bit[256] = {
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0d, 0x02,
0x0e, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
0x0d, 0x02, 0x08, 0x07, 0x0f, 0x01, 0x0a, 0x05,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x09, 0x07, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
};
/* 4bit -> alaw decompression */
static u8 _4bit_to_alaw[16] = {
0x5d, 0x51, 0xd9, 0xd7, 0x5f, 0x53, 0xa3, 0x4b,
0x2a, 0x3a, 0x22, 0x2e, 0x26, 0x56, 0x20, 0x2c,
};
/* ulaw -> 4bit compression */
static u8 ulaw_to_4bit[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04,
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x08,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
0x0e, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0b, 0x0b,
0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
0x0b, 0x0b, 0x0b, 0x0b, 0x0a, 0x0a, 0x0a, 0x0a,
0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
0x09, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
};
/* 4bit -> ulaw decompression */
static u8 _4bit_to_ulaw[16] = {
0x11, 0x21, 0x31, 0x40, 0x4e, 0x5c, 0x68, 0x71,
0xfe, 0xef, 0xe7, 0xdb, 0xcd, 0xbf, 0xaf, 0x9f,
};
/*
* Compresses data to the result buffer
* The result size must be at least half of the input buffer.
 * If the number of samples is odd, the trailing sample is saved in *state
 * for the next call.
*/
int
l1oip_law_to_4bit(u8 *data, int len, u8 *result, u32 *state)
{
int ii, i = 0, o = 0;
if (!len)
return 0;
/* send saved byte and first input byte */
if (*state) {
*result++ = table_com[(((*state) << 8) & 0xff00) | (*data++)];
len--;
o++;
}
ii = len >> 1;
while (i < ii) {
*result++ = table_com[(data[0]<<8) | (data[1])];
data += 2;
i++;
o++;
}
/* if len has an odd number, we save byte for next call */
if (len & 1)
*state = 0x100 + *data;
else
*state = 0;
return o;
}
/* Decompress data to the result buffer
 * The result size must be the number of samples in the packet (2 * input length).
 * The number of samples in the result is even.
*/
int
l1oip_4bit_to_law(u8 *data, int len, u8 *result)
{
int i = 0;
u16 r;
while (i < len) {
r = table_dec[*data++];
*result++ = r >> 8;
*result++ = r;
i++;
}
return len << 1;
}
/*
* law conversion
*/
int
l1oip_alaw_to_ulaw(u8 *data, int len, u8 *result)
{
int i = 0;
while (i < len) {
*result++ = alaw_to_ulaw[*data++];
i++;
}
return len;
}
int
l1oip_ulaw_to_alaw(u8 *data, int len, u8 *result)
{
int i = 0;
while (i < len) {
*result++ = ulaw_to_alaw[*data++];
i++;
}
return len;
}
/*
* generate/free compression and decompression table
*/
void
l1oip_4bit_free(void)
{
vfree(table_dec);
vfree(table_com);
table_com = NULL;
table_dec = NULL;
}
int
l1oip_4bit_alloc(int ulaw)
{
int i1, i2, c, sample;
/* in case, it is called again */
if (table_dec)
return 0;
/* alloc conversion tables */
table_com = vzalloc(65536);
table_dec = vzalloc(512);
if (!table_com || !table_dec) {
l1oip_4bit_free();
return -ENOMEM;
}
/* generate compression table */
i1 = 0;
while (i1 < 256) {
if (ulaw)
c = ulaw_to_4bit[i1];
else
c = alaw_to_4bit[i1];
i2 = 0;
while (i2 < 256) {
table_com[(i1 << 8) | i2] |= (c << 4);
table_com[(i2 << 8) | i1] |= c;
i2++;
}
i1++;
}
/* generate decompression table */
i1 = 0;
while (i1 < 16) {
if (ulaw)
sample = _4bit_to_ulaw[i1];
else
sample = _4bit_to_alaw[i1];
i2 = 0;
while (i2 < 16) {
table_dec[(i1 << 4) | i2] |= (sample << 8);
table_dec[(i2 << 4) | i1] |= sample;
i2++;
}
i1++;
}
return 0;
}
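/*
 * Illustrative sketch (not part of the original code, never built): a round
 * trip through the 4-bit codec. The sample values below are arbitrary a-law
 * bytes made up for the example.
 */
#if 0
static int example_codec_roundtrip(void)
{
	u8 law[8] = { 0x2a, 0x2a, 0x55, 0xd5, 0x2a, 0x2a, 0x55, 0xd5 };
	u8 comp[4], out[8];
	u32 state = 0;
	int clen, dlen;

	if (l1oip_4bit_alloc(0))	/* 0 selects the a-law tables */
		return -ENOMEM;
	clen = l1oip_law_to_4bit(law, 8, comp, &state);	/* 8 samples -> 4 bytes */
	dlen = l1oip_4bit_to_law(comp, clen, out);	/* 4 bytes -> 8 samples */
	l1oip_4bit_free();
	return (dlen == 8) ? 0 : -EINVAL;
}
#endif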
| linux-master | drivers/isdn/mISDN/l1oip_codec.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dsp_pipeline.c: pipelined audio processing
*
* Copyright (C) 2007, Nadi Sarrar
*
* Nadi Sarrar <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
#include <linux/export.h>
#include "dsp.h"
#include "dsp_hwec.h"
struct dsp_pipeline_entry {
struct mISDN_dsp_element *elem;
void *p;
struct list_head list;
};
struct dsp_element_entry {
struct mISDN_dsp_element *elem;
struct device dev;
struct list_head list;
};
static LIST_HEAD(dsp_elements);
/* sysfs */
static struct class *elements_class;
static ssize_t
attr_show_args(struct device *dev, struct device_attribute *attr, char *buf)
{
struct mISDN_dsp_element *elem = dev_get_drvdata(dev);
int i;
char *p = buf;
*buf = 0;
for (i = 0; i < elem->num_args; i++)
p += sprintf(p, "Name: %s\n%s%s%sDescription: %s\n\n",
elem->args[i].name,
elem->args[i].def ? "Default: " : "",
elem->args[i].def ? elem->args[i].def : "",
elem->args[i].def ? "\n" : "",
elem->args[i].desc);
return p - buf;
}
static struct device_attribute element_attributes[] = {
__ATTR(args, 0444, attr_show_args, NULL),
};
static void
mISDN_dsp_dev_release(struct device *dev)
{
struct dsp_element_entry *entry =
container_of(dev, struct dsp_element_entry, dev);
list_del(&entry->list);
kfree(entry);
}
int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
{
struct dsp_element_entry *entry;
int ret, i;
if (!elem)
return -EINVAL;
entry = kzalloc(sizeof(struct dsp_element_entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
INIT_LIST_HEAD(&entry->list);
entry->elem = elem;
entry->dev.class = elements_class;
entry->dev.release = mISDN_dsp_dev_release;
dev_set_drvdata(&entry->dev, elem);
dev_set_name(&entry->dev, "%s", elem->name);
ret = device_register(&entry->dev);
if (ret) {
printk(KERN_ERR "%s: failed to register %s\n",
__func__, elem->name);
goto err1;
}
list_add_tail(&entry->list, &dsp_elements);
for (i = 0; i < ARRAY_SIZE(element_attributes); ++i) {
ret = device_create_file(&entry->dev,
&element_attributes[i]);
if (ret) {
printk(KERN_ERR "%s: failed to create device file\n",
__func__);
goto err2;
}
}
return 0;
err2:
device_unregister(&entry->dev);
return ret;
err1:
put_device(&entry->dev);
return ret;
}
EXPORT_SYMBOL(mISDN_dsp_element_register);
void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem)
{
struct dsp_element_entry *entry, *n;
if (!elem)
return;
list_for_each_entry_safe(entry, n, &dsp_elements, list)
if (entry->elem == elem) {
device_unregister(&entry->dev);
return;
}
printk(KERN_ERR "%s: element %s not in list.\n", __func__, elem->name);
}
EXPORT_SYMBOL(mISDN_dsp_element_unregister);
int dsp_pipeline_module_init(void)
{
elements_class = class_create("dsp_pipeline");
if (IS_ERR(elements_class))
return PTR_ERR(elements_class);
dsp_hwec_init();
return 0;
}
void dsp_pipeline_module_exit(void)
{
struct dsp_element_entry *entry, *n;
dsp_hwec_exit();
class_destroy(elements_class);
list_for_each_entry_safe(entry, n, &dsp_elements, list) {
list_del(&entry->list);
printk(KERN_WARNING "%s: element was still registered: %s\n",
__func__, entry->elem->name);
kfree(entry);
}
}
int dsp_pipeline_init(struct dsp_pipeline *pipeline)
{
if (!pipeline)
return -EINVAL;
INIT_LIST_HEAD(&pipeline->list);
return 0;
}
static inline void _dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
{
struct dsp_pipeline_entry *entry, *n;
list_for_each_entry_safe(entry, n, &pipeline->list, list) {
list_del(&entry->list);
if (entry->elem == dsp_hwec)
dsp_hwec_disable(container_of(pipeline, struct dsp,
pipeline));
else
entry->elem->free(entry->p);
kfree(entry);
}
}
void dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
{
if (!pipeline)
return;
_dsp_pipeline_destroy(pipeline);
}
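/*
 * Build a pipeline from a configuration string: a '|'-separated list of
 * element names, each optionally followed by arguments in parentheses,
 * e.g. "elem1|elem2(arg)" (placeholder names; the usable elements are the
 * ones registered via mISDN_dsp_element_register above).
 */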
int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
{
int found = 0;
char *dup, *next, *tok, *name, *args;
struct dsp_element_entry *entry, *n;
struct dsp_pipeline_entry *pipeline_entry;
struct mISDN_dsp_element *elem;
if (!pipeline)
return -EINVAL;
if (!list_empty(&pipeline->list))
_dsp_pipeline_destroy(pipeline);
dup = next = kstrdup(cfg, GFP_ATOMIC);
if (!dup)
return 0;
while ((tok = strsep(&next, "|"))) {
if (!strlen(tok))
continue;
name = strsep(&tok, "(");
args = strsep(&tok, ")");
if (args && !*args)
args = NULL;
list_for_each_entry_safe(entry, n, &dsp_elements, list)
if (!strcmp(entry->elem->name, name)) {
elem = entry->elem;
pipeline_entry = kmalloc(sizeof(struct
dsp_pipeline_entry), GFP_ATOMIC);
if (!pipeline_entry) {
printk(KERN_ERR "%s: failed to add "
"entry to pipeline: %s (out of "
"memory)\n", __func__, elem->name);
goto _out;
}
pipeline_entry->elem = elem;
if (elem == dsp_hwec) {
/* This is a hack to make the hwec
available as a pipeline module */
dsp_hwec_enable(container_of(pipeline,
struct dsp, pipeline), args);
list_add_tail(&pipeline_entry->list,
&pipeline->list);
} else {
pipeline_entry->p = elem->new(args);
if (pipeline_entry->p) {
list_add_tail(&pipeline_entry->
list, &pipeline->list);
} else {
printk(KERN_ERR "%s: failed "
"to add entry to pipeline: "
"%s (new() returned NULL)\n",
__func__, elem->name);
kfree(pipeline_entry);
}
}
found = 1;
break;
}
if (found)
found = 0;
else
printk(KERN_ERR "%s: element not found, skipping: "
"%s\n", __func__, name);
}
_out:
if (!list_empty(&pipeline->list))
pipeline->inuse = 1;
else
pipeline->inuse = 0;
kfree(dup);
return 0;
}
void dsp_pipeline_process_tx(struct dsp_pipeline *pipeline, u8 *data, int len)
{
struct dsp_pipeline_entry *entry;
if (!pipeline)
return;
list_for_each_entry(entry, &pipeline->list, list)
if (entry->elem->process_tx)
entry->elem->process_tx(entry->p, data, len);
}
void dsp_pipeline_process_rx(struct dsp_pipeline *pipeline, u8 *data, int len,
unsigned int txlen)
{
struct dsp_pipeline_entry *entry;
if (!pipeline)
return;
list_for_each_entry_reverse(entry, &pipeline->list, list)
if (entry->elem->process_rx)
entry->elem->process_rx(entry->p, data, len, txlen);
}
| linux-master | drivers/isdn/mISDN/dsp_pipeline.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Author Karsten Keil <[email protected]>
*
* Copyright 2008 by Karsten Keil <[email protected]>
*/
#include "layer2.h"
#include <linux/random.h>
#include <linux/slab.h>
#include "core.h"
#define ID_REQUEST 1
#define ID_ASSIGNED 2
#define ID_DENIED 3
#define ID_CHK_REQ 4
#define ID_CHK_RES 5
#define ID_REMOVE 6
#define ID_VERIFY 7
#define TEI_ENTITY_ID 0xf
#define MGR_PH_ACTIVE 16
#define MGR_PH_NOTREADY 17
#define DATIMER_VAL 10000
static u_int *debug;
static struct Fsm deactfsm = {NULL, 0, 0, NULL, NULL};
static struct Fsm teifsmu = {NULL, 0, 0, NULL, NULL};
static struct Fsm teifsmn = {NULL, 0, 0, NULL, NULL};
enum {
ST_L1_DEACT,
ST_L1_DEACT_PENDING,
ST_L1_ACTIV,
};
#define DEACT_STATE_COUNT (ST_L1_ACTIV + 1)
static char *strDeactState[] =
{
"ST_L1_DEACT",
"ST_L1_DEACT_PENDING",
"ST_L1_ACTIV",
};
enum {
EV_ACTIVATE,
EV_ACTIVATE_IND,
EV_DEACTIVATE,
EV_DEACTIVATE_IND,
EV_UI,
EV_DATIMER,
};
#define DEACT_EVENT_COUNT (EV_DATIMER + 1)
static char *strDeactEvent[] =
{
"EV_ACTIVATE",
"EV_ACTIVATE_IND",
"EV_DEACTIVATE",
"EV_DEACTIVATE_IND",
"EV_UI",
"EV_DATIMER",
};
static void
da_debug(struct FsmInst *fi, char *fmt, ...)
{
struct manager *mgr = fi->userdata;
struct va_format vaf;
va_list va;
if (!(*debug & DEBUG_L2_TEIFSM))
return;
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
printk(KERN_DEBUG "mgr(%d): %pV\n", mgr->ch.st->dev->id, &vaf);
va_end(va);
}
static void
da_activate(struct FsmInst *fi, int event, void *arg)
{
struct manager *mgr = fi->userdata;
if (fi->state == ST_L1_DEACT_PENDING)
mISDN_FsmDelTimer(&mgr->datimer, 1);
mISDN_FsmChangeState(fi, ST_L1_ACTIV);
}
static void
da_deactivate_ind(struct FsmInst *fi, int event, void *arg)
{
mISDN_FsmChangeState(fi, ST_L1_DEACT);
}
static void
da_deactivate(struct FsmInst *fi, int event, void *arg)
{
struct manager *mgr = fi->userdata;
struct layer2 *l2;
u_long flags;
read_lock_irqsave(&mgr->lock, flags);
list_for_each_entry(l2, &mgr->layer2, list) {
if (l2->l2m.state > ST_L2_4) {
			/* still have an active TEI */
read_unlock_irqrestore(&mgr->lock, flags);
return;
}
}
read_unlock_irqrestore(&mgr->lock, flags);
	/* All TEIs are inactive */
if (!test_bit(OPTION_L1_HOLD, &mgr->options)) {
mISDN_FsmAddTimer(&mgr->datimer, DATIMER_VAL, EV_DATIMER,
NULL, 1);
mISDN_FsmChangeState(fi, ST_L1_DEACT_PENDING);
}
}
static void
da_ui(struct FsmInst *fi, int event, void *arg)
{
struct manager *mgr = fi->userdata;
/* restart da timer */
if (!test_bit(OPTION_L1_HOLD, &mgr->options)) {
mISDN_FsmDelTimer(&mgr->datimer, 2);
mISDN_FsmAddTimer(&mgr->datimer, DATIMER_VAL, EV_DATIMER,
NULL, 2);
}
}
static void
da_timer(struct FsmInst *fi, int event, void *arg)
{
struct manager *mgr = fi->userdata;
struct layer2 *l2;
u_long flags;
/* check again */
read_lock_irqsave(&mgr->lock, flags);
list_for_each_entry(l2, &mgr->layer2, list) {
if (l2->l2m.state > ST_L2_4) {
			/* still have an active TEI */
read_unlock_irqrestore(&mgr->lock, flags);
mISDN_FsmChangeState(fi, ST_L1_ACTIV);
return;
}
}
read_unlock_irqrestore(&mgr->lock, flags);
	/* All TEIs are inactive */
mISDN_FsmChangeState(fi, ST_L1_DEACT);
_queue_data(&mgr->ch, PH_DEACTIVATE_REQ, MISDN_ID_ANY, 0, NULL,
GFP_ATOMIC);
}
static struct FsmNode DeactFnList[] =
{
{ST_L1_DEACT, EV_ACTIVATE_IND, da_activate},
{ST_L1_ACTIV, EV_DEACTIVATE_IND, da_deactivate_ind},
{ST_L1_ACTIV, EV_DEACTIVATE, da_deactivate},
{ST_L1_DEACT_PENDING, EV_ACTIVATE, da_activate},
{ST_L1_DEACT_PENDING, EV_UI, da_ui},
{ST_L1_DEACT_PENDING, EV_DATIMER, da_timer},
};
enum {
ST_TEI_NOP,
ST_TEI_IDREQ,
ST_TEI_IDVERIFY,
};
#define TEI_STATE_COUNT (ST_TEI_IDVERIFY + 1)
static char *strTeiState[] =
{
"ST_TEI_NOP",
"ST_TEI_IDREQ",
"ST_TEI_IDVERIFY",
};
enum {
EV_IDREQ,
EV_ASSIGN,
EV_ASSIGN_REQ,
EV_DENIED,
EV_CHKREQ,
EV_CHKRESP,
EV_REMOVE,
EV_VERIFY,
EV_TIMER,
};
#define TEI_EVENT_COUNT (EV_TIMER + 1)
static char *strTeiEvent[] =
{
"EV_IDREQ",
"EV_ASSIGN",
"EV_ASSIGN_REQ",
"EV_DENIED",
"EV_CHKREQ",
"EV_CHKRESP",
"EV_REMOVE",
"EV_VERIFY",
"EV_TIMER",
};
static void
tei_debug(struct FsmInst *fi, char *fmt, ...)
{
struct teimgr *tm = fi->userdata;
struct va_format vaf;
va_list va;
if (!(*debug & DEBUG_L2_TEIFSM))
return;
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
printk(KERN_DEBUG "sapi(%d) tei(%d): %pV\n",
tm->l2->sapi, tm->l2->tei, &vaf);
va_end(va);
}
static int
get_free_id(struct manager *mgr)
{
DECLARE_BITMAP(ids, 64) = { [0 ... BITS_TO_LONGS(64) - 1] = 0 };
int i;
struct layer2 *l2;
list_for_each_entry(l2, &mgr->layer2, list) {
if (l2->ch.nr > 63) {
printk(KERN_WARNING
"%s: more as 63 layer2 for one device\n",
__func__);
return -EBUSY;
}
__set_bit(l2->ch.nr, ids);
}
i = find_next_zero_bit(ids, 64, 1);
if (i < 64)
return i;
printk(KERN_WARNING "%s: more as 63 layer2 for one device\n",
__func__);
return -EBUSY;
}
static int
get_free_tei(struct manager *mgr)
{
DECLARE_BITMAP(ids, 64) = { [0 ... BITS_TO_LONGS(64) - 1] = 0 };
int i;
struct layer2 *l2;
list_for_each_entry(l2, &mgr->layer2, list) {
if (l2->ch.nr == 0)
continue;
if ((l2->ch.addr & 0xff) != 0)
continue;
i = l2->ch.addr >> 8;
if (i < 64)
continue;
i -= 64;
__set_bit(i, ids);
}
i = find_first_zero_bit(ids, 64);
if (i < 64)
return i + 64;
printk(KERN_WARNING "%s: more as 63 dynamic tei for one device\n",
__func__);
return -1;
}
static void
teiup_create(struct manager *mgr, u_int prim, int len, void *arg)
{
struct sk_buff *skb;
struct mISDNhead *hh;
int err;
skb = mI_alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
hh = mISDN_HEAD_P(skb);
hh->prim = prim;
hh->id = (mgr->ch.nr << 16) | mgr->ch.addr;
if (len)
skb_put_data(skb, arg, len);
err = mgr->up->send(mgr->up, skb);
if (err) {
printk(KERN_WARNING "%s: err=%d\n", __func__, err);
dev_kfree_skb(skb);
}
}
static u_int
new_id(struct manager *mgr)
{
u_int id;
id = mgr->nextid++;
if (id == 0x7fff)
mgr->nextid = 1;
id <<= 16;
id |= GROUP_TEI << 8;
id |= TEI_SAPI;
return id;
}
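/*
 * The id built above packs three fields: bits 16 and up hold the running
 * sequence number (1..0x7fff, after which it wraps back to 1), bits 8..15
 * hold GROUP_TEI and bits 0..7 hold TEI_SAPI.  do_ack() below compares
 * this id against mgr->lastid when a PH_DATA_CNF confirm arrives.
 */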
static void
do_send(struct manager *mgr)
{
if (!test_bit(MGR_PH_ACTIVE, &mgr->options))
return;
if (!test_and_set_bit(MGR_PH_NOTREADY, &mgr->options)) {
struct sk_buff *skb = skb_dequeue(&mgr->sendq);
if (!skb) {
test_and_clear_bit(MGR_PH_NOTREADY, &mgr->options);
return;
}
mgr->lastid = mISDN_HEAD_ID(skb);
mISDN_FsmEvent(&mgr->deact, EV_UI, NULL);
if (mgr->ch.recv(mgr->ch.peer, skb)) {
dev_kfree_skb(skb);
test_and_clear_bit(MGR_PH_NOTREADY, &mgr->options);
mgr->lastid = MISDN_ID_NONE;
}
}
}
static void
do_ack(struct manager *mgr, u_int id)
{
if (test_bit(MGR_PH_NOTREADY, &mgr->options)) {
if (id == mgr->lastid) {
if (test_bit(MGR_PH_ACTIVE, &mgr->options)) {
struct sk_buff *skb;
skb = skb_dequeue(&mgr->sendq);
if (skb) {
mgr->lastid = mISDN_HEAD_ID(skb);
if (!mgr->ch.recv(mgr->ch.peer, skb))
return;
dev_kfree_skb(skb);
}
}
mgr->lastid = MISDN_ID_NONE;
test_and_clear_bit(MGR_PH_NOTREADY, &mgr->options);
}
}
}
static void
mgr_send_down(struct manager *mgr, struct sk_buff *skb)
{
skb_queue_tail(&mgr->sendq, skb);
if (!test_bit(MGR_PH_ACTIVE, &mgr->options)) {
_queue_data(&mgr->ch, PH_ACTIVATE_REQ, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
} else {
do_send(mgr);
}
}
static int
dl_unit_data(struct manager *mgr, struct sk_buff *skb)
{
if (!test_bit(MGR_OPT_NETWORK, &mgr->options)) /* only net send UI */
return -EINVAL;
if (!test_bit(MGR_PH_ACTIVE, &mgr->options))
_queue_data(&mgr->ch, PH_ACTIVATE_REQ, MISDN_ID_ANY, 0,
NULL, GFP_KERNEL);
skb_push(skb, 3);
skb->data[0] = 0x02; /* SAPI 0 C/R = 1 */
skb->data[1] = 0xff; /* TEI 127 */
skb->data[2] = UI; /* UI frame */
mISDN_HEAD_PRIM(skb) = PH_DATA_REQ;
mISDN_HEAD_ID(skb) = new_id(mgr);
skb_queue_tail(&mgr->sendq, skb);
do_send(mgr);
return 0;
}
static unsigned int
random_ri(void)
{
u16 x;
get_random_bytes(&x, sizeof(x));
return x;
}
static struct layer2 *
findtei(struct manager *mgr, int tei)
{
struct layer2 *l2;
u_long flags;
read_lock_irqsave(&mgr->lock, flags);
list_for_each_entry(l2, &mgr->layer2, list) {
if ((l2->sapi == 0) && (l2->tei > 0) &&
(l2->tei != GROUP_TEI) && (l2->tei == tei))
goto done;
}
l2 = NULL;
done:
read_unlock_irqrestore(&mgr->lock, flags);
return l2;
}
static void
put_tei_msg(struct manager *mgr, u_char m_id, unsigned int ri, int tei)
{
struct sk_buff *skb;
u_char bp[8];
bp[0] = (TEI_SAPI << 2);
if (test_bit(MGR_OPT_NETWORK, &mgr->options))
bp[0] |= 2; /* CR:=1 for net command */
bp[1] = (GROUP_TEI << 1) | 0x1;
bp[2] = UI;
bp[3] = TEI_ENTITY_ID;
bp[4] = ri >> 8;
bp[5] = ri & 0xff;
bp[6] = m_id;
bp[7] = ((tei << 1) & 0xff) | 1;
skb = _alloc_mISDN_skb(PH_DATA_REQ, new_id(mgr), 8, bp, GFP_ATOMIC);
if (!skb) {
printk(KERN_WARNING "%s: no skb for tei msg\n", __func__);
return;
}
mgr_send_down(mgr, skb);
}
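/*
 * Illustrative sketch only (not used by the driver): a small decoder for
 * the 8-byte bp[] layout built in put_tei_msg() above.  It assumes the
 * same UI and TEI_ENTITY_ID constants are visible here as in the rest of
 * this file; the struct and function names are made up for the example
 * and the block is kept inside #if 0 so it is never compiled.
 */
#if 0
struct tei_mgmt_msg {
	u_char		sapi;	/* byte 0, bits 2..7 */
	u_char		addr;	/* byte 1, bits 1..7 (GROUP_TEI here) */
	unsigned int	ri;	/* bytes 4..5, reference number, big endian */
	u_char		m_id;	/* byte 6, message type (ID_REQUEST, ...) */
	u_char		tei;	/* byte 7, bits 1..7, action indicator */
};
static int
decode_tei_msg(const u_char *bp, struct tei_mgmt_msg *m)
{
	if (bp[2] != UI || bp[3] != TEI_ENTITY_ID)
		return -EINVAL;
	m->sapi = bp[0] >> 2;
	m->addr = bp[1] >> 1;
	m->ri = (bp[4] << 8) | bp[5];
	m->m_id = bp[6];
	m->tei = bp[7] >> 1;
	return 0;
}
#endif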
static void
tei_id_request(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
if (tm->l2->tei != GROUP_TEI) {
tm->tei_m.printdebug(&tm->tei_m,
"assign request for already assigned tei %d",
tm->l2->tei);
return;
}
tm->ri = random_ri();
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(&tm->tei_m,
"assign request ri %d", tm->ri);
put_tei_msg(tm->mgr, ID_REQUEST, tm->ri, GROUP_TEI);
mISDN_FsmChangeState(fi, ST_TEI_IDREQ);
mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 1);
tm->nval = 3;
}
static void
tei_id_assign(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
struct layer2 *l2;
u_char *dp = arg;
int ri, tei;
ri = ((unsigned int) *dp++ << 8);
ri += *dp++;
dp++;
tei = *dp >> 1;
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(fi, "identity assign ri %d tei %d",
ri, tei);
l2 = findtei(tm->mgr, tei);
if (l2) { /* same tei is in use */
if (ri != l2->tm->ri) {
tm->tei_m.printdebug(fi,
"possible duplicate assignment tei %d", tei);
tei_l2(l2, MDL_ERROR_RSP, 0);
}
} else if (ri == tm->ri) {
mISDN_FsmDelTimer(&tm->timer, 1);
mISDN_FsmChangeState(fi, ST_TEI_NOP);
tei_l2(tm->l2, MDL_ASSIGN_REQ, tei);
}
}
static void
tei_id_test_dup(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
struct layer2 *l2;
u_char *dp = arg;
int tei, ri;
ri = ((unsigned int) *dp++ << 8);
ri += *dp++;
dp++;
tei = *dp >> 1;
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(fi, "foreign identity assign ri %d tei %d",
ri, tei);
l2 = findtei(tm->mgr, tei);
if (l2) { /* same tei is in use */
if (ri != l2->tm->ri) { /* and it wasn't our request */
tm->tei_m.printdebug(fi,
"possible duplicate assignment tei %d", tei);
mISDN_FsmEvent(&l2->tm->tei_m, EV_VERIFY, NULL);
}
}
}
static void
tei_id_denied(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
u_char *dp = arg;
int ri, tei;
ri = ((unsigned int) *dp++ << 8);
ri += *dp++;
dp++;
tei = *dp >> 1;
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(fi, "identity denied ri %d tei %d",
ri, tei);
}
static void
tei_id_chk_req(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
u_char *dp = arg;
int tei;
tei = *(dp + 3) >> 1;
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(fi, "identity check req tei %d", tei);
if ((tm->l2->tei != GROUP_TEI) && ((tei == GROUP_TEI) ||
(tei == tm->l2->tei))) {
mISDN_FsmDelTimer(&tm->timer, 4);
mISDN_FsmChangeState(&tm->tei_m, ST_TEI_NOP);
put_tei_msg(tm->mgr, ID_CHK_RES, random_ri(), tm->l2->tei);
}
}
static void
tei_id_remove(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
u_char *dp = arg;
int tei;
tei = *(dp + 3) >> 1;
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(fi, "identity remove tei %d", tei);
if ((tm->l2->tei != GROUP_TEI) &&
((tei == GROUP_TEI) || (tei == tm->l2->tei))) {
mISDN_FsmDelTimer(&tm->timer, 5);
mISDN_FsmChangeState(&tm->tei_m, ST_TEI_NOP);
tei_l2(tm->l2, MDL_REMOVE_REQ, 0);
}
}
static void
tei_id_verify(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(fi, "id verify request for tei %d",
tm->l2->tei);
put_tei_msg(tm->mgr, ID_VERIFY, 0, tm->l2->tei);
mISDN_FsmChangeState(&tm->tei_m, ST_TEI_IDVERIFY);
mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 2);
tm->nval = 2;
}
static void
tei_id_req_tout(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
if (--tm->nval) {
tm->ri = random_ri();
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(fi, "assign req(%d) ri %d",
4 - tm->nval, tm->ri);
put_tei_msg(tm->mgr, ID_REQUEST, tm->ri, GROUP_TEI);
mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 3);
} else {
tm->tei_m.printdebug(fi, "assign req failed");
tei_l2(tm->l2, MDL_ERROR_RSP, 0);
mISDN_FsmChangeState(fi, ST_TEI_NOP);
}
}
static void
tei_id_ver_tout(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
if (--tm->nval) {
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(fi,
"id verify req(%d) for tei %d",
3 - tm->nval, tm->l2->tei);
put_tei_msg(tm->mgr, ID_VERIFY, 0, tm->l2->tei);
mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 4);
} else {
tm->tei_m.printdebug(fi, "verify req for tei %d failed",
tm->l2->tei);
tei_l2(tm->l2, MDL_REMOVE_REQ, 0);
mISDN_FsmChangeState(fi, ST_TEI_NOP);
}
}
static struct FsmNode TeiFnListUser[] =
{
{ST_TEI_NOP, EV_IDREQ, tei_id_request},
{ST_TEI_NOP, EV_ASSIGN, tei_id_test_dup},
{ST_TEI_NOP, EV_VERIFY, tei_id_verify},
{ST_TEI_NOP, EV_REMOVE, tei_id_remove},
{ST_TEI_NOP, EV_CHKREQ, tei_id_chk_req},
{ST_TEI_IDREQ, EV_TIMER, tei_id_req_tout},
{ST_TEI_IDREQ, EV_ASSIGN, tei_id_assign},
{ST_TEI_IDREQ, EV_DENIED, tei_id_denied},
{ST_TEI_IDVERIFY, EV_TIMER, tei_id_ver_tout},
{ST_TEI_IDVERIFY, EV_REMOVE, tei_id_remove},
{ST_TEI_IDVERIFY, EV_CHKREQ, tei_id_chk_req},
};
static void
tei_l2remove(struct layer2 *l2)
{
put_tei_msg(l2->tm->mgr, ID_REMOVE, 0, l2->tei);
tei_l2(l2, MDL_REMOVE_REQ, 0);
list_del(&l2->ch.list);
l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
}
static void
tei_assign_req(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
u_char *dp = arg;
if (tm->l2->tei == GROUP_TEI) {
tm->tei_m.printdebug(&tm->tei_m,
"net tei assign request without tei");
return;
}
tm->ri = ((unsigned int) *dp++ << 8);
tm->ri += *dp++;
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(&tm->tei_m,
"net assign request ri %d teim %d", tm->ri, *dp);
put_tei_msg(tm->mgr, ID_ASSIGNED, tm->ri, tm->l2->tei);
mISDN_FsmChangeState(fi, ST_TEI_NOP);
}
static void
tei_id_chk_req_net(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(fi, "id check request for tei %d",
tm->l2->tei);
tm->rcnt = 0;
put_tei_msg(tm->mgr, ID_CHK_REQ, 0, tm->l2->tei);
mISDN_FsmChangeState(&tm->tei_m, ST_TEI_IDVERIFY);
mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 2);
tm->nval = 2;
}
static void
tei_id_chk_resp(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
u_char *dp = arg;
int tei;
tei = dp[3] >> 1;
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(fi, "identity check resp tei %d", tei);
if (tei == tm->l2->tei)
tm->rcnt++;
}
static void
tei_id_verify_net(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
u_char *dp = arg;
int tei;
tei = dp[3] >> 1;
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(fi, "identity verify req tei %d/%d",
tei, tm->l2->tei);
if (tei == tm->l2->tei)
tei_id_chk_req_net(fi, event, arg);
}
static void
tei_id_ver_tout_net(struct FsmInst *fi, int event, void *arg)
{
struct teimgr *tm = fi->userdata;
if (tm->rcnt == 1) {
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(fi,
"check req for tei %d successful\n", tm->l2->tei);
mISDN_FsmChangeState(fi, ST_TEI_NOP);
} else if (tm->rcnt > 1) {
/* duplicate assignment; remove */
tei_l2remove(tm->l2);
} else if (--tm->nval) {
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(fi,
"id check req(%d) for tei %d",
3 - tm->nval, tm->l2->tei);
put_tei_msg(tm->mgr, ID_CHK_REQ, 0, tm->l2->tei);
mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 4);
} else {
tm->tei_m.printdebug(fi, "check req for tei %d failed",
tm->l2->tei);
mISDN_FsmChangeState(fi, ST_TEI_NOP);
tei_l2remove(tm->l2);
}
}
static struct FsmNode TeiFnListNet[] =
{
{ST_TEI_NOP, EV_ASSIGN_REQ, tei_assign_req},
{ST_TEI_NOP, EV_VERIFY, tei_id_verify_net},
{ST_TEI_NOP, EV_CHKREQ, tei_id_chk_req_net},
{ST_TEI_IDVERIFY, EV_TIMER, tei_id_ver_tout_net},
{ST_TEI_IDVERIFY, EV_CHKRESP, tei_id_chk_resp},
};
static void
tei_ph_data_ind(struct teimgr *tm, u_int mt, u_char *dp, int len)
{
if (test_bit(FLG_FIXED_TEI, &tm->l2->flag))
return;
if (*debug & DEBUG_L2_TEI)
tm->tei_m.printdebug(&tm->tei_m, "tei handler mt %x", mt);
if (mt == ID_ASSIGNED)
mISDN_FsmEvent(&tm->tei_m, EV_ASSIGN, dp);
else if (mt == ID_DENIED)
mISDN_FsmEvent(&tm->tei_m, EV_DENIED, dp);
else if (mt == ID_CHK_REQ)
mISDN_FsmEvent(&tm->tei_m, EV_CHKREQ, dp);
else if (mt == ID_REMOVE)
mISDN_FsmEvent(&tm->tei_m, EV_REMOVE, dp);
else if (mt == ID_VERIFY)
mISDN_FsmEvent(&tm->tei_m, EV_VERIFY, dp);
else if (mt == ID_CHK_RES)
mISDN_FsmEvent(&tm->tei_m, EV_CHKRESP, dp);
}
static struct layer2 *
create_new_tei(struct manager *mgr, int tei, int sapi)
{
unsigned long opt = 0;
unsigned long flags;
int id;
struct layer2 *l2;
struct channel_req rq;
if (!mgr->up)
return NULL;
if ((tei >= 0) && (tei < 64))
test_and_set_bit(OPTION_L2_FIXEDTEI, &opt);
if (mgr->ch.st->dev->Dprotocols & ((1 << ISDN_P_TE_E1) |
(1 << ISDN_P_NT_E1))) {
test_and_set_bit(OPTION_L2_PMX, &opt);
rq.protocol = ISDN_P_NT_E1;
} else {
rq.protocol = ISDN_P_NT_S0;
}
l2 = create_l2(mgr->up, ISDN_P_LAPD_NT, opt, tei, sapi);
if (!l2) {
printk(KERN_WARNING "%s:no memory for layer2\n", __func__);
return NULL;
}
l2->tm = kzalloc(sizeof(struct teimgr), GFP_KERNEL);
if (!l2->tm) {
kfree(l2);
printk(KERN_WARNING "%s:no memory for teimgr\n", __func__);
return NULL;
}
l2->tm->mgr = mgr;
l2->tm->l2 = l2;
l2->tm->tei_m.debug = *debug & DEBUG_L2_TEIFSM;
l2->tm->tei_m.userdata = l2->tm;
l2->tm->tei_m.printdebug = tei_debug;
l2->tm->tei_m.fsm = &teifsmn;
l2->tm->tei_m.state = ST_TEI_NOP;
l2->tm->tval = 2000; /* T202 2 sec */
mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer);
write_lock_irqsave(&mgr->lock, flags);
id = get_free_id(mgr);
list_add_tail(&l2->list, &mgr->layer2);
write_unlock_irqrestore(&mgr->lock, flags);
if (id < 0) {
l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
printk(KERN_WARNING "%s:no free id\n", __func__);
return NULL;
} else {
l2->ch.nr = id;
__add_layer2(&l2->ch, mgr->ch.st);
l2->ch.recv = mgr->ch.recv;
l2->ch.peer = mgr->ch.peer;
l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL);
		/* We also need to open L1 here for the manager (refcounting) */
rq.adr.dev = mgr->ch.st->dev->id;
id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL, &rq);
if (id < 0) {
printk(KERN_WARNING "%s: cannot open L1\n", __func__);
l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
l2 = NULL;
}
}
return l2;
}
static void
new_tei_req(struct manager *mgr, u_char *dp)
{
int tei, ri;
struct layer2 *l2;
ri = dp[0] << 8;
ri += dp[1];
if (!mgr->up)
goto denied;
if (!(dp[3] & 1)) /* Extension bit != 1 */
goto denied;
if (dp[3] != 0xff)
tei = dp[3] >> 1; /* 3GPP TS 08.56 6.1.11.2 */
else
tei = get_free_tei(mgr);
if (tei < 0) {
printk(KERN_WARNING "%s:No free tei\n", __func__);
goto denied;
}
l2 = create_new_tei(mgr, tei, CTRL_SAPI);
if (!l2)
goto denied;
else
mISDN_FsmEvent(&l2->tm->tei_m, EV_ASSIGN_REQ, dp);
return;
denied:
put_tei_msg(mgr, ID_DENIED, ri, GROUP_TEI);
}
static int
ph_data_ind(struct manager *mgr, struct sk_buff *skb)
{
int ret = -EINVAL;
struct layer2 *l2, *nl2;
u_char mt;
if (skb->len < 8) {
if (*debug & DEBUG_L2_TEI)
printk(KERN_DEBUG "%s: short mgr frame %d/8\n",
__func__, skb->len);
goto done;
}
if ((skb->data[0] >> 2) != TEI_SAPI) /* not for us */
goto done;
if (skb->data[0] & 1) /* EA0 formal error */
goto done;
if (!(skb->data[1] & 1)) /* EA1 formal error */
goto done;
if ((skb->data[1] >> 1) != GROUP_TEI) /* not for us */
goto done;
if ((skb->data[2] & 0xef) != UI) /* not UI */
goto done;
if (skb->data[3] != TEI_ENTITY_ID) /* not tei entity */
goto done;
mt = skb->data[6];
switch (mt) {
case ID_REQUEST:
case ID_CHK_RES:
case ID_VERIFY:
if (!test_bit(MGR_OPT_NETWORK, &mgr->options))
goto done;
break;
case ID_ASSIGNED:
case ID_DENIED:
case ID_CHK_REQ:
case ID_REMOVE:
if (test_bit(MGR_OPT_NETWORK, &mgr->options))
goto done;
break;
default:
goto done;
}
ret = 0;
if (mt == ID_REQUEST) {
new_tei_req(mgr, &skb->data[4]);
goto done;
}
list_for_each_entry_safe(l2, nl2, &mgr->layer2, list) {
tei_ph_data_ind(l2->tm, mt, &skb->data[4], skb->len - 4);
}
done:
return ret;
}
int
l2_tei(struct layer2 *l2, u_int cmd, u_long arg)
{
struct teimgr *tm = l2->tm;
if (test_bit(FLG_FIXED_TEI, &l2->flag))
return 0;
if (*debug & DEBUG_L2_TEI)
printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
switch (cmd) {
case MDL_ASSIGN_IND:
mISDN_FsmEvent(&tm->tei_m, EV_IDREQ, NULL);
break;
case MDL_ERROR_IND:
if (test_bit(MGR_OPT_NETWORK, &tm->mgr->options))
mISDN_FsmEvent(&tm->tei_m, EV_CHKREQ, &l2->tei);
if (test_bit(MGR_OPT_USER, &tm->mgr->options))
mISDN_FsmEvent(&tm->tei_m, EV_VERIFY, NULL);
break;
case MDL_STATUS_UP_IND:
if (test_bit(MGR_OPT_NETWORK, &tm->mgr->options))
mISDN_FsmEvent(&tm->mgr->deact, EV_ACTIVATE, NULL);
break;
case MDL_STATUS_DOWN_IND:
if (test_bit(MGR_OPT_NETWORK, &tm->mgr->options))
mISDN_FsmEvent(&tm->mgr->deact, EV_DEACTIVATE, NULL);
break;
case MDL_STATUS_UI_IND:
if (test_bit(MGR_OPT_NETWORK, &tm->mgr->options))
mISDN_FsmEvent(&tm->mgr->deact, EV_UI, NULL);
break;
}
return 0;
}
void
TEIrelease(struct layer2 *l2)
{
struct teimgr *tm = l2->tm;
u_long flags;
mISDN_FsmDelTimer(&tm->timer, 1);
write_lock_irqsave(&tm->mgr->lock, flags);
list_del(&l2->list);
write_unlock_irqrestore(&tm->mgr->lock, flags);
l2->tm = NULL;
kfree(tm);
}
static int
create_teimgr(struct manager *mgr, struct channel_req *crq)
{
struct layer2 *l2;
unsigned long opt = 0;
unsigned long flags;
int id;
struct channel_req l1rq;
if (*debug & DEBUG_L2_TEI)
printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
__func__, dev_name(&mgr->ch.st->dev->dev),
crq->protocol, crq->adr.dev, crq->adr.channel,
crq->adr.sapi, crq->adr.tei);
if (crq->adr.tei > GROUP_TEI)
return -EINVAL;
if (crq->adr.tei < 64)
test_and_set_bit(OPTION_L2_FIXEDTEI, &opt);
if (crq->adr.tei == 0)
test_and_set_bit(OPTION_L2_PTP, &opt);
if (test_bit(MGR_OPT_NETWORK, &mgr->options)) {
if (crq->protocol == ISDN_P_LAPD_TE)
return -EPROTONOSUPPORT;
if ((crq->adr.tei != 0) && (crq->adr.tei != 127))
return -EINVAL;
if (mgr->up) {
printk(KERN_WARNING
"%s: only one network manager is allowed\n",
__func__);
return -EBUSY;
}
} else if (test_bit(MGR_OPT_USER, &mgr->options)) {
if (crq->protocol == ISDN_P_LAPD_NT)
return -EPROTONOSUPPORT;
if ((crq->adr.tei >= 64) && (crq->adr.tei < GROUP_TEI))
return -EINVAL; /* dyn tei */
} else {
if (crq->protocol == ISDN_P_LAPD_NT)
test_and_set_bit(MGR_OPT_NETWORK, &mgr->options);
if (crq->protocol == ISDN_P_LAPD_TE)
test_and_set_bit(MGR_OPT_USER, &mgr->options);
}
l1rq.adr = crq->adr;
if (mgr->ch.st->dev->Dprotocols
& ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1)))
test_and_set_bit(OPTION_L2_PMX, &opt);
if ((crq->protocol == ISDN_P_LAPD_NT) && (crq->adr.tei == 127)) {
mgr->up = crq->ch;
id = DL_INFO_L2_CONNECT;
teiup_create(mgr, DL_INFORMATION_IND, sizeof(id), &id);
if (test_bit(MGR_PH_ACTIVE, &mgr->options))
teiup_create(mgr, PH_ACTIVATE_IND, 0, NULL);
crq->ch = NULL;
if (!list_empty(&mgr->layer2)) {
read_lock_irqsave(&mgr->lock, flags);
list_for_each_entry(l2, &mgr->layer2, list) {
l2->up = mgr->up;
l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL);
}
read_unlock_irqrestore(&mgr->lock, flags);
}
return 0;
}
l2 = create_l2(crq->ch, crq->protocol, opt,
crq->adr.tei, crq->adr.sapi);
if (!l2)
return -ENOMEM;
l2->tm = kzalloc(sizeof(struct teimgr), GFP_KERNEL);
if (!l2->tm) {
kfree(l2);
printk(KERN_ERR "kmalloc teimgr failed\n");
return -ENOMEM;
}
l2->tm->mgr = mgr;
l2->tm->l2 = l2;
l2->tm->tei_m.debug = *debug & DEBUG_L2_TEIFSM;
l2->tm->tei_m.userdata = l2->tm;
l2->tm->tei_m.printdebug = tei_debug;
if (crq->protocol == ISDN_P_LAPD_TE) {
l2->tm->tei_m.fsm = &teifsmu;
l2->tm->tei_m.state = ST_TEI_NOP;
l2->tm->tval = 1000; /* T201 1 sec */
if (test_bit(OPTION_L2_PMX, &opt))
l1rq.protocol = ISDN_P_TE_E1;
else
l1rq.protocol = ISDN_P_TE_S0;
} else {
l2->tm->tei_m.fsm = &teifsmn;
l2->tm->tei_m.state = ST_TEI_NOP;
l2->tm->tval = 2000; /* T202 2 sec */
if (test_bit(OPTION_L2_PMX, &opt))
l1rq.protocol = ISDN_P_NT_E1;
else
l1rq.protocol = ISDN_P_NT_S0;
}
mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer);
write_lock_irqsave(&mgr->lock, flags);
id = get_free_id(mgr);
list_add_tail(&l2->list, &mgr->layer2);
write_unlock_irqrestore(&mgr->lock, flags);
if (id >= 0) {
l2->ch.nr = id;
l2->up->nr = id;
crq->ch = &l2->ch;
		/* We also need to open L1 here for the manager (refcounting) */
id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL,
&l1rq);
}
if (id < 0)
l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
return id;
}
static int
mgr_send(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct manager *mgr;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
int ret = -EINVAL;
mgr = container_of(ch, struct manager, ch);
if (*debug & DEBUG_L2_RECV)
printk(KERN_DEBUG "%s: prim(%x) id(%x)\n",
__func__, hh->prim, hh->id);
switch (hh->prim) {
case PH_DATA_IND:
mISDN_FsmEvent(&mgr->deact, EV_UI, NULL);
ret = ph_data_ind(mgr, skb);
break;
case PH_DATA_CNF:
do_ack(mgr, hh->id);
ret = 0;
break;
case PH_ACTIVATE_IND:
test_and_set_bit(MGR_PH_ACTIVE, &mgr->options);
if (mgr->up)
teiup_create(mgr, PH_ACTIVATE_IND, 0, NULL);
mISDN_FsmEvent(&mgr->deact, EV_ACTIVATE_IND, NULL);
do_send(mgr);
ret = 0;
break;
case PH_DEACTIVATE_IND:
test_and_clear_bit(MGR_PH_ACTIVE, &mgr->options);
if (mgr->up)
teiup_create(mgr, PH_DEACTIVATE_IND, 0, NULL);
mISDN_FsmEvent(&mgr->deact, EV_DEACTIVATE_IND, NULL);
ret = 0;
break;
case DL_UNITDATA_REQ:
return dl_unit_data(mgr, skb);
}
if (!ret)
dev_kfree_skb(skb);
return ret;
}
static int
free_teimanager(struct manager *mgr)
{
struct layer2 *l2, *nl2;
test_and_clear_bit(OPTION_L1_HOLD, &mgr->options);
if (test_bit(MGR_OPT_NETWORK, &mgr->options)) {
		/* not locked; the lock is taken in TEIrelease() */
mgr->up = NULL;
if (test_bit(OPTION_L2_CLEANUP, &mgr->options)) {
list_for_each_entry_safe(l2, nl2, &mgr->layer2, list) {
put_tei_msg(mgr, ID_REMOVE, 0, l2->tei);
mutex_lock(&mgr->ch.st->lmutex);
list_del(&l2->ch.list);
mutex_unlock(&mgr->ch.st->lmutex);
l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
}
test_and_clear_bit(MGR_OPT_NETWORK, &mgr->options);
} else {
list_for_each_entry_safe(l2, nl2, &mgr->layer2, list) {
l2->up = NULL;
}
}
}
if (test_bit(MGR_OPT_USER, &mgr->options)) {
if (list_empty(&mgr->layer2))
test_and_clear_bit(MGR_OPT_USER, &mgr->options);
}
mgr->ch.st->dev->D.ctrl(&mgr->ch.st->dev->D, CLOSE_CHANNEL, NULL);
return 0;
}
static int
ctrl_teimanager(struct manager *mgr, void *arg)
{
	/* currently we have only these two options */
unsigned int *val = (unsigned int *)arg;
switch (val[0]) {
case IMCLEAR_L2:
if (val[1])
test_and_set_bit(OPTION_L2_CLEANUP, &mgr->options);
else
test_and_clear_bit(OPTION_L2_CLEANUP, &mgr->options);
break;
case IMHOLD_L1:
if (val[1])
test_and_set_bit(OPTION_L1_HOLD, &mgr->options);
else
test_and_clear_bit(OPTION_L1_HOLD, &mgr->options);
break;
default:
return -EINVAL;
}
return 0;
}
/* This function creates an L2 for a fixed TEI in NT mode */
static int
check_data(struct manager *mgr, struct sk_buff *skb)
{
struct mISDNhead *hh = mISDN_HEAD_P(skb);
int ret, tei, sapi;
struct layer2 *l2;
if (*debug & DEBUG_L2_CTRL)
printk(KERN_DEBUG "%s: prim(%x) id(%x)\n",
__func__, hh->prim, hh->id);
if (test_bit(MGR_OPT_USER, &mgr->options))
return -ENOTCONN;
if (hh->prim != PH_DATA_IND)
return -ENOTCONN;
if (skb->len != 3)
return -ENOTCONN;
if (skb->data[0] & 3) /* EA0 and CR must be 0 */
return -EINVAL;
sapi = skb->data[0] >> 2;
if (!(skb->data[1] & 1)) /* invalid EA1 */
return -EINVAL;
tei = skb->data[1] >> 1;
if (tei > 63) /* not a fixed tei */
return -ENOTCONN;
if ((skb->data[2] & ~0x10) != SABME)
return -ENOTCONN;
/* We got a SABME for a fixed TEI */
if (*debug & DEBUG_L2_CTRL)
printk(KERN_DEBUG "%s: SABME sapi(%d) tei(%d)\n",
__func__, sapi, tei);
l2 = create_new_tei(mgr, tei, sapi);
if (!l2) {
if (*debug & DEBUG_L2_CTRL)
printk(KERN_DEBUG "%s: failed to create new tei\n",
__func__);
return -ENOMEM;
}
ret = l2->ch.send(&l2->ch, skb);
return ret;
}
void
delete_teimanager(struct mISDNchannel *ch)
{
struct manager *mgr;
struct layer2 *l2, *nl2;
mgr = container_of(ch, struct manager, ch);
	/* not locked; the lock is taken in TEIrelease() */
list_for_each_entry_safe(l2, nl2, &mgr->layer2, list) {
mutex_lock(&mgr->ch.st->lmutex);
list_del(&l2->ch.list);
mutex_unlock(&mgr->ch.st->lmutex);
l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
}
list_del(&mgr->ch.list);
list_del(&mgr->bcast.list);
skb_queue_purge(&mgr->sendq);
kfree(mgr);
}
static int
mgr_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct manager *mgr;
int ret = -EINVAL;
mgr = container_of(ch, struct manager, ch);
if (*debug & DEBUG_L2_CTRL)
printk(KERN_DEBUG "%s(%x, %p)\n", __func__, cmd, arg);
switch (cmd) {
case OPEN_CHANNEL:
ret = create_teimgr(mgr, arg);
break;
case CLOSE_CHANNEL:
ret = free_teimanager(mgr);
break;
case CONTROL_CHANNEL:
ret = ctrl_teimanager(mgr, arg);
break;
case CHECK_DATA:
ret = check_data(mgr, arg);
break;
}
return ret;
}
static int
mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct manager *mgr = container_of(ch, struct manager, bcast);
struct mISDNhead *hhc, *hh = mISDN_HEAD_P(skb);
struct sk_buff *cskb = NULL;
struct layer2 *l2;
u_long flags;
int ret;
read_lock_irqsave(&mgr->lock, flags);
list_for_each_entry(l2, &mgr->layer2, list) {
if ((hh->id & MISDN_ID_SAPI_MASK) ==
(l2->ch.addr & MISDN_ID_SAPI_MASK)) {
if (list_is_last(&l2->list, &mgr->layer2)) {
cskb = skb;
skb = NULL;
} else {
if (!cskb)
cskb = skb_copy(skb, GFP_ATOMIC);
}
if (cskb) {
hhc = mISDN_HEAD_P(cskb);
/* save original header behind normal header */
hhc++;
*hhc = *hh;
hhc--;
hhc->prim = DL_INTERN_MSG;
hhc->id = l2->ch.nr;
ret = ch->st->own.recv(&ch->st->own, cskb);
if (ret) {
if (*debug & DEBUG_SEND_ERR)
printk(KERN_DEBUG
"%s ch%d prim(%x) addr(%x)"
" err %d\n",
__func__, l2->ch.nr,
hh->prim, l2->ch.addr, ret);
} else
cskb = NULL;
} else {
printk(KERN_WARNING "%s ch%d addr %x no mem\n",
__func__, ch->nr, ch->addr);
goto out;
}
}
}
out:
read_unlock_irqrestore(&mgr->lock, flags);
dev_kfree_skb(cskb);
dev_kfree_skb(skb);
return 0;
}
static int
mgr_bcast_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
return -EINVAL;
}
int
create_teimanager(struct mISDNdevice *dev)
{
struct manager *mgr;
mgr = kzalloc(sizeof(struct manager), GFP_KERNEL);
if (!mgr)
return -ENOMEM;
INIT_LIST_HEAD(&mgr->layer2);
rwlock_init(&mgr->lock);
skb_queue_head_init(&mgr->sendq);
mgr->nextid = 1;
mgr->lastid = MISDN_ID_NONE;
mgr->ch.send = mgr_send;
mgr->ch.ctrl = mgr_ctrl;
mgr->ch.st = dev->D.st;
set_channel_address(&mgr->ch, TEI_SAPI, GROUP_TEI);
add_layer2(&mgr->ch, dev->D.st);
mgr->bcast.send = mgr_bcast;
mgr->bcast.ctrl = mgr_bcast_ctrl;
mgr->bcast.st = dev->D.st;
set_channel_address(&mgr->bcast, 0, GROUP_TEI);
add_layer2(&mgr->bcast, dev->D.st);
mgr->deact.debug = *debug & DEBUG_MANAGER;
mgr->deact.userdata = mgr;
mgr->deact.printdebug = da_debug;
mgr->deact.fsm = &deactfsm;
mgr->deact.state = ST_L1_DEACT;
mISDN_FsmInitTimer(&mgr->deact, &mgr->datimer);
dev->teimgr = &mgr->ch;
return 0;
}
int TEIInit(u_int *deb)
{
int res;
debug = deb;
teifsmu.state_count = TEI_STATE_COUNT;
teifsmu.event_count = TEI_EVENT_COUNT;
teifsmu.strEvent = strTeiEvent;
teifsmu.strState = strTeiState;
res = mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
if (res)
goto error;
teifsmn.state_count = TEI_STATE_COUNT;
teifsmn.event_count = TEI_EVENT_COUNT;
teifsmn.strEvent = strTeiEvent;
teifsmn.strState = strTeiState;
res = mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
if (res)
goto error_smn;
deactfsm.state_count = DEACT_STATE_COUNT;
deactfsm.event_count = DEACT_EVENT_COUNT;
deactfsm.strEvent = strDeactEvent;
deactfsm.strState = strDeactState;
res = mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
if (res)
goto error_deact;
return 0;
error_deact:
mISDN_FsmFree(&teifsmn);
error_smn:
mISDN_FsmFree(&teifsmu);
error:
return res;
}
void TEIFree(void)
{
mISDN_FsmFree(&teifsmu);
mISDN_FsmFree(&teifsmn);
mISDN_FsmFree(&deactfsm);
}
| linux-master | drivers/isdn/mISDN/tei.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* finite state machine implementation
*
* Author Karsten Keil <[email protected]>
*
* Thanks to Jan den Ouden
* Fritz Elfert
* Copyright 2008 by Karsten Keil <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include "fsm.h"
#define FSM_TIMER_DEBUG 0
int
mISDN_FsmNew(struct Fsm *fsm,
struct FsmNode *fnlist, int fncount)
{
int i;
fsm->jumpmatrix =
kzalloc(array3_size(sizeof(FSMFNPTR), fsm->state_count,
fsm->event_count),
GFP_KERNEL);
if (fsm->jumpmatrix == NULL)
return -ENOMEM;
for (i = 0; i < fncount; i++)
if ((fnlist[i].state >= fsm->state_count) ||
(fnlist[i].event >= fsm->event_count)) {
printk(KERN_ERR
"mISDN_FsmNew Error: %d st(%ld/%ld) ev(%ld/%ld)\n",
i, (long)fnlist[i].state, (long)fsm->state_count,
(long)fnlist[i].event, (long)fsm->event_count);
} else
fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
fnlist[i].state] = (FSMFNPTR) fnlist[i].routine;
return 0;
}
EXPORT_SYMBOL(mISDN_FsmNew);
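/*
 * The jump matrix allocated above is a flat array of state_count *
 * event_count function pointers, indexed as [state_count * event + state].
 * For example, with the TEI FSM sizes (3 states, 9 events) the handler for
 * state 2 / event 5 sits at index 3 * 5 + 2 = 17; cells left NULL by
 * kzalloc() make mISDN_FsmEvent() report "no action".
 */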
void
mISDN_FsmFree(struct Fsm *fsm)
{
kfree((void *) fsm->jumpmatrix);
}
EXPORT_SYMBOL(mISDN_FsmFree);
int
mISDN_FsmEvent(struct FsmInst *fi, int event, void *arg)
{
FSMFNPTR r;
if ((fi->state >= fi->fsm->state_count) ||
(event >= fi->fsm->event_count)) {
printk(KERN_ERR
"mISDN_FsmEvent Error st(%ld/%ld) ev(%d/%ld)\n",
(long)fi->state, (long)fi->fsm->state_count, event,
(long)fi->fsm->event_count);
return 1;
}
r = fi->fsm->jumpmatrix[fi->fsm->state_count * event + fi->state];
if (r) {
if (fi->debug)
fi->printdebug(fi, "State %s Event %s",
fi->fsm->strState[fi->state],
fi->fsm->strEvent[event]);
r(fi, event, arg);
return 0;
} else {
if (fi->debug)
fi->printdebug(fi, "State %s Event %s no action",
fi->fsm->strState[fi->state],
fi->fsm->strEvent[event]);
return 1;
}
}
EXPORT_SYMBOL(mISDN_FsmEvent);
void
mISDN_FsmChangeState(struct FsmInst *fi, int newstate)
{
fi->state = newstate;
if (fi->debug)
fi->printdebug(fi, "ChangeState %s",
fi->fsm->strState[newstate]);
}
EXPORT_SYMBOL(mISDN_FsmChangeState);
static void
FsmExpireTimer(struct timer_list *t)
{
struct FsmTimer *ft = from_timer(ft, t, tl);
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft);
#endif
mISDN_FsmEvent(ft->fi, ft->event, ft->arg);
}
void
mISDN_FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft)
{
ft->fi = fi;
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "mISDN_FsmInitTimer %lx", (long) ft);
#endif
timer_setup(&ft->tl, FsmExpireTimer, 0);
}
EXPORT_SYMBOL(mISDN_FsmInitTimer);
void
mISDN_FsmDelTimer(struct FsmTimer *ft, int where)
{
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "mISDN_FsmDelTimer %lx %d",
(long) ft, where);
#endif
del_timer(&ft->tl);
}
EXPORT_SYMBOL(mISDN_FsmDelTimer);
int
mISDN_FsmAddTimer(struct FsmTimer *ft,
int millisec, int event, void *arg, int where)
{
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "mISDN_FsmAddTimer %lx %d %d",
(long) ft, millisec, where);
#endif
if (timer_pending(&ft->tl)) {
if (ft->fi->debug) {
printk(KERN_WARNING
"mISDN_FsmAddTimer: timer already active!\n");
ft->fi->printdebug(ft->fi,
"mISDN_FsmAddTimer already active!");
}
return -1;
}
ft->event = event;
ft->arg = arg;
ft->tl.expires = jiffies + (millisec * HZ) / 1000;
add_timer(&ft->tl);
return 0;
}
EXPORT_SYMBOL(mISDN_FsmAddTimer);
void
mISDN_FsmRestartTimer(struct FsmTimer *ft,
int millisec, int event, void *arg, int where)
{
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "mISDN_FsmRestartTimer %lx %d %d",
(long) ft, millisec, where);
#endif
if (timer_pending(&ft->tl))
del_timer(&ft->tl);
ft->event = event;
ft->arg = arg;
ft->tl.expires = jiffies + (millisec * HZ) / 1000;
add_timer(&ft->tl);
}
EXPORT_SYMBOL(mISDN_FsmRestartTimer);
| linux-master | drivers/isdn/mISDN/fsm.c |
/*
 * Audio crossconnecting/conferencing (hardware level).
*
* Copyright 2002 by Andreas Eversberg ([email protected])
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
/*
* The process of adding and removing parties to/from a conference:
*
* There is a chain of struct dsp_conf which has one or more members in a chain
* of struct dsp_conf_member.
*
* After a party is added, the conference is checked for hardware capability.
* Also if a party is removed, the conference is checked again.
*
 * There are 3 different solutions: -1 = software, 0 = hardware-crossconnect,
 * 1-n = hardware-conference. The n gives the conference number.
*
* Depending on the change after removal or insertion of a party, hardware
* commands are given.
*
* The current solution is stored within the struct dsp_conf entry.
*/
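/*
 * A minimal sketch of the solution encoding described above (illustrative
 * only, not used by the driver; the real decision is made in
 * dsp_cmx_hardware() below and also depends on PCM/HFC capabilities).
 * The helper name and parameters are made up for the example and the
 * block is kept inside #if 0 so it is never compiled.
 */
#if 0
static int cmx_solution(int members, int hw_possible, int hfc_unit)
{
	if (!hw_possible)
		return -1;		/* software mixing */
	if (members == 2)
		return 0;		/* hardware crossconnect */
	return hfc_unit;		/* 1..n: hardware conference number */
}
#endif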
/*
* HOW THE CMX WORKS:
*
 * There are 3 types of interaction: One member is alone; in this case only
 * data flows from the upper to the lower layer.
* Two members will also exchange their data so they are crossconnected.
* Three or more members will be added in a conference and will hear each
* other but will not receive their own speech (echo) if not enabled.
*
* Features of CMX are:
* - Crossconnecting or even conference, if more than two members are together.
* - Force mixing of transmit data with other crossconnect/conference members.
* - Echo generation to benchmark the delay of audio processing.
* - Use hardware to minimize cpu load, disable FIFO load and minimize delay.
* - Dejittering and clock generation.
*
* There are 2 buffers:
*
*
* RX-Buffer
* R W
* | |
* ----------------+-------------+-------------------
*
* The rx-buffer is a ring buffer used to store the received data for each
* individual member. This is only the case if data needs to be dejittered
* or in case of a conference where different clocks require reclocking.
* The transmit-clock (R) will read the buffer.
* If the clock overruns the write-pointer, we will have a buffer underrun.
* If the write pointer always has a certain distance from the transmit-
* clock, we will have a delay. The delay will dynamically be increased and
* reduced.
*
*
* TX-Buffer
* R W
* | |
* -----------------+--------+-----------------------
*
* The tx-buffer is a ring buffer to queue the transmit data from user space
* until it will be mixed or sent. There are two pointers, R and W. If the write
* pointer W would reach or overrun R, the buffer would overrun. In this case
* (some) data is dropped so that it will not overrun.
 * Additionally, dynamic dejittering can be enabled. This allows data from
 * user space that has jitter and a different clock source.
*
*
* Clock:
*
 * A clock is not required if the data source has exactly one clock. In this
* case the data source is forwarded to the destination.
*
 * A clock is required because the data source
* - has multiple clocks.
* - has no usable clock due to jitter or packet loss (VoIP).
* In this case the system's clock is used. The clock resolution depends on
 * the jiffy resolution.
*
* If a member joins a conference:
*
 * - If a member joins, its rx_buff is set to silence and the read pointer is
 * set to the transmit clock.
*
* The procedure of received data from card is explained in cmx_receive.
* The procedure of received data from user space is explained in cmx_transmit.
* The procedure of transmit data to card is cmx_send.
*
*
* Interaction with other features:
*
* DTMF:
* DTMF decoding is done before the data is crossconnected.
*
* Volume change:
* Changing rx-volume is done before the data is crossconnected. The tx-volume
* must be changed whenever data is transmitted to the card by the cmx.
*
* Tones:
* If a tone is enabled, it will be processed whenever data is transmitted to
* the card. It will replace the tx-data from the user space.
 * If tones are generated by hardware, this conference member is removed for
 * the duration of the tone.
*
* Disable rx-data:
* If cmx is realized in hardware, rx data will be disabled if requested by
* the upper layer. If dtmf decoding is done by software and enabled, rx data
 * will not be disabled but withheld from the upper layer.
*
* HFC conference engine:
* If it is possible to realize all features using hardware, hardware will be
* used if not forbidden by control command. Disabling rx-data provides
 * completely traffic-free audio processing (except for the quick 1-frame
 * upload of a tone loop, done only once for a new tone).
*
*/
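/*
 * Sketch of the ring buffer pointer arithmetic described above
 * (illustrative only, not used by the driver).  It assumes dsp.h defines
 * CMX_BUFF_MASK as CMX_BUFF_SIZE - 1 for a power-of-two buffer; the
 * helper name is made up for the example and the block is kept inside
 * #if 0 so it is never compiled.
 */
#if 0
static inline int cmx_buff_fill(int w, int r)
{
	/* samples queued between read pointer R and write pointer W,
	 * correct across the wrap-around of the ring buffer */
	return (w - r) & CMX_BUFF_MASK;
}
#endif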
/* delay.h is required for hw_lock.h */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
#include "core.h"
#include "dsp.h"
/*
* debugging of multi party conference,
* by using conference even with two members
*/
/* #define CMX_CONF_DEBUG */
/*#define CMX_DEBUG * massive read/write pointer output */
/*#define CMX_DELAY_DEBUG * gives rx-buffer delay overview */
/*#define CMX_TX_DEBUG * massive read/write on tx-buffer with content */
/*
* debug cmx memory structure
*/
void
dsp_cmx_debug(struct dsp *dsp)
{
struct dsp_conf *conf;
struct dsp_conf_member *member;
struct dsp *odsp;
printk(KERN_DEBUG "-----Current DSP\n");
list_for_each_entry(odsp, &dsp_ilist, list) {
printk(KERN_DEBUG "* %s hardecho=%d softecho=%d txmix=%d",
odsp->name, odsp->echo.hardware, odsp->echo.software,
odsp->tx_mix);
if (odsp->conf)
printk(" (Conf %d)", odsp->conf->id);
if (dsp == odsp)
printk(" *this*");
printk("\n");
}
printk(KERN_DEBUG "-----Current Conf:\n");
list_for_each_entry(conf, &conf_ilist, list) {
printk(KERN_DEBUG "* Conf %d (%p)\n", conf->id, conf);
list_for_each_entry(member, &conf->mlist, list) {
printk(KERN_DEBUG
" - member = %s (slot_tx %d, bank_tx %d, "
"slot_rx %d, bank_rx %d hfc_conf %d "
"tx_data %d rx_is_off %d)%s\n",
member->dsp->name, member->dsp->pcm_slot_tx,
member->dsp->pcm_bank_tx, member->dsp->pcm_slot_rx,
member->dsp->pcm_bank_rx, member->dsp->hfc_conf,
member->dsp->tx_data, member->dsp->rx_is_off,
(member->dsp == dsp) ? " *this*" : "");
}
}
printk(KERN_DEBUG "-----end\n");
}
/*
* search conference
*/
static struct dsp_conf *
dsp_cmx_search_conf(u32 id)
{
struct dsp_conf *conf;
if (!id) {
printk(KERN_WARNING "%s: conference ID is 0.\n", __func__);
return NULL;
}
/* search conference */
list_for_each_entry(conf, &conf_ilist, list)
if (conf->id == id)
return conf;
return NULL;
}
/*
* add member to conference
*/
static int
dsp_cmx_add_conf_member(struct dsp *dsp, struct dsp_conf *conf)
{
struct dsp_conf_member *member;
if (!conf || !dsp) {
printk(KERN_WARNING "%s: conf or dsp is 0.\n", __func__);
return -EINVAL;
}
if (dsp->member) {
printk(KERN_WARNING "%s: dsp is already member in a conf.\n",
__func__);
return -EINVAL;
}
if (dsp->conf) {
printk(KERN_WARNING "%s: dsp is already in a conf.\n",
__func__);
return -EINVAL;
}
member = kzalloc(sizeof(struct dsp_conf_member), GFP_ATOMIC);
if (!member) {
printk(KERN_ERR "kzalloc struct dsp_conf_member failed\n");
return -ENOMEM;
}
member->dsp = dsp;
/* clear rx buffer */
memset(dsp->rx_buff, dsp_silence, sizeof(dsp->rx_buff));
dsp->rx_init = 1; /* rx_W and rx_R will be adjusted on first frame */
dsp->rx_W = 0;
dsp->rx_R = 0;
list_add_tail(&member->list, &conf->mlist);
dsp->conf = conf;
dsp->member = member;
return 0;
}
/*
* del member from conference
*/
int
dsp_cmx_del_conf_member(struct dsp *dsp)
{
struct dsp_conf_member *member;
if (!dsp) {
printk(KERN_WARNING "%s: dsp is 0.\n",
__func__);
return -EINVAL;
}
if (!dsp->conf) {
printk(KERN_WARNING "%s: dsp is not in a conf.\n",
__func__);
return -EINVAL;
}
if (list_empty(&dsp->conf->mlist)) {
printk(KERN_WARNING "%s: dsp has linked an empty conf.\n",
__func__);
return -EINVAL;
}
/* find us in conf */
list_for_each_entry(member, &dsp->conf->mlist, list) {
if (member->dsp == dsp) {
list_del(&member->list);
dsp->conf = NULL;
dsp->member = NULL;
kfree(member);
return 0;
}
}
printk(KERN_WARNING
"%s: dsp is not present in its own conf_member list.\n",
__func__);
return -EINVAL;
}
/*
* new conference
*/
static struct dsp_conf
*dsp_cmx_new_conf(u32 id)
{
struct dsp_conf *conf;
if (!id) {
printk(KERN_WARNING "%s: id is 0.\n",
__func__);
return NULL;
}
conf = kzalloc(sizeof(struct dsp_conf), GFP_ATOMIC);
if (!conf) {
printk(KERN_ERR "kzalloc struct dsp_conf failed\n");
return NULL;
}
INIT_LIST_HEAD(&conf->mlist);
conf->id = id;
list_add_tail(&conf->list, &conf_ilist);
return conf;
}
/*
* del conference
*/
int
dsp_cmx_del_conf(struct dsp_conf *conf)
{
if (!conf) {
printk(KERN_WARNING "%s: conf is null.\n",
__func__);
return -EINVAL;
}
if (!list_empty(&conf->mlist)) {
printk(KERN_WARNING "%s: conf not empty.\n",
__func__);
return -EINVAL;
}
list_del(&conf->list);
kfree(conf);
return 0;
}
/*
* send HW message to hfc card
*/
static void
dsp_cmx_hw_message(struct dsp *dsp, u32 message, u32 param1, u32 param2,
u32 param3, u32 param4)
{
struct mISDN_ctrl_req cq;
memset(&cq, 0, sizeof(cq));
cq.op = message;
cq.p1 = param1 | (param2 << 8);
cq.p2 = param3 | (param4 << 8);
if (dsp->ch.peer)
dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq);
}
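/*
 * Note on the packing above: cq.p1 carries param1 in its low byte and
 * param2 in the next byte, and cq.p2 does the same for param3/param4.
 * The callers below use this as slot_tx | (bank_tx << 8) and
 * slot_rx | (bank_rx << 8) for MISDN_CTRL_HFC_PCM_CONN.
 */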
/*
* do hardware update and set the software/hardware flag
*
 * Either a conference or a dsp instance can be given.
 * If only a dsp instance is given, the instance is not associated with a conf
 * and is therefore removed. If a conference is given, the dsp is expected to
 * be a member of that conference.
*/
void
dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp)
{
struct dsp_conf_member *member, *nextm;
struct dsp *finddsp;
int memb = 0, i, ii, i1, i2;
int freeunits[8];
u_char freeslots[256];
int same_hfc = -1, same_pcm = -1, current_conf = -1,
all_conf = 1, tx_data = 0;
/* dsp gets updated (no conf) */
if (!conf) {
if (!dsp)
return;
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG "%s checking dsp %s\n",
__func__, dsp->name);
one_member:
/* remove HFC conference if enabled */
if (dsp->hfc_conf >= 0) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s removing %s from HFC conf %d "
"because dsp is split\n", __func__,
dsp->name, dsp->hfc_conf);
dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_CONF_SPLIT,
0, 0, 0, 0);
dsp->hfc_conf = -1;
}
/* process hw echo */
if (dsp->features.pcm_banks < 1)
return;
if (!dsp->echo.software && !dsp->echo.hardware) {
/* NO ECHO: remove PCM slot if assigned */
if (dsp->pcm_slot_tx >= 0 || dsp->pcm_slot_rx >= 0) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG "%s removing %s from"
" PCM slot %d (TX) %d (RX) because"
" dsp is split (no echo)\n",
__func__, dsp->name,
dsp->pcm_slot_tx, dsp->pcm_slot_rx);
dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_PCM_DISC,
0, 0, 0, 0);
dsp->pcm_slot_tx = -1;
dsp->pcm_bank_tx = -1;
dsp->pcm_slot_rx = -1;
dsp->pcm_bank_rx = -1;
}
return;
}
/* echo is enabled, find out if we use soft or hardware */
dsp->echo.software = dsp->tx_data;
dsp->echo.hardware = 0;
/* ECHO: already echo */
if (dsp->pcm_slot_tx >= 0 && dsp->pcm_slot_rx < 0 &&
dsp->pcm_bank_tx == 2 && dsp->pcm_bank_rx == 2) {
dsp->echo.hardware = 1;
return;
}
/* ECHO: if slot already assigned */
if (dsp->pcm_slot_tx >= 0) {
dsp->pcm_slot_rx = dsp->pcm_slot_tx;
dsp->pcm_bank_tx = 2; /* 2 means loop */
dsp->pcm_bank_rx = 2;
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s refresh %s for echo using slot %d\n",
__func__, dsp->name,
dsp->pcm_slot_tx);
dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_PCM_CONN,
dsp->pcm_slot_tx, 2, dsp->pcm_slot_rx, 2);
dsp->echo.hardware = 1;
return;
}
/* ECHO: find slot */
dsp->pcm_slot_tx = -1;
dsp->pcm_slot_rx = -1;
memset(freeslots, 1, sizeof(freeslots));
list_for_each_entry(finddsp, &dsp_ilist, list) {
if (finddsp->features.pcm_id == dsp->features.pcm_id) {
if (finddsp->pcm_slot_rx >= 0 &&
finddsp->pcm_slot_rx < sizeof(freeslots))
freeslots[finddsp->pcm_slot_rx] = 0;
if (finddsp->pcm_slot_tx >= 0 &&
finddsp->pcm_slot_tx < sizeof(freeslots))
freeslots[finddsp->pcm_slot_tx] = 0;
}
}
i = 0;
ii = dsp->features.pcm_slots;
while (i < ii) {
if (freeslots[i])
break;
i++;
}
if (i == ii) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s no slot available for echo\n",
__func__);
/* no more slots available */
dsp->echo.software = 1;
return;
}
/* assign free slot */
dsp->pcm_slot_tx = i;
dsp->pcm_slot_rx = i;
dsp->pcm_bank_tx = 2; /* loop */
dsp->pcm_bank_rx = 2;
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s assign echo for %s using slot %d\n",
__func__, dsp->name, dsp->pcm_slot_tx);
dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_PCM_CONN,
dsp->pcm_slot_tx, 2, dsp->pcm_slot_rx, 2);
dsp->echo.hardware = 1;
return;
}
/* conf gets updated (all members) */
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG "%s checking conference %d\n",
__func__, conf->id);
if (list_empty(&conf->mlist)) {
printk(KERN_ERR "%s: conference without members\n",
__func__);
return;
}
member = list_entry(conf->mlist.next, struct dsp_conf_member, list);
same_hfc = member->dsp->features.hfc_id;
same_pcm = member->dsp->features.pcm_id;
/* check all members in our conference */
list_for_each_entry(member, &conf->mlist, list) {
/* check if member uses mixing */
if (member->dsp->tx_mix) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s dsp %s cannot form a conf, because "
"tx_mix is turned on\n", __func__,
member->dsp->name);
conf_software:
list_for_each_entry(member, &conf->mlist, list) {
dsp = member->dsp;
/* remove HFC conference if enabled */
if (dsp->hfc_conf >= 0) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s removing %s from HFC "
"conf %d because not "
"possible with hardware\n",
__func__,
dsp->name,
dsp->hfc_conf);
dsp_cmx_hw_message(dsp,
MISDN_CTRL_HFC_CONF_SPLIT,
0, 0, 0, 0);
dsp->hfc_conf = -1;
}
/* remove PCM slot if assigned */
if (dsp->pcm_slot_tx >= 0 ||
dsp->pcm_slot_rx >= 0) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG "%s removing "
"%s from PCM slot %d (TX)"
" slot %d (RX) because not"
" possible with hardware\n",
__func__,
dsp->name,
dsp->pcm_slot_tx,
dsp->pcm_slot_rx);
dsp_cmx_hw_message(dsp,
MISDN_CTRL_HFC_PCM_DISC,
0, 0, 0, 0);
dsp->pcm_slot_tx = -1;
dsp->pcm_bank_tx = -1;
dsp->pcm_slot_rx = -1;
dsp->pcm_bank_rx = -1;
}
}
conf->hardware = 0;
conf->software = 1;
return;
}
/* check if member has echo turned on */
if (member->dsp->echo.hardware || member->dsp->echo.software) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s dsp %s cannot form a conf, because "
"echo is turned on\n", __func__,
member->dsp->name);
goto conf_software;
}
/* check if member has tx_mix turned on */
if (member->dsp->tx_mix) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s dsp %s cannot form a conf, because "
"tx_mix is turned on\n",
__func__, member->dsp->name);
goto conf_software;
}
		/* check if member changes volume at an unsupported level */
if (member->dsp->tx_volume) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s dsp %s cannot form a conf, because "
"tx_volume is changed\n",
__func__, member->dsp->name);
goto conf_software;
}
if (member->dsp->rx_volume) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s dsp %s cannot form a conf, because "
"rx_volume is changed\n",
__func__, member->dsp->name);
goto conf_software;
}
/* check if tx-data turned on */
if (member->dsp->tx_data) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s dsp %s tx_data is turned on\n",
__func__, member->dsp->name);
tx_data = 1;
}
/* check if pipeline exists */
if (member->dsp->pipeline.inuse) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s dsp %s cannot form a conf, because "
"pipeline exists\n", __func__,
member->dsp->name);
goto conf_software;
}
/* check if encryption is enabled */
if (member->dsp->bf_enable) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG "%s dsp %s cannot form a "
"conf, because encryption is enabled\n",
__func__, member->dsp->name);
goto conf_software;
}
/* check if member is on a card with PCM support */
if (member->dsp->features.pcm_id < 0) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s dsp %s cannot form a conf, because "
"dsp has no PCM bus\n",
__func__, member->dsp->name);
goto conf_software;
}
/* check if relations are on the same PCM bus */
if (member->dsp->features.pcm_id != same_pcm) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s dsp %s cannot form a conf, because "
"dsp is on a different PCM bus than the "
"first dsp\n",
__func__, member->dsp->name);
goto conf_software;
}
/* determine if members are on the same hfc chip */
if (same_hfc != member->dsp->features.hfc_id)
same_hfc = -1;
/* if there are members already in a conference */
if (current_conf < 0 && member->dsp->hfc_conf >= 0)
current_conf = member->dsp->hfc_conf;
/* if any member is not in a conference */
if (member->dsp->hfc_conf < 0)
all_conf = 0;
memb++;
}
/* if no member, this is an error */
if (memb < 1)
return;
/* one member */
if (memb == 1) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s conf %d cannot form a HW conference, "
"because dsp is alone\n", __func__, conf->id);
conf->hardware = 0;
conf->software = 0;
member = list_entry(conf->mlist.next, struct dsp_conf_member,
list);
dsp = member->dsp;
goto one_member;
}
/*
	 * OK, now we are sure that all members are on the same PCM bus.
	 * Next we check whether there are only two members, so we can do a
	 * crossconnection, which has no limitations.
*/
/* if we have only two members */
if (memb == 2) {
member = list_entry(conf->mlist.next, struct dsp_conf_member,
list);
nextm = list_entry(member->list.next, struct dsp_conf_member,
list);
/* remove HFC conference if enabled */
if (member->dsp->hfc_conf >= 0) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s removing %s from HFC conf %d because "
"two parties require only a PCM slot\n",
__func__, member->dsp->name,
member->dsp->hfc_conf);
dsp_cmx_hw_message(member->dsp,
MISDN_CTRL_HFC_CONF_SPLIT, 0, 0, 0, 0);
member->dsp->hfc_conf = -1;
}
if (nextm->dsp->hfc_conf >= 0) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s removing %s from HFC conf %d because "
"two parties require only a PCM slot\n",
__func__, nextm->dsp->name,
nextm->dsp->hfc_conf);
dsp_cmx_hw_message(nextm->dsp,
MISDN_CTRL_HFC_CONF_SPLIT, 0, 0, 0, 0);
nextm->dsp->hfc_conf = -1;
}
/* if members have two banks (and not on the same chip) */
if (member->dsp->features.pcm_banks > 1 &&
nextm->dsp->features.pcm_banks > 1 &&
member->dsp->features.hfc_id !=
nextm->dsp->features.hfc_id) {
/* if both members have same slots with crossed banks */
if (member->dsp->pcm_slot_tx >= 0 &&
member->dsp->pcm_slot_rx >= 0 &&
nextm->dsp->pcm_slot_tx >= 0 &&
nextm->dsp->pcm_slot_rx >= 0 &&
nextm->dsp->pcm_slot_tx ==
member->dsp->pcm_slot_rx &&
nextm->dsp->pcm_slot_rx ==
member->dsp->pcm_slot_tx &&
nextm->dsp->pcm_slot_tx ==
member->dsp->pcm_slot_tx &&
member->dsp->pcm_bank_tx !=
member->dsp->pcm_bank_rx &&
nextm->dsp->pcm_bank_tx !=
nextm->dsp->pcm_bank_rx) {
/* all members have same slot */
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s dsp %s & %s stay joined on "
"PCM slot %d bank %d (TX) bank %d "
"(RX) (on different chips)\n",
__func__,
member->dsp->name,
nextm->dsp->name,
member->dsp->pcm_slot_tx,
member->dsp->pcm_bank_tx,
member->dsp->pcm_bank_rx);
conf->hardware = 1;
conf->software = tx_data;
return;
}
/* find a new slot */
memset(freeslots, 1, sizeof(freeslots));
list_for_each_entry(dsp, &dsp_ilist, list) {
if (dsp != member->dsp &&
dsp != nextm->dsp &&
member->dsp->features.pcm_id ==
dsp->features.pcm_id) {
if (dsp->pcm_slot_rx >= 0 &&
dsp->pcm_slot_rx <
sizeof(freeslots))
freeslots[dsp->pcm_slot_rx] = 0;
if (dsp->pcm_slot_tx >= 0 &&
dsp->pcm_slot_tx <
sizeof(freeslots))
freeslots[dsp->pcm_slot_tx] = 0;
}
}
i = 0;
ii = member->dsp->features.pcm_slots;
while (i < ii) {
if (freeslots[i])
break;
i++;
}
if (i == ii) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s no slot available for "
"%s & %s\n", __func__,
member->dsp->name,
nextm->dsp->name);
/* no more slots available */
goto conf_software;
}
/* assign free slot */
member->dsp->pcm_slot_tx = i;
member->dsp->pcm_slot_rx = i;
nextm->dsp->pcm_slot_tx = i;
nextm->dsp->pcm_slot_rx = i;
member->dsp->pcm_bank_rx = 0;
member->dsp->pcm_bank_tx = 1;
nextm->dsp->pcm_bank_rx = 1;
nextm->dsp->pcm_bank_tx = 0;
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s adding %s & %s to new PCM slot %d "
"(TX and RX on different chips) because "
"both members have not same slots\n",
__func__,
member->dsp->name,
nextm->dsp->name,
member->dsp->pcm_slot_tx);
dsp_cmx_hw_message(member->dsp, MISDN_CTRL_HFC_PCM_CONN,
member->dsp->pcm_slot_tx, member->dsp->pcm_bank_tx,
member->dsp->pcm_slot_rx, member->dsp->pcm_bank_rx);
dsp_cmx_hw_message(nextm->dsp, MISDN_CTRL_HFC_PCM_CONN,
nextm->dsp->pcm_slot_tx, nextm->dsp->pcm_bank_tx,
nextm->dsp->pcm_slot_rx, nextm->dsp->pcm_bank_rx);
conf->hardware = 1;
conf->software = tx_data;
return;
/* if members have one bank (or on the same chip) */
} else {
/* if both members have different crossed slots */
if (member->dsp->pcm_slot_tx >= 0 &&
member->dsp->pcm_slot_rx >= 0 &&
nextm->dsp->pcm_slot_tx >= 0 &&
nextm->dsp->pcm_slot_rx >= 0 &&
nextm->dsp->pcm_slot_tx ==
member->dsp->pcm_slot_rx &&
nextm->dsp->pcm_slot_rx ==
member->dsp->pcm_slot_tx &&
member->dsp->pcm_slot_tx !=
member->dsp->pcm_slot_rx &&
member->dsp->pcm_bank_tx == 0 &&
member->dsp->pcm_bank_rx == 0 &&
nextm->dsp->pcm_bank_tx == 0 &&
nextm->dsp->pcm_bank_rx == 0) {
/* all members have same slot */
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s dsp %s & %s stay joined on PCM "
"slot %d (TX) %d (RX) on same chip "
"or one bank PCM)\n", __func__,
member->dsp->name,
nextm->dsp->name,
member->dsp->pcm_slot_tx,
member->dsp->pcm_slot_rx);
conf->hardware = 1;
conf->software = tx_data;
return;
}
			/* find two new slots */
memset(freeslots, 1, sizeof(freeslots));
list_for_each_entry(dsp, &dsp_ilist, list) {
if (dsp != member->dsp &&
dsp != nextm->dsp &&
member->dsp->features.pcm_id ==
dsp->features.pcm_id) {
if (dsp->pcm_slot_rx >= 0 &&
dsp->pcm_slot_rx <
sizeof(freeslots))
freeslots[dsp->pcm_slot_rx] = 0;
if (dsp->pcm_slot_tx >= 0 &&
dsp->pcm_slot_tx <
sizeof(freeslots))
freeslots[dsp->pcm_slot_tx] = 0;
}
}
i1 = 0;
ii = member->dsp->features.pcm_slots;
while (i1 < ii) {
if (freeslots[i1])
break;
i1++;
}
if (i1 == ii) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s no slot available "
"for %s & %s\n", __func__,
member->dsp->name,
nextm->dsp->name);
/* no more slots available */
goto conf_software;
}
i2 = i1 + 1;
while (i2 < ii) {
if (freeslots[i2])
break;
i2++;
}
if (i2 == ii) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s no slot available "
"for %s & %s\n",
__func__,
member->dsp->name,
nextm->dsp->name);
/* no more slots available */
goto conf_software;
}
/* assign free slots */
member->dsp->pcm_slot_tx = i1;
member->dsp->pcm_slot_rx = i2;
nextm->dsp->pcm_slot_tx = i2;
nextm->dsp->pcm_slot_rx = i1;
member->dsp->pcm_bank_rx = 0;
member->dsp->pcm_bank_tx = 0;
nextm->dsp->pcm_bank_rx = 0;
nextm->dsp->pcm_bank_tx = 0;
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s adding %s & %s to new PCM slot %d "
"(TX) %d (RX) on same chip or one bank "
"PCM, because both members have not "
"crossed slots\n", __func__,
member->dsp->name,
nextm->dsp->name,
member->dsp->pcm_slot_tx,
member->dsp->pcm_slot_rx);
dsp_cmx_hw_message(member->dsp, MISDN_CTRL_HFC_PCM_CONN,
member->dsp->pcm_slot_tx, member->dsp->pcm_bank_tx,
member->dsp->pcm_slot_rx, member->dsp->pcm_bank_rx);
dsp_cmx_hw_message(nextm->dsp, MISDN_CTRL_HFC_PCM_CONN,
nextm->dsp->pcm_slot_tx, nextm->dsp->pcm_bank_tx,
nextm->dsp->pcm_slot_rx, nextm->dsp->pcm_bank_rx);
conf->hardware = 1;
conf->software = tx_data;
return;
}
}
/*
	 * If we have more than two members, we may check whether a conference
	 * unit is available on the chip. All members must also be on the same chip.
*/
/* if not the same HFC chip */
if (same_hfc < 0) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s conference %d cannot be formed, because "
"members are on different chips or not "
"on HFC chip\n",
__func__, conf->id);
goto conf_software;
}
/* for more than two members.. */
/* if all members already have the same conference */
if (all_conf) {
conf->hardware = 1;
conf->software = tx_data;
return;
}
/*
* if there is an existing conference, but not all members have joined
*/
if (current_conf >= 0) {
join_members:
list_for_each_entry(member, &conf->mlist, list) {
/* if no conference engine on our chip, change to
* software */
if (!member->dsp->features.hfc_conf)
goto conf_software;
/* in case of hdlc, change to software */
if (member->dsp->hdlc)
goto conf_software;
/* join to current conference */
if (member->dsp->hfc_conf == current_conf)
continue;
/* get a free timeslot first */
memset(freeslots, 1, sizeof(freeslots));
list_for_each_entry(dsp, &dsp_ilist, list) {
/*
* not checking current member, because
* slot will be overwritten.
*/
if (
dsp != member->dsp &&
/* dsp must be on the same PCM */
member->dsp->features.pcm_id ==
dsp->features.pcm_id) {
/* dsp must be on a slot */
if (dsp->pcm_slot_tx >= 0 &&
dsp->pcm_slot_tx <
sizeof(freeslots))
freeslots[dsp->pcm_slot_tx] = 0;
if (dsp->pcm_slot_rx >= 0 &&
dsp->pcm_slot_rx <
sizeof(freeslots))
freeslots[dsp->pcm_slot_rx] = 0;
}
}
i = 0;
ii = member->dsp->features.pcm_slots;
while (i < ii) {
if (freeslots[i])
break;
i++;
}
if (i == ii) {
/* no more slots available */
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s conference %d cannot be formed,"
" because no slot free\n",
__func__, conf->id);
goto conf_software;
}
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s changing dsp %s to HW conference "
"%d slot %d\n", __func__,
member->dsp->name, current_conf, i);
/* assign free slot & set PCM & join conf */
member->dsp->pcm_slot_tx = i;
member->dsp->pcm_slot_rx = i;
member->dsp->pcm_bank_tx = 2; /* loop */
member->dsp->pcm_bank_rx = 2;
member->dsp->hfc_conf = current_conf;
dsp_cmx_hw_message(member->dsp, MISDN_CTRL_HFC_PCM_CONN,
i, 2, i, 2);
dsp_cmx_hw_message(member->dsp,
MISDN_CTRL_HFC_CONF_JOIN, current_conf, 0, 0, 0);
}
conf->hardware = 1;
conf->software = tx_data;
return;
}
/*
* no member is in a conference yet, so we find a free one
*/
memset(freeunits, 1, sizeof(freeunits));
list_for_each_entry(dsp, &dsp_ilist, list) {
/* dsp must be on the same chip */
if (dsp->features.hfc_id == same_hfc &&
/* dsp must have joined a HW conference */
dsp->hfc_conf >= 0 &&
/* slot must be within range */
dsp->hfc_conf < 8)
freeunits[dsp->hfc_conf] = 0;
}
i = 0;
ii = 8;
while (i < ii) {
if (freeunits[i])
break;
i++;
}
if (i == ii) {
/* no more conferences available */
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"%s conference %d cannot be formed, because "
"no conference number free\n",
__func__, conf->id);
goto conf_software;
}
/* join all members */
current_conf = i;
goto join_members;
}
/*
* conf_id != 0: join or change conference
* conf_id == 0: split from conference if not already split
*/
int
dsp_cmx_conf(struct dsp *dsp, u32 conf_id)
{
int err;
struct dsp_conf *conf;
struct dsp_conf_member *member;
/* if conference doesn't change */
if (dsp->conf_id == conf_id)
return 0;
/* first remove us from current conf */
if (dsp->conf_id) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG "removing us from conference %d\n",
dsp->conf->id);
/* remove us from conf */
conf = dsp->conf;
err = dsp_cmx_del_conf_member(dsp);
if (err)
return err;
dsp->conf_id = 0;
/* update hardware */
dsp_cmx_hardware(NULL, dsp);
/* conf now empty? */
if (list_empty(&conf->mlist)) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"conference is empty, so we remove it.\n");
err = dsp_cmx_del_conf(conf);
if (err)
return err;
} else {
/* update members left on conf */
dsp_cmx_hardware(conf, NULL);
}
}
/* if split */
if (!conf_id)
return 0;
/* now add us to conf */
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG "searching conference %d\n",
conf_id);
conf = dsp_cmx_search_conf(conf_id);
if (!conf) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"conference doesn't exist yet, creating.\n");
/* the conference doesn't exist, so we create */
conf = dsp_cmx_new_conf(conf_id);
if (!conf)
return -EINVAL;
} else if (!list_empty(&conf->mlist)) {
member = list_entry(conf->mlist.next, struct dsp_conf_member,
list);
if (dsp->hdlc && !member->dsp->hdlc) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"cannot join transparent conference.\n");
return -EINVAL;
}
if (!dsp->hdlc && member->dsp->hdlc) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"cannot join hdlc conference.\n");
return -EINVAL;
}
}
/* add conference member */
err = dsp_cmx_add_conf_member(dsp, conf);
if (err)
return err;
dsp->conf_id = conf_id;
/* if we are alone, we do nothing! */
if (list_empty(&conf->mlist)) {
if (dsp_debug & DEBUG_DSP_CMX)
printk(KERN_DEBUG
"we are alone in this conference, so exit.\n");
/* update hardware */
dsp_cmx_hardware(NULL, dsp);
return 0;
}
/* update members on conf */
dsp_cmx_hardware(conf, NULL);
return 0;
}
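/*
* Usage note (illustrative, callers live outside this file): a dsp joins
* conference 5 via dsp_cmx_conf(dsp, 5), leaves any conference via
* dsp_cmx_conf(dsp, 0), and switches conferences by calling it again with
* the new conf_id; the old membership is removed first, as above.
*/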
#ifdef CMX_DELAY_DEBUG
int delaycount;
static void
showdelay(struct dsp *dsp, int samples, int delay)
{
char bar[] = "--------------------------------------------------|";
int sdelay;
delaycount += samples;
if (delaycount < 8000)
return;
delaycount = 0;
sdelay = delay * 50 / (dsp_poll << 2);
printk(KERN_DEBUG "DELAY (%s) %3d >%s\n", dsp->name, delay,
sdelay > 50 ? "..." : bar + 50 - sdelay);
}
#endif
/*
* audio data is received from card
*/
void
dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb)
{
u8 *d, *p;
int len = skb->len;
struct mISDNhead *hh = mISDN_HEAD_P(skb);
int w, i, ii;
/* check if we have something to process */
if (len < 1)
return;
/* half of the buffer should be larger than maximum packet size */
if (len >= CMX_BUFF_HALF) {
printk(KERN_ERR
"%s line %d: packet from card is too large (%d bytes). "
"please make card send smaller packets OR increase "
"CMX_BUFF_SIZE\n", __FILE__, __LINE__, len);
return;
}
/*
* initialize pointers if not already -
* also add delay if requested by PH_SIGNAL
*/
if (dsp->rx_init) {
dsp->rx_init = 0;
if (dsp->features.unordered) {
dsp->rx_R = (hh->id & CMX_BUFF_MASK);
if (dsp->cmx_delay)
dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
& CMX_BUFF_MASK;
else
dsp->rx_W = (dsp->rx_R + (dsp_poll >> 1))
& CMX_BUFF_MASK;
} else {
dsp->rx_R = 0;
if (dsp->cmx_delay)
dsp->rx_W = dsp->cmx_delay;
else
dsp->rx_W = dsp_poll >> 1;
}
}
/* if frame contains time code, write directly */
if (dsp->features.unordered) {
dsp->rx_W = (hh->id & CMX_BUFF_MASK);
/* printk(KERN_DEBUG "%s %08x\n", dsp->name, hh->id); */
}
/*
* if we underrun (or maybe overrun),
* we set our new read pointer, and write silence to buffer
*/
if (((dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK) >= CMX_BUFF_HALF) {
if (dsp_debug & DEBUG_DSP_CLOCK)
printk(KERN_DEBUG
"cmx_receive(dsp=%lx): UNDERRUN (or overrun the "
"maximum delay), adjusting read pointer! "
"(inst %s)\n", (u_long)dsp, dsp->name);
/* flush rx buffer and set delay to dsp_poll / 2 */
if (dsp->features.unordered) {
dsp->rx_R = (hh->id & CMX_BUFF_MASK);
if (dsp->cmx_delay)
dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
& CMX_BUFF_MASK;
else
dsp->rx_W = (dsp->rx_R + (dsp_poll >> 1))
& CMX_BUFF_MASK;
} else {
dsp->rx_R = 0;
if (dsp->cmx_delay)
dsp->rx_W = dsp->cmx_delay;
else
dsp->rx_W = dsp_poll >> 1;
}
memset(dsp->rx_buff, dsp_silence, sizeof(dsp->rx_buff));
}
/* if we have reached double delay, jump back to middle */
if (dsp->cmx_delay)
if (((dsp->rx_W - dsp->rx_R) & CMX_BUFF_MASK) >=
(dsp->cmx_delay << 1)) {
if (dsp_debug & DEBUG_DSP_CLOCK)
printk(KERN_DEBUG
"cmx_receive(dsp=%lx): OVERRUN (because "
"twice the delay is reached), adjusting "
"read pointer! (inst %s)\n",
(u_long)dsp, dsp->name);
/* flush buffer */
if (dsp->features.unordered) {
dsp->rx_R = (hh->id & CMX_BUFF_MASK);
dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
& CMX_BUFF_MASK;
} else {
dsp->rx_R = 0;
dsp->rx_W = dsp->cmx_delay;
}
memset(dsp->rx_buff, dsp_silence, sizeof(dsp->rx_buff));
}
/* show where to write */
#ifdef CMX_DEBUG
printk(KERN_DEBUG
"cmx_receive(dsp=%lx): rx_R(dsp)=%05x rx_W(dsp)=%05x len=%d %s\n",
(u_long)dsp, dsp->rx_R, dsp->rx_W, len, dsp->name);
#endif
/* write data into rx_buffer */
p = skb->data;
d = dsp->rx_buff;
w = dsp->rx_W;
i = 0;
ii = len;
while (i < ii) {
d[w++ & CMX_BUFF_MASK] = *p++;
i++;
}
/* increase write-pointer */
dsp->rx_W = ((dsp->rx_W + len) & CMX_BUFF_MASK);
#ifdef CMX_DELAY_DEBUG
showdelay(dsp, len, (dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK);
#endif
}
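/*
* Implementation note: rx_buff is a power-of-two sized ring buffer, so the
* index arithmetic above wraps by masking with CMX_BUFF_MASK instead of a
* modulo; the current fill level is (rx_W - rx_R) & CMX_BUFF_MASK, and a
* fill of CMX_BUFF_HALF or more is treated as under-/overrun.
*/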
/*
* send (mixed) audio data to card and control jitter
*/
static void
dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members)
{
struct dsp_conf *conf = dsp->conf;
struct dsp *member, *other;
register s32 sample;
u8 *d, *p, *q, *o_q;
struct sk_buff *nskb, *txskb;
int r, rr, t, tt, o_r, o_rr;
int preload = 0;
struct mISDNhead *hh, *thh;
int tx_data_only = 0;
/* don't process if: */
if (!dsp->b_active) { /* if not active */
dsp->last_tx = 0;
return;
}
if (((dsp->conf && dsp->conf->hardware) || /* hardware conf */
dsp->echo.hardware) && /* OR hardware echo */
dsp->tx_R == dsp->tx_W && /* AND no tx-data */
!(dsp->tone.tone && dsp->tone.software)) { /* AND not soft tones */
if (!dsp->tx_data) { /* no tx_data for user space required */
dsp->last_tx = 0;
return;
}
if (dsp->conf && dsp->conf->software && dsp->conf->hardware)
tx_data_only = 1;
if (dsp->echo.software && dsp->echo.hardware)
tx_data_only = 1;
}
#ifdef CMX_DEBUG
printk(KERN_DEBUG
"SEND members=%d dsp=%s, conf=%p, rx_R=%05x rx_W=%05x\n",
members, dsp->name, conf, dsp->rx_R, dsp->rx_W);
#endif
/* preload if we have delay set */
if (dsp->cmx_delay && !dsp->last_tx) {
preload = len;
if (preload < 128)
preload = 128;
}
/* PREPARE RESULT */
nskb = mI_alloc_skb(len + preload, GFP_ATOMIC);
if (!nskb) {
printk(KERN_ERR
"FATAL ERROR in mISDN_dsp.o: cannot alloc %d bytes\n",
len + preload);
return;
}
hh = mISDN_HEAD_P(nskb);
hh->prim = PH_DATA_REQ;
hh->id = 0;
dsp->last_tx = 1;
/* set pointers, indexes and stuff */
member = dsp;
p = dsp->tx_buff; /* transmit data */
q = dsp->rx_buff; /* received data */
d = skb_put(nskb, preload + len); /* result */
t = dsp->tx_R; /* tx-pointers */
tt = dsp->tx_W;
r = dsp->rx_R; /* rx-pointers */
rr = (r + len) & CMX_BUFF_MASK;
/* preload with silence, if required */
if (preload) {
memset(d, dsp_silence, preload);
d += preload;
}
/* PROCESS TONES/TX-DATA ONLY */
if (dsp->tone.tone && dsp->tone.software) {
/* -> copy tone */
dsp_tone_copy(dsp, d, len);
dsp->tx_R = 0; /* clear tx buffer */
dsp->tx_W = 0;
goto send_packet;
}
/* if we have tx-data but do not use mixing */
if (!dsp->tx_mix && t != tt) {
/* -> send tx-data and continue when not enough */
#ifdef CMX_TX_DEBUG
sprintf(debugbuf, "TX sending (%04x-%04x)%p: ", t, tt, p);
#endif
while (r != rr && t != tt) {
#ifdef CMX_TX_DEBUG
if (strlen(debugbuf) < 48)
sprintf(debugbuf + strlen(debugbuf), " %02x",
p[t]);
#endif
*d++ = p[t]; /* write tx_buff */
t = (t + 1) & CMX_BUFF_MASK;
r = (r + 1) & CMX_BUFF_MASK;
}
if (r == rr) {
dsp->tx_R = t;
#ifdef CMX_TX_DEBUG
printk(KERN_DEBUG "%s\n", debugbuf);
#endif
goto send_packet;
}
}
#ifdef CMX_TX_DEBUG
printk(KERN_DEBUG "%s\n", debugbuf);
#endif
/* PROCESS DATA (one member / no conf) */
if (!conf || members <= 1) {
/* -> if echo is NOT enabled */
if (!dsp->echo.software) {
/* -> send tx-data if available or use 0-volume */
while (r != rr && t != tt) {
*d++ = p[t]; /* write tx_buff */
t = (t + 1) & CMX_BUFF_MASK;
r = (r + 1) & CMX_BUFF_MASK;
}
if (r != rr) {
if (dsp_debug & DEBUG_DSP_CLOCK)
printk(KERN_DEBUG "%s: RX empty\n",
__func__);
memset(d, dsp_silence, (rr - r) & CMX_BUFF_MASK);
}
/* -> if echo is enabled */
} else {
/*
* -> mix tx-data with echo if available,
* or use echo only
*/
while (r != rr && t != tt) {
*d++ = dsp_audio_mix_law[(p[t] << 8) | q[r]];
t = (t + 1) & CMX_BUFF_MASK;
r = (r + 1) & CMX_BUFF_MASK;
}
while (r != rr) {
*d++ = q[r]; /* echo */
r = (r + 1) & CMX_BUFF_MASK;
}
}
dsp->tx_R = t;
goto send_packet;
}
/* PROCESS DATA (two members) */
#ifdef CMX_CONF_DEBUG
if (0) {
#else
if (members == 2) {
#endif
/* "other" becomes other party */
other = (list_entry(conf->mlist.next,
struct dsp_conf_member, list))->dsp;
if (other == member)
other = (list_entry(conf->mlist.prev,
struct dsp_conf_member, list))->dsp;
o_q = other->rx_buff; /* received data */
o_rr = (other->rx_R + len) & CMX_BUFF_MASK;
/* end of rx-pointer */
o_r = (o_rr - rr + r) & CMX_BUFF_MASK;
/* start rx-pointer at current read position*/
/* -> if echo is NOT enabled */
if (!dsp->echo.software) {
/*
* -> copy other member's rx-data,
* if tx-data is available, mix
*/
while (o_r != o_rr && t != tt) {
*d++ = dsp_audio_mix_law[(p[t] << 8) | o_q[o_r]];
t = (t + 1) & CMX_BUFF_MASK;
o_r = (o_r + 1) & CMX_BUFF_MASK;
}
while (o_r != o_rr) {
*d++ = o_q[o_r];
o_r = (o_r + 1) & CMX_BUFF_MASK;
}
/* -> if echo is enabled */
} else {
/*
* -> mix other member's rx-data with echo,
* if tx-data is available, mix
*/
while (r != rr && t != tt) {
sample = dsp_audio_law_to_s32[p[t]] +
dsp_audio_law_to_s32[q[r]] +
dsp_audio_law_to_s32[o_q[o_r]];
if (sample < -32768)
sample = -32768;
else if (sample > 32767)
sample = 32767;
*d++ = dsp_audio_s16_to_law[sample & 0xffff];
/* tx-data + rx_data + echo */
t = (t + 1) & CMX_BUFF_MASK;
r = (r + 1) & CMX_BUFF_MASK;
o_r = (o_r + 1) & CMX_BUFF_MASK;
}
while (r != rr) {
*d++ = dsp_audio_mix_law[(q[r] << 8) | o_q[o_r]];
r = (r + 1) & CMX_BUFF_MASK;
o_r = (o_r + 1) & CMX_BUFF_MASK;
}
}
dsp->tx_R = t;
goto send_packet;
}
/* PROCESS DATA (three or more members) */
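/*
* The mixbuffer passed in by dsp_cmx_send() already holds the sum of every
* member's rx samples for this period. Without echo each member receives
* clamp(sum - own_rx), with echo simply clamp(sum); own tx data, when
* present, is mixed in on top in both cases, as implemented below.
*/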
/* -> if echo is NOT enabled */
if (!dsp->echo.software) {
/*
* -> subtract rx-data from conf-data,
* if tx-data is available, mix
*/
while (r != rr && t != tt) {
sample = dsp_audio_law_to_s32[p[t]] + *c++ -
dsp_audio_law_to_s32[q[r]];
if (sample < -32768)
sample = -32768;
else if (sample > 32767)
sample = 32767;
*d++ = dsp_audio_s16_to_law[sample & 0xffff];
/* conf-rx+tx */
r = (r + 1) & CMX_BUFF_MASK;
t = (t + 1) & CMX_BUFF_MASK;
}
while (r != rr) {
sample = *c++ - dsp_audio_law_to_s32[q[r]];
if (sample < -32768)
sample = -32768;
else if (sample > 32767)
sample = 32767;
*d++ = dsp_audio_s16_to_law[sample & 0xffff];
/* conf-rx */
r = (r + 1) & CMX_BUFF_MASK;
}
/* -> if echo is enabled */
} else {
/*
* -> encode conf-data, if tx-data
* is available, mix
*/
while (r != rr && t != tt) {
sample = dsp_audio_law_to_s32[p[t]] + *c++;
if (sample < -32768)
sample = -32768;
else if (sample > 32767)
sample = 32767;
*d++ = dsp_audio_s16_to_law[sample & 0xffff];
/* conf(echo)+tx */
t = (t + 1) & CMX_BUFF_MASK;
r = (r + 1) & CMX_BUFF_MASK;
}
while (r != rr) {
sample = *c++;
if (sample < -32768)
sample = -32768;
else if (sample > 32767)
sample = 32767;
*d++ = dsp_audio_s16_to_law[sample & 0xffff];
/* conf(echo) */
r = (r + 1) & CMX_BUFF_MASK;
}
}
dsp->tx_R = t;
goto send_packet;
send_packet:
/*
* send tx-data if enabled - don't filter,
* because we want what we send, not what we filtered
*/
if (dsp->tx_data) {
if (tx_data_only) {
hh->prim = DL_DATA_REQ;
hh->id = 0;
/* queue and trigger */
skb_queue_tail(&dsp->sendq, nskb);
schedule_work(&dsp->workq);
/* exit because only tx_data is used */
return;
} else {
txskb = mI_alloc_skb(len, GFP_ATOMIC);
if (!txskb) {
printk(KERN_ERR
"FATAL ERROR in mISDN_dsp.o: "
"cannot alloc %d bytes\n", len);
} else {
thh = mISDN_HEAD_P(txskb);
thh->prim = DL_DATA_REQ;
thh->id = 0;
skb_put_data(txskb, nskb->data + preload, len);
/* queue (trigger later) */
skb_queue_tail(&dsp->sendq, txskb);
}
}
}
/* send data only to card, if we didn't just calculate tx_data */
/* adjust volume */
if (dsp->tx_volume)
dsp_change_volume(nskb, dsp->tx_volume);
/* pipeline */
if (dsp->pipeline.inuse)
dsp_pipeline_process_tx(&dsp->pipeline, nskb->data,
nskb->len);
/* crypt */
if (dsp->bf_enable)
dsp_bf_encrypt(dsp, nskb->data, nskb->len);
/* queue and trigger */
skb_queue_tail(&dsp->sendq, nskb);
schedule_work(&dsp->workq);
}
static u32 jittercount; /* counter for jitter check */
struct timer_list dsp_spl_tl;
unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
static u16 dsp_count; /* last sample count */
static int dsp_count_valid; /* if we have last sample count */
void
dsp_cmx_send(struct timer_list *arg)
{
struct dsp_conf *conf;
struct dsp_conf_member *member;
struct dsp *dsp;
int mustmix, members;
static s32 mixbuffer[MAX_POLL + 100];
s32 *c;
u8 *p, *q;
int r, rr;
int jittercheck = 0, delay, i;
u_long flags;
u16 length, count;
/* lock */
spin_lock_irqsave(&dsp_lock, flags);
if (!dsp_count_valid) {
dsp_count = mISDN_clock_get();
length = dsp_poll;
dsp_count_valid = 1;
} else {
count = mISDN_clock_get();
length = count - dsp_count;
dsp_count = count;
}
if (length > MAX_POLL + 100)
length = MAX_POLL + 100;
/* printk(KERN_DEBUG "len=%d dsp_count=0x%x\n", length, dsp_count); */
/*
* check if jitter needs to be checked (this is every second)
*/
jittercount += length;
if (jittercount >= 8000) {
jittercount -= 8000;
jittercheck = 1;
}
/* loop all members that do not require conference mixing */
list_for_each_entry(dsp, &dsp_ilist, list) {
if (dsp->hdlc)
continue;
conf = dsp->conf;
mustmix = 0;
members = 0;
if (conf) {
members = list_count_nodes(&conf->mlist);
#ifdef CMX_CONF_DEBUG
if (conf->software && members > 1)
#else
if (conf->software && members > 2)
#endif
mustmix = 1;
}
/* transmission required */
if (!mustmix) {
dsp_cmx_send_member(dsp, length, mixbuffer, members);
/*
* unused mixbuffer is given to prevent a
* potential null-pointer-bug
*/
}
}
/* loop all members that require conference mixing */
list_for_each_entry(conf, &conf_ilist, list) {
/* count members and check hardware */
members = list_count_nodes(&conf->mlist);
#ifdef CMX_CONF_DEBUG
if (conf->software && members > 1) {
#else
if (conf->software && members > 2) {
#endif
/* check for hdlc conf */
member = list_entry(conf->mlist.next,
struct dsp_conf_member, list);
if (member->dsp->hdlc)
continue;
/* mix all data */
memset(mixbuffer, 0, length * sizeof(s32));
list_for_each_entry(member, &conf->mlist, list) {
dsp = member->dsp;
/* get range of data to mix */
c = mixbuffer;
q = dsp->rx_buff;
r = dsp->rx_R;
rr = (r + length) & CMX_BUFF_MASK;
/* add member's data */
while (r != rr) {
*c++ += dsp_audio_law_to_s32[q[r]];
r = (r + 1) & CMX_BUFF_MASK;
}
}
/* process each member */
list_for_each_entry(member, &conf->mlist, list) {
/* transmission */
dsp_cmx_send_member(member->dsp, length,
mixbuffer, members);
}
}
}
/* delete rx-data, increment buffers, change pointers */
list_for_each_entry(dsp, &dsp_ilist, list) {
if (dsp->hdlc)
continue;
p = dsp->rx_buff;
q = dsp->tx_buff;
r = dsp->rx_R;
/* move receive pointer when receiving */
if (!dsp->rx_is_off) {
rr = (r + length) & CMX_BUFF_MASK;
/* delete rx-data */
while (r != rr) {
p[r] = dsp_silence;
r = (r + 1) & CMX_BUFF_MASK;
}
/* increment rx-buffer pointer */
dsp->rx_R = r; /* write incremented read pointer */
}
/* check current rx_delay */
delay = (dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK;
if (delay >= CMX_BUFF_HALF)
delay = 0; /* will be the delay before next write */
/* check for lower delay */
if (delay < dsp->rx_delay[0])
dsp->rx_delay[0] = delay;
/* check current tx_delay */
delay = (dsp->tx_W-dsp->tx_R) & CMX_BUFF_MASK;
if (delay >= CMX_BUFF_HALF)
delay = 0; /* will be the delay before next write */
/* check for lower delay */
if (delay < dsp->tx_delay[0])
dsp->tx_delay[0] = delay;
if (jittercheck) {
/* find the lowest of all rx_delays */
delay = dsp->rx_delay[0];
i = 1;
while (i < MAX_SECONDS_JITTER_CHECK) {
if (delay > dsp->rx_delay[i])
delay = dsp->rx_delay[i];
i++;
}
/*
* remove rx_delay only if we have delay AND we
* have not preset cmx_delay AND
* the delay is greater dsp_poll
*/
if (delay > dsp_poll && !dsp->cmx_delay) {
if (dsp_debug & DEBUG_DSP_CLOCK)
printk(KERN_DEBUG
"%s lowest rx_delay of %d bytes for"
" dsp %s is now removed.\n",
__func__, delay,
dsp->name);
r = dsp->rx_R;
rr = (r + delay - (dsp_poll >> 1))
& CMX_BUFF_MASK;
/* delete rx-data */
while (r != rr) {
p[r] = dsp_silence;
r = (r + 1) & CMX_BUFF_MASK;
}
/* increment rx-buffer pointer */
dsp->rx_R = r;
/* write incremented read pointer */
}
/* find the lowest of all tx_delays */
delay = dsp->tx_delay[0];
i = 1;
while (i < MAX_SECONDS_JITTER_CHECK) {
if (delay > dsp->tx_delay[i])
delay = dsp->tx_delay[i];
i++;
}
/*
* remove delay only if we have delay AND we
* have enabled tx_dejitter
*/
if (delay > dsp_poll && dsp->tx_dejitter) {
if (dsp_debug & DEBUG_DSP_CLOCK)
printk(KERN_DEBUG
"%s lowest tx_delay of %d bytes for"
" dsp %s is now removed.\n",
__func__, delay,
dsp->name);
r = dsp->tx_R;
rr = (r + delay - (dsp_poll >> 1))
& CMX_BUFF_MASK;
/* delete tx-data */
while (r != rr) {
q[r] = dsp_silence;
r = (r + 1) & CMX_BUFF_MASK;
}
/* increment tx-buffer pointer */
dsp->tx_R = r;
/* write incremented read pointer */
}
/* scroll up delays */
i = MAX_SECONDS_JITTER_CHECK - 1;
while (i) {
dsp->rx_delay[i] = dsp->rx_delay[i - 1];
dsp->tx_delay[i] = dsp->tx_delay[i - 1];
i--;
}
dsp->tx_delay[0] = CMX_BUFF_HALF; /* (infinite) delay */
dsp->rx_delay[0] = CMX_BUFF_HALF; /* (infinite) delay */
}
}
/* if next event would be in the past ... */
if ((s32)(dsp_spl_jiffies + dsp_tics-jiffies) <= 0)
dsp_spl_jiffies = jiffies + 1;
else
dsp_spl_jiffies += dsp_tics;
dsp_spl_tl.expires = dsp_spl_jiffies;
add_timer(&dsp_spl_tl);
/* unlock */
spin_unlock_irqrestore(&dsp_lock, flags);
}
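/*
* Note: dsp_cmx_send() re-arms its own timer above. The signed difference
* (s32)(dsp_spl_jiffies + dsp_tics - jiffies) keeps the comparison correct
* across jiffies wrap-around, and a missed deadline is rescheduled one
* jiffy ahead instead of firing a burst of catch-up ticks.
*/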
/*
* audio data is transmitted from upper layer to the dsp
*/
void
dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb)
{
u_int w, ww;
u8 *d, *p;
int space; /* todo: , l = skb->len; */
#ifdef CMX_TX_DEBUG
char debugbuf[256] = "";
#endif
/* check if there is enough space, and then copy */
w = dsp->tx_W;
ww = dsp->tx_R;
p = dsp->tx_buff;
d = skb->data;
space = (ww - w - 1) & CMX_BUFF_MASK;
/* write-pointer should not overrun nor reach read pointer */
if (space < skb->len) {
/* write to the space we have left */
ww = (ww - 1) & CMX_BUFF_MASK; /* end one byte before tx_R */
if (dsp_debug & DEBUG_DSP_CLOCK)
printk(KERN_DEBUG "%s: TX overflow space=%d skb->len="
"%d, w=0x%04x, ww=0x%04x\n", __func__, space,
skb->len, w, ww);
} else
/* write until all bytes are copied */
ww = (w + skb->len) & CMX_BUFF_MASK;
dsp->tx_W = ww;
/* show current buffer */
#ifdef CMX_DEBUG
printk(KERN_DEBUG
"cmx_transmit(dsp=%lx) %d bytes to 0x%x-0x%x. %s\n",
(u_long)dsp, (ww - w) & CMX_BUFF_MASK, w, ww, dsp->name);
#endif
/* copy transmit data to tx-buffer */
#ifdef CMX_TX_DEBUG
sprintf(debugbuf, "TX getting (%04x-%04x)%p: ", w, ww, p);
#endif
while (w != ww) {
#ifdef CMX_TX_DEBUG
if (strlen(debugbuf) < 48)
sprintf(debugbuf + strlen(debugbuf), " %02x", *d);
#endif
p[w] = *d++;
w = (w + 1) & CMX_BUFF_MASK;
}
#ifdef CMX_TX_DEBUG
printk(KERN_DEBUG "%s\n", debugbuf);
#endif
}
/*
* hdlc data is received from card and sent to all members.
*/
void
dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb)
{
struct sk_buff *nskb = NULL;
struct dsp_conf_member *member;
struct mISDNhead *hh;
/* not if not active */
if (!dsp->b_active)
return;
/* check if we have something to process */
if (skb->len < 1)
return;
/* no conf */
if (!dsp->conf) {
/* in case of software echo */
if (dsp->echo.software) {
nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb) {
hh = mISDN_HEAD_P(nskb);
hh->prim = PH_DATA_REQ;
hh->id = 0;
skb_queue_tail(&dsp->sendq, nskb);
schedule_work(&dsp->workq);
}
}
return;
}
/* in case of hardware conference */
if (dsp->conf->hardware)
return;
list_for_each_entry(member, &dsp->conf->mlist, list) {
if (dsp->echo.software || member->dsp != dsp) {
nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb) {
hh = mISDN_HEAD_P(nskb);
hh->prim = PH_DATA_REQ;
hh->id = 0;
skb_queue_tail(&member->dsp->sendq, nskb);
schedule_work(&member->dsp->workq);
}
}
}
}
| linux-master | drivers/isdn/mISDN/dsp_cmx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Author Karsten Keil <[email protected]>
*
* Copyright 2008 by Karsten Keil <[email protected]>
*/
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/mISDNhw.h>
static void
dchannel_bh(struct work_struct *ws)
{
struct dchannel *dch = container_of(ws, struct dchannel, workq);
struct sk_buff *skb;
int err;
if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
while ((skb = skb_dequeue(&dch->rqueue))) {
if (likely(dch->dev.D.peer)) {
err = dch->dev.D.recv(dch->dev.D.peer, skb);
if (err)
dev_kfree_skb(skb);
} else
dev_kfree_skb(skb);
}
}
if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
if (dch->phfunc)
dch->phfunc(dch);
}
}
static void
bchannel_bh(struct work_struct *ws)
{
struct bchannel *bch = container_of(ws, struct bchannel, workq);
struct sk_buff *skb;
int err;
if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
while ((skb = skb_dequeue(&bch->rqueue))) {
bch->rcount--;
if (likely(bch->ch.peer)) {
err = bch->ch.recv(bch->ch.peer, skb);
if (err)
dev_kfree_skb(skb);
} else
dev_kfree_skb(skb);
}
}
}
int
mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
{
test_and_set_bit(FLG_HDLC, &ch->Flags);
ch->maxlen = maxlen;
ch->hw = NULL;
ch->rx_skb = NULL;
ch->tx_skb = NULL;
ch->tx_idx = 0;
ch->phfunc = phf;
skb_queue_head_init(&ch->squeue);
skb_queue_head_init(&ch->rqueue);
INIT_LIST_HEAD(&ch->dev.bchannels);
INIT_WORK(&ch->workq, dchannel_bh);
return 0;
}
EXPORT_SYMBOL(mISDN_initdchannel);
int
mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen,
unsigned short minlen)
{
ch->Flags = 0;
ch->minlen = minlen;
ch->next_minlen = minlen;
ch->init_minlen = minlen;
ch->maxlen = maxlen;
ch->next_maxlen = maxlen;
ch->init_maxlen = maxlen;
ch->hw = NULL;
ch->rx_skb = NULL;
ch->tx_skb = NULL;
ch->tx_idx = 0;
skb_queue_head_init(&ch->rqueue);
ch->rcount = 0;
ch->next_skb = NULL;
INIT_WORK(&ch->workq, bchannel_bh);
return 0;
}
EXPORT_SYMBOL(mISDN_initbchannel);
int
mISDN_freedchannel(struct dchannel *ch)
{
if (ch->tx_skb) {
dev_kfree_skb(ch->tx_skb);
ch->tx_skb = NULL;
}
if (ch->rx_skb) {
dev_kfree_skb(ch->rx_skb);
ch->rx_skb = NULL;
}
skb_queue_purge(&ch->squeue);
skb_queue_purge(&ch->rqueue);
flush_work(&ch->workq);
return 0;
}
EXPORT_SYMBOL(mISDN_freedchannel);
void
mISDN_clear_bchannel(struct bchannel *ch)
{
if (ch->tx_skb) {
dev_kfree_skb(ch->tx_skb);
ch->tx_skb = NULL;
}
ch->tx_idx = 0;
if (ch->rx_skb) {
dev_kfree_skb(ch->rx_skb);
ch->rx_skb = NULL;
}
if (ch->next_skb) {
dev_kfree_skb(ch->next_skb);
ch->next_skb = NULL;
}
test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
test_and_clear_bit(FLG_FILLEMPTY, &ch->Flags);
test_and_clear_bit(FLG_TX_EMPTY, &ch->Flags);
test_and_clear_bit(FLG_RX_OFF, &ch->Flags);
ch->dropcnt = 0;
ch->minlen = ch->init_minlen;
ch->next_minlen = ch->init_minlen;
ch->maxlen = ch->init_maxlen;
ch->next_maxlen = ch->init_maxlen;
skb_queue_purge(&ch->rqueue);
ch->rcount = 0;
}
EXPORT_SYMBOL(mISDN_clear_bchannel);
void
mISDN_freebchannel(struct bchannel *ch)
{
cancel_work_sync(&ch->workq);
mISDN_clear_bchannel(ch);
}
EXPORT_SYMBOL(mISDN_freebchannel);
int
mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
int ret = 0;
switch (cq->op) {
case MISDN_CTRL_GETOP:
cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY |
MISDN_CTRL_RX_OFF;
break;
case MISDN_CTRL_FILL_EMPTY:
if (cq->p1) {
memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE);
test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
} else {
test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
}
break;
case MISDN_CTRL_RX_OFF:
/* read back dropped byte count */
cq->p2 = bch->dropcnt;
if (cq->p1)
test_and_set_bit(FLG_RX_OFF, &bch->Flags);
else
test_and_clear_bit(FLG_RX_OFF, &bch->Flags);
bch->dropcnt = 0;
break;
case MISDN_CTRL_RX_BUFFER:
if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
bch->next_maxlen = cq->p2;
if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE)
bch->next_minlen = cq->p1;
/* we return the old values */
cq->p1 = bch->minlen;
cq->p2 = bch->maxlen;
break;
default:
pr_info("mISDN unhandled control %x operation\n", cq->op);
ret = -EINVAL;
break;
}
return ret;
}
EXPORT_SYMBOL(mISDN_ctrl_bchannel);
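/*
* Illustrative sketch (not part of this file): hardware drivers typically
* forward generic B-channel control operations here from their own bctrl
* handler and only implement driver-specific operations themselves,
* roughly like:
*
* case CONTROL_CHANNEL:
* ret = mISDN_ctrl_bchannel(bch, arg);
* break;
*/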
static inline u_int
get_sapi_tei(u_char *p)
{
u_int sapi, tei;
sapi = *p >> 2;
tei = p[1] >> 1;
return sapi | (tei << 8);
}
void
recv_Dchannel(struct dchannel *dch)
{
struct mISDNhead *hh;
if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
dev_kfree_skb(dch->rx_skb);
dch->rx_skb = NULL;
return;
}
hh = mISDN_HEAD_P(dch->rx_skb);
hh->prim = PH_DATA_IND;
hh->id = get_sapi_tei(dch->rx_skb->data);
skb_queue_tail(&dch->rqueue, dch->rx_skb);
dch->rx_skb = NULL;
schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel);
void
recv_Echannel(struct dchannel *ech, struct dchannel *dch)
{
struct mISDNhead *hh;
if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
dev_kfree_skb(ech->rx_skb);
ech->rx_skb = NULL;
return;
}
hh = mISDN_HEAD_P(ech->rx_skb);
hh->prim = PH_DATA_E_IND;
hh->id = get_sapi_tei(ech->rx_skb->data);
skb_queue_tail(&dch->rqueue, ech->rx_skb);
ech->rx_skb = NULL;
schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Echannel);
void
recv_Bchannel(struct bchannel *bch, unsigned int id, bool force)
{
struct mISDNhead *hh;
/* if allocation failed, upper layers may still call us */
if (unlikely(!bch->rx_skb))
return;
if (unlikely(!bch->rx_skb->len)) {
/* we have no data to deliver - this may happen after recovery
* from an overflow or a too small allocation.
* We need to free the buffer here */
dev_kfree_skb(bch->rx_skb);
bch->rx_skb = NULL;
} else {
if (test_bit(FLG_TRANSPARENT, &bch->Flags) &&
(bch->rx_skb->len < bch->minlen) && !force)
return;
hh = mISDN_HEAD_P(bch->rx_skb);
hh->prim = PH_DATA_IND;
hh->id = id;
if (bch->rcount >= 64) {
printk(KERN_WARNING
"B%d receive queue overflow - flushing!\n",
bch->nr);
skb_queue_purge(&bch->rqueue);
}
bch->rcount++;
skb_queue_tail(&bch->rqueue, bch->rx_skb);
bch->rx_skb = NULL;
schedule_event(bch, FLG_RECVQUEUE);
}
}
EXPORT_SYMBOL(recv_Bchannel);
void
recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
{
skb_queue_tail(&dch->rqueue, skb);
schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel_skb);
void
recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
{
if (bch->rcount >= 64) {
printk(KERN_WARNING "B-channel %p receive queue overflow, "
"flushing!\n", bch);
skb_queue_purge(&bch->rqueue);
bch->rcount = 0;
}
bch->rcount++;
skb_queue_tail(&bch->rqueue, skb);
schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel_skb);
static void
confirm_Dsend(struct dchannel *dch)
{
struct sk_buff *skb;
skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
0, NULL, GFP_ATOMIC);
if (!skb) {
printk(KERN_ERR "%s: no skb id %x\n", __func__,
mISDN_HEAD_ID(dch->tx_skb));
return;
}
skb_queue_tail(&dch->rqueue, skb);
schedule_event(dch, FLG_RECVQUEUE);
}
int
get_next_dframe(struct dchannel *dch)
{
dch->tx_idx = 0;
dch->tx_skb = skb_dequeue(&dch->squeue);
if (dch->tx_skb) {
confirm_Dsend(dch);
return 1;
}
dch->tx_skb = NULL;
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
return 0;
}
EXPORT_SYMBOL(get_next_dframe);
static void
confirm_Bsend(struct bchannel *bch)
{
struct sk_buff *skb;
if (bch->rcount >= 64) {
printk(KERN_WARNING "B-channel %p receive queue overflow, "
"flushing!\n", bch);
skb_queue_purge(&bch->rqueue);
bch->rcount = 0;
}
skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
0, NULL, GFP_ATOMIC);
if (!skb) {
printk(KERN_ERR "%s: no skb id %x\n", __func__,
mISDN_HEAD_ID(bch->tx_skb));
return;
}
bch->rcount++;
skb_queue_tail(&bch->rqueue, skb);
schedule_event(bch, FLG_RECVQUEUE);
}
int
get_next_bframe(struct bchannel *bch)
{
bch->tx_idx = 0;
if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
bch->tx_skb = bch->next_skb;
if (bch->tx_skb) {
bch->next_skb = NULL;
test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
/* confirm immediately to allow next data */
confirm_Bsend(bch);
return 1;
} else {
test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
printk(KERN_WARNING "B TX_NEXT without skb\n");
}
}
bch->tx_skb = NULL;
test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
return 0;
}
EXPORT_SYMBOL(get_next_bframe);
void
queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
{
struct mISDNhead *hh;
if (!skb) {
_queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
} else {
if (ch->peer) {
hh = mISDN_HEAD_P(skb);
hh->prim = pr;
hh->id = id;
if (!ch->recv(ch->peer, skb))
return;
}
dev_kfree_skb(skb);
}
}
EXPORT_SYMBOL(queue_ch_frame);
int
dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
{
/* check for invalid size */
if (skb->len <= 0) {
printk(KERN_WARNING "%s: skb too small\n", __func__);
return -EINVAL;
}
if (skb->len > ch->maxlen) {
printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
__func__, skb->len, ch->maxlen);
return -EINVAL;
}
/* HW lock must be obtained */
if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
skb_queue_tail(&ch->squeue, skb);
return 0;
} else {
/* write to fifo */
ch->tx_skb = skb;
ch->tx_idx = 0;
return 1;
}
}
EXPORT_SYMBOL(dchannel_senddata);
int
bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
{
/* check for invalid size */
if (skb->len <= 0) {
printk(KERN_WARNING "%s: skb too small\n", __func__);
return -EINVAL;
}
if (skb->len > ch->maxlen) {
printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
__func__, skb->len, ch->maxlen);
return -EINVAL;
}
/* HW lock must be obtained */
/* check for pending next_skb */
if (ch->next_skb) {
printk(KERN_WARNING
"%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
__func__, skb->len, ch->next_skb->len);
return -EBUSY;
}
if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
ch->next_skb = skb;
return 0;
} else {
/* write to fifo */
ch->tx_skb = skb;
ch->tx_idx = 0;
confirm_Bsend(ch);
return 1;
}
}
EXPORT_SYMBOL(bchannel_senddata);
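/*
* Illustrative driver-side sketch (hw_start_tx_fifo is a placeholder): the
* hardware driver calls this from its send path and starts the FIFO
* transfer only when the skb was accepted for direct transmission, i.e.
* when bch->tx_skb was set:
*
* ret = bchannel_senddata(bch, skb);
* if (ret > 0)
* hw_start_tx_fifo(hw, bch);
*
* A return of 0 means the skb was stored as next_skb and will be picked up
* by get_next_bframe() once the current frame completes.
*/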
/* The function allocates a new receive skb on demand with a size for the
* requirements of the current protocol. It returns the tailroom of the
* receive skb or an error.
*/
int
bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
{
int len;
if (bch->rx_skb) {
len = skb_tailroom(bch->rx_skb);
if (len < reqlen) {
pr_warn("B%d no space for %d (only %d) bytes\n",
bch->nr, reqlen, len);
if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
/* send what we have now and try a new buffer */
recv_Bchannel(bch, 0, true);
} else {
/* on HDLC we have to drop too big frames */
return -EMSGSIZE;
}
} else {
return len;
}
}
/* update current min/max length first */
if (unlikely(bch->maxlen != bch->next_maxlen))
bch->maxlen = bch->next_maxlen;
if (unlikely(bch->minlen != bch->next_minlen))
bch->minlen = bch->next_minlen;
if (unlikely(reqlen > bch->maxlen))
return -EMSGSIZE;
if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
if (reqlen >= bch->minlen) {
len = reqlen;
} else {
len = 2 * bch->minlen;
if (len > bch->maxlen)
len = bch->maxlen;
}
} else {
/* with HDLC we do not know the length yet */
len = bch->maxlen;
}
bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
if (!bch->rx_skb) {
pr_warn("B%d receive no memory for %d bytes\n", bch->nr, len);
len = -ENOMEM;
}
return len;
}
EXPORT_SYMBOL(bchannel_get_rxbuf);
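/*
* Illustrative rx-path sketch (fifo_data and count are placeholders): a
* driver reserves room before copying data out of its FIFO and then hands
* the buffer up; a negative return (e.g. -EMSGSIZE for an oversized HDLC
* frame) means the data has to be dropped:
*
* ret = bchannel_get_rxbuf(bch, count);
* if (ret >= 0) {
* skb_put_data(bch->rx_skb, fifo_data, count);
* recv_Bchannel(bch, 0, false);
* }
*/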
| linux-master | drivers/isdn/mISDN/hwchannel.c |
/*
* Audio support data for mISDN_dsp.
*
* Copyright 2002/2003 by Andreas Eversberg ([email protected])
* Rewritten by Peter
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/delay.h>
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
#include <linux/export.h>
#include <linux/bitrev.h>
#include "core.h"
#include "dsp.h"
/* ulaw[unsigned char] -> signed 16-bit */
s32 dsp_audio_ulaw_to_s32[256];
/* alaw[unsigned char] -> signed 16-bit */
s32 dsp_audio_alaw_to_s32[256];
s32 *dsp_audio_law_to_s32;
EXPORT_SYMBOL(dsp_audio_law_to_s32);
/* signed 16-bit -> law */
u8 dsp_audio_s16_to_law[65536];
EXPORT_SYMBOL(dsp_audio_s16_to_law);
/* alaw -> ulaw */
u8 dsp_audio_alaw_to_ulaw[256];
/* ulaw -> alaw */
static u8 dsp_audio_ulaw_to_alaw[256];
u8 dsp_silence;
/*****************************************************
* generate table for conversion of s16 to alaw/ulaw *
*****************************************************/
#define AMI_MASK 0x55
static inline unsigned char linear2alaw(short int linear)
{
int mask;
int seg;
int pcm_val;
static int seg_end[8] = {
0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF
};
pcm_val = linear;
if (pcm_val >= 0) {
/* Sign (7th) bit = 1 */
mask = AMI_MASK | 0x80;
} else {
/* Sign bit = 0 */
mask = AMI_MASK;
pcm_val = -pcm_val;
}
/* Convert the scaled magnitude to segment number. */
for (seg = 0; seg < 8; seg++) {
if (pcm_val <= seg_end[seg])
break;
}
/* Combine the sign, segment, and quantization bits. */
return ((seg << 4) |
((pcm_val >> ((seg) ? (seg + 3) : 4)) & 0x0F)) ^ mask;
}
static inline short int alaw2linear(unsigned char alaw)
{
int i;
int seg;
alaw ^= AMI_MASK;
i = ((alaw & 0x0F) << 4) + 8 /* rounding error */;
seg = (((int) alaw & 0x70) >> 4);
if (seg)
i = (i + 0x100) << (seg - 1);
return (short int) ((alaw & 0x80) ? i : -i);
}
static inline short int ulaw2linear(unsigned char ulaw)
{
short mu, e, f, y;
static short etab[] = {0, 132, 396, 924, 1980, 4092, 8316, 16764};
mu = 255 - ulaw;
e = (mu & 0x70) / 16;
f = mu & 0x0f;
y = f * (1 << (e + 3));
y += etab[e];
if (mu & 0x80)
y = -y;
return y;
}
#define BIAS 0x84 /*!< define the add-in bias for 16 bit samples */
static unsigned char linear2ulaw(short sample)
{
static int exp_lut[256] = {
0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
int sign, exponent, mantissa;
unsigned char ulawbyte;
/* Get the sample into sign-magnitude. */
sign = (sample >> 8) & 0x80; /* set aside the sign */
if (sign != 0)
sample = -sample; /* get magnitude */
/* Convert from 16 bit linear to ulaw. */
sample = sample + BIAS;
exponent = exp_lut[(sample >> 7) & 0xFF];
mantissa = (sample >> (exponent + 3)) & 0x0F;
ulawbyte = ~(sign | (exponent << 4) | mantissa);
return ulawbyte;
}
void dsp_audio_generate_law_tables(void)
{
int i;
for (i = 0; i < 256; i++)
dsp_audio_alaw_to_s32[i] = alaw2linear(bitrev8((u8)i));
for (i = 0; i < 256; i++)
dsp_audio_ulaw_to_s32[i] = ulaw2linear(bitrev8((u8)i));
for (i = 0; i < 256; i++) {
dsp_audio_alaw_to_ulaw[i] =
linear2ulaw(dsp_audio_alaw_to_s32[i]);
dsp_audio_ulaw_to_alaw[i] =
linear2alaw(dsp_audio_ulaw_to_s32[i]);
}
}
void
dsp_audio_generate_s2law_table(void)
{
int i;
if (dsp_options & DSP_OPT_ULAW) {
/* generating ulaw-table */
for (i = -32768; i < 32768; i++) {
dsp_audio_s16_to_law[i & 0xffff] =
bitrev8(linear2ulaw(i));
}
} else {
/* generating alaw-table */
for (i = -32768; i < 32768; i++) {
dsp_audio_s16_to_law[i & 0xffff] =
bitrev8(linear2alaw(i));
}
}
}
/*
* the seven bit sample is the index of every second alaw-sample, ordered by
* amplitude. 0x00 is the most negative, 0x7f the most positive amplitude.
*/
u8 dsp_audio_seven2law[128];
u8 dsp_audio_law2seven[256];
/********************************************************************
* generate table for conversion law from/to 7-bit alaw-like sample *
********************************************************************/
void
dsp_audio_generate_seven(void)
{
int i, j, k;
u8 spl;
u8 sorted_alaw[256];
/* generate alaw table, sorted by the linear value */
for (i = 0; i < 256; i++) {
j = 0;
for (k = 0; k < 256; k++) {
if (dsp_audio_alaw_to_s32[k]
< dsp_audio_alaw_to_s32[i])
j++;
}
sorted_alaw[j] = i;
}
/* generate tables */
for (i = 0; i < 256; i++) {
/* spl is the source: the law-sample (converted to alaw) */
spl = i;
if (dsp_options & DSP_OPT_ULAW)
spl = dsp_audio_ulaw_to_alaw[i];
/* find the 7-bit-sample */
for (j = 0; j < 256; j++) {
if (sorted_alaw[j] == spl)
break;
}
/* write 7-bit audio value */
dsp_audio_law2seven[i] = j >> 1;
}
for (i = 0; i < 128; i++) {
spl = sorted_alaw[i << 1];
if (dsp_options & DSP_OPT_ULAW)
spl = dsp_audio_alaw_to_ulaw[spl];
dsp_audio_seven2law[i] = spl;
}
}
/* mix 2*law -> law */
u8 dsp_audio_mix_law[65536];
/******************************************************
* generate mix table to mix two law samples into one *
******************************************************/
void
dsp_audio_generate_mix_table(void)
{
int i, j;
s32 sample;
i = 0;
while (i < 256) {
j = 0;
while (j < 256) {
sample = dsp_audio_law_to_s32[i];
sample += dsp_audio_law_to_s32[j];
if (sample > 32767)
sample = 32767;
if (sample < -32768)
sample = -32768;
dsp_audio_mix_law[(i << 8) | j] =
dsp_audio_s16_to_law[sample & 0xffff];
j++;
}
i++;
}
}
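/*
* The resulting table lets dsp_cmx.c mix two law-encoded samples with a
* single lookup instead of decode/add/clamp/encode:
*
* mixed = dsp_audio_mix_law[(a << 8) | b];
*/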
/*************************************
* generate different volume changes *
*************************************/
static u8 dsp_audio_reduce8[256];
static u8 dsp_audio_reduce7[256];
static u8 dsp_audio_reduce6[256];
static u8 dsp_audio_reduce5[256];
static u8 dsp_audio_reduce4[256];
static u8 dsp_audio_reduce3[256];
static u8 dsp_audio_reduce2[256];
static u8 dsp_audio_reduce1[256];
static u8 dsp_audio_increase1[256];
static u8 dsp_audio_increase2[256];
static u8 dsp_audio_increase3[256];
static u8 dsp_audio_increase4[256];
static u8 dsp_audio_increase5[256];
static u8 dsp_audio_increase6[256];
static u8 dsp_audio_increase7[256];
static u8 dsp_audio_increase8[256];
static u8 *dsp_audio_volume_change[16] = {
dsp_audio_reduce8,
dsp_audio_reduce7,
dsp_audio_reduce6,
dsp_audio_reduce5,
dsp_audio_reduce4,
dsp_audio_reduce3,
dsp_audio_reduce2,
dsp_audio_reduce1,
dsp_audio_increase1,
dsp_audio_increase2,
dsp_audio_increase3,
dsp_audio_increase4,
dsp_audio_increase5,
dsp_audio_increase6,
dsp_audio_increase7,
dsp_audio_increase8,
};
void
dsp_audio_generate_volume_changes(void)
{
register s32 sample;
int i;
int num[] = { 110, 125, 150, 175, 200, 300, 400, 500 };
int denum[] = { 100, 100, 100, 100, 100, 100, 100, 100 };
i = 0;
while (i < 256) {
dsp_audio_reduce8[i] = dsp_audio_s16_to_law[
(dsp_audio_law_to_s32[i] * denum[7] / num[7]) & 0xffff];
dsp_audio_reduce7[i] = dsp_audio_s16_to_law[
(dsp_audio_law_to_s32[i] * denum[6] / num[6]) & 0xffff];
dsp_audio_reduce6[i] = dsp_audio_s16_to_law[
(dsp_audio_law_to_s32[i] * denum[5] / num[5]) & 0xffff];
dsp_audio_reduce5[i] = dsp_audio_s16_to_law[
(dsp_audio_law_to_s32[i] * denum[4] / num[4]) & 0xffff];
dsp_audio_reduce4[i] = dsp_audio_s16_to_law[
(dsp_audio_law_to_s32[i] * denum[3] / num[3]) & 0xffff];
dsp_audio_reduce3[i] = dsp_audio_s16_to_law[
(dsp_audio_law_to_s32[i] * denum[2] / num[2]) & 0xffff];
dsp_audio_reduce2[i] = dsp_audio_s16_to_law[
(dsp_audio_law_to_s32[i] * denum[1] / num[1]) & 0xffff];
dsp_audio_reduce1[i] = dsp_audio_s16_to_law[
(dsp_audio_law_to_s32[i] * denum[0] / num[0]) & 0xffff];
sample = dsp_audio_law_to_s32[i] * num[0] / denum[0];
if (sample < -32768)
sample = -32768;
else if (sample > 32767)
sample = 32767;
dsp_audio_increase1[i] = dsp_audio_s16_to_law[sample & 0xffff];
sample = dsp_audio_law_to_s32[i] * num[1] / denum[1];
if (sample < -32768)
sample = -32768;
else if (sample > 32767)
sample = 32767;
dsp_audio_increase2[i] = dsp_audio_s16_to_law[sample & 0xffff];
sample = dsp_audio_law_to_s32[i] * num[2] / denum[2];
if (sample < -32768)
sample = -32768;
else if (sample > 32767)
sample = 32767;
dsp_audio_increase3[i] = dsp_audio_s16_to_law[sample & 0xffff];
sample = dsp_audio_law_to_s32[i] * num[3] / denum[3];
if (sample < -32768)
sample = -32768;
else if (sample > 32767)
sample = 32767;
dsp_audio_increase4[i] = dsp_audio_s16_to_law[sample & 0xffff];
sample = dsp_audio_law_to_s32[i] * num[4] / denum[4];
if (sample < -32768)
sample = -32768;
else if (sample > 32767)
sample = 32767;
dsp_audio_increase5[i] = dsp_audio_s16_to_law[sample & 0xffff];
sample = dsp_audio_law_to_s32[i] * num[5] / denum[5];
if (sample < -32768)
sample = -32768;
else if (sample > 32767)
sample = 32767;
dsp_audio_increase6[i] = dsp_audio_s16_to_law[sample & 0xffff];
sample = dsp_audio_law_to_s32[i] * num[6] / denum[6];
if (sample < -32768)
sample = -32768;
else if (sample > 32767)
sample = 32767;
dsp_audio_increase7[i] = dsp_audio_s16_to_law[sample & 0xffff];
sample = dsp_audio_law_to_s32[i] * num[7] / denum[7];
if (sample < -32768)
sample = -32768;
else if (sample > 32767)
sample = 32767;
dsp_audio_increase8[i] = dsp_audio_s16_to_law[sample & 0xffff];
i++;
}
}
/**************************************
* change the volume of the given skb *
**************************************/
/* this is a helper function for changing the volume of an skb. the range is
* -8 to 8; each step selects one of the gain tables generated above,
* 0 == no change.
*/
void
dsp_change_volume(struct sk_buff *skb, int volume)
{
u8 *volume_change;
int i, ii;
u8 *p;
int shift;
if (volume == 0)
return;
/* get correct conversion table */
if (volume < 0) {
shift = volume + 8;
if (shift < 0)
shift = 0;
} else {
shift = volume + 7;
if (shift > 15)
shift = 15;
}
volume_change = dsp_audio_volume_change[shift];
i = 0;
ii = skb->len;
p = skb->data;
/* change volume */
while (i < ii) {
*p = volume_change[*p];
p++;
i++;
}
}
| linux-master | drivers/isdn/mISDN/dsp_audio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic serial GNSS receiver driver
*
* Copyright (C) 2018 Johan Hovold <[email protected]>
*/
#include <linux/errno.h>
#include <linux/gnss.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/serdev.h>
#include <linux/slab.h>
#include "serial.h"
static int gnss_serial_open(struct gnss_device *gdev)
{
struct gnss_serial *gserial = gnss_get_drvdata(gdev);
struct serdev_device *serdev = gserial->serdev;
int ret;
ret = serdev_device_open(serdev);
if (ret)
return ret;
serdev_device_set_baudrate(serdev, gserial->speed);
serdev_device_set_flow_control(serdev, false);
ret = pm_runtime_get_sync(&serdev->dev);
if (ret < 0) {
pm_runtime_put_noidle(&serdev->dev);
goto err_close;
}
return 0;
err_close:
serdev_device_close(serdev);
return ret;
}
static void gnss_serial_close(struct gnss_device *gdev)
{
struct gnss_serial *gserial = gnss_get_drvdata(gdev);
struct serdev_device *serdev = gserial->serdev;
serdev_device_close(serdev);
pm_runtime_put(&serdev->dev);
}
static int gnss_serial_write_raw(struct gnss_device *gdev,
const unsigned char *buf, size_t count)
{
struct gnss_serial *gserial = gnss_get_drvdata(gdev);
struct serdev_device *serdev = gserial->serdev;
int ret;
/* write is only buffered synchronously */
ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
if (ret < 0 || ret < count)
return ret;
/* FIXME: determine if interrupted? */
serdev_device_wait_until_sent(serdev, 0);
return count;
}
static const struct gnss_operations gnss_serial_gnss_ops = {
.open = gnss_serial_open,
.close = gnss_serial_close,
.write_raw = gnss_serial_write_raw,
};
static int gnss_serial_receive_buf(struct serdev_device *serdev,
const unsigned char *buf, size_t count)
{
struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
struct gnss_device *gdev = gserial->gdev;
return gnss_insert_raw(gdev, buf, count);
}
static const struct serdev_device_ops gnss_serial_serdev_ops = {
.receive_buf = gnss_serial_receive_buf,
.write_wakeup = serdev_device_write_wakeup,
};
static int gnss_serial_set_power(struct gnss_serial *gserial,
enum gnss_serial_pm_state state)
{
if (!gserial->ops || !gserial->ops->set_power)
return 0;
return gserial->ops->set_power(gserial, state);
}
/*
* FIXME: need to provide subdriver defaults or separate dt parsing from
* allocation.
*/
static int gnss_serial_parse_dt(struct serdev_device *serdev)
{
struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
struct device_node *node = serdev->dev.of_node;
u32 speed = 4800;
of_property_read_u32(node, "current-speed", &speed);
gserial->speed = speed;
return 0;
}
struct gnss_serial *gnss_serial_allocate(struct serdev_device *serdev,
size_t data_size)
{
struct gnss_serial *gserial;
struct gnss_device *gdev;
int ret;
gserial = kzalloc(sizeof(*gserial) + data_size, GFP_KERNEL);
if (!gserial)
return ERR_PTR(-ENOMEM);
gdev = gnss_allocate_device(&serdev->dev);
if (!gdev) {
ret = -ENOMEM;
goto err_free_gserial;
}
gdev->ops = &gnss_serial_gnss_ops;
gnss_set_drvdata(gdev, gserial);
gserial->serdev = serdev;
gserial->gdev = gdev;
serdev_device_set_drvdata(serdev, gserial);
serdev_device_set_client_ops(serdev, &gnss_serial_serdev_ops);
ret = gnss_serial_parse_dt(serdev);
if (ret)
goto err_put_device;
return gserial;
err_put_device:
gnss_put_device(gserial->gdev);
err_free_gserial:
kfree(gserial);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(gnss_serial_allocate);
void gnss_serial_free(struct gnss_serial *gserial)
{
gnss_put_device(gserial->gdev);
kfree(gserial);
}
EXPORT_SYMBOL_GPL(gnss_serial_free);
int gnss_serial_register(struct gnss_serial *gserial)
{
struct serdev_device *serdev = gserial->serdev;
int ret;
if (IS_ENABLED(CONFIG_PM)) {
pm_runtime_enable(&serdev->dev);
} else {
ret = gnss_serial_set_power(gserial, GNSS_SERIAL_ACTIVE);
if (ret < 0)
return ret;
}
ret = gnss_register_device(gserial->gdev);
if (ret)
goto err_disable_rpm;
return 0;
err_disable_rpm:
if (IS_ENABLED(CONFIG_PM))
pm_runtime_disable(&serdev->dev);
else
gnss_serial_set_power(gserial, GNSS_SERIAL_OFF);
return ret;
}
EXPORT_SYMBOL_GPL(gnss_serial_register);
void gnss_serial_deregister(struct gnss_serial *gserial)
{
struct serdev_device *serdev = gserial->serdev;
gnss_deregister_device(gserial->gdev);
if (IS_ENABLED(CONFIG_PM))
pm_runtime_disable(&serdev->dev);
else
gnss_serial_set_power(gserial, GNSS_SERIAL_OFF);
}
EXPORT_SYMBOL_GPL(gnss_serial_deregister);
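/*
* Illustrative subdriver pattern (names such as my_gserial_ops are
* placeholders; see drivers/gnss/ubx.c for a real user): probe allocates
* and registers, remove deregisters and frees:
*
* gserial = gnss_serial_allocate(serdev, sizeof(*data));
* gserial->ops = &my_gserial_ops;
* gserial->gdev->type = GNSS_TYPE_NMEA;
* ret = gnss_serial_register(gserial);
* ...
* gnss_serial_deregister(gserial);
* gnss_serial_free(gserial);
*/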
#ifdef CONFIG_PM
static int gnss_serial_runtime_suspend(struct device *dev)
{
struct gnss_serial *gserial = dev_get_drvdata(dev);
return gnss_serial_set_power(gserial, GNSS_SERIAL_STANDBY);
}
static int gnss_serial_runtime_resume(struct device *dev)
{
struct gnss_serial *gserial = dev_get_drvdata(dev);
return gnss_serial_set_power(gserial, GNSS_SERIAL_ACTIVE);
}
#endif /* CONFIG_PM */
static int gnss_serial_prepare(struct device *dev)
{
if (pm_runtime_suspended(dev))
return 1;
return 0;
}
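/*
* gnss_serial_prepare() returning 1 tells the PM core that the device may
* stay runtime suspended across a system sleep transition (the
* "direct complete" path), so an already suspended receiver is not woken
* up just to be suspended again.
*/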
#ifdef CONFIG_PM_SLEEP
static int gnss_serial_suspend(struct device *dev)
{
struct gnss_serial *gserial = dev_get_drvdata(dev);
int ret = 0;
/*
* FIXME: serdev currently lacks support for managing the underlying
* device's wakeup settings. A workaround would be to close the serdev
* device here if it is open.
*/
if (!pm_runtime_suspended(dev))
ret = gnss_serial_set_power(gserial, GNSS_SERIAL_STANDBY);
return ret;
}
static int gnss_serial_resume(struct device *dev)
{
struct gnss_serial *gserial = dev_get_drvdata(dev);
int ret = 0;
if (!pm_runtime_suspended(dev))
ret = gnss_serial_set_power(gserial, GNSS_SERIAL_ACTIVE);
return ret;
}
#endif /* CONFIG_PM_SLEEP */
const struct dev_pm_ops gnss_serial_pm_ops = {
.prepare = gnss_serial_prepare,
SET_SYSTEM_SLEEP_PM_OPS(gnss_serial_suspend, gnss_serial_resume)
SET_RUNTIME_PM_OPS(gnss_serial_runtime_suspend, gnss_serial_runtime_resume, NULL)
};
EXPORT_SYMBOL_GPL(gnss_serial_pm_ops);
MODULE_AUTHOR("Johan Hovold <[email protected]>");
MODULE_DESCRIPTION("Generic serial GNSS receiver driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gnss/serial.c |
// SPDX-License-Identifier: GPL-2.0
/*
* u-blox GNSS receiver driver
*
* Copyright (C) 2018 Johan Hovold <[email protected]>
*/
#include <linux/errno.h>
#include <linux/gnss.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include "serial.h"
struct ubx_data {
struct regulator *v_bckp;
struct regulator *vcc;
};
static int ubx_set_active(struct gnss_serial *gserial)
{
struct ubx_data *data = gnss_serial_get_drvdata(gserial);
int ret;
ret = regulator_enable(data->vcc);
if (ret)
return ret;
return 0;
}
static int ubx_set_standby(struct gnss_serial *gserial)
{
struct ubx_data *data = gnss_serial_get_drvdata(gserial);
int ret;
ret = regulator_disable(data->vcc);
if (ret)
return ret;
return 0;
}
static int ubx_set_power(struct gnss_serial *gserial,
enum gnss_serial_pm_state state)
{
switch (state) {
case GNSS_SERIAL_ACTIVE:
return ubx_set_active(gserial);
case GNSS_SERIAL_OFF:
case GNSS_SERIAL_STANDBY:
return ubx_set_standby(gserial);
}
return -EINVAL;
}
static const struct gnss_serial_ops ubx_gserial_ops = {
.set_power = ubx_set_power,
};
static int ubx_probe(struct serdev_device *serdev)
{
struct gnss_serial *gserial;
struct ubx_data *data;
int ret;
gserial = gnss_serial_allocate(serdev, sizeof(*data));
if (IS_ERR(gserial)) {
ret = PTR_ERR(gserial);
return ret;
}
gserial->ops = &ubx_gserial_ops;
gserial->gdev->type = GNSS_TYPE_UBX;
data = gnss_serial_get_drvdata(gserial);
data->vcc = devm_regulator_get(&serdev->dev, "vcc");
if (IS_ERR(data->vcc)) {
ret = PTR_ERR(data->vcc);
goto err_free_gserial;
}
data->v_bckp = devm_regulator_get_optional(&serdev->dev, "v-bckp");
if (IS_ERR(data->v_bckp)) {
ret = PTR_ERR(data->v_bckp);
if (ret == -ENODEV)
data->v_bckp = NULL;
else
goto err_free_gserial;
}
if (data->v_bckp) {
ret = regulator_enable(data->v_bckp);
if (ret)
goto err_free_gserial;
}
ret = gnss_serial_register(gserial);
if (ret)
goto err_disable_v_bckp;
return 0;
err_disable_v_bckp:
if (data->v_bckp)
regulator_disable(data->v_bckp);
err_free_gserial:
gnss_serial_free(gserial);
return ret;
}
static void ubx_remove(struct serdev_device *serdev)
{
struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
struct ubx_data *data = gnss_serial_get_drvdata(gserial);
gnss_serial_deregister(gserial);
if (data->v_bckp)
regulator_disable(data->v_bckp);
gnss_serial_free(gserial);
}
#ifdef CONFIG_OF
static const struct of_device_id ubx_of_match[] = {
{ .compatible = "u-blox,neo-6m" },
{ .compatible = "u-blox,neo-8" },
{ .compatible = "u-blox,neo-m8" },
{},
};
MODULE_DEVICE_TABLE(of, ubx_of_match);
#endif
static struct serdev_device_driver ubx_driver = {
.driver = {
.name = "gnss-ubx",
.of_match_table = of_match_ptr(ubx_of_match),
.pm = &gnss_serial_pm_ops,
},
.probe = ubx_probe,
.remove = ubx_remove,
};
module_serdev_device_driver(ubx_driver);
MODULE_AUTHOR("Johan Hovold <[email protected]>");
MODULE_DESCRIPTION("u-blox GNSS receiver driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gnss/ubx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic USB GNSS receiver driver
*
* Copyright (C) 2021 Johan Hovold <[email protected]>
*/
#include <linux/errno.h>
#include <linux/gnss.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#define GNSS_USB_READ_BUF_LEN 512
#define GNSS_USB_WRITE_TIMEOUT 1000
static const struct usb_device_id gnss_usb_id_table[] = {
{ USB_DEVICE(0x1199, 0xb000) }, /* Sierra Wireless XM1210 */
{ }
};
MODULE_DEVICE_TABLE(usb, gnss_usb_id_table);
struct gnss_usb {
struct usb_device *udev;
struct usb_interface *intf;
struct gnss_device *gdev;
struct urb *read_urb;
unsigned int write_pipe;
};
static void gnss_usb_rx_complete(struct urb *urb)
{
struct gnss_usb *gusb = urb->context;
struct gnss_device *gdev = gusb->gdev;
int status = urb->status;
int len;
int ret;
switch (status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&gdev->dev, "urb stopped: %d\n", status);
return;
case -EPIPE:
dev_err(&gdev->dev, "urb stopped: %d\n", status);
return;
default:
dev_dbg(&gdev->dev, "nonzero urb status: %d\n", status);
goto resubmit;
}
len = urb->actual_length;
if (len == 0)
goto resubmit;
ret = gnss_insert_raw(gdev, urb->transfer_buffer, len);
if (ret < len)
dev_dbg(&gdev->dev, "dropped %d bytes\n", len - ret);
resubmit:
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret && ret != -EPERM && ret != -ENODEV)
dev_err(&gdev->dev, "failed to resubmit urb: %d\n", ret);
}
static int gnss_usb_open(struct gnss_device *gdev)
{
struct gnss_usb *gusb = gnss_get_drvdata(gdev);
int ret;
ret = usb_submit_urb(gusb->read_urb, GFP_KERNEL);
if (ret) {
if (ret != -EPERM && ret != -ENODEV)
dev_err(&gdev->dev, "failed to submit urb: %d\n", ret);
return ret;
}
return 0;
}
static void gnss_usb_close(struct gnss_device *gdev)
{
struct gnss_usb *gusb = gnss_get_drvdata(gdev);
usb_kill_urb(gusb->read_urb);
}
static int gnss_usb_write_raw(struct gnss_device *gdev,
const unsigned char *buf, size_t count)
{
struct gnss_usb *gusb = gnss_get_drvdata(gdev);
void *tbuf;
int ret;
tbuf = kmemdup(buf, count, GFP_KERNEL);
if (!tbuf)
return -ENOMEM;
ret = usb_bulk_msg(gusb->udev, gusb->write_pipe, tbuf, count, NULL,
GNSS_USB_WRITE_TIMEOUT);
kfree(tbuf);
if (ret)
return ret;
return count;
}
static const struct gnss_operations gnss_usb_gnss_ops = {
.open = gnss_usb_open,
.close = gnss_usb_close,
.write_raw = gnss_usb_write_raw,
};
static int gnss_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_endpoint_descriptor *in, *out;
struct gnss_device *gdev;
struct gnss_usb *gusb;
struct urb *urb;
size_t buf_len;
void *buf;
int ret;
ret = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL,
NULL);
if (ret)
return ret;
gusb = kzalloc(sizeof(*gusb), GFP_KERNEL);
if (!gusb)
return -ENOMEM;
gdev = gnss_allocate_device(&intf->dev);
if (!gdev) {
ret = -ENOMEM;
goto err_free_gusb;
}
gdev->ops = &gnss_usb_gnss_ops;
gdev->type = GNSS_TYPE_NMEA;
gnss_set_drvdata(gdev, gusb);
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
ret = -ENOMEM;
goto err_put_gdev;
}
buf_len = max(usb_endpoint_maxp(in), GNSS_USB_READ_BUF_LEN);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto err_free_urb;
}
usb_fill_bulk_urb(urb, udev,
usb_rcvbulkpipe(udev, usb_endpoint_num(in)),
buf, buf_len, gnss_usb_rx_complete, gusb);
gusb->intf = intf;
gusb->udev = udev;
gusb->gdev = gdev;
gusb->read_urb = urb;
gusb->write_pipe = usb_sndbulkpipe(udev, usb_endpoint_num(out));
ret = gnss_register_device(gdev);
if (ret)
goto err_free_buf;
usb_set_intfdata(intf, gusb);
return 0;
err_free_buf:
kfree(buf);
err_free_urb:
usb_free_urb(urb);
err_put_gdev:
gnss_put_device(gdev);
err_free_gusb:
kfree(gusb);
return ret;
}
static void gnss_usb_disconnect(struct usb_interface *intf)
{
struct gnss_usb *gusb = usb_get_intfdata(intf);
gnss_deregister_device(gusb->gdev);
kfree(gusb->read_urb->transfer_buffer);
usb_free_urb(gusb->read_urb);
gnss_put_device(gusb->gdev);
kfree(gusb);
}
static struct usb_driver gnss_usb_driver = {
.name = "gnss-usb",
.probe = gnss_usb_probe,
.disconnect = gnss_usb_disconnect,
.id_table = gnss_usb_id_table,
};
module_usb_driver(gnss_usb_driver);
MODULE_AUTHOR("Johan Hovold <[email protected]>");
MODULE_DESCRIPTION("Generic USB GNSS receiver driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gnss/usb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SiRFstar GNSS receiver driver
*
* Copyright (C) 2018 Johan Hovold <[email protected]>
*/
#include <linux/errno.h>
#include <linux/gnss.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/sched.h>
#include <linux/serdev.h>
#include <linux/slab.h>
#include <linux/wait.h>
#define SIRF_BOOT_DELAY 500
#define SIRF_ON_OFF_PULSE_TIME 100
#define SIRF_ACTIVATE_TIMEOUT 200
#define SIRF_HIBERNATE_TIMEOUT 200
/*
* If no data arrives for this time, we assume that the chip is off.
* REVISIT: The report cycle is configurable and can be several minutes long,
 * so this will only work reliably if the report cycle is set to a reasonably
 * low value. Also, power-saving settings (like sending data only on movement)
 * might make things work even worse.
 * A workaround might be to parse shutdown or boot-up messages.
*/
#define SIRF_REPORT_CYCLE 2000
struct sirf_data {
struct gnss_device *gdev;
struct serdev_device *serdev;
speed_t speed;
struct regulator *vcc;
struct regulator *lna;
struct gpio_desc *on_off;
struct gpio_desc *wakeup;
int irq;
bool active;
struct mutex gdev_mutex;
bool open;
struct mutex serdev_mutex;
int serdev_count;
wait_queue_head_t power_wait;
};
static int sirf_serdev_open(struct sirf_data *data)
{
int ret = 0;
mutex_lock(&data->serdev_mutex);
if (++data->serdev_count == 1) {
ret = serdev_device_open(data->serdev);
if (ret) {
data->serdev_count--;
goto out_unlock;
}
serdev_device_set_baudrate(data->serdev, data->speed);
serdev_device_set_flow_control(data->serdev, false);
}
out_unlock:
mutex_unlock(&data->serdev_mutex);
return ret;
}
static void sirf_serdev_close(struct sirf_data *data)
{
mutex_lock(&data->serdev_mutex);
if (--data->serdev_count == 0)
serdev_device_close(data->serdev);
mutex_unlock(&data->serdev_mutex);
}
static int sirf_open(struct gnss_device *gdev)
{
struct sirf_data *data = gnss_get_drvdata(gdev);
struct serdev_device *serdev = data->serdev;
int ret;
mutex_lock(&data->gdev_mutex);
data->open = true;
mutex_unlock(&data->gdev_mutex);
ret = sirf_serdev_open(data);
if (ret) {
mutex_lock(&data->gdev_mutex);
data->open = false;
mutex_unlock(&data->gdev_mutex);
return ret;
}
ret = pm_runtime_get_sync(&serdev->dev);
if (ret < 0) {
dev_err(&gdev->dev, "failed to runtime resume: %d\n", ret);
pm_runtime_put_noidle(&serdev->dev);
goto err_close;
}
return 0;
err_close:
sirf_serdev_close(data);
mutex_lock(&data->gdev_mutex);
data->open = false;
mutex_unlock(&data->gdev_mutex);
return ret;
}
static void sirf_close(struct gnss_device *gdev)
{
struct sirf_data *data = gnss_get_drvdata(gdev);
struct serdev_device *serdev = data->serdev;
sirf_serdev_close(data);
pm_runtime_put(&serdev->dev);
mutex_lock(&data->gdev_mutex);
data->open = false;
mutex_unlock(&data->gdev_mutex);
}
static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
size_t count)
{
struct sirf_data *data = gnss_get_drvdata(gdev);
struct serdev_device *serdev = data->serdev;
int ret;
/* write is only buffered synchronously */
ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
if (ret < 0 || ret < count)
return ret;
/* FIXME: determine if interrupted? */
serdev_device_wait_until_sent(serdev, 0);
return count;
}
static const struct gnss_operations sirf_gnss_ops = {
.open = sirf_open,
.close = sirf_close,
.write_raw = sirf_write_raw,
};
static int sirf_receive_buf(struct serdev_device *serdev,
const unsigned char *buf, size_t count)
{
struct sirf_data *data = serdev_device_get_drvdata(serdev);
struct gnss_device *gdev = data->gdev;
int ret = 0;
if (!data->wakeup && !data->active) {
data->active = true;
wake_up_interruptible(&data->power_wait);
}
mutex_lock(&data->gdev_mutex);
if (data->open)
ret = gnss_insert_raw(gdev, buf, count);
mutex_unlock(&data->gdev_mutex);
return ret;
}
static const struct serdev_device_ops sirf_serdev_ops = {
.receive_buf = sirf_receive_buf,
.write_wakeup = serdev_device_write_wakeup,
};
static irqreturn_t sirf_wakeup_handler(int irq, void *dev_id)
{
struct sirf_data *data = dev_id;
struct device *dev = &data->serdev->dev;
int ret;
ret = gpiod_get_value_cansleep(data->wakeup);
dev_dbg(dev, "%s - wakeup = %d\n", __func__, ret);
if (ret < 0)
goto out;
data->active = ret;
wake_up_interruptible(&data->power_wait);
out:
return IRQ_HANDLED;
}
static int sirf_wait_for_power_state_nowakeup(struct sirf_data *data,
bool active,
unsigned long timeout)
{
int ret;
/* Wait for state change (including any shutdown messages). */
msleep(timeout);
/* Wait for data reception or timeout. */
data->active = false;
ret = wait_event_interruptible_timeout(data->power_wait,
data->active, msecs_to_jiffies(SIRF_REPORT_CYCLE));
if (ret < 0)
return ret;
if (ret > 0 && !active)
return -ETIMEDOUT;
if (ret == 0 && active)
return -ETIMEDOUT;
return 0;
}
static int sirf_wait_for_power_state(struct sirf_data *data, bool active,
unsigned long timeout)
{
int ret;
if (!data->wakeup)
return sirf_wait_for_power_state_nowakeup(data, active, timeout);
ret = wait_event_interruptible_timeout(data->power_wait,
data->active == active, msecs_to_jiffies(timeout));
if (ret < 0)
return ret;
if (ret == 0) {
dev_warn(&data->serdev->dev, "timeout waiting for active state = %d\n",
active);
return -ETIMEDOUT;
}
return 0;
}
static void sirf_pulse_on_off(struct sirf_data *data)
{
gpiod_set_value_cansleep(data->on_off, 1);
msleep(SIRF_ON_OFF_PULSE_TIME);
gpiod_set_value_cansleep(data->on_off, 0);
}
static int sirf_set_active(struct sirf_data *data, bool active)
{
unsigned long timeout;
int retries = 3;
int ret;
if (active)
timeout = SIRF_ACTIVATE_TIMEOUT;
else
timeout = SIRF_HIBERNATE_TIMEOUT;
if (!data->wakeup) {
ret = sirf_serdev_open(data);
if (ret)
return ret;
}
do {
sirf_pulse_on_off(data);
ret = sirf_wait_for_power_state(data, active, timeout);
} while (ret == -ETIMEDOUT && retries--);
if (!data->wakeup)
sirf_serdev_close(data);
if (ret)
return ret;
return 0;
}
static int sirf_runtime_suspend(struct device *dev)
{
struct sirf_data *data = dev_get_drvdata(dev);
int ret2;
int ret;
if (data->on_off)
ret = sirf_set_active(data, false);
else
ret = regulator_disable(data->vcc);
if (ret)
return ret;
ret = regulator_disable(data->lna);
if (ret)
goto err_reenable;
return 0;
err_reenable:
if (data->on_off)
ret2 = sirf_set_active(data, true);
else
ret2 = regulator_enable(data->vcc);
if (ret2)
dev_err(dev,
"failed to reenable power on failed suspend: %d\n",
ret2);
return ret;
}
static int sirf_runtime_resume(struct device *dev)
{
struct sirf_data *data = dev_get_drvdata(dev);
int ret;
ret = regulator_enable(data->lna);
if (ret)
return ret;
if (data->on_off)
ret = sirf_set_active(data, true);
else
ret = regulator_enable(data->vcc);
if (ret)
goto err_disable_lna;
return 0;
err_disable_lna:
regulator_disable(data->lna);
return ret;
}
static int __maybe_unused sirf_suspend(struct device *dev)
{
struct sirf_data *data = dev_get_drvdata(dev);
int ret = 0;
if (!pm_runtime_suspended(dev))
ret = sirf_runtime_suspend(dev);
if (data->wakeup)
disable_irq(data->irq);
return ret;
}
static int __maybe_unused sirf_resume(struct device *dev)
{
struct sirf_data *data = dev_get_drvdata(dev);
int ret = 0;
if (data->wakeup)
enable_irq(data->irq);
if (!pm_runtime_suspended(dev))
ret = sirf_runtime_resume(dev);
return ret;
}
static const struct dev_pm_ops sirf_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sirf_suspend, sirf_resume)
SET_RUNTIME_PM_OPS(sirf_runtime_suspend, sirf_runtime_resume, NULL)
};
static int sirf_parse_dt(struct serdev_device *serdev)
{
struct sirf_data *data = serdev_device_get_drvdata(serdev);
struct device_node *node = serdev->dev.of_node;
u32 speed = 9600;
of_property_read_u32(node, "current-speed", &speed);
data->speed = speed;
return 0;
}
static int sirf_probe(struct serdev_device *serdev)
{
struct device *dev = &serdev->dev;
struct gnss_device *gdev;
struct sirf_data *data;
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
gdev = gnss_allocate_device(dev);
if (!gdev)
return -ENOMEM;
gdev->type = GNSS_TYPE_SIRF;
gdev->ops = &sirf_gnss_ops;
gnss_set_drvdata(gdev, data);
data->serdev = serdev;
data->gdev = gdev;
mutex_init(&data->gdev_mutex);
mutex_init(&data->serdev_mutex);
init_waitqueue_head(&data->power_wait);
serdev_device_set_drvdata(serdev, data);
serdev_device_set_client_ops(serdev, &sirf_serdev_ops);
ret = sirf_parse_dt(serdev);
if (ret)
goto err_put_device;
data->vcc = devm_regulator_get(dev, "vcc");
if (IS_ERR(data->vcc)) {
ret = PTR_ERR(data->vcc);
goto err_put_device;
}
data->lna = devm_regulator_get(dev, "lna");
if (IS_ERR(data->lna)) {
ret = PTR_ERR(data->lna);
goto err_put_device;
}
data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
GPIOD_OUT_LOW);
if (IS_ERR(data->on_off)) {
ret = PTR_ERR(data->on_off);
goto err_put_device;
}
if (data->on_off) {
data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup",
GPIOD_IN);
if (IS_ERR(data->wakeup)) {
ret = PTR_ERR(data->wakeup);
goto err_put_device;
}
ret = regulator_enable(data->vcc);
if (ret)
goto err_put_device;
/* Wait for chip to boot into hibernate mode. */
msleep(SIRF_BOOT_DELAY);
}
if (data->wakeup) {
ret = gpiod_get_value_cansleep(data->wakeup);
if (ret < 0)
goto err_disable_vcc;
data->active = ret;
ret = gpiod_to_irq(data->wakeup);
if (ret < 0)
goto err_disable_vcc;
data->irq = ret;
ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"wakeup", data);
if (ret)
goto err_disable_vcc;
}
if (data->on_off) {
if (!data->wakeup) {
data->active = false;
ret = sirf_serdev_open(data);
if (ret)
goto err_disable_vcc;
msleep(SIRF_REPORT_CYCLE);
sirf_serdev_close(data);
}
/* Force hibernate mode if already active. */
if (data->active) {
ret = sirf_set_active(data, false);
if (ret) {
dev_err(dev, "failed to set hibernate mode: %d\n",
ret);
goto err_free_irq;
}
}
}
if (IS_ENABLED(CONFIG_PM)) {
pm_runtime_set_suspended(dev); /* clear runtime_error flag */
pm_runtime_enable(dev);
} else {
ret = sirf_runtime_resume(dev);
if (ret < 0)
goto err_free_irq;
}
ret = gnss_register_device(gdev);
if (ret)
goto err_disable_rpm;
return 0;
err_disable_rpm:
if (IS_ENABLED(CONFIG_PM))
pm_runtime_disable(dev);
else
sirf_runtime_suspend(dev);
err_free_irq:
if (data->wakeup)
free_irq(data->irq, data);
err_disable_vcc:
if (data->on_off)
regulator_disable(data->vcc);
err_put_device:
gnss_put_device(data->gdev);
return ret;
}
static void sirf_remove(struct serdev_device *serdev)
{
struct sirf_data *data = serdev_device_get_drvdata(serdev);
gnss_deregister_device(data->gdev);
if (IS_ENABLED(CONFIG_PM))
pm_runtime_disable(&serdev->dev);
else
sirf_runtime_suspend(&serdev->dev);
if (data->wakeup)
free_irq(data->irq, data);
if (data->on_off)
regulator_disable(data->vcc);
gnss_put_device(data->gdev);
}
#ifdef CONFIG_OF
static const struct of_device_id sirf_of_match[] = {
{ .compatible = "fastrax,uc430" },
{ .compatible = "linx,r4" },
{ .compatible = "wi2wi,w2sg0004" },
{ .compatible = "wi2wi,w2sg0008i" },
{ .compatible = "wi2wi,w2sg0084i" },
{},
};
MODULE_DEVICE_TABLE(of, sirf_of_match);
#endif
static struct serdev_device_driver sirf_driver = {
.driver = {
.name = "gnss-sirf",
.of_match_table = of_match_ptr(sirf_of_match),
.pm = &sirf_pm_ops,
},
.probe = sirf_probe,
.remove = sirf_remove,
};
module_serdev_device_driver(sirf_driver);
MODULE_AUTHOR("Johan Hovold <[email protected]>");
MODULE_DESCRIPTION("SiRFstar GNSS receiver driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gnss/sirf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Mediatek GNSS receiver driver
*
* Copyright (C) 2018 Johan Hovold <[email protected]>
*/
#include <linux/errno.h>
#include <linux/gnss.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include "serial.h"
struct mtk_data {
struct regulator *vbackup;
struct regulator *vcc;
};
static int mtk_set_active(struct gnss_serial *gserial)
{
struct mtk_data *data = gnss_serial_get_drvdata(gserial);
int ret;
ret = regulator_enable(data->vcc);
if (ret)
return ret;
return 0;
}
static int mtk_set_standby(struct gnss_serial *gserial)
{
struct mtk_data *data = gnss_serial_get_drvdata(gserial);
int ret;
ret = regulator_disable(data->vcc);
if (ret)
return ret;
return 0;
}
static int mtk_set_power(struct gnss_serial *gserial,
enum gnss_serial_pm_state state)
{
switch (state) {
case GNSS_SERIAL_ACTIVE:
return mtk_set_active(gserial);
case GNSS_SERIAL_OFF:
case GNSS_SERIAL_STANDBY:
return mtk_set_standby(gserial);
}
return -EINVAL;
}
static const struct gnss_serial_ops mtk_gserial_ops = {
.set_power = mtk_set_power,
};
static int mtk_probe(struct serdev_device *serdev)
{
struct gnss_serial *gserial;
struct mtk_data *data;
int ret;
gserial = gnss_serial_allocate(serdev, sizeof(*data));
if (IS_ERR(gserial)) {
ret = PTR_ERR(gserial);
return ret;
}
gserial->ops = &mtk_gserial_ops;
gserial->gdev->type = GNSS_TYPE_MTK;
data = gnss_serial_get_drvdata(gserial);
data->vcc = devm_regulator_get(&serdev->dev, "vcc");
if (IS_ERR(data->vcc)) {
ret = PTR_ERR(data->vcc);
goto err_free_gserial;
}
data->vbackup = devm_regulator_get_optional(&serdev->dev, "vbackup");
if (IS_ERR(data->vbackup)) {
ret = PTR_ERR(data->vbackup);
if (ret == -ENODEV)
data->vbackup = NULL;
else
goto err_free_gserial;
}
if (data->vbackup) {
ret = regulator_enable(data->vbackup);
if (ret)
goto err_free_gserial;
}
ret = gnss_serial_register(gserial);
if (ret)
goto err_disable_vbackup;
return 0;
err_disable_vbackup:
if (data->vbackup)
regulator_disable(data->vbackup);
err_free_gserial:
gnss_serial_free(gserial);
return ret;
}
static void mtk_remove(struct serdev_device *serdev)
{
struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
struct mtk_data *data = gnss_serial_get_drvdata(gserial);
gnss_serial_deregister(gserial);
if (data->vbackup)
regulator_disable(data->vbackup);
gnss_serial_free(gserial);
}
#ifdef CONFIG_OF
static const struct of_device_id mtk_of_match[] = {
{ .compatible = "globaltop,pa6h" },
{},
};
MODULE_DEVICE_TABLE(of, mtk_of_match);
#endif
static struct serdev_device_driver mtk_driver = {
.driver = {
.name = "gnss-mtk",
.of_match_table = of_match_ptr(mtk_of_match),
.pm = &gnss_serial_pm_ops,
},
.probe = mtk_probe,
.remove = mtk_remove,
};
module_serdev_device_driver(mtk_driver);
MODULE_AUTHOR("Loys Ollivier <[email protected]>");
MODULE_DESCRIPTION("Mediatek GNSS receiver driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gnss/mtk.c |
// SPDX-License-Identifier: GPL-2.0
/*
* GNSS receiver core
*
* Copyright (C) 2018 Johan Hovold <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cdev.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gnss.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#define GNSS_FLAG_HAS_WRITE_RAW BIT(0)
#define GNSS_MINORS 16
static DEFINE_IDA(gnss_minors);
static dev_t gnss_first;
/* FIFO size must be a power of two */
#define GNSS_READ_FIFO_SIZE 4096
#define GNSS_WRITE_BUF_SIZE 1024
#define to_gnss_device(d) container_of((d), struct gnss_device, dev)
static int gnss_open(struct inode *inode, struct file *file)
{
struct gnss_device *gdev;
int ret = 0;
gdev = container_of(inode->i_cdev, struct gnss_device, cdev);
get_device(&gdev->dev);
stream_open(inode, file);
file->private_data = gdev;
down_write(&gdev->rwsem);
if (gdev->disconnected) {
ret = -ENODEV;
goto unlock;
}
if (gdev->count++ == 0) {
ret = gdev->ops->open(gdev);
if (ret)
gdev->count--;
}
unlock:
up_write(&gdev->rwsem);
if (ret)
put_device(&gdev->dev);
return ret;
}
static int gnss_release(struct inode *inode, struct file *file)
{
struct gnss_device *gdev = file->private_data;
down_write(&gdev->rwsem);
if (gdev->disconnected)
goto unlock;
if (--gdev->count == 0) {
gdev->ops->close(gdev);
kfifo_reset(&gdev->read_fifo);
}
unlock:
up_write(&gdev->rwsem);
put_device(&gdev->dev);
return 0;
}
static ssize_t gnss_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct gnss_device *gdev = file->private_data;
unsigned int copied;
int ret;
mutex_lock(&gdev->read_mutex);
while (kfifo_is_empty(&gdev->read_fifo)) {
mutex_unlock(&gdev->read_mutex);
if (gdev->disconnected)
return 0;
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(gdev->read_queue,
gdev->disconnected ||
!kfifo_is_empty(&gdev->read_fifo));
if (ret)
return -ERESTARTSYS;
mutex_lock(&gdev->read_mutex);
}
ret = kfifo_to_user(&gdev->read_fifo, buf, count, &copied);
if (ret == 0)
ret = copied;
mutex_unlock(&gdev->read_mutex);
return ret;
}
static ssize_t gnss_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
struct gnss_device *gdev = file->private_data;
size_t written = 0;
int ret;
if (gdev->disconnected)
return -EIO;
if (!count)
return 0;
if (!(gdev->flags & GNSS_FLAG_HAS_WRITE_RAW))
return -EIO;
/* Ignoring O_NONBLOCK, write_raw() is synchronous. */
ret = mutex_lock_interruptible(&gdev->write_mutex);
if (ret)
return -ERESTARTSYS;
for (;;) {
size_t n = count - written;
if (n > GNSS_WRITE_BUF_SIZE)
n = GNSS_WRITE_BUF_SIZE;
if (copy_from_user(gdev->write_buf, buf, n)) {
ret = -EFAULT;
goto out_unlock;
}
/*
* Assumes write_raw can always accept GNSS_WRITE_BUF_SIZE
* bytes.
*
* FIXME: revisit
*/
down_read(&gdev->rwsem);
if (!gdev->disconnected)
ret = gdev->ops->write_raw(gdev, gdev->write_buf, n);
else
ret = -EIO;
up_read(&gdev->rwsem);
if (ret < 0)
break;
written += ret;
buf += ret;
if (written == count)
break;
}
if (written)
ret = written;
out_unlock:
mutex_unlock(&gdev->write_mutex);
return ret;
}
static __poll_t gnss_poll(struct file *file, poll_table *wait)
{
struct gnss_device *gdev = file->private_data;
__poll_t mask = 0;
poll_wait(file, &gdev->read_queue, wait);
if (!kfifo_is_empty(&gdev->read_fifo))
mask |= EPOLLIN | EPOLLRDNORM;
if (gdev->disconnected)
mask |= EPOLLHUP;
return mask;
}
static const struct file_operations gnss_fops = {
.owner = THIS_MODULE,
.open = gnss_open,
.release = gnss_release,
.read = gnss_read,
.write = gnss_write,
.poll = gnss_poll,
.llseek = no_llseek,
};
static struct class *gnss_class;
static void gnss_device_release(struct device *dev)
{
struct gnss_device *gdev = to_gnss_device(dev);
kfree(gdev->write_buf);
kfifo_free(&gdev->read_fifo);
ida_free(&gnss_minors, gdev->id);
kfree(gdev);
}
struct gnss_device *gnss_allocate_device(struct device *parent)
{
struct gnss_device *gdev;
struct device *dev;
int id;
int ret;
gdev = kzalloc(sizeof(*gdev), GFP_KERNEL);
if (!gdev)
return NULL;
id = ida_alloc_max(&gnss_minors, GNSS_MINORS - 1, GFP_KERNEL);
if (id < 0) {
kfree(gdev);
return NULL;
}
gdev->id = id;
dev = &gdev->dev;
device_initialize(dev);
dev->devt = gnss_first + id;
dev->class = gnss_class;
dev->parent = parent;
dev->release = gnss_device_release;
dev_set_drvdata(dev, gdev);
dev_set_name(dev, "gnss%d", id);
init_rwsem(&gdev->rwsem);
mutex_init(&gdev->read_mutex);
mutex_init(&gdev->write_mutex);
init_waitqueue_head(&gdev->read_queue);
ret = kfifo_alloc(&gdev->read_fifo, GNSS_READ_FIFO_SIZE, GFP_KERNEL);
if (ret)
goto err_put_device;
gdev->write_buf = kzalloc(GNSS_WRITE_BUF_SIZE, GFP_KERNEL);
if (!gdev->write_buf)
goto err_put_device;
cdev_init(&gdev->cdev, &gnss_fops);
gdev->cdev.owner = THIS_MODULE;
return gdev;
err_put_device:
put_device(dev);
return NULL;
}
EXPORT_SYMBOL_GPL(gnss_allocate_device);
void gnss_put_device(struct gnss_device *gdev)
{
put_device(&gdev->dev);
}
EXPORT_SYMBOL_GPL(gnss_put_device);
int gnss_register_device(struct gnss_device *gdev)
{
int ret;
/* Set a flag which can be accessed without holding the rwsem. */
if (gdev->ops->write_raw != NULL)
gdev->flags |= GNSS_FLAG_HAS_WRITE_RAW;
ret = cdev_device_add(&gdev->cdev, &gdev->dev);
if (ret) {
dev_err(&gdev->dev, "failed to add device: %d\n", ret);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(gnss_register_device);
void gnss_deregister_device(struct gnss_device *gdev)
{
down_write(&gdev->rwsem);
gdev->disconnected = true;
if (gdev->count) {
wake_up_interruptible(&gdev->read_queue);
gdev->ops->close(gdev);
}
up_write(&gdev->rwsem);
cdev_device_del(&gdev->cdev, &gdev->dev);
}
EXPORT_SYMBOL_GPL(gnss_deregister_device);
/*
* Caller guarantees serialisation.
*
* Must not be called for a closed device.
*/
int gnss_insert_raw(struct gnss_device *gdev, const unsigned char *buf,
size_t count)
{
int ret;
ret = kfifo_in(&gdev->read_fifo, buf, count);
wake_up_interruptible(&gdev->read_queue);
return ret;
}
EXPORT_SYMBOL_GPL(gnss_insert_raw);
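/*
 * Illustrative sketch, not taken from an in-tree driver: a serdev-based
 * GNSS driver pushes received frames into the core from its receive_buf
 * callback. The names foo_data and foo_receive_buf are hypothetical; the
 * serialisation requirement above is met because serdev delivers data
 * from a single context per device.
 *
 *	static int foo_receive_buf(struct serdev_device *serdev,
 *				   const unsigned char *buf, size_t count)
 *	{
 *		struct foo_data *data = serdev_device_get_drvdata(serdev);
 *
 *		return gnss_insert_raw(data->gdev, buf, count);
 *	}
 */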
static const char * const gnss_type_names[GNSS_TYPE_COUNT] = {
[GNSS_TYPE_NMEA] = "NMEA",
[GNSS_TYPE_SIRF] = "SiRF",
[GNSS_TYPE_UBX] = "UBX",
[GNSS_TYPE_MTK] = "MTK",
};
static const char *gnss_type_name(const struct gnss_device *gdev)
{
const char *name = NULL;
if (gdev->type < GNSS_TYPE_COUNT)
name = gnss_type_names[gdev->type];
if (!name)
dev_WARN(&gdev->dev, "type name not defined\n");
return name;
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct gnss_device *gdev = to_gnss_device(dev);
return sprintf(buf, "%s\n", gnss_type_name(gdev));
}
static DEVICE_ATTR_RO(type);
static struct attribute *gnss_attrs[] = {
&dev_attr_type.attr,
NULL,
};
ATTRIBUTE_GROUPS(gnss);
static int gnss_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct gnss_device *gdev = to_gnss_device(dev);
int ret;
ret = add_uevent_var(env, "GNSS_TYPE=%s", gnss_type_name(gdev));
if (ret)
return ret;
return 0;
}
static int __init gnss_module_init(void)
{
int ret;
ret = alloc_chrdev_region(&gnss_first, 0, GNSS_MINORS, "gnss");
if (ret < 0) {
pr_err("failed to allocate device numbers: %d\n", ret);
return ret;
}
gnss_class = class_create("gnss");
if (IS_ERR(gnss_class)) {
ret = PTR_ERR(gnss_class);
pr_err("failed to create class: %d\n", ret);
goto err_unregister_chrdev;
}
gnss_class->dev_groups = gnss_groups;
gnss_class->dev_uevent = gnss_uevent;
pr_info("GNSS driver registered with major %d\n", MAJOR(gnss_first));
return 0;
err_unregister_chrdev:
unregister_chrdev_region(gnss_first, GNSS_MINORS);
return ret;
}
module_init(gnss_module_init);
static void __exit gnss_module_exit(void)
{
class_destroy(gnss_class);
unregister_chrdev_region(gnss_first, GNSS_MINORS);
ida_destroy(&gnss_minors);
}
module_exit(gnss_module_exit);
MODULE_AUTHOR("Johan Hovold <[email protected]>");
MODULE_DESCRIPTION("GNSS receiver core");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gnss/core.c |
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/ntb.h>
#include <linux/msi.h>
#include <linux/pci.h>
struct ntb_msi {
u64 base_addr;
u64 end_addr;
void (*desc_changed)(void *ctx);
u32 __iomem *peer_mws[];
};
/**
* ntb_msi_init() - Initialize the MSI context
* @ntb: NTB device context
*
* This function must be called before any other ntb_msi function.
* It initializes the context for MSI operations and maps
* the peer memory windows.
*
* This function reserves the last N outbound memory windows (where N
* is the number of peers).
*
* Return: Zero on success, otherwise a negative error number.
*/
int ntb_msi_init(struct ntb_dev *ntb,
void (*desc_changed)(void *ctx))
{
phys_addr_t mw_phys_addr;
resource_size_t mw_size;
int peer_widx;
int peers;
int ret;
int i;
peers = ntb_peer_port_count(ntb);
if (peers <= 0)
return -EINVAL;
ntb->msi = devm_kzalloc(&ntb->dev, struct_size(ntb->msi, peer_mws, peers),
GFP_KERNEL);
if (!ntb->msi)
return -ENOMEM;
ntb->msi->desc_changed = desc_changed;
for (i = 0; i < peers; i++) {
peer_widx = ntb_peer_mw_count(ntb) - 1 - i;
ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr,
&mw_size);
if (ret)
goto unroll;
ntb->msi->peer_mws[i] = devm_ioremap(&ntb->dev, mw_phys_addr,
mw_size);
if (!ntb->msi->peer_mws[i]) {
ret = -EFAULT;
goto unroll;
}
}
return 0;
unroll:
for (i = 0; i < peers; i++)
if (ntb->msi->peer_mws[i])
devm_iounmap(&ntb->dev, ntb->msi->peer_mws[i]);
devm_kfree(&ntb->dev, ntb->msi);
ntb->msi = NULL;
return ret;
}
EXPORT_SYMBOL(ntb_msi_init);
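/*
 * Illustrative sketch, not taken from an in-tree client: ntb_msi_init()
 * is typically called early in a client's probe path, before any other
 * ntb_msi_*() helper. The callback name foo_msi_desc_changed and its
 * context structure are hypothetical.
 *
 *	static void foo_msi_desc_changed(void *ctx)
 *	{
 *		struct foo_ctx *foo = ctx;
 *
 *		// republish the local descriptors to the peer here
 *	}
 *
 *	rc = ntb_msi_init(ntb, foo_msi_desc_changed);
 *	if (rc)
 *		dev_warn(&ntb->dev, "MSI not available, using doorbells\n");
 */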
/**
* ntb_msi_setup_mws() - Initialize the MSI inbound memory windows
* @ntb: NTB device context
*
* This function sets up the required inbound memory windows. It should be
* called from a work function after a link up event.
*
* Over the entire network, this function will reserves the last N
* inbound memory windows for each peer (where N is the number of peers).
*
* ntb_msi_init() must be called before this function.
*
* Return: Zero on success, otherwise a negative error number.
*/
int ntb_msi_setup_mws(struct ntb_dev *ntb)
{
struct msi_desc *desc;
u64 addr;
int peer, peer_widx;
resource_size_t addr_align, size_align, size_max;
resource_size_t mw_size = SZ_32K;
resource_size_t mw_min_size = mw_size;
int i;
int ret;
if (!ntb->msi)
return -EINVAL;
msi_lock_descs(&ntb->pdev->dev);
desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
msi_unlock_descs(&ntb->pdev->dev);
for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
if (peer_widx < 0)
return peer_widx;
ret = ntb_mw_get_align(ntb, peer, peer_widx, &addr_align,
NULL, NULL);
if (ret)
return ret;
addr &= ~(addr_align - 1);
}
for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
if (peer_widx < 0) {
ret = peer_widx;
goto error_out;
}
ret = ntb_mw_get_align(ntb, peer, peer_widx, NULL,
&size_align, &size_max);
if (ret)
goto error_out;
mw_size = round_up(mw_size, size_align);
mw_size = max(mw_size, size_max);
if (mw_size < mw_min_size)
mw_min_size = mw_size;
ret = ntb_mw_set_trans(ntb, peer, peer_widx,
addr, mw_size);
if (ret)
goto error_out;
}
ntb->msi->base_addr = addr;
ntb->msi->end_addr = addr + mw_min_size;
return 0;
error_out:
for (i = 0; i < peer; i++) {
peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
if (peer_widx < 0)
continue;
ntb_mw_clear_trans(ntb, i, peer_widx);
}
return ret;
}
EXPORT_SYMBOL(ntb_msi_setup_mws);
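/*
 * Illustrative sketch, not taken from an in-tree client: the inbound MSI
 * windows are normally set up from link-event work, mirroring what
 * ntb_transport does. The name foo_link_work and struct foo_ctx are
 * hypothetical; ntb_msi_clear_mws() is the matching teardown on link
 * down or removal.
 *
 *	static void foo_link_work(struct work_struct *work)
 *	{
 *		struct foo_ctx *foo = container_of(work, struct foo_ctx,
 *						   link_work);
 *		int rc;
 *
 *		rc = ntb_msi_setup_mws(foo->ntb);
 *		if (rc)
 *			dev_warn(&foo->ntb->dev,
 *				 "MSI memory windows unavailable: %d\n", rc);
 *	}
 */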
/**
* ntb_msi_clear_mws() - Clear all inbound memory windows
* @ntb: NTB device context
*
* This function tears down the resources used by ntb_msi_setup_mws().
*/
void ntb_msi_clear_mws(struct ntb_dev *ntb)
{
int peer;
int peer_widx;
for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
if (peer_widx < 0)
continue;
ntb_mw_clear_trans(ntb, peer, peer_widx);
}
}
EXPORT_SYMBOL(ntb_msi_clear_mws);
struct ntb_msi_devres {
struct ntb_dev *ntb;
struct msi_desc *entry;
struct ntb_msi_desc *msi_desc;
};
static int ntb_msi_set_desc(struct ntb_dev *ntb, struct msi_desc *entry,
struct ntb_msi_desc *msi_desc)
{
u64 addr;
addr = entry->msg.address_lo +
((uint64_t)entry->msg.address_hi << 32);
if (addr < ntb->msi->base_addr || addr >= ntb->msi->end_addr) {
dev_warn_once(&ntb->dev,
"IRQ %d: MSI Address not within the memory window (%llx, [%llx %llx])\n",
entry->irq, addr, ntb->msi->base_addr,
ntb->msi->end_addr);
return -EFAULT;
}
msi_desc->addr_offset = addr - ntb->msi->base_addr;
msi_desc->data = entry->msg.data;
return 0;
}
static void ntb_msi_write_msg(struct msi_desc *entry, void *data)
{
struct ntb_msi_devres *dr = data;
WARN_ON(ntb_msi_set_desc(dr->ntb, entry, dr->msi_desc));
if (dr->ntb->msi->desc_changed)
dr->ntb->msi->desc_changed(dr->ntb->ctx);
}
static void ntbm_msi_callback_release(struct device *dev, void *res)
{
struct ntb_msi_devres *dr = res;
dr->entry->write_msi_msg = NULL;
dr->entry->write_msi_msg_data = NULL;
}
static int ntbm_msi_setup_callback(struct ntb_dev *ntb, struct msi_desc *entry,
struct ntb_msi_desc *msi_desc)
{
struct ntb_msi_devres *dr;
dr = devres_alloc(ntbm_msi_callback_release,
sizeof(struct ntb_msi_devres), GFP_KERNEL);
if (!dr)
return -ENOMEM;
dr->ntb = ntb;
dr->entry = entry;
dr->msi_desc = msi_desc;
devres_add(&ntb->dev, dr);
dr->entry->write_msi_msg = ntb_msi_write_msg;
dr->entry->write_msi_msg_data = dr;
return 0;
}
/**
* ntbm_msi_request_threaded_irq() - allocate an MSI interrupt
* @ntb: NTB device context
* @handler: Function to be called when the IRQ occurs
* @thread_fn: Function to be called in a threaded interrupt context. NULL
* for clients which handle everything in @handler
* @name: An ascii name for the claiming device, dev_name(dev) if NULL
* @dev_id: A cookie passed back to the handler function
* @msi_desc: MSI descriptor data which triggers the interrupt
*
* This function assigns an interrupt handler to an unused
* MSI interrupt and returns the descriptor used to trigger
* it. The descriptor can then be sent to a peer to trigger
* the interrupt.
*
* The interrupt resource is managed with devres so it will
* be automatically freed when the NTB device is torn down.
*
* If an IRQ allocated with this function needs to be freed
* separately, ntbm_free_irq() must be used.
*
* Return: IRQ number assigned on success, otherwise a negative error number.
*/
int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
irq_handler_t thread_fn,
const char *name, void *dev_id,
struct ntb_msi_desc *msi_desc)
{
struct device *dev = &ntb->pdev->dev;
struct msi_desc *entry;
int ret;
if (!ntb->msi)
return -EINVAL;
msi_lock_descs(dev);
msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
if (irq_has_action(entry->irq))
continue;
ret = devm_request_threaded_irq(&ntb->dev, entry->irq, handler,
thread_fn, 0, name, dev_id);
if (ret)
continue;
if (ntb_msi_set_desc(ntb, entry, msi_desc)) {
devm_free_irq(&ntb->dev, entry->irq, dev_id);
continue;
}
ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
if (ret) {
devm_free_irq(&ntb->dev, entry->irq, dev_id);
goto unlock;
}
ret = entry->irq;
goto unlock;
}
ret = -ENODEV;
unlock:
msi_unlock_descs(dev);
return ret;
}
EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);
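/*
 * Illustrative sketch, not taken from an in-tree client: allocate a
 * vector and publish the resulting descriptor to the peer, here via two
 * arbitrarily chosen scratchpads (the same scheme ntb_transport uses).
 * foo_isr, foo and the scratchpad indices are hypothetical.
 *
 *	struct ntb_msi_desc desc;
 *	int irq;
 *
 *	irq = ntbm_msi_request_threaded_irq(ntb, foo_isr, NULL, "foo",
 *					    foo, &desc);
 *	if (irq < 0)
 *		return irq;
 *
 *	ntb_spad_write(ntb, 0, desc.addr_offset);
 *	ntb_spad_write(ntb, 1, desc.data);
 */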
static int ntbm_msi_callback_match(struct device *dev, void *res, void *data)
{
struct ntb_dev *ntb = dev_ntb(dev);
struct ntb_msi_devres *dr = res;
return dr->ntb == ntb && dr->entry == data;
}
/**
* ntbm_msi_free_irq() - free an interrupt
* @ntb: NTB device context
* @irq: Interrupt line to free
* @dev_id: Device identity to free
*
* This function should be used to manually free IRQs allocated with
* ntbm_request_[threaded_]irq().
*/
void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id)
{
struct msi_desc *entry = irq_get_msi_desc(irq);
entry->write_msi_msg = NULL;
entry->write_msi_msg_data = NULL;
WARN_ON(devres_destroy(&ntb->dev, ntbm_msi_callback_release,
ntbm_msi_callback_match, entry));
devm_free_irq(&ntb->dev, irq, dev_id);
}
EXPORT_SYMBOL(ntbm_msi_free_irq);
/**
* ntb_msi_peer_trigger() - Trigger an interrupt handler on a peer
* @ntb: NTB device context
* @peer: Peer index
* @desc: MSI descriptor data which triggers the interrupt
*
* This function triggers an interrupt on a peer. It requires
* the descriptor structure to have been passed from that peer
* by some other means.
*
* Return: Zero on success, otherwise a negative error number.
*/
int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
struct ntb_msi_desc *desc)
{
int idx;
if (!ntb->msi)
return -EINVAL;
idx = desc->addr_offset / sizeof(*ntb->msi->peer_mws[peer]);
iowrite32(desc->data, &ntb->msi->peer_mws[peer][idx]);
return 0;
}
EXPORT_SYMBOL(ntb_msi_peer_trigger);
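/*
 * Illustrative sketch, not taken from an in-tree client: once the peer's
 * descriptor has been obtained (here read back from the hypothetical
 * scratchpads 0 and 1 of the sketch above), raising its interrupt is a
 * single call.
 *
 *	struct ntb_msi_desc peer_desc;
 *
 *	peer_desc.addr_offset = ntb_peer_spad_read(ntb, NTB_DEF_PEER_IDX, 0);
 *	peer_desc.data = ntb_peer_spad_read(ntb, NTB_DEF_PEER_IDX, 1);
 *
 *	ntb_msi_peer_trigger(ntb, NTB_DEF_PEER_IDX, &peer_desc);
 */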
/**
* ntb_msi_peer_addr() - Get the DMA address to trigger a peer's MSI interrupt
* @ntb: NTB device context
* @peer: Peer index
* @desc: MSI descriptor data which triggers the interrupt
* @msi_addr: Physical address to trigger the interrupt
*
* This function allows using DMA engines to trigger an interrupt
* (for example, trigger an interrupt to process the data after
* sending it). To trigger the interrupt, write @desc.data to the address
 * returned in @msi_addr.
*
* Return: Zero on success, otherwise a negative error number.
*/
int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
struct ntb_msi_desc *desc,
phys_addr_t *msi_addr)
{
int peer_widx = ntb_peer_mw_count(ntb) - 1 - peer;
phys_addr_t mw_phys_addr;
int ret;
ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr, NULL);
if (ret)
return ret;
if (msi_addr)
*msi_addr = mw_phys_addr + desc->addr_offset;
return 0;
}
EXPORT_SYMBOL(ntb_msi_peer_addr);
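/*
 * Illustrative sketch, not taken from an in-tree client: with a DMA
 * engine the interrupt can be chained after a payload transfer by
 * writing the peer descriptor's data word to the address looked up here.
 * peer_desc is the descriptor received from the peer (as in the sketch
 * above); only the lookup is shown, the DMA submission itself is left
 * out.
 *
 *	phys_addr_t msi_addr;
 *	int rc;
 *
 *	rc = ntb_msi_peer_addr(ntb, NTB_DEF_PEER_IDX, &peer_desc, &msi_addr);
 *	if (rc)
 *		return rc;
 *	// queue a DMA write of peer_desc.data to msi_addr after the payload
 */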
| linux-master | drivers/ntb/msi.c |
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* BSD LICENSE
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copy
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* PCIe NTB Transport Linux driver
*
* Contact Information:
* Jon Mason <[email protected]>
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"
#define NTB_TRANSPORT_VERSION 4
#define NTB_TRANSPORT_VER "4"
#define NTB_TRANSPORT_NAME "ntb_transport"
#define NTB_TRANSPORT_DESC "Software Queue-Pair Transport over NTB"
#define NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2)
MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
static bool use_msi;
#ifdef CONFIG_NTB_MSI
module_param(use_msi, bool, 0644);
MODULE_PARM_DESC(use_msi, "Use MSI interrupts instead of doorbells");
#endif
static struct dentry *nt_debugfs_dir;
/* Only two-port NTB devices are supported */
#define PIDX NTB_DEF_PEER_IDX
struct ntb_queue_entry {
/* ntb_queue list reference */
struct list_head entry;
/* pointers to data to be transferred */
void *cb_data;
void *buf;
unsigned int len;
unsigned int flags;
int retries;
int errors;
unsigned int tx_index;
unsigned int rx_index;
struct ntb_transport_qp *qp;
union {
struct ntb_payload_header __iomem *tx_hdr;
struct ntb_payload_header *rx_hdr;
};
};
struct ntb_rx_info {
unsigned int entry;
};
struct ntb_transport_qp {
struct ntb_transport_ctx *transport;
struct ntb_dev *ndev;
void *cb_data;
struct dma_chan *tx_dma_chan;
struct dma_chan *rx_dma_chan;
bool client_ready;
bool link_is_up;
bool active;
u8 qp_num; /* Only 64 QP's are allowed. 0-63 */
u64 qp_bit;
struct ntb_rx_info __iomem *rx_info;
struct ntb_rx_info *remote_rx_info;
void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
void *data, int len);
struct list_head tx_free_q;
spinlock_t ntb_tx_free_q_lock;
void __iomem *tx_mw;
phys_addr_t tx_mw_phys;
size_t tx_mw_size;
dma_addr_t tx_mw_dma_addr;
unsigned int tx_index;
unsigned int tx_max_entry;
unsigned int tx_max_frame;
void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
void *data, int len);
struct list_head rx_post_q;
struct list_head rx_pend_q;
struct list_head rx_free_q;
/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
spinlock_t ntb_rx_q_lock;
void *rx_buff;
unsigned int rx_index;
unsigned int rx_max_entry;
unsigned int rx_max_frame;
unsigned int rx_alloc_entry;
dma_cookie_t last_cookie;
struct tasklet_struct rxc_db_work;
void (*event_handler)(void *data, int status);
struct delayed_work link_work;
struct work_struct link_cleanup;
struct dentry *debugfs_dir;
struct dentry *debugfs_stats;
/* Stats */
u64 rx_bytes;
u64 rx_pkts;
u64 rx_ring_empty;
u64 rx_err_no_buf;
u64 rx_err_oflow;
u64 rx_err_ver;
u64 rx_memcpy;
u64 rx_async;
u64 tx_bytes;
u64 tx_pkts;
u64 tx_ring_full;
u64 tx_err_no_buf;
u64 tx_memcpy;
u64 tx_async;
bool use_msi;
int msi_irq;
struct ntb_msi_desc msi_desc;
struct ntb_msi_desc peer_msi_desc;
};
struct ntb_transport_mw {
phys_addr_t phys_addr;
resource_size_t phys_size;
void __iomem *vbase;
size_t xlat_size;
size_t buff_size;
size_t alloc_size;
void *alloc_addr;
void *virt_addr;
dma_addr_t dma_addr;
};
struct ntb_transport_client_dev {
struct list_head entry;
struct ntb_transport_ctx *nt;
struct device dev;
};
struct ntb_transport_ctx {
struct list_head entry;
struct list_head client_devs;
struct ntb_dev *ndev;
struct ntb_transport_mw *mw_vec;
struct ntb_transport_qp *qp_vec;
unsigned int mw_count;
unsigned int qp_count;
u64 qp_bitmap;
u64 qp_bitmap_free;
bool use_msi;
unsigned int msi_spad_offset;
u64 msi_db_mask;
bool link_is_up;
struct delayed_work link_work;
struct work_struct link_cleanup;
struct dentry *debugfs_node_dir;
};
enum {
DESC_DONE_FLAG = BIT(0),
LINK_DOWN_FLAG = BIT(1),
};
struct ntb_payload_header {
unsigned int ver;
unsigned int len;
unsigned int flags;
};
enum {
VERSION = 0,
QP_LINKS,
NUM_QPS,
NUM_MWS,
MW0_SZ_HIGH,
MW0_SZ_LOW,
};
#define dev_client_dev(__dev) \
container_of((__dev), struct ntb_transport_client_dev, dev)
#define drv_client(__drv) \
container_of((__drv), struct ntb_transport_client, driver)
#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES 100
#define NTB_LINK_DOWN_TIMEOUT 10
static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;
static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
struct ntb_queue_entry *entry);
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
static int ntb_transport_bus_match(struct device *dev,
struct device_driver *drv)
{
return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}
static int ntb_transport_bus_probe(struct device *dev)
{
const struct ntb_transport_client *client;
int rc;
get_device(dev);
client = drv_client(dev->driver);
rc = client->probe(dev);
if (rc)
put_device(dev);
return rc;
}
static void ntb_transport_bus_remove(struct device *dev)
{
const struct ntb_transport_client *client;
client = drv_client(dev->driver);
client->remove(dev);
put_device(dev);
}
static struct bus_type ntb_transport_bus = {
.name = "ntb_transport",
.match = ntb_transport_bus_match,
.probe = ntb_transport_bus_probe,
.remove = ntb_transport_bus_remove,
};
static LIST_HEAD(ntb_transport_list);
static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
list_add_tail(&nt->entry, &ntb_transport_list);
return 0;
}
static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
struct ntb_transport_client_dev *client_dev, *cd;
list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
dev_name(&client_dev->dev));
list_del(&client_dev->entry);
device_unregister(&client_dev->dev);
}
list_del(&nt->entry);
}
static void ntb_transport_client_release(struct device *dev)
{
struct ntb_transport_client_dev *client_dev;
client_dev = dev_client_dev(dev);
kfree(client_dev);
}
/**
* ntb_transport_unregister_client_dev - Unregister NTB client device
* @device_name: Name of NTB client device
*
* Unregister an NTB client device with the NTB transport layer
*/
void ntb_transport_unregister_client_dev(char *device_name)
{
struct ntb_transport_client_dev *client, *cd;
struct ntb_transport_ctx *nt;
list_for_each_entry(nt, &ntb_transport_list, entry)
list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
if (!strncmp(dev_name(&client->dev), device_name,
strlen(device_name))) {
list_del(&client->entry);
device_unregister(&client->dev);
}
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
/**
* ntb_transport_register_client_dev - Register NTB client device
* @device_name: Name of NTB client device
*
* Register an NTB client device with the NTB transport layer
*/
int ntb_transport_register_client_dev(char *device_name)
{
struct ntb_transport_client_dev *client_dev;
struct ntb_transport_ctx *nt;
int node;
int rc, i = 0;
if (list_empty(&ntb_transport_list))
return -ENODEV;
list_for_each_entry(nt, &ntb_transport_list, entry) {
struct device *dev;
node = dev_to_node(&nt->ndev->dev);
client_dev = kzalloc_node(sizeof(*client_dev),
GFP_KERNEL, node);
if (!client_dev) {
rc = -ENOMEM;
goto err;
}
dev = &client_dev->dev;
/* setup and register client devices */
dev_set_name(dev, "%s%d", device_name, i);
dev->bus = &ntb_transport_bus;
dev->release = ntb_transport_client_release;
dev->parent = &nt->ndev->dev;
rc = device_register(dev);
if (rc) {
put_device(dev);
goto err;
}
list_add_tail(&client_dev->entry, &nt->client_devs);
i++;
}
return 0;
err:
ntb_transport_unregister_client_dev(device_name);
return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);
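/*
 * Illustrative sketch, not taken from an in-tree client: a client module
 * typically registers its device name and then its driver from
 * module_init(); "ntb_foo" and ntb_foo_client are hypothetical, and the
 * bus match routine above compares the name by prefix.
 *
 *	rc = ntb_transport_register_client_dev("ntb_foo");
 *	if (rc)
 *		return rc;
 *
 *	rc = ntb_transport_register_client(&ntb_foo_client);
 *	if (rc)
 *		ntb_transport_unregister_client_dev("ntb_foo");
 */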
/**
* ntb_transport_register_client - Register NTB client driver
* @drv: NTB client driver to be registered
*
* Register an NTB client driver with the NTB transport layer
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
drv->driver.bus = &ntb_transport_bus;
if (list_empty(&ntb_transport_list))
return -ENODEV;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);
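/*
 * Illustrative sketch, not taken from an in-tree client: the minimal
 * shape of a transport client, based on the probe/remove calls made by
 * the bus code above. The names are hypothetical; ntb_netdev is a
 * complete in-tree example.
 *
 *	static struct ntb_transport_client ntb_foo_client = {
 *		.driver.name = "ntb_foo",
 *		.driver.owner = THIS_MODULE,
 *		.probe = ntb_foo_probe,
 *		.remove = ntb_foo_remove,
 *	};
 */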
/**
* ntb_transport_unregister_client - Unregister NTB client driver
* @drv: NTB client driver to be unregistered
*
* Unregister an NTB client driver with the NTB transport layer
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
loff_t *offp)
{
struct ntb_transport_qp *qp;
char *buf;
ssize_t ret, out_offset, out_count;
qp = filp->private_data;
if (!qp || !qp->link_is_up)
return 0;
out_count = 1000;
buf = kmalloc(out_count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
out_offset = 0;
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"\nNTB QP stats:\n\n");
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_bytes - \t%llu\n", qp->rx_bytes);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_pkts - \t%llu\n", qp->rx_pkts);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_memcpy - \t%llu\n", qp->rx_memcpy);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_async - \t%llu\n", qp->rx_async);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_ring_empty - %llu\n", qp->rx_ring_empty);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_err_ver - \t%llu\n", qp->rx_err_ver);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_buff - \t0x%p\n", qp->rx_buff);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_index - \t%u\n", qp->rx_index);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_max_entry - \t%u\n", qp->rx_max_entry);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_bytes - \t%llu\n", qp->tx_bytes);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_pkts - \t%llu\n", qp->tx_pkts);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_memcpy - \t%llu\n", qp->tx_memcpy);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_async - \t%llu\n", qp->tx_async);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_ring_full - \t%llu\n", qp->tx_ring_full);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_mw - \t0x%p\n", qp->tx_mw);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_index (H) - \t%u\n", qp->tx_index);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"RRI (T) - \t%u\n",
qp->remote_rx_info->entry);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"tx_max_entry - \t%u\n", qp->tx_max_entry);
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"free tx - \t%u\n",
ntb_transport_tx_free_entry(qp));
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"\n");
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Using TX DMA - \t%s\n",
qp->tx_dma_chan ? "Yes" : "No");
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Using RX DMA - \t%s\n",
qp->rx_dma_chan ? "Yes" : "No");
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"QP Link - \t%s\n",
qp->link_is_up ? "Up" : "Down");
out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"\n");
if (out_offset > out_count)
out_offset = out_count;
ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
kfree(buf);
return ret;
}
static const struct file_operations ntb_qp_debugfs_stats = {
.owner = THIS_MODULE,
.open = simple_open,
.read = debugfs_read,
};
static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
struct list_head *list)
{
unsigned long flags;
spin_lock_irqsave(lock, flags);
list_add_tail(entry, list);
spin_unlock_irqrestore(lock, flags);
}
static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
struct list_head *list)
{
struct ntb_queue_entry *entry;
unsigned long flags;
spin_lock_irqsave(lock, flags);
if (list_empty(list)) {
entry = NULL;
goto out;
}
entry = list_first_entry(list, struct ntb_queue_entry, entry);
list_del(&entry->entry);
out:
spin_unlock_irqrestore(lock, flags);
return entry;
}
static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
struct list_head *list,
struct list_head *to_list)
{
struct ntb_queue_entry *entry;
unsigned long flags;
spin_lock_irqsave(lock, flags);
if (list_empty(list)) {
entry = NULL;
} else {
entry = list_first_entry(list, struct ntb_queue_entry, entry);
list_move_tail(&entry->entry, to_list);
}
spin_unlock_irqrestore(lock, flags);
return entry;
}
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
unsigned int qp_num)
{
struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
struct ntb_transport_mw *mw;
struct ntb_dev *ndev = nt->ndev;
struct ntb_queue_entry *entry;
unsigned int rx_size, num_qps_mw;
unsigned int mw_num, mw_count, qp_count;
unsigned int i;
int node;
mw_count = nt->mw_count;
qp_count = nt->qp_count;
mw_num = QP_TO_MW(nt, qp_num);
mw = &nt->mw_vec[mw_num];
if (!mw->virt_addr)
return -ENOMEM;
if (mw_num < qp_count % mw_count)
num_qps_mw = qp_count / mw_count + 1;
else
num_qps_mw = qp_count / mw_count;
rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
rx_size -= sizeof(struct ntb_rx_info);
qp->remote_rx_info = qp->rx_buff + rx_size;
	/* Due to housekeeping, there must be at least 2 buffers */
qp->rx_max_frame = min(transport_mtu, rx_size / 2);
qp->rx_max_entry = rx_size / qp->rx_max_frame;
qp->rx_index = 0;
/*
	 * Check whether we have more entries than the default. If so, add
	 * additional entries so that we stay in sync with the transport
	 * frames.
*/
node = dev_to_node(&ndev->dev);
for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
if (!entry)
return -ENOMEM;
entry->qp = qp;
ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
&qp->rx_free_q);
qp->rx_alloc_entry++;
}
qp->remote_rx_info->entry = qp->rx_max_entry - 1;
/* setup the hdr offsets with 0's */
for (i = 0; i < qp->rx_max_entry; i++) {
void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
sizeof(struct ntb_payload_header));
memset(offset, 0, sizeof(struct ntb_payload_header));
}
qp->rx_pkts = 0;
qp->tx_pkts = 0;
qp->tx_index = 0;
return 0;
}
static irqreturn_t ntb_transport_isr(int irq, void *dev)
{
struct ntb_transport_qp *qp = dev;
tasklet_schedule(&qp->rxc_db_work);
return IRQ_HANDLED;
}
static void ntb_transport_setup_qp_peer_msi(struct ntb_transport_ctx *nt,
unsigned int qp_num)
{
struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
int spad = qp_num * 2 + nt->msi_spad_offset;
if (!nt->use_msi)
return;
if (spad >= ntb_spad_count(nt->ndev))
return;
qp->peer_msi_desc.addr_offset =
ntb_peer_spad_read(qp->ndev, PIDX, spad);
qp->peer_msi_desc.data =
ntb_peer_spad_read(qp->ndev, PIDX, spad + 1);
dev_dbg(&qp->ndev->pdev->dev, "QP%d Peer MSI addr=%x data=%x\n",
qp_num, qp->peer_msi_desc.addr_offset, qp->peer_msi_desc.data);
if (qp->peer_msi_desc.addr_offset) {
qp->use_msi = true;
dev_info(&qp->ndev->pdev->dev,
"Using MSI interrupts for QP%d\n", qp_num);
}
}
static void ntb_transport_setup_qp_msi(struct ntb_transport_ctx *nt,
unsigned int qp_num)
{
struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
int spad = qp_num * 2 + nt->msi_spad_offset;
int rc;
if (!nt->use_msi)
return;
if (spad >= ntb_spad_count(nt->ndev)) {
dev_warn_once(&qp->ndev->pdev->dev,
"Not enough SPADS to use MSI interrupts\n");
return;
}
ntb_spad_write(qp->ndev, spad, 0);
ntb_spad_write(qp->ndev, spad + 1, 0);
if (!qp->msi_irq) {
qp->msi_irq = ntbm_msi_request_irq(qp->ndev, ntb_transport_isr,
KBUILD_MODNAME, qp,
&qp->msi_desc);
if (qp->msi_irq < 0) {
dev_warn(&qp->ndev->pdev->dev,
"Unable to allocate MSI interrupt for qp%d\n",
qp_num);
return;
}
}
rc = ntb_spad_write(qp->ndev, spad, qp->msi_desc.addr_offset);
if (rc)
goto err_free_interrupt;
rc = ntb_spad_write(qp->ndev, spad + 1, qp->msi_desc.data);
if (rc)
goto err_free_interrupt;
dev_dbg(&qp->ndev->pdev->dev, "QP%d MSI %d addr=%x data=%x\n",
qp_num, qp->msi_irq, qp->msi_desc.addr_offset,
qp->msi_desc.data);
return;
err_free_interrupt:
devm_free_irq(&nt->ndev->dev, qp->msi_irq, qp);
}
static void ntb_transport_msi_peer_desc_changed(struct ntb_transport_ctx *nt)
{
int i;
dev_dbg(&nt->ndev->pdev->dev, "Peer MSI descriptors changed");
for (i = 0; i < nt->qp_count; i++)
ntb_transport_setup_qp_peer_msi(nt, i);
}
static void ntb_transport_msi_desc_changed(void *data)
{
struct ntb_transport_ctx *nt = data;
int i;
dev_dbg(&nt->ndev->pdev->dev, "MSI descriptors changed");
for (i = 0; i < nt->qp_count; i++)
ntb_transport_setup_qp_msi(nt, i);
ntb_peer_db_set(nt->ndev, nt->msi_db_mask);
}
static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
struct pci_dev *pdev = nt->ndev->pdev;
if (!mw->virt_addr)
return;
ntb_mw_clear_trans(nt->ndev, PIDX, num_mw);
dma_free_coherent(&pdev->dev, mw->alloc_size,
mw->alloc_addr, mw->dma_addr);
mw->xlat_size = 0;
mw->buff_size = 0;
mw->alloc_size = 0;
mw->alloc_addr = NULL;
mw->virt_addr = NULL;
}
static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
struct device *dma_dev, size_t align)
{
dma_addr_t dma_addr;
void *alloc_addr, *virt_addr;
int rc;
alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size,
&dma_addr, GFP_KERNEL);
if (!alloc_addr) {
dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n",
mw->alloc_size);
return -ENOMEM;
}
virt_addr = alloc_addr;
/*
	 * We must ensure that the allocated memory address is aligned to the
	 * BAR size in order for the XLAT register to accept the value. This
	 * is a hardware requirement. It is recommended to set up CMA for BAR
	 * sizes equal to or greater than 4MB.
*/
if (!IS_ALIGNED(dma_addr, align)) {
if (mw->alloc_size > mw->buff_size) {
virt_addr = PTR_ALIGN(alloc_addr, align);
dma_addr = ALIGN(dma_addr, align);
} else {
rc = -ENOMEM;
goto err;
}
}
mw->alloc_addr = alloc_addr;
mw->virt_addr = virt_addr;
mw->dma_addr = dma_addr;
return 0;
err:
dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr);
return rc;
}
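/*
 * Worked example of the alignment fixup above, with illustrative numbers:
 * for align = 4MB and dma_addr = 0x1ff00000, ALIGN()/PTR_ALIGN() move the
 * usable buffer up to 0x20000000, i.e. 1MB into the allocation. This only
 * fits because ntb_set_mw() retries with alloc_size doubled relative to
 * buff_size when the first, exactly-sized allocation is misaligned.
 */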
static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
resource_size_t size)
{
struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
struct pci_dev *pdev = nt->ndev->pdev;
size_t xlat_size, buff_size;
resource_size_t xlat_align;
resource_size_t xlat_align_size;
int rc;
if (!size)
return -EINVAL;
rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align,
&xlat_align_size, NULL);
if (rc)
return rc;
xlat_size = round_up(size, xlat_align_size);
buff_size = round_up(size, xlat_align);
/* No need to re-setup */
if (mw->xlat_size == xlat_size)
return 0;
if (mw->buff_size)
ntb_free_mw(nt, num_mw);
/* Alloc memory for receiving data. Must be aligned */
mw->xlat_size = xlat_size;
mw->buff_size = buff_size;
mw->alloc_size = buff_size;
rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align);
if (rc) {
mw->alloc_size *= 2;
rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align);
if (rc) {
dev_err(&pdev->dev,
"Unable to alloc aligned MW buff\n");
mw->xlat_size = 0;
mw->buff_size = 0;
mw->alloc_size = 0;
return rc;
}
}
	/* Notify the HW of the memory location of the receive buffer */
rc = ntb_mw_set_trans(nt->ndev, PIDX, num_mw, mw->dma_addr,
mw->xlat_size);
if (rc) {
dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
ntb_free_mw(nt, num_mw);
return -EIO;
}
return 0;
}
static void ntb_qp_link_context_reset(struct ntb_transport_qp *qp)
{
qp->link_is_up = false;
qp->active = false;
qp->tx_index = 0;
qp->rx_index = 0;
qp->rx_bytes = 0;
qp->rx_pkts = 0;
qp->rx_ring_empty = 0;
qp->rx_err_no_buf = 0;
qp->rx_err_oflow = 0;
qp->rx_err_ver = 0;
qp->rx_memcpy = 0;
qp->rx_async = 0;
qp->tx_bytes = 0;
qp->tx_pkts = 0;
qp->tx_ring_full = 0;
qp->tx_err_no_buf = 0;
qp->tx_memcpy = 0;
qp->tx_async = 0;
}
static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
ntb_qp_link_context_reset(qp);
if (qp->remote_rx_info)
qp->remote_rx_info->entry = qp->rx_max_entry - 1;
}
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
struct ntb_transport_ctx *nt = qp->transport;
struct pci_dev *pdev = nt->ndev->pdev;
dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);
cancel_delayed_work_sync(&qp->link_work);
ntb_qp_link_down_reset(qp);
if (qp->event_handler)
qp->event_handler(qp->cb_data, qp->link_is_up);
}
static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
struct ntb_transport_qp *qp = container_of(work,
struct ntb_transport_qp,
link_cleanup);
struct ntb_transport_ctx *nt = qp->transport;
ntb_qp_link_cleanup(qp);
if (nt->link_is_up)
schedule_delayed_work(&qp->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
schedule_work(&qp->link_cleanup);
}
static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
struct ntb_transport_qp *qp;
u64 qp_bitmap_alloc;
unsigned int i, count;
qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
/* Pass along the info to any clients */
for (i = 0; i < nt->qp_count; i++)
if (qp_bitmap_alloc & BIT_ULL(i)) {
qp = &nt->qp_vec[i];
ntb_qp_link_cleanup(qp);
cancel_work_sync(&qp->link_cleanup);
cancel_delayed_work_sync(&qp->link_work);
}
if (!nt->link_is_up)
cancel_delayed_work_sync(&nt->link_work);
for (i = 0; i < nt->mw_count; i++)
ntb_free_mw(nt, i);
/* The scratchpad registers keep their values if the remote side
 * goes down; clear them now to give them a sane value the next
 * time they are accessed.
*/
count = ntb_spad_count(nt->ndev);
for (i = 0; i < count; i++)
ntb_spad_write(nt->ndev, i, 0);
}
static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
struct ntb_transport_ctx *nt =
container_of(work, struct ntb_transport_ctx, link_cleanup);
ntb_transport_link_cleanup(nt);
}
static void ntb_transport_event_callback(void *data)
{
struct ntb_transport_ctx *nt = data;
if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
schedule_delayed_work(&nt->link_work, 0);
else
schedule_work(&nt->link_cleanup);
}
static void ntb_transport_link_work(struct work_struct *work)
{
struct ntb_transport_ctx *nt =
container_of(work, struct ntb_transport_ctx, link_work.work);
struct ntb_dev *ndev = nt->ndev;
struct pci_dev *pdev = ndev->pdev;
resource_size_t size;
u32 val;
int rc = 0, i, spad;
/* send the local info, in the opposite order of the way we read it */
if (nt->use_msi) {
rc = ntb_msi_setup_mws(ndev);
if (rc) {
dev_warn(&pdev->dev,
"Failed to register MSI memory window: %d\n",
rc);
nt->use_msi = false;
}
}
for (i = 0; i < nt->qp_count; i++)
ntb_transport_setup_qp_msi(nt, i);
for (i = 0; i < nt->mw_count; i++) {
size = nt->mw_vec[i].phys_size;
if (max_mw_size && size > max_mw_size)
size = max_mw_size;
spad = MW0_SZ_HIGH + (i * 2);
ntb_peer_spad_write(ndev, PIDX, spad, upper_32_bits(size));
spad = MW0_SZ_LOW + (i * 2);
ntb_peer_spad_write(ndev, PIDX, spad, lower_32_bits(size));
}
ntb_peer_spad_write(ndev, PIDX, NUM_MWS, nt->mw_count);
ntb_peer_spad_write(ndev, PIDX, NUM_QPS, nt->qp_count);
ntb_peer_spad_write(ndev, PIDX, VERSION, NTB_TRANSPORT_VERSION);
/* Query the remote side for its info */
val = ntb_spad_read(ndev, VERSION);
dev_dbg(&pdev->dev, "Remote version = %d\n", val);
if (val != NTB_TRANSPORT_VERSION)
goto out;
val = ntb_spad_read(ndev, NUM_QPS);
dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
if (val != nt->qp_count)
goto out;
val = ntb_spad_read(ndev, NUM_MWS);
dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
if (val != nt->mw_count)
goto out;
for (i = 0; i < nt->mw_count; i++) {
u64 val64;
val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
val64 = (u64)val << 32;
val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
val64 |= val;
dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);
rc = ntb_set_mw(nt, i, val64);
if (rc)
goto out1;
}
nt->link_is_up = true;
for (i = 0; i < nt->qp_count; i++) {
struct ntb_transport_qp *qp = &nt->qp_vec[i];
ntb_transport_setup_qp_mw(nt, i);
ntb_transport_setup_qp_peer_msi(nt, i);
if (qp->client_ready)
schedule_delayed_work(&qp->link_work, 0);
}
return;
out1:
for (i = 0; i < nt->mw_count; i++)
ntb_free_mw(nt, i);
/* if there's an actual failure, we should just bail */
if (rc < 0)
return;
out:
if (ntb_link_is_up(ndev, NULL, NULL) == 1)
schedule_delayed_work(&nt->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
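/*
 * Summary of the scratchpad handshake performed above (derived from the
 * reads and writes in ntb_transport_link_work(), not from a separate spec):
 *
 *	peer spad[MW0_SZ_HIGH + 2*i]	upper 32 bits of MW i size
 *	peer spad[MW0_SZ_LOW  + 2*i]	lower 32 bits of MW i size
 *	peer spad[NUM_MWS]		local memory window count
 *	peer spad[NUM_QPS]		local queue pair count
 *	peer spad[VERSION]		NTB_TRANSPORT_VERSION (written last)
 *
 * The local side then reads the same fields from its own scratchpads and
 * only brings the transport link up when the version, qp count and mw
 * count all match the remote side.
 */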
static void ntb_qp_link_work(struct work_struct *work)
{
struct ntb_transport_qp *qp = container_of(work,
struct ntb_transport_qp,
link_work.work);
struct pci_dev *pdev = qp->ndev->pdev;
struct ntb_transport_ctx *nt = qp->transport;
int val;
WARN_ON(!nt->link_is_up);
val = ntb_spad_read(nt->ndev, QP_LINKS);
ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num));
/* query remote spad for qp ready bits */
dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);
/* See if the remote side is up */
if (val & BIT(qp->qp_num)) {
dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
qp->link_is_up = true;
qp->active = true;
if (qp->event_handler)
qp->event_handler(qp->cb_data, qp->link_is_up);
if (qp->active)
tasklet_schedule(&qp->rxc_db_work);
} else if (nt->link_is_up)
schedule_delayed_work(&qp->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
unsigned int qp_num)
{
struct ntb_transport_qp *qp;
phys_addr_t mw_base;
resource_size_t mw_size;
unsigned int num_qps_mw, tx_size;
unsigned int mw_num, mw_count, qp_count;
u64 qp_offset;
mw_count = nt->mw_count;
qp_count = nt->qp_count;
mw_num = QP_TO_MW(nt, qp_num);
qp = &nt->qp_vec[qp_num];
qp->qp_num = qp_num;
qp->transport = nt;
qp->ndev = nt->ndev;
qp->client_ready = false;
qp->event_handler = NULL;
ntb_qp_link_context_reset(qp);
if (mw_num < qp_count % mw_count)
num_qps_mw = qp_count / mw_count + 1;
else
num_qps_mw = qp_count / mw_count;
mw_base = nt->mw_vec[mw_num].phys_addr;
mw_size = nt->mw_vec[mw_num].phys_size;
if (max_mw_size && mw_size > max_mw_size)
mw_size = max_mw_size;
tx_size = (unsigned int)mw_size / num_qps_mw;
qp_offset = tx_size * (qp_num / mw_count);
qp->tx_mw_size = tx_size;
qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
if (!qp->tx_mw)
return -EINVAL;
qp->tx_mw_phys = mw_base + qp_offset;
if (!qp->tx_mw_phys)
return -EINVAL;
tx_size -= sizeof(struct ntb_rx_info);
qp->rx_info = qp->tx_mw + tx_size;
/* Due to housekeeping, there must be at least 2 buffers */
qp->tx_max_frame = min(transport_mtu, tx_size / 2);
qp->tx_max_entry = tx_size / qp->tx_max_frame;
if (nt->debugfs_node_dir) {
char debugfs_name[4];
snprintf(debugfs_name, 4, "qp%d", qp_num);
qp->debugfs_dir = debugfs_create_dir(debugfs_name,
nt->debugfs_node_dir);
qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
qp->debugfs_dir, qp,
&ntb_qp_debugfs_stats);
} else {
qp->debugfs_dir = NULL;
qp->debugfs_stats = NULL;
}
INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
spin_lock_init(&qp->ntb_rx_q_lock);
spin_lock_init(&qp->ntb_tx_free_q_lock);
INIT_LIST_HEAD(&qp->rx_post_q);
INIT_LIST_HEAD(&qp->rx_pend_q);
INIT_LIST_HEAD(&qp->rx_free_q);
INIT_LIST_HEAD(&qp->tx_free_q);
tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
(unsigned long)qp);
return 0;
}
static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
struct ntb_transport_ctx *nt;
struct ntb_transport_mw *mw;
unsigned int mw_count, qp_count, spad_count, max_mw_count_for_spads;
u64 qp_bitmap;
int node;
int rc, i;
mw_count = ntb_peer_mw_count(ndev);
if (!ndev->ops->mw_set_trans) {
dev_err(&ndev->dev, "Inbound MW based NTB API is required\n");
return -EINVAL;
}
if (ntb_db_is_unsafe(ndev))
dev_dbg(&ndev->dev,
"doorbell is unsafe, proceed anyway...\n");
if (ntb_spad_is_unsafe(ndev))
dev_dbg(&ndev->dev,
"scratchpad is unsafe, proceed anyway...\n");
if (ntb_peer_port_count(ndev) != NTB_DEF_PEER_CNT)
dev_warn(&ndev->dev, "Multi-port NTB devices unsupported\n");
node = dev_to_node(&ndev->dev);
nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
if (!nt)
return -ENOMEM;
nt->ndev = ndev;
/*
* If we are using MSI, and have at least one extra memory window,
* we will reserve the last MW for the MSI window.
*/
if (use_msi && mw_count > 1) {
rc = ntb_msi_init(ndev, ntb_transport_msi_desc_changed);
if (!rc) {
mw_count -= 1;
nt->use_msi = true;
}
}
spad_count = ntb_spad_count(ndev);
/* Limit the MW's based on the availability of scratchpads */
if (spad_count < NTB_TRANSPORT_MIN_SPADS) {
nt->mw_count = 0;
rc = -EINVAL;
goto err;
}
max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2;
nt->mw_count = min(mw_count, max_mw_count_for_spads);
nt->msi_spad_offset = nt->mw_count * 2 + MW0_SZ_HIGH;
nt->mw_vec = kcalloc_node(mw_count, sizeof(*nt->mw_vec),
GFP_KERNEL, node);
if (!nt->mw_vec) {
rc = -ENOMEM;
goto err;
}
for (i = 0; i < mw_count; i++) {
mw = &nt->mw_vec[i];
rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr,
&mw->phys_size);
if (rc)
goto err1;
mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
if (!mw->vbase) {
rc = -ENOMEM;
goto err1;
}
mw->buff_size = 0;
mw->xlat_size = 0;
mw->virt_addr = NULL;
mw->dma_addr = 0;
}
qp_bitmap = ntb_db_valid_mask(ndev);
qp_count = ilog2(qp_bitmap);
if (nt->use_msi) {
qp_count -= 1;
nt->msi_db_mask = 1 << qp_count;
ntb_db_clear_mask(ndev, nt->msi_db_mask);
}
if (max_num_clients && max_num_clients < qp_count)
qp_count = max_num_clients;
else if (nt->mw_count < qp_count)
qp_count = nt->mw_count;
qp_bitmap &= BIT_ULL(qp_count) - 1;
nt->qp_count = qp_count;
nt->qp_bitmap = qp_bitmap;
nt->qp_bitmap_free = qp_bitmap;
nt->qp_vec = kcalloc_node(qp_count, sizeof(*nt->qp_vec),
GFP_KERNEL, node);
if (!nt->qp_vec) {
rc = -ENOMEM;
goto err1;
}
if (nt_debugfs_dir) {
nt->debugfs_node_dir =
debugfs_create_dir(pci_name(ndev->pdev),
nt_debugfs_dir);
}
for (i = 0; i < qp_count; i++) {
rc = ntb_transport_init_queue(nt, i);
if (rc)
goto err2;
}
INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
if (rc)
goto err2;
INIT_LIST_HEAD(&nt->client_devs);
rc = ntb_bus_init(nt);
if (rc)
goto err3;
nt->link_is_up = false;
ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
ntb_link_event(ndev);
return 0;
err3:
ntb_clear_ctx(ndev);
err2:
kfree(nt->qp_vec);
err1:
while (i--) {
mw = &nt->mw_vec[i];
iounmap(mw->vbase);
}
kfree(nt->mw_vec);
err:
kfree(nt);
return rc;
}
static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
struct ntb_transport_ctx *nt = ndev->ctx;
struct ntb_transport_qp *qp;
u64 qp_bitmap_alloc;
int i;
ntb_transport_link_cleanup(nt);
cancel_work_sync(&nt->link_cleanup);
cancel_delayed_work_sync(&nt->link_work);
qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
/* verify that all the qp's are freed */
for (i = 0; i < nt->qp_count; i++) {
qp = &nt->qp_vec[i];
if (qp_bitmap_alloc & BIT_ULL(i))
ntb_transport_free_queue(qp);
debugfs_remove_recursive(qp->debugfs_dir);
}
ntb_link_disable(ndev);
ntb_clear_ctx(ndev);
ntb_bus_remove(nt);
for (i = nt->mw_count; i--; ) {
ntb_free_mw(nt, i);
iounmap(nt->mw_vec[i].vbase);
}
kfree(nt->qp_vec);
kfree(nt->mw_vec);
kfree(nt);
}
static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
struct ntb_queue_entry *entry;
void *cb_data;
unsigned int len;
unsigned long irqflags;
spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
while (!list_empty(&qp->rx_post_q)) {
entry = list_first_entry(&qp->rx_post_q,
struct ntb_queue_entry, entry);
if (!(entry->flags & DESC_DONE_FLAG))
break;
entry->rx_hdr->flags = 0;
iowrite32(entry->rx_index, &qp->rx_info->entry);
cb_data = entry->cb_data;
len = entry->len;
list_move_tail(&entry->entry, &qp->rx_free_q);
spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
if (qp->rx_handler && qp->client_ready)
qp->rx_handler(qp, qp->cb_data, cb_data, len);
spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
}
spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
}
static void ntb_rx_copy_callback(void *data,
const struct dmaengine_result *res)
{
struct ntb_queue_entry *entry = data;
/* we need to check DMA results if we are using DMA */
if (res) {
enum dmaengine_tx_result dma_err = res->result;
switch (dma_err) {
case DMA_TRANS_READ_FAILED:
case DMA_TRANS_WRITE_FAILED:
entry->errors++;
fallthrough;
case DMA_TRANS_ABORTED:
{
struct ntb_transport_qp *qp = entry->qp;
void *offset = qp->rx_buff + qp->rx_max_frame *
qp->rx_index;
ntb_memcpy_rx(entry, offset);
qp->rx_memcpy++;
return;
}
case DMA_TRANS_NOERROR:
default:
break;
}
}
entry->flags |= DESC_DONE_FLAG;
ntb_complete_rxc(entry->qp);
}
static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
void *buf = entry->buf;
size_t len = entry->len;
memcpy(buf, offset, len);
/* Ensure that the data is fully copied out before clearing the flag */
wmb();
ntb_rx_copy_callback(entry, NULL);
}
static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
{
struct dma_async_tx_descriptor *txd;
struct ntb_transport_qp *qp = entry->qp;
struct dma_chan *chan = qp->rx_dma_chan;
struct dma_device *device;
size_t pay_off, buff_off, len;
struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
void *buf = entry->buf;
len = entry->len;
device = chan->device;
pay_off = (size_t)offset & ~PAGE_MASK;
buff_off = (size_t)buf & ~PAGE_MASK;
if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
goto err;
unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
if (!unmap)
goto err;
unmap->len = len;
unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
pay_off, len, DMA_TO_DEVICE);
if (dma_mapping_error(device->dev, unmap->addr[0]))
goto err_get_unmap;
unmap->to_cnt = 1;
unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
buff_off, len, DMA_FROM_DEVICE);
if (dma_mapping_error(device->dev, unmap->addr[1]))
goto err_get_unmap;
unmap->from_cnt = 1;
txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
unmap->addr[0], len,
DMA_PREP_INTERRUPT);
if (!txd)
goto err_get_unmap;
txd->callback_result = ntb_rx_copy_callback;
txd->callback_param = entry;
dma_set_unmap(txd, unmap);
cookie = dmaengine_submit(txd);
if (dma_submit_error(cookie))
goto err_set_unmap;
dmaengine_unmap_put(unmap);
qp->last_cookie = cookie;
qp->rx_async++;
return 0;
err_set_unmap:
dmaengine_unmap_put(unmap);
err_get_unmap:
dmaengine_unmap_put(unmap);
err:
return -ENXIO;
}
static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
struct ntb_transport_qp *qp = entry->qp;
struct dma_chan *chan = qp->rx_dma_chan;
int res;
if (!chan)
goto err;
if (entry->len < copy_bytes)
goto err;
res = ntb_async_rx_submit(entry, offset);
if (res < 0)
goto err;
if (!entry->retries)
qp->rx_async++;
return;
err:
ntb_memcpy_rx(entry, offset);
qp->rx_memcpy++;
}
static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
struct ntb_payload_header *hdr;
struct ntb_queue_entry *entry;
void *offset;
offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
qp->qp_num, hdr->ver, hdr->len, hdr->flags);
if (!(hdr->flags & DESC_DONE_FLAG)) {
dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
qp->rx_ring_empty++;
return -EAGAIN;
}
if (hdr->flags & LINK_DOWN_FLAG) {
dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
ntb_qp_link_down(qp);
hdr->flags = 0;
return -EAGAIN;
}
if (hdr->ver != (u32)qp->rx_pkts) {
dev_dbg(&qp->ndev->pdev->dev,
"version mismatch, expected %llu - got %u\n",
qp->rx_pkts, hdr->ver);
qp->rx_err_ver++;
return -EIO;
}
entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
if (!entry) {
dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
qp->rx_err_no_buf++;
return -EAGAIN;
}
entry->rx_hdr = hdr;
entry->rx_index = qp->rx_index;
if (hdr->len > entry->len) {
dev_dbg(&qp->ndev->pdev->dev,
"receive buffer overflow! Wanted %d got %d\n",
hdr->len, entry->len);
qp->rx_err_oflow++;
entry->len = -EIO;
entry->flags |= DESC_DONE_FLAG;
ntb_complete_rxc(qp);
} else {
dev_dbg(&qp->ndev->pdev->dev,
"RX OK index %u ver %u size %d into buf size %d\n",
qp->rx_index, hdr->ver, hdr->len, entry->len);
qp->rx_bytes += hdr->len;
qp->rx_pkts++;
entry->len = hdr->len;
ntb_async_rx(entry, offset);
}
qp->rx_index++;
qp->rx_index %= qp->rx_max_entry;
return 0;
}
static void ntb_transport_rxc_db(unsigned long data)
{
struct ntb_transport_qp *qp = (void *)data;
int rc, i;
dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
__func__, qp->qp_num);
/* Limit the number of packets processed in a single interrupt to
* provide fairness to others
*/
for (i = 0; i < qp->rx_max_entry; i++) {
rc = ntb_process_rxc(qp);
if (rc)
break;
}
if (i && qp->rx_dma_chan)
dma_async_issue_pending(qp->rx_dma_chan);
if (i == qp->rx_max_entry) {
/* there is more work to do */
if (qp->active)
tasklet_schedule(&qp->rxc_db_work);
} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
/* the doorbell bit is set: clear it */
ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
/* ntb_db_read ensures ntb_db_clear write is committed */
ntb_db_read(qp->ndev);
/* an interrupt may have arrived between finishing
* ntb_process_rxc and clearing the doorbell bit:
* there might be some more work to do.
*/
if (qp->active)
tasklet_schedule(&qp->rxc_db_work);
}
}
static void ntb_tx_copy_callback(void *data,
const struct dmaengine_result *res)
{
struct ntb_queue_entry *entry = data;
struct ntb_transport_qp *qp = entry->qp;
struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
/* we need to check DMA results if we are using DMA */
if (res) {
enum dmaengine_tx_result dma_err = res->result;
switch (dma_err) {
case DMA_TRANS_READ_FAILED:
case DMA_TRANS_WRITE_FAILED:
entry->errors++;
fallthrough;
case DMA_TRANS_ABORTED:
{
void __iomem *offset =
qp->tx_mw + qp->tx_max_frame *
entry->tx_index;
/* resubmit via CPU */
ntb_memcpy_tx(entry, offset);
qp->tx_memcpy++;
return;
}
case DMA_TRANS_NOERROR:
default:
break;
}
}
iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
if (qp->use_msi)
ntb_msi_peer_trigger(qp->ndev, PIDX, &qp->peer_msi_desc);
else
ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
/* The entry length can only be zero if the packet is intended to be a
* "link down" or similar. Since no payload is being sent in these
* cases, there is nothing to add to the completion queue.
*/
if (entry->len > 0) {
qp->tx_bytes += entry->len;
if (qp->tx_handler)
qp->tx_handler(qp, qp->cb_data, entry->cb_data,
entry->len);
}
ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
#ifdef ARCH_HAS_NOCACHE_UACCESS
/*
* Using non-temporal mov to improve performance on non-cached
* writes, even though we aren't actually copying from user space.
*/
__copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
#else
memcpy_toio(offset, entry->buf, entry->len);
#endif
/* Ensure that the data is fully copied out before setting the flags */
wmb();
ntb_tx_copy_callback(entry, NULL);
}
static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
struct ntb_queue_entry *entry)
{
struct dma_async_tx_descriptor *txd;
struct dma_chan *chan = qp->tx_dma_chan;
struct dma_device *device;
size_t len = entry->len;
void *buf = entry->buf;
size_t dest_off, buff_off;
struct dmaengine_unmap_data *unmap;
dma_addr_t dest;
dma_cookie_t cookie;
device = chan->device;
dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index;
buff_off = (size_t)buf & ~PAGE_MASK;
dest_off = (size_t)dest & ~PAGE_MASK;
if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
goto err;
unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
if (!unmap)
goto err;
unmap->len = len;
unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
buff_off, len, DMA_TO_DEVICE);
if (dma_mapping_error(device->dev, unmap->addr[0]))
goto err_get_unmap;
unmap->to_cnt = 1;
txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
DMA_PREP_INTERRUPT);
if (!txd)
goto err_get_unmap;
txd->callback_result = ntb_tx_copy_callback;
txd->callback_param = entry;
dma_set_unmap(txd, unmap);
cookie = dmaengine_submit(txd);
if (dma_submit_error(cookie))
goto err_set_unmap;
dmaengine_unmap_put(unmap);
dma_async_issue_pending(chan);
return 0;
err_set_unmap:
dmaengine_unmap_put(unmap);
err_get_unmap:
dmaengine_unmap_put(unmap);
err:
return -ENXIO;
}
static void ntb_async_tx(struct ntb_transport_qp *qp,
struct ntb_queue_entry *entry)
{
struct ntb_payload_header __iomem *hdr;
struct dma_chan *chan = qp->tx_dma_chan;
void __iomem *offset;
int res;
entry->tx_index = qp->tx_index;
offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
entry->tx_hdr = hdr;
iowrite32(entry->len, &hdr->len);
iowrite32((u32)qp->tx_pkts, &hdr->ver);
if (!chan)
goto err;
if (entry->len < copy_bytes)
goto err;
res = ntb_async_tx_submit(qp, entry);
if (res < 0)
goto err;
if (!entry->retries)
qp->tx_async++;
return;
err:
ntb_memcpy_tx(entry, offset);
qp->tx_memcpy++;
}
static int ntb_process_tx(struct ntb_transport_qp *qp,
struct ntb_queue_entry *entry)
{
if (!ntb_transport_tx_free_entry(qp)) {
qp->tx_ring_full++;
return -EAGAIN;
}
if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
if (qp->tx_handler)
qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
&qp->tx_free_q);
return 0;
}
ntb_async_tx(qp, entry);
qp->tx_index++;
qp->tx_index %= qp->tx_max_entry;
qp->tx_pkts++;
return 0;
}
static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
struct pci_dev *pdev = qp->ndev->pdev;
struct ntb_queue_entry *entry;
int i, rc;
if (!qp->link_is_up)
return;
dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);
for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
if (entry)
break;
msleep(100);
}
if (!entry)
return;
entry->cb_data = NULL;
entry->buf = NULL;
entry->len = 0;
entry->flags = LINK_DOWN_FLAG;
rc = ntb_process_tx(qp, entry);
if (rc)
dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
qp->qp_num);
ntb_qp_link_down_reset(qp);
}
static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
{
return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}
/**
* ntb_transport_create_queue - Create a new NTB transport layer queue
* @data: pointer for callback data
* @client_dev: &struct device pointer
* @handlers: pointer to various ntb queue (callback) handlers
*
* Create a new NTB transport layer queue and provide the queue with a callback
* routine for both transmit and receive. The receive callback routine will be
* used to pass up data when the transport has received it on the queue. The
* transmit callback routine will be called when the transport has completed the
* transmission of the data on the queue and the data is ready to be freed.
*
* RETURNS: pointer to newly created ntb_queue, NULL on error.
*/
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
const struct ntb_queue_handlers *handlers)
{
struct ntb_dev *ndev;
struct pci_dev *pdev;
struct ntb_transport_ctx *nt;
struct ntb_queue_entry *entry;
struct ntb_transport_qp *qp;
u64 qp_bit;
unsigned int free_queue;
dma_cap_mask_t dma_mask;
int node;
int i;
ndev = dev_ntb(client_dev->parent);
pdev = ndev->pdev;
nt = ndev->ctx;
node = dev_to_node(&ndev->dev);
free_queue = ffs(nt->qp_bitmap_free);
if (!free_queue)
goto err;
/* decrement free_queue to make it zero based */
free_queue--;
qp = &nt->qp_vec[free_queue];
qp_bit = BIT_ULL(qp->qp_num);
nt->qp_bitmap_free &= ~qp_bit;
qp->cb_data = data;
qp->rx_handler = handlers->rx_handler;
qp->tx_handler = handlers->tx_handler;
qp->event_handler = handlers->event_handler;
dma_cap_zero(dma_mask);
dma_cap_set(DMA_MEMCPY, dma_mask);
if (use_dma) {
qp->tx_dma_chan =
dma_request_channel(dma_mask, ntb_dma_filter_fn,
(void *)(unsigned long)node);
if (!qp->tx_dma_chan)
dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");
qp->rx_dma_chan =
dma_request_channel(dma_mask, ntb_dma_filter_fn,
(void *)(unsigned long)node);
if (!qp->rx_dma_chan)
dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
} else {
qp->tx_dma_chan = NULL;
qp->rx_dma_chan = NULL;
}
qp->tx_mw_dma_addr = 0;
if (qp->tx_dma_chan) {
qp->tx_mw_dma_addr =
dma_map_resource(qp->tx_dma_chan->device->dev,
qp->tx_mw_phys, qp->tx_mw_size,
DMA_FROM_DEVICE, 0);
if (dma_mapping_error(qp->tx_dma_chan->device->dev,
qp->tx_mw_dma_addr)) {
qp->tx_mw_dma_addr = 0;
goto err1;
}
}
dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
qp->tx_dma_chan ? "DMA" : "CPU");
dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
qp->rx_dma_chan ? "DMA" : "CPU");
for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
if (!entry)
goto err1;
entry->qp = qp;
ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
&qp->rx_free_q);
}
qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;
for (i = 0; i < qp->tx_max_entry; i++) {
entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
if (!entry)
goto err2;
entry->qp = qp;
ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
&qp->tx_free_q);
}
ntb_db_clear(qp->ndev, qp_bit);
ntb_db_clear_mask(qp->ndev, qp_bit);
dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
return qp;
err2:
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
err1:
qp->rx_alloc_entry = 0;
while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
kfree(entry);
if (qp->tx_mw_dma_addr)
dma_unmap_resource(qp->tx_dma_chan->device->dev,
qp->tx_mw_dma_addr, qp->tx_mw_size,
DMA_FROM_DEVICE, 0);
if (qp->tx_dma_chan)
dma_release_channel(qp->tx_dma_chan);
if (qp->rx_dma_chan)
dma_release_channel(qp->rx_dma_chan);
nt->qp_bitmap_free |= qp_bit;
err:
return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
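/*
 * Minimal client sketch (illustrative only; the "my_*" and "priv" names are
 * hypothetical, and the handler signatures are inferred from the call sites
 * in this file, e.g. qp->rx_handler(qp, qp->cb_data, cb_data, len)):
 *
 *	static void my_rx(struct ntb_transport_qp *qp, void *qp_data,
 *			  void *data, int len)
 *	{ ... hand the received buffer to the upper layer, then re-post ... }
 *
 *	static void my_tx(struct ntb_transport_qp *qp, void *qp_data,
 *			  void *data, int len)
 *	{ ... the transmit buffer may be freed or reused here ... }
 *
 *	static void my_event(void *qp_data, int link_is_up)
 *	{ ... react to queue link state changes ... }
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler	= my_rx,
 *		.tx_handler	= my_tx,
 *		.event_handler	= my_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, client_dev, &my_handlers);
 *	if (!qp)
 *		return -EIO;
 */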
/**
* ntb_transport_free_queue - Frees NTB transport queue
* @qp: NTB queue to be freed
*
* Frees NTB transport queue
*/
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
struct pci_dev *pdev;
struct ntb_queue_entry *entry;
u64 qp_bit;
if (!qp)
return;
pdev = qp->ndev->pdev;
qp->active = false;
if (qp->tx_dma_chan) {
struct dma_chan *chan = qp->tx_dma_chan;
/* Setting the dma_chan to NULL will force any new traffic to be
 * processed by the CPU instead of the DMA engine
*/
qp->tx_dma_chan = NULL;
/* Try to be nice and wait for any queued DMA engine
* transactions to process before smashing it with a rock
*/
dma_sync_wait(chan, qp->last_cookie);
dmaengine_terminate_all(chan);
dma_unmap_resource(chan->device->dev,
qp->tx_mw_dma_addr, qp->tx_mw_size,
DMA_FROM_DEVICE, 0);
dma_release_channel(chan);
}
if (qp->rx_dma_chan) {
struct dma_chan *chan = qp->rx_dma_chan;
/* Setting the dma_chan to NULL will force any new traffic to be
 * processed by the CPU instead of the DMA engine
*/
qp->rx_dma_chan = NULL;
/* Try to be nice and wait for any queued DMA engine
* transactions to process before smashing it with a rock
*/
dma_sync_wait(chan, qp->last_cookie);
dmaengine_terminate_all(chan);
dma_release_channel(chan);
}
qp_bit = BIT_ULL(qp->qp_num);
ntb_db_set_mask(qp->ndev, qp_bit);
tasklet_kill(&qp->rxc_db_work);
cancel_delayed_work_sync(&qp->link_work);
qp->cb_data = NULL;
qp->rx_handler = NULL;
qp->tx_handler = NULL;
qp->event_handler = NULL;
while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
kfree(entry);
while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
kfree(entry);
}
while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
kfree(entry);
}
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
qp->transport->qp_bitmap_free |= qp_bit;
dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
/**
* ntb_transport_rx_remove - Dequeues enqueued rx packet
* @qp: NTB transport layer queue the buffer is dequeued from
* @len: pointer to a variable that receives the dequeued buffer's length
*
* Dequeues unused buffers from receive queue. Should only be used during
* shutdown of qp.
*
* RETURNS: NULL on error, or the per-buffer cb pointer supplied at enqueue time.
*/
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
struct ntb_queue_entry *entry;
void *buf;
if (!qp || qp->client_ready)
return NULL;
entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
if (!entry)
return NULL;
buf = entry->cb_data;
*len = entry->len;
ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
/**
* ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
* @qp: NTB transport layer queue the entry is to be enqueued on
* @cb: per buffer pointer for callback function to use
* @data: pointer to data buffer that incoming packets will be copied into
* @len: length of the data buffer
*
* Enqueue a new receive buffer onto the transport queue, into which an NTB
* payload can be received.
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
unsigned int len)
{
struct ntb_queue_entry *entry;
if (!qp)
return -EINVAL;
entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
if (!entry)
return -ENOMEM;
entry->cb_data = cb;
entry->buf = data;
entry->len = len;
entry->flags = 0;
entry->retries = 0;
entry->errors = 0;
entry->rx_index = 0;
ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
if (qp->active)
tasklet_schedule(&qp->rxc_db_work);
return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
/**
* ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
* @qp: NTB transport layer queue the entry is to be enqueued on
* @cb: per buffer pointer for callback function to use
* @data: pointer to data buffer that will be sent
* @len: length of the data buffer
*
* Enqueue a new transmit buffer onto the transport queue from which an NTB
* payload will be transmitted. This assumes that a lock is being held to
* serialize access to the qp.
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
unsigned int len)
{
struct ntb_queue_entry *entry;
int rc;
if (!qp || !len)
return -EINVAL;
/* If the qp link is down already, just ignore. */
if (!qp->link_is_up)
return 0;
entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
if (!entry) {
qp->tx_err_no_buf++;
return -EBUSY;
}
entry->cb_data = cb;
entry->buf = data;
entry->len = len;
entry->flags = 0;
entry->errors = 0;
entry->retries = 0;
entry->tx_index = 0;
rc = ntb_process_tx(qp, entry);
if (rc)
ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
&qp->tx_free_q);
return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
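/*
 * Illustrative enqueue flow (a sketch; buffer management is entirely up to
 * the client, and the "my_*" names are hypothetical):
 *
 *	before raising the link, post receive buffers:
 *
 *		rc = ntb_transport_rx_enqueue(qp, my_buf, my_buf, MY_LEN);
 *
 *	here the same pointer is passed as both @cb and @data, so the rx
 *	handler later gets the buffer back directly;
 *
 *	to transmit, hand a filled buffer to the queue and back off on
 *	-EBUSY (no free local tx entry) or -EAGAIN (the ring toward the
 *	peer is currently full):
 *
 *		rc = ntb_transport_tx_enqueue(qp, my_buf, my_buf, my_len);
 *		if (rc == -EBUSY || rc == -EAGAIN)
 *			... retry once the tx_handler frees an entry ...
 */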
/**
* ntb_transport_link_up - Notify NTB transport of client readiness to use queue
* @qp: NTB transport layer queue to be enabled
*
* Notify NTB transport layer of client readiness to use queue
*/
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
if (!qp)
return;
qp->client_ready = true;
if (qp->transport->link_is_up)
schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);
/**
* ntb_transport_link_down - Notify NTB transport to no longer enqueue data
* @qp: NTB transport layer queue to be disabled
*
* Notify NTB transport layer of client's desire to no longer receive data on
* transport queue specified. It is the client's responsibility to ensure all
* entries on queue are purged or otherwise handled appropriately.
*/
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
int val;
if (!qp)
return;
qp->client_ready = false;
val = ntb_spad_read(qp->ndev, QP_LINKS);
ntb_peer_spad_write(qp->ndev, PIDX, QP_LINKS, val & ~BIT(qp->qp_num));
if (qp->link_is_up)
ntb_send_link_down(qp);
else
cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);
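/*
 * Typical queue lifecycle for a client (a sketch, assuming the qp was
 * created with ntb_transport_create_queue() as shown earlier):
 *
 *	ntb_transport_rx_enqueue(qp, ...);	post initial rx buffers
 *	ntb_transport_link_up(qp);		declare the client ready
 *	...					tx/rx via the enqueue calls
 *	ntb_transport_link_down(qp);		stop accepting new data
 *	while ((buf = ntb_transport_rx_remove(qp, &len)))
 *		...;				reclaim unused rx buffers
 *	ntb_transport_free_queue(qp);		release the queue
 */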
/**
* ntb_transport_link_query - Query transport link state
* @qp: NTB transport layer queue to be queried
*
* Query connectivity to the remote system of the NTB transport queue
*
* RETURNS: true for link up or false for link down
*/
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
if (!qp)
return false;
return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);
/**
* ntb_transport_qp_num - Query the qp number
* @qp: NTB transport layer queue to be queried
*
* Query qp number of the NTB transport queue
*
* RETURNS: a zero based number specifying the qp number
*/
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
if (!qp)
return 0;
return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
/**
* ntb_transport_max_size - Query the max payload size of a qp
* @qp: NTB transport layer queue to be queried
*
* Query the maximum payload size permissible on the given qp
*
* RETURNS: the max payload size of a qp
*/
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
unsigned int max_size;
unsigned int copy_align;
struct dma_chan *rx_chan, *tx_chan;
if (!qp)
return 0;
rx_chan = qp->rx_dma_chan;
tx_chan = qp->tx_dma_chan;
copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
tx_chan ? tx_chan->device->copy_align : 0);
/* If DMA engine usage is possible, try to find the max size for that */
max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
max_size = round_down(max_size, 1 << copy_align);
return max_size;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
{
unsigned int head = qp->tx_index;
unsigned int tail = qp->remote_rx_info->entry;
return tail >= head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
static void ntb_transport_doorbell_callback(void *data, int vector)
{
struct ntb_transport_ctx *nt = data;
struct ntb_transport_qp *qp;
u64 db_bits;
unsigned int qp_num;
if (ntb_db_read(nt->ndev) & nt->msi_db_mask) {
ntb_transport_msi_peer_desc_changed(nt);
ntb_db_clear(nt->ndev, nt->msi_db_mask);
}
db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
ntb_db_vector_mask(nt->ndev, vector));
while (db_bits) {
qp_num = __ffs(db_bits);
qp = &nt->qp_vec[qp_num];
if (qp->active)
tasklet_schedule(&qp->rxc_db_work);
db_bits &= ~BIT_ULL(qp_num);
}
}
static const struct ntb_ctx_ops ntb_transport_ops = {
.link_event = ntb_transport_event_callback,
.db_event = ntb_transport_doorbell_callback,
};
static struct ntb_client ntb_transport_client = {
.ops = {
.probe = ntb_transport_probe,
.remove = ntb_transport_free,
},
};
static int __init ntb_transport_init(void)
{
int rc;
pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);
if (debugfs_initialized())
nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
rc = bus_register(&ntb_transport_bus);
if (rc)
goto err_bus;
rc = ntb_register_client(&ntb_transport_client);
if (rc)
goto err_client;
return 0;
err_client:
bus_unregister(&ntb_transport_bus);
err_bus:
debugfs_remove_recursive(nt_debugfs_dir);
return rc;
}
module_init(ntb_transport_init);
static void __exit ntb_transport_exit(void)
{
ntb_unregister_client(&ntb_transport_client);
bus_unregister(&ntb_transport_bus);
debugfs_remove_recursive(nt_debugfs_dir);
}
module_exit(ntb_transport_exit);
| linux-master | drivers/ntb/ntb_transport.c |
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
* Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
* Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* PCIe NTB Linux driver
*
* Contact Information:
* Allen Hubbe <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ntb.h>
#include <linux/pci.h>
#define DRIVER_NAME "ntb"
#define DRIVER_DESCRIPTION "PCIe NTB Driver Framework"
#define DRIVER_VERSION "1.0"
#define DRIVER_RELDATE "24 March 2015"
#define DRIVER_AUTHOR "Allen Hubbe <[email protected]>"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
static struct bus_type ntb_bus;
static void ntb_dev_release(struct device *dev);
int __ntb_register_client(struct ntb_client *client, struct module *mod,
const char *mod_name)
{
if (!client)
return -EINVAL;
if (!ntb_client_ops_is_valid(&client->ops))
return -EINVAL;
memset(&client->drv, 0, sizeof(client->drv));
client->drv.bus = &ntb_bus;
client->drv.name = mod_name;
client->drv.owner = mod;
return driver_register(&client->drv);
}
EXPORT_SYMBOL(__ntb_register_client);
void ntb_unregister_client(struct ntb_client *client)
{
driver_unregister(&client->drv);
}
EXPORT_SYMBOL(ntb_unregister_client);
int ntb_register_device(struct ntb_dev *ntb)
{
if (!ntb)
return -EINVAL;
if (!ntb->pdev)
return -EINVAL;
if (!ntb->ops)
return -EINVAL;
if (!ntb_dev_ops_is_valid(ntb->ops))
return -EINVAL;
init_completion(&ntb->released);
ntb->dev.bus = &ntb_bus;
ntb->dev.parent = &ntb->pdev->dev;
ntb->dev.release = ntb_dev_release;
dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
ntb->ctx = NULL;
ntb->ctx_ops = NULL;
spin_lock_init(&ntb->ctx_lock);
return device_register(&ntb->dev);
}
EXPORT_SYMBOL(ntb_register_device);
void ntb_unregister_device(struct ntb_dev *ntb)
{
device_unregister(&ntb->dev);
wait_for_completion(&ntb->released);
}
EXPORT_SYMBOL(ntb_unregister_device);
int ntb_set_ctx(struct ntb_dev *ntb, void *ctx,
const struct ntb_ctx_ops *ctx_ops)
{
unsigned long irqflags;
if (!ntb_ctx_ops_is_valid(ctx_ops))
return -EINVAL;
if (ntb->ctx_ops)
return -EINVAL;
spin_lock_irqsave(&ntb->ctx_lock, irqflags);
{
ntb->ctx = ctx;
ntb->ctx_ops = ctx_ops;
}
spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
return 0;
}
EXPORT_SYMBOL(ntb_set_ctx);
void ntb_clear_ctx(struct ntb_dev *ntb)
{
unsigned long irqflags;
spin_lock_irqsave(&ntb->ctx_lock, irqflags);
{
ntb->ctx_ops = NULL;
ntb->ctx = NULL;
}
spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
}
EXPORT_SYMBOL(ntb_clear_ctx);
void ntb_link_event(struct ntb_dev *ntb)
{
unsigned long irqflags;
spin_lock_irqsave(&ntb->ctx_lock, irqflags);
{
if (ntb->ctx_ops && ntb->ctx_ops->link_event)
ntb->ctx_ops->link_event(ntb->ctx);
}
spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
}
EXPORT_SYMBOL(ntb_link_event);
void ntb_db_event(struct ntb_dev *ntb, int vector)
{
unsigned long irqflags;
spin_lock_irqsave(&ntb->ctx_lock, irqflags);
{
if (ntb->ctx_ops && ntb->ctx_ops->db_event)
ntb->ctx_ops->db_event(ntb->ctx, vector);
}
spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
}
EXPORT_SYMBOL(ntb_db_event);
void ntb_msg_event(struct ntb_dev *ntb)
{
unsigned long irqflags;
spin_lock_irqsave(&ntb->ctx_lock, irqflags);
{
if (ntb->ctx_ops && ntb->ctx_ops->msg_event)
ntb->ctx_ops->msg_event(ntb->ctx);
}
spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
}
EXPORT_SYMBOL(ntb_msg_event);
int ntb_default_port_number(struct ntb_dev *ntb)
{
switch (ntb->topo) {
case NTB_TOPO_PRI:
case NTB_TOPO_B2B_USD:
return NTB_PORT_PRI_USD;
case NTB_TOPO_SEC:
case NTB_TOPO_B2B_DSD:
return NTB_PORT_SEC_DSD;
default:
return 0;
}
}
EXPORT_SYMBOL(ntb_default_port_number);
int ntb_default_peer_port_count(struct ntb_dev *ntb)
{
return NTB_DEF_PEER_CNT;
}
EXPORT_SYMBOL(ntb_default_peer_port_count);
int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx)
{
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
switch (ntb->topo) {
case NTB_TOPO_PRI:
case NTB_TOPO_B2B_USD:
return NTB_PORT_SEC_DSD;
case NTB_TOPO_SEC:
case NTB_TOPO_B2B_DSD:
return NTB_PORT_PRI_USD;
default:
return 0;
}
}
EXPORT_SYMBOL(ntb_default_peer_port_number);
int ntb_default_peer_port_idx(struct ntb_dev *ntb, int port)
{
int peer_port = ntb_default_peer_port_number(ntb, NTB_DEF_PEER_IDX);
if (peer_port == -EINVAL || port != peer_port)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(ntb_default_peer_port_idx);
static int ntb_probe(struct device *dev)
{
struct ntb_dev *ntb;
struct ntb_client *client;
int rc;
get_device(dev);
ntb = dev_ntb(dev);
client = drv_ntb_client(dev->driver);
rc = client->ops.probe(client, ntb);
if (rc)
put_device(dev);
return rc;
}
static void ntb_remove(struct device *dev)
{
struct ntb_dev *ntb;
struct ntb_client *client;
if (dev->driver) {
ntb = dev_ntb(dev);
client = drv_ntb_client(dev->driver);
client->ops.remove(client, ntb);
put_device(dev);
}
}
static void ntb_dev_release(struct device *dev)
{
struct ntb_dev *ntb = dev_ntb(dev);
complete(&ntb->released);
}
static struct bus_type ntb_bus = {
.name = "ntb",
.probe = ntb_probe,
.remove = ntb_remove,
};
static int __init ntb_driver_init(void)
{
return bus_register(&ntb_bus);
}
module_init(ntb_driver_init);
static void __exit ntb_driver_exit(void)
{
bus_unregister(&ntb_bus);
}
module_exit(ntb_driver_exit);
| linux-master | drivers/ntb/core.c |
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
* Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* BSD LICENSE
*
* Copyright(c) 2012 Intel Corporation. All rights reserved.
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
* Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Intel PCIe NTB Linux driver
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>
#include "ntb_hw_intel.h"
#include "ntb_hw_gen1.h"
#include "ntb_hw_gen3.h"
#include "ntb_hw_gen4.h"
#define NTB_NAME "ntb_hw_intel"
#define NTB_DESC "Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER "2.0"
MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)
static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
static const struct intel_ntb_alt_reg xeon_b2b_reg;
static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static const struct ntb_dev_ops intel_ntb_ops;
static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;
static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb. A "
"zero or positive value counts from the first mw idx, and a "
"negative value counts from the last mw idx. Both sides MUST "
"set the same value here!");
static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
"ntb so that the peer ntb only occupies the first half of "
"the mw, so the second half can still be used as a mw. Both "
"sides MUST set the same value here!");
module_param_named(xeon_b2b_usd_bar2_addr64,
xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
"XEON B2B USD BAR 2 64-bit address");
module_param_named(xeon_b2b_usd_bar4_addr64,
xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
"XEON B2B USD BAR 4 64-bit address");
module_param_named(xeon_b2b_usd_bar4_addr32,
xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
"XEON B2B USD split-BAR 4 32-bit address");
module_param_named(xeon_b2b_usd_bar5_addr32,
xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
"XEON B2B USD split-BAR 5 32-bit address");
module_param_named(xeon_b2b_dsd_bar2_addr64,
xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
"XEON B2B DSD BAR 2 64-bit address");
module_param_named(xeon_b2b_dsd_bar4_addr64,
xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
"XEON B2B DSD BAR 4 64-bit address");
module_param_named(xeon_b2b_dsd_bar4_addr32,
xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
"XEON B2B DSD split-BAR 4 32-bit address");
module_param_named(xeon_b2b_dsd_bar5_addr32,
xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
"XEON B2B DSD split-BAR 5 32-bit address");
static int xeon_init_isr(struct intel_ntb_dev *ndev);
static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
ndev->unsafe_flags = 0;
ndev->unsafe_flags_ignore = 0;
/* Only B2B has a workaround to avoid SDOORBELL */
if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
if (!ntb_topo_is_b2b(ndev->ntb.topo))
ndev->unsafe_flags |= NTB_UNSAFE_DB;
/* No low level workaround to avoid SB01BASE */
if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
ndev->unsafe_flags |= NTB_UNSAFE_DB;
ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
}
}
static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
unsigned long flag)
{
return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}
static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
unsigned long flag)
{
flag &= ndev->unsafe_flags;
ndev->unsafe_flags_ignore |= flag;
return !!flag;
}
int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
if (idx < 0 || idx >= ndev->mw_count)
return -EINVAL;
return ndev->reg->mw_bar[idx];
}
void ndev_db_addr(struct intel_ntb_dev *ndev,
phys_addr_t *db_addr, resource_size_t *db_size,
phys_addr_t reg_addr, unsigned long reg)
{
if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
pr_warn_once("%s: NTB unsafe doorbell access", __func__);
if (db_addr) {
*db_addr = reg_addr + reg;
dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr);
}
if (db_size) {
*db_size = ndev->reg->db_size;
dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
}
}
u64 ndev_db_read(struct intel_ntb_dev *ndev,
void __iomem *mmio)
{
if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
pr_warn_once("%s: NTB unsafe doorbell access", __func__);
return ndev->reg->db_ioread(mmio);
}
int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
void __iomem *mmio)
{
if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
pr_warn_once("%s: NTB unsafe doorbell access", __func__);
if (db_bits & ~ndev->db_valid_mask)
return -EINVAL;
ndev->reg->db_iowrite(db_bits, mmio);
return 0;
}
static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
void __iomem *mmio)
{
unsigned long irqflags;
if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
pr_warn_once("%s: NTB unsafe doorbell access", __func__);
if (db_bits & ~ndev->db_valid_mask)
return -EINVAL;
spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
{
ndev->db_mask |= db_bits;
ndev->reg->db_iowrite(ndev->db_mask, mmio);
}
spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
return 0;
}
static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
void __iomem *mmio)
{
unsigned long irqflags;
if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
pr_warn_once("%s: NTB unsafe doorbell access", __func__);
if (db_bits & ~ndev->db_valid_mask)
return -EINVAL;
spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
{
ndev->db_mask &= ~db_bits;
ndev->reg->db_iowrite(ndev->db_mask, mmio);
}
spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
return 0;
}
static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
u64 shift, mask;
shift = ndev->db_vec_shift;
mask = BIT_ULL(shift) - 1;
return mask << (shift * db_vector);
}
static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
phys_addr_t *spad_addr, phys_addr_t reg_addr,
unsigned long reg)
{
if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
if (idx < 0 || idx >= ndev->spad_count)
return -EINVAL;
if (spad_addr) {
*spad_addr = reg_addr + reg + (idx << 2);
dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n",
*spad_addr);
}
return 0;
}
static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
void __iomem *mmio)
{
if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
if (idx < 0 || idx >= ndev->spad_count)
return 0;
return ioread32(mmio + (idx << 2));
}
static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
void __iomem *mmio)
{
if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
if (idx < 0 || idx >= ndev->spad_count)
return -EINVAL;
iowrite32(val, mmio + (idx << 2));
return 0;
}
static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
u64 vec_mask;
vec_mask = ndev_vec_mask(ndev, vec);
if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
vec_mask |= ndev->db_link_mask;
dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask);
ndev->last_ts = jiffies;
if (vec_mask & ndev->db_link_mask) {
if (ndev->reg->poll_link(ndev))
ntb_link_event(&ndev->ntb);
}
if (vec_mask & ndev->db_valid_mask)
ntb_db_event(&ndev->ntb, vec);
return IRQ_HANDLED;
}
static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
struct intel_ntb_vec *nvec = dev;
dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d nvec->num: %d\n",
irq, nvec->num);
return ndev_interrupt(nvec->ndev, nvec->num);
}
static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
struct intel_ntb_dev *ndev = dev;
return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
}
int ndev_init_isr(struct intel_ntb_dev *ndev,
int msix_min, int msix_max,
int msix_shift, int total_shift)
{
struct pci_dev *pdev;
int rc, i, msix_count, node;
pdev = ndev->ntb.pdev;
node = dev_to_node(&pdev->dev);
/* Mask all doorbell interrupts */
ndev->db_mask = ndev->db_valid_mask;
ndev->reg->db_iowrite(ndev->db_mask,
ndev->self_mmio +
ndev->self_reg->db_mask);
/* Try to set up msix irq */
ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
GFP_KERNEL, node);
if (!ndev->vec)
goto err_msix_vec_alloc;
ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
GFP_KERNEL, node);
if (!ndev->msix)
goto err_msix_alloc;
for (i = 0; i < msix_max; ++i)
ndev->msix[i].entry = i;
msix_count = pci_enable_msix_range(pdev, ndev->msix,
msix_min, msix_max);
if (msix_count < 0)
goto err_msix_enable;
for (i = 0; i < msix_count; ++i) {
ndev->vec[i].ndev = ndev;
ndev->vec[i].num = i;
rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
"ndev_vec_isr", &ndev->vec[i]);
if (rc)
goto err_msix_request;
}
dev_dbg(&pdev->dev, "Using %d msix interrupts\n", msix_count);
ndev->db_vec_count = msix_count;
ndev->db_vec_shift = msix_shift;
return 0;
err_msix_request:
while (i-- > 0)
free_irq(ndev->msix[i].vector, &ndev->vec[i]);
pci_disable_msix(pdev);
err_msix_enable:
kfree(ndev->msix);
err_msix_alloc:
kfree(ndev->vec);
err_msix_vec_alloc:
ndev->msix = NULL;
ndev->vec = NULL;
/* Try to set up msi irq */
rc = pci_enable_msi(pdev);
if (rc)
goto err_msi_enable;
rc = request_irq(pdev->irq, ndev_irq_isr, 0,
"ndev_irq_isr", ndev);
if (rc)
goto err_msi_request;
dev_dbg(&pdev->dev, "Using msi interrupts\n");
ndev->db_vec_count = 1;
ndev->db_vec_shift = total_shift;
return 0;
err_msi_request:
pci_disable_msi(pdev);
err_msi_enable:
/* Try to set up intx irq */
pci_intx(pdev, 1);
rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
"ndev_irq_isr", ndev);
if (rc)
goto err_intx_request;
dev_dbg(&pdev->dev, "Using intx interrupts\n");
ndev->db_vec_count = 1;
ndev->db_vec_shift = total_shift;
return 0;
err_intx_request:
return rc;
}
static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
struct pci_dev *pdev;
int i;
pdev = ndev->ntb.pdev;
/* Mask all doorbell interrupts */
ndev->db_mask = ndev->db_valid_mask;
ndev->reg->db_iowrite(ndev->db_mask,
ndev->self_mmio +
ndev->self_reg->db_mask);
if (ndev->msix) {
i = ndev->db_vec_count;
while (i--)
free_irq(ndev->msix[i].vector, &ndev->vec[i]);
pci_disable_msix(pdev);
kfree(ndev->msix);
kfree(ndev->vec);
} else {
free_irq(pdev->irq, ndev);
if (pci_dev_msi_enabled(pdev))
pci_disable_msi(pdev);
}
}
static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
struct intel_ntb_dev *ndev;
struct pci_dev *pdev;
void __iomem *mmio;
char *buf;
size_t buf_size;
ssize_t ret, off;
union { u64 v64; u32 v32; u16 v16; u8 v8; } u;
ndev = filp->private_data;
pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio;
buf_size = min(count, 0x800ul);
buf = kmalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
off = 0;
off += scnprintf(buf + off, buf_size - off,
"NTB Device Information:\n");
off += scnprintf(buf + off, buf_size - off,
"Connection Topology -\t%s\n",
ntb_topo_string(ndev->ntb.topo));
if (ndev->b2b_idx != UINT_MAX) {
off += scnprintf(buf + off, buf_size - off,
"B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
off += scnprintf(buf + off, buf_size - off,
"B2B Offset -\t\t%#lx\n", ndev->b2b_off);
}
off += scnprintf(buf + off, buf_size - off,
"BAR4 Split -\t\t%s\n",
ndev->bar4_split ? "yes" : "no");
off += scnprintf(buf + off, buf_size - off,
"NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
off += scnprintf(buf + off, buf_size - off,
"LNK STA -\t\t%#06x\n", ndev->lnk_sta);
if (!ndev->reg->link_is_up(ndev)) {
off += scnprintf(buf + off, buf_size - off,
"Link Status -\t\tDown\n");
} else {
off += scnprintf(buf + off, buf_size - off,
"Link Status -\t\tUp\n");
off += scnprintf(buf + off, buf_size - off,
"Link Speed -\t\tPCI-E Gen %u\n",
NTB_LNK_STA_SPEED(ndev->lnk_sta));
off += scnprintf(buf + off, buf_size - off,
"Link Width -\t\tx%u\n",
NTB_LNK_STA_WIDTH(ndev->lnk_sta));
}
off += scnprintf(buf + off, buf_size - off,
"Memory Window Count -\t%u\n", ndev->mw_count);
off += scnprintf(buf + off, buf_size - off,
"Scratchpad Count -\t%u\n", ndev->spad_count);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Count -\t%u\n", ndev->db_count);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Mask -\t\t%#llx\n", u.v64);
u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Bell -\t\t%#llx\n", u.v64);
off += scnprintf(buf + off, buf_size - off,
"\nNTB Window Size:\n");
pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
off += scnprintf(buf + off, buf_size - off,
"PBAR23SZ %hhu\n", u.v8);
if (!ndev->bar4_split) {
pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
off += scnprintf(buf + off, buf_size - off,
"PBAR45SZ %hhu\n", u.v8);
} else {
pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
off += scnprintf(buf + off, buf_size - off,
"PBAR4SZ %hhu\n", u.v8);
pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
off += scnprintf(buf + off, buf_size - off,
"PBAR5SZ %hhu\n", u.v8);
}
pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
off += scnprintf(buf + off, buf_size - off,
"SBAR23SZ %hhu\n", u.v8);
if (!ndev->bar4_split) {
pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
off += scnprintf(buf + off, buf_size - off,
"SBAR45SZ %hhu\n", u.v8);
} else {
pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
off += scnprintf(buf + off, buf_size - off,
"SBAR4SZ %hhu\n", u.v8);
pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
off += scnprintf(buf + off, buf_size - off,
"SBAR5SZ %hhu\n", u.v8);
}
off += scnprintf(buf + off, buf_size - off,
"\nNTB Incoming XLAT:\n");
u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
off += scnprintf(buf + off, buf_size - off,
"XLAT23 -\t\t%#018llx\n", u.v64);
if (ndev->bar4_split) {
u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
off += scnprintf(buf + off, buf_size - off,
"XLAT4 -\t\t\t%#06x\n", u.v32);
u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
off += scnprintf(buf + off, buf_size - off,
"XLAT5 -\t\t\t%#06x\n", u.v32);
} else {
u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
off += scnprintf(buf + off, buf_size - off,
"XLAT45 -\t\t%#018llx\n", u.v64);
}
u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
off += scnprintf(buf + off, buf_size - off,
"LMT23 -\t\t\t%#018llx\n", u.v64);
if (ndev->bar4_split) {
u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
off += scnprintf(buf + off, buf_size - off,
"LMT4 -\t\t\t%#06x\n", u.v32);
u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
off += scnprintf(buf + off, buf_size - off,
"LMT5 -\t\t\t%#06x\n", u.v32);
} else {
u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
off += scnprintf(buf + off, buf_size - off,
"LMT45 -\t\t\t%#018llx\n", u.v64);
}
if (pdev_is_gen1(pdev)) {
if (ntb_topo_is_b2b(ndev->ntb.topo)) {
off += scnprintf(buf + off, buf_size - off,
"\nNTB Outgoing B2B XLAT:\n");
u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"B2B XLAT23 -\t\t%#018llx\n", u.v64);
if (ndev->bar4_split) {
u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"B2B XLAT4 -\t\t%#06x\n",
u.v32);
u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"B2B XLAT5 -\t\t%#06x\n",
u.v32);
} else {
u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"B2B XLAT45 -\t\t%#018llx\n",
u.v64);
}
u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"B2B LMT23 -\t\t%#018llx\n", u.v64);
if (ndev->bar4_split) {
u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"B2B LMT4 -\t\t%#06x\n",
u.v32);
u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"B2B LMT5 -\t\t%#06x\n",
u.v32);
} else {
u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"B2B LMT45 -\t\t%#018llx\n",
u.v64);
}
off += scnprintf(buf + off, buf_size - off,
"\nNTB Secondary BAR:\n");
u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"SBAR01 -\t\t%#018llx\n", u.v64);
u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"SBAR23 -\t\t%#018llx\n", u.v64);
if (ndev->bar4_split) {
u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"SBAR4 -\t\t\t%#06x\n", u.v32);
u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"SBAR5 -\t\t\t%#06x\n", u.v32);
} else {
u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"SBAR45 -\t\t%#018llx\n",
u.v64);
}
}
off += scnprintf(buf + off, buf_size - off,
"\nXEON NTB Statistics:\n");
u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"Upstream Memory Miss -\t%u\n", u.v16);
off += scnprintf(buf + off, buf_size - off,
"\nXEON NTB Hardware Errors:\n");
if (!pci_read_config_word(pdev,
XEON_DEVSTS_OFFSET, &u.v16))
off += scnprintf(buf + off, buf_size - off,
"DEVSTS -\t\t%#06x\n", u.v16);
if (!pci_read_config_word(pdev,
XEON_LINK_STATUS_OFFSET, &u.v16))
off += scnprintf(buf + off, buf_size - off,
"LNKSTS -\t\t%#06x\n", u.v16);
if (!pci_read_config_dword(pdev,
XEON_UNCERRSTS_OFFSET, &u.v32))
off += scnprintf(buf + off, buf_size - off,
"UNCERRSTS -\t\t%#06x\n", u.v32);
if (!pci_read_config_dword(pdev,
XEON_CORERRSTS_OFFSET, &u.v32))
off += scnprintf(buf + off, buf_size - off,
"CORERRSTS -\t\t%#06x\n", u.v32);
}
ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
kfree(buf);
return ret;
}
static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
struct intel_ntb_dev *ndev = filp->private_data;
if (pdev_is_gen1(ndev->ntb.pdev))
return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
else if (pdev_is_gen3(ndev->ntb.pdev))
return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
else if (pdev_is_gen4(ndev->ntb.pdev) || pdev_is_gen5(ndev->ntb.pdev))
return ndev_ntb4_debugfs_read(filp, ubuf, count, offp);
return -ENXIO;
}
static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
if (!debugfs_dir) {
ndev->debugfs_dir = NULL;
ndev->debugfs_info = NULL;
} else {
ndev->debugfs_dir =
debugfs_create_dir(pci_name(ndev->ntb.pdev),
debugfs_dir);
if (!ndev->debugfs_dir)
ndev->debugfs_info = NULL;
else
ndev->debugfs_info =
debugfs_create_file("info", S_IRUSR,
ndev->debugfs_dir, ndev,
&intel_ntb_debugfs_info);
}
}
static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
debugfs_remove_recursive(ndev->debugfs_dir);
}
int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
return ntb_ndev(ntb)->mw_count;
}
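/* Report memory window constraints: the translation address must be aligned
 * to the BAR size, any window size is allowed, and the maximum size is the
 * BAR size less any space reserved for the b2b window.
 */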
int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
resource_size_t *addr_align,
resource_size_t *size_align,
resource_size_t *size_max)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
resource_size_t bar_size, mw_size;
int bar;
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
if (idx >= ndev->b2b_idx && !ndev->b2b_off)
idx += 1;
bar = ndev_mw_to_bar(ndev, idx);
if (bar < 0)
return bar;
bar_size = pci_resource_len(ndev->ntb.pdev, bar);
if (idx == ndev->b2b_idx)
mw_size = bar_size - ndev->b2b_off;
else
mw_size = bar_size;
if (addr_align)
*addr_align = pci_resource_len(ndev->ntb.pdev, bar);
if (size_align)
*size_align = 1;
if (size_max)
*size_max = mw_size;
return 0;
}
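/* Program the incoming translation (XLAT) and limit registers for the BAR
 * backing window idx, verifying each write by reading it back.  Split BARs
 * 4/5 are programmed with 32-bit accessors and must stay below 4GB.
 */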
static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
dma_addr_t addr, resource_size_t size)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
unsigned long base_reg, xlat_reg, limit_reg;
resource_size_t bar_size, mw_size;
void __iomem *mmio;
u64 base, limit, reg_val;
int bar;
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
if (idx >= ndev->b2b_idx && !ndev->b2b_off)
idx += 1;
bar = ndev_mw_to_bar(ndev, idx);
if (bar < 0)
return bar;
bar_size = pci_resource_len(ndev->ntb.pdev, bar);
if (idx == ndev->b2b_idx)
mw_size = bar_size - ndev->b2b_off;
else
mw_size = bar_size;
/* hardware requires that addr is aligned to bar size */
if (addr & (bar_size - 1))
return -EINVAL;
/* make sure the range fits in the usable mw size */
if (size > mw_size)
return -EINVAL;
mmio = ndev->self_mmio;
base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);
if (bar < 4 || !ndev->bar4_split) {
base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;
/* Set the limit if supported and size is not mw_size */
if (limit_reg && size != mw_size)
limit = base + size;
else
limit = 0;
/* set and verify setting the translation address */
iowrite64(addr, mmio + xlat_reg);
reg_val = ioread64(mmio + xlat_reg);
if (reg_val != addr) {
iowrite64(0, mmio + xlat_reg);
return -EIO;
}
/* set and verify setting the limit */
iowrite64(limit, mmio + limit_reg);
reg_val = ioread64(mmio + limit_reg);
if (reg_val != limit) {
iowrite64(base, mmio + limit_reg);
iowrite64(0, mmio + xlat_reg);
return -EIO;
}
} else {
/* split bar addr range must all be 32 bit */
if (addr & (~0ull << 32))
return -EINVAL;
if ((addr + size) & (~0ull << 32))
return -EINVAL;
base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;
/* Set the limit if supported and size is not mw_size */
if (limit_reg && size != mw_size)
limit = base + size;
else
limit = 0;
/* set and verify setting the translation address */
iowrite32(addr, mmio + xlat_reg);
reg_val = ioread32(mmio + xlat_reg);
if (reg_val != addr) {
iowrite32(0, mmio + xlat_reg);
return -EIO;
}
/* set and verify setting the limit */
iowrite32(limit, mmio + limit_reg);
reg_val = ioread32(mmio + limit_reg);
if (reg_val != limit) {
iowrite32(base, mmio + limit_reg);
iowrite32(0, mmio + xlat_reg);
return -EIO;
}
}
return 0;
}
u64 intel_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed,
enum ntb_width *width)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
if (ndev->reg->link_is_up(ndev)) {
if (speed)
*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
if (width)
*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
return 1;
} else {
/* TODO MAYBE: is it possible to observe the link speed and
* width while link is training? */
if (speed)
*speed = NTB_SPEED_NONE;
if (width)
*width = NTB_WIDTH_NONE;
return 0;
}
}
static int intel_ntb_link_enable(struct ntb_dev *ntb,
enum ntb_speed max_speed,
enum ntb_width max_width)
{
struct intel_ntb_dev *ndev;
u32 ntb_ctl;
ndev = container_of(ntb, struct intel_ntb_dev, ntb);
if (ndev->ntb.topo == NTB_TOPO_SEC)
return -EINVAL;
dev_dbg(&ntb->pdev->dev,
"Enabling link with max_speed %d max_width %d\n",
max_speed, max_width);
if (max_speed != NTB_SPEED_AUTO)
dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
if (max_width != NTB_WIDTH_AUTO)
dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
if (ndev->bar4_split)
ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
return 0;
}
int intel_ntb_link_disable(struct ntb_dev *ntb)
{
struct intel_ntb_dev *ndev;
u32 ntb_cntl;
ndev = container_of(ntb, struct intel_ntb_dev, ntb);
if (ndev->ntb.topo == NTB_TOPO_SEC)
return -EINVAL;
dev_dbg(&ntb->pdev->dev, "Disabling link\n");
/* Bring NTB link down */
ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
if (ndev->bar4_split)
ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
return 0;
}
int intel_ntb_peer_mw_count(struct ntb_dev *ntb)
{
/* Numbers of inbound and outbound memory windows match */
return ntb_ndev(ntb)->mw_count;
}
int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
phys_addr_t *base, resource_size_t *size)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
int bar;
if (idx >= ndev->b2b_idx && !ndev->b2b_off)
idx += 1;
bar = ndev_mw_to_bar(ndev, idx);
if (bar < 0)
return bar;
if (base)
*base = pci_resource_start(ndev->ntb.pdev, bar) +
(idx == ndev->b2b_idx ? ndev->b2b_off : 0);
if (size)
*size = pci_resource_len(ndev->ntb.pdev, bar) -
(idx == ndev->b2b_idx ? ndev->b2b_off : 0);
return 0;
}
static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
}
u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
{
return ntb_ndev(ntb)->db_valid_mask;
}
int intel_ntb_db_vector_count(struct ntb_dev *ntb)
{
struct intel_ntb_dev *ndev;
ndev = container_of(ntb, struct intel_ntb_dev, ntb);
return ndev->db_vec_count;
}
u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
if (db_vector < 0 || db_vector > ndev->db_vec_count)
return 0;
return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
}
static u64 intel_ntb_db_read(struct ntb_dev *ntb)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
return ndev_db_read(ndev,
ndev->self_mmio +
ndev->self_reg->db_bell);
}
static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
return ndev_db_write(ndev, db_bits,
ndev->self_mmio +
ndev->self_reg->db_bell);
}
int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
return ndev_db_set_mask(ndev, db_bits,
ndev->self_mmio +
ndev->self_reg->db_mask);
}
int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
return ndev_db_clear_mask(ndev, db_bits,
ndev->self_mmio +
ndev->self_reg->db_mask);
}
static int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
resource_size_t *db_size, u64 *db_data, int db_bit)
{
u64 db_bits;
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
if (unlikely(db_bit >= BITS_PER_LONG_LONG))
return -EINVAL;
db_bits = BIT_ULL(db_bit);
if (unlikely(db_bits & ~ntb_ndev(ntb)->db_valid_mask))
return -EINVAL;
ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
ndev->peer_reg->db_bell);
if (db_data)
*db_data = db_bits;
return 0;
}
static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
return ndev_db_write(ndev, db_bits,
ndev->peer_mmio +
ndev->peer_reg->db_bell);
}
int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
{
return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
}
int intel_ntb_spad_count(struct ntb_dev *ntb)
{
struct intel_ntb_dev *ndev;
ndev = container_of(ntb, struct intel_ntb_dev, ntb);
return ndev->spad_count;
}
u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
return ndev_spad_read(ndev, idx,
ndev->self_mmio +
ndev->self_reg->spad);
}
int intel_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
return ndev_spad_write(ndev, idx, val,
ndev->self_mmio +
ndev->self_reg->spad);
}
int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
phys_addr_t *spad_addr)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr,
ndev->peer_reg->spad);
}
u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
return ndev_spad_read(ndev, sidx,
ndev->peer_mmio +
ndev->peer_reg->spad);
}
int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
u32 val)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
return ndev_spad_write(ndev, sidx, val,
ndev->peer_mmio +
ndev->peer_reg->spad);
}
static u64 xeon_db_ioread(const void __iomem *mmio)
{
return (u64)ioread16(mmio);
}
static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
{
iowrite16((u16)bits, mmio);
}
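/* Clear the link doorbell bit and re-read LNKSTS from PCI config space;
 * return 1 if the cached link status changed, 0 otherwise.
 */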
static int xeon_poll_link(struct intel_ntb_dev *ndev)
{
u16 reg_val;
int rc;
ndev->reg->db_iowrite(ndev->db_link_mask,
ndev->self_mmio +
ndev->self_reg->db_bell);
rc = pci_read_config_word(ndev->ntb.pdev,
XEON_LINK_STATUS_OFFSET, &reg_val);
if (rc)
return 0;
if (reg_val == ndev->lnk_sta)
return 0;
ndev->lnk_sta = reg_val;
return 1;
}
int xeon_link_is_up(struct intel_ntb_dev *ndev)
{
if (ndev->ntb.topo == NTB_TOPO_SEC)
return 1;
return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
}
enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
{
switch (ppd & XEON_PPD_TOPO_MASK) {
case XEON_PPD_TOPO_B2B_USD:
return NTB_TOPO_B2B_USD;
case XEON_PPD_TOPO_B2B_DSD:
return NTB_TOPO_B2B_DSD;
case XEON_PPD_TOPO_PRI_USD:
case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
return NTB_TOPO_PRI;
case XEON_PPD_TOPO_SEC_USD:
case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
return NTB_TOPO_SEC;
}
return NTB_TOPO_NONE;
}
static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
{
if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd);
return 1;
}
return 0;
}
static int xeon_init_isr(struct intel_ntb_dev *ndev)
{
return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
XEON_DB_MSIX_VECTOR_COUNT,
XEON_DB_MSIX_VECTOR_SHIFT,
XEON_DB_TOTAL_SHIFT);
}
static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
{
ndev_deinit_isr(ndev);
}
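/* Configure the b2b memory window used to reach the peer's NTB registers:
 * size (or disable) the secondary BARs, program the secondary BAR bases and
 * limits, zero the incoming translations, set the outgoing translations,
 * then map the peer MMIO space through the chosen b2b BAR.
 */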
static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
const struct intel_b2b_addr *addr,
const struct intel_b2b_addr *peer_addr)
{
struct pci_dev *pdev;
void __iomem *mmio;
resource_size_t bar_size;
phys_addr_t bar_addr;
int b2b_bar;
u8 bar_sz;
pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio;
if (ndev->b2b_idx == UINT_MAX) {
dev_dbg(&pdev->dev, "not using b2b mw\n");
b2b_bar = 0;
ndev->b2b_off = 0;
} else {
b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
if (b2b_bar < 0)
return -EIO;
dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
dev_dbg(&pdev->dev, "b2b using first half of bar\n");
ndev->b2b_off = bar_size >> 1;
} else if (XEON_B2B_MIN_SIZE <= bar_size) {
dev_dbg(&pdev->dev, "b2b using whole bar\n");
ndev->b2b_off = 0;
--ndev->mw_count;
} else {
dev_dbg(&pdev->dev, "b2b bar size is too small\n");
return -EIO;
}
}
/* Reset the secondary bar sizes to match the primary bar sizes,
* except disable or halve the size of the b2b secondary bar.
*
* Note: code for each specific bar size register, because the register
* offsets are not in a consistent order (bar5sz comes after ppd, odd).
*/
pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz);
if (b2b_bar == 2) {
if (ndev->b2b_off)
bar_sz -= 1;
else
bar_sz = 0;
}
pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz);
if (!ndev->bar4_split) {
pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz);
if (b2b_bar == 4) {
if (ndev->b2b_off)
bar_sz -= 1;
else
bar_sz = 0;
}
pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz);
} else {
pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz);
if (b2b_bar == 4) {
if (ndev->b2b_off)
bar_sz -= 1;
else
bar_sz = 0;
}
pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz);
pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz);
if (b2b_bar == 5) {
if (ndev->b2b_off)
bar_sz -= 1;
else
bar_sz = 0;
}
pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz);
}
/* SBAR01 hit by first part of the b2b bar */
if (b2b_bar == 0)
bar_addr = addr->bar0_addr;
else if (b2b_bar == 2)
bar_addr = addr->bar2_addr64;
else if (b2b_bar == 4 && !ndev->bar4_split)
bar_addr = addr->bar4_addr64;
else if (b2b_bar == 4)
bar_addr = addr->bar4_addr32;
else if (b2b_bar == 5)
bar_addr = addr->bar5_addr32;
else
return -EIO;
dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr);
iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);
/* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
* The b2b bar is either disabled above, or configured half-size, and
* it starts at the PBAR xlat + offset.
*/
bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr);
if (!ndev->bar4_split) {
bar_addr = addr->bar4_addr64 +
(b2b_bar == 4 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr);
} else {
bar_addr = addr->bar4_addr32 +
(b2b_bar == 4 ? ndev->b2b_off : 0);
iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr);
bar_addr = addr->bar5_addr32 +
(b2b_bar == 5 ? ndev->b2b_off : 0);
iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr);
}
/* setup incoming bar limits == base addrs (zero length windows) */
bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr);
if (!ndev->bar4_split) {
bar_addr = addr->bar4_addr64 +
(b2b_bar == 4 ? ndev->b2b_off : 0);
iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr);
} else {
bar_addr = addr->bar4_addr32 +
(b2b_bar == 4 ? ndev->b2b_off : 0);
iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr);
bar_addr = addr->bar5_addr32 +
(b2b_bar == 5 ? ndev->b2b_off : 0);
iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
dev_dbg(&pdev->dev, "SBAR5LMT %#05llx\n", bar_addr);
}
/* zero incoming translation addrs */
iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);
if (!ndev->bar4_split) {
iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
} else {
iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
}
/* zero outgoing translation limits (whole bar size windows) */
iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
if (!ndev->bar4_split) {
iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
} else {
iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
}
/* set outgoing translation offsets */
bar_addr = peer_addr->bar2_addr64;
iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr);
if (!ndev->bar4_split) {
bar_addr = peer_addr->bar4_addr64;
iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr);
} else {
bar_addr = peer_addr->bar4_addr32;
iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr);
bar_addr = peer_addr->bar5_addr32;
iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr);
}
/* set the translation offset for b2b registers */
if (b2b_bar == 0)
bar_addr = peer_addr->bar0_addr;
else if (b2b_bar == 2)
bar_addr = peer_addr->bar2_addr64;
else if (b2b_bar == 4 && !ndev->bar4_split)
bar_addr = peer_addr->bar4_addr64;
else if (b2b_bar == 4)
bar_addr = peer_addr->bar4_addr32;
else if (b2b_bar == 5)
bar_addr = peer_addr->bar5_addr32;
else
return -EIO;
/* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr);
iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);
if (b2b_bar) {
/* map peer ntb mmio config space registers */
ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
XEON_B2B_MIN_SIZE);
if (!ndev->peer_mmio)
return -EIO;
ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
}
return 0;
}
static int xeon_init_ntb(struct intel_ntb_dev *ndev)
{
struct device *dev = &ndev->ntb.pdev->dev;
int rc;
u32 ntb_ctl;
if (ndev->bar4_split)
ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
else
ndev->mw_count = XEON_MW_COUNT;
ndev->spad_count = XEON_SPAD_COUNT;
ndev->db_count = XEON_DB_COUNT;
ndev->db_link_mask = XEON_DB_LINK_BIT;
switch (ndev->ntb.topo) {
case NTB_TOPO_PRI:
if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
dev_err(dev, "NTB Primary config disabled\n");
return -EINVAL;
}
/* enable link to allow secondary side device to appear */
ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
ntb_ctl &= ~NTB_CTL_DISABLE;
iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
/* use half the spads for the peer */
ndev->spad_count >>= 1;
ndev->self_reg = &xeon_pri_reg;
ndev->peer_reg = &xeon_sec_reg;
ndev->xlat_reg = &xeon_sec_xlat;
break;
case NTB_TOPO_SEC:
if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
dev_err(dev, "NTB Secondary config disabled\n");
return -EINVAL;
}
/* use half the spads for the peer */
ndev->spad_count >>= 1;
ndev->self_reg = &xeon_sec_reg;
ndev->peer_reg = &xeon_pri_reg;
ndev->xlat_reg = &xeon_pri_xlat;
break;
case NTB_TOPO_B2B_USD:
case NTB_TOPO_B2B_DSD:
ndev->self_reg = &xeon_pri_reg;
ndev->peer_reg = &xeon_b2b_reg;
ndev->xlat_reg = &xeon_sec_xlat;
if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
ndev->peer_reg = &xeon_pri_reg;
if (b2b_mw_idx < 0)
ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
else
ndev->b2b_idx = b2b_mw_idx;
if (ndev->b2b_idx >= ndev->mw_count) {
dev_dbg(dev,
"b2b_mw_idx %d invalid for mw_count %u\n",
b2b_mw_idx, ndev->mw_count);
return -EINVAL;
}
dev_dbg(dev, "setting up b2b mw idx %d means %d\n",
b2b_mw_idx, ndev->b2b_idx);
} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
dev_warn(dev, "Reduce doorbell count by 1\n");
ndev->db_count -= 1;
}
if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
rc = xeon_setup_b2b_mw(ndev,
&xeon_b2b_dsd_addr,
&xeon_b2b_usd_addr);
} else {
rc = xeon_setup_b2b_mw(ndev,
&xeon_b2b_usd_addr,
&xeon_b2b_dsd_addr);
}
if (rc)
return rc;
/* Enable Bus Master and Memory Space on the secondary side */
iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
ndev->self_mmio + XEON_SPCICMD_OFFSET);
break;
default:
return -EINVAL;
}
ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
ndev->reg->db_iowrite(ndev->db_valid_mask,
ndev->self_mmio +
ndev->self_reg->db_mask);
return 0;
}
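/* Per-device initialization for gen1 (Xeon) hardware: record the applicable
 * errata flags, read PPD to determine topology and split-BAR mode, then set
 * up the NTB registers and interrupts.
 */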
static int xeon_init_dev(struct intel_ntb_dev *ndev)
{
struct pci_dev *pdev;
u8 ppd;
int rc, mem;
pdev = ndev->ntb.pdev;
switch (pdev->device) {
/* There is a Xeon hardware errata related to writes to SDOORBELL or
* B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
* which may hang the system. To work around this, use the second memory
* window to access the interrupt and scratch pad registers on the
* remote system.
*/
case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
break;
}
switch (pdev->device) {
/* There is a hardware errata related to accessing any register in
* SB01BASE in the presence of bidirectional traffic crossing the NTB.
*/
case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
break;
}
switch (pdev->device) {
/* HW Errata on bit 14 of b2bdoorbell register. Writes will not be
* mirrored to the remote system. Shrink the number of bits by one,
* since bit 14 is the last bit.
*/
case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
break;
}
ndev->reg = &xeon_reg;
rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
if (rc)
return -EIO;
ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
ntb_topo_string(ndev->ntb.topo));
if (ndev->ntb.topo == NTB_TOPO_NONE)
return -EINVAL;
if (ndev->ntb.topo != NTB_TOPO_SEC) {
ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
dev_dbg(&pdev->dev, "ppd %#x bar4_split %d\n",
ppd, ndev->bar4_split);
} else {
/* This is a way for transparent BAR to figure out if we are
* doing split BAR or not. There is no way for the hw on the
* transparent side to know and set the PPD.
*/
mem = pci_select_bars(pdev, IORESOURCE_MEM);
ndev->bar4_split = hweight32(mem) ==
HSX_SPLIT_BAR_MW_COUNT + 1;
dev_dbg(&pdev->dev, "mem %#x bar4_split %d\n",
mem, ndev->bar4_split);
}
rc = xeon_init_ntb(ndev);
if (rc)
return rc;
return xeon_init_isr(ndev);
}
static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
{
xeon_deinit_isr(ndev);
}
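/* Common PCI bring-up: enable the device, request its regions, enable bus
 * mastering, set a 64-bit (or fallback 32-bit) DMA mask, and map BAR 0 as
 * the self MMIO space.
 */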
static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
{
int rc;
pci_set_drvdata(pdev, ndev);
rc = pci_enable_device(pdev);
if (rc)
goto err_pci_enable;
rc = pci_request_regions(pdev, NTB_NAME);
if (rc)
goto err_pci_regions;
pci_set_master(pdev);
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc) {
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
goto err_dma_mask;
dev_warn(&pdev->dev, "Cannot DMA highmem\n");
}
ndev->self_mmio = pci_iomap(pdev, 0, 0);
if (!ndev->self_mmio) {
rc = -EIO;
goto err_mmio;
}
ndev->peer_mmio = ndev->self_mmio;
ndev->peer_addr = pci_resource_start(pdev, 0);
return 0;
err_mmio:
err_dma_mask:
pci_release_regions(pdev);
err_pci_regions:
pci_disable_device(pdev);
err_pci_enable:
pci_set_drvdata(pdev, NULL);
return rc;
}
static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{
struct pci_dev *pdev = ndev->ntb.pdev;
if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
pci_iounmap(pdev, ndev->peer_mmio);
pci_iounmap(pdev, ndev->self_mmio);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
struct pci_dev *pdev)
{
ndev->ntb.pdev = pdev;
ndev->ntb.topo = NTB_TOPO_NONE;
ndev->ntb.ops = &intel_ntb_ops;
ndev->b2b_off = 0;
ndev->b2b_idx = UINT_MAX;
ndev->bar4_split = 0;
ndev->mw_count = 0;
ndev->spad_count = 0;
ndev->db_count = 0;
ndev->db_vec_count = 0;
ndev->db_vec_shift = 0;
ndev->ntb_ctl = 0;
ndev->lnk_sta = 0;
ndev->db_valid_mask = 0;
ndev->db_link_mask = 0;
ndev->db_mask = 0;
spin_lock_init(&ndev->db_mask_lock);
}
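/* Probe: allocate the device structure on the local NUMA node, run the
 * generation-specific init (gen1/gen3/gen4/gen5), create the debugfs
 * entries, then register with the NTB core.
 */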
static int intel_ntb_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct intel_ntb_dev *ndev;
int rc, node;
node = dev_to_node(&pdev->dev);
ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
if (!ndev) {
rc = -ENOMEM;
goto err_ndev;
}
ndev_init_struct(ndev, pdev);
if (pdev_is_gen1(pdev)) {
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
goto err_init_pci;
rc = xeon_init_dev(ndev);
if (rc)
goto err_init_dev;
} else if (pdev_is_gen3(pdev)) {
ndev->ntb.ops = &intel_ntb3_ops;
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
goto err_init_pci;
rc = gen3_init_dev(ndev);
if (rc)
goto err_init_dev;
} else if (pdev_is_gen4(pdev) || pdev_is_gen5(pdev)) {
ndev->ntb.ops = &intel_ntb4_ops;
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
goto err_init_pci;
rc = gen4_init_dev(ndev);
if (rc)
goto err_init_dev;
} else {
rc = -EINVAL;
goto err_init_pci;
}
ndev_reset_unsafe_flags(ndev);
ndev->reg->poll_link(ndev);
ndev_init_debugfs(ndev);
rc = ntb_register_device(&ndev->ntb);
if (rc)
goto err_register;
dev_info(&pdev->dev, "NTB device registered.\n");
return 0;
err_register:
ndev_deinit_debugfs(ndev);
if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
xeon_deinit_dev(ndev);
err_init_dev:
intel_ntb_deinit_pci(ndev);
err_init_pci:
kfree(ndev);
err_ndev:
return rc;
}
static void intel_ntb_pci_remove(struct pci_dev *pdev)
{
struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);
ntb_unregister_device(&ndev->ntb);
ndev_deinit_debugfs(ndev);
if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
xeon_deinit_dev(ndev);
intel_ntb_deinit_pci(ndev);
kfree(ndev);
}
static const struct intel_ntb_reg xeon_reg = {
.poll_link = xeon_poll_link,
.link_is_up = xeon_link_is_up,
.db_ioread = xeon_db_ioread,
.db_iowrite = xeon_db_iowrite,
.db_size = sizeof(u32),
.ntb_ctl = XEON_NTBCNTL_OFFSET,
.mw_bar = {2, 4, 5},
};
static const struct intel_ntb_alt_reg xeon_pri_reg = {
.db_bell = XEON_PDOORBELL_OFFSET,
.db_mask = XEON_PDBMSK_OFFSET,
.spad = XEON_SPAD_OFFSET,
};
static const struct intel_ntb_alt_reg xeon_sec_reg = {
.db_bell = XEON_SDOORBELL_OFFSET,
.db_mask = XEON_SDBMSK_OFFSET,
/* second half of the scratchpads */
.spad = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
};
static const struct intel_ntb_alt_reg xeon_b2b_reg = {
.db_bell = XEON_B2B_DOORBELL_OFFSET,
.spad = XEON_B2B_SPAD_OFFSET,
};
static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
/* Note: no primary .bar0_base visible to the secondary side.
*
* The secondary side cannot get the base address stored in primary
* bars. The base address is necessary to set the limit register to
* any value other than zero, or unlimited.
*
* WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
* window by setting the limit equal to base, nor can it limit the size
* of the memory window by setting the limit to base + size.
*/
.bar2_limit = XEON_PBAR23LMT_OFFSET,
.bar2_xlat = XEON_PBAR23XLAT_OFFSET,
};
static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
.bar0_base = XEON_SBAR0BASE_OFFSET,
.bar2_limit = XEON_SBAR23LMT_OFFSET,
.bar2_xlat = XEON_SBAR23XLAT_OFFSET,
};
struct intel_b2b_addr xeon_b2b_usd_addr = {
.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};
struct intel_b2b_addr xeon_b2b_dsd_addr = {
.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};
/* operations for primary side of local ntb */
static const struct ntb_dev_ops intel_ntb_ops = {
.mw_count = intel_ntb_mw_count,
.mw_get_align = intel_ntb_mw_get_align,
.mw_set_trans = intel_ntb_mw_set_trans,
.peer_mw_count = intel_ntb_peer_mw_count,
.peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
.link_is_up = intel_ntb_link_is_up,
.link_enable = intel_ntb_link_enable,
.link_disable = intel_ntb_link_disable,
.db_is_unsafe = intel_ntb_db_is_unsafe,
.db_valid_mask = intel_ntb_db_valid_mask,
.db_vector_count = intel_ntb_db_vector_count,
.db_vector_mask = intel_ntb_db_vector_mask,
.db_read = intel_ntb_db_read,
.db_clear = intel_ntb_db_clear,
.db_set_mask = intel_ntb_db_set_mask,
.db_clear_mask = intel_ntb_db_clear_mask,
.peer_db_addr = intel_ntb_peer_db_addr,
.peer_db_set = intel_ntb_peer_db_set,
.spad_is_unsafe = intel_ntb_spad_is_unsafe,
.spad_count = intel_ntb_spad_count,
.spad_read = intel_ntb_spad_read,
.spad_write = intel_ntb_spad_write,
.peer_spad_addr = intel_ntb_peer_spad_addr,
.peer_spad_read = intel_ntb_peer_spad_read,
.peer_spad_write = intel_ntb_peer_spad_write,
};
static const struct file_operations intel_ntb_debugfs_info = {
.owner = THIS_MODULE,
.open = simple_open,
.read = ndev_debugfs_read,
};
static const struct pci_device_id intel_ntb_pci_tbl[] = {
/* GEN1 */
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
/* GEN3 */
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
/* GEN4 */
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_ICX)},
/* GEN5 PCIe */
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_GNR)},
{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
static struct pci_driver intel_ntb_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = intel_ntb_pci_tbl,
.probe = intel_ntb_pci_probe,
.remove = intel_ntb_pci_remove,
};
static int __init intel_ntb_pci_driver_init(void)
{
int ret;
pr_info("%s %s\n", NTB_DESC, NTB_VER);
if (debugfs_initialized())
debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
ret = pci_register_driver(&intel_ntb_pci_driver);
if (ret)
debugfs_remove_recursive(debugfs_dir);
return ret;
}
module_init(intel_ntb_pci_driver_init);
static void __exit intel_ntb_pci_driver_exit(void)
{
pci_unregister_driver(&intel_ntb_pci_driver);
debugfs_remove_recursive(debugfs_dir);
}
module_exit(intel_ntb_pci_driver_exit);
| linux-master | drivers/ntb/hw/intel/ntb_hw_gen1.c |
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>
#include <linux/log2.h>
#include "ntb_hw_intel.h"
#include "ntb_hw_gen1.h"
#include "ntb_hw_gen3.h"
#include "ntb_hw_gen4.h"
static int gen4_poll_link(struct intel_ntb_dev *ndev);
static int gen4_link_is_up(struct intel_ntb_dev *ndev);
static const struct intel_ntb_reg gen4_reg = {
.poll_link = gen4_poll_link,
.link_is_up = gen4_link_is_up,
.db_ioread = gen3_db_ioread,
.db_iowrite = gen3_db_iowrite,
.db_size = sizeof(u32),
.ntb_ctl = GEN4_NTBCNTL_OFFSET,
.mw_bar = {2, 4},
};
static const struct intel_ntb_alt_reg gen4_pri_reg = {
.db_clear = GEN4_IM_INT_STATUS_OFFSET,
.db_mask = GEN4_IM_INT_DISABLE_OFFSET,
.spad = GEN4_IM_SPAD_OFFSET,
};
static const struct intel_ntb_xlat_reg gen4_sec_xlat = {
.bar2_limit = GEN4_IM23XLMT_OFFSET,
.bar2_xlat = GEN4_IM23XBASE_OFFSET,
.bar2_idx = GEN4_IM23XBASEIDX_OFFSET,
};
static const struct intel_ntb_alt_reg gen4_b2b_reg = {
.db_bell = GEN4_IM_DOORBELL_OFFSET,
.spad = GEN4_EM_SPAD_OFFSET,
};
static int gen4_poll_link(struct intel_ntb_dev *ndev)
{
u16 reg_val;
/*
* We need to write to DLLSCS bit in the SLOTSTS before we
* can clear the hardware link interrupt on ICX NTB.
*/
iowrite16(GEN4_SLOTSTS_DLLSCS, ndev->self_mmio + GEN4_SLOTSTS);
ndev->reg->db_iowrite(ndev->db_link_mask,
ndev->self_mmio +
ndev->self_reg->db_clear);
reg_val = ioread16(ndev->self_mmio + GEN4_LINK_STATUS_OFFSET);
if (reg_val == ndev->lnk_sta)
return 0;
ndev->lnk_sta = reg_val;
return 1;
}
static int gen4_link_is_up(struct intel_ntb_dev *ndev)
{
return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
}
static int gen4_init_isr(struct intel_ntb_dev *ndev)
{
int i;
/*
* The MSIX vectors and the interrupt status bits are not lined up
* on Gen3 (Skylake) and Gen4. By default the link status bit is bit
* 32, but it maps to MSIX vector 0 by default. We need a fixup to
* line them up. The vectors at reset are 1-32,0; we reprogram them
* to 0-32.
*/
for (i = 0; i < GEN4_DB_MSIX_VECTOR_COUNT; i++)
iowrite8(i, ndev->self_mmio + GEN4_INTVEC_OFFSET + i);
return ndev_init_isr(ndev, GEN4_DB_MSIX_VECTOR_COUNT,
GEN4_DB_MSIX_VECTOR_COUNT,
GEN4_DB_MSIX_VECTOR_SHIFT,
GEN4_DB_TOTAL_SHIFT);
}
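/* Gen4 b2b window setup: program the incoming limit registers to the peer
 * BAR base addresses (zero-length windows) and clear the incoming
 * translations; peer registers are reached through the self MMIO space.
 */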
static int gen4_setup_b2b_mw(struct intel_ntb_dev *ndev,
const struct intel_b2b_addr *addr,
const struct intel_b2b_addr *peer_addr)
{
struct pci_dev *pdev;
void __iomem *mmio;
phys_addr_t bar_addr;
pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio;
/* setup incoming bar limits == base addrs (zero length windows) */
bar_addr = addr->bar2_addr64;
iowrite64(bar_addr, mmio + GEN4_IM23XLMT_OFFSET);
bar_addr = ioread64(mmio + GEN4_IM23XLMT_OFFSET);
dev_dbg(&pdev->dev, "IM23XLMT %#018llx\n", bar_addr);
bar_addr = addr->bar4_addr64;
iowrite64(bar_addr, mmio + GEN4_IM45XLMT_OFFSET);
bar_addr = ioread64(mmio + GEN4_IM45XLMT_OFFSET);
dev_dbg(&pdev->dev, "IM45XLMT %#018llx\n", bar_addr);
/* zero incoming translation addrs */
iowrite64(0, mmio + GEN4_IM23XBASE_OFFSET);
iowrite64(0, mmio + GEN4_IM45XBASE_OFFSET);
ndev->peer_mmio = ndev->self_mmio;
return 0;
}
static int gen4_init_ntb(struct intel_ntb_dev *ndev)
{
int rc;
ndev->mw_count = XEON_MW_COUNT;
ndev->spad_count = GEN4_SPAD_COUNT;
ndev->db_count = GEN4_DB_COUNT;
ndev->db_link_mask = GEN4_DB_LINK_BIT;
ndev->self_reg = &gen4_pri_reg;
ndev->xlat_reg = &gen4_sec_xlat;
ndev->peer_reg = &gen4_b2b_reg;
if (ndev->ntb.topo == NTB_TOPO_B2B_USD)
rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_dsd_addr,
&xeon_b2b_usd_addr);
else
rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_usd_addr,
&xeon_b2b_dsd_addr);
if (rc)
return rc;
ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
ndev->reg->db_iowrite(ndev->db_valid_mask,
ndev->self_mmio +
ndev->self_reg->db_mask);
return 0;
}
static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
switch (ppd & GEN4_PPD_TOPO_MASK) {
case GEN4_PPD_TOPO_B2B_USD:
return NTB_TOPO_B2B_USD;
case GEN4_PPD_TOPO_B2B_DSD:
return NTB_TOPO_B2B_DSD;
}
return NTB_TOPO_NONE;
}
static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
switch (ppd & SPR_PPD_TOPO_MASK) {
case SPR_PPD_TOPO_B2B_USD:
return NTB_TOPO_B2B_USD;
case SPR_PPD_TOPO_B2B_DSD:
return NTB_TOPO_B2B_DSD;
}
return NTB_TOPO_NONE;
}
int gen4_init_dev(struct intel_ntb_dev *ndev)
{
struct pci_dev *pdev = ndev->ntb.pdev;
u32 ppd1/*, ppd0*/;
u16 lnkctl;
int rc;
ndev->reg = &gen4_reg;
if (pdev_is_ICX(pdev)) {
ndev->hwerr_flags |= NTB_HWERR_BAR_ALIGN;
ndev->hwerr_flags |= NTB_HWERR_LTR_BAD;
}
ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
if (pdev_is_ICX(pdev))
ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
else if (pdev_is_SPR(pdev) || pdev_is_gen5(pdev))
ndev->ntb.topo = spr_ppd_topo(ndev, ppd1);
dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
ntb_topo_string(ndev->ntb.topo));
if (ndev->ntb.topo == NTB_TOPO_NONE)
return -EINVAL;
rc = gen4_init_ntb(ndev);
if (rc)
return rc;
/* init link setup */
lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
return gen4_init_isr(ndev);
}
ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
struct intel_ntb_dev *ndev;
void __iomem *mmio;
char *buf;
size_t buf_size;
ssize_t ret, off;
union { u64 v64; u32 v32; u16 v16; } u;
ndev = filp->private_data;
mmio = ndev->self_mmio;
buf_size = min(count, 0x800ul);
buf = kmalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
off = 0;
off += scnprintf(buf + off, buf_size - off,
"NTB Device Information:\n");
off += scnprintf(buf + off, buf_size - off,
"Connection Topology -\t%s\n",
ntb_topo_string(ndev->ntb.topo));
off += scnprintf(buf + off, buf_size - off,
"NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
off += scnprintf(buf + off, buf_size - off,
"LNK STA (cached) -\t\t%#06x\n", ndev->lnk_sta);
if (!ndev->reg->link_is_up(ndev))
off += scnprintf(buf + off, buf_size - off,
"Link Status -\t\tDown\n");
else {
off += scnprintf(buf + off, buf_size - off,
"Link Status -\t\tUp\n");
off += scnprintf(buf + off, buf_size - off,
"Link Speed -\t\tPCI-E Gen %u\n",
NTB_LNK_STA_SPEED(ndev->lnk_sta));
off += scnprintf(buf + off, buf_size - off,
"Link Width -\t\tx%u\n",
NTB_LNK_STA_WIDTH(ndev->lnk_sta));
}
off += scnprintf(buf + off, buf_size - off,
"Memory Window Count -\t%u\n", ndev->mw_count);
off += scnprintf(buf + off, buf_size - off,
"Scratchpad Count -\t%u\n", ndev->spad_count);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Count -\t%u\n", ndev->db_count);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Mask -\t\t%#llx\n", u.v64);
off += scnprintf(buf + off, buf_size - off,
"\nNTB Incoming XLAT:\n");
u.v64 = ioread64(mmio + GEN4_IM23XBASE_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"IM23XBASE -\t\t%#018llx\n", u.v64);
u.v64 = ioread64(mmio + GEN4_IM45XBASE_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"IM45XBASE -\t\t%#018llx\n", u.v64);
u.v64 = ioread64(mmio + GEN4_IM23XLMT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"IM23XLMT -\t\t\t%#018llx\n", u.v64);
u.v64 = ioread64(mmio + GEN4_IM45XLMT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"IM45XLMT -\t\t\t%#018llx\n", u.v64);
off += scnprintf(buf + off, buf_size - off,
"\nNTB Statistics:\n");
off += scnprintf(buf + off, buf_size - off,
"\nNTB Hardware Errors:\n");
if (!pci_read_config_word(ndev->ntb.pdev,
GEN4_DEVSTS_OFFSET, &u.v16))
off += scnprintf(buf + off, buf_size - off,
"DEVSTS -\t\t%#06x\n", u.v16);
u.v16 = ioread16(mmio + GEN4_LINK_STATUS_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"LNKSTS -\t\t%#06x\n", u.v16);
if (!pci_read_config_dword(ndev->ntb.pdev,
GEN4_UNCERRSTS_OFFSET, &u.v32))
off += scnprintf(buf + off, buf_size - off,
"UNCERRSTS -\t\t%#06x\n", u.v32);
if (!pci_read_config_dword(ndev->ntb.pdev,
GEN4_CORERRSTS_OFFSET, &u.v32))
off += scnprintf(buf + off, buf_size - off,
"CORERRSTS -\t\t%#06x\n", u.v32);
ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
kfree(buf);
return ret;
}
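/* Gen4 variant of mw_set_trans: always programs a limit, and on parts with
 * the BAR-alignment erratum also writes the BASEIDX (log2 of the window
 * size) register; every write is verified by reading it back.
 */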
static int intel_ntb4_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
dma_addr_t addr, resource_size_t size)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
unsigned long xlat_reg, limit_reg, idx_reg;
unsigned short base_idx, reg_val16;
resource_size_t bar_size, mw_size;
void __iomem *mmio;
u64 base, limit, reg_val;
int bar;
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
if (idx >= ndev->b2b_idx && !ndev->b2b_off)
idx += 1;
bar = ndev_mw_to_bar(ndev, idx);
if (bar < 0)
return bar;
bar_size = pci_resource_len(ndev->ntb.pdev, bar);
if (idx == ndev->b2b_idx)
mw_size = bar_size - ndev->b2b_off;
else
mw_size = bar_size;
if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) {
/* hardware requires that addr is aligned to bar size */
if (addr & (bar_size - 1))
return -EINVAL;
} else {
if (addr & (PAGE_SIZE - 1))
return -EINVAL;
}
/* make sure the range fits in the usable mw size */
if (size > mw_size)
return -EINVAL;
mmio = ndev->self_mmio;
xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
base = pci_resource_start(ndev->ntb.pdev, bar);
/* Set the limit if supported and size is not mw_size */
if (limit_reg && size != mw_size) {
limit = base + size;
base_idx = __ilog2_u64(size);
} else {
limit = base + mw_size;
base_idx = __ilog2_u64(mw_size);
}
/* set and verify setting the translation address */
iowrite64(addr, mmio + xlat_reg);
reg_val = ioread64(mmio + xlat_reg);
if (reg_val != addr) {
iowrite64(0, mmio + xlat_reg);
return -EIO;
}
dev_dbg(&ntb->pdev->dev, "BAR %d IMXBASE: %#Lx\n", bar, reg_val);
/* set and verify setting the limit */
iowrite64(limit, mmio + limit_reg);
reg_val = ioread64(mmio + limit_reg);
if (reg_val != limit) {
iowrite64(base, mmio + limit_reg);
iowrite64(0, mmio + xlat_reg);
return -EIO;
}
dev_dbg(&ntb->pdev->dev, "BAR %d IMXLMT: %#Lx\n", bar, reg_val);
if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) {
idx_reg = ndev->xlat_reg->bar2_idx + (idx * 0x2);
iowrite16(base_idx, mmio + idx_reg);
reg_val16 = ioread16(mmio + idx_reg);
if (reg_val16 != base_idx) {
iowrite64(base, mmio + limit_reg);
iowrite64(0, mmio + xlat_reg);
iowrite16(0, mmio + idx_reg);
return -EIO;
}
dev_dbg(&ntb->pdev->dev, "BAR %d IMBASEIDX: %#x\n", bar, reg_val16);
}
return 0;
}
static int intel_ntb4_link_enable(struct ntb_dev *ntb,
enum ntb_speed max_speed, enum ntb_width max_width)
{
struct intel_ntb_dev *ndev;
u32 ntb_ctl, ppd0;
u16 lnkctl;
ndev = container_of(ntb, struct intel_ntb_dev, ntb);
dev_dbg(&ntb->pdev->dev,
"Enabling link with max_speed %d max_width %d\n",
max_speed, max_width);
if (max_speed != NTB_SPEED_AUTO)
dev_dbg(&ntb->pdev->dev,
"ignoring max_speed %d\n", max_speed);
if (max_width != NTB_WIDTH_AUTO)
dev_dbg(&ntb->pdev->dev,
"ignoring max_width %d\n", max_width);
if (!(ndev->hwerr_flags & NTB_HWERR_LTR_BAD)) {
u32 ltr;
/* Setup active snoop LTR values */
ltr = NTB_LTR_ACTIVE_REQMNT | NTB_LTR_ACTIVE_VAL | NTB_LTR_ACTIVE_LATSCALE;
/* Setup active non-snoop values */
ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr;
iowrite32(ltr, ndev->self_mmio + GEN4_LTR_ACTIVE_OFFSET);
/* Setup idle snoop LTR values */
ltr = NTB_LTR_IDLE_VAL | NTB_LTR_IDLE_LATSCALE | NTB_LTR_IDLE_REQMNT;
/* Setup idle non-snoop values */
ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr;
iowrite32(ltr, ndev->self_mmio + GEN4_LTR_IDLE_OFFSET);
/* setup PCIe LTR to active */
iowrite8(NTB_LTR_SWSEL_ACTIVE, ndev->self_mmio + GEN4_LTR_SWSEL_OFFSET);
}
ntb_ctl = NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP;
ntb_ctl |= NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP;
iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
lnkctl &= ~GEN4_LINK_CTRL_LINK_DISABLE;
iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
/* start link training in PPD0 */
ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
ppd0 |= GEN4_PPD_LINKTRN;
iowrite32(ppd0, ndev->self_mmio + GEN4_PPD0_OFFSET);
/* make sure link training has started */
ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
if (!(ppd0 & GEN4_PPD_LINKTRN)) {
dev_warn(&ntb->pdev->dev, "Link is not training\n");
return -ENXIO;
}
ndev->dev_up = 1;
return 0;
}
static int intel_ntb4_link_disable(struct ntb_dev *ntb)
{
struct intel_ntb_dev *ndev;
u32 ntb_cntl;
u16 lnkctl;
ndev = container_of(ntb, struct intel_ntb_dev, ntb);
dev_dbg(&ntb->pdev->dev, "Disabling link\n");
/* clear the snoop bits */
ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
ntb_cntl &= ~(NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP);
ntb_cntl &= ~(NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP);
iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
/* set LTR to idle */
if (!(ndev->hwerr_flags & NTB_HWERR_LTR_BAD))
iowrite8(NTB_LTR_SWSEL_IDLE, ndev->self_mmio + GEN4_LTR_SWSEL_OFFSET);
ndev->dev_up = 0;
return 0;
}
static int intel_ntb4_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
resource_size_t *addr_align,
resource_size_t *size_align,
resource_size_t *size_max)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
resource_size_t bar_size, mw_size;
int bar;
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
if (idx >= ndev->b2b_idx && !ndev->b2b_off)
idx += 1;
bar = ndev_mw_to_bar(ndev, idx);
if (bar < 0)
return bar;
bar_size = pci_resource_len(ndev->ntb.pdev, bar);
if (idx == ndev->b2b_idx)
mw_size = bar_size - ndev->b2b_off;
else
mw_size = bar_size;
if (addr_align) {
if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN)
*addr_align = pci_resource_len(ndev->ntb.pdev, bar);
else
*addr_align = PAGE_SIZE;
}
if (size_align)
*size_align = 1;
if (size_max)
*size_max = mw_size;
return 0;
}
const struct ntb_dev_ops intel_ntb4_ops = {
.mw_count = intel_ntb_mw_count,
.mw_get_align = intel_ntb4_mw_get_align,
.mw_set_trans = intel_ntb4_mw_set_trans,
.peer_mw_count = intel_ntb_peer_mw_count,
.peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
.link_is_up = intel_ntb_link_is_up,
.link_enable = intel_ntb4_link_enable,
.link_disable = intel_ntb4_link_disable,
.db_valid_mask = intel_ntb_db_valid_mask,
.db_vector_count = intel_ntb_db_vector_count,
.db_vector_mask = intel_ntb_db_vector_mask,
.db_read = intel_ntb3_db_read,
.db_clear = intel_ntb3_db_clear,
.db_set_mask = intel_ntb_db_set_mask,
.db_clear_mask = intel_ntb_db_clear_mask,
.peer_db_addr = intel_ntb3_peer_db_addr,
.peer_db_set = intel_ntb3_peer_db_set,
.spad_is_unsafe = intel_ntb_spad_is_unsafe,
.spad_count = intel_ntb_spad_count,
.spad_read = intel_ntb_spad_read,
.spad_write = intel_ntb_spad_write,
.peer_spad_addr = intel_ntb_peer_spad_addr,
.peer_spad_read = intel_ntb_peer_spad_read,
.peer_spad_write = intel_ntb_peer_spad_write,
};
| linux-master | drivers/ntb/hw/intel/ntb_hw_gen4.c |
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* BSD LICENSE
*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Intel PCIe GEN3 NTB Linux driver
*
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>
#include "ntb_hw_intel.h"
#include "ntb_hw_gen1.h"
#include "ntb_hw_gen3.h"
static int gen3_poll_link(struct intel_ntb_dev *ndev);
static const struct intel_ntb_reg gen3_reg = {
.poll_link = gen3_poll_link,
.link_is_up = xeon_link_is_up,
.db_ioread = gen3_db_ioread,
.db_iowrite = gen3_db_iowrite,
.db_size = sizeof(u32),
.ntb_ctl = GEN3_NTBCNTL_OFFSET,
.mw_bar = {2, 4},
};
static const struct intel_ntb_alt_reg gen3_pri_reg = {
.db_bell = GEN3_EM_DOORBELL_OFFSET,
.db_clear = GEN3_IM_INT_STATUS_OFFSET,
.db_mask = GEN3_IM_INT_DISABLE_OFFSET,
.spad = GEN3_IM_SPAD_OFFSET,
};
static const struct intel_ntb_alt_reg gen3_b2b_reg = {
.db_bell = GEN3_IM_DOORBELL_OFFSET,
.db_clear = GEN3_EM_INT_STATUS_OFFSET,
.db_mask = GEN3_EM_INT_DISABLE_OFFSET,
.spad = GEN3_B2B_SPAD_OFFSET,
};
static const struct intel_ntb_xlat_reg gen3_sec_xlat = {
/* .bar0_base = GEN3_EMBAR0_OFFSET, */
.bar2_limit = GEN3_IMBAR1XLMT_OFFSET,
.bar2_xlat = GEN3_IMBAR1XBASE_OFFSET,
};
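/*
* Acknowledge the link doorbell, then read the PCIe link status word from
* config space. Returns 1 when the cached link status changed since the
* last poll, 0 otherwise (including when the config-space read fails).
*/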
static int gen3_poll_link(struct intel_ntb_dev *ndev)
{
u16 reg_val;
int rc;
ndev->reg->db_iowrite(ndev->db_link_mask,
ndev->self_mmio +
ndev->self_reg->db_clear);
rc = pci_read_config_word(ndev->ntb.pdev,
GEN3_LINK_STATUS_OFFSET, ®_val);
if (rc)
return 0;
if (reg_val == ndev->lnk_sta)
return 0;
ndev->lnk_sta = reg_val;
return 1;
}
static int gen3_init_isr(struct intel_ntb_dev *ndev)
{
int i;
/*
* The MSIX vectors and the interrupt status bits are not lined up
* on Skylake. By default the link status bit is bit 32, but it is
* assigned to MSIX vector 0, so we need to fix things up to line
* them up. The vectors at reset are 1-32,0; we reprogram to 0-32.
*/
for (i = 0; i < GEN3_DB_MSIX_VECTOR_COUNT; i++)
iowrite8(i, ndev->self_mmio + GEN3_INTVEC_OFFSET + i);
/* move link status down one as workaround */
if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
iowrite8(GEN3_DB_MSIX_VECTOR_COUNT - 2,
ndev->self_mmio + GEN3_INTVEC_OFFSET +
(GEN3_DB_MSIX_VECTOR_COUNT - 1));
}
return ndev_init_isr(ndev, GEN3_DB_MSIX_VECTOR_COUNT,
GEN3_DB_MSIX_VECTOR_COUNT,
GEN3_DB_MSIX_VECTOR_SHIFT,
GEN3_DB_TOTAL_SHIFT);
}
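/*
* Program the inbound BAR limits to the configured B2B base addresses
* (zero-length windows until a translation is set) and clear the inbound
* translation bases. On gen3 the peer registers are reached through the
* same mapping, so peer_mmio simply aliases self_mmio.
*/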
static int gen3_setup_b2b_mw(struct intel_ntb_dev *ndev,
const struct intel_b2b_addr *addr,
const struct intel_b2b_addr *peer_addr)
{
struct pci_dev *pdev;
void __iomem *mmio;
phys_addr_t bar_addr;
pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio;
/* setup incoming bar limits == base addrs (zero length windows) */
bar_addr = addr->bar2_addr64;
iowrite64(bar_addr, mmio + GEN3_IMBAR1XLMT_OFFSET);
bar_addr = ioread64(mmio + GEN3_IMBAR1XLMT_OFFSET);
dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);
bar_addr = addr->bar4_addr64;
iowrite64(bar_addr, mmio + GEN3_IMBAR2XLMT_OFFSET);
bar_addr = ioread64(mmio + GEN3_IMBAR2XLMT_OFFSET);
dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);
/* zero incoming translation addrs */
iowrite64(0, mmio + GEN3_IMBAR1XBASE_OFFSET);
iowrite64(0, mmio + GEN3_IMBAR2XBASE_OFFSET);
ndev->peer_mmio = ndev->self_mmio;
return 0;
}
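/*
* Set the gen3 resource counts (memory windows, scratchpads, doorbells),
* apply the bit-31 link doorbell fixup when the MSIX vector 32 erratum is
* flagged, configure the register sets for the supported B2B topologies,
* enable bus mastering/memory space on the secondary side and leave every
* doorbell masked.
*/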
static int gen3_init_ntb(struct intel_ntb_dev *ndev)
{
int rc;
ndev->mw_count = XEON_MW_COUNT;
ndev->spad_count = GEN3_SPAD_COUNT;
ndev->db_count = GEN3_DB_COUNT;
ndev->db_link_mask = GEN3_DB_LINK_BIT;
/* DB fixup for using 31 right now */
if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
ndev->db_link_mask |= BIT_ULL(31);
switch (ndev->ntb.topo) {
case NTB_TOPO_B2B_USD:
case NTB_TOPO_B2B_DSD:
ndev->self_reg = &gen3_pri_reg;
ndev->peer_reg = &gen3_b2b_reg;
ndev->xlat_reg = &gen3_sec_xlat;
if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
rc = gen3_setup_b2b_mw(ndev,
&xeon_b2b_dsd_addr,
&xeon_b2b_usd_addr);
} else {
rc = gen3_setup_b2b_mw(ndev,
&xeon_b2b_usd_addr,
&xeon_b2b_dsd_addr);
}
if (rc)
return rc;
/* Enable Bus Master and Memory Space on the secondary side */
iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
ndev->self_mmio + GEN3_SPCICMD_OFFSET);
break;
default:
return -EINVAL;
}
ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
ndev->reg->db_iowrite(ndev->db_valid_mask,
ndev->self_mmio +
ndev->self_reg->db_mask);
return 0;
}
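/*
* Gen3 device init: read the PPD register to determine the topology, flag
* the MSIX vector 32 erratum workaround, then bring up the NTB registers
* and the interrupt handling.
*/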
int gen3_init_dev(struct intel_ntb_dev *ndev)
{
struct pci_dev *pdev;
u8 ppd;
int rc;
pdev = ndev->ntb.pdev;
ndev->reg = &gen3_reg;
rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
if (rc)
return -EIO;
ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
ntb_topo_string(ndev->ntb.topo));
if (ndev->ntb.topo == NTB_TOPO_NONE)
return -EINVAL;
ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;
rc = gen3_init_ntb(ndev);
if (rc)
return rc;
return gen3_init_isr(ndev);
}
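/*
* Dump the gen3 device state (topology, link status, resource counts,
* doorbell masks, XLAT/limit registers and a few config-space error
* registers) into a debugfs read buffer capped at 2KB.
*/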
ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
struct intel_ntb_dev *ndev;
void __iomem *mmio;
char *buf;
size_t buf_size;
ssize_t ret, off;
union { u64 v64; u32 v32; u16 v16; } u;
ndev = filp->private_data;
mmio = ndev->self_mmio;
buf_size = min(count, 0x800ul);
buf = kmalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
off = 0;
off += scnprintf(buf + off, buf_size - off,
"NTB Device Information:\n");
off += scnprintf(buf + off, buf_size - off,
"Connection Topology -\t%s\n",
ntb_topo_string(ndev->ntb.topo));
off += scnprintf(buf + off, buf_size - off,
"NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
off += scnprintf(buf + off, buf_size - off,
"LNK STA -\t\t%#06x\n", ndev->lnk_sta);
if (!ndev->reg->link_is_up(ndev))
off += scnprintf(buf + off, buf_size - off,
"Link Status -\t\tDown\n");
else {
off += scnprintf(buf + off, buf_size - off,
"Link Status -\t\tUp\n");
off += scnprintf(buf + off, buf_size - off,
"Link Speed -\t\tPCI-E Gen %u\n",
NTB_LNK_STA_SPEED(ndev->lnk_sta));
off += scnprintf(buf + off, buf_size - off,
"Link Width -\t\tx%u\n",
NTB_LNK_STA_WIDTH(ndev->lnk_sta));
}
off += scnprintf(buf + off, buf_size - off,
"Memory Window Count -\t%u\n", ndev->mw_count);
off += scnprintf(buf + off, buf_size - off,
"Scratchpad Count -\t%u\n", ndev->spad_count);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Count -\t%u\n", ndev->db_count);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Mask -\t\t%#llx\n", u.v64);
u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Bell -\t\t%#llx\n", u.v64);
off += scnprintf(buf + off, buf_size - off,
"\nNTB Incoming XLAT:\n");
u.v64 = ioread64(mmio + GEN3_IMBAR1XBASE_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"IMBAR1XBASE -\t\t%#018llx\n", u.v64);
u.v64 = ioread64(mmio + GEN3_IMBAR2XBASE_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"IMBAR2XBASE -\t\t%#018llx\n", u.v64);
u.v64 = ioread64(mmio + GEN3_IMBAR1XLMT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);
u.v64 = ioread64(mmio + GEN3_IMBAR2XLMT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);
if (ntb_topo_is_b2b(ndev->ntb.topo)) {
off += scnprintf(buf + off, buf_size - off,
"\nNTB Outgoing B2B XLAT:\n");
u.v64 = ioread64(mmio + GEN3_EMBAR1XBASE_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"EMBAR1XBASE -\t\t%#018llx\n", u.v64);
u.v64 = ioread64(mmio + GEN3_EMBAR2XBASE_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"EMBAR2XBASE -\t\t%#018llx\n", u.v64);
u.v64 = ioread64(mmio + GEN3_EMBAR1XLMT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"EMBAR1XLMT -\t\t%#018llx\n", u.v64);
u.v64 = ioread64(mmio + GEN3_EMBAR2XLMT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"EMBAR2XLMT -\t\t%#018llx\n", u.v64);
off += scnprintf(buf + off, buf_size - off,
"\nNTB Secondary BAR:\n");
u.v64 = ioread64(mmio + GEN3_EMBAR0_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"EMBAR0 -\t\t%#018llx\n", u.v64);
u.v64 = ioread64(mmio + GEN3_EMBAR1_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"EMBAR1 -\t\t%#018llx\n", u.v64);
u.v64 = ioread64(mmio + GEN3_EMBAR2_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"EMBAR2 -\t\t%#018llx\n", u.v64);
}
off += scnprintf(buf + off, buf_size - off,
"\nNTB Statistics:\n");
u.v16 = ioread16(mmio + GEN3_USMEMMISS_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"Upstream Memory Miss -\t%u\n", u.v16);
off += scnprintf(buf + off, buf_size - off,
"\nNTB Hardware Errors:\n");
if (!pci_read_config_word(ndev->ntb.pdev,
GEN3_DEVSTS_OFFSET, &u.v16))
off += scnprintf(buf + off, buf_size - off,
"DEVSTS -\t\t%#06x\n", u.v16);
if (!pci_read_config_word(ndev->ntb.pdev,
GEN3_LINK_STATUS_OFFSET, &u.v16))
off += scnprintf(buf + off, buf_size - off,
"LNKSTS -\t\t%#06x\n", u.v16);
if (!pci_read_config_dword(ndev->ntb.pdev,
GEN3_UNCERRSTS_OFFSET, &u.v32))
off += scnprintf(buf + off, buf_size - off,
"UNCERRSTS -\t\t%#06x\n", u.v32);
if (!pci_read_config_dword(ndev->ntb.pdev,
GEN3_CORERRSTS_OFFSET, &u.v32))
off += scnprintf(buf + off, buf_size - off,
"CORERRSTS -\t\t%#06x\n", u.v32);
ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
kfree(buf);
return ret;
}
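/*
* Enable the NTB link. The speed/width hints are ignored by this
* hardware; the control register is unlocked and snooping is enabled for
* BAR2 and BAR4 in both directions.
*/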
int intel_ntb3_link_enable(struct ntb_dev *ntb, enum ntb_speed max_speed,
enum ntb_width max_width)
{
struct intel_ntb_dev *ndev;
u32 ntb_ctl;
ndev = container_of(ntb, struct intel_ntb_dev, ntb);
dev_dbg(&ntb->pdev->dev,
"Enabling link with max_speed %d max_width %d\n",
max_speed, max_width);
if (max_speed != NTB_SPEED_AUTO)
dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
if (max_width != NTB_WIDTH_AUTO)
dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
return 0;
}
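/*
* Program an inbound memory window translation. The DMA address must be
* aligned to the BAR size and the requested size must fit in the usable
* window. The translation base and the limit are written and read back to
* verify the hardware accepted them; on a mismatch the registers are
* restored and -EIO is returned. The matching EP-side EMBAR limit is
* programmed the same way.
*/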
static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
dma_addr_t addr, resource_size_t size)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
unsigned long xlat_reg, limit_reg;
resource_size_t bar_size, mw_size;
void __iomem *mmio;
u64 base, limit, reg_val;
int bar;
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
if (idx >= ndev->b2b_idx && !ndev->b2b_off)
idx += 1;
bar = ndev_mw_to_bar(ndev, idx);
if (bar < 0)
return bar;
bar_size = pci_resource_len(ndev->ntb.pdev, bar);
if (idx == ndev->b2b_idx)
mw_size = bar_size - ndev->b2b_off;
else
mw_size = bar_size;
/* hardware requires that addr is aligned to bar size */
if (addr & (bar_size - 1))
return -EINVAL;
/* make sure the range fits in the usable mw size */
if (size > mw_size)
return -EINVAL;
mmio = ndev->self_mmio;
xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
base = pci_resource_start(ndev->ntb.pdev, bar);
/* Use the requested size as the limit when a limit register exists and size != mw_size */
if (limit_reg && size != mw_size)
limit = base + size;
else
limit = base + mw_size;
/* set and verify setting the translation address */
iowrite64(addr, mmio + xlat_reg);
reg_val = ioread64(mmio + xlat_reg);
if (reg_val != addr) {
iowrite64(0, mmio + xlat_reg);
return -EIO;
}
dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);
/* set and verify setting the limit */
iowrite64(limit, mmio + limit_reg);
reg_val = ioread64(mmio + limit_reg);
if (reg_val != limit) {
iowrite64(base, mmio + limit_reg);
iowrite64(0, mmio + xlat_reg);
return -EIO;
}
dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);
/* setup the EP */
limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
base = ioread64(mmio + GEN3_EMBAR1_OFFSET + (8 * idx));
base &= ~0xf;
if (limit_reg && size != mw_size)
limit = base + size;
else
limit = base + mw_size;
/* set and verify setting the limit */
iowrite64(limit, mmio + limit_reg);
reg_val = ioread64(mmio + limit_reg);
if (reg_val != limit) {
iowrite64(base, mmio + limit_reg);
iowrite64(0, mmio + xlat_reg);
return -EIO;
}
dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);
return 0;
}
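/*
* Report the peer doorbell register address and the value to write for a
* given doorbell bit. Each gen3 doorbell is a separate 32-bit register
* spaced 4 bytes apart, and writing 1 rings it.
*/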
int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
resource_size_t *db_size,
u64 *db_data, int db_bit)
{
phys_addr_t db_addr_base;
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
if (unlikely(db_bit >= BITS_PER_LONG_LONG))
return -EINVAL;
if (unlikely(BIT_ULL(db_bit) & ~ntb_ndev(ntb)->db_valid_mask))
return -EINVAL;
ndev_db_addr(ndev, &db_addr_base, db_size, ndev->peer_addr,
ndev->peer_reg->db_bell);
if (db_addr) {
*db_addr = db_addr_base + (db_bit * 4);
dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx db bit %d\n",
*db_addr, db_bit);
}
if (db_data) {
*db_data = 1;
dev_dbg(&ndev->ntb.pdev->dev, "Peer db data %llx db bit %d\n",
*db_data, db_bit);
}
return 0;
}
int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
int bit;
if (db_bits & ~ndev->db_valid_mask)
return -EINVAL;
while (db_bits) {
bit = __ffs(db_bits);
iowrite32(1, ndev->peer_mmio +
ndev->peer_reg->db_bell + (bit * 4));
db_bits &= db_bits - 1;
}
return 0;
}
u64 intel_ntb3_db_read(struct ntb_dev *ntb)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
return ndev_db_read(ndev,
ndev->self_mmio +
ndev->self_reg->db_clear);
}
int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
return ndev_db_write(ndev, db_bits,
ndev->self_mmio +
ndev->self_reg->db_clear);
}
const struct ntb_dev_ops intel_ntb3_ops = {
.mw_count = intel_ntb_mw_count,
.mw_get_align = intel_ntb_mw_get_align,
.mw_set_trans = intel_ntb3_mw_set_trans,
.peer_mw_count = intel_ntb_peer_mw_count,
.peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
.link_is_up = intel_ntb_link_is_up,
.link_enable = intel_ntb3_link_enable,
.link_disable = intel_ntb_link_disable,
.db_valid_mask = intel_ntb_db_valid_mask,
.db_vector_count = intel_ntb_db_vector_count,
.db_vector_mask = intel_ntb_db_vector_mask,
.db_read = intel_ntb3_db_read,
.db_clear = intel_ntb3_db_clear,
.db_set_mask = intel_ntb_db_set_mask,
.db_clear_mask = intel_ntb_db_clear_mask,
.peer_db_addr = intel_ntb3_peer_db_addr,
.peer_db_set = intel_ntb3_peer_db_set,
.spad_is_unsafe = intel_ntb_spad_is_unsafe,
.spad_count = intel_ntb_spad_count,
.spad_read = intel_ntb_spad_read,
.spad_write = intel_ntb_spad_write,
.peer_spad_addr = intel_ntb_peer_spad_addr,
.peer_spad_read = intel_ntb_peer_spad_read,
.peer_spad_write = intel_ntb_peer_spad_write,
};
| linux-master | drivers/ntb/hw/intel/ntb_hw_gen3.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Microsemi Switchtec(tm) PCIe Management Driver
* Copyright (c) 2017, Microsemi Corporation
*/
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/ntb.h>
#include <linux/pci.h>
#include <linux/switchtec.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");
static ulong max_mw_size = SZ_2M;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size,
"Max memory window size reported to the upper layer");
static bool use_lut_mws;
module_param(use_lut_mws, bool, 0644);
MODULE_PARM_DESC(use_lut_mws,
"Enable the use of the LUT based memory windows");
#define SWITCHTEC_NTB_MAGIC 0x45CC0001
#define MAX_MWS 128
struct shared_mw {
u32 magic;
u32 link_sta;
u32 partition_id;
u64 mw_sizes[MAX_MWS];
u32 spad[128];
};
#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
#define LUT_SIZE SZ_64K
struct switchtec_ntb {
struct ntb_dev ntb;
struct switchtec_dev *stdev;
int self_partition;
int peer_partition;
int doorbell_irq;
int message_irq;
struct ntb_info_regs __iomem *mmio_ntb;
struct ntb_ctrl_regs __iomem *mmio_ctrl;
struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg;
void __iomem *mmio_xlink_win;
struct shared_mw *self_shared;
struct shared_mw __iomem *peer_shared;
dma_addr_t self_shared_dma;
u64 db_mask;
u64 db_valid_mask;
int db_shift;
int db_peer_shift;
/* synchronize rmw access of db_mask and hw reg */
spinlock_t db_mask_lock;
int nr_direct_mw;
int nr_lut_mw;
int nr_rsvd_luts;
int direct_mw_to_bar[MAX_DIRECT_MW];
int peer_nr_direct_mw;
int peer_nr_lut_mw;
int peer_direct_mw_to_bar[MAX_DIRECT_MW];
bool link_is_up;
enum ntb_speed link_speed;
enum ntb_width link_width;
struct work_struct check_link_status_work;
bool link_force_down;
};
static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
{
return container_of(ntb, struct switchtec_ntb, ntb);
}
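/*
* Kick off a partition operation (lock, configure or reset) and poll the
* partition status until the hardware leaves the corresponding "in
* progress" state, for up to roughly 50 seconds. Returns 0 once
* wait_status is reached, -EINTR if interrupted by a signal, -ETIMEDOUT
* on timeout and -EIO on any other unexpected status.
*/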
static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
struct ntb_ctrl_regs __iomem *ctl,
u32 op, int wait_status)
{
static const char * const op_text[] = {
[NTB_CTRL_PART_OP_LOCK] = "lock",
[NTB_CTRL_PART_OP_CFG] = "configure",
[NTB_CTRL_PART_OP_RESET] = "reset",
};
int i;
u32 ps;
int status;
switch (op) {
case NTB_CTRL_PART_OP_LOCK:
status = NTB_CTRL_PART_STATUS_LOCKING;
break;
case NTB_CTRL_PART_OP_CFG:
status = NTB_CTRL_PART_STATUS_CONFIGURING;
break;
case NTB_CTRL_PART_OP_RESET:
status = NTB_CTRL_PART_STATUS_RESETTING;
break;
default:
return -EINVAL;
}
iowrite32(op, &ctl->partition_op);
for (i = 0; i < 1000; i++) {
if (msleep_interruptible(50) != 0) {
iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
return -EINTR;
}
ps = ioread32(&ctl->partition_status) & 0xFFFF;
if (ps != status)
break;
}
if (ps == wait_status)
return 0;
if (ps == status) {
dev_err(&sndev->stdev->dev,
"Timed out while performing %s (%d). (%08x)\n",
op_text[op], op,
ioread32(&ctl->partition_status));
return -ETIMEDOUT;
}
return -EIO;
}
static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
u32 val)
{
if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg))
return -EINVAL;
iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg);
return 0;
}
static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
int nr_direct_mw = sndev->peer_nr_direct_mw;
int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
if (!use_lut_mws)
nr_lut_mw = 0;
return nr_direct_mw + nr_lut_mw;
}
static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
}
static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
}
static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
int widx, resource_size_t *addr_align,
resource_size_t *size_align,
resource_size_t *size_max)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
int lut;
resource_size_t size;
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
lut = widx >= sndev->peer_nr_direct_mw;
size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
if (size == 0)
return -EINVAL;
if (addr_align)
*addr_align = lut ? size : SZ_4K;
if (size_align)
*size_align = lut ? size : SZ_4K;
if (size_max)
*size_max = size;
return 0;
}
static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
{
struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
int bar = sndev->peer_direct_mw_to_bar[idx];
u32 ctl_val;
ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
iowrite32(0, &ctl->bar_entry[bar].win_size);
iowrite32(0, &ctl->bar_ext_entry[bar].win_size);
iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
}
static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
{
struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}
static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
dma_addr_t addr, resource_size_t size)
{
int xlate_pos = ilog2(size);
int bar = sndev->peer_direct_mw_to_bar[idx];
struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
u32 ctl_val;
ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
&ctl->bar_entry[bar].win_size);
iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
iowrite64(sndev->self_partition | addr,
&ctl->bar_entry[bar].xlate_addr);
}
static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
dma_addr_t addr, resource_size_t size)
{
struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
&ctl->lut_entry[peer_lut_index(sndev, idx)]);
}
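/*
* Set up (or tear down, when size is zero) a peer-visible memory window.
* Indexes below the direct window count use a whole BAR entry; higher
* indexes use LUT entries. The partition registers are locked before the
* update and re-configured afterwards, and a hardware error rolls the
* window back.
*/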
static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
dma_addr_t addr, resource_size_t size)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
int xlate_pos = ilog2(size);
int nr_direct_mw = sndev->peer_nr_direct_mw;
int rc;
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
widx, pidx, &addr, &size);
if (widx >= switchtec_ntb_mw_count(ntb, pidx))
return -EINVAL;
if (size != 0 && xlate_pos < 12)
return -EINVAL;
if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
/*
* In certain circumstances we can get a buffer that is
* not aligned to its size. (Most of the time
* dma_alloc_coherent ensures this). This can happen when
* using large buffers allocated by the CMA
* (see CONFIG_CMA_ALIGNMENT)
*/
dev_err(&sndev->stdev->dev,
"ERROR: Memory window address is not aligned to its size!\n");
return -EINVAL;
}
rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
NTB_CTRL_PART_STATUS_LOCKED);
if (rc)
return rc;
if (size == 0) {
if (widx < nr_direct_mw)
switchtec_ntb_mw_clr_direct(sndev, widx);
else
switchtec_ntb_mw_clr_lut(sndev, widx);
} else {
if (widx < nr_direct_mw)
switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
else
switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
}
rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
NTB_CTRL_PART_STATUS_NORMAL);
if (rc == -EIO) {
dev_err(&sndev->stdev->dev,
"Hardware reported an error configuring mw %d: %08x\n",
widx, ioread32(&ctl->bar_error));
if (widx < nr_direct_mw)
switchtec_ntb_mw_clr_direct(sndev, widx);
else
switchtec_ntb_mw_clr_lut(sndev, widx);
switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
NTB_CTRL_PART_STATUS_NORMAL);
}
return rc;
}
static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;
return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
}
static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
int idx, phys_addr_t *base,
resource_size_t *size)
{
int bar = sndev->direct_mw_to_bar[idx];
size_t offset = 0;
if (bar < 0)
return -EINVAL;
if (idx == 0) {
/*
* This is the direct BAR shared with the LUTs
* which means the actual window will be offset
* by the size of all the LUT entries.
*/
offset = LUT_SIZE * sndev->nr_lut_mw;
}
if (base)
*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
if (size) {
*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
if (offset && *size > offset)
*size = offset;
if (*size > max_mw_size)
*size = max_mw_size;
}
return 0;
}
static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
int idx, phys_addr_t *base,
resource_size_t *size)
{
int bar = sndev->direct_mw_to_bar[0];
int offset;
offset = LUT_SIZE * lut_index(sndev, idx);
if (base)
*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
if (size)
*size = LUT_SIZE;
return 0;
}
static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
phys_addr_t *base,
resource_size_t *size)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
if (idx < sndev->nr_direct_mw)
return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
else if (idx < switchtec_ntb_peer_mw_count(ntb))
return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
else
return -EINVAL;
}
static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
int partition,
enum ntb_speed *speed,
enum ntb_width *width)
{
struct switchtec_dev *stdev = sndev->stdev;
struct part_cfg_regs __iomem *part_cfg =
&stdev->mmio_part_cfg_all[partition];
u32 pff = ioread32(&part_cfg->vep_pff_inst_id) & 0xFF;
u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
if (speed)
*speed = (linksta >> 16) & 0xF;
if (width)
*width = (linksta >> 20) & 0x3F;
}
static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
{
enum ntb_speed self_speed, peer_speed;
enum ntb_width self_width, peer_width;
if (!sndev->link_is_up) {
sndev->link_speed = NTB_SPEED_NONE;
sndev->link_width = NTB_WIDTH_NONE;
return;
}
switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
&self_speed, &self_width);
switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
&peer_speed, &peer_width);
sndev->link_speed = min(self_speed, peer_speed);
sndev->link_width = min(self_width, peer_width);
}
static int crosslink_is_enabled(struct switchtec_ntb *sndev)
{
struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;
return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
}
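/*
* In crosslink mode the peer's dbmsg registers are reached through the
* reserved LUT window, so the message map and the outbound doorbell mask
* are programmed from this side. This is a no-op when crosslink is not
* enabled.
*/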
static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev)
{
int i;
u32 msg_map = 0;
if (!crosslink_is_enabled(sndev))
return;
for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) {
int m = i | sndev->self_partition << 2;
msg_map |= m << i * 8;
}
iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map);
iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
&sndev->mmio_peer_dbmsg->odb_mask);
}
enum switchtec_msg {
LINK_MESSAGE = 0,
MSG_LINK_UP = 1,
MSG_LINK_DOWN = 2,
MSG_CHECK_LINK = 3,
MSG_LINK_FORCE_DOWN = 4,
};
static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev);
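/*
* Recompute the link state: the link counts as up only when this side has
* enabled it and the peer's shared page carries the expected magic with
* its own link flag set. On a change, notify the peer, raise an ntb link
* event and, if the link came up, reinitialize the crosslink dbmsg
* registers.
*/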
static void switchtec_ntb_link_status_update(struct switchtec_ntb *sndev)
{
int link_sta;
int old = sndev->link_is_up;
link_sta = sndev->self_shared->link_sta;
if (link_sta) {
u64 peer = ioread64(&sndev->peer_shared->magic);
if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
link_sta = peer >> 32;
else
link_sta = 0;
}
sndev->link_is_up = link_sta;
switchtec_ntb_set_link_speed(sndev);
if (link_sta != old) {
switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
ntb_link_event(&sndev->ntb);
dev_info(&sndev->stdev->dev, "ntb link %s\n",
link_sta ? "up" : "down");
if (link_sta)
crosslink_init_dbmsgs(sndev);
}
}
static void check_link_status_work(struct work_struct *work)
{
struct switchtec_ntb *sndev;
sndev = container_of(work, struct switchtec_ntb,
check_link_status_work);
if (sndev->link_force_down) {
sndev->link_force_down = false;
switchtec_ntb_reinit_peer(sndev);
if (sndev->link_is_up) {
sndev->link_is_up = 0;
ntb_link_event(&sndev->ntb);
dev_info(&sndev->stdev->dev, "ntb link forced down\n");
}
return;
}
switchtec_ntb_link_status_update(sndev);
}
static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
enum switchtec_msg msg)
{
if (msg == MSG_LINK_FORCE_DOWN)
sndev->link_force_down = true;
schedule_work(&sndev->check_link_status_work);
}
static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
{
struct switchtec_ntb *sndev = stdev->sndev;
switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
}
static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
enum ntb_speed *speed,
enum ntb_width *width)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
if (speed)
*speed = sndev->link_speed;
if (width)
*width = sndev->link_width;
return sndev->link_is_up;
}
static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
enum ntb_speed max_speed,
enum ntb_width max_width)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
dev_dbg(&sndev->stdev->dev, "enabling link\n");
sndev->self_shared->link_sta = 1;
switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
switchtec_ntb_link_status_update(sndev);
return 0;
}
static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
dev_dbg(&sndev->stdev->dev, "disabling link\n");
sndev->self_shared->link_sta = 0;
switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);
switchtec_ntb_link_status_update(sndev);
return 0;
}
static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
return sndev->db_valid_mask;
}
static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
{
return 1;
}
static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
if (db_vector < 0 || db_vector > 1)
return 0;
return sndev->db_valid_mask;
}
static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
{
u64 ret;
struct switchtec_ntb *sndev = ntb_sndev(ntb);
ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
return ret & sndev->db_valid_mask;
}
static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
return 0;
}
static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
unsigned long irqflags;
struct switchtec_ntb *sndev = ntb_sndev(ntb);
if (db_bits & ~sndev->db_valid_mask)
return -EINVAL;
spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
sndev->db_mask |= db_bits << sndev->db_shift;
iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
return 0;
}
static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
unsigned long irqflags;
struct switchtec_ntb *sndev = ntb_sndev(ntb);
if (db_bits & ~sndev->db_valid_mask)
return -EINVAL;
spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
sndev->db_mask &= ~(db_bits << sndev->db_shift);
iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
return 0;
}
static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
}
static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
phys_addr_t *db_addr,
resource_size_t *db_size,
u64 *db_data,
int db_bit)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
unsigned long offset;
if (unlikely(db_bit >= BITS_PER_LONG_LONG))
return -EINVAL;
offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
(unsigned long)sndev->stdev->mmio;
offset += sndev->db_shift / 8;
if (db_addr)
*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
if (db_size)
*db_size = sizeof(u32);
if (db_data)
*db_data = BIT_ULL(db_bit) << sndev->db_peer_shift;
return 0;
}
static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
iowrite64(db_bits << sndev->db_peer_shift,
&sndev->mmio_peer_dbmsg->odb);
return 0;
}
static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
return ARRAY_SIZE(sndev->self_shared->spad);
}
static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
return 0;
if (!sndev->self_shared)
return 0;
return sndev->self_shared->spad[idx];
}
static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
return -EINVAL;
if (!sndev->self_shared)
return -EIO;
sndev->self_shared->spad[idx] = val;
return 0;
}
static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
int sidx)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
return 0;
if (!sndev->peer_shared)
return 0;
return ioread32(&sndev->peer_shared->spad[sidx]);
}
static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
int sidx, u32 val)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
return -EINVAL;
if (!sndev->peer_shared)
return -EIO;
iowrite32(val, &sndev->peer_shared->spad[sidx]);
return 0;
}
static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
int sidx, phys_addr_t *spad_addr)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
unsigned long offset;
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
(unsigned long)sndev->stdev->mmio;
if (spad_addr)
*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
return 0;
}
static const struct ntb_dev_ops switchtec_ntb_ops = {
.mw_count = switchtec_ntb_mw_count,
.mw_get_align = switchtec_ntb_mw_get_align,
.mw_set_trans = switchtec_ntb_mw_set_trans,
.peer_mw_count = switchtec_ntb_peer_mw_count,
.peer_mw_get_addr = switchtec_ntb_peer_mw_get_addr,
.link_is_up = switchtec_ntb_link_is_up,
.link_enable = switchtec_ntb_link_enable,
.link_disable = switchtec_ntb_link_disable,
.db_valid_mask = switchtec_ntb_db_valid_mask,
.db_vector_count = switchtec_ntb_db_vector_count,
.db_vector_mask = switchtec_ntb_db_vector_mask,
.db_read = switchtec_ntb_db_read,
.db_clear = switchtec_ntb_db_clear,
.db_set_mask = switchtec_ntb_db_set_mask,
.db_clear_mask = switchtec_ntb_db_clear_mask,
.db_read_mask = switchtec_ntb_db_read_mask,
.peer_db_addr = switchtec_ntb_peer_db_addr,
.peer_db_set = switchtec_ntb_peer_db_set,
.spad_count = switchtec_ntb_spad_count,
.spad_read = switchtec_ntb_spad_read,
.spad_write = switchtec_ntb_spad_write,
.peer_spad_read = switchtec_ntb_peer_spad_read,
.peer_spad_write = switchtec_ntb_peer_spad_write,
.peer_spad_addr = switchtec_ntb_peer_spad_addr,
};
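/*
* Fill in the basic ntb_dev fields and work out which partition is the
* NTB peer. If the target partition vector is empty the switch must have
* exactly two partitions and the other NT partition is taken as the peer;
* otherwise exactly one 1:1 mapping is supported and the target must
* itself be an NT partition. Finally derive the self/peer ctrl and dbmsg
* register pointers.
*/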
static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
{
u64 tpart_vec;
int self;
u64 part_map;
sndev->ntb.pdev = sndev->stdev->pdev;
sndev->ntb.topo = NTB_TOPO_SWITCH;
sndev->ntb.ops = &switchtec_ntb_ops;
INIT_WORK(&sndev->check_link_status_work, check_link_status_work);
sndev->link_force_down = false;
sndev->self_partition = sndev->stdev->partition;
sndev->mmio_ntb = sndev->stdev->mmio_ntb;
self = sndev->self_partition;
tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
tpart_vec <<= 32;
tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
part_map = ioread64(&sndev->mmio_ntb->ep_map);
tpart_vec &= part_map;
part_map &= ~(1 << sndev->self_partition);
if (!tpart_vec) {
if (sndev->stdev->partition_count != 2) {
dev_err(&sndev->stdev->dev,
"ntb target partition not defined\n");
return -ENODEV;
}
if (!part_map) {
dev_err(&sndev->stdev->dev,
"peer partition is not NT partition\n");
return -ENODEV;
}
sndev->peer_partition = __ffs64(part_map);
} else {
if (__ffs64(tpart_vec) != (fls64(tpart_vec) - 1)) {
dev_err(&sndev->stdev->dev,
"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
return -ENODEV;
}
sndev->peer_partition = __ffs64(tpart_vec);
if (!(part_map & (1ULL << sndev->peer_partition))) {
dev_err(&sndev->stdev->dev,
"ntb target partition is not NT partition\n");
return -ENODEV;
}
}
dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
sndev->self_partition, sndev->stdev->partition_count);
sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
SWITCHTEC_NTB_REG_CTRL_OFFSET;
sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
SWITCHTEC_NTB_REG_DBMSG_OFFSET;
sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg;
return 0;
}
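/*
* Program one reserved LUT entry so that accesses through it translate to
* 'addr' in the given partition. The BAR backing the LUTs is switched
* into LUT-window mode with the window size and LUT count encoded in its
* control register, all under a partition lock/configure sequence.
*/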
static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
struct ntb_ctrl_regs __iomem *ctl,
int lut_idx, int partition, u64 addr)
{
int peer_bar = sndev->peer_direct_mw_to_bar[0];
u32 ctl_val;
int rc;
rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
NTB_CTRL_PART_STATUS_LOCKED);
if (rc)
return rc;
ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
ctl_val &= 0xFF;
ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
ctl_val |= ilog2(LUT_SIZE) << 8;
ctl_val |= (sndev->nr_lut_mw - 1) << 14;
iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);
iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
&ctl->lut_entry[lut_idx]);
rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
NTB_CTRL_PART_STATUS_NORMAL);
if (rc) {
u32 bar_error, lut_error;
bar_error = ioread32(&ctl->bar_error);
lut_error = ioread32(&ctl->lut_error);
dev_err(&sndev->stdev->dev,
"Error setting up reserved lut window: %08x / %08x\n",
bar_error, lut_error);
return rc;
}
return 0;
}
static int config_req_id_table(struct switchtec_ntb *sndev,
struct ntb_ctrl_regs __iomem *mmio_ctrl,
int *req_ids, int count)
{
int i, rc = 0;
u32 error;
u32 proxy_id;
if (ioread16(&mmio_ctrl->req_id_table_size) < count) {
dev_err(&sndev->stdev->dev,
"Not enough requester IDs available.\n");
return -EFAULT;
}
rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
NTB_CTRL_PART_OP_LOCK,
NTB_CTRL_PART_STATUS_LOCKED);
if (rc)
return rc;
for (i = 0; i < count; i++) {
iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
&mmio_ctrl->req_id_table[i]);
proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
dev_dbg(&sndev->stdev->dev,
"Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
(proxy_id >> 1) & 0x7);
}
rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
NTB_CTRL_PART_OP_CFG,
NTB_CTRL_PART_STATUS_NORMAL);
if (rc == -EIO) {
error = ioread32(&mmio_ctrl->req_id_error);
dev_err(&sndev->stdev->dev,
"Error setting up the requester ID table: %08x\n",
error);
}
return 0;
}
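/*
* Crosslink mode: point this side's inbound LUT and direct windows at the
* BAR addresses enumerated in the crosslink partition, skipping the LUT
* entry reserved for the peer dbmsg registers. Everything runs under a
* partition lock/configure sequence, with the BAR/LUT error registers
* reported on failure.
*/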
static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
u64 *mw_addrs, int mw_count)
{
int rc, i;
struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl;
u64 addr;
size_t size, offset;
int bar;
int xlate_pos;
u32 ctl_val;
rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
NTB_CTRL_PART_STATUS_LOCKED);
if (rc)
return rc;
for (i = 0; i < sndev->nr_lut_mw; i++) {
if (i == ntb_lut_idx)
continue;
addr = mw_addrs[0] + LUT_SIZE * i;
iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) |
addr),
&ctl->lut_entry[i]);
}
sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count);
for (i = 0; i < sndev->nr_direct_mw; i++) {
bar = sndev->direct_mw_to_bar[i];
offset = (i == 0) ? LUT_SIZE * sndev->nr_lut_mw : 0;
addr = mw_addrs[i] + offset;
size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
xlate_pos = ilog2(size);
if (offset && size > offset)
size = offset;
ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
&ctl->bar_entry[bar].win_size);
iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
iowrite64(sndev->peer_partition | addr,
&ctl->bar_entry[bar].xlate_addr);
}
rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
NTB_CTRL_PART_STATUS_NORMAL);
if (rc) {
u32 bar_error, lut_error;
bar_error = ioread32(&ctl->bar_error);
lut_error = ioread32(&ctl->lut_error);
dev_err(&sndev->stdev->dev,
"Error setting up cross link windows: %08x / %08x\n",
bar_error, lut_error);
return rc;
}
return 0;
}
static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
struct ntb_ctrl_regs __iomem *mmio_ctrl)
{
int req_ids[16];
int i;
u32 proxy_id;
for (i = 0; i < ARRAY_SIZE(req_ids); i++) {
proxy_id = ioread32(&sndev->mmio_self_ctrl->req_id_table[i]);
if (!(proxy_id & NTB_CTRL_REQ_ID_EN))
break;
req_ids[i] = ((proxy_id >> 1) & 0xFF);
}
return config_req_id_table(sndev, mmio_ctrl, req_ids, i);
}
/*
* In crosslink configuration there is a virtual partition in the
* middle of the two switches. The BARs in this partition have to be
* enumerated and assigned addresses.
*/
static int crosslink_enum_partition(struct switchtec_ntb *sndev,
u64 *bar_addrs)
{
struct part_cfg_regs __iomem *part_cfg =
&sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
u32 pff = ioread32(&part_cfg->vep_pff_inst_id) & 0xFF;
struct pff_csr_regs __iomem *mmio_pff =
&sndev->stdev->mmio_pff_csr[pff];
const u64 bar_space = 0x1000000000LL;
u64 bar_addr;
int bar_cnt = 0;
int i;
iowrite16(0x6, &mmio_pff->pcicmd);
for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
bar_addr &= ~0xf;
dev_dbg(&sndev->stdev->dev,
"Crosslink BAR%d addr: %llx\n",
i*2, bar_addr);
if (bar_addr != bar_space * i)
continue;
bar_addrs[bar_cnt++] = bar_addr;
}
return bar_cnt;
}
static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
{
int rc;
int bar = sndev->direct_mw_to_bar[0];
const int ntb_lut_idx = 1;
u64 bar_addrs[6];
u64 addr;
int offset;
int bar_cnt;
if (!crosslink_is_enabled(sndev))
return 0;
dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
sndev->ntb.topo = NTB_TOPO_CROSSLINK;
bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
if (bar_cnt < sndev->nr_direct_mw + 1) {
dev_err(&sndev->stdev->dev,
"Error enumerating crosslink partition\n");
return -EINVAL;
}
addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET +
SWITCHTEC_NTB_REG_DBMSG_OFFSET +
sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition);
offset = addr & (LUT_SIZE - 1);
addr -= offset;
rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
sndev->peer_partition, addr);
if (rc)
return rc;
rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
bar_cnt - 1);
if (rc)
return rc;
rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
if (rc)
return rc;
sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
LUT_SIZE, LUT_SIZE);
if (!sndev->mmio_xlink_win) {
rc = -ENOMEM;
return rc;
}
sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
sndev->nr_rsvd_luts++;
crosslink_init_dbmsgs(sndev);
return 0;
}
static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
{
if (sndev->mmio_xlink_win)
pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
}
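/*
* Collect the indexes of all valid BAR entries into 'map' and return how
* many were found.
*/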
static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
{
int i;
int cnt = 0;
for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
u32 r = ioread32(&ctrl->bar_entry[i].ctl);
if (r & NTB_CTRL_BAR_VALID)
map[cnt++] = i;
}
return cnt;
}
static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
{
sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
sndev->mmio_self_ctrl);
sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
sndev->nr_direct_mw, sndev->nr_lut_mw);
sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
sndev->mmio_peer_ctrl);
sndev->peer_nr_lut_mw =
ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
}
/*
* There are 64 doorbells in the switch hardware but this is
* shared among all partitions. So we must split them in half
* (32 for each partition). However, the message interrupts are
* also shared with the top 4 doorbells so we just limit this to
* 28 doorbells per partition.
*
* In crosslink mode, each side has its own dbmsg register so
* they can each use all 60 of the available doorbells.
*/
static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
{
sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) {
sndev->db_shift = 0;
sndev->db_peer_shift = 0;
sndev->db_valid_mask = sndev->db_mask;
} else if (sndev->self_partition < sndev->peer_partition) {
sndev->db_shift = 0;
sndev->db_peer_shift = 32;
sndev->db_valid_mask = 0x0FFFFFFF;
} else {
sndev->db_shift = 32;
sndev->db_peer_shift = 0;
sndev->db_valid_mask = 0x0FFFFFFF;
}
iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
&sndev->mmio_peer_dbmsg->odb_mask);
dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n",
sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask);
}
static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
{
int i;
u32 msg_map = 0;
for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
int m = i | sndev->peer_partition << 2;
msg_map |= m << i * 8;
}
iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
&sndev->mmio_self_dbmsg->imsg[i]);
}
static int
switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
{
int req_ids[2];
/*
* Root Complex Requester ID (which is 0:00.0)
*/
req_ids[0] = 0;
/*
* Host Bridge Requester ID (as read from the mmap address)
*/
req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);
return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
ARRAY_SIZE(req_ids));
}
static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
{
int i;
memset(sndev->self_shared, 0, LUT_SIZE);
sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
sndev->self_shared->partition_id = sndev->stdev->partition;
for (i = 0; i < sndev->nr_direct_mw; i++) {
int bar = sndev->direct_mw_to_bar[i];
resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
if (i == 0)
sz = min_t(resource_size_t, sz,
LUT_SIZE * sndev->nr_lut_mw);
sndev->self_shared->mw_sizes[i] = sz;
}
for (i = 0; i < sndev->nr_lut_mw; i++) {
int idx = sndev->nr_direct_mw + i;
sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
}
}
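/*
* Allocate a LUT-sized shared page (magic, link state, MW sizes and
* scratchpads), expose it to the peer through the peer's reserved LUT 0,
* and iomap the first LUT window of our own BAR, which reaches the peer's
* shared page once the peer has performed the same setup.
*/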
static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
{
int self_bar = sndev->direct_mw_to_bar[0];
int rc;
sndev->nr_rsvd_luts++;
sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev,
LUT_SIZE,
&sndev->self_shared_dma,
GFP_KERNEL);
if (!sndev->self_shared) {
dev_err(&sndev->stdev->dev,
"unable to allocate memory for shared mw\n");
return -ENOMEM;
}
switchtec_ntb_init_shared(sndev);
rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
sndev->self_partition,
sndev->self_shared_dma);
if (rc)
goto unalloc_and_exit;
sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
if (!sndev->peer_shared) {
rc = -ENOMEM;
goto unalloc_and_exit;
}
dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
return 0;
unalloc_and_exit:
dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
sndev->self_shared, sndev->self_shared_dma);
return rc;
}
static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
{
if (sndev->peer_shared)
pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
if (sndev->self_shared)
dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
sndev->self_shared,
sndev->self_shared_dma);
sndev->nr_rsvd_luts--;
}
static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
{
struct switchtec_ntb *sndev = dev;
dev_dbg(&sndev->stdev->dev, "doorbell\n");
ntb_db_event(&sndev->ntb, 0);
return IRQ_HANDLED;
}
static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
{
int i;
struct switchtec_ntb *sndev = dev;
for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
if (msg & NTB_DBMSG_IMSG_STATUS) {
dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
i, (u32)msg);
iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
if (i == LINK_MESSAGE)
switchtec_ntb_check_link(sndev, msg);
}
}
return IRQ_HANDLED;
}
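/*
* Pick doorbell and message interrupt vectors that do not collide with
* the event vector already used by the switchtec driver, steer the
* inbound doorbell vector map accordingly (the last four entries carry
* the messages) and request both IRQs.
*/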
static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
{
int i;
int rc;
int doorbell_irq = 0;
int message_irq = 0;
int event_irq;
int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);
event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);
while (doorbell_irq == event_irq)
doorbell_irq++;
while (message_irq == doorbell_irq ||
message_irq == event_irq)
message_irq++;
dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
event_irq, doorbell_irq, message_irq);
for (i = 0; i < idb_vecs - 4; i++)
iowrite8(doorbell_irq,
&sndev->mmio_self_dbmsg->idb_vec_map[i]);
for (; i < idb_vecs; i++)
iowrite8(message_irq,
&sndev->mmio_self_dbmsg->idb_vec_map[i]);
sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);
rc = request_irq(sndev->doorbell_irq,
switchtec_ntb_doorbell_isr, 0,
"switchtec_ntb_doorbell", sndev);
if (rc)
return rc;
rc = request_irq(sndev->message_irq,
switchtec_ntb_message_isr, 0,
"switchtec_ntb_message", sndev);
if (rc) {
free_irq(sndev->doorbell_irq, sndev);
return rc;
}
return 0;
}
static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
{
free_irq(sndev->doorbell_irq, sndev);
free_irq(sndev->message_irq, sndev);
}
static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev)
{
int rc;
if (crosslink_is_enabled(sndev))
return 0;
dev_info(&sndev->stdev->dev, "reinitialize shared memory window\n");
rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
sndev->self_partition,
sndev->self_shared_dma);
return rc;
}
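/*
* Class interface add callback: build and register an NTB device on top
* of a switchtec management device. Partition discovery, memory windows,
* requester IDs, crosslink, doorbells, messages, the shared memory window
* and the IRQs are set up in that order, with matching teardown on
* failure.
*/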
static int switchtec_ntb_add(struct device *dev)
{
struct switchtec_dev *stdev = to_stdev(dev);
struct switchtec_ntb *sndev;
int rc;
stdev->sndev = NULL;
if (stdev->pdev->class != (PCI_CLASS_BRIDGE_OTHER << 8))
return -ENODEV;
sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
if (!sndev)
return -ENOMEM;
sndev->stdev = stdev;
rc = switchtec_ntb_init_sndev(sndev);
if (rc)
goto free_and_exit;
switchtec_ntb_init_mw(sndev);
rc = switchtec_ntb_init_req_id_table(sndev);
if (rc)
goto free_and_exit;
rc = switchtec_ntb_init_crosslink(sndev);
if (rc)
goto free_and_exit;
switchtec_ntb_init_db(sndev);
switchtec_ntb_init_msgs(sndev);
rc = switchtec_ntb_init_shared_mw(sndev);
if (rc)
goto deinit_crosslink;
rc = switchtec_ntb_init_db_msg_irq(sndev);
if (rc)
goto deinit_shared_and_exit;
/*
* If this host crashed, the other host may think the link is
* still up. Tell them to force it down (it will go back up
* once we register the ntb device).
*/
switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_FORCE_DOWN);
rc = ntb_register_device(&sndev->ntb);
if (rc)
goto deinit_and_exit;
stdev->sndev = sndev;
stdev->link_notifier = switchtec_ntb_link_notification;
dev_info(dev, "NTB device registered\n");
return 0;
deinit_and_exit:
switchtec_ntb_deinit_db_msg_irq(sndev);
deinit_shared_and_exit:
switchtec_ntb_deinit_shared_mw(sndev);
deinit_crosslink:
switchtec_ntb_deinit_crosslink(sndev);
free_and_exit:
kfree(sndev);
dev_err(dev, "failed to register ntb device: %d\n", rc);
return rc;
}
static void switchtec_ntb_remove(struct device *dev)
{
struct switchtec_dev *stdev = to_stdev(dev);
struct switchtec_ntb *sndev = stdev->sndev;
if (!sndev)
return;
stdev->link_notifier = NULL;
stdev->sndev = NULL;
ntb_unregister_device(&sndev->ntb);
switchtec_ntb_deinit_db_msg_irq(sndev);
switchtec_ntb_deinit_shared_mw(sndev);
switchtec_ntb_deinit_crosslink(sndev);
kfree(sndev);
dev_info(dev, "ntb device unregistered\n");
}
static struct class_interface switchtec_interface = {
.add_dev = switchtec_ntb_add,
.remove_dev = switchtec_ntb_remove,
};
static int __init switchtec_ntb_init(void)
{
switchtec_interface.class = switchtec_class;
return class_interface_register(&switchtec_interface);
}
module_init(switchtec_ntb_init);
static void __exit switchtec_ntb_exit(void)
{
class_interface_unregister(&switchtec_interface);
}
module_exit(switchtec_ntb_exit);
| linux-master | drivers/ntb/hw/mscc/ntb_hw_switchtec.c |
/*
* This file is provided under a GPLv2 license. When using or
* redistributing this file, you may do so under that license.
*
* GPL LICENSE SUMMARY
*
* Copyright (C) 2016-2018 T-Platforms JSC All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, one can be found http://www.gnu.org/licenses/.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* IDT PCIe-switch NTB Linux driver
*
* Contact Information:
* Serge Semin <[email protected]>, <[email protected]>
*/
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/sizes.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/ntb.h>
#include "ntb_hw_idt.h"
#define NTB_NAME "ntb_hw_idt"
#define NTB_DESC "IDT PCI-E Non-Transparent Bridge Driver"
#define NTB_VER "2.0"
#define NTB_IRQNAME "ntb_irq_idt"
MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("T-platforms");
/*
* NT Endpoint register table that simplifies looped access to the
* functionally related registers
*/
static const struct idt_ntb_regs ntdata_tbl = {
{ {IDT_NT_BARSETUP0, IDT_NT_BARLIMIT0,
IDT_NT_BARLTBASE0, IDT_NT_BARUTBASE0},
{IDT_NT_BARSETUP1, IDT_NT_BARLIMIT1,
IDT_NT_BARLTBASE1, IDT_NT_BARUTBASE1},
{IDT_NT_BARSETUP2, IDT_NT_BARLIMIT2,
IDT_NT_BARLTBASE2, IDT_NT_BARUTBASE2},
{IDT_NT_BARSETUP3, IDT_NT_BARLIMIT3,
IDT_NT_BARLTBASE3, IDT_NT_BARUTBASE3},
{IDT_NT_BARSETUP4, IDT_NT_BARLIMIT4,
IDT_NT_BARLTBASE4, IDT_NT_BARUTBASE4},
{IDT_NT_BARSETUP5, IDT_NT_BARLIMIT5,
IDT_NT_BARLTBASE5, IDT_NT_BARUTBASE5} },
{ {IDT_NT_INMSG0, IDT_NT_OUTMSG0, IDT_NT_INMSGSRC0},
{IDT_NT_INMSG1, IDT_NT_OUTMSG1, IDT_NT_INMSGSRC1},
{IDT_NT_INMSG2, IDT_NT_OUTMSG2, IDT_NT_INMSGSRC2},
{IDT_NT_INMSG3, IDT_NT_OUTMSG3, IDT_NT_INMSGSRC3} }
};
/*
* NT Endpoint ports data table with the corresponding pcie command, link
* status, control and BAR-related registers
*/
static const struct idt_ntb_port portdata_tbl[IDT_MAX_NR_PORTS] = {
/*0*/ { IDT_SW_NTP0_PCIECMDSTS, IDT_SW_NTP0_PCIELCTLSTS,
IDT_SW_NTP0_NTCTL,
IDT_SW_SWPORT0CTL, IDT_SW_SWPORT0STS,
{ {IDT_SW_NTP0_BARSETUP0, IDT_SW_NTP0_BARLIMIT0,
IDT_SW_NTP0_BARLTBASE0, IDT_SW_NTP0_BARUTBASE0},
{IDT_SW_NTP0_BARSETUP1, IDT_SW_NTP0_BARLIMIT1,
IDT_SW_NTP0_BARLTBASE1, IDT_SW_NTP0_BARUTBASE1},
{IDT_SW_NTP0_BARSETUP2, IDT_SW_NTP0_BARLIMIT2,
IDT_SW_NTP0_BARLTBASE2, IDT_SW_NTP0_BARUTBASE2},
{IDT_SW_NTP0_BARSETUP3, IDT_SW_NTP0_BARLIMIT3,
IDT_SW_NTP0_BARLTBASE3, IDT_SW_NTP0_BARUTBASE3},
{IDT_SW_NTP0_BARSETUP4, IDT_SW_NTP0_BARLIMIT4,
IDT_SW_NTP0_BARLTBASE4, IDT_SW_NTP0_BARUTBASE4},
{IDT_SW_NTP0_BARSETUP5, IDT_SW_NTP0_BARLIMIT5,
IDT_SW_NTP0_BARLTBASE5, IDT_SW_NTP0_BARUTBASE5} } },
/*1*/ {0},
/*2*/ { IDT_SW_NTP2_PCIECMDSTS, IDT_SW_NTP2_PCIELCTLSTS,
IDT_SW_NTP2_NTCTL,
IDT_SW_SWPORT2CTL, IDT_SW_SWPORT2STS,
{ {IDT_SW_NTP2_BARSETUP0, IDT_SW_NTP2_BARLIMIT0,
IDT_SW_NTP2_BARLTBASE0, IDT_SW_NTP2_BARUTBASE0},
{IDT_SW_NTP2_BARSETUP1, IDT_SW_NTP2_BARLIMIT1,
IDT_SW_NTP2_BARLTBASE1, IDT_SW_NTP2_BARUTBASE1},
{IDT_SW_NTP2_BARSETUP2, IDT_SW_NTP2_BARLIMIT2,
IDT_SW_NTP2_BARLTBASE2, IDT_SW_NTP2_BARUTBASE2},
{IDT_SW_NTP2_BARSETUP3, IDT_SW_NTP2_BARLIMIT3,
IDT_SW_NTP2_BARLTBASE3, IDT_SW_NTP2_BARUTBASE3},
{IDT_SW_NTP2_BARSETUP4, IDT_SW_NTP2_BARLIMIT4,
IDT_SW_NTP2_BARLTBASE4, IDT_SW_NTP2_BARUTBASE4},
{IDT_SW_NTP2_BARSETUP5, IDT_SW_NTP2_BARLIMIT5,
IDT_SW_NTP2_BARLTBASE5, IDT_SW_NTP2_BARUTBASE5} } },
/*3*/ {0},
/*4*/ { IDT_SW_NTP4_PCIECMDSTS, IDT_SW_NTP4_PCIELCTLSTS,
IDT_SW_NTP4_NTCTL,
IDT_SW_SWPORT4CTL, IDT_SW_SWPORT4STS,
{ {IDT_SW_NTP4_BARSETUP0, IDT_SW_NTP4_BARLIMIT0,
IDT_SW_NTP4_BARLTBASE0, IDT_SW_NTP4_BARUTBASE0},
{IDT_SW_NTP4_BARSETUP1, IDT_SW_NTP4_BARLIMIT1,
IDT_SW_NTP4_BARLTBASE1, IDT_SW_NTP4_BARUTBASE1},
{IDT_SW_NTP4_BARSETUP2, IDT_SW_NTP4_BARLIMIT2,
IDT_SW_NTP4_BARLTBASE2, IDT_SW_NTP4_BARUTBASE2},
{IDT_SW_NTP4_BARSETUP3, IDT_SW_NTP4_BARLIMIT3,
IDT_SW_NTP4_BARLTBASE3, IDT_SW_NTP4_BARUTBASE3},
{IDT_SW_NTP4_BARSETUP4, IDT_SW_NTP4_BARLIMIT4,
IDT_SW_NTP4_BARLTBASE4, IDT_SW_NTP4_BARUTBASE4},
{IDT_SW_NTP4_BARSETUP5, IDT_SW_NTP4_BARLIMIT5,
IDT_SW_NTP4_BARLTBASE5, IDT_SW_NTP4_BARUTBASE5} } },
/*5*/ {0},
/*6*/ { IDT_SW_NTP6_PCIECMDSTS, IDT_SW_NTP6_PCIELCTLSTS,
IDT_SW_NTP6_NTCTL,
IDT_SW_SWPORT6CTL, IDT_SW_SWPORT6STS,
{ {IDT_SW_NTP6_BARSETUP0, IDT_SW_NTP6_BARLIMIT0,
IDT_SW_NTP6_BARLTBASE0, IDT_SW_NTP6_BARUTBASE0},
{IDT_SW_NTP6_BARSETUP1, IDT_SW_NTP6_BARLIMIT1,
IDT_SW_NTP6_BARLTBASE1, IDT_SW_NTP6_BARUTBASE1},
{IDT_SW_NTP6_BARSETUP2, IDT_SW_NTP6_BARLIMIT2,
IDT_SW_NTP6_BARLTBASE2, IDT_SW_NTP6_BARUTBASE2},
{IDT_SW_NTP6_BARSETUP3, IDT_SW_NTP6_BARLIMIT3,
IDT_SW_NTP6_BARLTBASE3, IDT_SW_NTP6_BARUTBASE3},
{IDT_SW_NTP6_BARSETUP4, IDT_SW_NTP6_BARLIMIT4,
IDT_SW_NTP6_BARLTBASE4, IDT_SW_NTP6_BARUTBASE4},
{IDT_SW_NTP6_BARSETUP5, IDT_SW_NTP6_BARLIMIT5,
IDT_SW_NTP6_BARLTBASE5, IDT_SW_NTP6_BARUTBASE5} } },
/*7*/ {0},
/*8*/ { IDT_SW_NTP8_PCIECMDSTS, IDT_SW_NTP8_PCIELCTLSTS,
IDT_SW_NTP8_NTCTL,
IDT_SW_SWPORT8CTL, IDT_SW_SWPORT8STS,
{ {IDT_SW_NTP8_BARSETUP0, IDT_SW_NTP8_BARLIMIT0,
IDT_SW_NTP8_BARLTBASE0, IDT_SW_NTP8_BARUTBASE0},
{IDT_SW_NTP8_BARSETUP1, IDT_SW_NTP8_BARLIMIT1,
IDT_SW_NTP8_BARLTBASE1, IDT_SW_NTP8_BARUTBASE1},
{IDT_SW_NTP8_BARSETUP2, IDT_SW_NTP8_BARLIMIT2,
IDT_SW_NTP8_BARLTBASE2, IDT_SW_NTP8_BARUTBASE2},
{IDT_SW_NTP8_BARSETUP3, IDT_SW_NTP8_BARLIMIT3,
IDT_SW_NTP8_BARLTBASE3, IDT_SW_NTP8_BARUTBASE3},
{IDT_SW_NTP8_BARSETUP4, IDT_SW_NTP8_BARLIMIT4,
IDT_SW_NTP8_BARLTBASE4, IDT_SW_NTP8_BARUTBASE4},
{IDT_SW_NTP8_BARSETUP5, IDT_SW_NTP8_BARLIMIT5,
IDT_SW_NTP8_BARLTBASE5, IDT_SW_NTP8_BARUTBASE5} } },
/*9*/ {0},
/*10*/ {0},
/*11*/ {0},
/*12*/ { IDT_SW_NTP12_PCIECMDSTS, IDT_SW_NTP12_PCIELCTLSTS,
IDT_SW_NTP12_NTCTL,
IDT_SW_SWPORT12CTL, IDT_SW_SWPORT12STS,
{ {IDT_SW_NTP12_BARSETUP0, IDT_SW_NTP12_BARLIMIT0,
IDT_SW_NTP12_BARLTBASE0, IDT_SW_NTP12_BARUTBASE0},
{IDT_SW_NTP12_BARSETUP1, IDT_SW_NTP12_BARLIMIT1,
IDT_SW_NTP12_BARLTBASE1, IDT_SW_NTP12_BARUTBASE1},
{IDT_SW_NTP12_BARSETUP2, IDT_SW_NTP12_BARLIMIT2,
IDT_SW_NTP12_BARLTBASE2, IDT_SW_NTP12_BARUTBASE2},
{IDT_SW_NTP12_BARSETUP3, IDT_SW_NTP12_BARLIMIT3,
IDT_SW_NTP12_BARLTBASE3, IDT_SW_NTP12_BARUTBASE3},
{IDT_SW_NTP12_BARSETUP4, IDT_SW_NTP12_BARLIMIT4,
IDT_SW_NTP12_BARLTBASE4, IDT_SW_NTP12_BARUTBASE4},
{IDT_SW_NTP12_BARSETUP5, IDT_SW_NTP12_BARLIMIT5,
IDT_SW_NTP12_BARLTBASE5, IDT_SW_NTP12_BARUTBASE5} } },
/*13*/ {0},
/*14*/ {0},
/*15*/ {0},
/*16*/ { IDT_SW_NTP16_PCIECMDSTS, IDT_SW_NTP16_PCIELCTLSTS,
IDT_SW_NTP16_NTCTL,
IDT_SW_SWPORT16CTL, IDT_SW_SWPORT16STS,
{ {IDT_SW_NTP16_BARSETUP0, IDT_SW_NTP16_BARLIMIT0,
IDT_SW_NTP16_BARLTBASE0, IDT_SW_NTP16_BARUTBASE0},
{IDT_SW_NTP16_BARSETUP1, IDT_SW_NTP16_BARLIMIT1,
IDT_SW_NTP16_BARLTBASE1, IDT_SW_NTP16_BARUTBASE1},
{IDT_SW_NTP16_BARSETUP2, IDT_SW_NTP16_BARLIMIT2,
IDT_SW_NTP16_BARLTBASE2, IDT_SW_NTP16_BARUTBASE2},
{IDT_SW_NTP16_BARSETUP3, IDT_SW_NTP16_BARLIMIT3,
IDT_SW_NTP16_BARLTBASE3, IDT_SW_NTP16_BARUTBASE3},
{IDT_SW_NTP16_BARSETUP4, IDT_SW_NTP16_BARLIMIT4,
IDT_SW_NTP16_BARLTBASE4, IDT_SW_NTP16_BARUTBASE4},
{IDT_SW_NTP16_BARSETUP5, IDT_SW_NTP16_BARLIMIT5,
IDT_SW_NTP16_BARLTBASE5, IDT_SW_NTP16_BARUTBASE5} } },
/*17*/ {0},
/*18*/ {0},
/*19*/ {0},
/*20*/ { IDT_SW_NTP20_PCIECMDSTS, IDT_SW_NTP20_PCIELCTLSTS,
IDT_SW_NTP20_NTCTL,
IDT_SW_SWPORT20CTL, IDT_SW_SWPORT20STS,
{ {IDT_SW_NTP20_BARSETUP0, IDT_SW_NTP20_BARLIMIT0,
IDT_SW_NTP20_BARLTBASE0, IDT_SW_NTP20_BARUTBASE0},
{IDT_SW_NTP20_BARSETUP1, IDT_SW_NTP20_BARLIMIT1,
IDT_SW_NTP20_BARLTBASE1, IDT_SW_NTP20_BARUTBASE1},
{IDT_SW_NTP20_BARSETUP2, IDT_SW_NTP20_BARLIMIT2,
IDT_SW_NTP20_BARLTBASE2, IDT_SW_NTP20_BARUTBASE2},
{IDT_SW_NTP20_BARSETUP3, IDT_SW_NTP20_BARLIMIT3,
IDT_SW_NTP20_BARLTBASE3, IDT_SW_NTP20_BARUTBASE3},
{IDT_SW_NTP20_BARSETUP4, IDT_SW_NTP20_BARLIMIT4,
IDT_SW_NTP20_BARLTBASE4, IDT_SW_NTP20_BARUTBASE4},
{IDT_SW_NTP20_BARSETUP5, IDT_SW_NTP20_BARLIMIT5,
IDT_SW_NTP20_BARLTBASE5, IDT_SW_NTP20_BARUTBASE5} } },
/*21*/ {0},
/*22*/ {0},
/*23*/ {0}
};
/*
* IDT PCIe-switch partitions table with the corresponding control, status
* and messages control registers
*/
static const struct idt_ntb_part partdata_tbl[IDT_MAX_NR_PARTS] = {
/*0*/ { IDT_SW_SWPART0CTL, IDT_SW_SWPART0STS,
{IDT_SW_SWP0MSGCTL0, IDT_SW_SWP0MSGCTL1,
IDT_SW_SWP0MSGCTL2, IDT_SW_SWP0MSGCTL3} },
/*1*/ { IDT_SW_SWPART1CTL, IDT_SW_SWPART1STS,
{IDT_SW_SWP1MSGCTL0, IDT_SW_SWP1MSGCTL1,
IDT_SW_SWP1MSGCTL2, IDT_SW_SWP1MSGCTL3} },
/*2*/ { IDT_SW_SWPART2CTL, IDT_SW_SWPART2STS,
{IDT_SW_SWP2MSGCTL0, IDT_SW_SWP2MSGCTL1,
IDT_SW_SWP2MSGCTL2, IDT_SW_SWP2MSGCTL3} },
/*3*/ { IDT_SW_SWPART3CTL, IDT_SW_SWPART3STS,
{IDT_SW_SWP3MSGCTL0, IDT_SW_SWP3MSGCTL1,
IDT_SW_SWP3MSGCTL2, IDT_SW_SWP3MSGCTL3} },
/*4*/ { IDT_SW_SWPART4CTL, IDT_SW_SWPART4STS,
{IDT_SW_SWP4MSGCTL0, IDT_SW_SWP4MSGCTL1,
IDT_SW_SWP4MSGCTL2, IDT_SW_SWP4MSGCTL3} },
/*5*/ { IDT_SW_SWPART5CTL, IDT_SW_SWPART5STS,
{IDT_SW_SWP5MSGCTL0, IDT_SW_SWP5MSGCTL1,
IDT_SW_SWP5MSGCTL2, IDT_SW_SWP5MSGCTL3} },
/*6*/ { IDT_SW_SWPART6CTL, IDT_SW_SWPART6STS,
{IDT_SW_SWP6MSGCTL0, IDT_SW_SWP6MSGCTL1,
IDT_SW_SWP6MSGCTL2, IDT_SW_SWP6MSGCTL3} },
/*7*/ { IDT_SW_SWPART7CTL, IDT_SW_SWPART7STS,
{IDT_SW_SWP7MSGCTL0, IDT_SW_SWP7MSGCTL1,
IDT_SW_SWP7MSGCTL2, IDT_SW_SWP7MSGCTL3} }
};
/*
* DebugFS directory to place the driver debug file
*/
static struct dentry *dbgfs_topdir;
/*=============================================================================
* 1. IDT PCIe-switch registers IO-functions
*
 * Beside the ordinary configuration space registers, the IDT PCIe-switch
 * exposes global configuration registers, which are used to determine the
 * state of other device ports as well as to get notified of some
 * switch-related events. Additionally all the configuration space registers
 * of all the IDT PCIe-switch functions are mapped to the Global Address
 * space, so each function can determine the configuration of any other
 * PCI-function. The functions declared in this chapter encapsulate access
 * to the configuration and global registers, so the driver code just needs
 * to provide the IDT NTB hardware descriptor and a register address.
*=============================================================================
*/
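/*
 * Illustrative usage (both calls appear further below in this driver): the
 * NT-function's own inbound doorbell is read with
 * idt_nt_read(ndev, IDT_NT_INDBELLSTS), while the switch-wide event status
 * is fetched through the global address space with
 * idt_sw_read(ndev, IDT_SW_SESTS).
 */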
/*
* idt_nt_write() - PCI configuration space registers write method
* @ndev: IDT NTB hardware driver descriptor
* @reg: Register to write data to
* @data: Value to write to the register
*
* IDT PCIe-switch registers are all Little endian.
*/
static void idt_nt_write(struct idt_ntb_dev *ndev,
const unsigned int reg, const u32 data)
{
	/*
	 * It's an obvious bug to request a register exceeding the maximum
	 * possible address or to pass an unaligned one.
	 */
if (WARN_ON(reg > IDT_REG_PCI_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
return;
/* Just write the value to the specified register */
iowrite32(data, ndev->cfgspc + (ptrdiff_t)reg);
}
/*
* idt_nt_read() - PCI configuration space registers read method
* @ndev: IDT NTB hardware driver descriptor
 * @reg:	Register to read data from
 *
 * IDT PCIe-switch registers are all Little endian.
*
* Return: register value
*/
static u32 idt_nt_read(struct idt_ntb_dev *ndev, const unsigned int reg)
{
	/*
	 * It's an obvious bug to request a register exceeding the maximum
	 * possible address or to pass an unaligned one.
	 */
if (WARN_ON(reg > IDT_REG_PCI_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
return ~0;
/* Just read the value from the specified register */
return ioread32(ndev->cfgspc + (ptrdiff_t)reg);
}
/*
* idt_sw_write() - Global registers write method
* @ndev: IDT NTB hardware driver descriptor
* @reg: Register to write data to
* @data: Value to write to the register
*
* IDT PCIe-switch Global configuration registers are all Little endian.
*/
static void idt_sw_write(struct idt_ntb_dev *ndev,
const unsigned int reg, const u32 data)
{
unsigned long irqflags;
	/*
	 * It's an obvious bug to request a register exceeding the maximum
	 * possible address or to pass an unaligned one.
	 */
if (WARN_ON(reg > IDT_REG_SW_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
return;
/* Lock GASA registers operations */
spin_lock_irqsave(&ndev->gasa_lock, irqflags);
/* Set the global register address */
iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
/* Put the new value of the register */
iowrite32(data, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
/* Unlock GASA registers operations */
spin_unlock_irqrestore(&ndev->gasa_lock, irqflags);
}
/*
* idt_sw_read() - Global registers read method
* @ndev: IDT NTB hardware driver descriptor
 * @reg:	Register to read data from
*
* IDT PCIe-switch Global configuration registers are all Little endian.
*
* Return: register value
*/
static u32 idt_sw_read(struct idt_ntb_dev *ndev, const unsigned int reg)
{
unsigned long irqflags;
u32 data;
	/*
	 * It's an obvious bug to request a register exceeding the maximum
	 * possible address or to pass an unaligned one.
	 */
if (WARN_ON(reg > IDT_REG_SW_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
return ~0;
/* Lock GASA registers operations */
spin_lock_irqsave(&ndev->gasa_lock, irqflags);
/* Set the global register address */
iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
/* Get the data of the register (read ops acts as MMIO barrier) */
data = ioread32(ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
/* Unlock GASA registers operations */
spin_unlock_irqrestore(&ndev->gasa_lock, irqflags);
return data;
}
/*
* idt_reg_set_bits() - set bits of a passed register
* @ndev: IDT NTB hardware driver descriptor
* @reg: Register to change bits of
* @reg_lock: Register access spin lock
* @valid_mask: Mask of valid bits
* @set_bits: Bitmask to set
*
* Helper method to check whether a passed bitfield is valid and set
* corresponding bits of a register.
*
 * WARNING! Make sure the passed register isn't concurrently accessed via the
 * plain idt_nt_write() method (the read method is ok to use concurrently).
*
* Return: zero on success, negative error on invalid bitmask.
*/
static inline int idt_reg_set_bits(struct idt_ntb_dev *ndev, unsigned int reg,
spinlock_t *reg_lock,
u64 valid_mask, u64 set_bits)
{
unsigned long irqflags;
u32 data;
if (set_bits & ~(u64)valid_mask)
return -EINVAL;
	/* Lock access to the register until the change is written back */
spin_lock_irqsave(reg_lock, irqflags);
data = idt_nt_read(ndev, reg) | (u32)set_bits;
idt_nt_write(ndev, reg, data);
/* Unlock the register */
spin_unlock_irqrestore(reg_lock, irqflags);
return 0;
}
/*
* idt_reg_clear_bits() - clear bits of a passed register
* @ndev: IDT NTB hardware driver descriptor
* @reg: Register to change bits of
* @reg_lock: Register access spin lock
 * @clear_bits:	Bitmask to clear
*
* Helper method to check whether a passed bitfield is valid and clear
* corresponding bits of a register.
*
* NOTE! Invalid bits are always considered cleared so it's not an error
* to clear them over.
*
 * WARNING! Make sure the passed register isn't concurrently accessed via the
 * plain idt_nt_write() method (the read method is ok to use concurrently).
*/
static inline void idt_reg_clear_bits(struct idt_ntb_dev *ndev,
unsigned int reg, spinlock_t *reg_lock,
u64 clear_bits)
{
unsigned long irqflags;
u32 data;
	/* Lock access to the register until the change is written back */
spin_lock_irqsave(reg_lock, irqflags);
data = idt_nt_read(ndev, reg) & ~(u32)clear_bits;
idt_nt_write(ndev, reg, data);
/* Unlock the register */
spin_unlock_irqrestore(reg_lock, irqflags);
}
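/*
 * Usage note: the two helpers above back the doorbell and message mask
 * callbacks further below. For instance, idt_ntb_db_set_mask() updates
 * IDT_NT_INDBELLMSK under db_mask_lock, while idt_ntb_msg_set_mask()
 * updates IDT_NT_MSGSTSMSK under msg_mask_lock.
 */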
/*===========================================================================
* 2. Ports operations
*
 * IDT PCIe-switches can have from 3 up to 8 ports with NT-functions
 * possibly enabled, so all the ports need to be scanned looking for an
 * activated NTB. The NTB API will then enumerate only the ports with NTB.
*===========================================================================
*/
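/*
 * Illustrative outcome of the scan below (hypothetical configuration,
 * assuming the switch ports table lists the ports in ascending order): if
 * the local port is 0 and ports 2 and 4 have NT-functions enabled in active
 * partitions, then peer_cnt ends up as 2 with port_idx_map[2] == 0 and
 * port_idx_map[4] == 1, while all the other entries stay at -EINVAL.
 */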
/*
* idt_scan_ports() - scan IDT PCIe-switch ports collecting info in the tables
* @ndev: Pointer to the PCI device descriptor
*
* Return: zero on success, otherwise a negative error number.
*/
static int idt_scan_ports(struct idt_ntb_dev *ndev)
{
unsigned char pidx, port, part;
u32 data, portsts, partsts;
/* Retrieve the local port number */
data = idt_nt_read(ndev, IDT_NT_PCIELCAP);
ndev->port = GET_FIELD(PCIELCAP_PORTNUM, data);
/* Retrieve the local partition number */
portsts = idt_sw_read(ndev, portdata_tbl[ndev->port].sts);
ndev->part = GET_FIELD(SWPORTxSTS_SWPART, portsts);
/* Initialize port/partition -> index tables with invalid values */
memset(ndev->port_idx_map, -EINVAL, sizeof(ndev->port_idx_map));
memset(ndev->part_idx_map, -EINVAL, sizeof(ndev->part_idx_map));
/*
* Walk over all the possible ports checking whether any of them has
* NT-function activated
*/
ndev->peer_cnt = 0;
for (pidx = 0; pidx < ndev->swcfg->port_cnt; pidx++) {
port = ndev->swcfg->ports[pidx];
/* Skip local port */
if (port == ndev->port)
continue;
		/* Read the port status register to get its partition */
portsts = idt_sw_read(ndev, portdata_tbl[port].sts);
part = GET_FIELD(SWPORTxSTS_SWPART, portsts);
/* Retrieve the partition status */
partsts = idt_sw_read(ndev, partdata_tbl[part].sts);
/* Check if partition state is active and port has NTB */
if (IS_FLD_SET(SWPARTxSTS_STATE, partsts, ACT) &&
(IS_FLD_SET(SWPORTxSTS_MODE, portsts, NT) ||
IS_FLD_SET(SWPORTxSTS_MODE, portsts, USNT) ||
IS_FLD_SET(SWPORTxSTS_MODE, portsts, USNTDMA) ||
IS_FLD_SET(SWPORTxSTS_MODE, portsts, NTDMA))) {
/* Save the port and partition numbers */
ndev->peers[ndev->peer_cnt].port = port;
ndev->peers[ndev->peer_cnt].part = part;
/* Fill in the port/partition -> index tables */
ndev->port_idx_map[port] = ndev->peer_cnt;
ndev->part_idx_map[part] = ndev->peer_cnt;
ndev->peer_cnt++;
}
}
dev_dbg(&ndev->ntb.pdev->dev, "Local port: %hhu, num of peers: %hhu\n",
ndev->port, ndev->peer_cnt);
	/* It's useless to have this driver loaded if there are no peers */
if (ndev->peer_cnt == 0) {
dev_warn(&ndev->ntb.pdev->dev, "No active peer found\n");
return -ENODEV;
}
return 0;
}
/*
* idt_ntb_port_number() - get the local port number
* @ntb: NTB device context.
*
* Return: the local port number
*/
static int idt_ntb_port_number(struct ntb_dev *ntb)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
return ndev->port;
}
/*
* idt_ntb_peer_port_count() - get the number of peer ports
* @ntb: NTB device context.
*
* Return the count of detected peer NT-functions.
*
* Return: number of peer ports
*/
static int idt_ntb_peer_port_count(struct ntb_dev *ntb)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
return ndev->peer_cnt;
}
/*
* idt_ntb_peer_port_number() - get peer port by given index
* @ntb: NTB device context.
* @pidx: Peer port index.
*
* Return: peer port or negative error
*/
static int idt_ntb_peer_port_number(struct ntb_dev *ntb, int pidx)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
if (pidx < 0 || ndev->peer_cnt <= pidx)
return -EINVAL;
/* Return the detected NT-function port number */
return ndev->peers[pidx].port;
}
/*
* idt_ntb_peer_port_idx() - get peer port index by given port number
* @ntb: NTB device context.
* @port: Peer port number.
*
 * The internal port -> index table is pre-initialized with -EINVAL values,
 * so we just need to return its value.
*
* Return: peer NT-function port index or negative error
*/
static int idt_ntb_peer_port_idx(struct ntb_dev *ntb, int port)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
if (port < 0 || IDT_MAX_NR_PORTS <= port)
return -EINVAL;
return ndev->port_idx_map[port];
}
/*===========================================================================
* 3. Link status operations
 * There is no ready-to-use method to have the peer ports notified when the
 * NTB link is brought up or down. Instead the Global Signal is used:
 * whenever any of the ports changes its local NTB link state, it sends the
 * global signal and then clears the corresponding global signal status bit.
 * All the ports receive a notification of that, which makes the client
 * drivers aware of a possible NTB link change.
 * Additionally each of the active NT-functions is subscribed to PCIe-link
 * state changes of the peer ports.
*===========================================================================
*/
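/*
 * Illustrative flow of a link state notification in this driver: when the
 * local side enables its NTB link, idt_ntb_local_link_enable() raises the
 * Global Signal via the IDT_NT_NTGSIGNAL register; every peer then receives
 * a Switch Event IRQ, idt_se_isr() clears the status registers and calls
 * ntb_link_event() so the client driver can re-read the link state.
 */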
static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev);
/*
* idt_init_link() - Initialize NTB link state notification subsystem
* @ndev: IDT NTB hardware driver descriptor
*
* Function performs the basic initialization of some global registers
* needed to enable IRQ-based notifications of PCIe Link Up/Down and
* Global Signal events.
 * NOTE: Since it's not possible to determine when all the NTB peer drivers
 * are unloaded, and those registers may be accessed concurrently, we must
 * preinitialize them all with the same value and leave them uncleared on
 * local driver unload.
*/
static void idt_init_link(struct idt_ntb_dev *ndev)
{
u32 part_mask, port_mask, se_mask;
unsigned char pidx;
/* Initialize spin locker of Mapping Table access registers */
spin_lock_init(&ndev->mtbl_lock);
/* Walk over all detected peers collecting port and partition masks */
port_mask = ~BIT(ndev->port);
part_mask = ~BIT(ndev->part);
for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
port_mask &= ~BIT(ndev->peers[pidx].port);
part_mask &= ~BIT(ndev->peers[pidx].part);
}
	/* Clean the Link Up/Down and Global Signal status registers */
idt_sw_write(ndev, IDT_SW_SELINKUPSTS, (u32)-1);
idt_sw_write(ndev, IDT_SW_SELINKDNSTS, (u32)-1);
idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)-1);
/* Unmask NT-activated partitions to receive Global Switch events */
idt_sw_write(ndev, IDT_SW_SEPMSK, part_mask);
/* Enable PCIe Link Up events of NT-activated ports */
idt_sw_write(ndev, IDT_SW_SELINKUPMSK, port_mask);
/* Enable PCIe Link Down events of NT-activated ports */
idt_sw_write(ndev, IDT_SW_SELINKDNMSK, port_mask);
/* Unmask NT-activated partitions to receive Global Signal events */
idt_sw_write(ndev, IDT_SW_SEGSIGMSK, part_mask);
/* Unmask Link Up/Down and Global Switch Events */
se_mask = ~(IDT_SEMSK_LINKUP | IDT_SEMSK_LINKDN | IDT_SEMSK_GSIGNAL);
idt_sw_write(ndev, IDT_SW_SEMSK, se_mask);
dev_dbg(&ndev->ntb.pdev->dev, "NTB link status events initialized");
}
/*
* idt_deinit_link() - deinitialize link subsystem
* @ndev: IDT NTB hardware driver descriptor
*
* Just disable the link back.
*/
static void idt_deinit_link(struct idt_ntb_dev *ndev)
{
/* Disable the link */
idt_ntb_local_link_disable(ndev);
dev_dbg(&ndev->ntb.pdev->dev, "NTB link status events deinitialized");
}
/*
* idt_se_isr() - switch events ISR
* @ndev: IDT NTB hardware driver descriptor
* @ntint_sts: NT-function interrupt status
*
 * This driver doesn't support IDT PCIe-switch dynamic reconfigurations,
 * Failover capability, etc., so the switch events are used only to notify
 * of PCIe and NTB link events.
* The method is called from PCIe ISR bottom-half routine.
*/
static void idt_se_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
{
u32 sests;
/* Read Switch Events status */
sests = idt_sw_read(ndev, IDT_SW_SESTS);
/* Clean the Link Up/Down and Global Signal status registers */
idt_sw_write(ndev, IDT_SW_SELINKUPSTS, (u32)-1);
idt_sw_write(ndev, IDT_SW_SELINKDNSTS, (u32)-1);
idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)-1);
/* Clean the corresponding interrupt bit */
idt_nt_write(ndev, IDT_NT_NTINTSTS, IDT_NTINTSTS_SEVENT);
dev_dbg(&ndev->ntb.pdev->dev, "SE IRQ detected %#08x (SESTS %#08x)",
ntint_sts, sests);
/* Notify the client driver of possible link state change */
ntb_link_event(&ndev->ntb);
}
/*
* idt_ntb_local_link_enable() - enable the local NTB link.
* @ndev: IDT NTB hardware driver descriptor
*
* In order to enable the NTB link we need:
* - enable Completion TLPs translation
* - initialize mapping table to enable the Request ID translation
* - notify peers of NTB link state change
*/
static void idt_ntb_local_link_enable(struct idt_ntb_dev *ndev)
{
u32 reqid, mtbldata = 0;
unsigned long irqflags;
/* Enable the ID protection and Completion TLPs translation */
idt_nt_write(ndev, IDT_NT_NTCTL, IDT_NTCTL_CPEN);
/* Retrieve the current Requester ID (Bus:Device:Function) */
reqid = idt_nt_read(ndev, IDT_NT_REQIDCAP);
/*
* Set the corresponding NT Mapping table entry of port partition index
* with the data to perform the Request ID translation
*/
mtbldata = SET_FIELD(NTMTBLDATA_REQID, 0, reqid) |
SET_FIELD(NTMTBLDATA_PART, 0, ndev->part) |
IDT_NTMTBLDATA_VALID;
spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
idt_nt_write(ndev, IDT_NT_NTMTBLDATA, mtbldata);
spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
/* Notify the peers by setting and clearing the global signal bit */
idt_nt_write(ndev, IDT_NT_NTGSIGNAL, IDT_NTGSIGNAL_SET);
idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)1 << ndev->part);
}
/*
* idt_ntb_local_link_disable() - disable the local NTB link.
* @ndev: IDT NTB hardware driver descriptor
*
 * In order to disable the NTB link we need:
* - disable Completion TLPs translation
* - clear corresponding mapping table entry
* - notify peers of NTB link state change
*/
static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev)
{
unsigned long irqflags;
/* Disable Completion TLPs translation */
idt_nt_write(ndev, IDT_NT_NTCTL, 0);
/* Clear the corresponding NT Mapping table entry */
spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
idt_nt_write(ndev, IDT_NT_NTMTBLDATA, 0);
spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
/* Notify the peers by setting and clearing the global signal bit */
idt_nt_write(ndev, IDT_NT_NTGSIGNAL, IDT_NTGSIGNAL_SET);
idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)1 << ndev->part);
}
/*
 * idt_ntb_local_link_is_up() - test whether the local NTB link is up
* @ndev: IDT NTB hardware driver descriptor
*
* Local link is up under the following conditions:
* - Bus mastering is enabled
* - NTCTL has Completion TLPs translation enabled
* - Mapping table permits Request TLPs translation
* NOTE: We don't need to check PCIe link state since it's obviously
* up while we are able to communicate with IDT PCIe-switch
*
* Return: true if link is up, otherwise false
*/
static bool idt_ntb_local_link_is_up(struct idt_ntb_dev *ndev)
{
unsigned long irqflags;
u32 data;
/* Read the local Bus Master Enable status */
data = idt_nt_read(ndev, IDT_NT_PCICMDSTS);
if (!(data & IDT_PCICMDSTS_BME))
return false;
/* Read the local Completion TLPs translation enable status */
data = idt_nt_read(ndev, IDT_NT_NTCTL);
if (!(data & IDT_NTCTL_CPEN))
return false;
/* Read Mapping table entry corresponding to the local partition */
spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
return !!(data & IDT_NTMTBLDATA_VALID);
}
/*
* idt_ntb_peer_link_is_up() - test whether peer NTB link is up
* @ndev: IDT NTB hardware driver descriptor
* @pidx: Peer port index
*
* Peer link is up under the following conditions:
* - PCIe link is up
* - Bus mastering is enabled
* - NTCTL has Completion TLPs translation enabled
* - Mapping table permits Request TLPs translation
*
* Return: true if link is up, otherwise false
*/
static bool idt_ntb_peer_link_is_up(struct idt_ntb_dev *ndev, int pidx)
{
unsigned long irqflags;
unsigned char port;
u32 data;
/* Retrieve the device port number */
port = ndev->peers[pidx].port;
/* Check whether PCIe link is up */
data = idt_sw_read(ndev, portdata_tbl[port].sts);
if (!(data & IDT_SWPORTxSTS_LINKUP))
return false;
/* Check whether bus mastering is enabled on the peer port */
data = idt_sw_read(ndev, portdata_tbl[port].pcicmdsts);
if (!(data & IDT_PCICMDSTS_BME))
return false;
/* Check if Completion TLPs translation is enabled on the peer port */
data = idt_sw_read(ndev, portdata_tbl[port].ntctl);
if (!(data & IDT_NTCTL_CPEN))
return false;
/* Read Mapping table entry corresponding to the peer partition */
spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->peers[pidx].part);
data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
return !!(data & IDT_NTMTBLDATA_VALID);
}
/*
* idt_ntb_link_is_up() - get the current ntb link state (NTB API callback)
* @ntb: NTB device context.
* @speed: OUT - The link speed expressed as PCIe generation number.
* @width: OUT - The link width expressed as the number of PCIe lanes.
*
* Get the bitfield of NTB link states for all peer ports
*
* Return: bitfield of indexed ports link state: bit is set/cleared if the
* link is up/down respectively.
*/
static u64 idt_ntb_link_is_up(struct ntb_dev *ntb,
enum ntb_speed *speed, enum ntb_width *width)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
unsigned char pidx;
u64 status;
u32 data;
/* Retrieve the local link speed and width */
if (speed != NULL || width != NULL) {
data = idt_nt_read(ndev, IDT_NT_PCIELCTLSTS);
if (speed != NULL)
*speed = GET_FIELD(PCIELCTLSTS_CLS, data);
if (width != NULL)
*width = GET_FIELD(PCIELCTLSTS_NLW, data);
}
/* If local NTB link isn't up then all the links are considered down */
if (!idt_ntb_local_link_is_up(ndev))
return 0;
/* Collect all the peer ports link states into the bitfield */
status = 0;
for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
if (idt_ntb_peer_link_is_up(ndev, pidx))
status |= ((u64)1 << pidx);
}
return status;
}
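/*
 * Illustrative reading of the bitfield above (hypothetical case): with two
 * detected peers, a return value of 0x1 from idt_ntb_link_is_up() means the
 * link to the peer with index 0 is up while the link to peer index 1 is
 * down; 0x0 is also returned whenever the local NTB link itself is down.
 */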
/*
* idt_ntb_link_enable() - enable local port ntb link (NTB API callback)
* @ntb: NTB device context.
 * @speed:	The maximum link speed expressed as PCIe generation number.
 * @width:	The maximum link width expressed as the number of PCIe lanes.
*
* Enable just local NTB link. PCIe link parameters are ignored.
*
* Return: always zero.
*/
static int idt_ntb_link_enable(struct ntb_dev *ntb, enum ntb_speed speed,
enum ntb_width width)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
/* Just enable the local NTB link */
idt_ntb_local_link_enable(ndev);
dev_dbg(&ndev->ntb.pdev->dev, "Local NTB link enabled");
return 0;
}
/*
* idt_ntb_link_disable() - disable local port ntb link (NTB API callback)
* @ntb: NTB device context.
*
* Disable just local NTB link.
*
* Return: always zero.
*/
static int idt_ntb_link_disable(struct ntb_dev *ntb)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
/* Just disable the local NTB link */
idt_ntb_local_link_disable(ndev);
dev_dbg(&ndev->ntb.pdev->dev, "Local NTB link disabled");
return 0;
}
/*=============================================================================
* 4. Memory Window operations
*
 * IDT PCIe-switches have two types of memory windows: MWs with direct
 * address translation and MWs with LUT-based translation. The first type of
 * MW is a simple map of the corresponding BAR address space to a memory
 * space of a specified target port, so it implements just a one-to-one
 * mapping. A lookup table in its turn can map one BAR address space to up
 * to 24 different memory spaces of different ports.
 * NT-function BARs can be set up to implement either direct or lookup
 * table based address translations, so:
* BAR0 - NT configuration registers space/direct address translation
* BAR1 - direct address translation/upper address of BAR0x64
* BAR2 - direct address translation/Lookup table with either 12 or 24 entries
* BAR3 - direct address translation/upper address of BAR2x64
* BAR4 - direct address translation/Lookup table with either 12 or 24 entries
* BAR5 - direct address translation/upper address of BAR4x64
 * Additionally BAR2 and BAR4 can't have 24-entry LUTs enabled at the same
 * time. Since the BAR setup can be rather complicated, this driver
 * implements a scanning algorithm to have all the possible memory window
 * configurations covered.
*
 * NOTE 1 The BAR setup must be done before the Linux kernel enumerates the
 * NT-function of any port, so this driver has the memory window
 * configurations fixed. Therefore all the initializations must be performed
 * either by the platform BIOS or by an EEPROM connected to the IDT
 * PCIe-switch master SMBus.
*
 * NOTE 2 This driver expects BAR0 to map the NT-function configuration
 * space. A simple calculation gives an upper bound of 29 possible memory
 * windows per NT-function if all the BARs are of 32-bit type.
*=============================================================================
*/
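/*
 * Illustrative sizing example (numbers are for illustration only): a BAR
 * configured with a 1 MB aperture in direct translation mode is exposed by
 * idt_scan_mws() as a single 1 MB memory window, while the same aperture
 * with a 24-entry LUT enabled is split into 24 windows of 1 MB / 32 = 32 KB
 * each (a 12-entry LUT would yield 12 windows of 1 MB / 16 = 64 KB).
 */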
/*
* idt_get_mw_count() - get memory window count
* @mw_type: Memory window type
*
* Return: number of memory windows with respect to the BAR type
*/
static inline unsigned char idt_get_mw_count(enum idt_mw_type mw_type)
{
switch (mw_type) {
case IDT_MW_DIR:
return 1;
case IDT_MW_LUT12:
return 12;
case IDT_MW_LUT24:
return 24;
default:
break;
}
return 0;
}
/*
* idt_get_mw_name() - get memory window name
* @mw_type: Memory window type
*
* Return: pointer to a string with name
*/
static inline char *idt_get_mw_name(enum idt_mw_type mw_type)
{
switch (mw_type) {
case IDT_MW_DIR:
return "DIR ";
case IDT_MW_LUT12:
return "LUT12";
case IDT_MW_LUT24:
return "LUT24";
default:
break;
}
return "unknown";
}
/*
* idt_scan_mws() - scan memory windows of the port
* @ndev: IDT NTB hardware driver descriptor
* @port: Port to get number of memory windows for
* @mw_cnt: Out - number of memory windows
*
 * It walks over the BAR setup registers of the specified port and determines
 * the memory window parameters of any that are activated.
*
* Return: array of memory windows
*/
static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
unsigned char *mw_cnt)
{
struct idt_mw_cfg mws[IDT_MAX_NR_MWS], *ret_mws;
const struct idt_ntb_bar *bars;
enum idt_mw_type mw_type;
unsigned char widx, bidx, en_cnt;
bool bar_64bit = false;
int aprt_size;
u32 data;
/* Retrieve the array of the BARs registers */
bars = portdata_tbl[port].bars;
/* Scan all the BARs belonging to the port */
*mw_cnt = 0;
for (bidx = 0; bidx < IDT_BAR_CNT; bidx += 1 + bar_64bit) {
/* Read BARSETUP register value */
data = idt_sw_read(ndev, bars[bidx].setup);
/* Skip disabled BARs */
if (!(data & IDT_BARSETUP_EN)) {
bar_64bit = false;
continue;
}
/* Skip next BARSETUP if current one has 64bit addressing */
bar_64bit = IS_FLD_SET(BARSETUP_TYPE, data, 64);
/* Skip configuration space mapping BARs */
if (data & IDT_BARSETUP_MODE_CFG)
continue;
/* Retrieve MW type/entries count and aperture size */
mw_type = GET_FIELD(BARSETUP_ATRAN, data);
en_cnt = idt_get_mw_count(mw_type);
aprt_size = (u64)1 << GET_FIELD(BARSETUP_SIZE, data);
/* Save configurations of all available memory windows */
for (widx = 0; widx < en_cnt; widx++, (*mw_cnt)++) {
/*
			 * IDT can expose a limited number of MWs, so it's a
			 * bug to have more than the driver expects
*/
if (*mw_cnt >= IDT_MAX_NR_MWS)
return ERR_PTR(-EINVAL);
/* Save basic MW info */
mws[*mw_cnt].type = mw_type;
mws[*mw_cnt].bar = bidx;
mws[*mw_cnt].idx = widx;
/* It's always DWORD aligned */
mws[*mw_cnt].addr_align = IDT_TRANS_ALIGN;
			/* DIR and LUT approaches configure MWs differently */
if (mw_type == IDT_MW_DIR)
mws[*mw_cnt].size_max = aprt_size;
else if (mw_type == IDT_MW_LUT12)
mws[*mw_cnt].size_max = aprt_size / 16;
else
mws[*mw_cnt].size_max = aprt_size / 32;
mws[*mw_cnt].size_align = (mw_type == IDT_MW_DIR) ?
IDT_DIR_SIZE_ALIGN : mws[*mw_cnt].size_max;
}
}
/* Allocate memory for memory window descriptors */
ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws),
GFP_KERNEL);
if (!ret_mws)
return ERR_PTR(-ENOMEM);
/* Copy the info of detected memory windows */
memcpy(ret_mws, mws, (*mw_cnt)*sizeof(*ret_mws));
return ret_mws;
}
/*
* idt_init_mws() - initialize memory windows subsystem
* @ndev: IDT NTB hardware driver descriptor
*
* Scan BAR setup registers of local and peer ports to determine the
* outbound and inbound memory windows parameters
*
* Return: zero on success, otherwise a negative error number
*/
static int idt_init_mws(struct idt_ntb_dev *ndev)
{
struct idt_ntb_peer *peer;
unsigned char pidx;
/* Scan memory windows of the local port */
ndev->mws = idt_scan_mws(ndev, ndev->port, &ndev->mw_cnt);
if (IS_ERR(ndev->mws)) {
dev_err(&ndev->ntb.pdev->dev,
"Failed to scan mws of local port %hhu", ndev->port);
return PTR_ERR(ndev->mws);
}
/* Scan memory windows of the peer ports */
for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
peer = &ndev->peers[pidx];
peer->mws = idt_scan_mws(ndev, peer->port, &peer->mw_cnt);
if (IS_ERR(peer->mws)) {
dev_err(&ndev->ntb.pdev->dev,
"Failed to scan mws of port %hhu", peer->port);
return PTR_ERR(peer->mws);
}
}
/* Initialize spin locker of the LUT registers */
spin_lock_init(&ndev->lut_lock);
dev_dbg(&ndev->ntb.pdev->dev, "Outbound and inbound MWs initialized");
return 0;
}
/*
* idt_ntb_mw_count() - number of inbound memory windows (NTB API callback)
* @ntb: NTB device context.
* @pidx: Port index of peer device.
*
* The value is returned for the specified peer, so generally speaking it can
 * be different for different ports depending on the IDT PCIe-switch
* initialization.
*
* Return: the number of memory windows.
*/
static int idt_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
if (pidx < 0 || ndev->peer_cnt <= pidx)
return -EINVAL;
return ndev->peers[pidx].mw_cnt;
}
/*
* idt_ntb_mw_get_align() - inbound memory window parameters (NTB API callback)
* @ntb: NTB device context.
* @pidx: Port index of peer device.
* @widx: Memory window index.
* @addr_align: OUT - the base alignment for translating the memory window
* @size_align: OUT - the size alignment for translating the memory window
* @size_max: OUT - the maximum size of the memory window
*
* The peer memory window parameters have already been determined, so just
 * return the corresponding values, which mustn't change within a session.
*
* Return: Zero on success, otherwise a negative error number.
*/
static int idt_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
resource_size_t *addr_align,
resource_size_t *size_align,
resource_size_t *size_max)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
struct idt_ntb_peer *peer;
if (pidx < 0 || ndev->peer_cnt <= pidx)
return -EINVAL;
peer = &ndev->peers[pidx];
if (widx < 0 || peer->mw_cnt <= widx)
return -EINVAL;
if (addr_align != NULL)
*addr_align = peer->mws[widx].addr_align;
if (size_align != NULL)
*size_align = peer->mws[widx].size_align;
if (size_max != NULL)
*size_max = peer->mws[widx].size_max;
return 0;
}
/*
* idt_ntb_peer_mw_count() - number of outbound memory windows
* (NTB API callback)
* @ntb: NTB device context.
*
 * The outbound memory window parameters have been determined based on the
 * BAR setup register values, which are mostly constant within one session.
*
* Return: the number of memory windows.
*/
static int idt_ntb_peer_mw_count(struct ntb_dev *ntb)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
return ndev->mw_cnt;
}
/*
* idt_ntb_peer_mw_get_addr() - get map address of an outbound memory window
* (NTB API callback)
* @ntb: NTB device context.
* @widx: Memory window index (within ntb_peer_mw_count() return value).
* @base: OUT - the base address of mapping region.
* @size: OUT - the size of mapping region.
*
 * Just return the parameters of the BAR resource mapping. The size reflects
 * just the size of the resource.
*
* Return: Zero on success, otherwise a negative error number.
*/
static int idt_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int widx,
phys_addr_t *base, resource_size_t *size)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
if (widx < 0 || ndev->mw_cnt <= widx)
return -EINVAL;
/* Mapping address is just properly shifted BAR resource start */
if (base != NULL)
*base = pci_resource_start(ntb->pdev, ndev->mws[widx].bar) +
ndev->mws[widx].idx * ndev->mws[widx].size_max;
/* Mapping size has already been calculated at MWs scanning */
if (size != NULL)
*size = ndev->mws[widx].size_max;
return 0;
}
/*
* idt_ntb_peer_mw_set_trans() - set a translation address of a memory window
* (NTB API callback)
* @ntb: NTB device context.
* @pidx: Port index of peer device the translation address received from.
* @widx: Memory window index.
* @addr: The dma address of the shared memory to access.
* @size: The size of the shared memory to access.
*
 * The direct address translation and the LUT-based translation are
 * initialized a bit differently, although the parameter restrictions are
 * checked by the same code.
*
* Return: Zero on success, otherwise an error number.
*/
static int idt_ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
u64 addr, resource_size_t size)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
struct idt_mw_cfg *mw_cfg;
u32 data = 0, lutoff = 0;
if (pidx < 0 || ndev->peer_cnt <= pidx)
return -EINVAL;
if (widx < 0 || ndev->mw_cnt <= widx)
return -EINVAL;
/*
* Retrieve the memory window config to make sure the passed arguments
	 * fit its restrictions
*/
mw_cfg = &ndev->mws[widx];
if (!IS_ALIGNED(addr, mw_cfg->addr_align))
return -EINVAL;
if (!IS_ALIGNED(size, mw_cfg->size_align) || size > mw_cfg->size_max)
return -EINVAL;
/* DIR and LUT based translations are initialized differently */
if (mw_cfg->type == IDT_MW_DIR) {
const struct idt_ntb_bar *bar = &ntdata_tbl.bars[mw_cfg->bar];
u64 limit;
/* Set destination partition of translation */
data = idt_nt_read(ndev, bar->setup);
data = SET_FIELD(BARSETUP_TPART, data, ndev->peers[pidx].part);
idt_nt_write(ndev, bar->setup, data);
/* Set translation base address */
idt_nt_write(ndev, bar->ltbase, (u32)addr);
idt_nt_write(ndev, bar->utbase, (u32)(addr >> 32));
/* Set the custom BAR aperture limit */
limit = pci_bus_address(ntb->pdev, mw_cfg->bar) + size;
idt_nt_write(ndev, bar->limit, (u32)limit);
if (IS_FLD_SET(BARSETUP_TYPE, data, 64))
idt_nt_write(ndev, (bar + 1)->limit, (limit >> 32));
} else {
unsigned long irqflags;
/* Initialize corresponding LUT entry */
lutoff = SET_FIELD(LUTOFFSET_INDEX, 0, mw_cfg->idx) |
SET_FIELD(LUTOFFSET_BAR, 0, mw_cfg->bar);
data = SET_FIELD(LUTUDATA_PART, 0, ndev->peers[pidx].part) |
IDT_LUTUDATA_VALID;
spin_lock_irqsave(&ndev->lut_lock, irqflags);
idt_nt_write(ndev, IDT_NT_LUTOFFSET, lutoff);
idt_nt_write(ndev, IDT_NT_LUTLDATA, (u32)addr);
idt_nt_write(ndev, IDT_NT_LUTMDATA, (u32)(addr >> 32));
idt_nt_write(ndev, IDT_NT_LUTUDATA, data);
spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
/* Limit address isn't specified since size is fixed for LUT */
}
return 0;
}
/*
* idt_ntb_peer_mw_clear_trans() - clear the outbound MW translation address
* (NTB API callback)
* @ntb: NTB device context.
* @pidx: Port index of peer device.
* @widx: Memory window index.
*
* It effectively disables the translation over the specified outbound MW.
*
* Return: Zero on success, otherwise an error number.
*/
static int idt_ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx,
int widx)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
struct idt_mw_cfg *mw_cfg;
if (pidx < 0 || ndev->peer_cnt <= pidx)
return -EINVAL;
if (widx < 0 || ndev->mw_cnt <= widx)
return -EINVAL;
mw_cfg = &ndev->mws[widx];
/* DIR and LUT based translations are initialized differently */
if (mw_cfg->type == IDT_MW_DIR) {
const struct idt_ntb_bar *bar = &ntdata_tbl.bars[mw_cfg->bar];
u32 data;
/* Read BARSETUP to check BAR type */
data = idt_nt_read(ndev, bar->setup);
/* Disable translation by specifying zero BAR limit */
idt_nt_write(ndev, bar->limit, 0);
if (IS_FLD_SET(BARSETUP_TYPE, data, 64))
idt_nt_write(ndev, (bar + 1)->limit, 0);
} else {
unsigned long irqflags;
u32 lutoff;
		/* Clear the corresponding LUT entry */
lutoff = SET_FIELD(LUTOFFSET_INDEX, 0, mw_cfg->idx) |
SET_FIELD(LUTOFFSET_BAR, 0, mw_cfg->bar);
spin_lock_irqsave(&ndev->lut_lock, irqflags);
idt_nt_write(ndev, IDT_NT_LUTOFFSET, lutoff);
idt_nt_write(ndev, IDT_NT_LUTLDATA, 0);
idt_nt_write(ndev, IDT_NT_LUTMDATA, 0);
idt_nt_write(ndev, IDT_NT_LUTUDATA, 0);
spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
}
return 0;
}
/*=============================================================================
* 5. Doorbell operations
*
 * The doorbell functionality of IDT PCIe-switches is pretty unusual. First
 * of all there is a global doorbell register whose state can be changed by
 * any NT-function of the IDT device in accordance with the global
 * permissions. These permission configs are not supported by the NTB API,
 * so they must be set up by either BIOS or EEPROM settings. In the same way
 * the state of the global doorbell is reflected to the NT-functions' local
 * inbound doorbell registers. This can lead to situations where a client
 * driver sets some peer doorbell bits and gets them bounced back to its
 * local inbound doorbell if the permissions are granted.
 * Secondly there is just one IRQ vector for Doorbell, Message, Temperature
 * and Switch events, so if the client driver leaves any of the Doorbell
 * bits set and some other event occurs, the driver will be notified of a
 * Doorbell event again.
*=============================================================================
*/
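/*
 * Illustrative consequence of the shared IRQ vector (hypothetical scenario):
 * if a client leaves a doorbell bit set in IDT_NT_INDBELLSTS and a message
 * event later arrives, the single interrupt fires again and the driver
 * reports another doorbell event alongside the message one, so clients
 * should clear the doorbell bits promptly (see idt_ntb_db_clear() below).
 */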
/*
* idt_db_isr() - doorbell event ISR
* @ndev: IDT NTB hardware driver descriptor
* @ntint_sts: NT-function interrupt status
*
 * A doorbell event happens when the DBELL bit of NTINTSTS switches from 0
 * to 1. It happens only when unmasked doorbell bits are set to ones on a
 * completely zeroed doorbell register.
* The method is called from PCIe ISR bottom-half routine.
*/
static void idt_db_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
{
/*
	 * Doorbell IRQ status will be cleared only when the client
	 * driver clears all the doorbell bits.
*/
dev_dbg(&ndev->ntb.pdev->dev, "DB IRQ detected %#08x", ntint_sts);
/* Notify the client driver of possible doorbell state change */
ntb_db_event(&ndev->ntb, 0);
}
/*
* idt_ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
* (NTB API callback)
* @ntb: NTB device context.
*
* IDT PCIe-switches expose just one Doorbell register of DWORD size.
*
* Return: A mask of doorbell bits supported by the ntb.
*/
static u64 idt_ntb_db_valid_mask(struct ntb_dev *ntb)
{
return IDT_DBELL_MASK;
}
/*
* idt_ntb_db_read() - read the local doorbell register (NTB API callback)
* @ntb: NTB device context.
*
 * There is just one inbound doorbell register for each NT-function, so
 * this method returns its value.
*
* Return: The bits currently set in the local doorbell register.
*/
static u64 idt_ntb_db_read(struct ntb_dev *ntb)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
return idt_nt_read(ndev, IDT_NT_INDBELLSTS);
}
/*
* idt_ntb_db_clear() - clear bits in the local doorbell register
* (NTB API callback)
* @ntb: NTB device context.
* @db_bits: Doorbell bits to clear.
*
* Clear bits of inbound doorbell register by writing ones to it.
*
* NOTE! Invalid bits are always considered cleared so it's not an error
* to clear them over.
*
* Return: always zero as success.
*/
static int idt_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
idt_nt_write(ndev, IDT_NT_INDBELLSTS, (u32)db_bits);
return 0;
}
/*
* idt_ntb_db_read_mask() - read the local doorbell mask (NTB API callback)
* @ntb: NTB device context.
*
* Each inbound doorbell bit can be masked from generating IRQ by setting
* the corresponding bit in inbound doorbell mask. So this method returns
* the value of the register.
*
* Return: The bits currently set in the local doorbell mask register.
*/
static u64 idt_ntb_db_read_mask(struct ntb_dev *ntb)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
return idt_nt_read(ndev, IDT_NT_INDBELLMSK);
}
/*
* idt_ntb_db_set_mask() - set bits in the local doorbell mask
* (NTB API callback)
* @ntb: NTB device context.
* @db_bits: Doorbell mask bits to set.
*
* The inbound doorbell register mask value must be read, then OR'ed with
* passed field and only then set back.
*
* Return: zero on success, negative error if invalid argument passed.
*/
static int idt_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
return idt_reg_set_bits(ndev, IDT_NT_INDBELLMSK, &ndev->db_mask_lock,
IDT_DBELL_MASK, db_bits);
}
/*
* idt_ntb_db_clear_mask() - clear bits in the local doorbell mask
* (NTB API callback)
* @ntb: NTB device context.
* @db_bits: Doorbell bits to clear.
*
 * The method just clears the mask bits in accordance with the passed
 * bitfield. The IDT PCIe-switch shall generate an interrupt only if no
 * unmasked bit was set before the current unmasking; otherwise no IRQ is
 * generated since there is only one IRQ vector for all doorbells.
*
* Return: always zero as success
*/
static int idt_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
idt_reg_clear_bits(ndev, IDT_NT_INDBELLMSK, &ndev->db_mask_lock,
db_bits);
return 0;
}
/*
* idt_ntb_peer_db_set() - set bits in the peer doorbell register
* (NTB API callback)
* @ntb: NTB device context.
* @db_bits: Doorbell bits to set.
*
 * IDT PCIe-switches expose a local outbound doorbell register to change the
 * peer inbound doorbell register state.
*
* Return: zero on success, negative error if invalid argument passed.
*/
static int idt_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
if (db_bits & ~(u64)IDT_DBELL_MASK)
return -EINVAL;
idt_nt_write(ndev, IDT_NT_OUTDBELLSET, (u32)db_bits);
return 0;
}
/*=============================================================================
* 6. Messaging operations
*
* Each NT-function of IDT PCIe-switch has four inbound and four outbound
 * message registers. Each outbound message register can be connected to one
 * or even several peer inbound message registers by setting the global
 * configuration. Since the NTB API permits one-to-one message register
 * mapping only, the driver acts in accordance with that restriction.
*=============================================================================
*/
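/*
 * Illustrative send/receive flow within this driver: the sender's
 * idt_ntb_peer_msg_write() programs the message routing register and writes
 * the payload to the local outbound message register; on the receiving side
 * idt_msg_isr() raises ntb_msg_event(), after which the client reads the
 * payload and the source port with idt_ntb_msg_read() and finally clears
 * the status via idt_ntb_msg_clear_sts().
 */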
/*
* idt_init_msg() - initialize messaging interface
* @ndev: IDT NTB hardware driver descriptor
*
* Just initialize the message registers routing tables locker.
*/
static void idt_init_msg(struct idt_ntb_dev *ndev)
{
unsigned char midx;
/* Init the messages routing table lockers */
for (midx = 0; midx < IDT_MSG_CNT; midx++)
spin_lock_init(&ndev->msg_locks[midx]);
dev_dbg(&ndev->ntb.pdev->dev, "NTB Messaging initialized");
}
/*
* idt_msg_isr() - message event ISR
* @ndev: IDT NTB hardware driver descriptor
* @ntint_sts: NT-function interrupt status
*
* Message event happens when MSG bit of NTINTSTS switches from 0 to 1.
* It happens only when unmasked message status bits are set to ones on
* completely zeroed message status register.
* The method is called from PCIe ISR bottom-half routine.
*/
static void idt_msg_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
{
/*
	 * Message IRQ status will be cleared only when the client
	 * driver clears all the message status bits.
*/
dev_dbg(&ndev->ntb.pdev->dev, "Message IRQ detected %#08x", ntint_sts);
/* Notify the client driver of possible message status change */
ntb_msg_event(&ndev->ntb);
}
/*
* idt_ntb_msg_count() - get the number of message registers (NTB API callback)
* @ntb: NTB device context.
*
* IDT PCIe-switches support four message registers.
*
* Return: the number of message registers.
*/
static int idt_ntb_msg_count(struct ntb_dev *ntb)
{
return IDT_MSG_CNT;
}
/*
* idt_ntb_msg_inbits() - get a bitfield of inbound message registers status
* (NTB API callback)
* @ntb: NTB device context.
*
 * The NT message status register is shared between the inbound and outbound
 * message registers status bits.
*
* Return: bitfield of inbound message registers.
*/
static u64 idt_ntb_msg_inbits(struct ntb_dev *ntb)
{
return (u64)IDT_INMSG_MASK;
}
/*
* idt_ntb_msg_outbits() - get a bitfield of outbound message registers status
* (NTB API callback)
* @ntb: NTB device context.
*
 * The NT message status register is shared between the inbound and outbound
 * message registers status bits.
*
* Return: bitfield of outbound message registers.
*/
static u64 idt_ntb_msg_outbits(struct ntb_dev *ntb)
{
return (u64)IDT_OUTMSG_MASK;
}
/*
* idt_ntb_msg_read_sts() - read the message registers status (NTB API callback)
* @ntb: NTB device context.
*
 * IDT PCIe-switches expose message status registers to notify drivers of
 * incoming data and of failures in case the peer message register isn't
 * freed.
*
* Return: status bits of message registers
*/
static u64 idt_ntb_msg_read_sts(struct ntb_dev *ntb)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
return idt_nt_read(ndev, IDT_NT_MSGSTS);
}
/*
* idt_ntb_msg_clear_sts() - clear status bits of message registers
* (NTB API callback)
* @ntb: NTB device context.
* @sts_bits: Status bits to clear.
*
* Clear bits in the status register by writing ones.
*
* NOTE! Invalid bits are always considered cleared so it's not an error
* to clear them over.
*
* Return: always zero as success.
*/
static int idt_ntb_msg_clear_sts(struct ntb_dev *ntb, u64 sts_bits)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
idt_nt_write(ndev, IDT_NT_MSGSTS, sts_bits);
return 0;
}
/*
* idt_ntb_msg_set_mask() - set mask of message register status bits
* (NTB API callback)
* @ntb: NTB device context.
* @mask_bits: Mask bits.
*
* Mask the message status bits from raising an IRQ.
*
* Return: zero on success, negative error if invalid argument passed.
*/
static int idt_ntb_msg_set_mask(struct ntb_dev *ntb, u64 mask_bits)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
return idt_reg_set_bits(ndev, IDT_NT_MSGSTSMSK, &ndev->msg_mask_lock,
IDT_MSG_MASK, mask_bits);
}
/*
* idt_ntb_msg_clear_mask() - clear message registers mask
* (NTB API callback)
* @ntb: NTB device context.
* @mask_bits: Mask bits.
*
* Clear mask of message status bits IRQs.
*
* Return: always zero as success.
*/
static int idt_ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
idt_reg_clear_bits(ndev, IDT_NT_MSGSTSMSK, &ndev->msg_mask_lock,
mask_bits);
return 0;
}
/*
* idt_ntb_msg_read() - read message register with specified index
* (NTB API callback)
* @ntb: NTB device context.
 * @pidx:	OUT - Port index of the peer device the message was retrieved from
* @midx: Message register index
*
* Read data from the specified message register and source register.
*
* Return: inbound message register value.
*/
static u32 idt_ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
if (midx < 0 || IDT_MSG_CNT <= midx)
return ~(u32)0;
/* Retrieve source port index of the message */
if (pidx != NULL) {
u32 srcpart;
srcpart = idt_nt_read(ndev, ntdata_tbl.msgs[midx].src);
*pidx = ndev->part_idx_map[srcpart];
/* Sanity check partition index (for initial case) */
if (*pidx == -EINVAL)
*pidx = 0;
}
/* Retrieve data of the corresponding message register */
return idt_nt_read(ndev, ntdata_tbl.msgs[midx].in);
}
/*
* idt_ntb_peer_msg_write() - write data to the specified message register
* (NTB API callback)
* @ntb: NTB device context.
 * @pidx:	Port index of the peer device the message is being sent to
* @midx: Message register index
* @msg: Data to send
*
* Just try to send data to a peer. Message status register should be
* checked by client driver.
*
* Return: zero on success, negative error if invalid argument passed.
*/
static int idt_ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
u32 msg)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
unsigned long irqflags;
u32 swpmsgctl = 0;
if (midx < 0 || IDT_MSG_CNT <= midx)
return -EINVAL;
if (pidx < 0 || ndev->peer_cnt <= pidx)
return -EINVAL;
/* Collect the routing information */
swpmsgctl = SET_FIELD(SWPxMSGCTL_REG, 0, midx) |
SET_FIELD(SWPxMSGCTL_PART, 0, ndev->peers[pidx].part);
/* Lock the messages routing table of the specified register */
spin_lock_irqsave(&ndev->msg_locks[midx], irqflags);
/* Set the route and send the data */
idt_sw_write(ndev, partdata_tbl[ndev->part].msgctl[midx], swpmsgctl);
idt_nt_write(ndev, ntdata_tbl.msgs[midx].out, msg);
/* Unlock the messages routing table */
spin_unlock_irqrestore(&ndev->msg_locks[midx], irqflags);
/* Client driver shall check the status register */
return 0;
}
/*=============================================================================
* 7. Temperature sensor operations
*
 * The IDT PCIe-switch has an embedded temperature sensor, which can be used
 * to check the current chip core temperature. Since the workload environment
 * can differ between platforms, an offset and ADC/filter settings can be
 * specified, although only the offset configuration is exposed via the sysfs
 * hwmon interface at the moment. The rest of the settings can be adjusted,
 * for instance, by the BIOS/EEPROM firmware.
*=============================================================================
*/
/*
 * idt_get_deg() - convert a millidegree Celsius value to whole degrees
* @mdegC: IN - millidegree Celsius value
*
* Return: Degree corresponding to the passed millidegree value
*/
static inline s8 idt_get_deg(long mdegC)
{
return mdegC / 1000;
}
/*
 * idt_get_deg_frac() - retrieve 0/0.5 fraction of the millidegree Celsius value
* @mdegC: IN - millidegree Celsius value
*
* Return: 0/0.5 degree fraction of the passed millidegree value
*/
static inline u8 idt_get_deg_frac(long mdegC)
{
return (mdegC % 1000) >= 500 ? 5 : 0;
}
/*
 * idt_temp_get_fmt() - convert millidegree Celsius value to 0:7:1 format
* @mdegC: IN - millidegree Celsius value
*
* Return: 0:7:1 format acceptable by the IDT temperature sensor
*/
static inline u8 idt_temp_get_fmt(long mdegC)
{
return (idt_get_deg(mdegC) << 1) | (idt_get_deg_frac(mdegC) ? 1 : 0);
}
/*
* idt_get_temp_sval() - convert temp sample to signed millidegree Celsius
* @data: IN - shifted to LSB 8-bits temperature sample
*
* Return: signed millidegree Celsius
*/
static inline long idt_get_temp_sval(u32 data)
{
return ((s8)data / 2) * 1000 + (data & 0x1 ? 500 : 0);
}
/*
 * idt_get_temp_uval() - convert temp sample to unsigned millidegree Celsius
* @data: IN - shifted to LSB 8-bits temperature sample
*
* Return: unsigned millidegree Celsius
*/
static inline long idt_get_temp_uval(u32 data)
{
return (data / 2) * 1000 + (data & 0x1 ? 500 : 0);
}
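/*
 * Worked example of the 0:7:1 temperature format (illustrative numbers):
 * 43500 mdegC is encoded by idt_temp_get_fmt() as (43 << 1) | 1 = 0x57,
 * and a raw sample of 0x57 is decoded back by idt_get_temp_uval() as
 * (0x57 / 2) * 1000 + 500 = 43500 mdegC.
 */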
/*
* idt_read_temp() - read temperature from chip sensor
 * @ndev:	IDT NTB hardware driver descriptor
* @type: IN - type of the temperature value to read
* @val: OUT - integer value of temperature in millidegree Celsius
*/
static void idt_read_temp(struct idt_ntb_dev *ndev,
const enum idt_temp_val type, long *val)
{
u32 data;
	/* Select the temperature field in accordance with the passed type */
switch (type) {
case IDT_TEMP_CUR:
data = GET_FIELD(TMPSTS_TEMP,
idt_sw_read(ndev, IDT_SW_TMPSTS));
break;
case IDT_TEMP_LOW:
data = GET_FIELD(TMPSTS_LTEMP,
idt_sw_read(ndev, IDT_SW_TMPSTS));
break;
case IDT_TEMP_HIGH:
data = GET_FIELD(TMPSTS_HTEMP,
idt_sw_read(ndev, IDT_SW_TMPSTS));
break;
case IDT_TEMP_OFFSET:
/* This is the only field with signed 0:7:1 format */
data = GET_FIELD(TMPADJ_OFFSET,
idt_sw_read(ndev, IDT_SW_TMPADJ));
*val = idt_get_temp_sval(data);
return;
default:
data = GET_FIELD(TMPSTS_TEMP,
idt_sw_read(ndev, IDT_SW_TMPSTS));
break;
}
/* The rest of the fields accept unsigned 0:7:1 format */
*val = idt_get_temp_uval(data);
}
/*
* idt_write_temp() - write temperature to the chip sensor register
 * @ndev:	IDT NTB hardware driver descriptor
* @type: IN - type of the temperature value to change
* @val: IN - integer value of temperature in millidegree Celsius
*/
static void idt_write_temp(struct idt_ntb_dev *ndev,
const enum idt_temp_val type, const long val)
{
unsigned int reg;
u32 data;
u8 fmt;
/* Retrieve the properly formatted temperature value */
fmt = idt_temp_get_fmt(val);
mutex_lock(&ndev->hwmon_mtx);
switch (type) {
case IDT_TEMP_LOW:
reg = IDT_SW_TMPALARM;
data = SET_FIELD(TMPALARM_LTEMP, idt_sw_read(ndev, reg), fmt) &
~IDT_TMPALARM_IRQ_MASK;
break;
case IDT_TEMP_HIGH:
reg = IDT_SW_TMPALARM;
data = SET_FIELD(TMPALARM_HTEMP, idt_sw_read(ndev, reg), fmt) &
~IDT_TMPALARM_IRQ_MASK;
break;
case IDT_TEMP_OFFSET:
reg = IDT_SW_TMPADJ;
data = SET_FIELD(TMPADJ_OFFSET, idt_sw_read(ndev, reg), fmt);
break;
default:
goto inval_spin_unlock;
}
idt_sw_write(ndev, reg, data);
inval_spin_unlock:
mutex_unlock(&ndev->hwmon_mtx);
}
/*
 * idt_sysfs_show_temp() - print out the corresponding temperature value
* @dev: Pointer to the NTB device structure
* @da: Sensor device attribute structure
* @buf: Buffer to print temperature out
*
* Return: Number of written symbols or negative error
*/
static ssize_t idt_sysfs_show_temp(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct idt_ntb_dev *ndev = dev_get_drvdata(dev);
enum idt_temp_val type = attr->index;
long mdeg;
idt_read_temp(ndev, type, &mdeg);
return sprintf(buf, "%ld\n", mdeg);
}
/*
* idt_sysfs_set_temp() - set corresponding temperature value
* @dev: Pointer to the NTB device structure
* @da: Sensor device attribute structure
 * @buf:	Buffer with the temperature value to be set
* @count: Size of the passed buffer
*
* Return: Number of written symbols or negative error
*/
static ssize_t idt_sysfs_set_temp(struct device *dev,
struct device_attribute *da, const char *buf,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct idt_ntb_dev *ndev = dev_get_drvdata(dev);
enum idt_temp_val type = attr->index;
long mdeg;
int ret;
ret = kstrtol(buf, 10, &mdeg);
if (ret)
return ret;
/* Clamp the passed value in accordance with the type */
if (type == IDT_TEMP_OFFSET)
mdeg = clamp_val(mdeg, IDT_TEMP_MIN_OFFSET,
IDT_TEMP_MAX_OFFSET);
else
mdeg = clamp_val(mdeg, IDT_TEMP_MIN_MDEG, IDT_TEMP_MAX_MDEG);
idt_write_temp(ndev, type, mdeg);
return count;
}
/*
* idt_sysfs_reset_hist() - reset temperature history
* @dev: Pointer to the NTB device structure
* @da: Sensor device attribute structure
 * @buf: Buffer with the passed data (content is ignored)
* @count: Size of the passed buffer
*
 * Return: Number of consumed bytes (@count) or a negative error
*/
static ssize_t idt_sysfs_reset_hist(struct device *dev,
struct device_attribute *da,
const char *buf, size_t count)
{
struct idt_ntb_dev *ndev = dev_get_drvdata(dev);
/* Just set the maximal value to the lowest temperature field and
* minimal value to the highest temperature field
*/
idt_write_temp(ndev, IDT_TEMP_LOW, IDT_TEMP_MAX_MDEG);
idt_write_temp(ndev, IDT_TEMP_HIGH, IDT_TEMP_MIN_MDEG);
return count;
}
/*
* Hwmon IDT sysfs attributes
*/
static SENSOR_DEVICE_ATTR(temp1_input, 0444, idt_sysfs_show_temp, NULL,
IDT_TEMP_CUR);
static SENSOR_DEVICE_ATTR(temp1_lowest, 0444, idt_sysfs_show_temp, NULL,
IDT_TEMP_LOW);
static SENSOR_DEVICE_ATTR(temp1_highest, 0444, idt_sysfs_show_temp, NULL,
IDT_TEMP_HIGH);
static SENSOR_DEVICE_ATTR(temp1_offset, 0644, idt_sysfs_show_temp,
idt_sysfs_set_temp, IDT_TEMP_OFFSET);
static DEVICE_ATTR(temp1_reset_history, 0200, NULL, idt_sysfs_reset_hist);
/*
* Hwmon IDT sysfs attributes group
*/
static struct attribute *idt_temp_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_lowest.dev_attr.attr,
&sensor_dev_attr_temp1_highest.dev_attr.attr,
&sensor_dev_attr_temp1_offset.dev_attr.attr,
&dev_attr_temp1_reset_history.attr,
NULL
};
ATTRIBUTE_GROUPS(idt_temp);
/*
* idt_init_temp() - initialize temperature sensor interface
* @ndev: IDT NTB hardware driver descriptor
*
 * This simple sensor initialization method is responsible for switching the
 * sensor on and for registering the resource-managed hwmon interface. Note
 * that since the device is shared we won't disable it on remove, but leave
 * it working until the system is powered off.
*/
static void idt_init_temp(struct idt_ntb_dev *ndev)
{
struct device *hwmon;
/* Enable sensor if it hasn't been already */
idt_sw_write(ndev, IDT_SW_TMPCTL, 0x0);
/* Initialize hwmon interface fields */
mutex_init(&ndev->hwmon_mtx);
hwmon = devm_hwmon_device_register_with_groups(&ndev->ntb.pdev->dev,
ndev->swcfg->name, ndev, idt_temp_groups);
if (IS_ERR(hwmon)) {
dev_err(&ndev->ntb.pdev->dev, "Couldn't create hwmon device");
return;
}
dev_dbg(&ndev->ntb.pdev->dev, "Temperature HWmon interface registered");
}
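/*
 * Illustrative userspace sketch (not part of the driver): how the hwmon
 * attributes registered by idt_init_temp() are typically exercised. The
 * "hwmon0" instance index is an assumption - the actual index depends on the
 * system and should be resolved by reading the "name" attribute of each
 * /sys/class/hwmon instance. The block is wrapped in "#if 0" so it never
 * takes part in the kernel build.
 */
#if 0
#include <stdio.h>

int main(void)
{
	long mdeg;
	FILE *f;

	/* Reset the lowest/highest temperature history */
	f = fopen("/sys/class/hwmon/hwmon0/temp1_reset_history", "w");
	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);

	/* Read the current temperature reported in millidegree Celsius */
	f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%ld", &mdeg) == 1)
		printf("switch temperature: %ld.%03ld C\n",
		       mdeg / 1000, mdeg % 1000);
	fclose(f);

	return 0;
}
#endif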
/*=============================================================================
* 8. ISRs related operations
*
 * The IDT PCIe-switch has an awkwardly designed IRQ system. There is just one
 * interrupt vector for the doorbell and message registers, so the hardware
 * driver can't determine the actual source of an IRQ if, for example, a
 * message event arrives while any unmasked doorbell is still set. The same
 * ambiguity applies to switch and temperature sensor events. The difference
 * is that the SEVENT and TMPSENSOR bits of the NT interrupt status register
 * can be cleared by the IRQ handler, so the next interrupt request won't
 * falsely re-handle the corresponding events.
 * The hardware driver implements only a bottom-half (threaded) handler of the
 * IRQ, since once any of the events has happened the device won't raise the
 * interrupt again until the previous one is acknowledged by clearing the
 * corresponding NTINTSTS bit.
*=============================================================================
*/
static irqreturn_t idt_thread_isr(int irq, void *devid);
/*
* idt_init_isr() - initialize PCIe interrupt handler
* @ndev: IDT NTB hardware driver descriptor
*
* Return: zero on success, otherwise a negative error number.
*/
static int idt_init_isr(struct idt_ntb_dev *ndev)
{
struct pci_dev *pdev = ndev->ntb.pdev;
u32 ntint_mask;
int ret;
/* Allocate just one interrupt vector for the ISR */
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
if (ret != 1) {
dev_err(&pdev->dev, "Failed to allocate IRQ vector");
return ret;
}
/* Retrieve the IRQ vector */
ret = pci_irq_vector(pdev, 0);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get IRQ vector");
goto err_free_vectors;
}
/* Set the IRQ handler */
ret = devm_request_threaded_irq(&pdev->dev, ret, NULL, idt_thread_isr,
IRQF_ONESHOT, NTB_IRQNAME, ndev);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to set MSI IRQ handler, %d", ret);
goto err_free_vectors;
}
/* Unmask Message/Doorbell/SE interrupts */
ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) & ~IDT_NTINTMSK_ALL;
idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask);
/* From now on the interrupts are enabled */
dev_dbg(&pdev->dev, "NTB interrupts initialized");
return 0;
err_free_vectors:
pci_free_irq_vectors(pdev);
return ret;
}
/*
 * idt_deinit_isr() - deinitialize PCIe interrupt handler
* @ndev: IDT NTB hardware driver descriptor
*
* Disable corresponding interrupts and free allocated IRQ vectors.
*/
static void idt_deinit_isr(struct idt_ntb_dev *ndev)
{
struct pci_dev *pdev = ndev->ntb.pdev;
u32 ntint_mask;
/* Mask interrupts back */
ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) | IDT_NTINTMSK_ALL;
idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask);
	/* Manually free the IRQ, otherwise pci_free_irq_vectors() will fail */
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 0), ndev);
/* Free allocated IRQ vectors */
pci_free_irq_vectors(pdev);
dev_dbg(&pdev->dev, "NTB interrupts deinitialized");
}
/*
* idt_thread_isr() - NT function interrupts handler
* @irq: IRQ number
* @devid: Custom buffer
*
 * It reads the current NT interrupt status register and handles all the
 * events it declares.
 * The method is the bottom-half routine of the default threaded PCIe IRQ
 * handler.
*/
static irqreturn_t idt_thread_isr(int irq, void *devid)
{
struct idt_ntb_dev *ndev = devid;
bool handled = false;
u32 ntint_sts;
/* Read the NT interrupts status register */
ntint_sts = idt_nt_read(ndev, IDT_NT_NTINTSTS);
/* Handle messaging interrupts */
if (ntint_sts & IDT_NTINTSTS_MSG) {
idt_msg_isr(ndev, ntint_sts);
handled = true;
}
/* Handle doorbell interrupts */
if (ntint_sts & IDT_NTINTSTS_DBELL) {
idt_db_isr(ndev, ntint_sts);
handled = true;
}
/* Handle switch event interrupts */
if (ntint_sts & IDT_NTINTSTS_SEVENT) {
idt_se_isr(ndev, ntint_sts);
handled = true;
}
dev_dbg(&ndev->ntb.pdev->dev, "IDT IRQs 0x%08x handled", ntint_sts);
return handled ? IRQ_HANDLED : IRQ_NONE;
}
/*===========================================================================
* 9. NTB hardware driver initialization
*===========================================================================
*/
/*
* NTB API operations
*/
static const struct ntb_dev_ops idt_ntb_ops = {
.port_number = idt_ntb_port_number,
.peer_port_count = idt_ntb_peer_port_count,
.peer_port_number = idt_ntb_peer_port_number,
.peer_port_idx = idt_ntb_peer_port_idx,
.link_is_up = idt_ntb_link_is_up,
.link_enable = idt_ntb_link_enable,
.link_disable = idt_ntb_link_disable,
.mw_count = idt_ntb_mw_count,
.mw_get_align = idt_ntb_mw_get_align,
.peer_mw_count = idt_ntb_peer_mw_count,
.peer_mw_get_addr = idt_ntb_peer_mw_get_addr,
.peer_mw_set_trans = idt_ntb_peer_mw_set_trans,
.peer_mw_clear_trans = idt_ntb_peer_mw_clear_trans,
.db_valid_mask = idt_ntb_db_valid_mask,
.db_read = idt_ntb_db_read,
.db_clear = idt_ntb_db_clear,
.db_read_mask = idt_ntb_db_read_mask,
.db_set_mask = idt_ntb_db_set_mask,
.db_clear_mask = idt_ntb_db_clear_mask,
.peer_db_set = idt_ntb_peer_db_set,
.msg_count = idt_ntb_msg_count,
.msg_inbits = idt_ntb_msg_inbits,
.msg_outbits = idt_ntb_msg_outbits,
.msg_read_sts = idt_ntb_msg_read_sts,
.msg_clear_sts = idt_ntb_msg_clear_sts,
.msg_set_mask = idt_ntb_msg_set_mask,
.msg_clear_mask = idt_ntb_msg_clear_mask,
.msg_read = idt_ntb_msg_read,
.peer_msg_write = idt_ntb_peer_msg_write
};
/*
* idt_register_device() - register IDT NTB device
* @ndev: IDT NTB hardware driver descriptor
*
* Return: zero on success, otherwise a negative error number.
*/
static int idt_register_device(struct idt_ntb_dev *ndev)
{
int ret;
/* Initialize the rest of NTB device structure and register it */
ndev->ntb.ops = &idt_ntb_ops;
ndev->ntb.topo = NTB_TOPO_SWITCH;
ret = ntb_register_device(&ndev->ntb);
if (ret != 0) {
dev_err(&ndev->ntb.pdev->dev, "Failed to register NTB device");
return ret;
}
dev_dbg(&ndev->ntb.pdev->dev, "NTB device successfully registered");
return 0;
}
/*
* idt_unregister_device() - unregister IDT NTB device
* @ndev: IDT NTB hardware driver descriptor
*/
static void idt_unregister_device(struct idt_ntb_dev *ndev)
{
/* Just unregister the NTB device */
ntb_unregister_device(&ndev->ntb);
dev_dbg(&ndev->ntb.pdev->dev, "NTB device unregistered");
}
/*=============================================================================
* 10. DebugFS node initialization
*=============================================================================
*/
static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp);
/*
* Driver DebugFS info file operations
*/
static const struct file_operations idt_dbgfs_info_ops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = idt_dbgfs_info_read
};
/*
* idt_dbgfs_info_read() - DebugFS read info node callback
 * @filp: File node descriptor
 * @ubuf: User-space buffer to put data to
 * @count: Size of the buffer
 * @offp: Offset within the file
 *
 * Return: Number of bytes copied to the user-space buffer or a negative error
 */
static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
struct idt_ntb_dev *ndev = filp->private_data;
unsigned char idx, pidx, cnt;
unsigned long irqflags, mdeg;
ssize_t ret = 0, off = 0;
enum ntb_speed speed;
enum ntb_width width;
char *strbuf;
size_t size;
u32 data;
	/* Let's limit the buffer size the way the Intel/AMD drivers do */
size = min_t(size_t, count, 0x1000U);
/* Allocate the memory for the buffer */
strbuf = kmalloc(size, GFP_KERNEL);
if (strbuf == NULL)
return -ENOMEM;
/* Put the data into the string buffer */
off += scnprintf(strbuf + off, size - off,
"\n\t\tIDT NTB device Information:\n\n");
/* General local device configurations */
off += scnprintf(strbuf + off, size - off,
"Local Port %hhu, Partition %hhu\n", ndev->port, ndev->part);
/* Peer ports information */
off += scnprintf(strbuf + off, size - off, "Peers:\n");
for (idx = 0; idx < ndev->peer_cnt; idx++) {
off += scnprintf(strbuf + off, size - off,
"\t%hhu. Port %hhu, Partition %hhu\n",
idx, ndev->peers[idx].port, ndev->peers[idx].part);
}
/* Links status */
data = idt_ntb_link_is_up(&ndev->ntb, &speed, &width);
off += scnprintf(strbuf + off, size - off,
"NTB link status\t- 0x%08x, ", data);
off += scnprintf(strbuf + off, size - off, "PCIe Gen %d x%d lanes\n",
speed, width);
/* Mapping table entries */
off += scnprintf(strbuf + off, size - off, "NTB Mapping Table:\n");
for (idx = 0; idx < IDT_MTBL_ENTRY_CNT; idx++) {
spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
idt_nt_write(ndev, IDT_NT_NTMTBLADDR, idx);
data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
/* Print valid entries only */
if (data & IDT_NTMTBLDATA_VALID) {
off += scnprintf(strbuf + off, size - off,
"\t%hhu. Partition %d, Requester ID 0x%04x\n",
idx, GET_FIELD(NTMTBLDATA_PART, data),
GET_FIELD(NTMTBLDATA_REQID, data));
}
}
off += scnprintf(strbuf + off, size - off, "\n");
/* Outbound memory windows information */
off += scnprintf(strbuf + off, size - off,
"Outbound Memory Windows:\n");
for (idx = 0; idx < ndev->mw_cnt; idx += cnt) {
data = ndev->mws[idx].type;
cnt = idt_get_mw_count(data);
/* Print Memory Window information */
if (data == IDT_MW_DIR)
off += scnprintf(strbuf + off, size - off,
"\t%hhu.\t", idx);
else
off += scnprintf(strbuf + off, size - off,
"\t%hhu-%d.\t", idx, idx + cnt - 1);
off += scnprintf(strbuf + off, size - off, "%s BAR%hhu, ",
idt_get_mw_name(data), ndev->mws[idx].bar);
off += scnprintf(strbuf + off, size - off,
"Address align 0x%08llx, ", ndev->mws[idx].addr_align);
off += scnprintf(strbuf + off, size - off,
"Size align 0x%08llx, Size max %llu\n",
ndev->mws[idx].size_align, ndev->mws[idx].size_max);
}
/* Inbound memory windows information */
for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
off += scnprintf(strbuf + off, size - off,
"Inbound Memory Windows for peer %hhu (Port %hhu):\n",
pidx, ndev->peers[pidx].port);
/* Print Memory Windows information */
for (idx = 0; idx < ndev->peers[pidx].mw_cnt; idx += cnt) {
data = ndev->peers[pidx].mws[idx].type;
cnt = idt_get_mw_count(data);
if (data == IDT_MW_DIR)
off += scnprintf(strbuf + off, size - off,
"\t%hhu.\t", idx);
else
off += scnprintf(strbuf + off, size - off,
"\t%hhu-%d.\t", idx, idx + cnt - 1);
off += scnprintf(strbuf + off, size - off,
"%s BAR%hhu, ", idt_get_mw_name(data),
ndev->peers[pidx].mws[idx].bar);
off += scnprintf(strbuf + off, size - off,
"Address align 0x%08llx, ",
ndev->peers[pidx].mws[idx].addr_align);
off += scnprintf(strbuf + off, size - off,
"Size align 0x%08llx, Size max %llu\n",
ndev->peers[pidx].mws[idx].size_align,
ndev->peers[pidx].mws[idx].size_max);
}
}
off += scnprintf(strbuf + off, size - off, "\n");
/* Doorbell information */
data = idt_sw_read(ndev, IDT_SW_GDBELLSTS);
off += scnprintf(strbuf + off, size - off,
"Global Doorbell state\t- 0x%08x\n", data);
data = idt_ntb_db_read(&ndev->ntb);
off += scnprintf(strbuf + off, size - off,
"Local Doorbell state\t- 0x%08x\n", data);
data = idt_nt_read(ndev, IDT_NT_INDBELLMSK);
off += scnprintf(strbuf + off, size - off,
"Local Doorbell mask\t- 0x%08x\n", data);
off += scnprintf(strbuf + off, size - off, "\n");
/* Messaging information */
off += scnprintf(strbuf + off, size - off,
"Message event valid\t- 0x%08x\n", IDT_MSG_MASK);
data = idt_ntb_msg_read_sts(&ndev->ntb);
off += scnprintf(strbuf + off, size - off,
"Message event status\t- 0x%08x\n", data);
data = idt_nt_read(ndev, IDT_NT_MSGSTSMSK);
off += scnprintf(strbuf + off, size - off,
"Message event mask\t- 0x%08x\n", data);
off += scnprintf(strbuf + off, size - off,
"Message data:\n");
for (idx = 0; idx < IDT_MSG_CNT; idx++) {
int src;
data = idt_ntb_msg_read(&ndev->ntb, &src, idx);
off += scnprintf(strbuf + off, size - off,
"\t%hhu. 0x%08x from peer %d (Port %hhu)\n",
idx, data, src, ndev->peers[src].port);
}
off += scnprintf(strbuf + off, size - off, "\n");
/* Current temperature */
idt_read_temp(ndev, IDT_TEMP_CUR, &mdeg);
off += scnprintf(strbuf + off, size - off,
"Switch temperature\t\t- %hhd.%hhuC\n",
idt_get_deg(mdeg), idt_get_deg_frac(mdeg));
/* Copy the buffer to the User Space */
ret = simple_read_from_buffer(ubuf, count, offp, strbuf, off);
kfree(strbuf);
return ret;
}
/*
* idt_init_dbgfs() - initialize DebugFS node
* @ndev: IDT NTB hardware driver descriptor
*
* Return: zero on success, otherwise a negative error number.
*/
static int idt_init_dbgfs(struct idt_ntb_dev *ndev)
{
char devname[64];
/* If the top directory is not created then do nothing */
if (IS_ERR_OR_NULL(dbgfs_topdir)) {
dev_info(&ndev->ntb.pdev->dev, "Top DebugFS directory absent");
return PTR_ERR_OR_ZERO(dbgfs_topdir);
}
/* Create the info file node */
snprintf(devname, 64, "info:%s", pci_name(ndev->ntb.pdev));
ndev->dbgfs_info = debugfs_create_file(devname, 0400, dbgfs_topdir,
ndev, &idt_dbgfs_info_ops);
if (IS_ERR(ndev->dbgfs_info)) {
dev_dbg(&ndev->ntb.pdev->dev, "Failed to create DebugFS node");
return PTR_ERR(ndev->dbgfs_info);
}
dev_dbg(&ndev->ntb.pdev->dev, "NTB device DebugFS node created");
return 0;
}
/*
* idt_deinit_dbgfs() - deinitialize DebugFS node
* @ndev: IDT NTB hardware driver descriptor
*
* Just discard the info node from DebugFS
*/
static void idt_deinit_dbgfs(struct idt_ntb_dev *ndev)
{
debugfs_remove(ndev->dbgfs_info);
dev_dbg(&ndev->ntb.pdev->dev, "NTB device DebugFS node discarded");
}
/*=============================================================================
* 11. Basic PCIe device initialization
*=============================================================================
*/
/*
 * idt_check_setup() - Check whether the IDT PCIe-switch is properly
* pre-initialized
* @pdev: Pointer to the PCI device descriptor
*
* Return: zero on success, otherwise a negative error number.
*/
static int idt_check_setup(struct pci_dev *pdev)
{
u32 data;
int ret;
/* Read the BARSETUP0 */
ret = pci_read_config_dword(pdev, IDT_NT_BARSETUP0, &data);
if (ret != 0) {
dev_err(&pdev->dev,
"Failed to read BARSETUP0 config register");
return ret;
}
	/* Check whether BAR0 is enabled and mapped to the config space */
if (!(data & IDT_BARSETUP_EN) || !(data & IDT_BARSETUP_MODE_CFG)) {
dev_err(&pdev->dev, "BAR0 doesn't map config space");
return -EINVAL;
}
	/* Configuration space BAR0 must have the expected size */
if ((data & IDT_BARSETUP_SIZE_MASK) != IDT_BARSETUP_SIZE_CFG) {
dev_err(&pdev->dev, "Invalid size of config space");
return -EINVAL;
}
dev_dbg(&pdev->dev, "NTB device pre-initialized correctly");
return 0;
}
/*
 * idt_create_dev() - create the IDT PCIe-switch driver descriptor
* @pdev: Pointer to the PCI device descriptor
* @id: IDT PCIe-device configuration
*
 * It just allocates memory for the IDT PCIe-switch device structure and
 * initializes some commonly used fields.
 *
 * No release method is needed, since a managed device resource is used for
 * the memory allocation.
 *
 * Return: pointer to the descriptor on success, otherwise an ERR_PTR()-encoded
 * error.
*/
static struct idt_ntb_dev *idt_create_dev(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct idt_ntb_dev *ndev;
/* Allocate memory for the IDT PCIe-device descriptor */
ndev = devm_kzalloc(&pdev->dev, sizeof(*ndev), GFP_KERNEL);
if (!ndev) {
dev_err(&pdev->dev, "Memory allocation failed for descriptor");
return ERR_PTR(-ENOMEM);
}
/* Save the IDT PCIe-switch ports configuration */
ndev->swcfg = (struct idt_89hpes_cfg *)id->driver_data;
/* Save the PCI-device pointer inside the NTB device structure */
ndev->ntb.pdev = pdev;
	/* Initialize the spinlocks protecting the Doorbell, Message and GASA registers */
spin_lock_init(&ndev->db_mask_lock);
spin_lock_init(&ndev->msg_mask_lock);
spin_lock_init(&ndev->gasa_lock);
dev_info(&pdev->dev, "IDT %s discovered", ndev->swcfg->name);
dev_dbg(&pdev->dev, "NTB device descriptor created");
return ndev;
}
/*
* idt_init_pci() - initialize the basic PCI-related subsystem
* @ndev: Pointer to the IDT PCIe-switch driver descriptor
*
* Managed device resources will be freed automatically in case of failure or
* driver detachment.
*
* Return: zero on success, otherwise negative error number.
*/
static int idt_init_pci(struct idt_ntb_dev *ndev)
{
struct pci_dev *pdev = ndev->ntb.pdev;
int ret;
/* Initialize the bit mask of PCI/NTB DMA */
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret != 0) {
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret != 0) {
dev_err(&pdev->dev, "Failed to set DMA bit mask\n");
return ret;
}
dev_warn(&pdev->dev, "Cannot set DMA highmem bit mask\n");
}
/*
* The PCI core enables device error reporting. It's not critical to
* have AER disabled in the kernel.
*
* Cleanup nonfatal error status before getting to init.
*/
pci_aer_clear_nonfatal_status(pdev);
/* First enable the PCI device */
ret = pcim_enable_device(pdev);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to enable PCIe device\n");
return ret;
}
/*
	 * Enable bus mastering, which effectively enables MSI IRQs and
	 * Request TLP translation
*/
pci_set_master(pdev);
/* Request all BARs resources and map BAR0 only */
ret = pcim_iomap_regions_request_all(pdev, 1, NTB_NAME);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request resources\n");
goto err_clear_master;
}
/* Retrieve virtual address of BAR0 - PCI configuration space */
ndev->cfgspc = pcim_iomap_table(pdev)[0];
	/* Store the IDT driver data in the PCI-device private pointer */
pci_set_drvdata(pdev, ndev);
dev_dbg(&pdev->dev, "NT-function PCIe interface initialized");
return 0;
err_clear_master:
pci_clear_master(pdev);
return ret;
}
/*
* idt_deinit_pci() - deinitialize the basic PCI-related subsystem
* @ndev: Pointer to the IDT PCIe-switch driver descriptor
*
* Managed resources will be freed on the driver detachment
*/
static void idt_deinit_pci(struct idt_ntb_dev *ndev)
{
struct pci_dev *pdev = ndev->ntb.pdev;
/* Clean up the PCI-device private data pointer */
pci_set_drvdata(pdev, NULL);
	/* Clear bus mastering, disabling Request TLP translation */
pci_clear_master(pdev);
dev_dbg(&pdev->dev, "NT-function PCIe interface cleared");
}
/*===========================================================================
* 12. PCI bus callback functions
*===========================================================================
*/
/*
* idt_pci_probe() - PCI device probe callback
* @pdev: Pointer to PCI device structure
* @id: PCIe device custom descriptor
*
* Return: zero on success, otherwise negative error number
*/
static int idt_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct idt_ntb_dev *ndev;
int ret;
/* Check whether IDT PCIe-switch is properly pre-initialized */
ret = idt_check_setup(pdev);
if (ret != 0)
return ret;
/* Allocate the memory for IDT NTB device data */
ndev = idt_create_dev(pdev, id);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
/* Initialize the basic PCI subsystem of the device */
ret = idt_init_pci(ndev);
if (ret != 0)
return ret;
/* Scan ports of the IDT PCIe-switch */
(void)idt_scan_ports(ndev);
/* Initialize NTB link events subsystem */
idt_init_link(ndev);
/* Initialize MWs subsystem */
ret = idt_init_mws(ndev);
if (ret != 0)
goto err_deinit_link;
/* Initialize Messaging subsystem */
idt_init_msg(ndev);
/* Initialize hwmon interface */
idt_init_temp(ndev);
/* Initialize IDT interrupts handler */
ret = idt_init_isr(ndev);
if (ret != 0)
goto err_deinit_link;
/* Register IDT NTB devices on the NTB bus */
ret = idt_register_device(ndev);
if (ret != 0)
goto err_deinit_isr;
/* Initialize DebugFS info node */
(void)idt_init_dbgfs(ndev);
/* IDT PCIe-switch NTB driver is finally initialized */
dev_info(&pdev->dev, "IDT NTB device is ready");
/* May the force be with us... */
return 0;
err_deinit_isr:
idt_deinit_isr(ndev);
err_deinit_link:
idt_deinit_link(ndev);
idt_deinit_pci(ndev);
return ret;
}
/*
 * idt_pci_remove() - PCI device remove callback
* @pdev: Pointer to PCI device structure
*/
static void idt_pci_remove(struct pci_dev *pdev)
{
struct idt_ntb_dev *ndev = pci_get_drvdata(pdev);
/* Deinit the DebugFS node */
idt_deinit_dbgfs(ndev);
/* Unregister NTB device */
idt_unregister_device(ndev);
/* Stop the interrupts handling */
idt_deinit_isr(ndev);
/* Deinitialize link event subsystem */
idt_deinit_link(ndev);
/* Deinit basic PCI subsystem */
idt_deinit_pci(ndev);
	/* IDT PCIe-switch NTB driver is finally deinitialized */
dev_info(&pdev->dev, "IDT NTB device is removed");
/* Sayonara... */
}
/*
* IDT PCIe-switch models ports configuration structures
*/
static const struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
.name = "89HPES24NT6AG2",
.port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12}
};
static const struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
.name = "89HPES32NT8AG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
static const struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
.name = "89HPES32NT8BG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
static const struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
.name = "89HPES12NT12G2",
.port_cnt = 3, .ports = {0, 8, 16}
};
static const struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
.name = "89HPES16NT16G2",
.port_cnt = 4, .ports = {0, 8, 12, 16}
};
static const struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
.name = "89HPES24NT24G2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
static const struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
.name = "89HPES32NT24AG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
static const struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
.name = "89HPES32NT24BG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
/*
* PCI-ids table of the supported IDT PCIe-switch devices
*/
static const struct pci_device_id idt_pci_tbl[] = {
{IDT_PCI_DEVICE_IDS(89HPES24NT6AG2, idt_89hpes24nt6ag2_config)},
{IDT_PCI_DEVICE_IDS(89HPES32NT8AG2, idt_89hpes32nt8ag2_config)},
{IDT_PCI_DEVICE_IDS(89HPES32NT8BG2, idt_89hpes32nt8bg2_config)},
{IDT_PCI_DEVICE_IDS(89HPES12NT12G2, idt_89hpes12nt12g2_config)},
{IDT_PCI_DEVICE_IDS(89HPES16NT16G2, idt_89hpes16nt16g2_config)},
{IDT_PCI_DEVICE_IDS(89HPES24NT24G2, idt_89hpes24nt24g2_config)},
{IDT_PCI_DEVICE_IDS(89HPES32NT24AG2, idt_89hpes32nt24ag2_config)},
{IDT_PCI_DEVICE_IDS(89HPES32NT24BG2, idt_89hpes32nt24bg2_config)},
{0}
};
MODULE_DEVICE_TABLE(pci, idt_pci_tbl);
/*
* IDT PCIe-switch NT-function device driver structure definition
*/
static struct pci_driver idt_pci_driver = {
.name = KBUILD_MODNAME,
.probe = idt_pci_probe,
.remove = idt_pci_remove,
.id_table = idt_pci_tbl,
};
static int __init idt_pci_driver_init(void)
{
int ret;
pr_info("%s %s\n", NTB_DESC, NTB_VER);
/* Create the top DebugFS directory if the FS is initialized */
if (debugfs_initialized())
dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
/* Register the NTB hardware driver to handle the PCI device */
ret = pci_register_driver(&idt_pci_driver);
if (ret)
debugfs_remove_recursive(dbgfs_topdir);
return ret;
}
module_init(idt_pci_driver_init);
static void __exit idt_pci_driver_exit(void)
{
/* Unregister the NTB hardware driver */
pci_unregister_driver(&idt_pci_driver);
/* Discard the top DebugFS directory */
debugfs_remove_recursive(dbgfs_topdir);
}
module_exit(idt_pci_driver_exit);
| linux-master | drivers/ntb/hw/idt/ntb_hw_idt.c |
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
* Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* BSD LICENSE
*
* Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
* Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of AMD Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* AMD PCIe NTB Linux driver
*
* Contact Information:
* Xiangliang Yu <[email protected]>
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>
#include "ntb_hw_amd.h"
#define NTB_NAME "ntb_hw_amd"
#define NTB_DESC "AMD(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER "1.0"
MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("AMD Inc.");
static const struct file_operations amd_ntb_debugfs_info;
static struct dentry *debugfs_dir;
static int ndev_mw_to_bar(struct amd_ntb_dev *ndev, int idx)
{
if (idx < 0 || idx > ndev->mw_count)
return -EINVAL;
return ndev->dev_data->mw_idx << idx;
}
static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
return ntb_ndev(ntb)->mw_count;
}
static int amd_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
resource_size_t *addr_align,
resource_size_t *size_align,
resource_size_t *size_max)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
int bar;
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
bar = ndev_mw_to_bar(ndev, idx);
if (bar < 0)
return bar;
if (addr_align)
*addr_align = SZ_4K;
if (size_align)
*size_align = 1;
if (size_max)
*size_max = pci_resource_len(ndev->ntb.pdev, bar);
return 0;
}
static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
dma_addr_t addr, resource_size_t size)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
unsigned long xlat_reg, limit_reg = 0;
resource_size_t mw_size;
void __iomem *mmio, *peer_mmio;
u64 base_addr, limit, reg_val;
int bar;
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
bar = ndev_mw_to_bar(ndev, idx);
if (bar < 0)
return bar;
mw_size = pci_resource_len(ntb->pdev, bar);
/* make sure the range fits in the usable mw size */
if (size > mw_size)
return -EINVAL;
mmio = ndev->self_mmio;
peer_mmio = ndev->peer_mmio;
base_addr = pci_resource_start(ntb->pdev, bar);
if (bar != 1) {
xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2);
limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 2);
/* Set the limit if supported */
limit = size;
/* set and verify setting the translation address */
write64(addr, peer_mmio + xlat_reg);
reg_val = read64(peer_mmio + xlat_reg);
if (reg_val != addr) {
write64(0, peer_mmio + xlat_reg);
return -EIO;
}
/* set and verify setting the limit */
write64(limit, peer_mmio + limit_reg);
reg_val = read64(peer_mmio + limit_reg);
if (reg_val != limit) {
write64(base_addr, mmio + limit_reg);
write64(0, peer_mmio + xlat_reg);
return -EIO;
}
} else {
xlat_reg = AMD_BAR1XLAT_OFFSET;
limit_reg = AMD_BAR1LMT_OFFSET;
/* Set the limit if supported */
limit = size;
/* set and verify setting the translation address */
write64(addr, peer_mmio + xlat_reg);
reg_val = read64(peer_mmio + xlat_reg);
if (reg_val != addr) {
write64(0, peer_mmio + xlat_reg);
return -EIO;
}
/* set and verify setting the limit */
writel(limit, peer_mmio + limit_reg);
reg_val = readl(peer_mmio + limit_reg);
if (reg_val != limit) {
writel(base_addr, mmio + limit_reg);
writel(0, peer_mmio + xlat_reg);
return -EIO;
}
}
return 0;
}
static int amd_ntb_get_link_status(struct amd_ntb_dev *ndev)
{
struct pci_dev *pdev = NULL;
struct pci_dev *pci_swds = NULL;
struct pci_dev *pci_swus = NULL;
u32 stat;
int rc;
if (ndev->ntb.topo == NTB_TOPO_SEC) {
/* Locate the pointer to Downstream Switch for this device */
pci_swds = pci_upstream_bridge(ndev->ntb.pdev);
if (pci_swds) {
/*
* Locate the pointer to Upstream Switch for
* the Downstream Switch.
*/
pci_swus = pci_upstream_bridge(pci_swds);
if (pci_swus) {
rc = pcie_capability_read_dword(pci_swus,
PCI_EXP_LNKCTL,
&stat);
if (rc)
return 0;
} else {
return 0;
}
} else {
return 0;
}
} else if (ndev->ntb.topo == NTB_TOPO_PRI) {
/*
* For NTB primary, we simply read the Link Status and control
* register of the NTB device itself.
*/
pdev = ndev->ntb.pdev;
rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat);
if (rc)
return 0;
} else {
/* Catch all for everything else */
return 0;
}
ndev->lnk_sta = stat;
return 1;
}
static int amd_link_is_up(struct amd_ntb_dev *ndev)
{
int ret;
/*
* We consider the link to be up under two conditions:
*
* - When a link-up event is received. This is indicated by
* AMD_LINK_UP_EVENT set in peer_sta.
 *   - When the drivers on both sides of the link have been loaded.
* This is indicated by bit 1 being set in the peer
* SIDEINFO register.
*
* This function should return 1 when the latter of the above
* two conditions is true.
*
* Now consider the sequence of events - Link-Up event occurs,
* then the peer side driver loads. In this case, we would have
* received LINK_UP event and bit 1 of peer SIDEINFO is also
* set. What happens now if the link goes down? Bit 1 of
* peer SIDEINFO remains set, but LINK_DOWN bit is set in
* peer_sta. So we should return 0 from this function. Not only
* that, we clear bit 1 of peer SIDEINFO to 0, since the peer
* side driver did not even get a chance to clear it before
* the link went down. This can be the case of surprise link
* removal.
*
* LINK_UP event will always occur before the peer side driver
* gets loaded the very first time. So there can be a case when
* the LINK_UP event has occurred, but the peer side driver hasn't
* yet loaded. We return 0 in that case.
*
* There is also a special case when the primary side driver is
* unloaded and then loaded again. Since there is no change in
* the status of NTB secondary in this case, there is no Link-Up
* or Link-Down notification received. We recognize this condition
* with peer_sta being set to 0.
*
* If bit 1 of peer SIDEINFO register is not set, then we
* simply return 0 irrespective of the link up or down status
* set in peer_sta.
*/
ret = amd_poll_link(ndev);
if (ret) {
/*
* We need to check the below only for NTB primary. For NTB
* secondary, simply checking the result of PSIDE_INFO
* register will suffice.
*/
if (ndev->ntb.topo == NTB_TOPO_PRI) {
if ((ndev->peer_sta & AMD_LINK_UP_EVENT) ||
(ndev->peer_sta == 0))
return ret;
else if (ndev->peer_sta & AMD_LINK_DOWN_EVENT) {
/* Clear peer sideinfo register */
amd_clear_side_info_reg(ndev, true);
return 0;
}
} else { /* NTB_TOPO_SEC */
return ret;
}
}
return 0;
}
static u64 amd_ntb_link_is_up(struct ntb_dev *ntb,
enum ntb_speed *speed,
enum ntb_width *width)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
int ret = 0;
if (amd_link_is_up(ndev)) {
if (speed)
*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
if (width)
*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
dev_dbg(&ntb->pdev->dev, "link is up.\n");
ret = 1;
} else {
if (speed)
*speed = NTB_SPEED_NONE;
if (width)
*width = NTB_WIDTH_NONE;
dev_dbg(&ntb->pdev->dev, "link is down.\n");
}
return ret;
}
static int amd_ntb_link_enable(struct ntb_dev *ntb,
enum ntb_speed max_speed,
enum ntb_width max_width)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
/* Enable event interrupt */
ndev->int_mask &= ~AMD_EVENT_INTMASK;
writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
if (ndev->ntb.topo == NTB_TOPO_SEC)
return -EINVAL;
dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
return 0;
}
static int amd_ntb_link_disable(struct ntb_dev *ntb)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
/* Disable event interrupt */
ndev->int_mask |= AMD_EVENT_INTMASK;
writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
if (ndev->ntb.topo == NTB_TOPO_SEC)
return -EINVAL;
dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");
return 0;
}
static int amd_ntb_peer_mw_count(struct ntb_dev *ntb)
{
/* The same as for inbound MWs */
return ntb_ndev(ntb)->mw_count;
}
static int amd_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
phys_addr_t *base, resource_size_t *size)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
int bar;
bar = ndev_mw_to_bar(ndev, idx);
if (bar < 0)
return bar;
if (base)
*base = pci_resource_start(ndev->ntb.pdev, bar);
if (size)
*size = pci_resource_len(ndev->ntb.pdev, bar);
return 0;
}
static u64 amd_ntb_db_valid_mask(struct ntb_dev *ntb)
{
return ntb_ndev(ntb)->db_valid_mask;
}
static int amd_ntb_db_vector_count(struct ntb_dev *ntb)
{
return ntb_ndev(ntb)->db_count;
}
static u64 amd_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
if (db_vector < 0 || db_vector > ndev->db_count)
return 0;
return ntb_ndev(ntb)->db_valid_mask & (1ULL << db_vector);
}
static u64 amd_ntb_db_read(struct ntb_dev *ntb)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
return (u64)readw(mmio + AMD_DBSTAT_OFFSET);
}
static int amd_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
writew((u16)db_bits, mmio + AMD_DBSTAT_OFFSET);
return 0;
}
static int amd_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
unsigned long flags;
if (db_bits & ~ndev->db_valid_mask)
return -EINVAL;
spin_lock_irqsave(&ndev->db_mask_lock, flags);
ndev->db_mask |= db_bits;
writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
spin_unlock_irqrestore(&ndev->db_mask_lock, flags);
return 0;
}
static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
unsigned long flags;
if (db_bits & ~ndev->db_valid_mask)
return -EINVAL;
spin_lock_irqsave(&ndev->db_mask_lock, flags);
ndev->db_mask &= ~db_bits;
writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
spin_unlock_irqrestore(&ndev->db_mask_lock, flags);
return 0;
}
static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
writew((u16)db_bits, mmio + AMD_DBREQ_OFFSET);
return 0;
}
static int amd_ntb_spad_count(struct ntb_dev *ntb)
{
return ntb_ndev(ntb)->spad_count;
}
static u32 amd_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
u32 offset;
if (idx < 0 || idx >= ndev->spad_count)
return 0;
offset = ndev->self_spad + (idx << 2);
return readl(mmio + AMD_SPAD_OFFSET + offset);
}
static int amd_ntb_spad_write(struct ntb_dev *ntb,
int idx, u32 val)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
u32 offset;
if (idx < 0 || idx >= ndev->spad_count)
return -EINVAL;
offset = ndev->self_spad + (idx << 2);
writel(val, mmio + AMD_SPAD_OFFSET + offset);
return 0;
}
static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
u32 offset;
if (sidx < 0 || sidx >= ndev->spad_count)
return -EINVAL;
offset = ndev->peer_spad + (sidx << 2);
return readl(mmio + AMD_SPAD_OFFSET + offset);
}
static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
int sidx, u32 val)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
void __iomem *mmio = ndev->self_mmio;
u32 offset;
if (sidx < 0 || sidx >= ndev->spad_count)
return -EINVAL;
offset = ndev->peer_spad + (sidx << 2);
writel(val, mmio + AMD_SPAD_OFFSET + offset);
return 0;
}
static const struct ntb_dev_ops amd_ntb_ops = {
.mw_count = amd_ntb_mw_count,
.mw_get_align = amd_ntb_mw_get_align,
.mw_set_trans = amd_ntb_mw_set_trans,
.peer_mw_count = amd_ntb_peer_mw_count,
.peer_mw_get_addr = amd_ntb_peer_mw_get_addr,
.link_is_up = amd_ntb_link_is_up,
.link_enable = amd_ntb_link_enable,
.link_disable = amd_ntb_link_disable,
.db_valid_mask = amd_ntb_db_valid_mask,
.db_vector_count = amd_ntb_db_vector_count,
.db_vector_mask = amd_ntb_db_vector_mask,
.db_read = amd_ntb_db_read,
.db_clear = amd_ntb_db_clear,
.db_set_mask = amd_ntb_db_set_mask,
.db_clear_mask = amd_ntb_db_clear_mask,
.peer_db_set = amd_ntb_peer_db_set,
.spad_count = amd_ntb_spad_count,
.spad_read = amd_ntb_spad_read,
.spad_write = amd_ntb_spad_write,
.peer_spad_read = amd_ntb_peer_spad_read,
.peer_spad_write = amd_ntb_peer_spad_write,
};
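/*
 * Illustrative sketch (not part of this driver): NTB clients never call the
 * amd_ntb_ops callbacks above directly - they go through the generic wrappers
 * in include/linux/ntb.h, which dispatch to these ops. The hypothetical helper
 * below shows the typical pattern of posting a value to a peer scratchpad and
 * ringing a peer doorbell; it is excluded from the build.
 */
#if 0
static void amd_ntb_example_kick_peer(struct ntb_dev *ntb)
{
	/* Post a marker in peer scratchpad 0 ... */
	ntb_peer_spad_write(ntb, NTB_DEF_PEER_IDX, 0, 0xdeadbeef);
	/* ... and ring peer doorbell bit 0 to notify it */
	ntb_peer_db_set(ntb, BIT_ULL(0));
}
#endif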
static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit)
{
void __iomem *mmio = ndev->self_mmio;
int reg;
reg = readl(mmio + AMD_SMUACK_OFFSET);
reg |= bit;
writel(reg, mmio + AMD_SMUACK_OFFSET);
}
static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
{
void __iomem *mmio = ndev->self_mmio;
struct device *dev = &ndev->ntb.pdev->dev;
u32 status;
status = readl(mmio + AMD_INTSTAT_OFFSET);
if (!(status & AMD_EVENT_INTMASK))
return;
dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec);
status &= AMD_EVENT_INTMASK;
switch (status) {
case AMD_PEER_FLUSH_EVENT:
ndev->peer_sta |= AMD_PEER_FLUSH_EVENT;
dev_info(dev, "Flush is done.\n");
break;
case AMD_PEER_RESET_EVENT:
case AMD_LINK_DOWN_EVENT:
ndev->peer_sta |= status;
if (status == AMD_LINK_DOWN_EVENT)
ndev->peer_sta &= ~AMD_LINK_UP_EVENT;
amd_ack_smu(ndev, status);
/* link down first */
ntb_link_event(&ndev->ntb);
/* polling peer status */
schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
break;
case AMD_PEER_D3_EVENT:
case AMD_PEER_PMETO_EVENT:
case AMD_LINK_UP_EVENT:
ndev->peer_sta |= status;
if (status == AMD_LINK_UP_EVENT)
ndev->peer_sta &= ~AMD_LINK_DOWN_EVENT;
else if (status == AMD_PEER_D3_EVENT)
ndev->peer_sta &= ~AMD_PEER_D0_EVENT;
amd_ack_smu(ndev, status);
		/* notify the link status change */
ntb_link_event(&ndev->ntb);
break;
case AMD_PEER_D0_EVENT:
mmio = ndev->peer_mmio;
status = readl(mmio + AMD_PMESTAT_OFFSET);
/* check if this is WAKEUP event */
if (status & 0x1)
dev_info(dev, "Wakeup is done.\n");
ndev->peer_sta |= AMD_PEER_D0_EVENT;
ndev->peer_sta &= ~AMD_PEER_D3_EVENT;
amd_ack_smu(ndev, AMD_PEER_D0_EVENT);
/* start a timer to poll link status */
schedule_delayed_work(&ndev->hb_timer,
AMD_LINK_HB_TIMEOUT);
break;
default:
dev_info(dev, "event status = 0x%x.\n", status);
break;
}
/* Clear the interrupt status */
writel(status, mmio + AMD_INTSTAT_OFFSET);
}
static void amd_handle_db_event(struct amd_ntb_dev *ndev, int vec)
{
struct device *dev = &ndev->ntb.pdev->dev;
u64 status;
status = amd_ntb_db_read(&ndev->ntb);
dev_dbg(dev, "status = 0x%llx and vec = %d\n", status, vec);
/*
	 * Since we have reserved the highest-order DB bit for signalling the
	 * peer about a special event, this is the only status bit we are
	 * concerned with here.
*/
if (status & BIT(ndev->db_last_bit)) {
ntb_db_clear(&ndev->ntb, BIT(ndev->db_last_bit));
/* send link down event notification */
ntb_link_event(&ndev->ntb);
/*
		 * If we are here, it means the peer has signalled a special
		 * event notifying that its driver has been unloaded for some
		 * reason. Since there is a chance that the peer will load its
		 * driver again sometime, we schedule the link polling routine.
*/
schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
}
}
static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
{
dev_dbg(&ndev->ntb.pdev->dev, "vec %d\n", vec);
if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1))
amd_handle_event(ndev, vec);
if (vec < AMD_DB_CNT) {
amd_handle_db_event(ndev, vec);
ntb_db_event(&ndev->ntb, vec);
}
return IRQ_HANDLED;
}
static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
struct amd_ntb_vec *nvec = dev;
return ndev_interrupt(nvec->ndev, nvec->num);
}
static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
struct amd_ntb_dev *ndev = dev;
return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
}
static int ndev_init_isr(struct amd_ntb_dev *ndev,
int msix_min, int msix_max)
{
struct pci_dev *pdev;
int rc, i, msix_count, node;
pdev = ndev->ntb.pdev;
node = dev_to_node(&pdev->dev);
ndev->db_mask = ndev->db_valid_mask;
/* Try to set up msix irq */
ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
GFP_KERNEL, node);
if (!ndev->vec)
goto err_msix_vec_alloc;
ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
GFP_KERNEL, node);
if (!ndev->msix)
goto err_msix_alloc;
for (i = 0; i < msix_max; ++i)
ndev->msix[i].entry = i;
msix_count = pci_enable_msix_range(pdev, ndev->msix,
msix_min, msix_max);
if (msix_count < 0)
goto err_msix_enable;
	/* NOTE: Disable MSI-X if the allocated vector count is less than
	 * msix_min because of a hardware limitation.
	 */
if (msix_count < msix_min) {
pci_disable_msix(pdev);
goto err_msix_enable;
}
for (i = 0; i < msix_count; ++i) {
ndev->vec[i].ndev = ndev;
ndev->vec[i].num = i;
rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
"ndev_vec_isr", &ndev->vec[i]);
if (rc)
goto err_msix_request;
}
dev_dbg(&pdev->dev, "Using msix interrupts\n");
ndev->db_count = msix_min;
ndev->msix_vec_count = msix_max;
return 0;
err_msix_request:
while (i-- > 0)
free_irq(ndev->msix[i].vector, &ndev->vec[i]);
pci_disable_msix(pdev);
err_msix_enable:
kfree(ndev->msix);
err_msix_alloc:
kfree(ndev->vec);
err_msix_vec_alloc:
ndev->msix = NULL;
ndev->vec = NULL;
/* Try to set up msi irq */
rc = pci_enable_msi(pdev);
if (rc)
goto err_msi_enable;
rc = request_irq(pdev->irq, ndev_irq_isr, 0,
"ndev_irq_isr", ndev);
if (rc)
goto err_msi_request;
dev_dbg(&pdev->dev, "Using msi interrupts\n");
ndev->db_count = 1;
ndev->msix_vec_count = 1;
return 0;
err_msi_request:
pci_disable_msi(pdev);
err_msi_enable:
/* Try to set up intx irq */
pci_intx(pdev, 1);
rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
"ndev_irq_isr", ndev);
if (rc)
goto err_intx_request;
dev_dbg(&pdev->dev, "Using intx interrupts\n");
ndev->db_count = 1;
ndev->msix_vec_count = 1;
return 0;
err_intx_request:
return rc;
}
static void ndev_deinit_isr(struct amd_ntb_dev *ndev)
{
struct pci_dev *pdev;
void __iomem *mmio = ndev->self_mmio;
int i;
pdev = ndev->ntb.pdev;
/* Mask all doorbell interrupts */
ndev->db_mask = ndev->db_valid_mask;
writel(ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
if (ndev->msix) {
i = ndev->msix_vec_count;
while (i--)
free_irq(ndev->msix[i].vector, &ndev->vec[i]);
pci_disable_msix(pdev);
kfree(ndev->msix);
kfree(ndev->vec);
} else {
free_irq(pdev->irq, ndev);
if (pci_dev_msi_enabled(pdev))
pci_disable_msi(pdev);
else
pci_intx(pdev, 0);
}
}
static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
struct amd_ntb_dev *ndev;
void __iomem *mmio;
char *buf;
size_t buf_size;
ssize_t ret, off;
union { u64 v64; u32 v32; u16 v16; } u;
ndev = filp->private_data;
mmio = ndev->self_mmio;
buf_size = min(count, 0x800ul);
buf = kmalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
off = 0;
off += scnprintf(buf + off, buf_size - off,
"NTB Device Information:\n");
off += scnprintf(buf + off, buf_size - off,
"Connection Topology -\t%s\n",
ntb_topo_string(ndev->ntb.topo));
off += scnprintf(buf + off, buf_size - off,
"LNK STA -\t\t%#06x\n", ndev->lnk_sta);
if (!amd_link_is_up(ndev)) {
off += scnprintf(buf + off, buf_size - off,
"Link Status -\t\tDown\n");
} else {
off += scnprintf(buf + off, buf_size - off,
"Link Status -\t\tUp\n");
off += scnprintf(buf + off, buf_size - off,
"Link Speed -\t\tPCI-E Gen %u\n",
NTB_LNK_STA_SPEED(ndev->lnk_sta));
off += scnprintf(buf + off, buf_size - off,
"Link Width -\t\tx%u\n",
NTB_LNK_STA_WIDTH(ndev->lnk_sta));
}
off += scnprintf(buf + off, buf_size - off,
"Memory Window Count -\t%u\n", ndev->mw_count);
off += scnprintf(buf + off, buf_size - off,
"Scratchpad Count -\t%u\n", ndev->spad_count);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Count -\t%u\n", ndev->db_count);
off += scnprintf(buf + off, buf_size - off,
"MSIX Vector Count -\t%u\n", ndev->msix_vec_count);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
u.v32 = readl(ndev->self_mmio + AMD_DBMASK_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Mask -\t\t\t%#06x\n", u.v32);
u.v32 = readl(mmio + AMD_DBSTAT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"Doorbell Bell -\t\t\t%#06x\n", u.v32);
off += scnprintf(buf + off, buf_size - off,
"\nNTB Incoming XLAT:\n");
u.v64 = read64(mmio + AMD_BAR1XLAT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"XLAT1 -\t\t%#018llx\n", u.v64);
u.v64 = read64(ndev->self_mmio + AMD_BAR23XLAT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"XLAT23 -\t\t%#018llx\n", u.v64);
u.v64 = read64(ndev->self_mmio + AMD_BAR45XLAT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"XLAT45 -\t\t%#018llx\n", u.v64);
u.v32 = readl(mmio + AMD_BAR1LMT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"LMT1 -\t\t\t%#06x\n", u.v32);
u.v64 = read64(ndev->self_mmio + AMD_BAR23LMT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"LMT23 -\t\t\t%#018llx\n", u.v64);
u.v64 = read64(ndev->self_mmio + AMD_BAR45LMT_OFFSET);
off += scnprintf(buf + off, buf_size - off,
"LMT45 -\t\t\t%#018llx\n", u.v64);
ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
kfree(buf);
return ret;
}
static void ndev_init_debugfs(struct amd_ntb_dev *ndev)
{
if (!debugfs_dir) {
ndev->debugfs_dir = NULL;
ndev->debugfs_info = NULL;
} else {
ndev->debugfs_dir =
debugfs_create_dir(pci_name(ndev->ntb.pdev),
debugfs_dir);
ndev->debugfs_info =
debugfs_create_file("info", S_IRUSR,
ndev->debugfs_dir, ndev,
&amd_ntb_debugfs_info);
}
}
static void ndev_deinit_debugfs(struct amd_ntb_dev *ndev)
{
debugfs_remove_recursive(ndev->debugfs_dir);
}
static inline void ndev_init_struct(struct amd_ntb_dev *ndev,
struct pci_dev *pdev)
{
ndev->ntb.pdev = pdev;
ndev->ntb.topo = NTB_TOPO_NONE;
ndev->ntb.ops = &amd_ntb_ops;
ndev->int_mask = AMD_EVENT_INTMASK;
spin_lock_init(&ndev->db_mask_lock);
}
static int amd_poll_link(struct amd_ntb_dev *ndev)
{
void __iomem *mmio = ndev->peer_mmio;
u32 reg;
reg = readl(mmio + AMD_SIDEINFO_OFFSET);
reg &= AMD_SIDE_READY;
dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg);
ndev->cntl_sta = reg;
amd_ntb_get_link_status(ndev);
return ndev->cntl_sta;
}
static void amd_link_hb(struct work_struct *work)
{
struct amd_ntb_dev *ndev = hb_ndev(work);
if (amd_poll_link(ndev))
ntb_link_event(&ndev->ntb);
if (!amd_link_is_up(ndev))
schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
}
static int amd_init_isr(struct amd_ntb_dev *ndev)
{
return ndev_init_isr(ndev, AMD_DB_CNT, AMD_MSIX_VECTOR_CNT);
}
static void amd_set_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
{
void __iomem *mmio = NULL;
unsigned int reg;
if (peer)
mmio = ndev->peer_mmio;
else
mmio = ndev->self_mmio;
reg = readl(mmio + AMD_SIDEINFO_OFFSET);
if (!(reg & AMD_SIDE_READY)) {
reg |= AMD_SIDE_READY;
writel(reg, mmio + AMD_SIDEINFO_OFFSET);
}
}
static void amd_clear_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
{
void __iomem *mmio = NULL;
unsigned int reg;
if (peer)
mmio = ndev->peer_mmio;
else
mmio = ndev->self_mmio;
reg = readl(mmio + AMD_SIDEINFO_OFFSET);
if (reg & AMD_SIDE_READY) {
reg &= ~AMD_SIDE_READY;
writel(reg, mmio + AMD_SIDEINFO_OFFSET);
readl(mmio + AMD_SIDEINFO_OFFSET);
}
}
static void amd_init_side_info(struct amd_ntb_dev *ndev)
{
void __iomem *mmio = ndev->self_mmio;
u32 ntb_ctl;
amd_set_side_info_reg(ndev, false);
ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
}
static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
{
void __iomem *mmio = ndev->self_mmio;
u32 ntb_ctl;
amd_clear_side_info_reg(ndev, false);
ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
}
static int amd_init_ntb(struct amd_ntb_dev *ndev)
{
void __iomem *mmio = ndev->self_mmio;
ndev->mw_count = ndev->dev_data->mw_count;
ndev->spad_count = AMD_SPADS_CNT;
ndev->db_count = AMD_DB_CNT;
switch (ndev->ntb.topo) {
case NTB_TOPO_PRI:
case NTB_TOPO_SEC:
ndev->spad_count >>= 1;
if (ndev->ntb.topo == NTB_TOPO_PRI) {
ndev->self_spad = 0;
ndev->peer_spad = 0x20;
} else {
ndev->self_spad = 0x20;
ndev->peer_spad = 0;
}
INIT_DELAYED_WORK(&ndev->hb_timer, amd_link_hb);
schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
break;
default:
dev_err(&ndev->ntb.pdev->dev,
"AMD NTB does not support B2B mode.\n");
return -EINVAL;
}
/* Mask event interrupts */
writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
return 0;
}
static enum ntb_topo amd_get_topo(struct amd_ntb_dev *ndev)
{
void __iomem *mmio = ndev->self_mmio;
u32 info;
info = readl(mmio + AMD_SIDEINFO_OFFSET);
if (info & AMD_SIDE_MASK)
return NTB_TOPO_SEC;
else
return NTB_TOPO_PRI;
}
static int amd_init_dev(struct amd_ntb_dev *ndev)
{
void __iomem *mmio = ndev->self_mmio;
struct pci_dev *pdev;
int rc = 0;
pdev = ndev->ntb.pdev;
ndev->ntb.topo = amd_get_topo(ndev);
dev_dbg(&pdev->dev, "AMD NTB topo is %s\n",
ntb_topo_string(ndev->ntb.topo));
rc = amd_init_ntb(ndev);
if (rc)
return rc;
rc = amd_init_isr(ndev);
if (rc) {
dev_err(&pdev->dev, "fail to init isr.\n");
return rc;
}
ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
/*
* We reserve the highest order bit of the DB register which will
* be used to notify peer when the driver on this side is being
* un-loaded.
*/
ndev->db_last_bit =
find_last_bit((unsigned long *)&ndev->db_valid_mask,
hweight64(ndev->db_valid_mask));
writew((u16)~BIT(ndev->db_last_bit), mmio + AMD_DBMASK_OFFSET);
/*
* Since now there is one less bit to account for, the DB count
* and DB mask should be adjusted accordingly.
*/
ndev->db_count -= 1;
ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
/* Enable Link-Up and Link-Down event interrupts */
ndev->int_mask &= ~(AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT);
writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);
return 0;
}
static void amd_deinit_dev(struct amd_ntb_dev *ndev)
{
cancel_delayed_work_sync(&ndev->hb_timer);
ndev_deinit_isr(ndev);
}
static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
struct pci_dev *pdev)
{
int rc;
pci_set_drvdata(pdev, ndev);
rc = pci_enable_device(pdev);
if (rc)
goto err_pci_enable;
rc = pci_request_regions(pdev, NTB_NAME);
if (rc)
goto err_pci_regions;
pci_set_master(pdev);
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc) {
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
goto err_dma_mask;
dev_warn(&pdev->dev, "Cannot DMA highmem\n");
}
ndev->self_mmio = pci_iomap(pdev, 0, 0);
if (!ndev->self_mmio) {
rc = -EIO;
goto err_dma_mask;
}
ndev->peer_mmio = ndev->self_mmio + AMD_PEER_OFFSET;
return 0;
err_dma_mask:
pci_release_regions(pdev);
err_pci_regions:
pci_disable_device(pdev);
err_pci_enable:
pci_set_drvdata(pdev, NULL);
return rc;
}
static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev)
{
struct pci_dev *pdev = ndev->ntb.pdev;
pci_iounmap(pdev, ndev->self_mmio);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
static int amd_ntb_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct amd_ntb_dev *ndev;
int rc, node;
node = dev_to_node(&pdev->dev);
ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
if (!ndev) {
rc = -ENOMEM;
goto err_ndev;
}
ndev->dev_data = (struct ntb_dev_data *)id->driver_data;
ndev_init_struct(ndev, pdev);
rc = amd_ntb_init_pci(ndev, pdev);
if (rc)
goto err_init_pci;
rc = amd_init_dev(ndev);
if (rc)
goto err_init_dev;
/* write side info */
amd_init_side_info(ndev);
amd_poll_link(ndev);
ndev_init_debugfs(ndev);
rc = ntb_register_device(&ndev->ntb);
if (rc)
goto err_register;
dev_info(&pdev->dev, "NTB device registered.\n");
return 0;
err_register:
ndev_deinit_debugfs(ndev);
amd_deinit_dev(ndev);
err_init_dev:
amd_ntb_deinit_pci(ndev);
err_init_pci:
kfree(ndev);
err_ndev:
return rc;
}
static void amd_ntb_pci_remove(struct pci_dev *pdev)
{
struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);
/*
* Clear the READY bit in SIDEINFO register before sending DB event
* to the peer. This will make sure that when the peer handles the
* DB event, it correctly reads this bit as being 0.
*/
amd_deinit_side_info(ndev);
ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
ntb_unregister_device(&ndev->ntb);
ndev_deinit_debugfs(ndev);
amd_deinit_dev(ndev);
amd_ntb_deinit_pci(ndev);
kfree(ndev);
}
static void amd_ntb_pci_shutdown(struct pci_dev *pdev)
{
struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);
/* Send link down notification */
ntb_link_event(&ndev->ntb);
amd_deinit_side_info(ndev);
ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
ntb_unregister_device(&ndev->ntb);
ndev_deinit_debugfs(ndev);
amd_deinit_dev(ndev);
amd_ntb_deinit_pci(ndev);
kfree(ndev);
}
static const struct file_operations amd_ntb_debugfs_info = {
.owner = THIS_MODULE,
.open = simple_open,
.read = ndev_debugfs_read,
};
static const struct ntb_dev_data dev_data[] = {
{ /* for device 145b */
.mw_count = 3,
.mw_idx = 1,
},
{ /* for device 148b */
.mw_count = 2,
.mw_idx = 2,
},
};
static const struct pci_device_id amd_ntb_pci_tbl[] = {
{ PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] },
{ PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl);
static struct pci_driver amd_ntb_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = amd_ntb_pci_tbl,
.probe = amd_ntb_pci_probe,
.remove = amd_ntb_pci_remove,
.shutdown = amd_ntb_pci_shutdown,
};
static int __init amd_ntb_pci_driver_init(void)
{
int ret;
pr_info("%s %s\n", NTB_DESC, NTB_VER);
if (debugfs_initialized())
debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
ret = pci_register_driver(&amd_ntb_pci_driver);
if (ret)
debugfs_remove_recursive(debugfs_dir);
return ret;
}
module_init(amd_ntb_pci_driver_init);
static void __exit amd_ntb_pci_driver_exit(void)
{
pci_unregister_driver(&amd_ntb_pci_driver);
debugfs_remove_recursive(debugfs_dir);
}
module_exit(amd_ntb_pci_driver_exit);
| linux-master | drivers/ntb/hw/amd/ntb_hw_amd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Host side endpoint driver to implement Non-Transparent Bridge functionality
*
* Copyright (C) 2020 Texas Instruments
* Author: Kishon Vijay Abraham I <[email protected]>
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/ntb.h>
#define NTB_EPF_COMMAND 0x0
#define CMD_CONFIGURE_DOORBELL 1
#define CMD_TEARDOWN_DOORBELL 2
#define CMD_CONFIGURE_MW 3
#define CMD_TEARDOWN_MW 4
#define CMD_LINK_UP 5
#define CMD_LINK_DOWN 6
#define NTB_EPF_ARGUMENT 0x4
#define MSIX_ENABLE BIT(16)
#define NTB_EPF_CMD_STATUS 0x8
#define COMMAND_STATUS_OK 1
#define COMMAND_STATUS_ERROR 2
#define NTB_EPF_LINK_STATUS 0x0A
#define LINK_STATUS_UP BIT(0)
#define NTB_EPF_TOPOLOGY 0x0C
#define NTB_EPF_LOWER_ADDR 0x10
#define NTB_EPF_UPPER_ADDR 0x14
#define NTB_EPF_LOWER_SIZE 0x18
#define NTB_EPF_UPPER_SIZE 0x1C
#define NTB_EPF_MW_COUNT 0x20
#define NTB_EPF_MW1_OFFSET 0x24
#define NTB_EPF_SPAD_OFFSET 0x28
#define NTB_EPF_SPAD_COUNT 0x2C
#define NTB_EPF_DB_ENTRY_SIZE 0x30
#define NTB_EPF_DB_DATA(n) (0x34 + (n) * 4)
#define NTB_EPF_DB_OFFSET(n) (0xB4 + (n) * 4)
#define NTB_EPF_MIN_DB_COUNT 3
#define NTB_EPF_MAX_DB_COUNT 31
#define NTB_EPF_COMMAND_TIMEOUT 1000 /* 1 Sec */
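/*
* Illustrative arithmetic only (not used by the driver): the per-doorbell
* macros above expand to fixed offsets inside the control region, so each
* doorbell gets a matching data/offset register pair, e.g.
*
*	NTB_EPF_DB_DATA(2)	== 0x34 + 2 * 4 == 0x3C
*	NTB_EPF_DB_OFFSET(2)	== 0xB4 + 2 * 4 == 0xBC
*/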
enum pci_barno {
BAR_0,
BAR_1,
BAR_2,
BAR_3,
BAR_4,
BAR_5,
};
struct ntb_epf_dev {
struct ntb_dev ntb;
struct device *dev;
/* Mutex to serialize commands issued to the NTB EPF */
struct mutex cmd_lock;
enum pci_barno ctrl_reg_bar;
enum pci_barno peer_spad_reg_bar;
enum pci_barno db_reg_bar;
enum pci_barno mw_bar;
unsigned int mw_count;
unsigned int spad_count;
unsigned int db_count;
void __iomem *ctrl_reg;
void __iomem *db_reg;
void __iomem *peer_spad_reg;
unsigned int self_spad;
unsigned int peer_spad;
int db_val;
u64 db_valid_mask;
};
#define ntb_ndev(__ntb) container_of(__ntb, struct ntb_epf_dev, ntb)
struct ntb_epf_data {
/* BAR that contains both control region and self spad region */
enum pci_barno ctrl_reg_bar;
/* BAR that contains peer spad region */
enum pci_barno peer_spad_reg_bar;
/* BAR that contains Doorbell region and Memory window '1' */
enum pci_barno db_reg_bar;
/* BAR that contains memory windows */
enum pci_barno mw_bar;
};
static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command,
u32 argument)
{
ktime_t timeout;
bool timedout;
int ret = 0;
u32 status;
mutex_lock(&ndev->cmd_lock);
writel(argument, ndev->ctrl_reg + NTB_EPF_ARGUMENT);
writel(command, ndev->ctrl_reg + NTB_EPF_COMMAND);
timeout = ktime_add_ms(ktime_get(), NTB_EPF_COMMAND_TIMEOUT);
while (1) {
timedout = ktime_after(ktime_get(), timeout);
status = readw(ndev->ctrl_reg + NTB_EPF_CMD_STATUS);
if (status == COMMAND_STATUS_ERROR) {
ret = -EINVAL;
break;
}
if (status == COMMAND_STATUS_OK)
break;
if (WARN_ON(timedout)) {
ret = -ETIMEDOUT;
break;
}
usleep_range(5, 10);
}
writew(0, ndev->ctrl_reg + NTB_EPF_CMD_STATUS);
mutex_unlock(&ndev->cmd_lock);
return ret;
}
static int ntb_epf_mw_to_bar(struct ntb_epf_dev *ndev, int idx)
{
struct device *dev = ndev->dev;
if (idx < 0 || idx > ndev->mw_count) {
dev_err(dev, "Unsupported Memory Window index %d\n", idx);
return -EINVAL;
}
return idx + 2;
}
static int ntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
struct device *dev = ndev->dev;
if (pidx != NTB_DEF_PEER_IDX) {
dev_err(dev, "Unsupported Peer ID %d\n", pidx);
return -EINVAL;
}
return ndev->mw_count;
}
static int ntb_epf_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
resource_size_t *addr_align,
resource_size_t *size_align,
resource_size_t *size_max)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
struct device *dev = ndev->dev;
int bar;
if (pidx != NTB_DEF_PEER_IDX) {
dev_err(dev, "Unsupported Peer ID %d\n", pidx);
return -EINVAL;
}
bar = ntb_epf_mw_to_bar(ndev, idx);
if (bar < 0)
return bar;
if (addr_align)
*addr_align = SZ_4K;
if (size_align)
*size_align = 1;
if (size_max)
*size_max = pci_resource_len(ndev->ntb.pdev, bar);
return 0;
}
static u64 ntb_epf_link_is_up(struct ntb_dev *ntb,
enum ntb_speed *speed,
enum ntb_width *width)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
u32 status;
status = readw(ndev->ctrl_reg + NTB_EPF_LINK_STATUS);
return status & LINK_STATUS_UP;
}
static u32 ntb_epf_spad_read(struct ntb_dev *ntb, int idx)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
struct device *dev = ndev->dev;
u32 offset;
if (idx < 0 || idx >= ndev->spad_count) {
dev_err(dev, "READ: Invalid ScratchPad Index %d\n", idx);
return 0;
}
offset = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET);
offset += (idx << 2);
return readl(ndev->ctrl_reg + offset);
}
static int ntb_epf_spad_write(struct ntb_dev *ntb,
int idx, u32 val)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
struct device *dev = ndev->dev;
u32 offset;
if (idx < 0 || idx >= ndev->spad_count) {
dev_err(dev, "WRITE: Invalid ScratchPad Index %d\n", idx);
return -EINVAL;
}
offset = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET);
offset += (idx << 2);
writel(val, ndev->ctrl_reg + offset);
return 0;
}
static u32 ntb_epf_peer_spad_read(struct ntb_dev *ntb, int pidx, int idx)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
struct device *dev = ndev->dev;
u32 offset;
if (pidx != NTB_DEF_PEER_IDX) {
dev_err(dev, "Unsupported Peer ID %d\n", pidx);
return -EINVAL;
}
if (idx < 0 || idx >= ndev->spad_count) {
dev_err(dev, "WRITE: Invalid Peer ScratchPad Index %d\n", idx);
return -EINVAL;
}
offset = (idx << 2);
return readl(ndev->peer_spad_reg + offset);
}
static int ntb_epf_peer_spad_write(struct ntb_dev *ntb, int pidx,
int idx, u32 val)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
struct device *dev = ndev->dev;
u32 offset;
if (pidx != NTB_DEF_PEER_IDX) {
dev_err(dev, "Unsupported Peer ID %d\n", pidx);
return -EINVAL;
}
if (idx < 0 || idx >= ndev->spad_count) {
dev_err(dev, "WRITE: Invalid Peer ScratchPad Index %d\n", idx);
return -EINVAL;
}
offset = (idx << 2);
writel(val, ndev->peer_spad_reg + offset);
return 0;
}
static int ntb_epf_link_enable(struct ntb_dev *ntb,
enum ntb_speed max_speed,
enum ntb_width max_width)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
struct device *dev = ndev->dev;
int ret;
ret = ntb_epf_send_command(ndev, CMD_LINK_UP, 0);
if (ret) {
dev_err(dev, "Fail to enable link\n");
return ret;
}
return 0;
}
static int ntb_epf_link_disable(struct ntb_dev *ntb)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
struct device *dev = ndev->dev;
int ret;
ret = ntb_epf_send_command(ndev, CMD_LINK_DOWN, 0);
if (ret) {
dev_err(dev, "Fail to disable link\n");
return ret;
}
return 0;
}
static irqreturn_t ntb_epf_vec_isr(int irq, void *dev)
{
struct ntb_epf_dev *ndev = dev;
int irq_no;
irq_no = irq - pci_irq_vector(ndev->ntb.pdev, 0);
ndev->db_val = irq_no + 1;
if (irq_no == 0)
ntb_link_event(&ndev->ntb);
else
ntb_db_event(&ndev->ntb, irq_no);
return IRQ_HANDLED;
}
static int ntb_epf_init_isr(struct ntb_epf_dev *ndev, int msi_min, int msi_max)
{
struct pci_dev *pdev = ndev->ntb.pdev;
struct device *dev = ndev->dev;
u32 argument = MSIX_ENABLE;
int irq;
int ret;
int i;
irq = pci_alloc_irq_vectors(pdev, msi_min, msi_max, PCI_IRQ_MSIX);
if (irq < 0) {
dev_dbg(dev, "Failed to get MSIX interrupts\n");
irq = pci_alloc_irq_vectors(pdev, msi_min, msi_max,
PCI_IRQ_MSI);
if (irq < 0) {
dev_err(dev, "Failed to get MSI interrupts\n");
return irq;
}
argument &= ~MSIX_ENABLE;
}
for (i = 0; i < irq; i++) {
ret = request_irq(pci_irq_vector(pdev, i), ntb_epf_vec_isr,
0, "ntb_epf", ndev);
if (ret) {
dev_err(dev, "Failed to request irq\n");
goto err_request_irq;
}
}
ndev->db_count = irq - 1;
ret = ntb_epf_send_command(ndev, CMD_CONFIGURE_DOORBELL,
argument | irq);
if (ret) {
dev_err(dev, "Failed to configure doorbell\n");
goto err_configure_db;
}
return 0;
err_configure_db:
for (i = 0; i < ndev->db_count + 1; i++)
free_irq(pci_irq_vector(pdev, i), ndev);
err_request_irq:
pci_free_irq_vectors(pdev);
return ret;
}
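/*
* Worked example (illustrative only, not part of the driver): if five
* vectors are allocated above, ntb_epf_vec_isr() reports vector 0 as a
* link event and vectors 1..4 as doorbells, so db_count becomes 4 and
* ntb_epf_init_dev() later derives
*
*	db_valid_mask = BIT_ULL(4) - 1;		which is 0xf
*
* i.e. one valid doorbell bit per non-link vector.
*/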
static int ntb_epf_peer_mw_count(struct ntb_dev *ntb)
{
return ntb_ndev(ntb)->mw_count;
}
static int ntb_epf_spad_count(struct ntb_dev *ntb)
{
return ntb_ndev(ntb)->spad_count;
}
static u64 ntb_epf_db_valid_mask(struct ntb_dev *ntb)
{
return ntb_ndev(ntb)->db_valid_mask;
}
static int ntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
return 0;
}
static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
dma_addr_t addr, resource_size_t size)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
struct device *dev = ndev->dev;
resource_size_t mw_size;
int bar;
if (pidx != NTB_DEF_PEER_IDX) {
dev_err(dev, "Unsupported Peer ID %d\n", pidx);
return -EINVAL;
}
bar = idx + ndev->mw_bar;
mw_size = pci_resource_len(ntb->pdev, bar);
if (size > mw_size) {
dev_err(dev, "Size:%pa is greater than the MW size %pa\n",
&size, &mw_size);
return -EINVAL;
}
writel(lower_32_bits(addr), ndev->ctrl_reg + NTB_EPF_LOWER_ADDR);
writel(upper_32_bits(addr), ndev->ctrl_reg + NTB_EPF_UPPER_ADDR);
writel(lower_32_bits(size), ndev->ctrl_reg + NTB_EPF_LOWER_SIZE);
writel(upper_32_bits(size), ndev->ctrl_reg + NTB_EPF_UPPER_SIZE);
ntb_epf_send_command(ndev, CMD_CONFIGURE_MW, idx);
return 0;
}
static int ntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
struct device *dev = ndev->dev;
int ret;
ret = ntb_epf_send_command(ndev, CMD_TEARDOWN_MW, idx);
if (ret)
dev_err(dev, "Failed to teardown memory window\n");
return ret;
}
static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
phys_addr_t *base, resource_size_t *size)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
u32 offset = 0;
int bar;
if (idx == 0)
offset = readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET);
bar = idx + ndev->mw_bar;
if (base)
*base = pci_resource_start(ndev->ntb.pdev, bar) + offset;
if (size)
*size = pci_resource_len(ndev->ntb.pdev, bar) - offset;
return 0;
}
static int ntb_epf_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
u32 interrupt_num = ffs(db_bits) + 1;
struct device *dev = ndev->dev;
u32 db_entry_size;
u32 db_offset;
u32 db_data;
if (interrupt_num > ndev->db_count) {
dev_err(dev, "DB interrupt %d greater than Max Supported %d\n",
interrupt_num, ndev->db_count);
return -EINVAL;
}
db_entry_size = readl(ndev->ctrl_reg + NTB_EPF_DB_ENTRY_SIZE);
db_data = readl(ndev->ctrl_reg + NTB_EPF_DB_DATA(interrupt_num));
db_offset = readl(ndev->ctrl_reg + NTB_EPF_DB_OFFSET(interrupt_num));
writel(db_data, ndev->db_reg + (db_entry_size * interrupt_num) +
db_offset);
return 0;
}
static u64 ntb_epf_db_read(struct ntb_dev *ntb)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
return ndev->db_val;
}
static int ntb_epf_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
return 0;
}
static int ntb_epf_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
struct ntb_epf_dev *ndev = ntb_ndev(ntb);
ndev->db_val = 0;
return 0;
}
static const struct ntb_dev_ops ntb_epf_ops = {
.mw_count = ntb_epf_mw_count,
.spad_count = ntb_epf_spad_count,
.peer_mw_count = ntb_epf_peer_mw_count,
.db_valid_mask = ntb_epf_db_valid_mask,
.db_set_mask = ntb_epf_db_set_mask,
.mw_set_trans = ntb_epf_mw_set_trans,
.mw_clear_trans = ntb_epf_mw_clear_trans,
.peer_mw_get_addr = ntb_epf_peer_mw_get_addr,
.link_enable = ntb_epf_link_enable,
.spad_read = ntb_epf_spad_read,
.spad_write = ntb_epf_spad_write,
.peer_spad_read = ntb_epf_peer_spad_read,
.peer_spad_write = ntb_epf_peer_spad_write,
.peer_db_set = ntb_epf_peer_db_set,
.db_read = ntb_epf_db_read,
.mw_get_align = ntb_epf_mw_get_align,
.link_is_up = ntb_epf_link_is_up,
.db_clear_mask = ntb_epf_db_clear_mask,
.db_clear = ntb_epf_db_clear,
.link_disable = ntb_epf_link_disable,
};
static inline void ntb_epf_init_struct(struct ntb_epf_dev *ndev,
struct pci_dev *pdev)
{
ndev->ntb.pdev = pdev;
ndev->ntb.topo = NTB_TOPO_NONE;
ndev->ntb.ops = &ntb_epf_ops;
}
static int ntb_epf_init_dev(struct ntb_epf_dev *ndev)
{
struct device *dev = ndev->dev;
int ret;
/* One link interrupt; the remaining vectors are doorbell interrupts */
ret = ntb_epf_init_isr(ndev, NTB_EPF_MIN_DB_COUNT + 1,
NTB_EPF_MAX_DB_COUNT + 1);
if (ret) {
dev_err(dev, "Failed to init ISR\n");
return ret;
}
ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
ndev->mw_count = readl(ndev->ctrl_reg + NTB_EPF_MW_COUNT);
ndev->spad_count = readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT);
return 0;
}
static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
struct pci_dev *pdev)
{
struct device *dev = ndev->dev;
size_t spad_sz, spad_off;
int ret;
pci_set_drvdata(pdev, ndev);
ret = pci_enable_device(pdev);
if (ret) {
dev_err(dev, "Cannot enable PCI device\n");
goto err_pci_enable;
}
ret = pci_request_regions(pdev, "ntb");
if (ret) {
dev_err(dev, "Cannot obtain PCI resources\n");
goto err_pci_regions;
}
pci_set_master(pdev);
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "Cannot set DMA mask\n");
goto err_pci_regions;
}
dev_warn(&pdev->dev, "Cannot DMA highmem\n");
}
ndev->ctrl_reg = pci_iomap(pdev, ndev->ctrl_reg_bar, 0);
if (!ndev->ctrl_reg) {
ret = -EIO;
goto err_pci_regions;
}
if (ndev->peer_spad_reg_bar) {
ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
if (!ndev->peer_spad_reg) {
ret = -EIO;
goto err_pci_regions;
}
} else {
spad_sz = 4 * readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT);
spad_off = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET);
ndev->peer_spad_reg = ndev->ctrl_reg + spad_off + spad_sz;
}
ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0);
if (!ndev->db_reg) {
ret = -EIO;
goto err_pci_regions;
}
return 0;
err_pci_regions:
pci_disable_device(pdev);
err_pci_enable:
pci_set_drvdata(pdev, NULL);
return ret;
}
static void ntb_epf_deinit_pci(struct ntb_epf_dev *ndev)
{
struct pci_dev *pdev = ndev->ntb.pdev;
pci_iounmap(pdev, ndev->ctrl_reg);
pci_iounmap(pdev, ndev->peer_spad_reg);
pci_iounmap(pdev, ndev->db_reg);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
static void ntb_epf_cleanup_isr(struct ntb_epf_dev *ndev)
{
struct pci_dev *pdev = ndev->ntb.pdev;
int i;
ntb_epf_send_command(ndev, CMD_TEARDOWN_DOORBELL, ndev->db_count + 1);
for (i = 0; i < ndev->db_count + 1; i++)
free_irq(pci_irq_vector(pdev, i), ndev);
pci_free_irq_vectors(pdev);
}
static int ntb_epf_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
enum pci_barno peer_spad_reg_bar = BAR_1;
enum pci_barno ctrl_reg_bar = BAR_0;
enum pci_barno db_reg_bar = BAR_2;
enum pci_barno mw_bar = BAR_2;
struct device *dev = &pdev->dev;
struct ntb_epf_data *data;
struct ntb_epf_dev *ndev;
int ret;
if (pci_is_bridge(pdev))
return -ENODEV;
ndev = devm_kzalloc(dev, sizeof(*ndev), GFP_KERNEL);
if (!ndev)
return -ENOMEM;
data = (struct ntb_epf_data *)id->driver_data;
if (data) {
peer_spad_reg_bar = data->peer_spad_reg_bar;
ctrl_reg_bar = data->ctrl_reg_bar;
db_reg_bar = data->db_reg_bar;
mw_bar = data->mw_bar;
}
ndev->peer_spad_reg_bar = peer_spad_reg_bar;
ndev->ctrl_reg_bar = ctrl_reg_bar;
ndev->db_reg_bar = db_reg_bar;
ndev->mw_bar = mw_bar;
ndev->dev = dev;
ntb_epf_init_struct(ndev, pdev);
mutex_init(&ndev->cmd_lock);
ret = ntb_epf_init_pci(ndev, pdev);
if (ret) {
dev_err(dev, "Failed to init PCI\n");
return ret;
}
ret = ntb_epf_init_dev(ndev);
if (ret) {
dev_err(dev, "Failed to init device\n");
goto err_init_dev;
}
ret = ntb_register_device(&ndev->ntb);
if (ret) {
dev_err(dev, "Failed to register NTB device\n");
goto err_register_dev;
}
return 0;
err_register_dev:
ntb_epf_cleanup_isr(ndev);
err_init_dev:
ntb_epf_deinit_pci(ndev);
return ret;
}
static void ntb_epf_pci_remove(struct pci_dev *pdev)
{
struct ntb_epf_dev *ndev = pci_get_drvdata(pdev);
ntb_unregister_device(&ndev->ntb);
ntb_epf_cleanup_isr(ndev);
ntb_epf_deinit_pci(ndev);
}
static const struct ntb_epf_data j721e_data = {
.ctrl_reg_bar = BAR_0,
.peer_spad_reg_bar = BAR_1,
.db_reg_bar = BAR_2,
.mw_bar = BAR_2,
};
static const struct ntb_epf_data mx8_data = {
.ctrl_reg_bar = BAR_0,
.peer_spad_reg_bar = BAR_0,
.db_reg_bar = BAR_2,
.mw_bar = BAR_4,
};
static const struct pci_device_id ntb_epf_pci_tbl[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
.class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
.driver_data = (kernel_ulong_t)&j721e_data,
},
{
PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x0809),
.class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
.driver_data = (kernel_ulong_t)&mx8_data,
},
{ },
};
static struct pci_driver ntb_epf_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = ntb_epf_pci_tbl,
.probe = ntb_epf_pci_probe,
.remove = ntb_epf_pci_remove,
};
module_pci_driver(ntb_epf_pci_driver);
MODULE_DESCRIPTION("PCI ENDPOINT NTB HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/ntb/hw/epf/ntb_hw_epf.c |
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
* Copyright (C) 2017 T-Platforms All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
* Copyright (C) 2017 T-Platforms All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* PCIe NTB Debugging Tool Linux driver
*/
/*
* How to use this tool, by example.
*
* Assuming $DBG_DIR is something like:
* '/sys/kernel/debug/ntb_tool/0000:00:03.0'
* Suppose that, aside from the local device, there is at least one remote
* device connected to the NTB with index 0.
*-----------------------------------------------------------------------------
* Eg: check local/peer device information.
*
* # Get local device port number
* root@self# cat $DBG_DIR/port
*
* # Check local device functionality
* root@self# ls $DBG_DIR
* db msg1 msg_sts peer4/ port
* db_event msg2 peer0/ peer5/ spad0
* db_mask msg3 peer1/ peer_db spad1
* link msg_event peer2/ peer_db_mask spad2
* msg0 msg_mask peer3/ peer_spad spad3
* # As one can see it supports:
* # 1) four inbound message registers
* # 2) four inbound scratchpads
* # 3) up to six peer devices
*
* # Check peer device port number
* root@self# cat $DBG_DIR/peer0/port
*
* # Check peer device(s) functionality to be used
* root@self# ls $DBG_DIR/peer0
* link mw_trans0 mw_trans6 port
* link_event mw_trans1 mw_trans7 spad0
* msg0 mw_trans2 peer_mw_trans0 spad1
* msg1 mw_trans3 peer_mw_trans1 spad2
* msg2 mw_trans4 peer_mw_trans2 spad3
* msg3 mw_trans5 peer_mw_trans3
* # As one can see we got:
* # 1) four outbound message registers
* # 2) four outbound scratchpads
* # 3) eight inbound memory windows
* # 4) four outbound memory windows
*-----------------------------------------------------------------------------
* Eg: NTB link tests
*
* # Set local link up/down
* root@self# echo Y > $DBG_DIR/link
* root@self# echo N > $DBG_DIR/link
*
* # Check if link with peer device is up/down:
* root@self# cat $DBG_DIR/peer0/link
*
* # Block until the link is up/down
* root@self# echo Y > $DBG_DIR/peer0/link_event
* root@self# echo N > $DBG_DIR/peer0/link_event
*-----------------------------------------------------------------------------
* Eg: Doorbell registers tests (some functionality might be absent)
*
* # Set/clear/get local doorbell
* root@self# echo 's 1' > $DBG_DIR/db
* root@self# echo 'c 1' > $DBG_DIR/db
* root@self# cat $DBG_DIR/db
*
* # Set/clear/get local doorbell mask
* root@self# echo 's 1' > $DBG_DIR/db_mask
* root@self# echo 'c 1' > $DBG_DIR/db_mask
* root@self# cat $DBG_DIR/db_mask
*
* # Ring/clear/get peer doorbell
* root@peer# echo 's 1' > $DBG_DIR/peer_db
* root@peer# echo 'c 1' > $DBG_DIR/peer_db
* root@peer# cat $DBG_DIR/peer_db
*
* # Set/clear/get peer doorbell mask
* root@self# echo 's 1' > $DBG_DIR/peer_db_mask
* root@self# echo 'c 1' > $DBG_DIR/peer_db_mask
* root@self# cat $DBG_DIR/peer_db_mask
*
* # Block until local doorbell is set with specified value
* root@self# echo 1 > $DBG_DIR/db_event
*-----------------------------------------------------------------------------
* Eg: Message registers tests (functionality might be absent)
*
* # Set/clear/get in/out message registers status
* root@self# echo 's 1' > $DBG_DIR/msg_sts
* root@self# echo 'c 1' > $DBG_DIR/msg_sts
* root@self# cat $DBG_DIR/msg_sts
*
* # Set/clear in/out message registers mask
* root@self# echo 's 1' > $DBG_DIR/msg_mask
* root@self# echo 'c 1' > $DBG_DIR/msg_mask
*
* # Get inbound message register #0 value and source of port index
* root@self# cat $DBG_DIR/msg0
*
* # Send some data to peer over outbound message register #0
* root@self# echo 0x01020304 > $DBG_DIR/peer0/msg0
*-----------------------------------------------------------------------------
* Eg: Scratchpad registers tests (functionality might be absent)
*
* # Write/read to/from local scratchpad register #0
* root@peer# echo 0x01020304 > $DBG_DIR/spad0
* root@peer# cat $DBG_DIR/spad0
*
* # Write/read to/from peer scratchpad register #0
* root@peer# echo 0x01020304 > $DBG_DIR/peer0/spad0
* root@peer# cat $DBG_DIR/peer0/spad0
*-----------------------------------------------------------------------------
* Eg: Memory windows tests
*
* # Create inbound memory window buffer of specified size/get its base address
* root@peer# echo 16384 > $DBG_DIR/peer0/mw_trans0
* root@peer# cat $DBG_DIR/peer0/mw_trans0
*
* # Write/read data to/from inbound memory window
* root@peer# echo Hello > $DBG_DIR/peer0/mw0
* root@peer# head -c 7 $DBG_DIR/peer0/mw0
*
* # Map outbound memory window/check it settings (on peer device)
* root@peer# echo 0xADD0BA5E:16384 > $DBG_DIR/peer0/peer_mw_trans0
* root@peer# cat $DBG_DIR/peer0/peer_mw_trans0
*
* # Write/read data to/from outbound memory window (on peer device)
* root@peer# echo olleH > $DBG_DIR/peer0/peer_mw0
* root@peer# head -c 7 $DBG_DIR/peer0/peer_mw0
*/
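/*
* A minimal userspace sketch of the shell examples above, assuming the
* debugfs path shown at the top of this comment; the helper name is made
* up for illustration and is not part of this module:
*
*	#include <fcntl.h>
*	#include <unistd.h>
*
*	int ring_peer_db(void)
*	{
*		const char *path =
*			"/sys/kernel/debug/ntb_tool/0000:00:03.0/peer_db";
*		int fd = open(path, O_WRONLY);
*
*		if (fd < 0)
*			return -1;
*		if (write(fd, "s 1", 3) != 3) {	// set peer doorbell bit 0
*			close(fd);
*			return -1;
*		}
*		close(fd);
*		return 0;
*	}
*/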
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ntb.h>
#define DRIVER_NAME "ntb_tool"
#define DRIVER_VERSION "2.0"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Allen Hubbe <[email protected]>");
MODULE_DESCRIPTION("PCIe NTB Debugging Tool");
/*
* Inbound and outbound memory window descriptor. Which union members are
* used depends on the MW type the structure describes. mm_base/dma_base are the
* virtual and DMA address of an inbound MW. io_base/tr_base are the MMIO
* mapped virtual and xlat addresses of an outbound MW respectively.
*/
struct tool_mw {
int widx;
int pidx;
struct tool_ctx *tc;
union {
u8 *mm_base;
u8 __iomem *io_base;
};
union {
dma_addr_t dma_base;
u64 tr_base;
};
resource_size_t size;
struct dentry *dbgfs_file;
};
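/*
* Illustrative sketch of how the unions above are filled in (see
* tool_setup_mw() and tool_setup_peer_mw() below for the real code): an
* inbound MW uses the mm_base/dma_base pair,
*
*	inmw->mm_base = dma_alloc_coherent(dma_dev, size, &inmw->dma_base,
*					   GFP_KERNEL);
*
* while an outbound MW uses the io_base/tr_base pair,
*
*	outmw->io_base = ioremap_wc(map_base, map_size);
*	outmw->tr_base = xlat_addr;
*
* where dma_dev, size, map_base, map_size and xlat_addr stand in for the
* values the driver obtains from the NTB API.
*/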
/*
* Wrapper structure used to distinguish which peer an outbound MW is
* referenced from in the corresponding DebugFS directory I/O operations.
*/
struct tool_mw_wrap {
int pidx;
struct tool_mw *mw;
};
struct tool_msg {
int midx;
int pidx;
struct tool_ctx *tc;
};
struct tool_spad {
int sidx;
int pidx;
struct tool_ctx *tc;
};
struct tool_peer {
int pidx;
struct tool_ctx *tc;
int inmw_cnt;
struct tool_mw *inmws;
int outmw_cnt;
struct tool_mw_wrap *outmws;
int outmsg_cnt;
struct tool_msg *outmsgs;
int outspad_cnt;
struct tool_spad *outspads;
struct dentry *dbgfs_dir;
};
struct tool_ctx {
struct ntb_dev *ntb;
wait_queue_head_t link_wq;
wait_queue_head_t db_wq;
wait_queue_head_t msg_wq;
int outmw_cnt;
struct tool_mw *outmws;
int peer_cnt;
struct tool_peer *peers;
int inmsg_cnt;
struct tool_msg *inmsgs;
int inspad_cnt;
struct tool_spad *inspads;
struct dentry *dbgfs_dir;
};
#define TOOL_FOPS_RDWR(__name, __read, __write) \
const struct file_operations __name = { \
.owner = THIS_MODULE, \
.open = simple_open, \
.read = __read, \
.write = __write, \
}
#define TOOL_BUF_LEN 32
static struct dentry *tool_dbgfs_topdir;
/*==============================================================================
* NTB events handlers
*==============================================================================
*/
static void tool_link_event(void *ctx)
{
struct tool_ctx *tc = ctx;
enum ntb_speed speed;
enum ntb_width width;
int up;
up = ntb_link_is_up(tc->ntb, &speed, &width);
dev_dbg(&tc->ntb->dev, "link is %s speed %d width %d\n",
up ? "up" : "down", speed, width);
wake_up(&tc->link_wq);
}
static void tool_db_event(void *ctx, int vec)
{
struct tool_ctx *tc = ctx;
u64 db_bits, db_mask;
db_mask = ntb_db_vector_mask(tc->ntb, vec);
db_bits = ntb_db_read(tc->ntb);
dev_dbg(&tc->ntb->dev, "doorbell vec %d mask %#llx bits %#llx\n",
vec, db_mask, db_bits);
wake_up(&tc->db_wq);
}
static void tool_msg_event(void *ctx)
{
struct tool_ctx *tc = ctx;
u64 msg_sts;
msg_sts = ntb_msg_read_sts(tc->ntb);
dev_dbg(&tc->ntb->dev, "message bits %#llx\n", msg_sts);
wake_up(&tc->msg_wq);
}
static const struct ntb_ctx_ops tool_ops = {
.link_event = tool_link_event,
.db_event = tool_db_event,
.msg_event = tool_msg_event
};
/*==============================================================================
* Common read/write methods
*==============================================================================
*/
static ssize_t tool_fn_read(struct tool_ctx *tc, char __user *ubuf,
size_t size, loff_t *offp,
u64 (*fn_read)(struct ntb_dev *))
{
size_t buf_size;
char buf[TOOL_BUF_LEN];
ssize_t pos;
if (!fn_read)
return -EINVAL;
buf_size = min(size, sizeof(buf));
pos = scnprintf(buf, buf_size, "%#llx\n", fn_read(tc->ntb));
return simple_read_from_buffer(ubuf, size, offp, buf, pos);
}
static ssize_t tool_fn_write(struct tool_ctx *tc,
const char __user *ubuf,
size_t size, loff_t *offp,
int (*fn_set)(struct ntb_dev *, u64),
int (*fn_clear)(struct ntb_dev *, u64))
{
char *buf, cmd;
ssize_t ret;
u64 bits;
int n;
if (*offp)
return 0;
buf = memdup_user_nul(ubuf, size);
if (IS_ERR(buf))
return PTR_ERR(buf);
n = sscanf(buf, "%c %lli", &cmd, &bits);
kfree(buf);
if (n != 2) {
ret = -EINVAL;
} else if (cmd == 's') {
if (!fn_set)
ret = -EINVAL;
else
ret = fn_set(tc->ntb, bits);
} else if (cmd == 'c') {
if (!fn_clear)
ret = -EINVAL;
else
ret = fn_clear(tc->ntb, bits);
} else {
ret = -EINVAL;
}
return ret ? : size;
}
/*==============================================================================
* Port read/write methods
*==============================================================================
*/
static ssize_t tool_port_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
char buf[TOOL_BUF_LEN];
int pos;
pos = scnprintf(buf, sizeof(buf), "%d\n", ntb_port_number(tc->ntb));
return simple_read_from_buffer(ubuf, size, offp, buf, pos);
}
static TOOL_FOPS_RDWR(tool_port_fops,
tool_port_read,
NULL);
static ssize_t tool_peer_port_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_peer *peer = filep->private_data;
struct tool_ctx *tc = peer->tc;
char buf[TOOL_BUF_LEN];
int pos;
pos = scnprintf(buf, sizeof(buf), "%d\n",
ntb_peer_port_number(tc->ntb, peer->pidx));
return simple_read_from_buffer(ubuf, size, offp, buf, pos);
}
static TOOL_FOPS_RDWR(tool_peer_port_fops,
tool_peer_port_read,
NULL);
static int tool_init_peers(struct tool_ctx *tc)
{
int pidx;
tc->peer_cnt = ntb_peer_port_count(tc->ntb);
tc->peers = devm_kcalloc(&tc->ntb->dev, tc->peer_cnt,
sizeof(*tc->peers), GFP_KERNEL);
if (tc->peers == NULL)
return -ENOMEM;
for (pidx = 0; pidx < tc->peer_cnt; pidx++) {
tc->peers[pidx].pidx = pidx;
tc->peers[pidx].tc = tc;
}
return 0;
}
/*==============================================================================
* Link state read/write methods
*==============================================================================
*/
static ssize_t tool_link_write(struct file *filep, const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
bool val;
int ret;
ret = kstrtobool_from_user(ubuf, size, &val);
if (ret)
return ret;
if (val)
ret = ntb_link_enable(tc->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
else
ret = ntb_link_disable(tc->ntb);
if (ret)
return ret;
return size;
}
static TOOL_FOPS_RDWR(tool_link_fops,
NULL,
tool_link_write);
static ssize_t tool_peer_link_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_peer *peer = filep->private_data;
struct tool_ctx *tc = peer->tc;
char buf[3];
if (ntb_link_is_up(tc->ntb, NULL, NULL) & BIT(peer->pidx))
buf[0] = 'Y';
else
buf[0] = 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(ubuf, size, offp, buf, 2);
}
static TOOL_FOPS_RDWR(tool_peer_link_fops,
tool_peer_link_read,
NULL);
static ssize_t tool_peer_link_event_write(struct file *filep,
const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_peer *peer = filep->private_data;
struct tool_ctx *tc = peer->tc;
u64 link_msk;
bool val;
int ret;
ret = kstrtobool_from_user(ubuf, size, &val);
if (ret)
return ret;
link_msk = BIT_ULL_MASK(peer->pidx);
if (wait_event_interruptible(tc->link_wq,
!!(ntb_link_is_up(tc->ntb, NULL, NULL) & link_msk) == val))
return -ERESTART;
return size;
}
static TOOL_FOPS_RDWR(tool_peer_link_event_fops,
NULL,
tool_peer_link_event_write);
/*==============================================================================
* Memory windows read/write/setting methods
*==============================================================================
*/
static ssize_t tool_mw_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_mw *inmw = filep->private_data;
if (inmw->mm_base == NULL)
return -ENXIO;
return simple_read_from_buffer(ubuf, size, offp,
inmw->mm_base, inmw->size);
}
static ssize_t tool_mw_write(struct file *filep, const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_mw *inmw = filep->private_data;
if (inmw->mm_base == NULL)
return -ENXIO;
return simple_write_to_buffer(inmw->mm_base, inmw->size, offp,
ubuf, size);
}
static TOOL_FOPS_RDWR(tool_mw_fops,
tool_mw_read,
tool_mw_write);
static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx,
size_t req_size)
{
resource_size_t size, addr_align, size_align;
struct tool_mw *inmw = &tc->peers[pidx].inmws[widx];
char buf[TOOL_BUF_LEN];
int ret;
if (inmw->mm_base != NULL)
return 0;
ret = ntb_mw_get_align(tc->ntb, pidx, widx, &addr_align,
&size_align, &size);
if (ret)
return ret;
inmw->size = min_t(resource_size_t, req_size, size);
inmw->size = round_up(inmw->size, addr_align);
inmw->size = round_up(inmw->size, size_align);
inmw->mm_base = dma_alloc_coherent(&tc->ntb->pdev->dev, inmw->size,
&inmw->dma_base, GFP_KERNEL);
if (!inmw->mm_base)
return -ENOMEM;
if (!IS_ALIGNED(inmw->dma_base, addr_align)) {
ret = -ENOMEM;
goto err_free_dma;
}
ret = ntb_mw_set_trans(tc->ntb, pidx, widx, inmw->dma_base, inmw->size);
if (ret)
goto err_free_dma;
snprintf(buf, sizeof(buf), "mw%d", widx);
inmw->dbgfs_file = debugfs_create_file(buf, 0600,
tc->peers[pidx].dbgfs_dir, inmw,
&tool_mw_fops);
return 0;
err_free_dma:
dma_free_coherent(&tc->ntb->pdev->dev, inmw->size, inmw->mm_base,
inmw->dma_base);
inmw->mm_base = NULL;
inmw->dma_base = 0;
inmw->size = 0;
return ret;
}
static void tool_free_mw(struct tool_ctx *tc, int pidx, int widx)
{
struct tool_mw *inmw = &tc->peers[pidx].inmws[widx];
debugfs_remove(inmw->dbgfs_file);
if (inmw->mm_base != NULL) {
ntb_mw_clear_trans(tc->ntb, pidx, widx);
dma_free_coherent(&tc->ntb->pdev->dev, inmw->size,
inmw->mm_base, inmw->dma_base);
}
inmw->mm_base = NULL;
inmw->dma_base = 0;
inmw->size = 0;
inmw->dbgfs_file = NULL;
}
static ssize_t tool_mw_trans_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_mw *inmw = filep->private_data;
resource_size_t addr_align;
resource_size_t size_align;
resource_size_t size_max;
ssize_t ret, off = 0;
size_t buf_size;
char *buf;
buf_size = min_t(size_t, size, 512);
buf = kmalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = ntb_mw_get_align(inmw->tc->ntb, inmw->pidx, inmw->widx,
&addr_align, &size_align, &size_max);
if (ret)
goto err;
off += scnprintf(buf + off, buf_size - off,
"Inbound MW \t%d\n",
inmw->widx);
off += scnprintf(buf + off, buf_size - off,
"Port \t%d (%d)\n",
ntb_peer_port_number(inmw->tc->ntb, inmw->pidx),
inmw->pidx);
off += scnprintf(buf + off, buf_size - off,
"Window Address \t0x%pK\n", inmw->mm_base);
off += scnprintf(buf + off, buf_size - off,
"DMA Address \t%pad\n",
&inmw->dma_base);
off += scnprintf(buf + off, buf_size - off,
"Window Size \t%pap\n",
&inmw->size);
off += scnprintf(buf + off, buf_size - off,
"Alignment \t%pap\n",
&addr_align);
off += scnprintf(buf + off, buf_size - off,
"Size Alignment \t%pap\n",
&size_align);
off += scnprintf(buf + off, buf_size - off,
"Size Max \t%pap\n",
&size_max);
ret = simple_read_from_buffer(ubuf, size, offp, buf, off);
err:
kfree(buf);
return ret;
}
static ssize_t tool_mw_trans_write(struct file *filep, const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_mw *inmw = filep->private_data;
unsigned int val;
int ret;
ret = kstrtouint_from_user(ubuf, size, 0, &val);
if (ret)
return ret;
tool_free_mw(inmw->tc, inmw->pidx, inmw->widx);
if (val) {
ret = tool_setup_mw(inmw->tc, inmw->pidx, inmw->widx, val);
if (ret)
return ret;
}
return size;
}
static TOOL_FOPS_RDWR(tool_mw_trans_fops,
tool_mw_trans_read,
tool_mw_trans_write);
static ssize_t tool_peer_mw_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_mw *outmw = filep->private_data;
loff_t pos = *offp;
ssize_t ret;
void *buf;
if (outmw->io_base == NULL)
return -EIO;
if (pos >= outmw->size || !size)
return 0;
if (size > outmw->size - pos)
size = outmw->size - pos;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy_fromio(buf, outmw->io_base + pos, size);
ret = copy_to_user(ubuf, buf, size);
if (ret == size) {
ret = -EFAULT;
goto err_free;
}
size -= ret;
*offp = pos + size;
ret = size;
err_free:
kfree(buf);
return ret;
}
static ssize_t tool_peer_mw_write(struct file *filep, const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_mw *outmw = filep->private_data;
ssize_t ret;
loff_t pos = *offp;
void *buf;
if (outmw->io_base == NULL)
return -EIO;
if (pos >= outmw->size || !size)
return 0;
if (size > outmw->size - pos)
size = outmw->size - pos;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = copy_from_user(buf, ubuf, size);
if (ret == size) {
ret = -EFAULT;
goto err_free;
}
size -= ret;
*offp = pos + size;
ret = size;
memcpy_toio(outmw->io_base + pos, buf, size);
err_free:
kfree(buf);
return ret;
}
static TOOL_FOPS_RDWR(tool_peer_mw_fops,
tool_peer_mw_read,
tool_peer_mw_write);
static int tool_setup_peer_mw(struct tool_ctx *tc, int pidx, int widx,
u64 req_addr, size_t req_size)
{
struct tool_mw *outmw = &tc->outmws[widx];
resource_size_t map_size;
phys_addr_t map_base;
char buf[TOOL_BUF_LEN];
int ret;
if (outmw->io_base != NULL)
return 0;
ret = ntb_peer_mw_get_addr(tc->ntb, widx, &map_base, &map_size);
if (ret)
return ret;
ret = ntb_peer_mw_set_trans(tc->ntb, pidx, widx, req_addr, req_size);
if (ret)
return ret;
outmw->io_base = ioremap_wc(map_base, map_size);
if (outmw->io_base == NULL) {
ret = -EFAULT;
goto err_clear_trans;
}
outmw->tr_base = req_addr;
outmw->size = req_size;
outmw->pidx = pidx;
snprintf(buf, sizeof(buf), "peer_mw%d", widx);
outmw->dbgfs_file = debugfs_create_file(buf, 0600,
tc->peers[pidx].dbgfs_dir, outmw,
&tool_peer_mw_fops);
return 0;
err_clear_trans:
ntb_peer_mw_clear_trans(tc->ntb, pidx, widx);
return ret;
}
static void tool_free_peer_mw(struct tool_ctx *tc, int widx)
{
struct tool_mw *outmw = &tc->outmws[widx];
debugfs_remove(outmw->dbgfs_file);
if (outmw->io_base != NULL) {
iounmap(tc->outmws[widx].io_base);
ntb_peer_mw_clear_trans(tc->ntb, outmw->pidx, widx);
}
outmw->io_base = NULL;
outmw->tr_base = 0;
outmw->size = 0;
outmw->pidx = -1;
outmw->dbgfs_file = NULL;
}
static ssize_t tool_peer_mw_trans_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_mw_wrap *outmw_wrap = filep->private_data;
struct tool_mw *outmw = outmw_wrap->mw;
resource_size_t map_size;
phys_addr_t map_base;
ssize_t off = 0;
size_t buf_size;
char *buf;
int ret;
ret = ntb_peer_mw_get_addr(outmw->tc->ntb, outmw->widx,
&map_base, &map_size);
if (ret)
return ret;
buf_size = min_t(size_t, size, 512);
buf = kmalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
off += scnprintf(buf + off, buf_size - off,
"Outbound MW: \t%d\n", outmw->widx);
if (outmw->io_base != NULL) {
off += scnprintf(buf + off, buf_size - off,
"Port attached \t%d (%d)\n",
ntb_peer_port_number(outmw->tc->ntb, outmw->pidx),
outmw->pidx);
} else {
off += scnprintf(buf + off, buf_size - off,
"Port attached \t-1 (-1)\n");
}
off += scnprintf(buf + off, buf_size - off,
"Virtual address \t0x%pK\n", outmw->io_base);
off += scnprintf(buf + off, buf_size - off,
"Phys Address \t%pap\n", &map_base);
off += scnprintf(buf + off, buf_size - off,
"Mapping Size \t%pap\n", &map_size);
off += scnprintf(buf + off, buf_size - off,
"Translation Address \t0x%016llx\n", outmw->tr_base);
off += scnprintf(buf + off, buf_size - off,
"Window Size \t%pap\n", &outmw->size);
ret = simple_read_from_buffer(ubuf, size, offp, buf, off);
kfree(buf);
return ret;
}
static ssize_t tool_peer_mw_trans_write(struct file *filep,
const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_mw_wrap *outmw_wrap = filep->private_data;
struct tool_mw *outmw = outmw_wrap->mw;
size_t buf_size, wsize;
char buf[TOOL_BUF_LEN];
int ret, n;
u64 addr;
buf_size = min(size, (sizeof(buf) - 1));
if (copy_from_user(buf, ubuf, buf_size))
return -EFAULT;
buf[buf_size] = '\0';
n = sscanf(buf, "%lli:%zi", &addr, &wsize);
if (n != 2)
return -EINVAL;
tool_free_peer_mw(outmw->tc, outmw->widx);
if (wsize) {
ret = tool_setup_peer_mw(outmw->tc, outmw_wrap->pidx,
outmw->widx, addr, wsize);
if (ret)
return ret;
}
return size;
}
static TOOL_FOPS_RDWR(tool_peer_mw_trans_fops,
tool_peer_mw_trans_read,
tool_peer_mw_trans_write);
static int tool_init_mws(struct tool_ctx *tc)
{
int widx, pidx;
/* Initialize outbound memory windows */
tc->outmw_cnt = ntb_peer_mw_count(tc->ntb);
tc->outmws = devm_kcalloc(&tc->ntb->dev, tc->outmw_cnt,
sizeof(*tc->outmws), GFP_KERNEL);
if (tc->outmws == NULL)
return -ENOMEM;
for (widx = 0; widx < tc->outmw_cnt; widx++) {
tc->outmws[widx].widx = widx;
tc->outmws[widx].pidx = -1;
tc->outmws[widx].tc = tc;
}
/* Initialize inbound memory windows and outbound MWs wrapper */
for (pidx = 0; pidx < tc->peer_cnt; pidx++) {
tc->peers[pidx].inmw_cnt = ntb_mw_count(tc->ntb, pidx);
tc->peers[pidx].inmws =
devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].inmw_cnt,
sizeof(*tc->peers[pidx].inmws), GFP_KERNEL);
if (tc->peers[pidx].inmws == NULL)
return -ENOMEM;
for (widx = 0; widx < tc->peers[pidx].inmw_cnt; widx++) {
tc->peers[pidx].inmws[widx].widx = widx;
tc->peers[pidx].inmws[widx].pidx = pidx;
tc->peers[pidx].inmws[widx].tc = tc;
}
tc->peers[pidx].outmw_cnt = ntb_peer_mw_count(tc->ntb);
tc->peers[pidx].outmws =
devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].outmw_cnt,
sizeof(*tc->peers[pidx].outmws), GFP_KERNEL);
if (tc->peers[pidx].outmws == NULL)
return -ENOMEM;
for (widx = 0; widx < tc->peers[pidx].outmw_cnt; widx++) {
tc->peers[pidx].outmws[widx].pidx = pidx;
tc->peers[pidx].outmws[widx].mw = &tc->outmws[widx];
}
}
return 0;
}
static void tool_clear_mws(struct tool_ctx *tc)
{
int widx, pidx;
/* Free outbound memory windows */
for (widx = 0; widx < tc->outmw_cnt; widx++)
tool_free_peer_mw(tc, widx);
/* Free inbound memory windows */
for (pidx = 0; pidx < tc->peer_cnt; pidx++)
for (widx = 0; widx < tc->peers[pidx].inmw_cnt; widx++)
tool_free_mw(tc, pidx, widx);
}
/*==============================================================================
* Doorbell read/write methods
*==============================================================================
*/
static ssize_t tool_db_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->db_read);
}
static ssize_t tool_db_write(struct file *filep, const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_write(tc, ubuf, size, offp, tc->ntb->ops->db_set,
tc->ntb->ops->db_clear);
}
static TOOL_FOPS_RDWR(tool_db_fops,
tool_db_read,
tool_db_write);
static ssize_t tool_db_valid_mask_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->db_valid_mask);
}
static TOOL_FOPS_RDWR(tool_db_valid_mask_fops,
tool_db_valid_mask_read,
NULL);
static ssize_t tool_db_mask_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->db_read_mask);
}
static ssize_t tool_db_mask_write(struct file *filep, const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_write(tc, ubuf, size, offp, tc->ntb->ops->db_set_mask,
tc->ntb->ops->db_clear_mask);
}
static TOOL_FOPS_RDWR(tool_db_mask_fops,
tool_db_mask_read,
tool_db_mask_write);
static ssize_t tool_peer_db_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->peer_db_read);
}
static ssize_t tool_peer_db_write(struct file *filep, const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_write(tc, ubuf, size, offp, tc->ntb->ops->peer_db_set,
tc->ntb->ops->peer_db_clear);
}
static TOOL_FOPS_RDWR(tool_peer_db_fops,
tool_peer_db_read,
tool_peer_db_write);
static ssize_t tool_peer_db_mask_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_read(tc, ubuf, size, offp,
tc->ntb->ops->peer_db_read_mask);
}
static ssize_t tool_peer_db_mask_write(struct file *filep,
const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_write(tc, ubuf, size, offp,
tc->ntb->ops->peer_db_set_mask,
tc->ntb->ops->peer_db_clear_mask);
}
static TOOL_FOPS_RDWR(tool_peer_db_mask_fops,
tool_peer_db_mask_read,
tool_peer_db_mask_write);
static ssize_t tool_db_event_write(struct file *filep,
const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
u64 val;
int ret;
ret = kstrtou64_from_user(ubuf, size, 0, &val);
if (ret)
return ret;
if (wait_event_interruptible(tc->db_wq, ntb_db_read(tc->ntb) == val))
return -ERESTART;
return size;
}
static TOOL_FOPS_RDWR(tool_db_event_fops,
NULL,
tool_db_event_write);
/*==============================================================================
* Scratchpads read/write methods
*==============================================================================
*/
static ssize_t tool_spad_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_spad *spad = filep->private_data;
char buf[TOOL_BUF_LEN];
ssize_t pos;
if (!spad->tc->ntb->ops->spad_read)
return -EINVAL;
pos = scnprintf(buf, sizeof(buf), "%#x\n",
ntb_spad_read(spad->tc->ntb, spad->sidx));
return simple_read_from_buffer(ubuf, size, offp, buf, pos);
}
static ssize_t tool_spad_write(struct file *filep, const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_spad *spad = filep->private_data;
u32 val;
int ret;
if (!spad->tc->ntb->ops->spad_write) {
dev_dbg(&spad->tc->ntb->dev, "no spad write fn\n");
return -EINVAL;
}
ret = kstrtou32_from_user(ubuf, size, 0, &val);
if (ret)
return ret;
ret = ntb_spad_write(spad->tc->ntb, spad->sidx, val);
return ret ?: size;
}
static TOOL_FOPS_RDWR(tool_spad_fops,
tool_spad_read,
tool_spad_write);
static ssize_t tool_peer_spad_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_spad *spad = filep->private_data;
char buf[TOOL_BUF_LEN];
ssize_t pos;
if (!spad->tc->ntb->ops->peer_spad_read)
return -EINVAL;
pos = scnprintf(buf, sizeof(buf), "%#x\n",
ntb_peer_spad_read(spad->tc->ntb, spad->pidx, spad->sidx));
return simple_read_from_buffer(ubuf, size, offp, buf, pos);
}
static ssize_t tool_peer_spad_write(struct file *filep, const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_spad *spad = filep->private_data;
u32 val;
int ret;
if (!spad->tc->ntb->ops->peer_spad_write) {
dev_dbg(&spad->tc->ntb->dev, "no spad write fn\n");
return -EINVAL;
}
ret = kstrtou32_from_user(ubuf, size, 0, &val);
if (ret)
return ret;
ret = ntb_peer_spad_write(spad->tc->ntb, spad->pidx, spad->sidx, val);
return ret ?: size;
}
static TOOL_FOPS_RDWR(tool_peer_spad_fops,
tool_peer_spad_read,
tool_peer_spad_write);
static int tool_init_spads(struct tool_ctx *tc)
{
int sidx, pidx;
/* Initialize inbound scratchpad structures */
tc->inspad_cnt = ntb_spad_count(tc->ntb);
tc->inspads = devm_kcalloc(&tc->ntb->dev, tc->inspad_cnt,
sizeof(*tc->inspads), GFP_KERNEL);
if (tc->inspads == NULL)
return -ENOMEM;
for (sidx = 0; sidx < tc->inspad_cnt; sidx++) {
tc->inspads[sidx].sidx = sidx;
tc->inspads[sidx].pidx = -1;
tc->inspads[sidx].tc = tc;
}
/* Initialize outbound scratchpad structures */
for (pidx = 0; pidx < tc->peer_cnt; pidx++) {
tc->peers[pidx].outspad_cnt = ntb_spad_count(tc->ntb);
tc->peers[pidx].outspads =
devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].outspad_cnt,
sizeof(*tc->peers[pidx].outspads), GFP_KERNEL);
if (tc->peers[pidx].outspads == NULL)
return -ENOMEM;
for (sidx = 0; sidx < tc->peers[pidx].outspad_cnt; sidx++) {
tc->peers[pidx].outspads[sidx].sidx = sidx;
tc->peers[pidx].outspads[sidx].pidx = pidx;
tc->peers[pidx].outspads[sidx].tc = tc;
}
}
return 0;
}
/*==============================================================================
* Messages read/write methods
*==============================================================================
*/
static ssize_t tool_inmsg_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_msg *msg = filep->private_data;
char buf[TOOL_BUF_LEN];
ssize_t pos;
u32 data;
int pidx;
data = ntb_msg_read(msg->tc->ntb, &pidx, msg->midx);
pos = scnprintf(buf, sizeof(buf), "0x%08x<-%d\n", data, pidx);
return simple_read_from_buffer(ubuf, size, offp, buf, pos);
}
static TOOL_FOPS_RDWR(tool_inmsg_fops,
tool_inmsg_read,
NULL);
static ssize_t tool_outmsg_write(struct file *filep,
const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_msg *msg = filep->private_data;
u32 val;
int ret;
ret = kstrtou32_from_user(ubuf, size, 0, &val);
if (ret)
return ret;
ret = ntb_peer_msg_write(msg->tc->ntb, msg->pidx, msg->midx, val);
return ret ? : size;
}
static TOOL_FOPS_RDWR(tool_outmsg_fops,
NULL,
tool_outmsg_write);
static ssize_t tool_msg_sts_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->msg_read_sts);
}
static ssize_t tool_msg_sts_write(struct file *filep, const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_write(tc, ubuf, size, offp, NULL,
tc->ntb->ops->msg_clear_sts);
}
static TOOL_FOPS_RDWR(tool_msg_sts_fops,
tool_msg_sts_read,
tool_msg_sts_write);
static ssize_t tool_msg_inbits_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->msg_inbits);
}
static TOOL_FOPS_RDWR(tool_msg_inbits_fops,
tool_msg_inbits_read,
NULL);
static ssize_t tool_msg_outbits_read(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->msg_outbits);
}
static TOOL_FOPS_RDWR(tool_msg_outbits_fops,
tool_msg_outbits_read,
NULL);
static ssize_t tool_msg_mask_write(struct file *filep, const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
return tool_fn_write(tc, ubuf, size, offp,
tc->ntb->ops->msg_set_mask,
tc->ntb->ops->msg_clear_mask);
}
static TOOL_FOPS_RDWR(tool_msg_mask_fops,
NULL,
tool_msg_mask_write);
static ssize_t tool_msg_event_write(struct file *filep,
const char __user *ubuf,
size_t size, loff_t *offp)
{
struct tool_ctx *tc = filep->private_data;
u64 val;
int ret;
ret = kstrtou64_from_user(ubuf, size, 0, &val);
if (ret)
return ret;
if (wait_event_interruptible(tc->msg_wq,
ntb_msg_read_sts(tc->ntb) == val))
return -ERESTART;
return size;
}
static TOOL_FOPS_RDWR(tool_msg_event_fops,
NULL,
tool_msg_event_write);
static int tool_init_msgs(struct tool_ctx *tc)
{
int midx, pidx;
/* Initialize inbound message structures */
tc->inmsg_cnt = ntb_msg_count(tc->ntb);
tc->inmsgs = devm_kcalloc(&tc->ntb->dev, tc->inmsg_cnt,
sizeof(*tc->inmsgs), GFP_KERNEL);
if (tc->inmsgs == NULL)
return -ENOMEM;
for (midx = 0; midx < tc->inmsg_cnt; midx++) {
tc->inmsgs[midx].midx = midx;
tc->inmsgs[midx].pidx = -1;
tc->inmsgs[midx].tc = tc;
}
/* Initialize outbound message structures */
for (pidx = 0; pidx < tc->peer_cnt; pidx++) {
tc->peers[pidx].outmsg_cnt = ntb_msg_count(tc->ntb);
tc->peers[pidx].outmsgs =
devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].outmsg_cnt,
sizeof(*tc->peers[pidx].outmsgs), GFP_KERNEL);
if (tc->peers[pidx].outmsgs == NULL)
return -ENOMEM;
for (midx = 0; midx < tc->peers[pidx].outmsg_cnt; midx++) {
tc->peers[pidx].outmsgs[midx].midx = midx;
tc->peers[pidx].outmsgs[midx].pidx = pidx;
tc->peers[pidx].outmsgs[midx].tc = tc;
}
}
return 0;
}
/*==============================================================================
* Initialization methods
*==============================================================================
*/
static struct tool_ctx *tool_create_data(struct ntb_dev *ntb)
{
struct tool_ctx *tc;
tc = devm_kzalloc(&ntb->dev, sizeof(*tc), GFP_KERNEL);
if (tc == NULL)
return ERR_PTR(-ENOMEM);
tc->ntb = ntb;
init_waitqueue_head(&tc->link_wq);
init_waitqueue_head(&tc->db_wq);
init_waitqueue_head(&tc->msg_wq);
if (ntb_db_is_unsafe(ntb))
dev_dbg(&ntb->dev, "doorbell is unsafe\n");
if (ntb_spad_is_unsafe(ntb))
dev_dbg(&ntb->dev, "scratchpad is unsafe\n");
return tc;
}
static void tool_clear_data(struct tool_ctx *tc)
{
wake_up(&tc->link_wq);
wake_up(&tc->db_wq);
wake_up(&tc->msg_wq);
}
static int tool_init_ntb(struct tool_ctx *tc)
{
return ntb_set_ctx(tc->ntb, tc, &tool_ops);
}
static void tool_clear_ntb(struct tool_ctx *tc)
{
ntb_clear_ctx(tc->ntb);
ntb_link_disable(tc->ntb);
}
static void tool_setup_dbgfs(struct tool_ctx *tc)
{
int pidx, widx, sidx, midx;
char buf[TOOL_BUF_LEN];
/* This module is useless without debugfs... */
if (!tool_dbgfs_topdir) {
tc->dbgfs_dir = NULL;
return;
}
tc->dbgfs_dir = debugfs_create_dir(dev_name(&tc->ntb->dev),
tool_dbgfs_topdir);
debugfs_create_file("port", 0600, tc->dbgfs_dir,
tc, &tool_port_fops);
debugfs_create_file("link", 0600, tc->dbgfs_dir,
tc, &tool_link_fops);
debugfs_create_file("db", 0600, tc->dbgfs_dir,
tc, &tool_db_fops);
debugfs_create_file("db_valid_mask", 0600, tc->dbgfs_dir,
tc, &tool_db_valid_mask_fops);
debugfs_create_file("db_mask", 0600, tc->dbgfs_dir,
tc, &tool_db_mask_fops);
debugfs_create_file("db_event", 0600, tc->dbgfs_dir,
tc, &tool_db_event_fops);
debugfs_create_file("peer_db", 0600, tc->dbgfs_dir,
tc, &tool_peer_db_fops);
debugfs_create_file("peer_db_mask", 0600, tc->dbgfs_dir,
tc, &tool_peer_db_mask_fops);
if (tc->inspad_cnt != 0) {
for (sidx = 0; sidx < tc->inspad_cnt; sidx++) {
snprintf(buf, sizeof(buf), "spad%d", sidx);
debugfs_create_file(buf, 0600, tc->dbgfs_dir,
&tc->inspads[sidx], &tool_spad_fops);
}
}
if (tc->inmsg_cnt != 0) {
for (midx = 0; midx < tc->inmsg_cnt; midx++) {
snprintf(buf, sizeof(buf), "msg%d", midx);
debugfs_create_file(buf, 0600, tc->dbgfs_dir,
&tc->inmsgs[midx], &tool_inmsg_fops);
}
debugfs_create_file("msg_sts", 0600, tc->dbgfs_dir,
tc, &tool_msg_sts_fops);
debugfs_create_file("msg_inbits", 0600, tc->dbgfs_dir,
tc, &tool_msg_inbits_fops);
debugfs_create_file("msg_outbits", 0600, tc->dbgfs_dir,
tc, &tool_msg_outbits_fops);
debugfs_create_file("msg_mask", 0600, tc->dbgfs_dir,
tc, &tool_msg_mask_fops);
debugfs_create_file("msg_event", 0600, tc->dbgfs_dir,
tc, &tool_msg_event_fops);
}
for (pidx = 0; pidx < tc->peer_cnt; pidx++) {
snprintf(buf, sizeof(buf), "peer%d", pidx);
tc->peers[pidx].dbgfs_dir =
debugfs_create_dir(buf, tc->dbgfs_dir);
debugfs_create_file("port", 0600,
tc->peers[pidx].dbgfs_dir,
&tc->peers[pidx], &tool_peer_port_fops);
debugfs_create_file("link", 0200,
tc->peers[pidx].dbgfs_dir,
&tc->peers[pidx], &tool_peer_link_fops);
debugfs_create_file("link_event", 0200,
tc->peers[pidx].dbgfs_dir,
&tc->peers[pidx], &tool_peer_link_event_fops);
for (widx = 0; widx < tc->peers[pidx].inmw_cnt; widx++) {
snprintf(buf, sizeof(buf), "mw_trans%d", widx);
debugfs_create_file(buf, 0600,
tc->peers[pidx].dbgfs_dir,
&tc->peers[pidx].inmws[widx],
&tool_mw_trans_fops);
}
for (widx = 0; widx < tc->peers[pidx].outmw_cnt; widx++) {
snprintf(buf, sizeof(buf), "peer_mw_trans%d", widx);
debugfs_create_file(buf, 0600,
tc->peers[pidx].dbgfs_dir,
&tc->peers[pidx].outmws[widx],
&tool_peer_mw_trans_fops);
}
for (sidx = 0; sidx < tc->peers[pidx].outspad_cnt; sidx++) {
snprintf(buf, sizeof(buf), "spad%d", sidx);
debugfs_create_file(buf, 0600,
tc->peers[pidx].dbgfs_dir,
&tc->peers[pidx].outspads[sidx],
&tool_peer_spad_fops);
}
for (midx = 0; midx < tc->peers[pidx].outmsg_cnt; midx++) {
snprintf(buf, sizeof(buf), "msg%d", midx);
debugfs_create_file(buf, 0600,
tc->peers[pidx].dbgfs_dir,
&tc->peers[pidx].outmsgs[midx],
&tool_outmsg_fops);
}
}
}
static void tool_clear_dbgfs(struct tool_ctx *tc)
{
debugfs_remove_recursive(tc->dbgfs_dir);
}
static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
{
struct tool_ctx *tc;
int ret;
tc = tool_create_data(ntb);
if (IS_ERR(tc))
return PTR_ERR(tc);
ret = tool_init_peers(tc);
if (ret != 0)
goto err_clear_data;
ret = tool_init_mws(tc);
if (ret != 0)
goto err_clear_data;
ret = tool_init_spads(tc);
if (ret != 0)
goto err_clear_mws;
ret = tool_init_msgs(tc);
if (ret != 0)
goto err_clear_mws;
ret = tool_init_ntb(tc);
if (ret != 0)
goto err_clear_mws;
tool_setup_dbgfs(tc);
return 0;
err_clear_mws:
tool_clear_mws(tc);
err_clear_data:
tool_clear_data(tc);
return ret;
}
static void tool_remove(struct ntb_client *self, struct ntb_dev *ntb)
{
struct tool_ctx *tc = ntb->ctx;
tool_clear_dbgfs(tc);
tool_clear_ntb(tc);
tool_clear_mws(tc);
tool_clear_data(tc);
}
static struct ntb_client tool_client = {
.ops = {
.probe = tool_probe,
.remove = tool_remove,
}
};
static int __init tool_init(void)
{
int ret;
if (debugfs_initialized())
tool_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
ret = ntb_register_client(&tool_client);
if (ret)
debugfs_remove_recursive(tool_dbgfs_topdir);
return ret;
}
module_init(tool_init);
static void __exit tool_exit(void)
{
ntb_unregister_client(&tool_client);
debugfs_remove_recursive(tool_dbgfs_topdir);
}
module_exit(tool_exit);
| linux-master | drivers/ntb/test/ntb_tool.c |
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2017 T-Platforms. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* BSD LICENSE
*
* Copyright(c) 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2017 T-Platforms. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copy
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* PCIe NTB Perf Linux driver
*/
/*
* How to use this tool, by example.
*
* Assuming $DBG_DIR is something like:
* '/sys/kernel/debug/ntb_perf/0000:00:03.0'
 * Suppose that, aside from the local device, there is at least one remote
 * device connected to the NTB, with peer index 0.
*-----------------------------------------------------------------------------
* Eg: install driver with specified chunk/total orders and dma-enabled flag
*
* root@self# insmod ntb_perf.ko chunk_order=19 total_order=28 use_dma
*-----------------------------------------------------------------------------
* Eg: check NTB ports (index) and MW mapping information
*
* root@self# cat $DBG_DIR/info
*-----------------------------------------------------------------------------
* Eg: start performance test with peer (index 0) and get the test metrics
*
* root@self# echo 0 > $DBG_DIR/run
* root@self# cat $DBG_DIR/run
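 *-----------------------------------------------------------------------------
 * Eg: optionally adjust the number of measuring threads before the run
 *     (a usage sketch; the "threads_count" node is created by
 *      perf_setup_dbgfs() below and values outside 1..32 are rejected)
 *
 * root@self# echo 4 > $DBG_DIR/threads_count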
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/random.h>
#include <linux/ntb.h>
#define DRIVER_NAME "ntb_perf"
#define DRIVER_VERSION "2.0"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Dave Jiang <[email protected]>");
MODULE_DESCRIPTION("PCIe NTB Performance Measurement Tool");
#define MAX_THREADS_CNT 32
#define DEF_THREADS_CNT 1
#define MAX_CHUNK_SIZE SZ_1M
#define MAX_CHUNK_ORDER 20 /* no larger than 1M */
#define DMA_TRIES 100
#define DMA_MDELAY 10
#define MSG_TRIES 1000
#define MSG_UDELAY_LOW 1000000
#define MSG_UDELAY_HIGH 2000000
#define PERF_BUF_LEN 1024
static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Upper limit of memory window size");
static unsigned char chunk_order = 19; /* 512K */
module_param(chunk_order, byte, 0644);
MODULE_PARM_DESC(chunk_order, "Data chunk order [2^n] to transfer");
static unsigned char total_order = 30; /* 1G */
module_param(total_order, byte, 0644);
MODULE_PARM_DESC(total_order, "Total data order [2^n] to transfer");
static bool use_dma; /* default to 0 */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to measure performance");
/*==============================================================================
* Perf driver data definition
*==============================================================================
*/
enum perf_cmd {
PERF_CMD_INVAL = -1,/* invalid spad command */
PERF_CMD_SSIZE = 0, /* send out buffer size */
PERF_CMD_RSIZE = 1, /* recv in buffer size */
PERF_CMD_SXLAT = 2, /* send in buffer xlat */
PERF_CMD_RXLAT = 3, /* recv out buffer xlat */
PERF_CMD_CLEAR = 4, /* clear allocated memory */
PERF_STS_DONE = 5, /* init is done */
PERF_STS_LNKUP = 6, /* link up state flag */
};
struct perf_ctx;
struct perf_peer {
struct perf_ctx *perf;
int pidx;
int gidx;
/* Outbound MW params */
u64 outbuf_xlat;
resource_size_t outbuf_size;
void __iomem *outbuf;
phys_addr_t out_phys_addr;
dma_addr_t dma_dst_addr;
/* Inbound MW params */
dma_addr_t inbuf_xlat;
resource_size_t inbuf_size;
void *inbuf;
/* NTB connection setup service */
struct work_struct service;
unsigned long sts;
struct completion init_comp;
};
#define to_peer_service(__work) \
container_of(__work, struct perf_peer, service)
struct perf_thread {
struct perf_ctx *perf;
int tidx;
/* DMA-based test sync parameters */
atomic_t dma_sync;
wait_queue_head_t dma_wait;
struct dma_chan *dma_chan;
/* Data source and measured statistics */
void *src;
u64 copied;
ktime_t duration;
int status;
struct work_struct work;
};
#define to_thread_work(__work) \
container_of(__work, struct perf_thread, work)
struct perf_ctx {
struct ntb_dev *ntb;
/* Global device index and peers descriptors */
int gidx;
int pcnt;
struct perf_peer *peers;
/* Performance measuring work-threads interface */
unsigned long busy_flag;
wait_queue_head_t twait;
atomic_t tsync;
u8 tcnt;
struct perf_peer *test_peer;
struct perf_thread threads[MAX_THREADS_CNT];
/* Scratchpad/Message IO operations */
int (*cmd_send)(struct perf_peer *peer, enum perf_cmd cmd, u64 data);
int (*cmd_recv)(struct perf_ctx *perf, int *pidx, enum perf_cmd *cmd,
u64 *data);
struct dentry *dbgfs_dir;
};
/*
* Scratchpads-base commands interface
*/
#define PERF_SPAD_CNT(_pcnt) \
(3*((_pcnt) + 1))
#define PERF_SPAD_CMD(_gidx) \
(3*(_gidx))
#define PERF_SPAD_LDATA(_gidx) \
(3*(_gidx) + 1)
#define PERF_SPAD_HDATA(_gidx) \
(3*(_gidx) + 2)
#define PERF_SPAD_NOTIFY(_gidx) \
(BIT_ULL(_gidx))
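/*
 * For illustration: with a global device index of 1, PERF_SPAD_CMD/LDATA/HDATA
 * resolve to scratchpads 3, 4 and 5, and PERF_SPAD_NOTIFY to doorbell bit 1,
 * so each global index owns its own triple of scratchpads plus one doorbell
 * bit (see perf_spad_cmd_send() and perf_spad_cmd_recv()).
 */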
/*
* Messages-base commands interface
*/
#define PERF_MSG_CNT 3
#define PERF_MSG_CMD 0
#define PERF_MSG_LDATA 1
#define PERF_MSG_HDATA 2
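/*
 * For illustration: perf_msg_cmd_send() delivers a command as three peer
 * message writes, PERF_MSG_LDATA with the low 32 bits of data, PERF_MSG_HDATA
 * with the high 32 bits, and finally PERF_MSG_CMD, whose write triggers the
 * message event on the peer side.
 */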
/*==============================================================================
* Static data declarations
*==============================================================================
*/
static struct dentry *perf_dbgfs_topdir;
static struct workqueue_struct *perf_wq __read_mostly;
/*==============================================================================
* NTB cross-link commands execution service
*==============================================================================
*/
static void perf_terminate_test(struct perf_ctx *perf);
static inline bool perf_link_is_up(struct perf_peer *peer)
{
u64 link;
link = ntb_link_is_up(peer->perf->ntb, NULL, NULL);
return !!(link & BIT_ULL_MASK(peer->pidx));
}
static int perf_spad_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
u64 data)
{
struct perf_ctx *perf = peer->perf;
int try;
u32 sts;
dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);
/*
	 * Perform a predefined number of attempts before giving up.
	 * The data is sent to the port-specific scratchpad in order to
	 * prevent a multi-port access race condition. Additionally there
	 * is no need for local locking, since only the thread-safe
	 * service work uses this method.
*/
for (try = 0; try < MSG_TRIES; try++) {
if (!perf_link_is_up(peer))
return -ENOLINK;
sts = ntb_peer_spad_read(perf->ntb, peer->pidx,
PERF_SPAD_CMD(perf->gidx));
if (sts != PERF_CMD_INVAL) {
usleep_range(MSG_UDELAY_LOW, MSG_UDELAY_HIGH);
continue;
}
ntb_peer_spad_write(perf->ntb, peer->pidx,
PERF_SPAD_LDATA(perf->gidx),
lower_32_bits(data));
ntb_peer_spad_write(perf->ntb, peer->pidx,
PERF_SPAD_HDATA(perf->gidx),
upper_32_bits(data));
ntb_peer_spad_write(perf->ntb, peer->pidx,
PERF_SPAD_CMD(perf->gidx),
cmd);
ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx));
dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n",
PERF_SPAD_NOTIFY(peer->gidx));
break;
}
return try < MSG_TRIES ? 0 : -EAGAIN;
}
static int perf_spad_cmd_recv(struct perf_ctx *perf, int *pidx,
enum perf_cmd *cmd, u64 *data)
{
struct perf_peer *peer;
u32 val;
ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
/*
	 * We scan all the peers, since the cleared DB may have been set by
	 * any of them. Yes, this means peers with smaller indexes are
	 * serviced with higher priority, but it keeps the scratchpad and
	 * message code unified and simple.
*/
for (*pidx = 0; *pidx < perf->pcnt; (*pidx)++) {
peer = &perf->peers[*pidx];
if (!perf_link_is_up(peer))
continue;
val = ntb_spad_read(perf->ntb, PERF_SPAD_CMD(peer->gidx));
if (val == PERF_CMD_INVAL)
continue;
*cmd = val;
val = ntb_spad_read(perf->ntb, PERF_SPAD_LDATA(peer->gidx));
*data = val;
val = ntb_spad_read(perf->ntb, PERF_SPAD_HDATA(peer->gidx));
*data |= (u64)val << 32;
		/* The next command can be retrieved from now on */
ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx),
PERF_CMD_INVAL);
dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data);
return 0;
}
return -ENODATA;
}
static int perf_msg_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
u64 data)
{
struct perf_ctx *perf = peer->perf;
int try, ret;
u64 outbits;
dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);
/*
	 * Perform a predefined number of attempts before giving up. Message
	 * registers are free of race conditions when accessed from
	 * different ports, so there is no need to split the registers by
	 * global device index. Local locking isn't needed either, since
	 * this method is only used from the service work.
*/
outbits = ntb_msg_outbits(perf->ntb);
for (try = 0; try < MSG_TRIES; try++) {
if (!perf_link_is_up(peer))
return -ENOLINK;
ret = ntb_msg_clear_sts(perf->ntb, outbits);
if (ret)
return ret;
ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_LDATA,
lower_32_bits(data));
if (ntb_msg_read_sts(perf->ntb) & outbits) {
usleep_range(MSG_UDELAY_LOW, MSG_UDELAY_HIGH);
continue;
}
ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA,
upper_32_bits(data));
/* This call shall trigger peer message event */
ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd);
break;
}
return try < MSG_TRIES ? 0 : -EAGAIN;
}
static int perf_msg_cmd_recv(struct perf_ctx *perf, int *pidx,
enum perf_cmd *cmd, u64 *data)
{
u64 inbits;
u32 val;
inbits = ntb_msg_inbits(perf->ntb);
if (hweight64(ntb_msg_read_sts(perf->ntb) & inbits) < 3)
return -ENODATA;
val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_CMD);
*cmd = val;
val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_LDATA);
*data = val;
val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_HDATA);
*data |= (u64)val << 32;
	/* The next command can be retrieved from now on */
ntb_msg_clear_sts(perf->ntb, inbits);
dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data);
return 0;
}
static int perf_cmd_send(struct perf_peer *peer, enum perf_cmd cmd, u64 data)
{
struct perf_ctx *perf = peer->perf;
if (cmd == PERF_CMD_SSIZE || cmd == PERF_CMD_SXLAT)
return perf->cmd_send(peer, cmd, data);
dev_err(&perf->ntb->dev, "Send invalid command\n");
return -EINVAL;
}
static int perf_cmd_exec(struct perf_peer *peer, enum perf_cmd cmd)
{
switch (cmd) {
case PERF_CMD_SSIZE:
case PERF_CMD_RSIZE:
case PERF_CMD_SXLAT:
case PERF_CMD_RXLAT:
case PERF_CMD_CLEAR:
break;
default:
dev_err(&peer->perf->ntb->dev, "Exec invalid command\n");
return -EINVAL;
}
	/* No memory barrier is needed, since bit ops have internal locking */
set_bit(cmd, &peer->sts);
dev_dbg(&peer->perf->ntb->dev, "CMD exec: %d\n", cmd);
(void)queue_work(system_highpri_wq, &peer->service);
return 0;
}
static int perf_cmd_recv(struct perf_ctx *perf)
{
struct perf_peer *peer;
int ret, pidx, cmd;
u64 data;
while (!(ret = perf->cmd_recv(perf, &pidx, &cmd, &data))) {
peer = &perf->peers[pidx];
switch (cmd) {
case PERF_CMD_SSIZE:
peer->inbuf_size = data;
return perf_cmd_exec(peer, PERF_CMD_RSIZE);
case PERF_CMD_SXLAT:
peer->outbuf_xlat = data;
return perf_cmd_exec(peer, PERF_CMD_RXLAT);
default:
dev_err(&perf->ntb->dev, "Recv invalid command\n");
return -EINVAL;
}
}
/* Return 0 if no data left to process, otherwise an error */
return ret == -ENODATA ? 0 : ret;
}
static void perf_link_event(void *ctx)
{
struct perf_ctx *perf = ctx;
struct perf_peer *peer;
bool lnk_up;
int pidx;
for (pidx = 0; pidx < perf->pcnt; pidx++) {
peer = &perf->peers[pidx];
lnk_up = perf_link_is_up(peer);
if (lnk_up &&
!test_and_set_bit(PERF_STS_LNKUP, &peer->sts)) {
perf_cmd_exec(peer, PERF_CMD_SSIZE);
} else if (!lnk_up &&
test_and_clear_bit(PERF_STS_LNKUP, &peer->sts)) {
perf_cmd_exec(peer, PERF_CMD_CLEAR);
}
}
}
static void perf_db_event(void *ctx, int vec)
{
struct perf_ctx *perf = ctx;
dev_dbg(&perf->ntb->dev, "DB vec %d mask %#llx bits %#llx\n", vec,
ntb_db_vector_mask(perf->ntb, vec), ntb_db_read(perf->ntb));
/* Just receive all available commands */
(void)perf_cmd_recv(perf);
}
static void perf_msg_event(void *ctx)
{
struct perf_ctx *perf = ctx;
dev_dbg(&perf->ntb->dev, "Msg status bits %#llx\n",
ntb_msg_read_sts(perf->ntb));
/* Messages are only sent one-by-one */
(void)perf_cmd_recv(perf);
}
static const struct ntb_ctx_ops perf_ops = {
.link_event = perf_link_event,
.db_event = perf_db_event,
.msg_event = perf_msg_event
};
static void perf_free_outbuf(struct perf_peer *peer)
{
(void)ntb_peer_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
}
static int perf_setup_outbuf(struct perf_peer *peer)
{
struct perf_ctx *perf = peer->perf;
int ret;
/* Outbuf size can be unaligned due to custom max_mw_size */
ret = ntb_peer_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
peer->outbuf_xlat, peer->outbuf_size);
if (ret) {
dev_err(&perf->ntb->dev, "Failed to set outbuf translation\n");
return ret;
}
/* Initialization is finally done */
set_bit(PERF_STS_DONE, &peer->sts);
complete_all(&peer->init_comp);
return 0;
}
static void perf_free_inbuf(struct perf_peer *peer)
{
if (!peer->inbuf)
return;
(void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
dma_free_coherent(&peer->perf->ntb->pdev->dev, peer->inbuf_size,
peer->inbuf, peer->inbuf_xlat);
peer->inbuf = NULL;
}
static int perf_setup_inbuf(struct perf_peer *peer)
{
resource_size_t xlat_align, size_align, size_max;
struct perf_ctx *perf = peer->perf;
int ret;
/* Get inbound MW parameters */
ret = ntb_mw_get_align(perf->ntb, peer->pidx, perf->gidx,
&xlat_align, &size_align, &size_max);
if (ret) {
dev_err(&perf->ntb->dev, "Couldn't get inbuf restrictions\n");
return ret;
}
if (peer->inbuf_size > size_max) {
dev_err(&perf->ntb->dev, "Too big inbuf size %pa > %pa\n",
&peer->inbuf_size, &size_max);
return -EINVAL;
}
peer->inbuf_size = round_up(peer->inbuf_size, size_align);
perf_free_inbuf(peer);
peer->inbuf = dma_alloc_coherent(&perf->ntb->pdev->dev,
peer->inbuf_size, &peer->inbuf_xlat,
GFP_KERNEL);
if (!peer->inbuf) {
dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n",
&peer->inbuf_size);
return -ENOMEM;
}
if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
ret = -EINVAL;
dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
goto err_free_inbuf;
}
ret = ntb_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
peer->inbuf_xlat, peer->inbuf_size);
if (ret) {
dev_err(&perf->ntb->dev, "Failed to set inbuf translation\n");
goto err_free_inbuf;
}
/*
	 * We submit the inbuf xlat transmission cmd for execution here to
	 * follow the code architecture, even though this method is called
	 * from the service work itself, so the command is executed right
	 * after it returns.
*/
(void)perf_cmd_exec(peer, PERF_CMD_SXLAT);
return 0;
err_free_inbuf:
perf_free_inbuf(peer);
return ret;
}
static void perf_service_work(struct work_struct *work)
{
struct perf_peer *peer = to_peer_service(work);
if (test_and_clear_bit(PERF_CMD_SSIZE, &peer->sts))
perf_cmd_send(peer, PERF_CMD_SSIZE, peer->outbuf_size);
if (test_and_clear_bit(PERF_CMD_RSIZE, &peer->sts))
perf_setup_inbuf(peer);
if (test_and_clear_bit(PERF_CMD_SXLAT, &peer->sts))
perf_cmd_send(peer, PERF_CMD_SXLAT, peer->inbuf_xlat);
if (test_and_clear_bit(PERF_CMD_RXLAT, &peer->sts))
perf_setup_outbuf(peer);
if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) {
init_completion(&peer->init_comp);
clear_bit(PERF_STS_DONE, &peer->sts);
if (test_bit(0, &peer->perf->busy_flag) &&
peer == peer->perf->test_peer) {
dev_warn(&peer->perf->ntb->dev,
"Freeing while test on-fly\n");
perf_terminate_test(peer->perf);
}
perf_free_outbuf(peer);
perf_free_inbuf(peer);
}
}
static int perf_init_service(struct perf_ctx *perf)
{
u64 mask;
if (ntb_peer_mw_count(perf->ntb) < perf->pcnt) {
dev_err(&perf->ntb->dev, "Not enough memory windows\n");
return -EINVAL;
}
if (ntb_msg_count(perf->ntb) >= PERF_MSG_CNT) {
perf->cmd_send = perf_msg_cmd_send;
perf->cmd_recv = perf_msg_cmd_recv;
dev_dbg(&perf->ntb->dev, "Message service initialized\n");
return 0;
}
dev_dbg(&perf->ntb->dev, "Message service unsupported\n");
mask = GENMASK_ULL(perf->pcnt, 0);
if (ntb_spad_count(perf->ntb) >= PERF_SPAD_CNT(perf->pcnt) &&
(ntb_db_valid_mask(perf->ntb) & mask) == mask) {
perf->cmd_send = perf_spad_cmd_send;
perf->cmd_recv = perf_spad_cmd_recv;
dev_dbg(&perf->ntb->dev, "Scratchpad service initialized\n");
return 0;
}
dev_dbg(&perf->ntb->dev, "Scratchpad service unsupported\n");
dev_err(&perf->ntb->dev, "Command services unsupported\n");
return -EINVAL;
}
static int perf_enable_service(struct perf_ctx *perf)
{
u64 mask, incmd_bit;
int ret, sidx, scnt;
mask = ntb_db_valid_mask(perf->ntb);
(void)ntb_db_set_mask(perf->ntb, mask);
ret = ntb_set_ctx(perf->ntb, perf, &perf_ops);
if (ret)
return ret;
if (perf->cmd_send == perf_msg_cmd_send) {
u64 inbits, outbits;
inbits = ntb_msg_inbits(perf->ntb);
outbits = ntb_msg_outbits(perf->ntb);
(void)ntb_msg_set_mask(perf->ntb, inbits | outbits);
incmd_bit = BIT_ULL(__ffs64(inbits));
ret = ntb_msg_clear_mask(perf->ntb, incmd_bit);
dev_dbg(&perf->ntb->dev, "MSG sts unmasked %#llx\n", incmd_bit);
} else {
scnt = ntb_spad_count(perf->ntb);
for (sidx = 0; sidx < scnt; sidx++)
ntb_spad_write(perf->ntb, sidx, PERF_CMD_INVAL);
incmd_bit = PERF_SPAD_NOTIFY(perf->gidx);
ret = ntb_db_clear_mask(perf->ntb, incmd_bit);
dev_dbg(&perf->ntb->dev, "DB bits unmasked %#llx\n", incmd_bit);
}
if (ret) {
ntb_clear_ctx(perf->ntb);
return ret;
}
ntb_link_enable(perf->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	/* Might not be necessary */
ntb_link_event(perf->ntb);
return 0;
}
static void perf_disable_service(struct perf_ctx *perf)
{
int pidx;
if (perf->cmd_send == perf_msg_cmd_send) {
u64 inbits;
inbits = ntb_msg_inbits(perf->ntb);
(void)ntb_msg_set_mask(perf->ntb, inbits);
} else {
(void)ntb_db_set_mask(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
}
ntb_clear_ctx(perf->ntb);
for (pidx = 0; pidx < perf->pcnt; pidx++)
perf_cmd_exec(&perf->peers[pidx], PERF_CMD_CLEAR);
for (pidx = 0; pidx < perf->pcnt; pidx++)
flush_work(&perf->peers[pidx].service);
for (pidx = 0; pidx < perf->pcnt; pidx++) {
struct perf_peer *peer = &perf->peers[pidx];
ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), 0);
}
ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
ntb_link_disable(perf->ntb);
}
/*==============================================================================
* Performance measuring work-thread
*==============================================================================
*/
static void perf_dma_copy_callback(void *data)
{
struct perf_thread *pthr = data;
atomic_dec(&pthr->dma_sync);
wake_up(&pthr->dma_wait);
}
static int perf_copy_chunk(struct perf_thread *pthr,
void __iomem *dst, void *src, size_t len)
{
struct dma_async_tx_descriptor *tx;
struct dmaengine_unmap_data *unmap;
struct device *dma_dev;
int try = 0, ret = 0;
struct perf_peer *peer = pthr->perf->test_peer;
void __iomem *vbase;
void __iomem *dst_vaddr;
dma_addr_t dst_dma_addr;
if (!use_dma) {
memcpy_toio(dst, src, len);
goto ret_check_tsync;
}
dma_dev = pthr->dma_chan->device->dev;
if (!is_dma_copy_aligned(pthr->dma_chan->device, offset_in_page(src),
offset_in_page(dst), len))
return -EIO;
vbase = peer->outbuf;
dst_vaddr = dst;
dst_dma_addr = peer->dma_dst_addr + (dst_vaddr - vbase);
unmap = dmaengine_get_unmap_data(dma_dev, 1, GFP_NOWAIT);
if (!unmap)
return -ENOMEM;
unmap->len = len;
unmap->addr[0] = dma_map_page(dma_dev, virt_to_page(src),
offset_in_page(src), len, DMA_TO_DEVICE);
if (dma_mapping_error(dma_dev, unmap->addr[0])) {
ret = -EIO;
goto err_free_resource;
}
unmap->to_cnt = 1;
do {
tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, dst_dma_addr,
unmap->addr[0], len, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx)
msleep(DMA_MDELAY);
} while (!tx && (try++ < DMA_TRIES));
if (!tx) {
ret = -EIO;
goto err_free_resource;
}
tx->callback = perf_dma_copy_callback;
tx->callback_param = pthr;
dma_set_unmap(tx, unmap);
ret = dma_submit_error(dmaengine_submit(tx));
if (ret) {
dmaengine_unmap_put(unmap);
goto err_free_resource;
}
dmaengine_unmap_put(unmap);
atomic_inc(&pthr->dma_sync);
dma_async_issue_pending(pthr->dma_chan);
ret_check_tsync:
return likely(atomic_read(&pthr->perf->tsync) > 0) ? 0 : -EINTR;
err_free_resource:
dmaengine_unmap_put(unmap);
return ret;
}
static bool perf_dma_filter(struct dma_chan *chan, void *data)
{
struct perf_ctx *perf = data;
int node;
node = dev_to_node(&perf->ntb->dev);
return node == NUMA_NO_NODE || node == dev_to_node(chan->device->dev);
}
static int perf_init_test(struct perf_thread *pthr)
{
struct perf_ctx *perf = pthr->perf;
dma_cap_mask_t dma_mask;
struct perf_peer *peer = pthr->perf->test_peer;
pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL,
dev_to_node(&perf->ntb->dev));
if (!pthr->src)
return -ENOMEM;
get_random_bytes(pthr->src, perf->test_peer->outbuf_size);
if (!use_dma)
return 0;
dma_cap_zero(dma_mask);
dma_cap_set(DMA_MEMCPY, dma_mask);
pthr->dma_chan = dma_request_channel(dma_mask, perf_dma_filter, perf);
if (!pthr->dma_chan) {
dev_err(&perf->ntb->dev, "%d: Failed to get DMA channel\n",
pthr->tidx);
goto err_free;
}
peer->dma_dst_addr =
dma_map_resource(pthr->dma_chan->device->dev,
peer->out_phys_addr, peer->outbuf_size,
DMA_FROM_DEVICE, 0);
if (dma_mapping_error(pthr->dma_chan->device->dev,
peer->dma_dst_addr)) {
dev_err(pthr->dma_chan->device->dev, "%d: Failed to map DMA addr\n",
pthr->tidx);
peer->dma_dst_addr = 0;
dma_release_channel(pthr->dma_chan);
goto err_free;
}
dev_dbg(pthr->dma_chan->device->dev, "%d: Map MMIO %pa to DMA addr %pad\n",
pthr->tidx,
&peer->out_phys_addr,
&peer->dma_dst_addr);
atomic_set(&pthr->dma_sync, 0);
return 0;
err_free:
atomic_dec(&perf->tsync);
wake_up(&perf->twait);
kfree(pthr->src);
return -ENODEV;
}
static int perf_run_test(struct perf_thread *pthr)
{
struct perf_peer *peer = pthr->perf->test_peer;
struct perf_ctx *perf = pthr->perf;
void __iomem *flt_dst, *bnd_dst;
u64 total_size, chunk_size;
void *flt_src;
int ret = 0;
total_size = 1ULL << total_order;
chunk_size = 1ULL << chunk_order;
chunk_size = min_t(u64, peer->outbuf_size, chunk_size);
flt_src = pthr->src;
bnd_dst = peer->outbuf + peer->outbuf_size;
flt_dst = peer->outbuf;
pthr->duration = ktime_get();
	/* The copied field is cleared at the test launch stage */
while (pthr->copied < total_size) {
ret = perf_copy_chunk(pthr, flt_dst, flt_src, chunk_size);
if (ret) {
dev_err(&perf->ntb->dev, "%d: Got error %d on test\n",
pthr->tidx, ret);
return ret;
}
pthr->copied += chunk_size;
flt_dst += chunk_size;
flt_src += chunk_size;
if (flt_dst >= bnd_dst || flt_dst < peer->outbuf) {
flt_dst = peer->outbuf;
flt_src = pthr->src;
}
		/* Yield the CPU to give other threads a chance to run */
schedule();
}
return 0;
}
static int perf_sync_test(struct perf_thread *pthr)
{
struct perf_ctx *perf = pthr->perf;
if (!use_dma)
goto no_dma_ret;
wait_event(pthr->dma_wait,
(atomic_read(&pthr->dma_sync) == 0 ||
atomic_read(&perf->tsync) < 0));
if (atomic_read(&perf->tsync) < 0)
return -EINTR;
no_dma_ret:
pthr->duration = ktime_sub(ktime_get(), pthr->duration);
dev_dbg(&perf->ntb->dev, "%d: copied %llu bytes\n",
pthr->tidx, pthr->copied);
dev_dbg(&perf->ntb->dev, "%d: lasted %llu usecs\n",
pthr->tidx, ktime_to_us(pthr->duration));
dev_dbg(&perf->ntb->dev, "%d: %llu MBytes/s\n", pthr->tidx,
div64_u64(pthr->copied, ktime_to_us(pthr->duration)));
return 0;
}
static void perf_clear_test(struct perf_thread *pthr)
{
struct perf_ctx *perf = pthr->perf;
if (!use_dma)
goto no_dma_notify;
/*
	 * If the test finished without errors, termination isn't needed.
	 * We call it anyway just to be sure the transfers have completed.
*/
(void)dmaengine_terminate_sync(pthr->dma_chan);
if (pthr->perf->test_peer->dma_dst_addr)
dma_unmap_resource(pthr->dma_chan->device->dev,
pthr->perf->test_peer->dma_dst_addr,
pthr->perf->test_peer->outbuf_size,
DMA_FROM_DEVICE, 0);
dma_release_channel(pthr->dma_chan);
no_dma_notify:
atomic_dec(&perf->tsync);
wake_up(&perf->twait);
kfree(pthr->src);
}
static void perf_thread_work(struct work_struct *work)
{
struct perf_thread *pthr = to_thread_work(work);
int ret;
/*
	 * Perform the stages in compliance with the use_dma flag value.
	 * The test status is changed only if an error happened; otherwise
	 * the -ENODATA status is kept while the test is in flight. Results
	 * synchronization is performed only if the test finished
	 * without an error or interruption.
*/
ret = perf_init_test(pthr);
if (ret) {
pthr->status = ret;
return;
}
ret = perf_run_test(pthr);
if (ret) {
pthr->status = ret;
goto err_clear_test;
}
pthr->status = perf_sync_test(pthr);
err_clear_test:
perf_clear_test(pthr);
}
static int perf_set_tcnt(struct perf_ctx *perf, u8 tcnt)
{
if (tcnt == 0 || tcnt > MAX_THREADS_CNT)
return -EINVAL;
if (test_and_set_bit_lock(0, &perf->busy_flag))
return -EBUSY;
perf->tcnt = tcnt;
clear_bit_unlock(0, &perf->busy_flag);
return 0;
}
static void perf_terminate_test(struct perf_ctx *perf)
{
int tidx;
atomic_set(&perf->tsync, -1);
wake_up(&perf->twait);
for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
wake_up(&perf->threads[tidx].dma_wait);
cancel_work_sync(&perf->threads[tidx].work);
}
}
static int perf_submit_test(struct perf_peer *peer)
{
struct perf_ctx *perf = peer->perf;
struct perf_thread *pthr;
int tidx, ret;
ret = wait_for_completion_interruptible(&peer->init_comp);
if (ret < 0)
return ret;
if (test_and_set_bit_lock(0, &perf->busy_flag))
return -EBUSY;
perf->test_peer = peer;
atomic_set(&perf->tsync, perf->tcnt);
for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
pthr = &perf->threads[tidx];
pthr->status = -ENODATA;
pthr->copied = 0;
pthr->duration = ktime_set(0, 0);
if (tidx < perf->tcnt)
(void)queue_work(perf_wq, &pthr->work);
}
ret = wait_event_interruptible(perf->twait,
atomic_read(&perf->tsync) <= 0);
if (ret == -ERESTARTSYS) {
perf_terminate_test(perf);
ret = -EINTR;
}
clear_bit_unlock(0, &perf->busy_flag);
return ret;
}
static int perf_read_stats(struct perf_ctx *perf, char *buf,
size_t size, ssize_t *pos)
{
struct perf_thread *pthr;
int tidx;
if (test_and_set_bit_lock(0, &perf->busy_flag))
return -EBUSY;
(*pos) += scnprintf(buf + *pos, size - *pos,
" Peer %d test statistics:\n", perf->test_peer->pidx);
for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
pthr = &perf->threads[tidx];
if (pthr->status == -ENODATA)
continue;
if (pthr->status) {
(*pos) += scnprintf(buf + *pos, size - *pos,
"%d: error status %d\n", tidx, pthr->status);
continue;
}
(*pos) += scnprintf(buf + *pos, size - *pos,
"%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
tidx, pthr->copied, ktime_to_us(pthr->duration),
div64_u64(pthr->copied, ktime_to_us(pthr->duration)));
}
clear_bit_unlock(0, &perf->busy_flag);
return 0;
}
static void perf_init_threads(struct perf_ctx *perf)
{
struct perf_thread *pthr;
int tidx;
perf->tcnt = DEF_THREADS_CNT;
perf->test_peer = &perf->peers[0];
init_waitqueue_head(&perf->twait);
for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
pthr = &perf->threads[tidx];
pthr->perf = perf;
pthr->tidx = tidx;
pthr->status = -ENODATA;
init_waitqueue_head(&pthr->dma_wait);
INIT_WORK(&pthr->work, perf_thread_work);
}
}
static void perf_clear_threads(struct perf_ctx *perf)
{
perf_terminate_test(perf);
}
/*==============================================================================
* DebugFS nodes
*==============================================================================
*/
static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct perf_ctx *perf = filep->private_data;
struct perf_peer *peer;
size_t buf_size;
ssize_t pos = 0;
int ret, pidx;
char *buf;
buf_size = min_t(size_t, size, 0x1000U);
buf = kmalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pos += scnprintf(buf + pos, buf_size - pos,
" Performance measuring tool info:\n\n");
pos += scnprintf(buf + pos, buf_size - pos,
"Local port %d, Global index %d\n", ntb_port_number(perf->ntb),
perf->gidx);
pos += scnprintf(buf + pos, buf_size - pos, "Test status: ");
if (test_bit(0, &perf->busy_flag)) {
pos += scnprintf(buf + pos, buf_size - pos,
"on-fly with port %d (%d)\n",
ntb_peer_port_number(perf->ntb, perf->test_peer->pidx),
perf->test_peer->pidx);
} else {
pos += scnprintf(buf + pos, buf_size - pos, "idle\n");
}
for (pidx = 0; pidx < perf->pcnt; pidx++) {
peer = &perf->peers[pidx];
pos += scnprintf(buf + pos, buf_size - pos,
"Port %d (%d), Global index %d:\n",
ntb_peer_port_number(perf->ntb, peer->pidx), peer->pidx,
peer->gidx);
pos += scnprintf(buf + pos, buf_size - pos,
"\tLink status: %s\n",
test_bit(PERF_STS_LNKUP, &peer->sts) ? "up" : "down");
pos += scnprintf(buf + pos, buf_size - pos,
"\tOut buffer addr 0x%pK\n", peer->outbuf);
pos += scnprintf(buf + pos, buf_size - pos,
"\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr);
pos += scnprintf(buf + pos, buf_size - pos,
"\tOut buffer size %pa\n", &peer->outbuf_size);
pos += scnprintf(buf + pos, buf_size - pos,
"\tOut buffer xlat 0x%016llx[p]\n", peer->outbuf_xlat);
if (!peer->inbuf) {
pos += scnprintf(buf + pos, buf_size - pos,
"\tIn buffer addr: unallocated\n");
continue;
}
pos += scnprintf(buf + pos, buf_size - pos,
"\tIn buffer addr 0x%pK\n", peer->inbuf);
pos += scnprintf(buf + pos, buf_size - pos,
"\tIn buffer size %pa\n", &peer->inbuf_size);
pos += scnprintf(buf + pos, buf_size - pos,
"\tIn buffer xlat %pad[p]\n", &peer->inbuf_xlat);
}
ret = simple_read_from_buffer(ubuf, size, offp, buf, pos);
kfree(buf);
return ret;
}
static const struct file_operations perf_dbgfs_info = {
.open = simple_open,
.read = perf_dbgfs_read_info
};
static ssize_t perf_dbgfs_read_run(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct perf_ctx *perf = filep->private_data;
ssize_t ret, pos = 0;
char *buf;
buf = kmalloc(PERF_BUF_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = perf_read_stats(perf, buf, PERF_BUF_LEN, &pos);
if (ret)
goto err_free;
ret = simple_read_from_buffer(ubuf, size, offp, buf, pos);
err_free:
kfree(buf);
return ret;
}
static ssize_t perf_dbgfs_write_run(struct file *filep, const char __user *ubuf,
size_t size, loff_t *offp)
{
struct perf_ctx *perf = filep->private_data;
struct perf_peer *peer;
int pidx, ret;
ret = kstrtoint_from_user(ubuf, size, 0, &pidx);
if (ret)
return ret;
if (pidx < 0 || pidx >= perf->pcnt)
return -EINVAL;
peer = &perf->peers[pidx];
ret = perf_submit_test(peer);
if (ret)
return ret;
return size;
}
static const struct file_operations perf_dbgfs_run = {
.open = simple_open,
.read = perf_dbgfs_read_run,
.write = perf_dbgfs_write_run
};
static ssize_t perf_dbgfs_read_tcnt(struct file *filep, char __user *ubuf,
size_t size, loff_t *offp)
{
struct perf_ctx *perf = filep->private_data;
char buf[8];
ssize_t pos;
pos = scnprintf(buf, sizeof(buf), "%hhu\n", perf->tcnt);
return simple_read_from_buffer(ubuf, size, offp, buf, pos);
}
static ssize_t perf_dbgfs_write_tcnt(struct file *filep,
const char __user *ubuf,
size_t size, loff_t *offp)
{
struct perf_ctx *perf = filep->private_data;
int ret;
u8 val;
ret = kstrtou8_from_user(ubuf, size, 0, &val);
if (ret)
return ret;
ret = perf_set_tcnt(perf, val);
if (ret)
return ret;
return size;
}
static const struct file_operations perf_dbgfs_tcnt = {
.open = simple_open,
.read = perf_dbgfs_read_tcnt,
.write = perf_dbgfs_write_tcnt
};
static void perf_setup_dbgfs(struct perf_ctx *perf)
{
struct pci_dev *pdev = perf->ntb->pdev;
perf->dbgfs_dir = debugfs_create_dir(pci_name(pdev), perf_dbgfs_topdir);
if (IS_ERR(perf->dbgfs_dir)) {
dev_warn(&perf->ntb->dev, "DebugFS unsupported\n");
return;
}
debugfs_create_file("info", 0600, perf->dbgfs_dir, perf,
&perf_dbgfs_info);
debugfs_create_file("run", 0600, perf->dbgfs_dir, perf,
&perf_dbgfs_run);
debugfs_create_file("threads_count", 0600, perf->dbgfs_dir, perf,
&perf_dbgfs_tcnt);
	/* These are made read-only for test execution safety and integrity */
debugfs_create_u8("chunk_order", 0500, perf->dbgfs_dir, &chunk_order);
debugfs_create_u8("total_order", 0500, perf->dbgfs_dir, &total_order);
debugfs_create_bool("use_dma", 0500, perf->dbgfs_dir, &use_dma);
}
static void perf_clear_dbgfs(struct perf_ctx *perf)
{
debugfs_remove_recursive(perf->dbgfs_dir);
}
/*==============================================================================
* Basic driver initialization
*==============================================================================
*/
static struct perf_ctx *perf_create_data(struct ntb_dev *ntb)
{
struct perf_ctx *perf;
perf = devm_kzalloc(&ntb->dev, sizeof(*perf), GFP_KERNEL);
if (!perf)
return ERR_PTR(-ENOMEM);
perf->pcnt = ntb_peer_port_count(ntb);
perf->peers = devm_kcalloc(&ntb->dev, perf->pcnt, sizeof(*perf->peers),
GFP_KERNEL);
if (!perf->peers)
return ERR_PTR(-ENOMEM);
perf->ntb = ntb;
return perf;
}
static int perf_setup_peer_mw(struct perf_peer *peer)
{
struct perf_ctx *perf = peer->perf;
phys_addr_t phys_addr;
int ret;
/* Get outbound MW parameters and map it */
ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
&peer->outbuf_size);
if (ret)
return ret;
peer->outbuf = devm_ioremap_wc(&perf->ntb->dev, phys_addr,
peer->outbuf_size);
if (!peer->outbuf)
return -ENOMEM;
peer->out_phys_addr = phys_addr;
if (max_mw_size && peer->outbuf_size > max_mw_size) {
peer->outbuf_size = max_mw_size;
dev_warn(&peer->perf->ntb->dev,
"Peer %d outbuf reduced to %pa\n", peer->pidx,
&peer->outbuf_size);
}
return 0;
}
static int perf_init_peers(struct perf_ctx *perf)
{
struct perf_peer *peer;
int pidx, lport, ret;
lport = ntb_port_number(perf->ntb);
perf->gidx = -1;
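	/*
	 * The global index (gidx) reflects a port's position in the
	 * port-number-sorted list of all NTB ports: peers with a port
	 * number below the local one keep their peer index, while the
	 * others are shifted up by one to leave a slot for the local
	 * device itself.
	 */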
for (pidx = 0; pidx < perf->pcnt; pidx++) {
peer = &perf->peers[pidx];
peer->perf = perf;
peer->pidx = pidx;
if (lport < ntb_peer_port_number(perf->ntb, pidx)) {
if (perf->gidx == -1)
perf->gidx = pidx;
peer->gidx = pidx + 1;
} else {
peer->gidx = pidx;
}
INIT_WORK(&peer->service, perf_service_work);
init_completion(&peer->init_comp);
}
if (perf->gidx == -1)
perf->gidx = pidx;
/*
* Hardware with only two ports may not have unique port
* numbers. In this case, the gidxs should all be zero.
*/
if (perf->pcnt == 1 && ntb_port_number(perf->ntb) == 0 &&
ntb_peer_port_number(perf->ntb, 0) == 0) {
perf->gidx = 0;
perf->peers[0].gidx = 0;
}
for (pidx = 0; pidx < perf->pcnt; pidx++) {
ret = perf_setup_peer_mw(&perf->peers[pidx]);
if (ret)
return ret;
}
dev_dbg(&perf->ntb->dev, "Global port index %d\n", perf->gidx);
return 0;
}
static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
struct perf_ctx *perf;
int ret;
perf = perf_create_data(ntb);
if (IS_ERR(perf))
return PTR_ERR(perf);
ret = perf_init_peers(perf);
if (ret)
return ret;
perf_init_threads(perf);
ret = perf_init_service(perf);
if (ret)
return ret;
ret = perf_enable_service(perf);
if (ret)
return ret;
perf_setup_dbgfs(perf);
return 0;
}
static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
struct perf_ctx *perf = ntb->ctx;
perf_clear_dbgfs(perf);
perf_disable_service(perf);
perf_clear_threads(perf);
}
static struct ntb_client perf_client = {
.ops = {
.probe = perf_probe,
.remove = perf_remove
}
};
static int __init perf_init(void)
{
int ret;
if (chunk_order > MAX_CHUNK_ORDER) {
chunk_order = MAX_CHUNK_ORDER;
pr_info("Chunk order reduced to %hhu\n", chunk_order);
}
if (total_order < chunk_order) {
total_order = chunk_order;
pr_info("Total data order reduced to %hhu\n", total_order);
}
perf_wq = alloc_workqueue("perf_wq", WQ_UNBOUND | WQ_SYSFS, 0);
if (!perf_wq)
return -ENOMEM;
if (debugfs_initialized())
perf_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
ret = ntb_register_client(&perf_client);
if (ret) {
debugfs_remove_recursive(perf_dbgfs_topdir);
destroy_workqueue(perf_wq);
}
return ret;
}
module_init(perf_init);
static void __exit perf_exit(void)
{
ntb_unregister_client(&perf_client);
debugfs_remove_recursive(perf_dbgfs_topdir);
destroy_workqueue(perf_wq);
}
module_exit(perf_exit);
| linux-master | drivers/ntb/test/ntb_perf.c |
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
* Copyright (C) 2017 T-Platforms. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
* Copyright (C) 2017 T-Platforms. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copy
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* PCIe NTB Pingpong Linux driver
*/
/*
* How to use this tool, by example.
*
* Assuming $DBG_DIR is something like:
 * '/sys/kernel/debug/ntb_pingpong/0000:00:03.0'
 * Suppose that, aside from the local device, there is at least one remote
 * device connected to the NTB, with peer index 0.
*-----------------------------------------------------------------------------
* Eg: install driver with specified delay between doorbell event and response
*
* root@self# insmod ntb_pingpong.ko delay_ms=1000
*-----------------------------------------------------------------------------
* Eg: get number of ping-pong cycles performed
*
* root@self# cat $DBG_DIR/count
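 *-----------------------------------------------------------------------------
 * Eg: change the response delay at run time (a sketch; delay_ms is a
 *     writable module parameter, so this path should exist once the
 *     module is loaded)
 *
 * root@self# echo 500 > /sys/module/ntb_pingpong/parameters/delay_ms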
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/hrtimer.h>
#include <linux/debugfs.h>
#include <linux/ntb.h>
#define DRIVER_NAME "ntb_pingpong"
#define DRIVER_VERSION "2.0"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Allen Hubbe <[email protected]>");
MODULE_DESCRIPTION("PCIe NTB Simple Pingpong Client");
static unsigned int unsafe;
module_param(unsafe, uint, 0644);
MODULE_PARM_DESC(unsafe, "Run even though ntb operations may be unsafe");
static unsigned int delay_ms = 1000;
module_param(delay_ms, uint, 0644);
MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer");
struct pp_ctx {
struct ntb_dev *ntb;
struct hrtimer timer;
u64 in_db;
u64 out_db;
int out_pidx;
u64 nmask;
u64 pmask;
atomic_t count;
spinlock_t lock;
struct dentry *dbgfs_dir;
};
#define to_pp_timer(__timer) \
container_of(__timer, struct pp_ctx, timer)
static struct dentry *pp_dbgfs_topdir;
static int pp_find_next_peer(struct pp_ctx *pp)
{
u64 link, out_db;
int pidx;
link = ntb_link_is_up(pp->ntb, NULL, NULL);
/* Find next available peer */
if (link & pp->nmask)
pidx = __ffs64(link & pp->nmask);
else if (link & pp->pmask)
pidx = __ffs64(link & pp->pmask);
else
return -ENODEV;
out_db = BIT_ULL(ntb_peer_port_number(pp->ntb, pidx));
spin_lock(&pp->lock);
pp->out_pidx = pidx;
pp->out_db = out_db;
spin_unlock(&pp->lock);
return 0;
}
static void pp_setup(struct pp_ctx *pp)
{
int ret;
ntb_db_set_mask(pp->ntb, pp->in_db);
hrtimer_cancel(&pp->timer);
ret = pp_find_next_peer(pp);
if (ret == -ENODEV) {
dev_dbg(&pp->ntb->dev, "Got no peers, so cancel\n");
return;
}
dev_dbg(&pp->ntb->dev, "Ping-pong started with port %d, db %#llx\n",
ntb_peer_port_number(pp->ntb, pp->out_pidx), pp->out_db);
hrtimer_start(&pp->timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
}
static void pp_clear(struct pp_ctx *pp)
{
hrtimer_cancel(&pp->timer);
ntb_db_set_mask(pp->ntb, pp->in_db);
dev_dbg(&pp->ntb->dev, "Ping-pong cancelled\n");
}
static void pp_ping(struct pp_ctx *pp)
{
u32 count;
count = atomic_read(&pp->count);
spin_lock(&pp->lock);
ntb_peer_spad_write(pp->ntb, pp->out_pidx, 0, count);
ntb_peer_msg_write(pp->ntb, pp->out_pidx, 0, count);
dev_dbg(&pp->ntb->dev, "Ping port %d spad %#x, msg %#x\n",
ntb_peer_port_number(pp->ntb, pp->out_pidx), count, count);
ntb_peer_db_set(pp->ntb, pp->out_db);
ntb_db_clear_mask(pp->ntb, pp->in_db);
spin_unlock(&pp->lock);
}
static void pp_pong(struct pp_ctx *pp)
{
u32 msg_data, spad_data;
int pidx = 0;
/* Read pong data */
spad_data = ntb_spad_read(pp->ntb, 0);
msg_data = ntb_msg_read(pp->ntb, &pidx, 0);
ntb_msg_clear_sts(pp->ntb, -1);
/*
	 * Scratchpad and message data may differ, since a message register
	 * can't be rewritten until its status is cleared. Additionally,
	 * either of them might be unsupported.
*/
dev_dbg(&pp->ntb->dev, "Pong spad %#x, msg %#x (port %d)\n",
spad_data, msg_data, ntb_peer_port_number(pp->ntb, pidx));
atomic_inc(&pp->count);
ntb_db_set_mask(pp->ntb, pp->in_db);
ntb_db_clear(pp->ntb, pp->in_db);
hrtimer_start(&pp->timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
}
static enum hrtimer_restart pp_timer_func(struct hrtimer *t)
{
struct pp_ctx *pp = to_pp_timer(t);
pp_ping(pp);
return HRTIMER_NORESTART;
}
static void pp_link_event(void *ctx)
{
struct pp_ctx *pp = ctx;
pp_setup(pp);
}
static void pp_db_event(void *ctx, int vec)
{
struct pp_ctx *pp = ctx;
pp_pong(pp);
}
static const struct ntb_ctx_ops pp_ops = {
.link_event = pp_link_event,
.db_event = pp_db_event
};
static int pp_check_ntb(struct ntb_dev *ntb)
{
u64 pmask;
if (ntb_db_is_unsafe(ntb)) {
dev_dbg(&ntb->dev, "Doorbell is unsafe\n");
if (!unsafe)
return -EINVAL;
}
if (ntb_spad_is_unsafe(ntb)) {
dev_dbg(&ntb->dev, "Scratchpad is unsafe\n");
if (!unsafe)
return -EINVAL;
}
pmask = GENMASK_ULL(ntb_peer_port_count(ntb), 0);
if ((ntb_db_valid_mask(ntb) & pmask) != pmask) {
dev_err(&ntb->dev, "Unsupported DB configuration\n");
return -EINVAL;
}
if (ntb_spad_count(ntb) < 1 && ntb_msg_count(ntb) < 1) {
dev_err(&ntb->dev, "Scratchpads and messages unsupported\n");
return -EINVAL;
} else if (ntb_spad_count(ntb) < 1) {
dev_dbg(&ntb->dev, "Scratchpads unsupported\n");
} else if (ntb_msg_count(ntb) < 1) {
dev_dbg(&ntb->dev, "Messages unsupported\n");
}
return 0;
}
static struct pp_ctx *pp_create_data(struct ntb_dev *ntb)
{
struct pp_ctx *pp;
pp = devm_kzalloc(&ntb->dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return ERR_PTR(-ENOMEM);
pp->ntb = ntb;
atomic_set(&pp->count, 0);
spin_lock_init(&pp->lock);
hrtimer_init(&pp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pp->timer.function = pp_timer_func;
return pp;
}
static void pp_init_flds(struct pp_ctx *pp)
{
int pidx, lport, pcnt;
/* Find global port index */
lport = ntb_port_number(pp->ntb);
pcnt = ntb_peer_port_count(pp->ntb);
for (pidx = 0; pidx < pcnt; pidx++) {
if (lport < ntb_peer_port_number(pp->ntb, pidx))
break;
}
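	/*
	 * pidx now counts the peers whose port number is below the local
	 * one: pmask below covers those "previous" peers (bits 0..pidx-1)
	 * and nmask the remaining "next" ones (bits pidx..pcnt-1), which
	 * pp_find_next_peer() prefers when picking the ping target.
	 */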
pp->in_db = BIT_ULL(lport);
pp->pmask = GENMASK_ULL(pidx, 0) >> 1;
pp->nmask = GENMASK_ULL(pcnt - 1, pidx);
dev_dbg(&pp->ntb->dev, "Inbound db %#llx, prev %#llx, next %#llx\n",
pp->in_db, pp->pmask, pp->nmask);
}
static int pp_mask_events(struct pp_ctx *pp)
{
u64 db_mask, msg_mask;
int ret;
db_mask = ntb_db_valid_mask(pp->ntb);
ret = ntb_db_set_mask(pp->ntb, db_mask);
if (ret)
return ret;
/* Skip message events masking if unsupported */
if (ntb_msg_count(pp->ntb) < 1)
return 0;
msg_mask = ntb_msg_outbits(pp->ntb) | ntb_msg_inbits(pp->ntb);
return ntb_msg_set_mask(pp->ntb, msg_mask);
}
static int pp_setup_ctx(struct pp_ctx *pp)
{
int ret;
ret = ntb_set_ctx(pp->ntb, pp, &pp_ops);
if (ret)
return ret;
ntb_link_enable(pp->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	/* Might not be necessary */
ntb_link_event(pp->ntb);
return 0;
}
static void pp_clear_ctx(struct pp_ctx *pp)
{
ntb_link_disable(pp->ntb);
ntb_clear_ctx(pp->ntb);
}
static void pp_setup_dbgfs(struct pp_ctx *pp)
{
struct pci_dev *pdev = pp->ntb->pdev;
pp->dbgfs_dir = debugfs_create_dir(pci_name(pdev), pp_dbgfs_topdir);
debugfs_create_atomic_t("count", 0600, pp->dbgfs_dir, &pp->count);
}
static void pp_clear_dbgfs(struct pp_ctx *pp)
{
debugfs_remove_recursive(pp->dbgfs_dir);
}
static int pp_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
struct pp_ctx *pp;
int ret;
ret = pp_check_ntb(ntb);
if (ret)
return ret;
pp = pp_create_data(ntb);
if (IS_ERR(pp))
return PTR_ERR(pp);
pp_init_flds(pp);
ret = pp_mask_events(pp);
if (ret)
return ret;
ret = pp_setup_ctx(pp);
if (ret)
return ret;
pp_setup_dbgfs(pp);
return 0;
}
static void pp_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
struct pp_ctx *pp = ntb->ctx;
pp_clear_dbgfs(pp);
pp_clear_ctx(pp);
pp_clear(pp);
}
static struct ntb_client pp_client = {
.ops = {
.probe = pp_probe,
.remove = pp_remove
}
};
static int __init pp_init(void)
{
int ret;
if (debugfs_initialized())
pp_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
ret = ntb_register_client(&pp_client);
if (ret)
debugfs_remove_recursive(pp_dbgfs_topdir);
return ret;
}
module_init(pp_init);
static void __exit pp_exit(void)
{
ntb_unregister_client(&pp_client);
debugfs_remove_recursive(pp_dbgfs_topdir);
}
module_exit(pp_exit);
| linux-master | drivers/ntb/test/ntb_pingpong.c |
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/ntb.h>
#include <linux/pci.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION("0.1");
MODULE_AUTHOR("Logan Gunthorpe <[email protected]>");
MODULE_DESCRIPTION("Test for sending MSI interrupts over an NTB memory window");
static int num_irqs = 4;
module_param(num_irqs, int, 0644);
MODULE_PARM_DESC(num_irqs, "number of irqs to use");
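/*
 * Rough usage sketch (the paths are examples and depend on the actual PCI
 * device name; the per-device directory is created by ntb_msit_create_dbgfs()
 * under a debugfs directory named after the module):
 *
 * root@self# insmod ntb_msi_test.ko num_irqs=4
 * root@self# DBG_DIR=/sys/kernel/debug/ntb_msi_test/0000:00:03.0
 * root@self# echo 0 > $DBG_DIR/peer0/trigger      # raise IRQ index 0 on peer 0
 * root@self# cat $DBG_DIR/irq0_occurrences        # interrupts received locally
 */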
struct ntb_msit_ctx {
struct ntb_dev *ntb;
struct dentry *dbgfs_dir;
struct work_struct setup_work;
struct ntb_msit_isr_ctx {
int irq_idx;
int irq_num;
int occurrences;
struct ntb_msit_ctx *nm;
struct ntb_msi_desc desc;
} *isr_ctx;
struct ntb_msit_peer {
struct ntb_msit_ctx *nm;
int pidx;
int num_irqs;
struct completion init_comp;
struct ntb_msi_desc *msi_desc;
} peers[];
};
static struct dentry *ntb_msit_dbgfs_topdir;
static irqreturn_t ntb_msit_isr(int irq, void *dev)
{
struct ntb_msit_isr_ctx *isr_ctx = dev;
struct ntb_msit_ctx *nm = isr_ctx->nm;
dev_dbg(&nm->ntb->dev, "Interrupt Occurred: %d",
isr_ctx->irq_idx);
isr_ctx->occurrences++;
return IRQ_HANDLED;
}
static void ntb_msit_setup_work(struct work_struct *work)
{
struct ntb_msit_ctx *nm = container_of(work, struct ntb_msit_ctx,
setup_work);
int irq_count = 0;
int irq;
int ret;
uintptr_t i;
ret = ntb_msi_setup_mws(nm->ntb);
if (ret) {
dev_err(&nm->ntb->dev, "Unable to setup MSI windows: %d\n",
ret);
return;
}
for (i = 0; i < num_irqs; i++) {
nm->isr_ctx[i].irq_idx = i;
nm->isr_ctx[i].nm = nm;
if (!nm->isr_ctx[i].irq_num) {
irq = ntbm_msi_request_irq(nm->ntb, ntb_msit_isr,
KBUILD_MODNAME,
&nm->isr_ctx[i],
&nm->isr_ctx[i].desc);
if (irq < 0)
break;
nm->isr_ctx[i].irq_num = irq;
}
ret = ntb_spad_write(nm->ntb, 2 * i + 1,
nm->isr_ctx[i].desc.addr_offset);
if (ret)
break;
ret = ntb_spad_write(nm->ntb, 2 * i + 2,
nm->isr_ctx[i].desc.data);
if (ret)
break;
irq_count++;
}
ntb_spad_write(nm->ntb, 0, irq_count);
ntb_peer_db_set(nm->ntb, BIT(ntb_port_number(nm->ntb)));
}
static void ntb_msit_desc_changed(void *ctx)
{
struct ntb_msit_ctx *nm = ctx;
int i;
dev_dbg(&nm->ntb->dev, "MSI Descriptors Changed\n");
for (i = 0; i < num_irqs; i++) {
ntb_spad_write(nm->ntb, 2 * i + 1,
nm->isr_ctx[i].desc.addr_offset);
ntb_spad_write(nm->ntb, 2 * i + 2,
nm->isr_ctx[i].desc.data);
}
ntb_peer_db_set(nm->ntb, BIT(ntb_port_number(nm->ntb)));
}
static void ntb_msit_link_event(void *ctx)
{
struct ntb_msit_ctx *nm = ctx;
if (!ntb_link_is_up(nm->ntb, NULL, NULL))
return;
schedule_work(&nm->setup_work);
}
static void ntb_msit_copy_peer_desc(struct ntb_msit_ctx *nm, int peer)
{
int i;
struct ntb_msi_desc *desc = nm->peers[peer].msi_desc;
int irq_count = nm->peers[peer].num_irqs;
for (i = 0; i < irq_count; i++) {
desc[i].addr_offset = ntb_peer_spad_read(nm->ntb, peer,
2 * i + 1);
desc[i].data = ntb_peer_spad_read(nm->ntb, peer, 2 * i + 2);
}
dev_info(&nm->ntb->dev, "Found %d interrupts on peer %d\n",
irq_count, peer);
complete_all(&nm->peers[peer].init_comp);
}
static void ntb_msit_db_event(void *ctx, int vec)
{
struct ntb_msit_ctx *nm = ctx;
struct ntb_msi_desc *desc;
u64 peer_mask = ntb_db_read(nm->ntb);
u32 irq_count;
int peer;
ntb_db_clear(nm->ntb, peer_mask);
for (peer = 0; peer < sizeof(peer_mask) * 8; peer++) {
if (!(peer_mask & BIT(peer)))
continue;
irq_count = ntb_peer_spad_read(nm->ntb, peer, 0);
if (irq_count == -1)
continue;
desc = kcalloc(irq_count, sizeof(*desc), GFP_ATOMIC);
if (!desc)
continue;
kfree(nm->peers[peer].msi_desc);
nm->peers[peer].msi_desc = desc;
nm->peers[peer].num_irqs = irq_count;
ntb_msit_copy_peer_desc(nm, peer);
}
}
static const struct ntb_ctx_ops ntb_msit_ops = {
.link_event = ntb_msit_link_event,
.db_event = ntb_msit_db_event,
};
static int ntb_msit_dbgfs_trigger(void *data, u64 idx)
{
struct ntb_msit_peer *peer = data;
if (idx >= peer->num_irqs)
return -EINVAL;
dev_dbg(&peer->nm->ntb->dev, "trigger irq %llu on peer %u\n",
idx, peer->pidx);
return ntb_msi_peer_trigger(peer->nm->ntb, peer->pidx,
&peer->msi_desc[idx]);
}
DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_trigger_fops, NULL,
ntb_msit_dbgfs_trigger, "%llu\n");
static int ntb_msit_dbgfs_port_get(void *data, u64 *port)
{
struct ntb_msit_peer *peer = data;
*port = ntb_peer_port_number(peer->nm->ntb, peer->pidx);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_port_fops, ntb_msit_dbgfs_port_get,
NULL, "%llu\n");
static int ntb_msit_dbgfs_count_get(void *data, u64 *count)
{
struct ntb_msit_peer *peer = data;
*count = peer->num_irqs;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_count_fops, ntb_msit_dbgfs_count_get,
NULL, "%llu\n");
static int ntb_msit_dbgfs_ready_get(void *data, u64 *ready)
{
struct ntb_msit_peer *peer = data;
*ready = try_wait_for_completion(&peer->init_comp);
return 0;
}
static int ntb_msit_dbgfs_ready_set(void *data, u64 ready)
{
struct ntb_msit_peer *peer = data;
return wait_for_completion_interruptible(&peer->init_comp);
}
DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_ready_fops, ntb_msit_dbgfs_ready_get,
ntb_msit_dbgfs_ready_set, "%llu\n");
static int ntb_msit_dbgfs_occurrences_get(void *data, u64 *occurrences)
{
struct ntb_msit_isr_ctx *isr_ctx = data;
*occurrences = isr_ctx->occurrences;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_occurrences_fops,
ntb_msit_dbgfs_occurrences_get,
NULL, "%llu\n");
static int ntb_msit_dbgfs_local_port_get(void *data, u64 *port)
{
struct ntb_msit_ctx *nm = data;
*port = ntb_port_number(nm->ntb);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ntb_msit_local_port_fops,
ntb_msit_dbgfs_local_port_get,
NULL, "%llu\n");
static void ntb_msit_create_dbgfs(struct ntb_msit_ctx *nm)
{
struct pci_dev *pdev = nm->ntb->pdev;
char buf[32];
int i;
struct dentry *peer_dir;
nm->dbgfs_dir = debugfs_create_dir(pci_name(pdev),
ntb_msit_dbgfs_topdir);
debugfs_create_file("port", 0400, nm->dbgfs_dir, nm,
&ntb_msit_local_port_fops);
for (i = 0; i < ntb_peer_port_count(nm->ntb); i++) {
nm->peers[i].pidx = i;
nm->peers[i].nm = nm;
init_completion(&nm->peers[i].init_comp);
snprintf(buf, sizeof(buf), "peer%d", i);
peer_dir = debugfs_create_dir(buf, nm->dbgfs_dir);
debugfs_create_file_unsafe("trigger", 0200, peer_dir,
&nm->peers[i],
&ntb_msit_trigger_fops);
debugfs_create_file_unsafe("port", 0400, peer_dir,
&nm->peers[i], &ntb_msit_port_fops);
debugfs_create_file_unsafe("count", 0400, peer_dir,
&nm->peers[i],
&ntb_msit_count_fops);
debugfs_create_file_unsafe("ready", 0600, peer_dir,
&nm->peers[i],
&ntb_msit_ready_fops);
}
for (i = 0; i < num_irqs; i++) {
snprintf(buf, sizeof(buf), "irq%d_occurrences", i);
debugfs_create_file_unsafe(buf, 0400, nm->dbgfs_dir,
&nm->isr_ctx[i],
&ntb_msit_occurrences_fops);
}
}
static void ntb_msit_remove_dbgfs(struct ntb_msit_ctx *nm)
{
debugfs_remove_recursive(nm->dbgfs_dir);
}
static int ntb_msit_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
struct ntb_msit_ctx *nm;
int peers;
int ret;
peers = ntb_peer_port_count(ntb);
if (peers <= 0)
return -EINVAL;
if (ntb_spad_is_unsafe(ntb) || ntb_spad_count(ntb) < 2 * num_irqs + 1) {
dev_err(&ntb->dev, "NTB MSI test requires at least %d spads for %d irqs\n",
2 * num_irqs + 1, num_irqs);
return -EFAULT;
}
ret = ntb_spad_write(ntb, 0, -1);
if (ret) {
dev_err(&ntb->dev, "Unable to write spads: %d\n", ret);
return ret;
}
ret = ntb_db_clear_mask(ntb, GENMASK(peers - 1, 0));
if (ret) {
dev_err(&ntb->dev, "Unable to clear doorbell mask: %d\n", ret);
return ret;
}
ret = ntb_msi_init(ntb, ntb_msit_desc_changed);
if (ret) {
dev_err(&ntb->dev, "Unable to initialize MSI library: %d\n",
ret);
return ret;
}
nm = devm_kzalloc(&ntb->dev, struct_size(nm, peers, peers), GFP_KERNEL);
if (!nm)
return -ENOMEM;
nm->isr_ctx = devm_kcalloc(&ntb->dev, num_irqs, sizeof(*nm->isr_ctx),
GFP_KERNEL);
if (!nm->isr_ctx)
return -ENOMEM;
INIT_WORK(&nm->setup_work, ntb_msit_setup_work);
nm->ntb = ntb;
ntb_msit_create_dbgfs(nm);
ret = ntb_set_ctx(ntb, nm, &ntb_msit_ops);
if (ret)
goto remove_dbgfs;
if (!nm->isr_ctx) {
ret = -ENOMEM;
goto remove_dbgfs;
}
ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
return 0;
remove_dbgfs:
ntb_msit_remove_dbgfs(nm);
devm_kfree(&ntb->dev, nm->isr_ctx);
devm_kfree(&ntb->dev, nm);
return ret;
}
static void ntb_msit_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
struct ntb_msit_ctx *nm = ntb->ctx;
int i;
ntb_link_disable(ntb);
ntb_db_set_mask(ntb, ntb_db_valid_mask(ntb));
ntb_msi_clear_mws(ntb);
for (i = 0; i < ntb_peer_port_count(ntb); i++)
kfree(nm->peers[i].msi_desc);
ntb_clear_ctx(ntb);
ntb_msit_remove_dbgfs(nm);
}
static struct ntb_client ntb_msit_client = {
.ops = {
.probe = ntb_msit_probe,
.remove = ntb_msit_remove
}
};
static int __init ntb_msit_init(void)
{
int ret;
if (debugfs_initialized())
ntb_msit_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME,
NULL);
ret = ntb_register_client(&ntb_msit_client);
if (ret)
debugfs_remove_recursive(ntb_msit_dbgfs_topdir);
return ret;
}
module_init(ntb_msit_init);
static void __exit ntb_msit_exit(void)
{
ntb_unregister_client(&ntb_msit_client);
debugfs_remove_recursive(ntb_msit_dbgfs_topdir);
}
module_exit(ntb_msit_exit);
| linux-master | drivers/ntb/test/ntb_msi_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2022 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <drm/drm_accel.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_print.h>
static DEFINE_SPINLOCK(accel_minor_lock);
static struct idr accel_minors_idr;
static struct dentry *accel_debugfs_root;
static struct class *accel_class;
static struct device_type accel_sysfs_device_minor = {
.name = "accel_minor"
};
static char *accel_devnode(const struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "accel/%s", dev_name(dev));
}
static int accel_sysfs_init(void)
{
accel_class = class_create("accel");
if (IS_ERR(accel_class))
return PTR_ERR(accel_class);
accel_class->devnode = accel_devnode;
return 0;
}
static void accel_sysfs_destroy(void)
{
if (IS_ERR_OR_NULL(accel_class))
return;
class_destroy(accel_class);
accel_class = NULL;
}
static int accel_name_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_minor *minor = node->minor;
struct drm_device *dev = minor->dev;
struct drm_master *master;
mutex_lock(&dev->master_mutex);
master = dev->master;
seq_printf(m, "%s", dev->driver->name);
if (dev->dev)
seq_printf(m, " dev=%s", dev_name(dev->dev));
if (master && master->unique)
seq_printf(m, " master=%s", master->unique);
if (dev->unique)
seq_printf(m, " unique=%s", dev->unique);
seq_puts(m, "\n");
mutex_unlock(&dev->master_mutex);
return 0;
}
static const struct drm_info_list accel_debugfs_list[] = {
{"name", accel_name_info, 0}
};
#define ACCEL_DEBUGFS_ENTRIES ARRAY_SIZE(accel_debugfs_list)
/**
* accel_debugfs_init() - Initialize debugfs for accel minor
* @minor: Pointer to the drm_minor instance.
* @minor_id: The minor's id
*
* This function initializes the drm minor's debugfs members and creates
* a root directory for the minor in debugfs. It also creates common files
* for accelerators and calls the driver's debugfs init callback.
*/
void accel_debugfs_init(struct drm_minor *minor, int minor_id)
{
struct drm_device *dev = minor->dev;
char name[64];
INIT_LIST_HEAD(&minor->debugfs_list);
mutex_init(&minor->debugfs_lock);
sprintf(name, "%d", minor_id);
minor->debugfs_root = debugfs_create_dir(name, accel_debugfs_root);
drm_debugfs_create_files(accel_debugfs_list, ACCEL_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
if (dev->driver->debugfs_init)
dev->driver->debugfs_init(minor);
}
/**
* accel_set_device_instance_params() - Set some device parameters for accel device
* @kdev: Pointer to the device instance.
* @index: The minor's index
*
* This function creates the dev_t of the device using the accel major and
* the device's minor number. In addition, it sets the class and type of the
* device instance to the accel sysfs class and device type, respectively.
*/
void accel_set_device_instance_params(struct device *kdev, int index)
{
kdev->devt = MKDEV(ACCEL_MAJOR, index);
kdev->class = accel_class;
kdev->type = &accel_sysfs_device_minor;
}
/**
* accel_minor_alloc() - Allocates a new accel minor
*
 * This function accesses the accel minors idr and allocates from it
 * a new id to represent a new accel minor.
 *
 * Return: A new id on success or a negative error code if idr_alloc() failed.
*/
int accel_minor_alloc(void)
{
unsigned long flags;
int r;
spin_lock_irqsave(&accel_minor_lock, flags);
r = idr_alloc(&accel_minors_idr, NULL, 0, ACCEL_MAX_MINORS, GFP_NOWAIT);
spin_unlock_irqrestore(&accel_minor_lock, flags);
return r;
}
/**
* accel_minor_remove() - Remove an accel minor
* @index: The minor id to remove.
*
 * This function accesses the accel minors idr and removes from it
 * the member with the id that is passed to this function.
*/
void accel_minor_remove(int index)
{
unsigned long flags;
spin_lock_irqsave(&accel_minor_lock, flags);
idr_remove(&accel_minors_idr, index);
spin_unlock_irqrestore(&accel_minor_lock, flags);
}
/**
* accel_minor_replace() - Replace minor pointer in accel minors idr.
* @minor: Pointer to the new minor.
* @index: The minor id to replace.
*
 * This function accesses the accel minors idr structure and replaces the
 * pointer that is associated with an existing id. Because the minor pointer
 * can be NULL, we need to explicitly pass the index.
*/
void accel_minor_replace(struct drm_minor *minor, int index)
{
unsigned long flags;
spin_lock_irqsave(&accel_minor_lock, flags);
idr_replace(&accel_minors_idr, minor, index);
spin_unlock_irqrestore(&accel_minor_lock, flags);
}
/*
* Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference count of the underlying device is increased so you must release this
* object with accel_minor_release().
*
* The object can be only a drm_minor that represents an accel device.
*
* As long as you hold this minor, it is guaranteed that the object and the
* minor->dev pointer will stay valid! However, the device may get unplugged and
* unregistered while you hold the minor.
*/
static struct drm_minor *accel_minor_acquire(unsigned int minor_id)
{
struct drm_minor *minor;
unsigned long flags;
spin_lock_irqsave(&accel_minor_lock, flags);
minor = idr_find(&accel_minors_idr, minor_id);
if (minor)
drm_dev_get(minor->dev);
spin_unlock_irqrestore(&accel_minor_lock, flags);
if (!minor) {
return ERR_PTR(-ENODEV);
} else if (drm_dev_is_unplugged(minor->dev)) {
drm_dev_put(minor->dev);
return ERR_PTR(-ENODEV);
}
return minor;
}
static void accel_minor_release(struct drm_minor *minor)
{
drm_dev_put(minor->dev);
}
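/*
 * Typical acquire/release usage (a condensed sketch of what accel_open()
 * below does):
 *
 *	minor = accel_minor_acquire(iminor(inode));
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *	... use minor->dev ...
 *	accel_minor_release(minor);
 */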
/**
* accel_open - open method for ACCEL file
* @inode: device inode
* @filp: file pointer.
*
* This function must be used by drivers as their &file_operations.open method.
* It looks up the correct ACCEL device and instantiates all the per-file
* resources for it. It also calls the &drm_driver.open driver callback.
*
* Return: 0 on success or negative errno value on failure.
*/
int accel_open(struct inode *inode, struct file *filp)
{
struct drm_device *dev;
struct drm_minor *minor;
int retcode;
minor = accel_minor_acquire(iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
dev = minor->dev;
atomic_fetch_inc(&dev->open_count);
/* share address_space across all char-devs of a single device */
filp->f_mapping = dev->anon_inode->i_mapping;
retcode = drm_open_helper(filp, minor);
if (retcode)
goto err_undo;
return 0;
err_undo:
atomic_dec(&dev->open_count);
accel_minor_release(minor);
return retcode;
}
EXPORT_SYMBOL_GPL(accel_open);
static int accel_stub_open(struct inode *inode, struct file *filp)
{
const struct file_operations *new_fops;
struct drm_minor *minor;
int err;
minor = accel_minor_acquire(iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
new_fops = fops_get(minor->dev->driver->fops);
if (!new_fops) {
err = -ENODEV;
goto out;
}
replace_fops(filp, new_fops);
if (filp->f_op->open)
err = filp->f_op->open(inode, filp);
else
err = 0;
out:
accel_minor_release(minor);
return err;
}
static const struct file_operations accel_stub_fops = {
.owner = THIS_MODULE,
.open = accel_stub_open,
.llseek = noop_llseek,
};
void accel_core_exit(void)
{
unregister_chrdev(ACCEL_MAJOR, "accel");
debugfs_remove(accel_debugfs_root);
accel_sysfs_destroy();
idr_destroy(&accel_minors_idr);
}
int __init accel_core_init(void)
{
int ret;
idr_init(&accel_minors_idr);
ret = accel_sysfs_init();
if (ret < 0) {
DRM_ERROR("Cannot create ACCEL class: %d\n", ret);
goto error;
}
accel_debugfs_root = debugfs_create_dir("accel", NULL);
ret = register_chrdev(ACCEL_MAJOR, "accel", &accel_stub_fops);
if (ret < 0)
DRM_ERROR("Cannot register ACCEL major: %d\n", ret);
error:
/*
	 * Any cleanup due to errors will be done in drm_core_exit(), which
	 * will call accel_core_exit().
*/
return ret;
}
| linux-master | drivers/accel/drm_accel.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/memblock.h>
#include <linux/mhi.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/sizes.h>
#include "mhi_controller.h"
#include "qaic.h"
#define MAX_RESET_TIME_SEC 25
static unsigned int mhi_timeout_ms = 2000; /* 2 sec default */
module_param(mhi_timeout_ms, uint, 0600);
MODULE_PARM_DESC(mhi_timeout_ms, "MHI controller timeout value in milliseconds");
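/*
 * Static MHI channel configuration for AIC100. Channels are declared in
 * pairs: the even-numbered channel of each name is host-to-device
 * (DMA_TO_DEVICE) and the odd-numbered one is device-to-host
 * (DMA_FROM_DEVICE). The ee_mask selects which execution environments
 * (SBL and/or AMSS) may use the channel.
 */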
static struct mhi_channel_config aic100_channels[] = {
{
.name = "QAIC_LOOPBACK",
.num = 0,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_LOOPBACK",
.num = 1,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_SAHARA",
.num = 2,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
.ee_mask = MHI_CH_EE_SBL,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_SAHARA",
.num = 3,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
.ee_mask = MHI_CH_EE_SBL,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_DIAG",
.num = 4,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_DIAG",
.num = 5,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_SSR",
.num = 6,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_SSR",
.num = 7,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_QDSS",
.num = 8,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_QDSS",
.num = 9,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_CONTROL",
.num = 10,
.num_elements = 128,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_CONTROL",
.num = 11,
.num_elements = 128,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_LOGGING",
.num = 12,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
.ee_mask = MHI_CH_EE_SBL,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_LOGGING",
.num = 13,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
.ee_mask = MHI_CH_EE_SBL,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_STATUS",
.num = 14,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_STATUS",
.num = 15,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_TELEMETRY",
.num = 16,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_TELEMETRY",
.num = 17,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_DEBUG",
.num = 18,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_DEBUG",
.num = 19,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
.ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.name = "QAIC_TIMESYNC",
.num = 20,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
.ee_mask = MHI_CH_EE_SBL | MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
{
.num = 21,
.name = "QAIC_TIMESYNC",
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
.ee_mask = MHI_CH_EE_SBL | MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
.wake_capable = false,
},
};
static struct mhi_event_config aic100_events[] = {
{
.num_elements = 32,
.irq_moderation_ms = 0,
.irq = 0,
.channel = U32_MAX,
.priority = 1,
.mode = MHI_DB_BRST_DISABLE,
.data_type = MHI_ER_CTRL,
.hardware_event = false,
.client_managed = false,
.offload_channel = false,
},
};
static struct mhi_controller_config aic100_config = {
.max_channels = 128,
	.timeout_ms = 0, /* controlled by the mhi_timeout_ms module parameter */
.buf_len = 0,
.num_channels = ARRAY_SIZE(aic100_channels),
.ch_cfg = aic100_channels,
.num_events = ARRAY_SIZE(aic100_events),
.event_cfg = aic100_events,
.use_bounce_buf = false,
.m2_no_db = false,
};
static int mhi_read_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 *out)
{
u32 tmp = readl_relaxed(addr);
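	/*
	 * A read of all 1s typically means the PCIe link is down or the
	 * device has gone away, so treat it as an I/O error.
	 */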
if (tmp == U32_MAX)
return -EIO;
*out = tmp;
return 0;
}
static void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 val)
{
writel_relaxed(val, addr);
}
static int mhi_runtime_get(struct mhi_controller *mhi_cntrl)
{
return 0;
}
static void mhi_runtime_put(struct mhi_controller *mhi_cntrl)
{
}
static void mhi_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback reason)
{
struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_cntrl->cntrl_dev));
/* this event occurs in atomic context */
if (reason == MHI_CB_FATAL_ERROR)
pci_err(qdev->pdev, "Fatal error received from device. Attempting to recover\n");
/* this event occurs in non-atomic context */
if (reason == MHI_CB_SYS_ERROR)
qaic_dev_reset_clean_local_state(qdev, true);
}
static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
{
u8 time_sec = 1;
int current_ee;
int ret;
/* Reset the device to bring the device in PBL EE */
mhi_soc_reset(mhi_cntrl);
/*
	 * Poll the execution environment (EE) once per second until the
	 * device enters PBL or the reset timeout expires.
*/
do {
msleep(1000);
current_ee = mhi_get_exec_env(mhi_cntrl);
} while (current_ee != MHI_EE_PBL && time_sec++ <= MAX_RESET_TIME_SEC);
/* If the device is in PBL EE retry power up */
if (current_ee == MHI_EE_PBL)
ret = mhi_async_power_up(mhi_cntrl);
else
ret = -EIO;
return ret;
}
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
int mhi_irq)
{
struct mhi_controller *mhi_cntrl;
int ret;
mhi_cntrl = devm_kzalloc(&pci_dev->dev, sizeof(*mhi_cntrl), GFP_KERNEL);
if (!mhi_cntrl)
return ERR_PTR(-ENOMEM);
mhi_cntrl->cntrl_dev = &pci_dev->dev;
/*
	 * Cover the entire possible physical RAM region. The remote side is
	 * going to calculate the size of this range, so subtract 1 to prevent
	 * rollover.
*/
mhi_cntrl->iova_start = 0;
mhi_cntrl->iova_stop = PHYS_ADDR_MAX - 1;
mhi_cntrl->status_cb = mhi_status_cb;
mhi_cntrl->runtime_get = mhi_runtime_get;
mhi_cntrl->runtime_put = mhi_runtime_put;
mhi_cntrl->read_reg = mhi_read_reg;
mhi_cntrl->write_reg = mhi_write_reg;
mhi_cntrl->regs = mhi_bar;
mhi_cntrl->reg_len = SZ_4K;
mhi_cntrl->nr_irqs = 1;
mhi_cntrl->irq = devm_kmalloc(&pci_dev->dev, sizeof(*mhi_cntrl->irq), GFP_KERNEL);
if (!mhi_cntrl->irq)
return ERR_PTR(-ENOMEM);
mhi_cntrl->irq[0] = mhi_irq;
mhi_cntrl->fw_image = "qcom/aic100/sbl.bin";
/* use latest configured timeout */
aic100_config.timeout_ms = mhi_timeout_ms;
ret = mhi_register_controller(mhi_cntrl, &aic100_config);
if (ret) {
pci_err(pci_dev, "mhi_register_controller failed %d\n", ret);
return ERR_PTR(ret);
}
ret = mhi_prepare_for_power_up(mhi_cntrl);
if (ret) {
pci_err(pci_dev, "mhi_prepare_for_power_up failed %d\n", ret);
goto prepare_power_up_fail;
}
ret = mhi_async_power_up(mhi_cntrl);
/*
	 * If -EIO is returned, the device may be stuck in the SBL EE, which is
	 * undesired. Issue a SoC reset and try to power up again.
*/
if (ret == -EIO && MHI_EE_SBL == mhi_get_exec_env(mhi_cntrl)) {
pci_err(pci_dev, "Found device in SBL at MHI init. Attempting a reset.\n");
ret = mhi_reset_and_async_power_up(mhi_cntrl);
}
if (ret) {
pci_err(pci_dev, "mhi_async_power_up failed %d\n", ret);
goto power_up_fail;
}
return mhi_cntrl;
power_up_fail:
mhi_unprepare_after_power_down(mhi_cntrl);
prepare_power_up_fail:
mhi_unregister_controller(mhi_cntrl);
return ERR_PTR(ret);
}
void qaic_mhi_free_controller(struct mhi_controller *mhi_cntrl, bool link_up)
{
mhi_power_down(mhi_cntrl, link_up);
mhi_unprepare_after_power_down(mhi_cntrl);
mhi_unregister_controller(mhi_cntrl);
}
void qaic_mhi_start_reset(struct mhi_controller *mhi_cntrl)
{
mhi_power_down(mhi_cntrl, true);
}
void qaic_mhi_reset_done(struct mhi_controller *mhi_cntrl)
{
struct pci_dev *pci_dev = container_of(mhi_cntrl->cntrl_dev, struct pci_dev, dev);
int ret;
ret = mhi_async_power_up(mhi_cntrl);
if (ret)
pci_err(pci_dev, "mhi_async_power_up failed after reset %d\n", ret);
}
| linux-master | drivers/accel/qaic/mhi_controller.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
#include <asm/byteorder.h>
#include <linux/completion.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <uapi/drm/qaic_accel.h>
#include "qaic.h"
#define MANAGE_MAGIC_NUMBER ((__force __le32)0x43494151) /* "QAIC" in little endian */
#define QAIC_DBC_Q_GAP SZ_256
#define QAIC_DBC_Q_BUF_ALIGN SZ_4K
#define QAIC_MANAGE_EXT_MSG_LENGTH SZ_64K /* Max DMA message length */
#define QAIC_WRAPPER_MAX_SIZE SZ_4K
#define QAIC_MHI_RETRY_WAIT_MS 100
#define QAIC_MHI_RETRY_MAX 20
static unsigned int control_resp_timeout_s = 60; /* 60 sec default */
module_param(control_resp_timeout_s, uint, 0600);
MODULE_PARM_DESC(control_resp_timeout_s, "Timeout for NNC responses from QSM");
struct manage_msg {
u32 len;
u32 count;
u8 data[];
};
/*
* wire encoding structures for the manage protocol.
* All fields are little endian on the wire
*/
struct wire_msg_hdr {
__le32 crc32; /* crc of everything following this field in the message */
__le32 magic_number;
__le32 sequence_number;
__le32 len; /* length of this message */
__le32 count; /* number of transactions in this message */
__le32 handle; /* unique id to track the resources consumed */
__le32 partition_id; /* partition id for the request (signed) */
__le32 padding; /* must be 0 */
} __packed;
struct wire_msg {
struct wire_msg_hdr hdr;
u8 data[];
} __packed;
struct wire_trans_hdr {
__le32 type;
__le32 len;
} __packed;
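/*
 * On-wire layout, as encoded/decoded in this file: a message is a
 * wire_msg_hdr followed by hdr.count transactions packed back to back.
 * Each transaction begins with a wire_trans_hdr whose len covers the
 * transaction header plus its payload, and hdr.len covers the whole
 * message including wire_msg_hdr.
 */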
/* Each message sent from the driver to the device is organized as a list of wrapper_msg */
struct wrapper_msg {
struct list_head list;
struct kref ref_count;
u32 len; /* length of data to transfer */
struct wrapper_list *head;
union {
struct wire_msg msg;
struct wire_trans_hdr trans;
};
};
struct wrapper_list {
struct list_head list;
spinlock_t lock; /* Protects the list state during additions and removals */
};
struct wire_trans_passthrough {
struct wire_trans_hdr hdr;
u8 data[];
} __packed;
struct wire_addr_size_pair {
__le64 addr;
__le64 size;
} __packed;
struct wire_trans_dma_xfer {
struct wire_trans_hdr hdr;
__le32 tag;
__le32 count;
__le32 dma_chunk_id;
__le32 padding;
struct wire_addr_size_pair data[];
} __packed;
/* Initiated by device to continue the DMA xfer of a large piece of data */
struct wire_trans_dma_xfer_cont {
struct wire_trans_hdr hdr;
__le32 dma_chunk_id;
__le32 padding;
__le64 xferred_size;
} __packed;
struct wire_trans_activate_to_dev {
struct wire_trans_hdr hdr;
__le64 req_q_addr;
__le64 rsp_q_addr;
__le32 req_q_size;
__le32 rsp_q_size;
__le32 buf_len;
__le32 options; /* unused, but BIT(16) has meaning to the device */
} __packed;
struct wire_trans_activate_from_dev {
struct wire_trans_hdr hdr;
__le32 status;
__le32 dbc_id;
__le64 options; /* unused */
} __packed;
struct wire_trans_deactivate_from_dev {
struct wire_trans_hdr hdr;
__le32 status;
__le32 dbc_id;
} __packed;
struct wire_trans_terminate_to_dev {
struct wire_trans_hdr hdr;
__le32 handle;
__le32 padding;
} __packed;
struct wire_trans_terminate_from_dev {
struct wire_trans_hdr hdr;
__le32 status;
__le32 padding;
} __packed;
struct wire_trans_status_to_dev {
struct wire_trans_hdr hdr;
} __packed;
struct wire_trans_status_from_dev {
struct wire_trans_hdr hdr;
__le16 major;
__le16 minor;
__le32 status;
__le64 status_flags;
} __packed;
struct wire_trans_validate_part_to_dev {
struct wire_trans_hdr hdr;
__le32 part_id;
__le32 padding;
} __packed;
struct wire_trans_validate_part_from_dev {
struct wire_trans_hdr hdr;
__le32 status;
__le32 padding;
} __packed;
struct xfer_queue_elem {
/*
* Node in list of ongoing transfer request on control channel.
* Maintained by root device struct.
*/
struct list_head list;
/* Sequence number of this transfer request */
u32 seq_num;
/* This is used to wait on until completion of transfer request */
struct completion xfer_done;
/* Received data from device */
void *buf;
};
struct dma_xfer {
/* Node in list of DMA transfers which is used for cleanup */
struct list_head list;
/* SG table of memory used for DMA */
struct sg_table *sgt;
/* Array pages used for DMA */
struct page **page_list;
/* Number of pages used for DMA */
unsigned long nr_pages;
};
struct ioctl_resources {
/* List of all DMA transfers which is used later for cleanup */
struct list_head dma_xfers;
/* Base address of request queue which belongs to a DBC */
void *buf;
/*
* Base bus address of request queue which belongs to a DBC. Response
* queue base bus address can be calculated by adding size of request
* queue to base bus address of request queue.
*/
dma_addr_t dma_addr;
/* Total size of request queue and response queue in byte */
u32 total_size;
/* Total number of elements that can be queued in each of request and response queue */
u32 nelem;
/* Base address of response queue which belongs to a DBC */
void *rsp_q_base;
/* Status of the NNC message received */
u32 status;
/* DBC id of the DBC received from device */
u32 dbc_id;
/*
* DMA transfer request messages can be big in size and it may not be
* possible to send them in one shot. In such cases the messages are
* broken into chunks, this field stores ID of such chunks.
*/
u32 dma_chunk_id;
/* Total number of bytes transferred for a DMA xfer request */
u64 xferred_dma_size;
/* Header of transaction message received from user. Used during DMA xfer request. */
void *trans_hdr;
};
struct resp_work {
struct work_struct work;
struct qaic_device *qdev;
void *buf;
};
/*
 * Since we're working with little endian messages, it's useful to be able to
 * increment without filling a whole line with conversions back and forth just
 * to add one to a message count.
*/
static __le32 incr_le32(__le32 val)
{
return cpu_to_le32(le32_to_cpu(val) + 1);
}
static u32 gen_crc(void *msg)
{
struct wrapper_list *wrappers = msg;
struct wrapper_msg *w;
u32 crc = ~0;
list_for_each_entry(w, &wrappers->list, list)
crc = crc32(crc, &w->msg, w->len);
return crc ^ ~0;
}
static u32 gen_crc_stub(void *msg)
{
return 0;
}
static bool valid_crc(void *msg)
{
struct wire_msg_hdr *hdr = msg;
bool ret;
u32 crc;
/*
* The output of this algorithm is always converted to the native
* endianness.
*/
crc = le32_to_cpu(hdr->crc32);
hdr->crc32 = 0;
ret = (crc32(~0, msg, le32_to_cpu(hdr->len)) ^ ~0) == crc;
hdr->crc32 = cpu_to_le32(crc);
return ret;
}
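/*
 * Note: in the send path gen_crc() runs before the header's crc32 field is
 * populated (it is still zero from allocation), so gen_crc() and valid_crc()
 * both effectively compute CRC32 over the message with the crc32 field
 * treated as zero.
 */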
static bool valid_crc_stub(void *msg)
{
return true;
}
static void free_wrapper(struct kref *ref)
{
struct wrapper_msg *wrapper = container_of(ref, struct wrapper_msg, ref_count);
list_del(&wrapper->list);
kfree(wrapper);
}
static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources,
struct qaic_user *usr)
{
u32 dbc_id = resources->dbc_id;
if (resources->buf) {
wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use);
qdev->dbc[dbc_id].req_q_base = resources->buf;
qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base;
qdev->dbc[dbc_id].dma_addr = resources->dma_addr;
qdev->dbc[dbc_id].total_size = resources->total_size;
qdev->dbc[dbc_id].nelem = resources->nelem;
enable_dbc(qdev, dbc_id, usr);
qdev->dbc[dbc_id].in_use = true;
resources->buf = NULL;
}
}
static void free_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources)
{
if (resources->buf)
dma_free_coherent(&qdev->pdev->dev, resources->total_size, resources->buf,
resources->dma_addr);
resources->buf = NULL;
}
static void free_dma_xfers(struct qaic_device *qdev, struct ioctl_resources *resources)
{
struct dma_xfer *xfer;
struct dma_xfer *x;
int i;
list_for_each_entry_safe(xfer, x, &resources->dma_xfers, list) {
dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
sg_free_table(xfer->sgt);
kfree(xfer->sgt);
for (i = 0; i < xfer->nr_pages; ++i)
put_page(xfer->page_list[i]);
kfree(xfer->page_list);
list_del(&xfer->list);
kfree(xfer);
}
}
static struct wrapper_msg *add_wrapper(struct wrapper_list *wrappers, u32 size)
{
struct wrapper_msg *w = kzalloc(size, GFP_KERNEL);
if (!w)
return NULL;
list_add_tail(&w->list, &wrappers->list);
kref_init(&w->ref_count);
w->head = wrappers;
return w;
}
static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
u32 *user_len)
{
struct qaic_manage_trans_passthrough *in_trans = trans;
struct wire_trans_passthrough *out_trans;
struct wrapper_msg *trans_wrapper;
struct wrapper_msg *wrapper;
struct wire_msg *msg;
u32 msg_hdr_len;
wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
msg = &wrapper->msg;
msg_hdr_len = le32_to_cpu(msg->hdr.len);
if (in_trans->hdr.len % 8 != 0)
return -EINVAL;
if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_EXT_MSG_LENGTH)
return -ENOSPC;
trans_wrapper = add_wrapper(wrappers,
offsetof(struct wrapper_msg, trans) + in_trans->hdr.len);
if (!trans_wrapper)
return -ENOMEM;
trans_wrapper->len = in_trans->hdr.len;
out_trans = (struct wire_trans_passthrough *)&trans_wrapper->trans;
memcpy(out_trans->data, in_trans->data, in_trans->hdr.len - sizeof(in_trans->hdr));
msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
msg->hdr.count = incr_le32(msg->hdr.count);
*user_len += in_trans->hdr.len;
out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_PASSTHROUGH_TO_DEV);
out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);
return 0;
}
/* Returns a negative error code on failure, 0 if all pages were mapped, 1 if dma_cont is needed */
static int find_and_map_user_pages(struct qaic_device *qdev,
struct qaic_manage_trans_dma_xfer *in_trans,
struct ioctl_resources *resources, struct dma_xfer *xfer)
{
u64 xfer_start_addr, remaining, end, total;
unsigned long need_pages;
struct page **page_list;
unsigned long nr_pages;
struct sg_table *sgt;
int ret;
int i;
if (check_add_overflow(in_trans->addr, resources->xferred_dma_size, &xfer_start_addr))
return -EINVAL;
if (in_trans->size < resources->xferred_dma_size)
return -EINVAL;
remaining = in_trans->size - resources->xferred_dma_size;
if (remaining == 0)
return 0;
if (check_add_overflow(xfer_start_addr, remaining, &end))
return -EINVAL;
total = remaining + offset_in_page(xfer_start_addr);
if (total >= SIZE_MAX)
return -EINVAL;
need_pages = DIV_ROUND_UP(total, PAGE_SIZE);
nr_pages = need_pages;
while (1) {
page_list = kmalloc_array(nr_pages, sizeof(*page_list), GFP_KERNEL | __GFP_NOWARN);
if (!page_list) {
nr_pages = nr_pages / 2;
if (!nr_pages)
return -ENOMEM;
} else {
break;
}
}
ret = get_user_pages_fast(xfer_start_addr, nr_pages, 0, page_list);
if (ret < 0)
goto free_page_list;
if (ret != nr_pages) {
nr_pages = ret;
ret = -EFAULT;
goto put_pages;
}
sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
ret = -ENOMEM;
goto put_pages;
}
ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
offset_in_page(xfer_start_addr),
remaining, GFP_KERNEL);
if (ret) {
ret = -ENOMEM;
goto free_sgt;
}
ret = dma_map_sgtable(&qdev->pdev->dev, sgt, DMA_TO_DEVICE, 0);
if (ret)
goto free_table;
xfer->sgt = sgt;
xfer->page_list = page_list;
xfer->nr_pages = nr_pages;
return need_pages > nr_pages ? 1 : 0;
free_table:
sg_free_table(sgt);
free_sgt:
kfree(sgt);
put_pages:
for (i = 0; i < nr_pages; ++i)
put_page(page_list[i]);
free_page_list:
kfree(page_list);
return ret;
}
/* Returns a negative error code on failure, 0 if everything was encoded, 1 if dma_cont is needed */
static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wrappers,
struct ioctl_resources *resources, u32 msg_hdr_len, u32 *size,
struct wire_trans_dma_xfer **out_trans)
{
struct wrapper_msg *trans_wrapper;
struct sg_table *sgt = xfer->sgt;
struct wire_addr_size_pair *asp;
struct scatterlist *sg;
struct wrapper_msg *w;
unsigned int dma_len;
u64 dma_chunk_len;
void *boundary;
int nents_dma;
int nents;
int i;
nents = sgt->nents;
nents_dma = nents;
*size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
for_each_sgtable_sg(sgt, sg, i) {
*size -= sizeof(*asp);
/* Save 1K for possible follow-up transactions. */
if (*size < SZ_1K) {
nents_dma = i;
break;
}
}
trans_wrapper = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
if (!trans_wrapper)
return -ENOMEM;
*out_trans = (struct wire_trans_dma_xfer *)&trans_wrapper->trans;
asp = (*out_trans)->data;
boundary = (void *)trans_wrapper + QAIC_WRAPPER_MAX_SIZE;
*size = 0;
dma_len = 0;
w = trans_wrapper;
dma_chunk_len = 0;
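	/*
	 * The size of each address/size pair is written one iteration late:
	 * dma_len still holds the previous sg's length (0 on the first pass),
	 * so an entry's size is filled in by the following iteration, and the
	 * last entry's size is written after the loop.
	 */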
for_each_sg(sgt->sgl, sg, nents_dma, i) {
asp->size = cpu_to_le64(dma_len);
dma_chunk_len += dma_len;
if (dma_len) {
asp++;
if ((void *)asp + sizeof(*asp) > boundary) {
w->len = (void *)asp - (void *)&w->msg;
*size += w->len;
w = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
if (!w)
return -ENOMEM;
boundary = (void *)w + QAIC_WRAPPER_MAX_SIZE;
asp = (struct wire_addr_size_pair *)&w->msg;
}
}
asp->addr = cpu_to_le64(sg_dma_address(sg));
dma_len = sg_dma_len(sg);
}
/* finalize the last segment */
asp->size = cpu_to_le64(dma_len);
w->len = (void *)asp + sizeof(*asp) - (void *)&w->msg;
*size += w->len;
dma_chunk_len += dma_len;
resources->xferred_dma_size += dma_chunk_len;
return nents_dma < nents ? 1 : 0;
}
static void cleanup_xfer(struct qaic_device *qdev, struct dma_xfer *xfer)
{
int i;
dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
sg_free_table(xfer->sgt);
kfree(xfer->sgt);
for (i = 0; i < xfer->nr_pages; ++i)
put_page(xfer->page_list[i]);
kfree(xfer->page_list);
}
static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
u32 *user_len, struct ioctl_resources *resources, struct qaic_user *usr)
{
struct qaic_manage_trans_dma_xfer *in_trans = trans;
struct wire_trans_dma_xfer *out_trans;
struct wrapper_msg *wrapper;
struct dma_xfer *xfer;
struct wire_msg *msg;
bool need_cont_dma;
u32 msg_hdr_len;
u32 size;
int ret;
wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
msg = &wrapper->msg;
msg_hdr_len = le32_to_cpu(msg->hdr.len);
/* There should be enough space to hold at least one ASP entry. */
if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) >
QAIC_MANAGE_EXT_MSG_LENGTH)
return -ENOMEM;
xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
if (!xfer)
return -ENOMEM;
ret = find_and_map_user_pages(qdev, in_trans, resources, xfer);
if (ret < 0)
goto free_xfer;
need_cont_dma = (bool)ret;
ret = encode_addr_size_pairs(xfer, wrappers, resources, msg_hdr_len, &size, &out_trans);
if (ret < 0)
goto cleanup_xfer;
need_cont_dma = need_cont_dma || (bool)ret;
msg->hdr.len = cpu_to_le32(msg_hdr_len + size);
msg->hdr.count = incr_le32(msg->hdr.count);
out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
out_trans->hdr.len = cpu_to_le32(size);
out_trans->tag = cpu_to_le32(in_trans->tag);
out_trans->count = cpu_to_le32((size - sizeof(*out_trans)) /
sizeof(struct wire_addr_size_pair));
*user_len += in_trans->hdr.len;
if (resources->dma_chunk_id) {
out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
} else if (need_cont_dma) {
while (resources->dma_chunk_id == 0)
resources->dma_chunk_id = atomic_inc_return(&usr->chunk_id);
out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
}
resources->trans_hdr = trans;
list_add(&xfer->list, &resources->dma_xfers);
return 0;
cleanup_xfer:
cleanup_xfer(qdev, xfer);
free_xfer:
kfree(xfer);
return ret;
}
static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
u32 *user_len, struct ioctl_resources *resources)
{
struct qaic_manage_trans_activate_to_dev *in_trans = trans;
struct wire_trans_activate_to_dev *out_trans;
struct wrapper_msg *trans_wrapper;
struct wrapper_msg *wrapper;
struct wire_msg *msg;
dma_addr_t dma_addr;
u32 msg_hdr_len;
void *buf;
u32 nelem;
u32 size;
int ret;
wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
msg = &wrapper->msg;
msg_hdr_len = le32_to_cpu(msg->hdr.len);
if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_MAX_MSG_LENGTH)
return -ENOSPC;
if (!in_trans->queue_size)
return -EINVAL;
if (in_trans->pad)
return -EINVAL;
nelem = in_trans->queue_size;
size = (get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) * nelem;
if (size / nelem != get_dbc_req_elem_size() + get_dbc_rsp_elem_size())
return -EINVAL;
if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size)
return -EINVAL;
size = ALIGN((size + QAIC_DBC_Q_GAP), QAIC_DBC_Q_BUF_ALIGN);
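	/*
	 * A single coherent allocation backs both queues: the request queue
	 * starts at the beginning of the buffer and the response queue is
	 * placed at the very end, leaving at least QAIC_DBC_Q_GAP bytes of
	 * padding between them.
	 */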
buf = dma_alloc_coherent(&qdev->pdev->dev, size, &dma_addr, GFP_KERNEL);
if (!buf)
return -ENOMEM;
trans_wrapper = add_wrapper(wrappers,
offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
if (!trans_wrapper) {
ret = -ENOMEM;
goto free_dma;
}
trans_wrapper->len = sizeof(*out_trans);
out_trans = (struct wire_trans_activate_to_dev *)&trans_wrapper->trans;
out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_ACTIVATE_TO_DEV);
out_trans->hdr.len = cpu_to_le32(sizeof(*out_trans));
out_trans->buf_len = cpu_to_le32(size);
out_trans->req_q_addr = cpu_to_le64(dma_addr);
out_trans->req_q_size = cpu_to_le32(nelem);
out_trans->rsp_q_addr = cpu_to_le64(dma_addr + size - nelem * get_dbc_rsp_elem_size());
out_trans->rsp_q_size = cpu_to_le32(nelem);
out_trans->options = cpu_to_le32(in_trans->options);
*user_len += in_trans->hdr.len;
msg->hdr.len = cpu_to_le32(msg_hdr_len + sizeof(*out_trans));
msg->hdr.count = incr_le32(msg->hdr.count);
resources->buf = buf;
resources->dma_addr = dma_addr;
resources->total_size = size;
resources->nelem = nelem;
resources->rsp_q_base = buf + size - nelem * get_dbc_rsp_elem_size();
return 0;
free_dma:
dma_free_coherent(&qdev->pdev->dev, size, buf, dma_addr);
return ret;
}
static int encode_deactivate(struct qaic_device *qdev, void *trans,
u32 *user_len, struct qaic_user *usr)
{
struct qaic_manage_trans_deactivate *in_trans = trans;
if (in_trans->dbc_id >= qdev->num_dbc || in_trans->pad)
return -EINVAL;
*user_len += in_trans->hdr.len;
return disable_dbc(qdev, in_trans->dbc_id, usr);
}
static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
u32 *user_len)
{
struct qaic_manage_trans_status_to_dev *in_trans = trans;
struct wire_trans_status_to_dev *out_trans;
struct wrapper_msg *trans_wrapper;
struct wrapper_msg *wrapper;
struct wire_msg *msg;
u32 msg_hdr_len;
wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
msg = &wrapper->msg;
msg_hdr_len = le32_to_cpu(msg->hdr.len);
if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_MAX_MSG_LENGTH)
return -ENOSPC;
trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
if (!trans_wrapper)
return -ENOMEM;
trans_wrapper->len = sizeof(*out_trans);
out_trans = (struct wire_trans_status_to_dev *)&trans_wrapper->trans;
out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_STATUS_TO_DEV);
out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);
msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
msg->hdr.count = incr_le32(msg->hdr.count);
*user_len += in_trans->hdr.len;
return 0;
}
static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
struct wrapper_list *wrappers, struct ioctl_resources *resources,
struct qaic_user *usr)
{
struct qaic_manage_trans_hdr *trans_hdr;
struct wrapper_msg *wrapper;
struct wire_msg *msg;
u32 user_len = 0;
int ret;
int i;
if (!user_msg->count ||
user_msg->len < sizeof(*trans_hdr)) {
ret = -EINVAL;
goto out;
}
wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
msg = &wrapper->msg;
msg->hdr.len = cpu_to_le32(sizeof(msg->hdr));
if (resources->dma_chunk_id) {
ret = encode_dma(qdev, resources->trans_hdr, wrappers, &user_len, resources, usr);
msg->hdr.count = cpu_to_le32(1);
goto out;
}
for (i = 0; i < user_msg->count; ++i) {
if (user_len > user_msg->len - sizeof(*trans_hdr)) {
ret = -EINVAL;
break;
}
trans_hdr = (struct qaic_manage_trans_hdr *)(user_msg->data + user_len);
if (trans_hdr->len < sizeof(trans_hdr) ||
size_add(user_len, trans_hdr->len) > user_msg->len) {
ret = -EINVAL;
break;
}
switch (trans_hdr->type) {
case QAIC_TRANS_PASSTHROUGH_FROM_USR:
ret = encode_passthrough(qdev, trans_hdr, wrappers, &user_len);
break;
case QAIC_TRANS_DMA_XFER_FROM_USR:
ret = encode_dma(qdev, trans_hdr, wrappers, &user_len, resources, usr);
break;
case QAIC_TRANS_ACTIVATE_FROM_USR:
ret = encode_activate(qdev, trans_hdr, wrappers, &user_len, resources);
break;
case QAIC_TRANS_DEACTIVATE_FROM_USR:
ret = encode_deactivate(qdev, trans_hdr, &user_len, usr);
break;
case QAIC_TRANS_STATUS_FROM_USR:
ret = encode_status(qdev, trans_hdr, wrappers, &user_len);
break;
default:
ret = -EINVAL;
break;
}
if (ret)
break;
}
if (user_len != user_msg->len)
ret = -EINVAL;
out:
if (ret) {
free_dma_xfers(qdev, resources);
free_dbc_buf(qdev, resources);
return ret;
}
return 0;
}
static int decode_passthrough(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
u32 *msg_len)
{
struct qaic_manage_trans_passthrough *out_trans;
struct wire_trans_passthrough *in_trans = trans;
u32 len;
out_trans = (void *)user_msg->data + user_msg->len;
len = le32_to_cpu(in_trans->hdr.len);
if (len % 8 != 0)
return -EINVAL;
if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
return -ENOSPC;
memcpy(out_trans->data, in_trans->data, len - sizeof(in_trans->hdr));
user_msg->len += len;
*msg_len += len;
out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
out_trans->hdr.len = len;
return 0;
}
static int decode_activate(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
u32 *msg_len, struct ioctl_resources *resources, struct qaic_user *usr)
{
struct qaic_manage_trans_activate_from_dev *out_trans;
struct wire_trans_activate_from_dev *in_trans = trans;
u32 len;
out_trans = (void *)user_msg->data + user_msg->len;
len = le32_to_cpu(in_trans->hdr.len);
if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
return -ENOSPC;
user_msg->len += len;
*msg_len += len;
out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
out_trans->hdr.len = len;
out_trans->status = le32_to_cpu(in_trans->status);
out_trans->dbc_id = le32_to_cpu(in_trans->dbc_id);
out_trans->options = le64_to_cpu(in_trans->options);
if (!resources->buf)
/* how did we get an activate response without a request? */
return -EINVAL;
if (out_trans->dbc_id >= qdev->num_dbc)
/*
* The device assigned an invalid resource, which should never
* happen. Return an error so the user can try to recover.
*/
return -ENODEV;
if (out_trans->status)
/*
		 * Allocating resources failed on the device side. This is not
		 * expected behaviour; the user is expected to handle this situation.
*/
return -ECANCELED;
resources->status = out_trans->status;
resources->dbc_id = out_trans->dbc_id;
save_dbc_buf(qdev, resources, usr);
return 0;
}
static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len,
struct qaic_user *usr)
{
struct wire_trans_deactivate_from_dev *in_trans = trans;
u32 dbc_id = le32_to_cpu(in_trans->dbc_id);
u32 status = le32_to_cpu(in_trans->status);
if (dbc_id >= qdev->num_dbc)
/*
* The device assigned an invalid resource, which should never
* happen. Inject an error so the user can try to recover.
*/
return -ENODEV;
if (status) {
/*
* Releasing resources failed on the device side, which puts
* us in a bind since they may still be in use, so enable the
* dbc. User is expected to retry deactivation.
*/
enable_dbc(qdev, dbc_id, usr);
return -ECANCELED;
}
release_dbc(qdev, dbc_id);
*msg_len += sizeof(*in_trans);
return 0;
}
static int decode_status(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
u32 *user_len, struct wire_msg *msg)
{
struct qaic_manage_trans_status_from_dev *out_trans;
struct wire_trans_status_from_dev *in_trans = trans;
u32 len;
out_trans = (void *)user_msg->data + user_msg->len;
len = le32_to_cpu(in_trans->hdr.len);
if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
return -ENOSPC;
out_trans->hdr.type = QAIC_TRANS_STATUS_FROM_DEV;
out_trans->hdr.len = len;
out_trans->major = le16_to_cpu(in_trans->major);
out_trans->minor = le16_to_cpu(in_trans->minor);
out_trans->status_flags = le64_to_cpu(in_trans->status_flags);
out_trans->status = le32_to_cpu(in_trans->status);
*user_len += le32_to_cpu(in_trans->hdr.len);
user_msg->len += len;
if (out_trans->status)
return -ECANCELED;
if (out_trans->status_flags & BIT(0) && !valid_crc(msg))
return -EPIPE;
return 0;
}
static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
struct wire_msg *msg, struct ioctl_resources *resources,
struct qaic_user *usr)
{
u32 msg_hdr_len = le32_to_cpu(msg->hdr.len);
struct wire_trans_hdr *trans_hdr;
u32 msg_len = 0;
int ret;
int i;
if (msg_hdr_len < sizeof(*trans_hdr) ||
msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
return -EINVAL;
user_msg->len = 0;
user_msg->count = le32_to_cpu(msg->hdr.count);
for (i = 0; i < user_msg->count; ++i) {
u32 hdr_len;
if (msg_len > msg_hdr_len - sizeof(*trans_hdr))
return -EINVAL;
trans_hdr = (struct wire_trans_hdr *)(msg->data + msg_len);
hdr_len = le32_to_cpu(trans_hdr->len);
if (hdr_len < sizeof(*trans_hdr) ||
size_add(msg_len, hdr_len) > msg_hdr_len)
return -EINVAL;
switch (le32_to_cpu(trans_hdr->type)) {
case QAIC_TRANS_PASSTHROUGH_FROM_DEV:
ret = decode_passthrough(qdev, trans_hdr, user_msg, &msg_len);
break;
case QAIC_TRANS_ACTIVATE_FROM_DEV:
ret = decode_activate(qdev, trans_hdr, user_msg, &msg_len, resources, usr);
break;
case QAIC_TRANS_DEACTIVATE_FROM_DEV:
ret = decode_deactivate(qdev, trans_hdr, &msg_len, usr);
break;
case QAIC_TRANS_STATUS_FROM_DEV:
ret = decode_status(qdev, trans_hdr, user_msg, &msg_len, msg);
break;
default:
return -EINVAL;
}
if (ret)
return ret;
}
if (msg_len != (msg_hdr_len - sizeof(msg->hdr)))
return -EINVAL;
return 0;
}
static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 seq_num,
bool ignore_signal)
{
struct xfer_queue_elem elem;
struct wire_msg *out_buf;
struct wrapper_msg *w;
long ret = -EAGAIN;
int xfer_count = 0;
int retry_count;
if (qdev->in_reset) {
mutex_unlock(&qdev->cntl_mutex);
return ERR_PTR(-ENODEV);
}
/* Attempt to avoid a partial commit of a message */
list_for_each_entry(w, &wrappers->list, list)
xfer_count++;
for (retry_count = 0; retry_count < QAIC_MHI_RETRY_MAX; retry_count++) {
if (xfer_count <= mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) {
ret = 0;
break;
}
msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS);
if (signal_pending(current))
break;
}
if (ret) {
mutex_unlock(&qdev->cntl_mutex);
return ERR_PTR(ret);
}
elem.seq_num = seq_num;
elem.buf = NULL;
init_completion(&elem.xfer_done);
if (likely(!qdev->cntl_lost_buf)) {
/*
* The max size of request to device is QAIC_MANAGE_EXT_MSG_LENGTH.
* The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH.
*/
out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL);
if (!out_buf) {
mutex_unlock(&qdev->cntl_mutex);
return ERR_PTR(-ENOMEM);
}
ret = mhi_queue_buf(qdev->cntl_ch, DMA_FROM_DEVICE, out_buf,
QAIC_MANAGE_MAX_MSG_LENGTH, MHI_EOT);
if (ret) {
mutex_unlock(&qdev->cntl_mutex);
return ERR_PTR(ret);
}
} else {
/*
		 * We lost a buffer because we queued a recv buf, but then
		 * queuing the corresponding tx buf failed. To try to avoid
		 * a memory leak, let's reclaim it and use it for this
		 * transaction.
*/
qdev->cntl_lost_buf = false;
}
list_for_each_entry(w, &wrappers->list, list) {
kref_get(&w->ref_count);
retry_count = 0;
ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
if (ret) {
qdev->cntl_lost_buf = true;
kref_put(&w->ref_count, free_wrapper);
mutex_unlock(&qdev->cntl_mutex);
return ERR_PTR(ret);
}
}
list_add_tail(&elem.list, &qdev->cntl_xfer_list);
mutex_unlock(&qdev->cntl_mutex);
if (ignore_signal)
ret = wait_for_completion_timeout(&elem.xfer_done, control_resp_timeout_s * HZ);
else
ret = wait_for_completion_interruptible_timeout(&elem.xfer_done,
control_resp_timeout_s * HZ);
/*
	 * Not using _interruptible because we have to clean up or we'll
	 * likely cause memory corruption.
*/
mutex_lock(&qdev->cntl_mutex);
if (!list_empty(&elem.list))
list_del(&elem.list);
if (!ret && !elem.buf)
ret = -ETIMEDOUT;
else if (ret > 0 && !elem.buf)
ret = -EIO;
mutex_unlock(&qdev->cntl_mutex);
if (ret < 0) {
kfree(elem.buf);
return ERR_PTR(ret);
} else if (!qdev->valid_crc(elem.buf)) {
kfree(elem.buf);
return ERR_PTR(-EPIPE);
}
return elem.buf;
}
/* Add a transaction to abort the outstanding DMA continuation */
static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 dma_chunk_id)
{
struct wire_trans_dma_xfer *out_trans;
u32 size = sizeof(*out_trans);
struct wrapper_msg *wrapper;
struct wrapper_msg *w;
struct wire_msg *msg;
wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
msg = &wrapper->msg;
/* Remove all but the first wrapper which has the msg header */
list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
if (!list_is_first(&wrapper->list, &wrappers->list))
kref_put(&wrapper->ref_count, free_wrapper);
wrapper = add_wrapper(wrappers, offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
if (!wrapper)
return -ENOMEM;
out_trans = (struct wire_trans_dma_xfer *)&wrapper->trans;
out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
out_trans->hdr.len = cpu_to_le32(size);
out_trans->tag = cpu_to_le32(0);
out_trans->count = cpu_to_le32(0);
out_trans->dma_chunk_id = cpu_to_le32(dma_chunk_id);
msg->hdr.len = cpu_to_le32(size + sizeof(*msg));
msg->hdr.count = cpu_to_le32(1);
wrapper->len = size;
return 0;
}
static struct wrapper_list *alloc_wrapper_list(void)
{
struct wrapper_list *wrappers;
wrappers = kmalloc(sizeof(*wrappers), GFP_KERNEL);
if (!wrappers)
return NULL;
INIT_LIST_HEAD(&wrappers->list);
spin_lock_init(&wrappers->lock);
return wrappers;
}
static int qaic_manage_msg_xfer(struct qaic_device *qdev, struct qaic_user *usr,
struct manage_msg *user_msg, struct ioctl_resources *resources,
struct wire_msg **rsp)
{
struct wrapper_list *wrappers;
struct wrapper_msg *wrapper;
struct wrapper_msg *w;
bool all_done = false;
struct wire_msg *msg;
int ret;
wrappers = alloc_wrapper_list();
if (!wrappers)
return -ENOMEM;
wrapper = add_wrapper(wrappers, sizeof(*wrapper));
if (!wrapper) {
kfree(wrappers);
return -ENOMEM;
}
msg = &wrapper->msg;
wrapper->len = sizeof(*msg);
ret = encode_message(qdev, user_msg, wrappers, resources, usr);
if (ret && resources->dma_chunk_id)
ret = abort_dma_cont(qdev, wrappers, resources->dma_chunk_id);
if (ret)
goto encode_failed;
ret = mutex_lock_interruptible(&qdev->cntl_mutex);
if (ret)
goto lock_failed;
msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
if (usr) {
msg->hdr.handle = cpu_to_le32(usr->handle);
msg->hdr.partition_id = cpu_to_le32(usr->qddev->partition_id);
} else {
msg->hdr.handle = 0;
msg->hdr.partition_id = cpu_to_le32(QAIC_NO_PARTITION);
}
msg->hdr.padding = cpu_to_le32(0);
msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));
/* msg_xfer releases the mutex */
*rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, false);
if (IS_ERR(*rsp))
ret = PTR_ERR(*rsp);
lock_failed:
free_dma_xfers(qdev, resources);
encode_failed:
spin_lock(&wrappers->lock);
list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
kref_put(&wrapper->ref_count, free_wrapper);
all_done = list_empty(&wrappers->list);
spin_unlock(&wrappers->lock);
if (all_done)
kfree(wrappers);
return ret;
}
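/*
 * DMA continuation flow, as handled by qaic_manage() below: when a DMA xfer
 * transaction is too large to encode in a single message, only part of it is
 * sent and the device replies with a single wire_trans_dma_xfer_cont carrying
 * the dma_chunk_id and the number of bytes it has received so far. If those
 * match our bookkeeping, qaic_manage() loops back and encodes the remaining
 * address/size pairs under the same chunk id until the device stops asking
 * for more.
 */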
static int qaic_manage(struct qaic_device *qdev, struct qaic_user *usr, struct manage_msg *user_msg)
{
struct wire_trans_dma_xfer_cont *dma_cont = NULL;
struct ioctl_resources resources;
struct wire_msg *rsp = NULL;
int ret;
memset(&resources, 0, sizeof(struct ioctl_resources));
INIT_LIST_HEAD(&resources.dma_xfers);
if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH ||
user_msg->count > QAIC_MANAGE_MAX_MSG_LENGTH / sizeof(struct qaic_manage_trans_hdr))
return -EINVAL;
dma_xfer_continue:
ret = qaic_manage_msg_xfer(qdev, usr, user_msg, &resources, &rsp);
if (ret)
return ret;
/* dma_cont should be the only transaction if present */
if (le32_to_cpu(rsp->hdr.count) == 1) {
dma_cont = (struct wire_trans_dma_xfer_cont *)rsp->data;
if (le32_to_cpu(dma_cont->hdr.type) != QAIC_TRANS_DMA_XFER_CONT)
dma_cont = NULL;
}
if (dma_cont) {
if (le32_to_cpu(dma_cont->dma_chunk_id) == resources.dma_chunk_id &&
le64_to_cpu(dma_cont->xferred_size) == resources.xferred_dma_size) {
kfree(rsp);
goto dma_xfer_continue;
}
ret = -EINVAL;
goto dma_cont_failed;
}
ret = decode_message(qdev, user_msg, rsp, &resources, usr);
dma_cont_failed:
free_dbc_buf(qdev, &resources);
kfree(rsp);
return ret;
}
int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qaic_manage_msg *user_msg = data;
struct qaic_device *qdev;
struct manage_msg *msg;
struct qaic_user *usr;
u8 __user *user_data;
int qdev_rcu_id;
int usr_rcu_id;
int ret;
if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH)
return -EINVAL;
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
if (!usr->qddev) {
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return -ENODEV;
}
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
if (qdev->in_reset) {
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return -ENODEV;
}
msg = kzalloc(QAIC_MANAGE_MAX_MSG_LENGTH + sizeof(*msg), GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
goto out;
}
msg->len = user_msg->len;
msg->count = user_msg->count;
user_data = u64_to_user_ptr(user_msg->data);
if (copy_from_user(msg->data, user_data, user_msg->len)) {
ret = -EFAULT;
goto free_msg;
}
ret = qaic_manage(qdev, usr, msg);
/*
	 * If qaic_manage() is successful, we copy the message back to
	 * userspace memory. -ECANCELED is handled the same way because it
	 * means the device has NACKed the message with a status error code
	 * that userspace would like to know.
*/
if (ret == -ECANCELED || !ret) {
if (copy_to_user(user_data, msg->data, msg->len)) {
ret = -EFAULT;
} else {
user_msg->len = msg->len;
user_msg->count = msg->count;
}
}
free_msg:
kfree(msg);
out:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return ret;
}
int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor)
{
struct qaic_manage_trans_status_from_dev *status_result;
struct qaic_manage_trans_status_to_dev *status_query;
struct manage_msg *user_msg;
int ret;
user_msg = kmalloc(sizeof(*user_msg) + sizeof(*status_result), GFP_KERNEL);
if (!user_msg) {
ret = -ENOMEM;
goto out;
}
user_msg->len = sizeof(*status_query);
user_msg->count = 1;
status_query = (struct qaic_manage_trans_status_to_dev *)user_msg->data;
status_query->hdr.type = QAIC_TRANS_STATUS_FROM_USR;
status_query->hdr.len = sizeof(status_query->hdr);
ret = qaic_manage(qdev, usr, user_msg);
if (ret)
goto kfree_user_msg;
status_result = (struct qaic_manage_trans_status_from_dev *)user_msg->data;
*major = status_result->major;
*minor = status_result->minor;
if (status_result->status_flags & BIT(0)) { /* device is using CRC */
		/* qdev->gen_crc already generates CRC by default; also enable validation */
qdev->valid_crc = valid_crc;
} else {
		/* qdev->valid_crc already bypasses CRC by default; also stop generating it */
qdev->gen_crc = gen_crc_stub;
}
kfree_user_msg:
kfree(user_msg);
out:
return ret;
}
static void resp_worker(struct work_struct *work)
{
struct resp_work *resp = container_of(work, struct resp_work, work);
struct qaic_device *qdev = resp->qdev;
struct wire_msg *msg = resp->buf;
struct xfer_queue_elem *elem;
struct xfer_queue_elem *i;
bool found = false;
mutex_lock(&qdev->cntl_mutex);
list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
if (elem->seq_num == le32_to_cpu(msg->hdr.sequence_number)) {
found = true;
list_del_init(&elem->list);
elem->buf = msg;
complete_all(&elem->xfer_done);
break;
}
}
mutex_unlock(&qdev->cntl_mutex);
if (!found)
/* request must have timed out, drop packet */
kfree(msg);
kfree(resp);
}
static void free_wrapper_from_list(struct wrapper_list *wrappers, struct wrapper_msg *wrapper)
{
bool all_done = false;
spin_lock(&wrappers->lock);
kref_put(&wrapper->ref_count, free_wrapper);
all_done = list_empty(&wrappers->list);
spin_unlock(&wrappers->lock);
if (all_done)
kfree(wrappers);
}
void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
struct wire_msg *msg = mhi_result->buf_addr;
struct wrapper_msg *wrapper = container_of(msg, struct wrapper_msg, msg);
free_wrapper_from_list(wrapper->head, wrapper);
}
void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
struct wire_msg *msg = mhi_result->buf_addr;
struct resp_work *resp;
if (mhi_result->transaction_status || msg->hdr.magic_number != MANAGE_MAGIC_NUMBER) {
kfree(msg);
return;
}
resp = kmalloc(sizeof(*resp), GFP_ATOMIC);
if (!resp) {
kfree(msg);
return;
}
INIT_WORK(&resp->work, resp_worker);
resp->qdev = qdev;
resp->buf = msg;
queue_work(qdev->cntl_wq, &resp->work);
}
int qaic_control_open(struct qaic_device *qdev)
{
if (!qdev->cntl_ch)
return -ENODEV;
qdev->cntl_lost_buf = false;
/*
 * By default qaic assumes that the device has CRC enabled.
 * Whether CRC is actually enabled or disabled on the device is learned
 * during the device status transaction, which is the first transaction
 * performed on the control channel.
 *
 * Therefore CRC validation of the first device status response is
 * skipped (by calling valid_crc_stub) and, if the device turns out to
 * have CRC enabled, is done later during decoding.
 * Once qaic knows whether the device has CRC enabled, it acts
 * accordingly.
 */
qdev->gen_crc = gen_crc;
qdev->valid_crc = valid_crc_stub;
return mhi_prepare_for_transfer(qdev->cntl_ch);
}
void qaic_control_close(struct qaic_device *qdev)
{
mhi_unprepare_from_transfer(qdev->cntl_ch);
}
void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr)
{
struct wire_trans_terminate_to_dev *trans;
struct wrapper_list *wrappers;
struct wrapper_msg *wrapper;
struct wire_msg *msg;
struct wire_msg *rsp;
wrappers = alloc_wrapper_list();
if (!wrappers)
return;
wrapper = add_wrapper(wrappers, sizeof(*wrapper) + sizeof(*msg) + sizeof(*trans));
if (!wrapper)
return;
msg = &wrapper->msg;
trans = (struct wire_trans_terminate_to_dev *)msg->data;
trans->hdr.type = cpu_to_le32(QAIC_TRANS_TERMINATE_TO_DEV);
trans->hdr.len = cpu_to_le32(sizeof(*trans));
trans->handle = cpu_to_le32(usr->handle);
mutex_lock(&qdev->cntl_mutex);
wrapper->len = sizeof(msg->hdr) + sizeof(*trans);
msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
msg->hdr.len = cpu_to_le32(wrapper->len);
msg->hdr.count = cpu_to_le32(1);
msg->hdr.handle = cpu_to_le32(usr->handle);
msg->hdr.padding = cpu_to_le32(0);
msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));
/*
* msg_xfer releases the mutex
* We don't care about the return of msg_xfer since we will not do
* anything different based on what happens.
* We ignore pending signals since one will be set if the user is
* killed, and we need to give the device a chance to clean up, otherwise
* DMA may still be in progress when we return.
*/
rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, true);
if (!IS_ERR(rsp))
kfree(rsp);
free_wrapper_from_list(wrappers, wrapper);
}
void wake_all_cntl(struct qaic_device *qdev)
{
struct xfer_queue_elem *elem;
struct xfer_queue_elem *i;
mutex_lock(&qdev->cntl_mutex);
list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
list_del_init(&elem->list);
complete_all(&elem->xfer_done);
}
mutex_unlock(&qdev->cntl_mutex);
}
| linux-master | drivers/accel/qaic/qaic_control.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <drm/drm_accel.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <uapi/drm/qaic_accel.h>
#include "mhi_controller.h"
#include "qaic.h"
MODULE_IMPORT_NS(DMA_BUF);
#define PCI_DEV_AIC100 0xa100
#define QAIC_NAME "qaic"
#define QAIC_DESC "Qualcomm Cloud AI Accelerators"
#define CNTL_MAJOR 5
#define CNTL_MINOR 0
bool datapath_polling;
module_param(datapath_polling, bool, 0400);
MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);
static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id);
static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id);
static void free_usr(struct kref *kref)
{
struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);
cleanup_srcu_struct(&usr->qddev_lock);
ida_free(&qaic_usrs, usr->handle);
kfree(usr);
}
static int qaic_open(struct drm_device *dev, struct drm_file *file)
{
struct qaic_drm_device *qddev = dev->dev_private;
struct qaic_device *qdev = qddev->qdev;
struct qaic_user *usr;
int rcu_id;
int ret;
rcu_id = srcu_read_lock(&qdev->dev_lock);
if (qdev->in_reset) {
ret = -ENODEV;
goto dev_unlock;
}
usr = kmalloc(sizeof(*usr), GFP_KERNEL);
if (!usr) {
ret = -ENOMEM;
goto dev_unlock;
}
usr->handle = ida_alloc(&qaic_usrs, GFP_KERNEL);
if (usr->handle < 0) {
ret = usr->handle;
goto free_usr;
}
usr->qddev = qddev;
atomic_set(&usr->chunk_id, 0);
init_srcu_struct(&usr->qddev_lock);
kref_init(&usr->ref_count);
ret = mutex_lock_interruptible(&qddev->users_mutex);
if (ret)
goto cleanup_usr;
list_add(&usr->node, &qddev->users);
mutex_unlock(&qddev->users_mutex);
file->driver_priv = usr;
srcu_read_unlock(&qdev->dev_lock, rcu_id);
return 0;
cleanup_usr:
cleanup_srcu_struct(&usr->qddev_lock);
ida_free(&qaic_usrs, usr->handle);
free_usr:
kfree(usr);
dev_unlock:
srcu_read_unlock(&qdev->dev_lock, rcu_id);
return ret;
}
static void qaic_postclose(struct drm_device *dev, struct drm_file *file)
{
struct qaic_user *usr = file->driver_priv;
struct qaic_drm_device *qddev;
struct qaic_device *qdev;
int qdev_rcu_id;
int usr_rcu_id;
int i;
qddev = usr->qddev;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
if (qddev) {
qdev = qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
if (!qdev->in_reset) {
qaic_release_usr(qdev, usr);
for (i = 0; i < qdev->num_dbc; ++i)
if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle)
release_dbc(qdev, i);
}
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
mutex_lock(&qddev->users_mutex);
if (!list_empty(&usr->node))
list_del_init(&usr->node);
mutex_unlock(&qddev->users_mutex);
}
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
kref_put(&usr->ref_count, free_usr);
file->driver_priv = NULL;
}
DEFINE_DRM_ACCEL_FOPS(qaic_accel_fops);
static const struct drm_ioctl_desc qaic_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(QAIC_MANAGE, qaic_manage_ioctl, 0),
DRM_IOCTL_DEF_DRV(QAIC_CREATE_BO, qaic_create_bo_ioctl, 0),
DRM_IOCTL_DEF_DRV(QAIC_MMAP_BO, qaic_mmap_bo_ioctl, 0),
DRM_IOCTL_DEF_DRV(QAIC_ATTACH_SLICE_BO, qaic_attach_slice_bo_ioctl, 0),
DRM_IOCTL_DEF_DRV(QAIC_EXECUTE_BO, qaic_execute_bo_ioctl, 0),
DRM_IOCTL_DEF_DRV(QAIC_PARTIAL_EXECUTE_BO, qaic_partial_execute_bo_ioctl, 0),
DRM_IOCTL_DEF_DRV(QAIC_WAIT_BO, qaic_wait_bo_ioctl, 0),
DRM_IOCTL_DEF_DRV(QAIC_PERF_STATS_BO, qaic_perf_stats_bo_ioctl, 0),
};
static const struct drm_driver qaic_accel_driver = {
.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,
.name = QAIC_NAME,
.desc = QAIC_DESC,
.date = "20190618",
.fops = &qaic_accel_fops,
.open = qaic_open,
.postclose = qaic_postclose,
.ioctls = qaic_drm_ioctls,
.num_ioctls = ARRAY_SIZE(qaic_drm_ioctls),
.gem_prime_import = qaic_gem_prime_import,
};
static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
{
struct qaic_drm_device *qddev;
struct drm_device *ddev;
struct device *pdev;
int ret;
/* Hold off implementing partitions until the uapi is determined */
if (partition_id != QAIC_NO_PARTITION)
return -EINVAL;
pdev = &qdev->pdev->dev;
qddev = kzalloc(sizeof(*qddev), GFP_KERNEL);
if (!qddev)
return -ENOMEM;
ddev = drm_dev_alloc(&qaic_accel_driver, pdev);
if (IS_ERR(ddev)) {
ret = PTR_ERR(ddev);
goto ddev_fail;
}
ddev->dev_private = qddev;
qddev->ddev = ddev;
qddev->qdev = qdev;
qddev->partition_id = partition_id;
INIT_LIST_HEAD(&qddev->users);
mutex_init(&qddev->users_mutex);
qdev->qddev = qddev;
ret = drm_dev_register(ddev, 0);
if (ret) {
pci_dbg(qdev->pdev, "%s: drm_dev_register failed %d\n", __func__, ret);
goto drm_reg_fail;
}
return 0;
drm_reg_fail:
mutex_destroy(&qddev->users_mutex);
qdev->qddev = NULL;
drm_dev_put(ddev);
ddev_fail:
kfree(qddev);
return ret;
}
static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
{
struct qaic_drm_device *qddev;
struct qaic_user *usr;
qddev = qdev->qddev;
qdev->qddev = NULL;
if (!qddev)
return;
/*
* Existing users get unresolvable errors till they close FDs.
* Need to sync carefully with users calling close(). The
* list of users can be modified elsewhere when the lock isn't
* held here, but syncing the srcu with the mutex held
* could deadlock. Grab the mutex so that the list will be
* unmodified. The user we get will exist as long as the
* lock is held. Signal that the qddev is going away, and
* grab a reference to the user so they don't go away for
* synchronize_srcu(). Then release the mutex to avoid
* deadlock and make sure the user has observed the signal.
* With the lock released, we cannot maintain any state of the
* user list.
*/
mutex_lock(&qddev->users_mutex);
while (!list_empty(&qddev->users)) {
usr = list_first_entry(&qddev->users, struct qaic_user, node);
list_del_init(&usr->node);
kref_get(&usr->ref_count);
usr->qddev = NULL;
mutex_unlock(&qddev->users_mutex);
synchronize_srcu(&usr->qddev_lock);
kref_put(&usr->ref_count, free_usr);
mutex_lock(&qddev->users_mutex);
}
mutex_unlock(&qddev->users_mutex);
if (qddev->ddev) {
drm_dev_unregister(qddev->ddev);
drm_dev_put(qddev->ddev);
}
kfree(qddev);
}
static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
u16 major = -1, minor = -1;
struct qaic_device *qdev;
int ret;
/*
* Invoking this function indicates that the control channel to the
* device is available. We use that as a signal to indicate that
* the device side firmware has booted. The device side firmware
* manages the device resources, so we need to communicate with it
* via the control channel in order to utilize the device. Therefore
* we wait until this signal to create the drm dev that userspace will
* use to control the device, because without the device side firmware,
* userspace can't do anything useful.
*/
qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
qdev->in_reset = false;
dev_set_drvdata(&mhi_dev->dev, qdev);
qdev->cntl_ch = mhi_dev;
ret = qaic_control_open(qdev);
if (ret) {
pci_dbg(qdev->pdev, "%s: control_open failed %d\n", __func__, ret);
return ret;
}
ret = get_cntl_version(qdev, NULL, &major, &minor);
if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) {
pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n",
__func__, major, minor, CNTL_MAJOR, CNTL_MINOR, ret);
ret = -EINVAL;
goto close_control;
}
ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
return ret;
close_control:
qaic_control_close(qdev);
return ret;
}
static void qaic_mhi_remove(struct mhi_device *mhi_dev)
{
/* This is redundant since we have already observed the device crash */
}
static void qaic_notify_reset(struct qaic_device *qdev)
{
int i;
qdev->in_reset = true;
/* wake up any waiters to avoid waiting for timeouts at sync */
wake_all_cntl(qdev);
for (i = 0; i < qdev->num_dbc; ++i)
wakeup_dbc(qdev, i);
synchronize_srcu(&qdev->dev_lock);
}
void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset)
{
int i;
qaic_notify_reset(qdev);
/* remove drmdevs to prevent new users from coming in */
qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
/* start tearing things down */
for (i = 0; i < qdev->num_dbc; ++i)
release_dbc(qdev, i);
if (exit_reset)
qdev->in_reset = false;
}
static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct qaic_device *qdev;
int i;
qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
if (!qdev)
return NULL;
if (id->device == PCI_DEV_AIC100) {
qdev->num_dbc = 16;
qdev->dbc = devm_kcalloc(&pdev->dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
if (!qdev->dbc)
return NULL;
}
qdev->cntl_wq = alloc_workqueue("qaic_cntl", WQ_UNBOUND, 0);
if (!qdev->cntl_wq)
return NULL;
pci_set_drvdata(pdev, qdev);
qdev->pdev = pdev;
mutex_init(&qdev->cntl_mutex);
INIT_LIST_HEAD(&qdev->cntl_xfer_list);
init_srcu_struct(&qdev->dev_lock);
for (i = 0; i < qdev->num_dbc; ++i) {
spin_lock_init(&qdev->dbc[i].xfer_lock);
qdev->dbc[i].qdev = qdev;
qdev->dbc[i].id = i;
INIT_LIST_HEAD(&qdev->dbc[i].xfer_list);
init_srcu_struct(&qdev->dbc[i].ch_lock);
init_waitqueue_head(&qdev->dbc[i].dbc_release);
INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
}
return qdev;
}
static void cleanup_qdev(struct qaic_device *qdev)
{
int i;
for (i = 0; i < qdev->num_dbc; ++i)
cleanup_srcu_struct(&qdev->dbc[i].ch_lock);
cleanup_srcu_struct(&qdev->dev_lock);
pci_set_drvdata(qdev->pdev, NULL);
destroy_workqueue(qdev->cntl_wq);
}
static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
{
int bars;
int ret;
bars = pci_select_bars(pdev, IORESOURCE_MEM);
/* make sure the device has the expected BARs */
if (bars != (BIT(0) | BIT(2) | BIT(4))) {
pci_dbg(pdev, "%s: expected BARs 0, 2, and 4 not found in device. Found 0x%x\n",
__func__, bars);
return -EINVAL;
}
ret = pcim_enable_device(pdev);
if (ret)
return ret;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
return ret;
ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
if (ret)
return ret;
qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
if (IS_ERR(qdev->bar_0))
return PTR_ERR(qdev->bar_0);
qdev->bar_2 = devm_ioremap_resource(&pdev->dev, &pdev->resource[2]);
if (IS_ERR(qdev->bar_2))
return PTR_ERR(qdev->bar_2);
/* Managed release since we use pcim_enable_device above */
pci_set_master(pdev);
return 0;
}
static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
{
int mhi_irq;
int ret;
int i;
/* Managed release since we use pcim_enable_device */
ret = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
if (ret < 0)
return ret;
if (ret < 32) {
pci_err(pdev, "%s: Requested 32 MSIs. Obtained %d MSIs which is less than the 32 required.\n",
__func__, ret);
return -ENODEV;
}
mhi_irq = pci_irq_vector(pdev, 0);
if (mhi_irq < 0)
return mhi_irq;
for (i = 0; i < qdev->num_dbc; ++i) {
ret = devm_request_threaded_irq(&pdev->dev, pci_irq_vector(pdev, i + 1),
dbc_irq_handler, dbc_irq_threaded_fn, IRQF_SHARED,
"qaic_dbc", &qdev->dbc[i]);
if (ret)
return ret;
if (datapath_polling) {
qdev->dbc[i].irq = pci_irq_vector(pdev, i + 1);
disable_irq_nosync(qdev->dbc[i].irq);
INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work);
}
}
return mhi_irq;
}
static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct qaic_device *qdev;
int mhi_irq;
int ret;
int i;
qdev = create_qdev(pdev, id);
if (!qdev)
return -ENOMEM;
ret = init_pci(qdev, pdev);
if (ret)
goto cleanup_qdev;
for (i = 0; i < qdev->num_dbc; ++i)
qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);
mhi_irq = init_msi(qdev, pdev);
if (mhi_irq < 0) {
ret = mhi_irq;
goto cleanup_qdev;
}
qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq);
if (IS_ERR(qdev->mhi_cntrl)) {
ret = PTR_ERR(qdev->mhi_cntrl);
goto cleanup_qdev;
}
return 0;
cleanup_qdev:
cleanup_qdev(qdev);
return ret;
}
static void qaic_pci_remove(struct pci_dev *pdev)
{
struct qaic_device *qdev = pci_get_drvdata(pdev);
if (!qdev)
return;
qaic_dev_reset_clean_local_state(qdev, false);
qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
cleanup_qdev(qdev);
}
static void qaic_pci_shutdown(struct pci_dev *pdev)
{
/* see qaic_exit for what link_up is doing */
link_up = true;
qaic_pci_remove(pdev);
}
static pci_ers_result_t qaic_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t error)
{
return PCI_ERS_RESULT_NEED_RESET;
}
static void qaic_pci_reset_prepare(struct pci_dev *pdev)
{
struct qaic_device *qdev = pci_get_drvdata(pdev);
qaic_notify_reset(qdev);
qaic_mhi_start_reset(qdev->mhi_cntrl);
qaic_dev_reset_clean_local_state(qdev, false);
}
static void qaic_pci_reset_done(struct pci_dev *pdev)
{
struct qaic_device *qdev = pci_get_drvdata(pdev);
qdev->in_reset = false;
qaic_mhi_reset_done(qdev->mhi_cntrl);
}
static const struct mhi_device_id qaic_mhi_match_table[] = {
{ .chan = "QAIC_CONTROL", },
{},
};
static struct mhi_driver qaic_mhi_driver = {
.id_table = qaic_mhi_match_table,
.remove = qaic_mhi_remove,
.probe = qaic_mhi_probe,
.ul_xfer_cb = qaic_mhi_ul_xfer_cb,
.dl_xfer_cb = qaic_mhi_dl_xfer_cb,
.driver = {
.name = "qaic_mhi",
},
};
static const struct pci_device_id qaic_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
{ }
};
MODULE_DEVICE_TABLE(pci, qaic_ids);
static const struct pci_error_handlers qaic_pci_err_handler = {
.error_detected = qaic_pci_error_detected,
.reset_prepare = qaic_pci_reset_prepare,
.reset_done = qaic_pci_reset_done,
};
static struct pci_driver qaic_pci_driver = {
.name = QAIC_NAME,
.id_table = qaic_ids,
.probe = qaic_pci_probe,
.remove = qaic_pci_remove,
.shutdown = qaic_pci_shutdown,
.err_handler = &qaic_pci_err_handler,
};
static int __init qaic_init(void)
{
int ret;
ret = mhi_driver_register(&qaic_mhi_driver);
if (ret) {
pr_debug("qaic: mhi_driver_register failed %d\n", ret);
return ret;
}
ret = pci_register_driver(&qaic_pci_driver);
if (ret) {
pr_debug("qaic: pci_register_driver failed %d\n", ret);
goto free_mhi;
}
return 0;
free_mhi:
mhi_driver_unregister(&qaic_mhi_driver);
return ret;
}
static void __exit qaic_exit(void)
{
/*
* We assume that qaic_pci_remove() is called due to a hotplug event
* which would mean that the link is down, and thus
* qaic_mhi_free_controller() should not try to access the device during
* cleanup.
* We call pci_unregister_driver() below, which also triggers
* qaic_pci_remove(), but since this is module exit, we expect the link
* to the device to be up, in which case qaic_mhi_free_controller()
* should try to access the device during cleanup to put the device in
* a sane state.
* For that reason, we set link_up here to let qaic_mhi_free_controller
* know the expected link state. Since the module is going to be
* removed at the end of this, we don't need to worry about
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
pci_unregister_driver(&qaic_pci_driver);
mhi_driver_unregister(&qaic_mhi_driver);
}
module_init(qaic_init);
module_exit(qaic_exit);
MODULE_AUTHOR(QAIC_DESC " Kernel Driver Team");
MODULE_DESCRIPTION(QAIC_DESC " Accel Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/accel/qaic/qaic_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
#include <uapi/drm/qaic_accel.h>
#include "qaic.h"
#define SEM_VAL_MASK GENMASK_ULL(11, 0)
#define SEM_INDEX_MASK GENMASK_ULL(4, 0)
#define BULK_XFER BIT(3)
#define GEN_COMPLETION BIT(4)
#define INBOUND_XFER 1
#define OUTBOUND_XFER 2
#define REQHP_OFF 0x0 /* we read this */
#define REQTP_OFF 0x4 /* we write this */
#define RSPHP_OFF 0x8 /* we write this */
#define RSPTP_OFF 0xc /* we read this */
#define ENCODE_SEM(val, index, sync, cmd, flags) \
({ \
FIELD_PREP(GENMASK(11, 0), (val)) | \
FIELD_PREP(GENMASK(20, 16), (index)) | \
FIELD_PREP(BIT(22), (sync)) | \
FIELD_PREP(GENMASK(26, 24), (cmd)) | \
FIELD_PREP(GENMASK(30, 29), (flags)) | \
FIELD_PREP(BIT(31), (cmd) ? 1 : 0); \
})
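/*
 * For example, ENCODE_SEM(5, 2, 1, 3, 0) packs val 5 into bits 11:0,
 * index 2 into bits 20:16, sync into bit 22, cmd 3 into bits 26:24 and
 * sets bit 31 because cmd is non-zero, giving 0x83420005.
 */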
#define NUM_EVENTS 128
#define NUM_DELAYS 10
static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */
module_param(wait_exec_default_timeout_ms, uint, 0600);
MODULE_PARM_DESC(wait_exec_default_timeout_ms, "Default timeout for DRM_IOCTL_QAIC_WAIT_BO");
static unsigned int datapath_poll_interval_us = 100; /* 100 usec default */
module_param(datapath_poll_interval_us, uint, 0600);
MODULE_PARM_DESC(datapath_poll_interval_us,
"Amount of time to sleep between activity when datapath polling is enabled");
struct dbc_req {
/*
* A request ID is assigned to each memory handle going in DMA queue.
* As a single memory handle can enqueue multiple elements in DMA queue
* all of them will have the same request ID.
*/
__le16 req_id;
/* Future use */
__u8 seq_id;
/*
* Special encoded variable
* 7 0 - Do not force to generate MSI after DMA is completed
* 1 - Force to generate MSI after DMA is completed
* 6:5 Reserved
* 4 1 - Generate completion element in the response queue
* 0 - No Completion Code
* 3 0 - DMA request is a Link list transfer
* 1 - DMA request is a Bulk transfer
* 2 Reserved
* 1:0 00 - No DMA transfer involved
* 01 - DMA transfer is part of inbound transfer
* 10 - DMA transfer has outbound transfer
* 11 - NA
*/
__u8 cmd;
__le32 resv;
/* Source address for the transfer */
__le64 src_addr;
/* Destination address for the transfer */
__le64 dest_addr;
/* Length of transfer request */
__le32 len;
__le32 resv2;
/* Doorbell address */
__le64 db_addr;
/*
* Special encoded variable
* 7 1 - Doorbell(db) write
* 0 - No doorbell write
* 6:2 Reserved
* 1:0 00 - 32 bit access, db address must be aligned to 32bit-boundary
* 01 - 16 bit access, db address must be aligned to 16bit-boundary
* 10 - 8 bit access, db address must be aligned to 8bit-boundary
* 11 - Reserved
*/
__u8 db_len;
__u8 resv3;
__le16 resv4;
/* 32 bit data written to doorbell address */
__le32 db_data;
/*
* Special encoded variable
* All the fields of sem_cmdX are passed from user and all are ORed
* together to form sem_cmd.
* 11:0 Semaphore value
* 15:12 Reserved
* 20:16 Semaphore index
* 21 Reserved
* 22 Semaphore Sync
* 23 Reserved
* 26:24 Semaphore command
* 28:27 Reserved
* 29 Semaphore DMA out bound sync fence
* 30 Semaphore DMA in bound sync fence
* 31 Enable semaphore command
*/
__le32 sem_cmd0;
__le32 sem_cmd1;
__le32 sem_cmd2;
__le32 sem_cmd3;
} __packed;
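/*
 * For example, encode_reqs() marks the last request of an inbound bulk
 * slice with cmd = INBOUND_XFER | BULK_XFER | GEN_COMPLETION = 0x19:
 * bits 1:0 = 01 (inbound DMA), bit 3 set (bulk transfer) and bit 4 set
 * (generate a completion element in the response queue).
 */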
struct dbc_rsp {
/* Request ID of the memory handle whose DMA transaction is completed */
__le16 req_id;
/* Status of the DMA transaction. 0 : Success otherwise failure */
__le16 status;
} __packed;
inline int get_dbc_req_elem_size(void)
{
return sizeof(struct dbc_req);
}
inline int get_dbc_rsp_elem_size(void)
{
return sizeof(struct dbc_rsp);
}
static void free_slice(struct kref *kref)
{
struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);
list_del(&slice->slice);
drm_gem_object_put(&slice->bo->base);
sg_free_table(slice->sgt);
kfree(slice->sgt);
kfree(slice->reqs);
kfree(slice);
}
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
struct sg_table *sgt_in, u64 size, u64 offset)
{
int total_len, len, nents, offf = 0, offl = 0;
struct scatterlist *sg, *sgn, *sgf, *sgl;
struct sg_table *sgt;
int ret, j;
/* find out number of relevant nents needed for this mem */
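/*
 * For example, with sgt_in entries of DMA length 4096, 4096 and 4096,
 * offset = 5000 and size = 2000, the loop below selects the second
 * entry as both sgf (offf = 904) and sgl (offl = 2904), so nents = 1
 * and the cloned table ends up with a single 2000 byte entry.
 */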
total_len = 0;
sgf = NULL;
sgl = NULL;
nents = 0;
size = size ? size : PAGE_SIZE;
for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
len = sg_dma_len(sg);
if (!len)
continue;
if (offset >= total_len && offset < total_len + len) {
sgf = sg;
offf = offset - total_len;
}
if (sgf)
nents++;
if (offset + size >= total_len &&
offset + size <= total_len + len) {
sgl = sg;
offl = offset + size - total_len;
break;
}
total_len += len;
}
if (!sgf || !sgl) {
ret = -EINVAL;
goto out;
}
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
ret = -ENOMEM;
goto out;
}
ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
if (ret)
goto free_sgt;
/* copy relevant sg node and fix page and length */
sgn = sgf;
for_each_sgtable_sg(sgt, sg, j) {
memcpy(sg, sgn, sizeof(*sg));
if (sgn == sgf) {
sg_dma_address(sg) += offf;
sg_dma_len(sg) -= offf;
sg_set_page(sg, sg_page(sgn), sg_dma_len(sg), offf);
} else {
offf = 0;
}
if (sgn == sgl) {
sg_dma_len(sg) = offl - offf;
sg_set_page(sg, sg_page(sgn), offl - offf, offf);
sg_mark_end(sg);
break;
}
sgn = sg_next(sgn);
}
*sgt_out = sgt;
return ret;
free_sgt:
kfree(sgt);
out:
*sgt_out = NULL;
return ret;
}
static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
struct qaic_attach_slice_entry *req)
{
__le64 db_addr = cpu_to_le64(req->db_addr);
__le32 db_data = cpu_to_le32(req->db_data);
struct scatterlist *sg;
__u8 cmd = BULK_XFER;
int presync_sem;
u64 dev_addr;
__u8 db_len;
int i;
if (!slice->no_xfer)
cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER);
if (req->db_len && !IS_ALIGNED(req->db_addr, req->db_len / 8))
return -EINVAL;
presync_sem = req->sem0.presync + req->sem1.presync + req->sem2.presync + req->sem3.presync;
if (presync_sem > 1)
return -EINVAL;
presync_sem = req->sem0.presync << 0 | req->sem1.presync << 1 |
req->sem2.presync << 2 | req->sem3.presync << 3;
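/*
 * presync_sem is now a bitmap of which semaphore (if any) carries the
 * presync condition, e.g. if only sem1 has presync set it equals
 * BIT(1) and the switch in the loop below programs sem_cmd1 on every
 * request of the slice.
 */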
switch (req->db_len) {
case 32:
db_len = BIT(7);
break;
case 16:
db_len = BIT(7) | 1;
break;
case 8:
db_len = BIT(7) | 2;
break;
case 0:
db_len = 0; /* doorbell is not active for this command */
break;
default:
return -EINVAL; /* should never hit this */
}
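/*
 * For example, a 32 bit doorbell write is encoded as db_len = 0x80
 * (BIT(7) for an active doorbell, low bits 00 for 32 bit access) and
 * the check above requires db_addr to be aligned to 32 / 8 = 4 bytes.
 */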
/*
* When we end up splitting up a single request (i.e. a buf slice) into
* multiple DMA requests, we have to manage the sync data carefully.
* There can only be one presync sem. That needs to be on every xfer
* so that the DMA engine doesn't transfer data before the receiver is
* ready. We only do the doorbell and postsync sems after the xfer.
* To guarantee previous xfers for the request are complete, we use a
* fence.
*/
dev_addr = req->dev_addr;
for_each_sgtable_sg(slice->sgt, sg, i) {
slice->reqs[i].cmd = cmd;
slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
sg_dma_address(sg) : dev_addr);
slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
dev_addr : sg_dma_address(sg));
/*
 * sg_dma_len(sg) returns the size of a DMA segment. qaic sets
 * the maximum DMA segment size to UINT_MAX, so sg_dma_len(sg)
 * can never exceed the u32 range and the cast below does not
 * truncate the value.
 */
slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg));
switch (presync_sem) {
case BIT(0):
slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val,
req->sem0.index,
req->sem0.presync,
req->sem0.cmd,
req->sem0.flags));
break;
case BIT(1):
slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val,
req->sem1.index,
req->sem1.presync,
req->sem1.cmd,
req->sem1.flags));
break;
case BIT(2):
slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val,
req->sem2.index,
req->sem2.presync,
req->sem2.cmd,
req->sem2.flags));
break;
case BIT(3):
slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val,
req->sem3.index,
req->sem3.presync,
req->sem3.cmd,
req->sem3.flags));
break;
}
dev_addr += sg_dma_len(sg);
}
/* add post transfer stuff to last segment */
i--;
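/*
 * The loop above leaves i equal to the number of scatterlist entries,
 * so after the decrement i indexes the last request of the slice.
 */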
slice->reqs[i].cmd |= GEN_COMPLETION;
slice->reqs[i].db_addr = db_addr;
slice->reqs[i].db_len = db_len;
slice->reqs[i].db_data = db_data;
/*
* Add a fence if we have more than one request going to the hardware
* representing the entirety of the user request, and the user request
* has no presync condition.
* Fences are expensive, so we try to avoid them. We rely on the
* hardware behavior to avoid needing one when there is a presync
* condition. When a presync exists, all requests for that same
* presync will be queued into a fifo. Thus, since we queue the
* post xfer activity only on the last request we queue, the hardware
* will ensure that the last queued request is processed last, thus
* making sure the post xfer activity happens at the right time without
* a fence.
*/
if (i && !presync_sem)
req->sem0.flags |= (slice->dir == DMA_TO_DEVICE ?
QAIC_SEM_INSYNCFENCE : QAIC_SEM_OUTSYNCFENCE);
slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, req->sem0.index,
req->sem0.presync, req->sem0.cmd,
req->sem0.flags));
slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, req->sem1.index,
req->sem1.presync, req->sem1.cmd,
req->sem1.flags));
slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, req->sem2.index,
req->sem2.presync, req->sem2.cmd,
req->sem2.flags));
slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, req->sem3.index,
req->sem3.presync, req->sem3.cmd,
req->sem3.flags));
return 0;
}
static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
struct qaic_attach_slice_entry *slice_ent)
{
struct sg_table *sgt = NULL;
struct bo_slice *slice;
int ret;
ret = clone_range_of_sgt_for_slice(qdev, &sgt, bo->sgt, slice_ent->size, slice_ent->offset);
if (ret)
goto out;
slice = kmalloc(sizeof(*slice), GFP_KERNEL);
if (!slice) {
ret = -ENOMEM;
goto free_sgt;
}
slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
if (!slice->reqs) {
ret = -ENOMEM;
goto free_slice;
}
slice->no_xfer = !slice_ent->size;
slice->sgt = sgt;
slice->nents = sgt->nents;
slice->dir = bo->dir;
slice->bo = bo;
slice->size = slice_ent->size;
slice->offset = slice_ent->offset;
ret = encode_reqs(qdev, slice, slice_ent);
if (ret)
goto free_req;
bo->total_slice_nents += sgt->nents;
kref_init(&slice->ref_count);
drm_gem_object_get(&bo->base);
list_add_tail(&slice->slice, &bo->slices);
return 0;
free_req:
kfree(slice->reqs);
free_slice:
kfree(slice);
free_sgt:
sg_free_table(sgt);
kfree(sgt);
out:
return ret;
}
static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 size)
{
struct scatterlist *sg;
struct sg_table *sgt;
struct page **pages;
int *pages_order;
int buf_extra;
int max_order;
int nr_pages;
int ret = 0;
int i, j, k;
int order;
if (size) {
nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
/*
* calculate how much extra we are going to allocate, to remove
* later
*/
buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE;
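/*
 * For example, with a 4K PAGE_SIZE and size = 10000: nr_pages = 3 and
 * buf_extra starts at 2288 (3 * 4096 - 10000). Any over-allocation from
 * higher-order pages is added to it later, before it is trimmed from
 * the final scatterlist entry.
 */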
max_order = min(MAX_ORDER - 1, get_order(size));
} else {
/* allocate a single page for bookkeeping */
nr_pages = 1;
buf_extra = 0;
max_order = 0;
}
pages = kvmalloc_array(nr_pages, sizeof(*pages) + sizeof(*pages_order), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto out;
}
pages_order = (void *)pages + sizeof(*pages) * nr_pages;
/*
* Allocate requested memory using alloc_pages. It is possible to allocate
* the requested memory in multiple chunks by calling alloc_pages
* multiple times. Use SG table to handle multiple allocated pages.
*/
i = 0;
while (nr_pages > 0) {
order = min(get_order(nr_pages * PAGE_SIZE), max_order);
while (1) {
pages[i] = alloc_pages(GFP_KERNEL | GFP_HIGHUSER |
__GFP_NOWARN | __GFP_ZERO |
(order ? __GFP_NORETRY : __GFP_RETRY_MAYFAIL),
order);
if (pages[i])
break;
if (!order--) {
ret = -ENOMEM;
goto free_partial_alloc;
}
}
max_order = order;
pages_order[i] = order;
nr_pages -= 1 << order;
if (nr_pages <= 0)
/* account for over allocation */
buf_extra += abs(nr_pages) * PAGE_SIZE;
i++;
}
sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
ret = -ENOMEM;
goto free_partial_alloc;
}
if (sg_alloc_table(sgt, i, GFP_KERNEL)) {
ret = -ENOMEM;
goto free_sgt;
}
/* Populate the SG table with the allocated memory pages */
sg = sgt->sgl;
for (k = 0; k < i; k++, sg = sg_next(sg)) {
/* Last entry requires special handling */
if (k < i - 1) {
sg_set_page(sg, pages[k], PAGE_SIZE << pages_order[k], 0);
} else {
sg_set_page(sg, pages[k], (PAGE_SIZE << pages_order[k]) - buf_extra, 0);
sg_mark_end(sg);
}
}
kvfree(pages);
*sgt_out = sgt;
return ret;
free_sgt:
kfree(sgt);
free_partial_alloc:
for (j = 0; j < i; j++)
__free_pages(pages[j], pages_order[j]);
kvfree(pages);
out:
*sgt_out = NULL;
return ret;
}
static bool invalid_sem(struct qaic_sem *sem)
{
if (sem->val & ~SEM_VAL_MASK || sem->index & ~SEM_INDEX_MASK ||
!(sem->presync == 0 || sem->presync == 1) || sem->pad ||
sem->flags & ~(QAIC_SEM_INSYNCFENCE | QAIC_SEM_OUTSYNCFENCE) ||
sem->cmd > QAIC_SEM_WAIT_GT_0)
return true;
return false;
}
static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
u32 count, u64 total_size)
{
int i;
for (i = 0; i < count; i++) {
if (!(slice_ent[i].db_len == 32 || slice_ent[i].db_len == 16 ||
slice_ent[i].db_len == 8 || slice_ent[i].db_len == 0) ||
invalid_sem(&slice_ent[i].sem0) || invalid_sem(&slice_ent[i].sem1) ||
invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
return -EINVAL;
if (slice_ent[i].offset + slice_ent[i].size > total_size)
return -EINVAL;
}
return 0;
}
static void qaic_free_sgt(struct sg_table *sgt)
{
struct scatterlist *sg;
for (sg = sgt->sgl; sg; sg = sg_next(sg))
if (sg_page(sg))
__free_pages(sg_page(sg), get_order(sg->length));
sg_free_table(sgt);
kfree(sgt);
}
static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj)
{
struct qaic_bo *bo = to_qaic_bo(obj);
drm_printf_indent(p, indent, "user requested size=%llu\n", bo->size);
}
static const struct vm_operations_struct drm_vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct qaic_bo *bo = to_qaic_bo(obj);
unsigned long offset = 0;
struct scatterlist *sg;
int ret = 0;
if (obj->import_attach)
return -EINVAL;
for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
if (sg_page(sg)) {
ret = remap_pfn_range(vma, vma->vm_start + offset, page_to_pfn(sg_page(sg)),
sg->length, vma->vm_page_prot);
if (ret)
goto out;
offset += sg->length;
}
}
out:
return ret;
}
static void qaic_free_object(struct drm_gem_object *obj)
{
struct qaic_bo *bo = to_qaic_bo(obj);
if (obj->import_attach) {
/* DMABUF/PRIME Path */
drm_prime_gem_destroy(obj, NULL);
} else {
/* Private buffer allocation path */
qaic_free_sgt(bo->sgt);
}
drm_gem_object_release(obj);
kfree(bo);
}
static const struct drm_gem_object_funcs qaic_gem_funcs = {
.free = qaic_free_object,
.print_info = qaic_gem_print_info,
.mmap = qaic_gem_object_mmap,
.vm_ops = &drm_vm_ops,
};
static struct qaic_bo *qaic_alloc_init_bo(void)
{
struct qaic_bo *bo;
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&bo->slices);
init_completion(&bo->xfer_done);
complete_all(&bo->xfer_done);
return bo;
}
int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qaic_create_bo *args = data;
int usr_rcu_id, qdev_rcu_id;
struct drm_gem_object *obj;
struct qaic_device *qdev;
struct qaic_user *usr;
struct qaic_bo *bo;
size_t size;
int ret;
if (args->pad)
return -EINVAL;
size = PAGE_ALIGN(args->size);
if (size == 0)
return -EINVAL;
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
if (!usr->qddev) {
ret = -ENODEV;
goto unlock_usr_srcu;
}
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
if (qdev->in_reset) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
bo = qaic_alloc_init_bo();
if (IS_ERR(bo)) {
ret = PTR_ERR(bo);
goto unlock_dev_srcu;
}
obj = &bo->base;
drm_gem_private_object_init(dev, obj, size);
obj->funcs = &qaic_gem_funcs;
ret = create_sgt(qdev, &bo->sgt, size);
if (ret)
goto free_bo;
bo->size = args->size;
ret = drm_gem_handle_create(file_priv, obj, &args->handle);
if (ret)
goto free_sgt;
bo->handle = args->handle;
drm_gem_object_put(obj);
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return 0;
free_sgt:
qaic_free_sgt(bo->sgt);
free_bo:
kfree(bo);
unlock_dev_srcu:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return ret;
}
int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qaic_mmap_bo *args = data;
int usr_rcu_id, qdev_rcu_id;
struct drm_gem_object *obj;
struct qaic_device *qdev;
struct qaic_user *usr;
int ret;
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
if (!usr->qddev) {
ret = -ENODEV;
goto unlock_usr_srcu;
}
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
if (qdev->in_reset) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj) {
ret = -ENOENT;
goto unlock_dev_srcu;
}
ret = drm_gem_create_mmap_offset(obj);
if (ret == 0)
args->offset = drm_vma_node_offset_addr(&obj->vma_node);
drm_gem_object_put(obj);
unlock_dev_srcu:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return ret;
}
struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
struct drm_gem_object *obj;
struct qaic_bo *bo;
size_t size;
int ret;
bo = qaic_alloc_init_bo();
if (IS_ERR(bo)) {
ret = PTR_ERR(bo);
goto out;
}
obj = &bo->base;
get_dma_buf(dma_buf);
attach = dma_buf_attach(dma_buf, dev->dev);
if (IS_ERR(attach)) {
ret = PTR_ERR(attach);
goto attach_fail;
}
size = PAGE_ALIGN(attach->dmabuf->size);
if (size == 0) {
ret = -EINVAL;
goto size_align_fail;
}
drm_gem_private_object_init(dev, obj, size);
/*
* skipping dma_buf_map_attachment() as we do not know the direction
* just yet. Once the direction is known in the subsequent IOCTL to
* attach slicing, we can do it then.
*/
obj->funcs = &qaic_gem_funcs;
obj->import_attach = attach;
obj->resv = dma_buf->resv;
return obj;
size_align_fail:
dma_buf_detach(dma_buf, attach);
attach_fail:
dma_buf_put(dma_buf);
kfree(bo);
out:
return ERR_PTR(ret);
}
static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_hdr *hdr)
{
struct drm_gem_object *obj = &bo->base;
struct sg_table *sgt;
int ret;
if (obj->import_attach->dmabuf->size < hdr->size)
return -EINVAL;
sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
return ret;
}
bo->sgt = sgt;
bo->size = hdr->size;
return 0;
}
static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
struct qaic_attach_slice_hdr *hdr)
{
int ret;
if (bo->size != hdr->size)
return -EINVAL;
ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
if (ret)
return -EFAULT;
return 0;
}
static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
struct qaic_attach_slice_hdr *hdr)
{
int ret;
if (bo->base.import_attach)
ret = qaic_prepare_import_bo(bo, hdr);
else
ret = qaic_prepare_export_bo(qdev, bo, hdr);
if (ret == 0)
bo->dir = hdr->dir;
return ret;
}
static void qaic_unprepare_import_bo(struct qaic_bo *bo)
{
dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, bo->dir);
bo->sgt = NULL;
bo->size = 0;
}
static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
dma_unmap_sgtable(&qdev->pdev->dev, bo->sgt, bo->dir, 0);
}
static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
if (bo->base.import_attach)
qaic_unprepare_import_bo(bo);
else
qaic_unprepare_export_bo(qdev, bo);
bo->dir = 0;
}
static void qaic_free_slices_bo(struct qaic_bo *bo)
{
struct bo_slice *slice, *temp;
list_for_each_entry_safe(slice, temp, &bo->slices, slice)
kref_put(&slice->ref_count, free_slice);
}
static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
struct qaic_attach_slice_hdr *hdr,
struct qaic_attach_slice_entry *slice_ent)
{
int ret, i;
for (i = 0; i < hdr->count; i++) {
ret = qaic_map_one_slice(qdev, bo, &slice_ent[i]);
if (ret) {
qaic_free_slices_bo(bo);
return ret;
}
}
if (bo->total_slice_nents > qdev->dbc[hdr->dbc_id].nelem) {
qaic_free_slices_bo(bo);
return -ENOSPC;
}
bo->sliced = true;
bo->nr_slice = hdr->count;
list_add_tail(&bo->bo_list, &qdev->dbc[hdr->dbc_id].bo_lists);
return 0;
}
int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qaic_attach_slice_entry *slice_ent;
struct qaic_attach_slice *args = data;
int rcu_id, usr_rcu_id, qdev_rcu_id;
struct dma_bridge_chan *dbc;
struct drm_gem_object *obj;
struct qaic_device *qdev;
unsigned long arg_size;
struct qaic_user *usr;
u8 __user *user_data;
struct qaic_bo *bo;
int ret;
if (args->hdr.count == 0)
return -EINVAL;
arg_size = args->hdr.count * sizeof(*slice_ent);
if (arg_size / args->hdr.count != sizeof(*slice_ent))
return -EINVAL;
if (args->hdr.size == 0)
return -EINVAL;
if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
return -EINVAL;
if (args->data == 0)
return -EINVAL;
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
if (!usr->qddev) {
ret = -ENODEV;
goto unlock_usr_srcu;
}
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
if (qdev->in_reset) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
if (args->hdr.dbc_id >= qdev->num_dbc) {
ret = -EINVAL;
goto unlock_dev_srcu;
}
user_data = u64_to_user_ptr(args->data);
slice_ent = kzalloc(arg_size, GFP_KERNEL);
if (!slice_ent) {
ret = -ENOMEM;
goto unlock_dev_srcu;
}
ret = copy_from_user(slice_ent, user_data, arg_size);
if (ret) {
ret = -EFAULT;
goto free_slice_ent;
}
ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, args->hdr.size);
if (ret)
goto free_slice_ent;
obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
if (!obj) {
ret = -ENOENT;
goto free_slice_ent;
}
bo = to_qaic_bo(obj);
if (bo->sliced) {
ret = -EINVAL;
goto put_bo;
}
dbc = &qdev->dbc[args->hdr.dbc_id];
rcu_id = srcu_read_lock(&dbc->ch_lock);
if (dbc->usr != usr) {
ret = -EINVAL;
goto unlock_ch_srcu;
}
ret = qaic_prepare_bo(qdev, bo, &args->hdr);
if (ret)
goto unlock_ch_srcu;
ret = qaic_attach_slicing_bo(qdev, bo, &args->hdr, slice_ent);
if (ret)
goto unprepare_bo;
if (args->hdr.dir == DMA_TO_DEVICE)
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);
bo->dbc = dbc;
srcu_read_unlock(&dbc->ch_lock, rcu_id);
drm_gem_object_put(obj);
kfree(slice_ent);
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return 0;
unprepare_bo:
qaic_unprepare_bo(qdev, bo);
unlock_ch_srcu:
srcu_read_unlock(&dbc->ch_lock, rcu_id);
put_bo:
drm_gem_object_put(obj);
free_slice_ent:
kfree(slice_ent);
unlock_dev_srcu:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return ret;
}
static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id,
u32 head, u32 *ptail)
{
struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
struct dbc_req *reqs = slice->reqs;
u32 tail = *ptail;
u32 avail;
avail = head - tail;
if (head <= tail)
avail += dbc->nelem;
--avail;
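/*
 * For example, with nelem = 8, head = 2 and tail = 5:
 * avail = 2 - 5 + 8 - 1 = 4 free slots. One slot is always left unused
 * so that head == tail unambiguously means an empty queue.
 */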
if (avail < slice->nents)
return -EAGAIN;
if (tail + slice->nents > dbc->nelem) {
avail = dbc->nelem - tail;
avail = min_t(u32, avail, slice->nents);
memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
sizeof(*reqs) * avail);
reqs += avail;
avail = slice->nents - avail;
if (avail)
memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail);
} else {
memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
sizeof(*reqs) * slice->nents);
}
*ptail = (tail + slice->nents) % dbc->nelem;
return 0;
}
/*
* Based on the value of resize we may only need to transmit first_n
* entries and the last entry, with last_bytes to send from the last entry.
* Note that first_n could be 0.
*/
static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice,
u64 resize, u32 dbc_id, u32 head, u32 *ptail)
{
struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
struct dbc_req *reqs = slice->reqs;
struct dbc_req *last_req;
u32 tail = *ptail;
u64 total_bytes;
u64 last_bytes;
u32 first_n;
u32 avail;
int ret;
int i;
avail = head - tail;
if (head <= tail)
avail += dbc->nelem;
--avail;
total_bytes = 0;
for (i = 0; i < slice->nents; i++) {
total_bytes += le32_to_cpu(reqs[i].len);
if (total_bytes >= resize)
break;
}
if (total_bytes < resize) {
/* User space should have used the full buffer path. */
ret = -EINVAL;
return ret;
}
first_n = i;
last_bytes = i ? resize + le32_to_cpu(reqs[i].len) - total_bytes : resize;
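/*
 * For example, a slice with three requests of length 4096 each and
 * resize = 6000: the loop stops at i = 1 (total_bytes = 8192), so
 * first_n = 1 and last_bytes = 6000 + 4096 - 8192 = 1904. One full
 * request plus a 1904 byte tail entry get queued below.
 */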
if (avail < (first_n + 1))
return -EAGAIN;
if (first_n) {
if (tail + first_n > dbc->nelem) {
avail = dbc->nelem - tail;
avail = min_t(u32, avail, first_n);
memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
sizeof(*reqs) * avail);
last_req = reqs + avail;
avail = first_n - avail;
if (avail)
memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail);
} else {
memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
sizeof(*reqs) * first_n);
}
}
/*
 * Copy over the last request so its post-transfer settings
 * (completion, doorbell, semaphores) are preserved, then adjust
 * len to the leftover size and take src/dst from the entry that
 * resize ends in.
 */
last_req = dbc->req_q_base + (tail + first_n) % dbc->nelem * get_dbc_req_elem_size();
memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs));
/*
 * last_bytes holds the size of a DMA segment. qaic sets the
 * maximum DMA segment size to UINT_MAX, so last_bytes can never
 * exceed the u32 range and the cast below does not truncate the
 * value.
 */
last_req->len = cpu_to_le32((u32)last_bytes);
last_req->src_addr = reqs[first_n].src_addr;
last_req->dest_addr = reqs[first_n].dest_addr;
*ptail = (tail + first_n + 1) % dbc->nelem;
return 0;
}
static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *file_priv,
struct qaic_execute_entry *exec, unsigned int count,
bool is_partial, struct dma_bridge_chan *dbc, u32 head,
u32 *tail)
{
struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
struct drm_gem_object *obj;
struct bo_slice *slice;
unsigned long flags;
struct qaic_bo *bo;
bool queued;
int i, j;
int ret;
for (i = 0; i < count; i++) {
/*
 * The ref count will be decremented inside dbc_irq_threaded_fn()
 * when the transfer of this buffer completes.
 */
obj = drm_gem_object_lookup(file_priv,
is_partial ? pexec[i].handle : exec[i].handle);
if (!obj) {
ret = -ENOENT;
goto failed_to_send_bo;
}
bo = to_qaic_bo(obj);
if (!bo->sliced) {
ret = -EINVAL;
goto failed_to_send_bo;
}
if (is_partial && pexec[i].resize > bo->size) {
ret = -EINVAL;
goto failed_to_send_bo;
}
spin_lock_irqsave(&dbc->xfer_lock, flags);
queued = bo->queued;
bo->queued = true;
if (queued) {
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
ret = -EINVAL;
goto failed_to_send_bo;
}
bo->req_id = dbc->next_req_id++;
list_for_each_entry(slice, &bo->slices, slice) {
/*
 * If this is a partial execute and resize ends at or before
 * this slice's offset, skip this slice and continue the loop.
 */
if (is_partial && pexec[i].resize && pexec[i].resize <= slice->offset)
continue;
for (j = 0; j < slice->nents; j++)
slice->reqs[j].req_id = cpu_to_le16(bo->req_id);
/*
 * For a partial execute ioctl, if resize has cut this slice
 * short then do a partial copy, otherwise do a complete copy.
 */
if (is_partial && pexec[i].resize &&
pexec[i].resize < slice->offset + slice->size)
ret = copy_partial_exec_reqs(qdev, slice,
pexec[i].resize - slice->offset,
dbc->id, head, tail);
else
ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
if (ret) {
bo->queued = false;
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
goto failed_to_send_bo;
}
}
reinit_completion(&bo->xfer_done);
list_add_tail(&bo->xfer_list, &dbc->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir);
}
return 0;
failed_to_send_bo:
if (likely(obj))
drm_gem_object_put(obj);
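/*
 * Unwind the BOs already queued by this call: they were added at the
 * tail of dbc->xfer_list, so remove them from the tail again.
 */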
for (j = 0; j < i; j++) {
spin_lock_irqsave(&dbc->xfer_lock, flags);
bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
obj = &bo->base;
bo->queued = false;
list_del(&bo->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
drm_gem_object_put(obj);
}
return ret;
}
static void update_profiling_data(struct drm_file *file_priv,
struct qaic_execute_entry *exec, unsigned int count,
bool is_partial, u64 received_ts, u64 submit_ts, u32 queue_level)
{
struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
struct drm_gem_object *obj;
struct qaic_bo *bo;
int i;
for (i = 0; i < count; i++) {
/*
* Since we already committed the BO to hardware, the only way
* this should fail is a pending signal. We can't cancel the
* submit to hardware, so we have to just skip the profiling
* data. In case the signal is not fatal to the process, we
* return success so that the user doesn't try to resubmit.
*/
obj = drm_gem_object_lookup(file_priv,
is_partial ? pexec[i].handle : exec[i].handle);
if (!obj)
break;
bo = to_qaic_bo(obj);
bo->perf_stats.req_received_ts = received_ts;
bo->perf_stats.req_submit_ts = submit_ts;
bo->perf_stats.queue_level_before = queue_level;
queue_level += bo->total_slice_nents;
drm_gem_object_put(obj);
}
}
static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv,
bool is_partial)
{
struct qaic_execute *args = data;
struct qaic_execute_entry *exec;
struct dma_bridge_chan *dbc;
int usr_rcu_id, qdev_rcu_id;
struct qaic_device *qdev;
struct qaic_user *usr;
u8 __user *user_data;
unsigned long n;
u64 received_ts;
u32 queue_level;
u64 submit_ts;
int rcu_id;
u32 head;
u32 tail;
u64 size;
int ret;
received_ts = ktime_get_ns();
size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec);
n = (unsigned long)size * args->hdr.count;
if (args->hdr.count == 0 || n / args->hdr.count != size)
return -EINVAL;
user_data = u64_to_user_ptr(args->data);
exec = kcalloc(args->hdr.count, size, GFP_KERNEL);
if (!exec)
return -ENOMEM;
if (copy_from_user(exec, user_data, n)) {
ret = -EFAULT;
goto free_exec;
}
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
if (!usr->qddev) {
ret = -ENODEV;
goto unlock_usr_srcu;
}
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
if (qdev->in_reset) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
if (args->hdr.dbc_id >= qdev->num_dbc) {
ret = -EINVAL;
goto unlock_dev_srcu;
}
dbc = &qdev->dbc[args->hdr.dbc_id];
rcu_id = srcu_read_lock(&dbc->ch_lock);
if (!dbc->usr || dbc->usr->handle != usr->handle) {
ret = -EPERM;
goto release_ch_rcu;
}
head = readl(dbc->dbc_base + REQHP_OFF);
tail = readl(dbc->dbc_base + REQTP_OFF);
if (head == U32_MAX || tail == U32_MAX) {
/* PCI link error */
ret = -ENODEV;
goto release_ch_rcu;
}
queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);
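/*
 * queue_level is the number of request elements already pending in the
 * circular queue, e.g. with nelem = 8, head = 6 and tail = 2 there are
 * 8 - (6 - 2) = 4 elements in flight.
 */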
ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
head, &tail);
if (ret)
goto release_ch_rcu;
/* Finalize commit to hardware */
submit_ts = ktime_get_ns();
writel(tail, dbc->dbc_base + REQTP_OFF);
update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
submit_ts, queue_level);
if (datapath_polling)
schedule_work(&dbc->poll_work);
release_ch_rcu:
srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
free_exec:
kfree(exec);
return ret;
}
int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
return __qaic_execute_bo_ioctl(dev, data, file_priv, false);
}
int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
return __qaic_execute_bo_ioctl(dev, data, file_priv, true);
}
/*
* Our interrupt handling is a bit more complicated than a simple ideal, but
* sadly necessary.
*
* Each dbc has a completion queue. Entries in the queue correspond to DMA
* requests which the device has processed. The hardware already has a built
* in irq mitigation. When the device puts an entry into the queue, it will
* only trigger an interrupt if the queue was empty. Therefore, when adding
* the Nth event to a non-empty queue, the hardware doesn't trigger an
* interrupt. This means the host doesn't get additional interrupts signaling
* the same thing - the queue has something to process.
* This behavior can be overridden in the DMA request.
* This means that when the host receives an interrupt, it is required to
* drain the queue.
*
* This behavior is what NAPI attempts to accomplish, although we can't use
* NAPI as we don't have a netdev. We use threaded irqs instead.
*
* However, there is a situation where the host drains the queue fast enough
* that every event causes an interrupt. Typically this is not a problem as
* the rate of events would be low. However, that is not the case with
* lprnet for example. On an Intel Xeon D-2191 where we run 8 instances of
* lprnet, the host receives roughly 80k interrupts per second from the device
* (per /proc/interrupts). While NAPI documentation indicates the host should
* just chug along, sadly that behavior causes instability in some hosts.
*
* Therefore, we implement an interrupt disable scheme similar to NAPI. The
* key difference is that we will delay after draining the queue for a small
* time to allow additional events to come in via polling. Using the above
* lprnet workload, this reduces the number of interrupts processed from
* ~80k/sec to about 64 in 5 minutes and appears to solve the system
* instability.
*/
irqreturn_t dbc_irq_handler(int irq, void *data)
{
struct dma_bridge_chan *dbc = data;
int rcu_id;
u32 head;
u32 tail;
rcu_id = srcu_read_lock(&dbc->ch_lock);
if (!dbc->usr) {
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return IRQ_HANDLED;
}
head = readl(dbc->dbc_base + RSPHP_OFF);
if (head == U32_MAX) { /* PCI link error */
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return IRQ_NONE;
}
tail = readl(dbc->dbc_base + RSPTP_OFF);
if (tail == U32_MAX) { /* PCI link error */
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return IRQ_NONE;
}
if (head == tail) { /* queue empty */
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return IRQ_NONE;
}
disable_irq_nosync(irq);
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return IRQ_WAKE_THREAD;
}
void irq_polling_work(struct work_struct *work)
{
struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work);
unsigned long flags;
int rcu_id;
u32 head;
u32 tail;
rcu_id = srcu_read_lock(&dbc->ch_lock);
while (1) {
if (dbc->qdev->in_reset) {
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return;
}
if (!dbc->usr) {
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return;
}
spin_lock_irqsave(&dbc->xfer_lock, flags);
if (list_empty(&dbc->xfer_list)) {
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return;
}
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
head = readl(dbc->dbc_base + RSPHP_OFF);
if (head == U32_MAX) { /* PCI link error */
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return;
}
tail = readl(dbc->dbc_base + RSPTP_OFF);
if (tail == U32_MAX) { /* PCI link error */
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return;
}
if (head != tail) {
irq_wake_thread(dbc->irq, dbc);
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return;
}
cond_resched();
usleep_range(datapath_poll_interval_us, 2 * datapath_poll_interval_us);
}
}
irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
{
struct dma_bridge_chan *dbc = data;
int event_count = NUM_EVENTS;
int delay_count = NUM_DELAYS;
struct qaic_device *qdev;
struct qaic_bo *bo, *i;
struct dbc_rsp *rsp;
unsigned long flags;
int rcu_id;
u16 status;
u16 req_id;
u32 head;
u32 tail;
rcu_id = srcu_read_lock(&dbc->ch_lock);
head = readl(dbc->dbc_base + RSPHP_OFF);
if (head == U32_MAX) /* PCI link error */
goto error_out;
qdev = dbc->qdev;
read_fifo:
if (!event_count) {
event_count = NUM_EVENTS;
cond_resched();
}
/*
* if this channel isn't assigned or gets unassigned during processing
* we have nothing further to do
*/
if (!dbc->usr)
goto error_out;
tail = readl(dbc->dbc_base + RSPTP_OFF);
if (tail == U32_MAX) /* PCI link error */
goto error_out;
if (head == tail) { /* queue empty */
if (delay_count) {
--delay_count;
usleep_range(100, 200);
goto read_fifo; /* check for a new event */
}
goto normal_out;
}
delay_count = NUM_DELAYS;
while (head != tail) {
if (!event_count)
break;
--event_count;
rsp = dbc->rsp_q_base + head * sizeof(*rsp);
req_id = le16_to_cpu(rsp->req_id);
status = le16_to_cpu(rsp->status);
if (status)
pci_dbg(qdev->pdev, "req_id %d failed with status %d\n", req_id, status);
spin_lock_irqsave(&dbc->xfer_lock, flags);
/*
* A BO can receive multiple interrupts, since a BO can be
* divided into multiple slices and a buffer receives as many
* interrupts as slices. So until it receives interrupts for
* all the slices we cannot mark that buffer complete.
*/
list_for_each_entry_safe(bo, i, &dbc->xfer_list, xfer_list) {
if (bo->req_id == req_id)
bo->nr_slice_xfer_done++;
else
continue;
if (bo->nr_slice_xfer_done < bo->nr_slice)
break;
/*
* At this point we have received all the interrupts for
* BO, which means BO execution is complete.
*/
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
bo->nr_slice_xfer_done = 0;
bo->queued = false;
list_del(&bo->xfer_list);
bo->perf_stats.req_processed_ts = ktime_get_ns();
complete_all(&bo->xfer_done);
drm_gem_object_put(&bo->base);
break;
}
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
head = (head + 1) % dbc->nelem;
}
/*
* Update the head pointer of response queue and let the device know
* that we have consumed elements from the queue.
*/
writel(head, dbc->dbc_base + RSPHP_OFF);
/* elements might have been put in the queue while we were processing */
goto read_fifo;
normal_out:
if (likely(!datapath_polling))
enable_irq(irq);
else
schedule_work(&dbc->poll_work);
/* checking the fifo and enabling irqs is a race, missed event check */
tail = readl(dbc->dbc_base + RSPTP_OFF);
if (tail != U32_MAX && head != tail) {
if (likely(!datapath_polling))
disable_irq_nosync(irq);
goto read_fifo;
}
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return IRQ_HANDLED;
error_out:
srcu_read_unlock(&dbc->ch_lock, rcu_id);
if (likely(!datapath_polling))
enable_irq(irq);
else
schedule_work(&dbc->poll_work);
return IRQ_HANDLED;
}
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qaic_wait *args = data;
int usr_rcu_id, qdev_rcu_id;
struct dma_bridge_chan *dbc;
struct drm_gem_object *obj;
struct qaic_device *qdev;
unsigned long timeout;
struct qaic_user *usr;
struct qaic_bo *bo;
int rcu_id;
int ret;
if (args->pad != 0)
return -EINVAL;
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
if (!usr->qddev) {
ret = -ENODEV;
goto unlock_usr_srcu;
}
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
if (qdev->in_reset) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
if (args->dbc_id >= qdev->num_dbc) {
ret = -EINVAL;
goto unlock_dev_srcu;
}
dbc = &qdev->dbc[args->dbc_id];
rcu_id = srcu_read_lock(&dbc->ch_lock);
if (dbc->usr != usr) {
ret = -EPERM;
goto unlock_ch_srcu;
}
obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj) {
ret = -ENOENT;
goto unlock_ch_srcu;
}
bo = to_qaic_bo(obj);
timeout = args->timeout ? args->timeout : wait_exec_default_timeout_ms;
timeout = msecs_to_jiffies(timeout);
ret = wait_for_completion_interruptible_timeout(&bo->xfer_done, timeout);
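/*
 * wait_for_completion_interruptible_timeout() returns 0 on timeout, a
 * negative value if interrupted by a signal, and the remaining jiffies
 * (> 0) on success; map those onto -ETIMEDOUT, the error itself, and 0.
 */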
if (!ret) {
ret = -ETIMEDOUT;
goto put_obj;
}
if (ret > 0)
ret = 0;
if (!dbc->usr)
ret = -EPERM;
put_obj:
drm_gem_object_put(obj);
unlock_ch_srcu:
srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return ret;
}
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qaic_perf_stats_entry *ent = NULL;
struct qaic_perf_stats *args = data;
int usr_rcu_id, qdev_rcu_id;
struct drm_gem_object *obj;
struct qaic_device *qdev;
struct qaic_user *usr;
struct qaic_bo *bo;
int ret, i;
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
if (!usr->qddev) {
ret = -ENODEV;
goto unlock_usr_srcu;
}
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
if (qdev->in_reset) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
if (args->hdr.dbc_id >= qdev->num_dbc) {
ret = -EINVAL;
goto unlock_dev_srcu;
}
ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL);
if (!ent) {
ret = -ENOMEM;
goto unlock_dev_srcu;
}
ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent));
if (ret) {
ret = -EFAULT;
goto free_ent;
}
for (i = 0; i < args->hdr.count; i++) {
obj = drm_gem_object_lookup(file_priv, ent[i].handle);
if (!obj) {
ret = -ENOENT;
goto free_ent;
}
bo = to_qaic_bo(obj);
/*
 * If the perf stats ioctl is called before the wait ioctl has completed,
 * the latency information is not yet valid.
 */
if (bo->perf_stats.req_processed_ts < bo->perf_stats.req_submit_ts) {
ent[i].device_latency_us = 0;
} else {
ent[i].device_latency_us = div_u64((bo->perf_stats.req_processed_ts -
bo->perf_stats.req_submit_ts), 1000);
}
ent[i].submit_latency_us = div_u64((bo->perf_stats.req_submit_ts -
bo->perf_stats.req_received_ts), 1000);
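/*
 * Both latencies above are computed from ktime_get_ns() timestamps (see
 * the IRQ handler), hence the divide by 1000 to report microseconds.
 */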
ent[i].queue_level_before = bo->perf_stats.queue_level_before;
ent[i].num_queue_element = bo->total_slice_nents;
drm_gem_object_put(obj);
}
if (copy_to_user(u64_to_user_ptr(args->data), ent, args->hdr.count * sizeof(*ent)))
ret = -EFAULT;
free_ent:
kfree(ent);
unlock_dev_srcu:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return ret;
}
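/*
 * Drain every BO still queued on the channel and signal its completion.
 * The xfer_lock is dropped around the per-BO teardown, presumably because
 * the final drm_gem_object_put() may sleep, and re-taken before looking
 * at the list again.
 */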
static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
{
unsigned long flags;
struct qaic_bo *bo;
spin_lock_irqsave(&dbc->xfer_lock, flags);
while (!list_empty(&dbc->xfer_list)) {
bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
bo->queued = false;
list_del(&bo->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
complete_all(&bo->xfer_done);
drm_gem_object_put(&bo->base);
spin_lock_irqsave(&dbc->xfer_lock, flags);
}
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
}
int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
{
if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle)
return -EPERM;
qdev->dbc[dbc_id].usr = NULL;
synchronize_srcu(&qdev->dbc[dbc_id].ch_lock);
return 0;
}
/**
 * enable_dbc - Enable the DBC. A DBC is disabled by removing the user
 * context from it; adding the user context back enables it again. This
 * function trusts the DBC ID passed and expects the DBC to be disabled.
 * @qdev: Qranium device handle
 * @dbc_id: ID of the DBC
 * @usr: User context
 */
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
{
qdev->dbc[dbc_id].usr = usr;
}
void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
{
struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
dbc->usr = NULL;
empty_xfer_list(qdev, dbc);
synchronize_srcu(&dbc->ch_lock);
/*
 * Threads still holding the channel lock may add more elements to the
 * xfer_list. Flush those out as well.
 */
empty_xfer_list(qdev, dbc);
}
void release_dbc(struct qaic_device *qdev, u32 dbc_id)
{
struct bo_slice *slice, *slice_temp;
struct qaic_bo *bo, *bo_temp;
struct dma_bridge_chan *dbc;
dbc = &qdev->dbc[dbc_id];
if (!dbc->in_use)
return;
wakeup_dbc(qdev, dbc_id);
dma_free_coherent(&qdev->pdev->dev, dbc->total_size, dbc->req_q_base, dbc->dma_addr);
dbc->total_size = 0;
dbc->req_q_base = NULL;
dbc->dma_addr = 0;
dbc->nelem = 0;
dbc->usr = NULL;
list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) {
list_for_each_entry_safe(slice, slice_temp, &bo->slices, slice)
kref_put(&slice->ref_count, free_slice);
bo->sliced = false;
INIT_LIST_HEAD(&bo->slices);
bo->total_slice_nents = 0;
bo->dir = 0;
bo->dbc = NULL;
bo->nr_slice = 0;
bo->nr_slice_xfer_done = 0;
bo->queued = false;
bo->req_id = 0;
init_completion(&bo->xfer_done);
complete_all(&bo->xfer_done);
list_del(&bo->bo_list);
bo->perf_stats.req_received_ts = 0;
bo->perf_stats.req_submit_ts = 0;
bo->perf_stats.req_processed_ts = 0;
bo->perf_stats.queue_level_before = 0;
}
dbc->in_use = false;
wake_up(&dbc->dbc_release);
}
| linux-master | drivers/accel/qaic/qaic_data.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2018 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "gaudiP.h"
#include "../include/gaudi/gaudi_coresight.h"
#include "../include/gaudi/asic_reg/gaudi_regs.h"
#include "../include/gaudi/gaudi_masks.h"
#include "../include/gaudi/gaudi_reg_map.h"
#include <uapi/drm/habanalabs_accel.h>
#define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET
#define SPMU_EVENT_TYPES_OFFSET 0x400
#define SPMU_MAX_COUNTERS 6
static u64 debug_stm_regs[GAUDI_STM_LAST + 1] = {
[GAUDI_STM_MME0_ACC] = mmMME0_ACC_STM_BASE,
[GAUDI_STM_MME0_SBAB] = mmMME0_SBAB_STM_BASE,
[GAUDI_STM_MME0_CTRL] = mmMME0_CTRL_STM_BASE,
[GAUDI_STM_MME1_ACC] = mmMME1_ACC_STM_BASE,
[GAUDI_STM_MME1_SBAB] = mmMME1_SBAB_STM_BASE,
[GAUDI_STM_MME1_CTRL] = mmMME1_CTRL_STM_BASE,
[GAUDI_STM_MME2_ACC] = mmMME2_ACC_STM_BASE,
[GAUDI_STM_MME2_SBAB] = mmMME2_SBAB_STM_BASE,
[GAUDI_STM_MME2_CTRL] = mmMME2_CTRL_STM_BASE,
[GAUDI_STM_MME3_ACC] = mmMME3_ACC_STM_BASE,
[GAUDI_STM_MME3_SBAB] = mmMME3_SBAB_STM_BASE,
[GAUDI_STM_MME3_CTRL] = mmMME3_CTRL_STM_BASE,
[GAUDI_STM_DMA_IF_W_S] = mmDMA_IF_W_S_STM_BASE,
[GAUDI_STM_DMA_IF_E_S] = mmDMA_IF_E_S_STM_BASE,
[GAUDI_STM_DMA_IF_W_N] = mmDMA_IF_W_N_STM_BASE,
[GAUDI_STM_DMA_IF_E_N] = mmDMA_IF_E_N_STM_BASE,
[GAUDI_STM_CPU] = mmCPU_STM_BASE,
[GAUDI_STM_DMA_CH_0_CS] = mmDMA_CH_0_CS_STM_BASE,
[GAUDI_STM_DMA_CH_1_CS] = mmDMA_CH_1_CS_STM_BASE,
[GAUDI_STM_DMA_CH_2_CS] = mmDMA_CH_2_CS_STM_BASE,
[GAUDI_STM_DMA_CH_3_CS] = mmDMA_CH_3_CS_STM_BASE,
[GAUDI_STM_DMA_CH_4_CS] = mmDMA_CH_4_CS_STM_BASE,
[GAUDI_STM_DMA_CH_5_CS] = mmDMA_CH_5_CS_STM_BASE,
[GAUDI_STM_DMA_CH_6_CS] = mmDMA_CH_6_CS_STM_BASE,
[GAUDI_STM_DMA_CH_7_CS] = mmDMA_CH_7_CS_STM_BASE,
[GAUDI_STM_PCIE] = mmPCIE_STM_BASE,
[GAUDI_STM_MMU_CS] = mmMMU_CS_STM_BASE,
[GAUDI_STM_PSOC] = mmPSOC_STM_BASE,
[GAUDI_STM_NIC0_0] = mmSTM_0_NIC0_DBG_BASE,
[GAUDI_STM_NIC0_1] = mmSTM_1_NIC0_DBG_BASE,
[GAUDI_STM_NIC1_0] = mmSTM_0_NIC1_DBG_BASE,
[GAUDI_STM_NIC1_1] = mmSTM_1_NIC1_DBG_BASE,
[GAUDI_STM_NIC2_0] = mmSTM_0_NIC2_DBG_BASE,
[GAUDI_STM_NIC2_1] = mmSTM_1_NIC2_DBG_BASE,
[GAUDI_STM_NIC3_0] = mmSTM_0_NIC3_DBG_BASE,
[GAUDI_STM_NIC3_1] = mmSTM_1_NIC3_DBG_BASE,
[GAUDI_STM_NIC4_0] = mmSTM_0_NIC4_DBG_BASE,
[GAUDI_STM_NIC4_1] = mmSTM_1_NIC4_DBG_BASE,
[GAUDI_STM_TPC0_EML] = mmTPC0_EML_STM_BASE,
[GAUDI_STM_TPC1_EML] = mmTPC1_EML_STM_BASE,
[GAUDI_STM_TPC2_EML] = mmTPC2_EML_STM_BASE,
[GAUDI_STM_TPC3_EML] = mmTPC3_EML_STM_BASE,
[GAUDI_STM_TPC4_EML] = mmTPC4_EML_STM_BASE,
[GAUDI_STM_TPC5_EML] = mmTPC5_EML_STM_BASE,
[GAUDI_STM_TPC6_EML] = mmTPC6_EML_STM_BASE,
[GAUDI_STM_TPC7_EML] = mmTPC7_EML_STM_BASE
};
static u64 debug_etf_regs[GAUDI_ETF_LAST + 1] = {
[GAUDI_ETF_MME0_ACC] = mmMME0_ACC_ETF_BASE,
[GAUDI_ETF_MME0_SBAB] = mmMME0_SBAB_ETF_BASE,
[GAUDI_ETF_MME0_CTRL] = mmMME0_CTRL_ETF_BASE,
[GAUDI_ETF_MME1_ACC] = mmMME1_ACC_ETF_BASE,
[GAUDI_ETF_MME1_SBAB] = mmMME1_SBAB_ETF_BASE,
[GAUDI_ETF_MME1_CTRL] = mmMME1_CTRL_ETF_BASE,
[GAUDI_ETF_MME2_ACC] = mmMME2_MME2_ACC_ETF_BASE,
[GAUDI_ETF_MME2_SBAB] = mmMME2_SBAB_ETF_BASE,
[GAUDI_ETF_MME2_CTRL] = mmMME2_CTRL_ETF_BASE,
[GAUDI_ETF_MME3_ACC] = mmMME3_ACC_ETF_BASE,
[GAUDI_ETF_MME3_SBAB] = mmMME3_SBAB_ETF_BASE,
[GAUDI_ETF_MME3_CTRL] = mmMME3_CTRL_ETF_BASE,
[GAUDI_ETF_DMA_IF_W_S] = mmDMA_IF_W_S_ETF_BASE,
[GAUDI_ETF_DMA_IF_E_S] = mmDMA_IF_E_S_ETF_BASE,
[GAUDI_ETF_DMA_IF_W_N] = mmDMA_IF_W_N_ETF_BASE,
[GAUDI_ETF_DMA_IF_E_N] = mmDMA_IF_E_N_ETF_BASE,
[GAUDI_ETF_CPU_0] = mmCPU_ETF_0_BASE,
[GAUDI_ETF_CPU_1] = mmCPU_ETF_1_BASE,
[GAUDI_ETF_CPU_TRACE] = mmCPU_ETF_TRACE_BASE,
[GAUDI_ETF_DMA_CH_0_CS] = mmDMA_CH_0_CS_ETF_BASE,
[GAUDI_ETF_DMA_CH_1_CS] = mmDMA_CH_1_CS_ETF_BASE,
[GAUDI_ETF_DMA_CH_2_CS] = mmDMA_CH_2_CS_ETF_BASE,
[GAUDI_ETF_DMA_CH_3_CS] = mmDMA_CH_3_CS_ETF_BASE,
[GAUDI_ETF_DMA_CH_4_CS] = mmDMA_CH_4_CS_ETF_BASE,
[GAUDI_ETF_DMA_CH_5_CS] = mmDMA_CH_5_CS_ETF_BASE,
[GAUDI_ETF_DMA_CH_6_CS] = mmDMA_CH_6_CS_ETF_BASE,
[GAUDI_ETF_DMA_CH_7_CS] = mmDMA_CH_7_CS_ETF_BASE,
[GAUDI_ETF_PCIE] = mmPCIE_ETF_BASE,
[GAUDI_ETF_MMU_CS] = mmMMU_CS_ETF_BASE,
[GAUDI_ETF_PSOC] = mmPSOC_ETF_BASE,
[GAUDI_ETF_NIC0_0] = mmETF_0_NIC0_DBG_BASE,
[GAUDI_ETF_NIC0_1] = mmETF_1_NIC0_DBG_BASE,
[GAUDI_ETF_NIC1_0] = mmETF_0_NIC1_DBG_BASE,
[GAUDI_ETF_NIC1_1] = mmETF_1_NIC1_DBG_BASE,
[GAUDI_ETF_NIC2_0] = mmETF_0_NIC2_DBG_BASE,
[GAUDI_ETF_NIC2_1] = mmETF_1_NIC2_DBG_BASE,
[GAUDI_ETF_NIC3_0] = mmETF_0_NIC3_DBG_BASE,
[GAUDI_ETF_NIC3_1] = mmETF_1_NIC3_DBG_BASE,
[GAUDI_ETF_NIC4_0] = mmETF_0_NIC4_DBG_BASE,
[GAUDI_ETF_NIC4_1] = mmETF_1_NIC4_DBG_BASE,
[GAUDI_ETF_TPC0_EML] = mmTPC0_EML_ETF_BASE,
[GAUDI_ETF_TPC1_EML] = mmTPC1_EML_ETF_BASE,
[GAUDI_ETF_TPC2_EML] = mmTPC2_EML_ETF_BASE,
[GAUDI_ETF_TPC3_EML] = mmTPC3_EML_ETF_BASE,
[GAUDI_ETF_TPC4_EML] = mmTPC4_EML_ETF_BASE,
[GAUDI_ETF_TPC5_EML] = mmTPC5_EML_ETF_BASE,
[GAUDI_ETF_TPC6_EML] = mmTPC6_EML_ETF_BASE,
[GAUDI_ETF_TPC7_EML] = mmTPC7_EML_ETF_BASE
};
static u64 debug_funnel_regs[GAUDI_FUNNEL_LAST + 1] = {
[GAUDI_FUNNEL_MME0_ACC] = mmMME0_ACC_FUNNEL_BASE,
[GAUDI_FUNNEL_MME1_ACC] = mmMME1_ACC_FUNNEL_BASE,
[GAUDI_FUNNEL_MME2_ACC] = mmMME2_ACC_FUNNEL_BASE,
[GAUDI_FUNNEL_MME3_ACC] = mmMME3_ACC_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y0_X0] = mmSRAM_Y0_X0_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y0_X1] = mmSRAM_Y0_X1_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y0_X2] = mmSRAM_Y0_X2_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y0_X3] = mmSRAM_Y0_X3_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y0_X4] = mmSRAM_Y0_X4_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y0_X5] = mmSRAM_Y0_X5_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y0_X6] = mmSRAM_Y0_X6_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y0_X7] = mmSRAM_Y0_X7_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y1_X0] = mmSRAM_Y1_X0_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y1_X1] = mmSRAM_Y1_X1_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y1_X2] = mmSRAM_Y1_X2_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y1_X3] = mmSRAM_Y1_X3_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y1_X4] = mmSRAM_Y1_X4_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y1_X5] = mmSRAM_Y1_X5_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y1_X6] = mmSRAM_Y1_X6_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y1_X7] = mmSRAM_Y1_X7_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y2_X0] = mmSRAM_Y2_X0_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y2_X1] = mmSRAM_Y2_X1_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y2_X2] = mmSRAM_Y2_X2_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y2_X3] = mmSRAM_Y2_X3_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y2_X4] = mmSRAM_Y2_X4_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y2_X5] = mmSRAM_Y2_X5_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y2_X6] = mmSRAM_Y2_X6_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y2_X7] = mmSRAM_Y2_X7_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y3_X0] = mmSRAM_Y3_X0_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y3_X1] = mmSRAM_Y3_X1_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y3_X2] = mmSRAM_Y3_X2_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y3_X4] = mmSRAM_Y3_X4_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y3_X3] = mmSRAM_Y3_X3_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y3_X5] = mmSRAM_Y3_X5_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y3_X6] = mmSRAM_Y3_X6_FUNNEL_BASE,
[GAUDI_FUNNEL_SRAM_Y3_X7] = mmSRAM_Y3_X7_FUNNEL_BASE,
[GAUDI_FUNNEL_SIF_0] = mmSIF_FUNNEL_0_BASE,
[GAUDI_FUNNEL_SIF_1] = mmSIF_FUNNEL_1_BASE,
[GAUDI_FUNNEL_SIF_2] = mmSIF_FUNNEL_2_BASE,
[GAUDI_FUNNEL_SIF_3] = mmSIF_FUNNEL_3_BASE,
[GAUDI_FUNNEL_SIF_4] = mmSIF_FUNNEL_4_BASE,
[GAUDI_FUNNEL_SIF_5] = mmSIF_FUNNEL_5_BASE,
[GAUDI_FUNNEL_SIF_6] = mmSIF_FUNNEL_6_BASE,
[GAUDI_FUNNEL_SIF_7] = mmSIF_FUNNEL_7_BASE,
[GAUDI_FUNNEL_NIF_0] = mmNIF_FUNNEL_0_BASE,
[GAUDI_FUNNEL_NIF_1] = mmNIF_FUNNEL_1_BASE,
[GAUDI_FUNNEL_NIF_2] = mmNIF_FUNNEL_2_BASE,
[GAUDI_FUNNEL_NIF_3] = mmNIF_FUNNEL_3_BASE,
[GAUDI_FUNNEL_NIF_4] = mmNIF_FUNNEL_4_BASE,
[GAUDI_FUNNEL_NIF_5] = mmNIF_FUNNEL_5_BASE,
[GAUDI_FUNNEL_NIF_6] = mmNIF_FUNNEL_6_BASE,
[GAUDI_FUNNEL_NIF_7] = mmNIF_FUNNEL_7_BASE,
[GAUDI_FUNNEL_DMA_IF_W_S] = mmDMA_IF_W_S_FUNNEL_BASE,
[GAUDI_FUNNEL_DMA_IF_E_S] = mmDMA_IF_E_S_FUNNEL_BASE,
[GAUDI_FUNNEL_DMA_IF_W_N] = mmDMA_IF_W_N_FUNNEL_BASE,
[GAUDI_FUNNEL_DMA_IF_E_N] = mmDMA_IF_E_N_FUNNEL_BASE,
[GAUDI_FUNNEL_CPU] = mmCPU_FUNNEL_BASE,
[GAUDI_FUNNEL_NIC_TPC_W_S] = mmNIC_TPC_FUNNEL_W_S_BASE,
[GAUDI_FUNNEL_NIC_TPC_E_S] = mmNIC_TPC_FUNNEL_E_S_BASE,
[GAUDI_FUNNEL_NIC_TPC_W_N] = mmNIC_TPC_FUNNEL_W_N_BASE,
[GAUDI_FUNNEL_NIC_TPC_E_N] = mmNIC_TPC_FUNNEL_E_N_BASE,
[GAUDI_FUNNEL_PCIE] = mmPCIE_FUNNEL_BASE,
[GAUDI_FUNNEL_PSOC] = mmPSOC_FUNNEL_BASE,
[GAUDI_FUNNEL_NIC0] = mmFUNNEL_NIC0_DBG_BASE,
[GAUDI_FUNNEL_NIC1] = mmFUNNEL_NIC1_DBG_BASE,
[GAUDI_FUNNEL_NIC2] = mmFUNNEL_NIC2_DBG_BASE,
[GAUDI_FUNNEL_NIC3] = mmFUNNEL_NIC3_DBG_BASE,
[GAUDI_FUNNEL_NIC4] = mmFUNNEL_NIC4_DBG_BASE,
[GAUDI_FUNNEL_TPC0_EML] = mmTPC0_EML_FUNNEL_BASE,
[GAUDI_FUNNEL_TPC1_EML] = mmTPC1_EML_FUNNEL_BASE,
[GAUDI_FUNNEL_TPC2_EML] = mmTPC2_EML_FUNNEL_BASE,
[GAUDI_FUNNEL_TPC3_EML] = mmTPC3_EML_FUNNEL_BASE,
[GAUDI_FUNNEL_TPC4_EML] = mmTPC4_EML_FUNNEL_BASE,
[GAUDI_FUNNEL_TPC5_EML] = mmTPC5_EML_FUNNEL_BASE,
[GAUDI_FUNNEL_TPC6_EML] = mmTPC6_EML_FUNNEL_BASE,
[GAUDI_FUNNEL_TPC7_EML] = mmTPC7_EML_FUNNEL_BASE
};
static u64 debug_bmon_regs[GAUDI_BMON_LAST + 1] = {
[GAUDI_BMON_MME0_ACC_0] = mmMME0_ACC_BMON0_BASE,
[GAUDI_BMON_MME0_SBAB_0] = mmMME0_SBAB_BMON0_BASE,
[GAUDI_BMON_MME0_SBAB_1] = mmMME0_SBAB_BMON1_BASE,
[GAUDI_BMON_MME0_CTRL_0] = mmMME0_CTRL_BMON0_BASE,
[GAUDI_BMON_MME0_CTRL_1] = mmMME0_CTRL_BMON1_BASE,
[GAUDI_BMON_MME1_ACC_0] = mmMME1_ACC_BMON0_BASE,
[GAUDI_BMON_MME1_SBAB_0] = mmMME1_SBAB_BMON0_BASE,
[GAUDI_BMON_MME1_SBAB_1] = mmMME1_SBAB_BMON1_BASE,
[GAUDI_BMON_MME1_CTRL_0] = mmMME1_CTRL_BMON0_BASE,
[GAUDI_BMON_MME1_CTRL_1] = mmMME1_CTRL_BMON1_BASE,
[GAUDI_BMON_MME2_ACC_0] = mmMME2_ACC_BMON0_BASE,
[GAUDI_BMON_MME2_SBAB_0] = mmMME2_SBAB_BMON0_BASE,
[GAUDI_BMON_MME2_SBAB_1] = mmMME2_SBAB_BMON1_BASE,
[GAUDI_BMON_MME2_CTRL_0] = mmMME2_CTRL_BMON0_BASE,
[GAUDI_BMON_MME2_CTRL_1] = mmMME2_CTRL_BMON1_BASE,
[GAUDI_BMON_MME3_ACC_0] = mmMME3_ACC_BMON0_BASE,
[GAUDI_BMON_MME3_SBAB_0] = mmMME3_SBAB_BMON0_BASE,
[GAUDI_BMON_MME3_SBAB_1] = mmMME3_SBAB_BMON1_BASE,
[GAUDI_BMON_MME3_CTRL_0] = mmMME3_CTRL_BMON0_BASE,
[GAUDI_BMON_MME3_CTRL_1] = mmMME3_CTRL_BMON1_BASE,
[GAUDI_BMON_DMA_IF_W_S_SOB_WR] = mmDMA_IF_W_S_SOB_WR_BMON_BASE,
[GAUDI_BMON_DMA_IF_W_S_0_WR] = mmDMA_IF_W_S_HBM0_WR_BMON_BASE,
[GAUDI_BMON_DMA_IF_W_S_0_RD] = mmDMA_IF_W_S_HBM0_RD_BMON_BASE,
[GAUDI_BMON_DMA_IF_W_S_1_WR] = mmDMA_IF_W_S_HBM1_WR_BMON_BASE,
[GAUDI_BMON_DMA_IF_W_S_1_RD] = mmDMA_IF_W_S_HBM1_RD_BMON_BASE,
[GAUDI_BMON_DMA_IF_E_S_SOB_WR] = mmDMA_IF_E_S_SOB_WR_BMON_BASE,
[GAUDI_BMON_DMA_IF_E_S_0_WR] = mmDMA_IF_E_S_HBM0_WR_BMON_BASE,
[GAUDI_BMON_DMA_IF_E_S_0_RD] = mmDMA_IF_E_S_HBM0_RD_BMON_BASE,
[GAUDI_BMON_DMA_IF_E_S_1_WR] = mmDMA_IF_E_S_HBM1_WR_BMON_BASE,
[GAUDI_BMON_DMA_IF_E_S_1_RD] = mmDMA_IF_E_S_HBM1_RD_BMON_BASE,
[GAUDI_BMON_DMA_IF_W_N_SOB_WR] = mmDMA_IF_W_N_SOB_WR_BMON_BASE,
[GAUDI_BMON_DMA_IF_W_N_HBM0_WR] = mmDMA_IF_W_N_HBM0_WR_BMON_BASE,
[GAUDI_BMON_DMA_IF_W_N_HBM0_RD] = mmDMA_IF_W_N_HBM0_RD_BMON_BASE,
[GAUDI_BMON_DMA_IF_W_N_HBM1_WR] = mmDMA_IF_W_N_HBM1_WR_BMON_BASE,
[GAUDI_BMON_DMA_IF_W_N_HBM1_RD] = mmDMA_IF_W_N_HBM1_RD_BMON_BASE,
[GAUDI_BMON_DMA_IF_E_N_SOB_WR] = mmDMA_IF_E_N_SOB_WR_BMON_BASE,
[GAUDI_BMON_DMA_IF_E_N_HBM0_WR] = mmDMA_IF_E_N_HBM0_WR_BMON_BASE,
[GAUDI_BMON_DMA_IF_E_N_HBM0_RD] = mmDMA_IF_E_N_HBM0_RD_BMON_BASE,
[GAUDI_BMON_DMA_IF_E_N_HBM1_WR] = mmDMA_IF_E_N_HBM1_WR_BMON_BASE,
[GAUDI_BMON_DMA_IF_E_N_HBM1_RD] = mmDMA_IF_E_N_HBM1_RD_BMON_BASE,
[GAUDI_BMON_CPU_WR] = mmCPU_WR_BMON_BASE,
[GAUDI_BMON_CPU_RD] = mmCPU_RD_BMON_BASE,
[GAUDI_BMON_DMA_CH_0_0] = mmDMA_CH_0_BMON_0_BASE,
[GAUDI_BMON_DMA_CH_0_1] = mmDMA_CH_0_BMON_1_BASE,
[GAUDI_BMON_DMA_CH_1_0] = mmDMA_CH_1_BMON_0_BASE,
[GAUDI_BMON_DMA_CH_1_1] = mmDMA_CH_1_BMON_1_BASE,
[GAUDI_BMON_DMA_CH_2_0] = mmDMA_CH_2_BMON_0_BASE,
[GAUDI_BMON_DMA_CH_2_1] = mmDMA_CH_2_BMON_1_BASE,
[GAUDI_BMON_DMA_CH_3_0] = mmDMA_CH_3_BMON_0_BASE,
[GAUDI_BMON_DMA_CH_3_1] = mmDMA_CH_3_BMON_1_BASE,
[GAUDI_BMON_DMA_CH_4_0] = mmDMA_CH_4_BMON_0_BASE,
[GAUDI_BMON_DMA_CH_4_1] = mmDMA_CH_4_BMON_1_BASE,
[GAUDI_BMON_DMA_CH_5_0] = mmDMA_CH_5_BMON_0_BASE,
[GAUDI_BMON_DMA_CH_5_1] = mmDMA_CH_5_BMON_1_BASE,
[GAUDI_BMON_DMA_CH_6_0] = mmDMA_CH_6_BMON_0_BASE,
[GAUDI_BMON_DMA_CH_6_1] = mmDMA_CH_6_BMON_1_BASE,
[GAUDI_BMON_DMA_CH_7_0] = mmDMA_CH_7_BMON_0_BASE,
[GAUDI_BMON_DMA_CH_7_1] = mmDMA_CH_7_BMON_1_BASE,
[GAUDI_BMON_PCIE_MSTR_WR] = mmPCIE_BMON_MSTR_WR_BASE,
[GAUDI_BMON_PCIE_MSTR_RD] = mmPCIE_BMON_MSTR_RD_BASE,
[GAUDI_BMON_PCIE_SLV_WR] = mmPCIE_BMON_SLV_WR_BASE,
[GAUDI_BMON_PCIE_SLV_RD] = mmPCIE_BMON_SLV_RD_BASE,
[GAUDI_BMON_MMU_0] = mmMMU_BMON_0_BASE,
[GAUDI_BMON_MMU_1] = mmMMU_BMON_1_BASE,
[GAUDI_BMON_NIC0_0] = mmBMON0_NIC0_DBG_BASE,
[GAUDI_BMON_NIC0_1] = mmBMON1_NIC0_DBG_BASE,
[GAUDI_BMON_NIC0_2] = mmBMON2_NIC0_DBG_BASE,
[GAUDI_BMON_NIC0_3] = mmBMON3_NIC0_DBG_BASE,
[GAUDI_BMON_NIC0_4] = mmBMON4_NIC0_DBG_BASE,
[GAUDI_BMON_NIC1_0] = mmBMON0_NIC1_DBG_BASE,
[GAUDI_BMON_NIC1_1] = mmBMON1_NIC1_DBG_BASE,
[GAUDI_BMON_NIC1_2] = mmBMON2_NIC1_DBG_BASE,
[GAUDI_BMON_NIC1_3] = mmBMON3_NIC1_DBG_BASE,
[GAUDI_BMON_NIC1_4] = mmBMON4_NIC1_DBG_BASE,
[GAUDI_BMON_NIC2_0] = mmBMON0_NIC2_DBG_BASE,
[GAUDI_BMON_NIC2_1] = mmBMON1_NIC2_DBG_BASE,
[GAUDI_BMON_NIC2_2] = mmBMON2_NIC2_DBG_BASE,
[GAUDI_BMON_NIC2_3] = mmBMON3_NIC2_DBG_BASE,
[GAUDI_BMON_NIC2_4] = mmBMON4_NIC2_DBG_BASE,
[GAUDI_BMON_NIC3_0] = mmBMON0_NIC3_DBG_BASE,
[GAUDI_BMON_NIC3_1] = mmBMON1_NIC3_DBG_BASE,
[GAUDI_BMON_NIC3_2] = mmBMON2_NIC3_DBG_BASE,
[GAUDI_BMON_NIC3_3] = mmBMON3_NIC3_DBG_BASE,
[GAUDI_BMON_NIC3_4] = mmBMON4_NIC3_DBG_BASE,
[GAUDI_BMON_NIC4_0] = mmBMON0_NIC4_DBG_BASE,
[GAUDI_BMON_NIC4_1] = mmBMON1_NIC4_DBG_BASE,
[GAUDI_BMON_NIC4_2] = mmBMON2_NIC4_DBG_BASE,
[GAUDI_BMON_NIC4_3] = mmBMON3_NIC4_DBG_BASE,
[GAUDI_BMON_NIC4_4] = mmBMON4_NIC4_DBG_BASE,
[GAUDI_BMON_TPC0_EML_0] = mmTPC0_EML_BUSMON_0_BASE,
[GAUDI_BMON_TPC0_EML_1] = mmTPC0_EML_BUSMON_1_BASE,
[GAUDI_BMON_TPC0_EML_2] = mmTPC0_EML_BUSMON_2_BASE,
[GAUDI_BMON_TPC0_EML_3] = mmTPC0_EML_BUSMON_3_BASE,
[GAUDI_BMON_TPC1_EML_0] = mmTPC1_EML_BUSMON_0_BASE,
[GAUDI_BMON_TPC1_EML_1] = mmTPC1_EML_BUSMON_1_BASE,
[GAUDI_BMON_TPC1_EML_2] = mmTPC1_EML_BUSMON_2_BASE,
[GAUDI_BMON_TPC1_EML_3] = mmTPC1_EML_BUSMON_3_BASE,
[GAUDI_BMON_TPC2_EML_0] = mmTPC2_EML_BUSMON_0_BASE,
[GAUDI_BMON_TPC2_EML_1] = mmTPC2_EML_BUSMON_1_BASE,
[GAUDI_BMON_TPC2_EML_2] = mmTPC2_EML_BUSMON_2_BASE,
[GAUDI_BMON_TPC2_EML_3] = mmTPC2_EML_BUSMON_3_BASE,
[GAUDI_BMON_TPC3_EML_0] = mmTPC3_EML_BUSMON_0_BASE,
[GAUDI_BMON_TPC3_EML_1] = mmTPC3_EML_BUSMON_1_BASE,
[GAUDI_BMON_TPC3_EML_2] = mmTPC3_EML_BUSMON_2_BASE,
[GAUDI_BMON_TPC3_EML_3] = mmTPC3_EML_BUSMON_3_BASE,
[GAUDI_BMON_TPC4_EML_0] = mmTPC4_EML_BUSMON_0_BASE,
[GAUDI_BMON_TPC4_EML_1] = mmTPC4_EML_BUSMON_1_BASE,
[GAUDI_BMON_TPC4_EML_2] = mmTPC4_EML_BUSMON_2_BASE,
[GAUDI_BMON_TPC4_EML_3] = mmTPC4_EML_BUSMON_3_BASE,
[GAUDI_BMON_TPC5_EML_0] = mmTPC5_EML_BUSMON_0_BASE,
[GAUDI_BMON_TPC5_EML_1] = mmTPC5_EML_BUSMON_1_BASE,
[GAUDI_BMON_TPC5_EML_2] = mmTPC5_EML_BUSMON_2_BASE,
[GAUDI_BMON_TPC5_EML_3] = mmTPC5_EML_BUSMON_3_BASE,
[GAUDI_BMON_TPC6_EML_0] = mmTPC6_EML_BUSMON_0_BASE,
[GAUDI_BMON_TPC6_EML_1] = mmTPC6_EML_BUSMON_1_BASE,
[GAUDI_BMON_TPC6_EML_2] = mmTPC6_EML_BUSMON_2_BASE,
[GAUDI_BMON_TPC6_EML_3] = mmTPC6_EML_BUSMON_3_BASE,
[GAUDI_BMON_TPC7_EML_0] = mmTPC7_EML_BUSMON_0_BASE,
[GAUDI_BMON_TPC7_EML_1] = mmTPC7_EML_BUSMON_1_BASE,
[GAUDI_BMON_TPC7_EML_2] = mmTPC7_EML_BUSMON_2_BASE,
[GAUDI_BMON_TPC7_EML_3] = mmTPC7_EML_BUSMON_3_BASE
};
static u64 debug_spmu_regs[GAUDI_SPMU_LAST + 1] = {
[GAUDI_SPMU_MME0_ACC] = mmMME0_ACC_SPMU_BASE,
[GAUDI_SPMU_MME0_SBAB] = mmMME0_SBAB_SPMU_BASE,
[GAUDI_SPMU_MME0_CTRL] = mmMME0_CTRL_SPMU_BASE,
[GAUDI_SPMU_MME1_ACC] = mmMME1_ACC_SPMU_BASE,
[GAUDI_SPMU_MME1_SBAB] = mmMME1_SBAB_SPMU_BASE,
[GAUDI_SPMU_MME1_CTRL] = mmMME1_CTRL_SPMU_BASE,
[GAUDI_SPMU_MME2_MME2_ACC] = mmMME2_ACC_SPMU_BASE,
[GAUDI_SPMU_MME2_SBAB] = mmMME2_SBAB_SPMU_BASE,
[GAUDI_SPMU_MME2_CTRL] = mmMME2_CTRL_SPMU_BASE,
[GAUDI_SPMU_MME3_ACC] = mmMME3_ACC_SPMU_BASE,
[GAUDI_SPMU_MME3_SBAB] = mmMME3_SBAB_SPMU_BASE,
[GAUDI_SPMU_MME3_CTRL] = mmMME3_CTRL_SPMU_BASE,
[GAUDI_SPMU_DMA_CH_0_CS] = mmDMA_CH_0_CS_SPMU_BASE,
[GAUDI_SPMU_DMA_CH_1_CS] = mmDMA_CH_1_CS_SPMU_BASE,
[GAUDI_SPMU_DMA_CH_2_CS] = mmDMA_CH_2_CS_SPMU_BASE,
[GAUDI_SPMU_DMA_CH_3_CS] = mmDMA_CH_3_CS_SPMU_BASE,
[GAUDI_SPMU_DMA_CH_4_CS] = mmDMA_CH_4_CS_SPMU_BASE,
[GAUDI_SPMU_DMA_CH_5_CS] = mmDMA_CH_5_CS_SPMU_BASE,
[GAUDI_SPMU_DMA_CH_6_CS] = mmDMA_CH_6_CS_SPMU_BASE,
[GAUDI_SPMU_DMA_CH_7_CS] = mmDMA_CH_7_CS_SPMU_BASE,
[GAUDI_SPMU_PCIE] = mmPCIE_SPMU_BASE,
[GAUDI_SPMU_MMU_CS] = mmMMU_CS_SPMU_BASE,
[GAUDI_SPMU_NIC0_0] = mmSPMU_0_NIC0_DBG_BASE,
[GAUDI_SPMU_NIC0_1] = mmSPMU_1_NIC0_DBG_BASE,
[GAUDI_SPMU_NIC1_0] = mmSPMU_0_NIC1_DBG_BASE,
[GAUDI_SPMU_NIC1_1] = mmSPMU_1_NIC1_DBG_BASE,
[GAUDI_SPMU_NIC2_0] = mmSPMU_0_NIC2_DBG_BASE,
[GAUDI_SPMU_NIC2_1] = mmSPMU_1_NIC2_DBG_BASE,
[GAUDI_SPMU_NIC3_0] = mmSPMU_0_NIC3_DBG_BASE,
[GAUDI_SPMU_NIC3_1] = mmSPMU_1_NIC3_DBG_BASE,
[GAUDI_SPMU_NIC4_0] = mmSPMU_0_NIC4_DBG_BASE,
[GAUDI_SPMU_NIC4_1] = mmSPMU_1_NIC4_DBG_BASE,
[GAUDI_SPMU_TPC0_EML] = mmTPC0_EML_SPMU_BASE,
[GAUDI_SPMU_TPC1_EML] = mmTPC1_EML_SPMU_BASE,
[GAUDI_SPMU_TPC2_EML] = mmTPC2_EML_SPMU_BASE,
[GAUDI_SPMU_TPC3_EML] = mmTPC3_EML_SPMU_BASE,
[GAUDI_SPMU_TPC4_EML] = mmTPC4_EML_SPMU_BASE,
[GAUDI_SPMU_TPC5_EML] = mmTPC5_EML_SPMU_BASE,
[GAUDI_SPMU_TPC6_EML] = mmTPC6_EML_SPMU_BASE,
[GAUDI_SPMU_TPC7_EML] = mmTPC7_EML_SPMU_BASE
};
static int gaudi_coresight_timeout(struct hl_device *hdev, u64 addr,
int position, bool up)
{
int rc;
u32 val;
rc = hl_poll_timeout(
hdev,
addr,
val,
up ? val & BIT(position) : !(val & BIT(position)),
1000,
CORESIGHT_TIMEOUT_USEC);
if (rc) {
dev_err(hdev->dev,
"Timeout while waiting for coresight, addr: 0x%llx, position: %d, up: %d\n",
addr, position, up);
return -EFAULT;
}
return 0;
}
static int gaudi_config_stm(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_stm *input;
u64 base_reg;
u32 frequency;
int rc;
if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
dev_err(hdev->dev, "Invalid register index in STM\n");
return -EINVAL;
}
base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
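/*
 * Offset 0xFB0 is the standard CoreSight Lock Access Register (LAR);
 * writing the unlock key makes the component's registers writable.
 */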
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
WREG32(base_reg + 0xE80, 0x80004);
WREG32(base_reg + 0xD64, 7);
WREG32(base_reg + 0xD60, 0);
WREG32(base_reg + 0xD00, lower_32_bits(input->he_mask));
WREG32(base_reg + 0xD60, 1);
WREG32(base_reg + 0xD00, upper_32_bits(input->he_mask));
WREG32(base_reg + 0xE70, 0x10);
WREG32(base_reg + 0xE60, 0);
WREG32(base_reg + 0xE00, lower_32_bits(input->sp_mask));
WREG32(base_reg + 0xEF4, input->id);
WREG32(base_reg + 0xDF4, 0x80);
frequency = hdev->asic_prop.psoc_timestamp_frequency;
if (frequency == 0)
frequency = input->frequency;
WREG32(base_reg + 0xE8C, frequency);
WREG32(base_reg + 0xE90, 0x1F00);
/* SW-2176 - software workaround for a hardware bug */
if ((CFG_BASE + base_reg) >= mmDMA_CH_0_CS_STM_BASE &&
(CFG_BASE + base_reg) <= mmDMA_CH_7_CS_STM_BASE) {
WREG32(base_reg + 0xE68, 0xffff8005);
WREG32(base_reg + 0xE6C, 0x0);
}
WREG32(base_reg + 0xE80, 0x23 | (input->id << 16));
} else {
WREG32(base_reg + 0xE80, 4);
WREG32(base_reg + 0xD64, 0);
WREG32(base_reg + 0xD60, 1);
WREG32(base_reg + 0xD00, 0);
WREG32(base_reg + 0xD20, 0);
WREG32(base_reg + 0xD60, 0);
WREG32(base_reg + 0xE20, 0);
WREG32(base_reg + 0xE00, 0);
WREG32(base_reg + 0xDF4, 0x80);
WREG32(base_reg + 0xE70, 0);
WREG32(base_reg + 0xE60, 0);
WREG32(base_reg + 0xE64, 0);
WREG32(base_reg + 0xE8C, 0);
rc = gaudi_coresight_timeout(hdev, base_reg + 0xE80, 23, false);
if (rc) {
dev_err(hdev->dev,
"Failed to disable STM on timeout, error %d\n",
rc);
return rc;
}
WREG32(base_reg + 0xE80, 4);
}
return 0;
}
static int gaudi_config_etf(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_etf *input;
u64 base_reg;
u32 val;
int rc;
if (params->reg_idx >= ARRAY_SIZE(debug_etf_regs)) {
dev_err(hdev->dev, "Invalid register index in ETF\n");
return -EINVAL;
}
base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
val = RREG32(base_reg + 0x304);
val |= 0x1000;
WREG32(base_reg + 0x304, val);
val |= 0x40;
WREG32(base_reg + 0x304, val);
rc = gaudi_coresight_timeout(hdev, base_reg + 0x304, 6, false);
if (rc) {
dev_err(hdev->dev,
"Failed to %s ETF on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
rc = gaudi_coresight_timeout(hdev, base_reg + 0xC, 2, true);
if (rc) {
dev_err(hdev->dev,
"Failed to %s ETF on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
WREG32(base_reg + 0x20, 0);
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
WREG32(base_reg + 0x34, 0x3FFC);
WREG32(base_reg + 0x28, input->sink_mode);
WREG32(base_reg + 0x304, 0x4001);
WREG32(base_reg + 0x308, 0xA);
WREG32(base_reg + 0x20, 1);
} else {
WREG32(base_reg + 0x34, 0);
WREG32(base_reg + 0x28, 0);
WREG32(base_reg + 0x304, 0);
}
return 0;
}
static bool gaudi_etr_validate_address(struct hl_device *hdev, u64 addr,
u64 size, bool *is_host)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi_device *gaudi = hdev->asic_specific;
/* maximum address length is 50 bits */
if (addr >> 50) {
dev_err(hdev->dev,
"ETR buffer address shouldn't exceed 50 bits\n");
return false;
}
if (addr > (addr + size)) {
dev_err(hdev->dev,
"ETR buffer size %llu overflow\n", size);
return false;
}
/* PMMU and HPMMU addresses are equal, check only one of them */
if ((gaudi->hw_cap_initialized & HW_CAP_MMU) &&
hl_mem_area_inside_range(addr, size,
prop->pmmu.start_addr,
prop->pmmu.end_addr)) {
*is_host = true;
return true;
}
if (hl_mem_area_inside_range(addr, size,
prop->dram_user_base_address,
prop->dram_end_address))
return true;
if (hl_mem_area_inside_range(addr, size,
prop->sram_user_base_address,
prop->sram_end_address))
return true;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
dev_err(hdev->dev, "ETR buffer should be in SRAM/DRAM\n");
return false;
}
static int gaudi_config_etr(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_etr *input;
u64 msb;
u32 val;
int rc;
WREG32(mmPSOC_ETR_LAR, CORESIGHT_UNLOCK);
val = RREG32(mmPSOC_ETR_FFCR);
val |= 0x1000;
WREG32(mmPSOC_ETR_FFCR, val);
val |= 0x40;
WREG32(mmPSOC_ETR_FFCR, val);
rc = gaudi_coresight_timeout(hdev, mmPSOC_ETR_FFCR, 6, false);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
rc = gaudi_coresight_timeout(hdev, mmPSOC_ETR_STS, 2, true);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
WREG32(mmPSOC_ETR_CTL, 0);
if (params->enable) {
bool is_host = false;
input = params->input;
if (!input)
return -EINVAL;
if (input->buffer_size == 0) {
dev_err(hdev->dev,
"ETR buffer size should be bigger than 0\n");
return -EINVAL;
}
if (!gaudi_etr_validate_address(hdev,
input->buffer_address, input->buffer_size,
&is_host)) {
dev_err(hdev->dev, "ETR buffer address is invalid\n");
return -EINVAL;
}
msb = upper_32_bits(input->buffer_address) >> 8;
msb &= PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK;
WREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR, msb);
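/*
 * Bits 49:40 of the 50-bit buffer address live in the global trace-address
 * register; bits 39:0 are programmed below through DBAHI/DBALO.
 */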
WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
WREG32(mmPSOC_ETR_MODE, input->sink_mode);
if (!hdev->asic_prop.fw_security_enabled) {
/* make ETR not privileged */
val = FIELD_PREP(
PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK, 0);
/* make ETR non-secured (inverted logic) */
val |= FIELD_PREP(
PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK, 1);
/*
* Workaround for H3 #HW-2075 bug: use small data
* chunks
*/
val |= FIELD_PREP(PSOC_ETR_AXICTL_WRBURSTLEN_MASK,
is_host ? 0 : 7);
WREG32(mmPSOC_ETR_AXICTL, val);
}
WREG32(mmPSOC_ETR_DBALO,
lower_32_bits(input->buffer_address));
WREG32(mmPSOC_ETR_DBAHI,
upper_32_bits(input->buffer_address));
WREG32(mmPSOC_ETR_FFCR, 3);
WREG32(mmPSOC_ETR_PSCR, 0xA);
WREG32(mmPSOC_ETR_CTL, 1);
} else {
WREG32(mmPSOC_ETR_BUFWM, 0);
WREG32(mmPSOC_ETR_RSZ, 0x400);
WREG32(mmPSOC_ETR_DBALO, 0);
WREG32(mmPSOC_ETR_DBAHI, 0);
WREG32(mmPSOC_ETR_PSCR, 0);
WREG32(mmPSOC_ETR_MODE, 0);
WREG32(mmPSOC_ETR_FFCR, 0);
if (params->output_size >= sizeof(u64)) {
u32 rwp, rwphi;
/*
* The trace buffer address is 50 bits wide. The end of
* the buffer is set in the RWP register (lower 32
* bits), and in the RWPHI register (upper 8 bits).
* The 10 msb of the 50-bit address are stored in a
* global configuration register.
*/
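/*
 * Worked example (hypothetical values): msb = 0x155, rwphi = 0xAA and
 * rwp = 0xDEADBEEF reconstruct to (0x155ULL << 40) | (0xAAULL << 32) |
 * 0xDEADBEEF = 0x155AADEADBEEF.
 */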
rwp = RREG32(mmPSOC_ETR_RWP);
rwphi = RREG32(mmPSOC_ETR_RWPHI) & 0xff;
msb = RREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR) &
PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK;
*(u64 *) params->output = ((u64) msb << 40) |
((u64) rwphi << 32) | rwp;
}
}
return 0;
}
static int gaudi_config_funnel(struct hl_device *hdev,
struct hl_debug_params *params)
{
u64 base_reg;
if (params->reg_idx >= ARRAY_SIZE(debug_funnel_regs)) {
dev_err(hdev->dev, "Invalid register index in FUNNEL\n");
return -EINVAL;
}
base_reg = debug_funnel_regs[params->reg_idx] - CFG_BASE;
WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
WREG32(base_reg, params->enable ? 0x33F : 0);
return 0;
}
static int gaudi_config_bmon(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_bmon *input;
u64 base_reg;
if (params->reg_idx >= ARRAY_SIZE(debug_bmon_regs)) {
dev_err(hdev->dev, "Invalid register index in BMON\n");
return -EINVAL;
}
base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
WREG32(base_reg + 0x104, 1);
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
WREG32(base_reg + 0x200, lower_32_bits(input->start_addr0));
WREG32(base_reg + 0x204, upper_32_bits(input->start_addr0));
WREG32(base_reg + 0x208, lower_32_bits(input->addr_mask0));
WREG32(base_reg + 0x20C, upper_32_bits(input->addr_mask0));
WREG32(base_reg + 0x240, lower_32_bits(input->start_addr1));
WREG32(base_reg + 0x244, upper_32_bits(input->start_addr1));
WREG32(base_reg + 0x248, lower_32_bits(input->addr_mask1));
WREG32(base_reg + 0x24C, upper_32_bits(input->addr_mask1));
WREG32(base_reg + 0x224, 0);
WREG32(base_reg + 0x234, 0);
WREG32(base_reg + 0x30C, input->bw_win);
WREG32(base_reg + 0x308, input->win_capture);
WREG32(base_reg + 0x700, 0xA000B00 | (input->id << 12));
WREG32(base_reg + 0x708, 0xA000A00 | (input->id << 12));
WREG32(base_reg + 0x70C, 0xA000C00 | (input->id << 12));
WREG32(base_reg + 0x100, 0x11);
WREG32(base_reg + 0x304, 0x1);
} else {
WREG32(base_reg + 0x200, 0);
WREG32(base_reg + 0x204, 0);
WREG32(base_reg + 0x208, 0xFFFFFFFF);
WREG32(base_reg + 0x20C, 0xFFFFFFFF);
WREG32(base_reg + 0x240, 0);
WREG32(base_reg + 0x244, 0);
WREG32(base_reg + 0x248, 0xFFFFFFFF);
WREG32(base_reg + 0x24C, 0xFFFFFFFF);
WREG32(base_reg + 0x224, 0xFFFFFFFF);
WREG32(base_reg + 0x234, 0x1070F);
WREG32(base_reg + 0x30C, 0);
WREG32(base_reg + 0x308, 0xFFFF);
WREG32(base_reg + 0x700, 0xA000B00);
WREG32(base_reg + 0x708, 0xA000A00);
WREG32(base_reg + 0x70C, 0xA000C00);
WREG32(base_reg + 0x100, 1);
WREG32(base_reg + 0x304, 0);
WREG32(base_reg + 0x104, 0);
}
return 0;
}
static int gaudi_config_spmu(struct hl_device *hdev,
struct hl_debug_params *params)
{
u64 base_reg;
struct hl_debug_params_spmu *input = params->input;
u64 *output;
u32 output_arr_len;
u32 events_num;
u32 overflow_idx;
u32 cycle_cnt_idx;
int i;
if (params->reg_idx >= ARRAY_SIZE(debug_spmu_regs)) {
dev_err(hdev->dev, "Invalid register index in SPMU\n");
return -EINVAL;
}
base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
if (input->event_types_num < 3) {
dev_err(hdev->dev,
"not enough event types values for SPMU enable\n");
return -EINVAL;
}
if (input->event_types_num > SPMU_MAX_COUNTERS) {
dev_err(hdev->dev,
"too many event types values for SPMU enable\n");
return -EINVAL;
}
WREG32(base_reg + 0xE04, 0x41013046);
WREG32(base_reg + 0xE04, 0x41013040);
for (i = 0 ; i < input->event_types_num ; i++)
WREG32(base_reg + SPMU_EVENT_TYPES_OFFSET + i * 4,
input->event_types[i]);
WREG32(base_reg + 0xE04, 0x41013041);
WREG32(base_reg + 0xC00, 0x8000003F);
} else {
output = params->output;
output_arr_len = params->output_size / 8;
events_num = output_arr_len - 2;
overflow_idx = output_arr_len - 2;
cycle_cnt_idx = output_arr_len - 1;
if (!output)
return -EINVAL;
if (output_arr_len < 3) {
dev_err(hdev->dev,
"not enough values for SPMU disable\n");
return -EINVAL;
}
if (events_num > SPMU_MAX_COUNTERS) {
dev_err(hdev->dev,
"too many events values for SPMU disable\n");
return -EINVAL;
}
WREG32(base_reg + 0xE04, 0x41013040);
for (i = 0 ; i < events_num ; i++)
output[i] = RREG32(base_reg + i * 8);
output[overflow_idx] = RREG32(base_reg + 0xCC0);
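/*
 * The 64-bit cycle counter is read as two 32-bit halves: the high word
 * at offset 0xFC and the low word at 0xF8.
 */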
output[cycle_cnt_idx] = RREG32(base_reg + 0xFC);
output[cycle_cnt_idx] <<= 32;
output[cycle_cnt_idx] |= RREG32(base_reg + 0xF8);
WREG32(base_reg + 0xCC0, 0);
}
return 0;
}
int gaudi_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data)
{
struct hl_debug_params *params = data;
int rc = 0;
switch (params->op) {
case HL_DEBUG_OP_STM:
rc = gaudi_config_stm(hdev, params);
break;
case HL_DEBUG_OP_ETF:
rc = gaudi_config_etf(hdev, params);
break;
case HL_DEBUG_OP_ETR:
rc = gaudi_config_etr(hdev, params);
break;
case HL_DEBUG_OP_FUNNEL:
rc = gaudi_config_funnel(hdev, params);
break;
case HL_DEBUG_OP_BMON:
rc = gaudi_config_bmon(hdev, params);
break;
case HL_DEBUG_OP_SPMU:
rc = gaudi_config_spmu(hdev, params);
break;
case HL_DEBUG_OP_TIMESTAMP:
/* Do nothing as this opcode is deprecated */
break;
default:
dev_err(hdev->dev, "Unknown coresight id %d\n", params->op);
return -EINVAL;
}
/* Perform read from the device to flush all configuration */
RREG32(mmHW_STATE);
return rc;
}
void gaudi_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_debug_params params = {};
int i, rc;
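/*
 * params is zero-initialized, so enable == false and each config helper
 * below takes its disable path, halting the ETFs and the ETR.
 */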
for (i = GAUDI_ETF_FIRST ; i <= GAUDI_ETF_LAST ; i++) {
params.reg_idx = i;
rc = gaudi_config_etf(hdev, &params);
if (rc)
dev_err(hdev->dev, "halt ETF failed, %d/%d\n", rc, i);
}
rc = gaudi_config_etr(hdev, &params);
if (rc)
dev_err(hdev->dev, "halt ETR failed, %d\n", rc);
}
| linux-master | drivers/accel/habanalabs/gaudi/gaudi_coresight.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2018 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "gaudiP.h"
#include "../include/gaudi/asic_reg/gaudi_regs.h"
#define GAUDI_NUMBER_OF_LBW_RR_REGS 28
#define GAUDI_NUMBER_OF_HBW_RR_REGS 24
#define GAUDI_NUMBER_OF_LBW_RANGES 10
static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
mmDMA_IF_W_S_SOB_HIT_WPROT,
mmDMA_IF_W_S_DMA0_HIT_WPROT,
mmDMA_IF_W_S_DMA1_HIT_WPROT,
mmDMA_IF_E_S_SOB_HIT_WPROT,
mmDMA_IF_E_S_DMA0_HIT_WPROT,
mmDMA_IF_E_S_DMA1_HIT_WPROT,
mmDMA_IF_W_N_SOB_HIT_WPROT,
mmDMA_IF_W_N_DMA0_HIT_WPROT,
mmDMA_IF_W_N_DMA1_HIT_WPROT,
mmDMA_IF_E_N_SOB_HIT_WPROT,
mmDMA_IF_E_N_DMA0_HIT_WPROT,
mmDMA_IF_E_N_DMA1_HIT_WPROT,
mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AW,
mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AW,
mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AW,
mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AW,
mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AW,
mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AW,
mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AW,
mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AW,
mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AW,
mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AW,
mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AW,
mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AW,
mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AW,
mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AW,
mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AW,
mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AW,
};
static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
mmDMA_IF_W_S_SOB_HIT_RPROT,
mmDMA_IF_W_S_DMA0_HIT_RPROT,
mmDMA_IF_W_S_DMA1_HIT_RPROT,
mmDMA_IF_E_S_SOB_HIT_RPROT,
mmDMA_IF_E_S_DMA0_HIT_RPROT,
mmDMA_IF_E_S_DMA1_HIT_RPROT,
mmDMA_IF_W_N_SOB_HIT_RPROT,
mmDMA_IF_W_N_DMA0_HIT_RPROT,
mmDMA_IF_W_N_DMA1_HIT_RPROT,
mmDMA_IF_E_N_SOB_HIT_RPROT,
mmDMA_IF_E_N_DMA0_HIT_RPROT,
mmDMA_IF_E_N_DMA1_HIT_RPROT,
mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AR,
mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AR,
mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AR,
mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AR,
mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AR,
mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AR,
mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AR,
mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AR,
mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AR,
mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AR,
mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AR,
mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AR,
mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AR,
mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AR,
mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AR,
mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AR,
};
static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
mmDMA_IF_W_S_SOB_MIN_WPROT_0,
mmDMA_IF_W_S_DMA0_MIN_WPROT_0,
mmDMA_IF_W_S_DMA1_MIN_WPROT_0,
mmDMA_IF_E_S_SOB_MIN_WPROT_0,
mmDMA_IF_E_S_DMA0_MIN_WPROT_0,
mmDMA_IF_E_S_DMA1_MIN_WPROT_0,
mmDMA_IF_W_N_SOB_MIN_WPROT_0,
mmDMA_IF_W_N_DMA0_MIN_WPROT_0,
mmDMA_IF_W_N_DMA1_MIN_WPROT_0,
mmDMA_IF_E_N_SOB_MIN_WPROT_0,
mmDMA_IF_E_N_DMA0_MIN_WPROT_0,
mmDMA_IF_E_N_DMA1_MIN_WPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0,
mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0,
mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0,
mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0,
mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0,
mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0,
mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0,
mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0,
mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0,
mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0,
mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0,
mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0,
mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0,
mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0,
mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0,
mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0,
};
static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
mmDMA_IF_W_S_SOB_MAX_WPROT_0,
mmDMA_IF_W_S_DMA0_MAX_WPROT_0,
mmDMA_IF_W_S_DMA1_MAX_WPROT_0,
mmDMA_IF_E_S_SOB_MAX_WPROT_0,
mmDMA_IF_E_S_DMA0_MAX_WPROT_0,
mmDMA_IF_E_S_DMA1_MAX_WPROT_0,
mmDMA_IF_W_N_SOB_MAX_WPROT_0,
mmDMA_IF_W_N_DMA0_MAX_WPROT_0,
mmDMA_IF_W_N_DMA1_MAX_WPROT_0,
mmDMA_IF_E_N_SOB_MAX_WPROT_0,
mmDMA_IF_E_N_DMA0_MAX_WPROT_0,
mmDMA_IF_E_N_DMA1_MAX_WPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0,
mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0,
mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0,
mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0,
mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0,
mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0,
mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0,
mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0,
mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0,
mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0,
mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0,
mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0,
mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0,
mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0,
mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0,
mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0,
};
static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
mmDMA_IF_W_S_SOB_MIN_RPROT_0,
mmDMA_IF_W_S_DMA0_MIN_RPROT_0,
mmDMA_IF_W_S_DMA1_MIN_RPROT_0,
mmDMA_IF_E_S_SOB_MIN_RPROT_0,
mmDMA_IF_E_S_DMA0_MIN_RPROT_0,
mmDMA_IF_E_S_DMA1_MIN_RPROT_0,
mmDMA_IF_W_N_SOB_MIN_RPROT_0,
mmDMA_IF_W_N_DMA0_MIN_RPROT_0,
mmDMA_IF_W_N_DMA1_MIN_RPROT_0,
mmDMA_IF_E_N_SOB_MIN_RPROT_0,
mmDMA_IF_E_N_DMA0_MIN_RPROT_0,
mmDMA_IF_E_N_DMA1_MIN_RPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0,
mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0,
mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0,
mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0,
mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0,
mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0,
mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0,
mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0,
mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0,
mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0,
mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0,
mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0,
mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0,
mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0,
mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0,
mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0,
};
static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
mmDMA_IF_W_S_SOB_MAX_RPROT_0,
mmDMA_IF_W_S_DMA0_MAX_RPROT_0,
mmDMA_IF_W_S_DMA1_MAX_RPROT_0,
mmDMA_IF_E_S_SOB_MAX_RPROT_0,
mmDMA_IF_E_S_DMA0_MAX_RPROT_0,
mmDMA_IF_E_S_DMA1_MAX_RPROT_0,
mmDMA_IF_W_N_SOB_MAX_RPROT_0,
mmDMA_IF_W_N_DMA0_MAX_RPROT_0,
mmDMA_IF_W_N_DMA1_MAX_RPROT_0,
mmDMA_IF_E_N_SOB_MAX_RPROT_0,
mmDMA_IF_E_N_DMA0_MAX_RPROT_0,
mmDMA_IF_E_N_DMA1_MAX_RPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0,
mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0,
mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0,
mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0,
mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0,
mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0,
mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0,
mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0,
mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0,
mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0,
mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0,
mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0,
mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0,
mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0,
mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0,
mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0,
};
static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AW,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AW,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AW,
mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AW,
mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AW,
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AW,
mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AW,
mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AW,
mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AW,
mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AW,
mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AW,
mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AW,
mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AW,
mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AW,
mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AW,
mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AW,
mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AW,
mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AW,
mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AW,
mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AW,
mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AW,
mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AW,
mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AW,
mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AW
};
static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AR,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AR,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AR,
mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AR,
mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AR,
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AR,
mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AR,
mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AR,
mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AR,
mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AR,
mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AR,
mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AR,
mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AR,
mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AR,
mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AR,
mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AR,
mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AR,
mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AR,
mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AR,
mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AR,
mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AR,
mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AR,
mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AR,
mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AR
};
static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0,
mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0,
mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0,
mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0,
mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0,
mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0,
mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0,
mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0,
mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0,
mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0,
mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0,
mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0,
mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0,
mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0,
mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0,
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0
};
static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0,
mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0,
mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0,
mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0,
mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0,
mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0,
mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0,
mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0,
mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0,
mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0,
mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0,
mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0,
mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0,
mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0,
mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0,
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0
};
static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0,
mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0,
mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0,
mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0,
mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0,
mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0,
mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0,
mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0,
mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0,
mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0,
mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0,
mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0,
mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0,
mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0,
mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0,
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0
};
static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0,
mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0,
mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0,
mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0,
mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0,
mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0,
mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0,
mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0,
mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0,
mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0,
mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0,
mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0,
mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0,
mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0,
mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0,
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0
};
static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0,
mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0,
mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0,
mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0,
mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0,
mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0,
mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0,
mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0,
mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0,
mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0,
mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0,
mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0,
mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0,
mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0,
mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0,
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0
};
static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0,
mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0,
mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0,
mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0,
mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0,
mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0,
mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0,
mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0,
mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0,
mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0,
mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0,
mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0,
mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0,
mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0,
mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0,
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0
};
static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0,
mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0,
mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0,
mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0,
mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0,
mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0,
mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0,
mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0,
mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0,
mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0,
mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0,
mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0,
mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0,
mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0,
mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0,
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0
};
static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0,
mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0,
mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0,
mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0,
mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0,
mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0,
mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0,
mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0,
mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0,
mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0,
mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0,
mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0,
mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0,
mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0,
mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0,
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0
};
/**
* gaudi_pb_set_block - set the given block as protected
*
* @hdev: pointer to hl_device structure
* @base: block base address
*/
static void gaudi_pb_set_block(struct hl_device *hdev, u64 base)
{
u32 pb_addr = base - CFG_BASE + PROT_BITS_OFFS;
while (pb_addr & 0xFFF) {
WREG32(pb_addr, 0);
pb_addr += 4;
}
}
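/*
 * The helpers below first protect a whole block with gaudi_pb_set_block()
 * and then open up selected registers: a mask is built of the registers
 * that must stay privileged, and ~mask is written to the protection word,
 * which appears to leave the masked registers protected (bit 0) while
 * exposing the rest of the window to the user (bit 1).
 */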
static void gaudi_init_mme_protection_bits(struct hl_device *hdev)
{
u32 pb_addr, mask;
u8 word_offset;
gaudi_pb_set_block(hdev, mmMME0_ACC_BASE);
gaudi_pb_set_block(hdev, mmMME0_SBAB_BASE);
gaudi_pb_set_block(hdev, mmMME0_PRTN_BASE);
gaudi_pb_set_block(hdev, mmMME1_ACC_BASE);
gaudi_pb_set_block(hdev, mmMME1_SBAB_BASE);
gaudi_pb_set_block(hdev, mmMME1_PRTN_BASE);
gaudi_pb_set_block(hdev, mmMME2_ACC_BASE);
gaudi_pb_set_block(hdev, mmMME2_SBAB_BASE);
gaudi_pb_set_block(hdev, mmMME2_PRTN_BASE);
gaudi_pb_set_block(hdev, mmMME3_ACC_BASE);
gaudi_pb_set_block(hdev, mmMME3_SBAB_BASE);
gaudi_pb_set_block(hdev, mmMME3_PRTN_BASE);
WREG32(mmMME0_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmMME1_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmMME2_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmMME3_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmMME0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmMME2_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
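/*
 * Protection-bit addressing, as used throughout this file: each 32-bit
 * word at block_base + PROT_BITS_OFFS covers one 128-byte window of
 * registers, one bit per 4-byte register. Hence the word offset is
 * ((reg & PROT_BITS_OFFS) >> 7) << 2 and the bit index is
 * (reg & 0x7F) >> 2.
 */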
pb_addr = (mmMME0_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_CTRL_RESET & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_QM_STALL & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_INTR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_INTR_MASK & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_LOG_SHADOW & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PCU_RL_TH & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PCU_RL_MIN & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
mask |= 1U << ((mmMME0_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmMME0_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmMME0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmMME0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmMME0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME1_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME1_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME1_CTRL_RESET & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_QM_STALL & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_INTR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_INTR_MASK & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_LOG_SHADOW & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PCU_RL_TH & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PCU_RL_MIN & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
mask |= 1U << ((mmMME1_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME1_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME1_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmMME1_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
/* MME 1 is slave, hence its whole QM block is protected (with RR) */
pb_addr = (mmMME2_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME2_CTRL_RESET & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_QM_STALL & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_INTR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_INTR_MASK & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_LOG_SHADOW & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PCU_RL_TH & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PCU_RL_MIN & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
mask |= 1U << ((mmMME2_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmMME2_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME2_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME2_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME2_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME2_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME2_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME2_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME2_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmMME2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmMME2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmMME2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME3_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME3_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmMME3_CTRL_RESET & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_QM_STALL & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_INTR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_INTR_MASK & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_LOG_SHADOW & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PCU_RL_TH & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PCU_RL_MIN & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
mask |= 1U << ((mmMME3_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME3_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME3_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmMME3_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
/* MME 3 is slave, hence its whole QM block is protected (with RR) */
}
static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
{
u32 pb_addr, mask;
u8 word_offset;
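/*
 * When firmware security is not enabled, the driver itself fully
 * protects the DMA_IF blocks and the DMA PLL.
 */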
if (!hdev->asic_prop.fw_security_enabled) {
gaudi_pb_set_block(hdev, mmDMA_IF_E_S_BASE);
gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH0_BASE);
gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH1_BASE);
gaudi_pb_set_block(hdev, mmDMA_E_PLL_BASE);
gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_BASE);
gaudi_pb_set_block(hdev, mmDMA_IF_W_N_BASE);
gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH0_BASE);
gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH1_BASE);
gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_BASE);
gaudi_pb_set_block(hdev, mmDMA_IF_E_N_BASE);
gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH0_BASE);
gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH1_BASE);
gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_BASE);
}
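/* Protect the last protection word of every DMA QM and CORE block */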
WREG32(mmDMA0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA1_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA2_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA3_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA4_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA5_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA6_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA7_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA0_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA1_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA2_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA3_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA4_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA5_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA6_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmDMA7_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
pb_addr = (mmDMA0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
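/*
 * Note: in this block (and in the ARB_MST_CHOISE_PUSH_OFST_23 block
 * further down, as well as their DMA1/DMA2 copies below) the anchor
 * register is used only to locate the protection word; it is not itself
 * added to the mask.
 */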
pb_addr = (mmDMA0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
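/* DMA1 QM: same protection-bits sequence, applied to the DMA1 register set. */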
pb_addr = (mmDMA1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA1_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
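/* DMA2 QM: same sequence again for the DMA2 register set. */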
pb_addr = (mmDMA2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
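/* DMA3 QM: same sequence again for the DMA3 register set. */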
pb_addr = (mmDMA3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA3_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA4_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA5_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
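/*
 * DMA6 QM block, same pattern as the DMA5 QM block above: each run of
 * registers maps to one protection-bits word (one bit per 32-bit register,
 * 32 registers / 128 bytes per word). The bits for the registers listed
 * below are collected into "mask" and the complemented mask is then written
 * into that word, flipping the protection setting of exactly those
 * registers relative to the rest of the word.
 */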
pb_addr = (mmDMA6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA6_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
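/*
 * DMA7 QM block: same register list and protection-bit layout as the
 * DMA5/DMA6 QM blocks above.
 */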
pb_addr = (mmDMA7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset =
((mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA7_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
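/*
 * DMA core (non-QM) register blocks: the same protection-bits scheme is
 * applied per DMA engine to its CORE register file (config, protection
 * props, read/write limits, error reporting and debug counters).
 */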
pb_addr = (mmDMA0_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_CORE_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_CORE_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_SECURE_PROPS & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA0_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_RD_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_RD_ARCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_RD_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_WR_MAX_AWID & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_WR_AWCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_WR_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_WR_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_CORE_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA0_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_DBG_DESC_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_DBG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
mask |= 1U << ((mmDMA0_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_CORE_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_CORE_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_SECURE_PROPS & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA1_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_RD_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_RD_ARCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_RD_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_WR_MAX_AWID & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_WR_AWCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_WR_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_WR_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_CORE_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA1_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_DBG_DESC_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_DBG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
mask |= 1U << ((mmDMA1_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_CORE_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_CORE_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_SECURE_PROPS & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA2_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_RD_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_RD_ARCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_RD_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_WR_MAX_AWID & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_WR_AWCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_WR_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_CORE_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA2_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_DBG_DESC_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_DBG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
mask |= 1U << ((mmDMA2_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_CORE_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_CORE_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_SECURE_PROPS & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA3_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_RD_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_RD_ARCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_RD_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_WR_MAX_AWID & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_WR_AWCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_WR_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_CORE_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA3_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_DBG_DESC_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_DBG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
mask |= 1U << ((mmDMA3_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_CORE_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_CORE_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_SECURE_PROPS & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA4_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_RD_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_RD_ARCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_RD_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_WR_MAX_AWID & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_WR_AWCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_WR_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_CORE_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA4_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_DBG_DESC_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_DBG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
mask |= 1U << ((mmDMA4_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
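/* DMA5 core */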
pb_addr = (mmDMA5_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_CORE_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_CORE_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_SECURE_PROPS & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA5_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_RD_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_RD_ARCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_RD_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_WR_MAX_AWID & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_WR_AWCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_WR_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_CORE_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA5_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_DBG_DESC_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_DBG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
mask |= 1U << ((mmDMA5_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
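/* DMA6 core */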
pb_addr = (mmDMA6_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_CORE_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_CORE_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_SECURE_PROPS & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA6_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_RD_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_RD_ARCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_RD_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_WR_MAX_AWID & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_WR_AWCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_WR_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_CORE_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA6_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_DBG_DESC_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_DBG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
mask |= 1U << ((mmDMA6_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
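/* DMA7 core */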
pb_addr = (mmDMA7_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_CORE_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_CORE_PROT & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_SECURE_PROPS & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmDMA7_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_RD_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_RD_ARCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_RD_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_WR_MAX_AWID & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_WR_AWCACHE & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_WR_INFLIGHTS & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_CORE_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmDMA7_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_DBG_DESC_CNT & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_DBG_STS & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
mask |= 1U << ((mmDMA7_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
}
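
/*
 * gaudi_init_nic_protection_bits() - program the protection-bit words that
 * cover the NIC QMAN register blocks.
 *
 * Layout inferred from the PROT_BITS_OFFS arithmetic used throughout this
 * file: for each 4KB register page, pb_addr points at the protection-bits
 * area of that page, word_offset selects the 32-bit protection word covering
 * the 128-byte group that holds the registers of interest, and each register
 * contributes one bit ((reg & 0x7F) >> 2) to the accumulated mask. Writing
 * ~mask gives the listed registers the opposite protection setting from the
 * other registers covered by the same word.
 */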
static void gaudi_init_nic_protection_bits(struct hl_device *hdev)
{
u32 pb_addr, mask;
u8 word_offset;
WREG32(mmNIC0_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmNIC0_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
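/* NIC0 QMAN 0 */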
pb_addr = (mmNIC0_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM0_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM0_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM0_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM0_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM0_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM0_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM0_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM0_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM0_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC0_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC0_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
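/* NIC0 QMAN 1 */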
pb_addr = (mmNIC0_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM1_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM1_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM1_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM1_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM1_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM1_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM1_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM1_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC0_QM1_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC0_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC0_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC0_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC0_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
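/* NIC1 QMANs */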
WREG32(mmNIC1_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmNIC1_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
pb_addr = (mmNIC1_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM0_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM0_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM0_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM0_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM0_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM0_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM0_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM0_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM0_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC1_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC1_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
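/* NIC1 QM1 protection bits */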
pb_addr = (mmNIC1_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM1_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM1_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM1_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM1_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM1_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM1_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM1_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM1_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC1_QM1_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC1_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC1_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC1_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC1_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
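/*
 * NIC2 QMANs: zero the protection word at offset 0x7C of each QMAN block,
 * then build the per-window masks as above (NIC2 QM0 first, then QM1).
 */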
WREG32(mmNIC2_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmNIC2_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
pb_addr = (mmNIC2_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM0_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM0_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM0_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM0_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM0_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM0_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM0_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM0_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM0_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC2_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC2_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
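	/* NIC2 QM1 protection bits */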
pb_addr = (mmNIC2_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM1_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM1_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM1_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM1_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM1_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM1_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM1_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
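	/* NIC2 QM1 ARB protection bits (ARB_CFG_0 itself is not added to the mask) */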
pb_addr = (mmNIC2_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM1_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC2_QM1_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC2_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC2_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC2_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC2_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
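	/*
	 * NIC3: zero the QM0/QM1 protection words at offset 0x7C, then
	 * program the per-register bits for both QMs.
	 */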
WREG32(mmNIC3_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmNIC3_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
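	/* NIC3 QM0 protection bits */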
pb_addr = (mmNIC3_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM0_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM0_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM0_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM0_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM0_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM0_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM0_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM0_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM0_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC3_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC3_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
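	/* NIC3 QM1 protection bits */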
pb_addr = (mmNIC3_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM1_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM1_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM1_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM1_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM1_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM1_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM1_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM1_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC3_QM1_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC3_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC3_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC3_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC3_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
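/*
 * NIC4 QM0/QM1: same protection-bits programming pattern as the NIC3 QMANs
 * above - clear the word at offset 0x7C of the block's protection-bits area,
 * then for each 128-byte register window accumulate one bit per listed
 * register and write the inverted mask into the matching protection word.
 */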
WREG32(mmNIC4_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmNIC4_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
pb_addr = (mmNIC4_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM0_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM0_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM0_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM0_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM0_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM0_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM0_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM0_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM0_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC4_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC4_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
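/* NIC4 QM1 - identical register layout and handling as NIC4 QM0 above */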
pb_addr = (mmNIC4_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM1_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM1_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM1_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM1_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM1_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM1_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM1_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM1_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmNIC4_QM1_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC4_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmNIC4_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmNIC4_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmNIC4_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
}
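
/*
 * gaudi_init_tpc_protection_bits - program the protection bits of the TPC
 * QMAN and TPC CFG blocks, using the same scheme as the NIC QMANs above:
 * build a bitmask of register offsets within each 128-byte window and write
 * its complement into the block's protection-bits area (PROT_BITS_OFFS).
 */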
static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
{
u32 pb_addr, mask;
u8 word_offset;
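
/* The TPC E2E credit blocks are configured here only when the firmware
 * does not handle the security configuration itself.
 */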
if (!hdev->asic_prop.fw_security_enabled) {
gaudi_pb_set_block(hdev, mmTPC0_E2E_CRED_BASE);
gaudi_pb_set_block(hdev, mmTPC1_E2E_CRED_BASE);
gaudi_pb_set_block(hdev, mmTPC2_E2E_CRED_BASE);
gaudi_pb_set_block(hdev, mmTPC3_E2E_CRED_BASE);
gaudi_pb_set_block(hdev, mmTPC4_E2E_CRED_BASE);
gaudi_pb_set_block(hdev, mmTPC5_E2E_CRED_BASE);
gaudi_pb_set_block(hdev, mmTPC6_E2E_CRED_BASE);
gaudi_pb_set_block(hdev, mmTPC7_E2E_CRED_BASE);
}
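/* TPC0: QMAN and CFG protection bits */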
WREG32(mmTPC0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmTPC0_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
pb_addr = (mmTPC0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC0_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC0_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC0_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC0_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC0_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC0_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC0_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC0_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC0_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_TPC_INTR_MASK & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_WQ_CREDITS & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_ARUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_ARUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_AWUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_AWUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC0_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
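/* Repeat the same protection-bit programming for the TPC1 QM and CFG blocks. */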
WREG32(mmTPC1_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmTPC1_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
pb_addr = (mmTPC1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC1_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC1_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC1_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC1_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC1_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC1_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC1_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC1_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC1_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC1_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC1_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC1_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC1_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC1_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_TPC_INTR_MASK & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_WQ_CREDITS & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_ARUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_ARUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_AWUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_AWUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC1_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
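/* Repeat the same protection-bit programming for the TPC2 QM and CFG blocks. */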
WREG32(mmTPC2_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmTPC2_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
pb_addr = (mmTPC2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC2_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC2_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC2_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC2_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC2_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC2_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC2_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC2_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC2_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_TPC_INTR_MASK & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_WQ_CREDITS & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_ARUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_ARUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_AWUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_AWUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC2_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
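/* TPC3: same protection-bits pattern for the TPC3 QM and CFG blocks */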
WREG32(mmTPC3_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmTPC3_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
pb_addr = (mmTPC3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC3_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC3_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC3_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC3_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC3_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC3_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC3_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC3_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC3_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC3_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC3_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC3_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC3_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC3_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_TPC_INTR_MASK & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_WQ_CREDITS & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_ARUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_ARUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_AWUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_AWUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC3_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
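/* TPC4: same protection-bits pattern for the TPC4 QM and CFG blocks */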
WREG32(mmTPC4_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmTPC4_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
pb_addr = (mmTPC4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC4_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC4_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC4_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC4_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC4_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC4_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC4_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC4_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC4_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC4_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC4_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC4_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC4_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC4_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_TPC_INTR_MASK & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_WQ_CREDITS & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_ARUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_ARUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_AWUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_AWUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC4_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
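/* TPC5: same protection-bits pattern for the TPC5 QM and CFG blocks */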
WREG32(mmTPC5_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmTPC5_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
pb_addr = (mmTPC5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC5_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC5_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC5_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC5_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC5_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC5_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC5_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC5_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC5_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC5_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC5_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC5_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC5_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC5_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_TPC_INTR_MASK & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_WQ_CREDITS & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_ARUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_ARUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_AWUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_AWUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC5_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
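	/*
	 * TPC6 QM/CFG: same protection-bits sequence as for TPC5 above. The
	 * next two writes store 0 in the last protection word (offset 0x7C)
	 * of each block, which presumably keeps the protection-bits
	 * registers themselves privileged.
	 */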
WREG32(mmTPC6_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmTPC6_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
pb_addr = (mmTPC6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC6_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC6_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC6_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC6_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC6_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC6_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC6_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC6_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC6_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC6_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC6_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC6_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC6_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC6_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_TPC_INTR_MASK & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_WQ_CREDITS & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_ARUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_ARUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_AWUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_AWUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC6_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
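	/* TPC7 QM/CFG: same protection-bits sequence as for TPC5/TPC6 above */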
WREG32(mmTPC7_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
WREG32(mmTPC7_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
pb_addr = (mmTPC7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC7_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_STS1_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC7_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_SIZE_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_SIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_SIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_SIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_PI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_PI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_PI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_PI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_CI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_CI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_CI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_CI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_CFG0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_CFG0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_CFG0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_CFG0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_CFG1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_CFG1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_CFG1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_CFG1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC7_QM_PQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_PQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_STS0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_STS0_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_STS0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_STS0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_STS1_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_STS1_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_STS1_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_STS1_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC7_QM_CQ_CTL_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_CTL_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_CTL_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_CTL_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC7_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC7_QM_CP_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC7_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_DBG_0_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC7_QM_CP_DBG_0_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_DBG_0_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_DBG_0_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC7_QM_ARB_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
mask = 1U << ((mmTPC7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC7_QM_ARB_STATE_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MSG_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CGM_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CGM_STS & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC7_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_AXCACHE & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_IND_GW_APB_CFG & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC7_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC7_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1U << ((mmTPC7_CFG_PROT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_TPC_INTR_MASK & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_WQ_CREDITS & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_ARUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_ARUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_AWUSER_LO & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_AWUSER_HI & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1U << ((mmTPC7_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_DBGMEM_RC & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
}
/**
* gaudi_init_protection_bits - Initialize protection bits of specific registers
*
* @hdev: pointer to hl_device structure
*
* All protection bits are 1 by default, which means the registers are not
* protected. Each bit that belongs to a protected register needs to be
* cleared to 0.
*
*/
static void gaudi_init_protection_bits(struct hl_device *hdev)
{
/*
* In each 4K block of registers, the last 128 bytes are protection
* bits - total of 1024 bits, one for each register. Each bit is related
* to a specific register, by the order of the registers.
* So in order to calculate the bit that is related to a given register,
* we need to calculate its word offset and then the exact bit inside
* the word (which is 4 bytes).
*
* Register address:
*
* 31 12 11 7 6 2 1 0
* -----------------------------------------------------------------
* | Don't | word | bit location | 0 |
* | care | offset | inside word | |
* -----------------------------------------------------------------
*
* Bits 7-11 represent the word offset inside the 128 bytes.
* Bits 2-6 represent the bit location inside the word.
*
* When a bit is cleared, it means the register it represents can only
* be accessed by a secured entity. When the bit is set, any entity can
* access the register.
*
* The last 4 bytes in the block of the PBs control the security of
* the PBs themselves, so they always need to be configured to be
* secured
*/
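/* Worked example (illustrative only, hypothetical register offset): for a
* register whose low 12 address bits are 0x124,
* word_offset = ((0x124 & 0xF80) >> 7) << 2 = 8 and the bit inside the
* word is (0x124 & 0x7F) >> 2 = 9, so clearing bit 9 of the 32-bit PB
* word at (block_base + PROT_BITS_OFFS + 8) secures that register.
* The 0xF80 value is assumed to equal PROT_BITS_OFFS, as implied by the
* computations in this file.
*/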
if (!hdev->asic_prop.fw_security_enabled) {
gaudi_pb_set_block(hdev, mmIF_E_PLL_BASE);
gaudi_pb_set_block(hdev, mmMESH_W_PLL_BASE);
gaudi_pb_set_block(hdev, mmSRAM_W_PLL_BASE);
gaudi_pb_set_block(hdev, mmMESH_E_PLL_BASE);
gaudi_pb_set_block(hdev, mmSRAM_E_PLL_BASE);
}
gaudi_init_dma_protection_bits(hdev);
gaudi_init_mme_protection_bits(hdev);
gaudi_init_nic_protection_bits(hdev);
gaudi_init_tpc_protection_bits(hdev);
}
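/*
* Illustrative sketch only (not used by the driver): the repeated
* pb_addr/word_offset/mask computations above follow a single pattern,
* captured by the hypothetical helper below. The helper name is made up,
* and PROT_BITS_OFFS is assumed to be both the byte offset of the PB area
* inside each 4K block and the 0xF80 mask of address bits 11:7, as the
* surrounding code implies.
*/
static inline u32 gaudi_pb_bit_sketch(u32 reg, u32 *pb_addr, u32 *word_offset)
{
	/* 4K block base plus the protection-bits area offset */
	*pb_addr = (reg & ~0xFFF) + PROT_BITS_OFFS;
	/* Byte offset of the 32-bit PB word that holds this register's bit */
	*word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2;
	/* Bit position of this register inside that word */
	return 1U << ((reg & 0x7F) >> 2);
}
/*
* Note that WREG32(pb_addr + word_offset, ~mask) writes a full 32-bit word,
* so every bit not present in the mask is written back to 1 (unprotected).
* This appears to be why the code above ORs together the bits of all
* registers that share a PB word before issuing a single write.
*/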
static void gaudi_init_range_registers_lbw(struct hl_device *hdev)
{
u32 lbw_rng_start[GAUDI_NUMBER_OF_LBW_RANGES];
u32 lbw_rng_end[GAUDI_NUMBER_OF_LBW_RANGES];
int i, j;
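/* The values below are derived from full LBW addresses: masking with
* 0x3FFFFFF keeps the low 26 bits (the offset inside the LBW space),
* e.g. 0xFC0E8000 & 0x3FFFFFF = 0x000E8000. The -1/+1 adjustments
* (0x000E7FFF / 0x00120000, etc.) suggest the range-register compare is
* exclusive on both ends, i.e. a hit requires min < addr < max; this
* interpretation is an assumption based on the inline comments only.
*/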
lbw_rng_start[0] = (0xFC0E8000 & 0x3FFFFFF) - 1; /* 0x000E7FFF */
lbw_rng_end[0] = (0xFC11FFFF & 0x3FFFFFF) + 1; /* 0x00120000 */
lbw_rng_start[1] = (0xFC1E8000 & 0x3FFFFFF) - 1; /* 0x001E7FFF */
lbw_rng_end[1] = (0xFC48FFFF & 0x3FFFFFF) + 1; /* 0x00490000 */
lbw_rng_start[2] = (0xFC600000 & 0x3FFFFFF) - 1; /* 0x005FFFFF */
lbw_rng_end[2] = (0xFCC48FFF & 0x3FFFFFF) + 1; /* 0x00C49000 */
lbw_rng_start[3] = (0xFCC4A000 & 0x3FFFFFF) - 1; /* 0x00C49FFF */
lbw_rng_end[3] = (0xFCCDFFFF & 0x3FFFFFF) + 1; /* 0x00CE0000 */
lbw_rng_start[4] = (0xFCCE4000 & 0x3FFFFFF) - 1; /* 0x00CE3FFF */
lbw_rng_end[4] = (0xFCD1FFFF & 0x3FFFFFF) + 1; /* 0x00D20000 */
lbw_rng_start[5] = (0xFCD24000 & 0x3FFFFFF) - 1; /* 0x00D23FFF */
lbw_rng_end[5] = (0xFCD5FFFF & 0x3FFFFFF) + 1; /* 0x00D60000 */
lbw_rng_start[6] = (0xFCD64000 & 0x3FFFFFF) - 1; /* 0x00D63FFF */
lbw_rng_end[6] = (0xFCD9FFFF & 0x3FFFFFF) + 1; /* 0x00DA0000 */
lbw_rng_start[7] = (0xFCDA4000 & 0x3FFFFFF) - 1; /* 0x00DA3FFF */
lbw_rng_end[7] = (0xFCDDFFFF & 0x3FFFFFF) + 1; /* 0x00DE0000 */
lbw_rng_start[8] = (0xFCDE4000 & 0x3FFFFFF) - 1; /* 0x00DE3FFF */
lbw_rng_end[8] = (0xFCE05FFF & 0x3FFFFFF) + 1; /* 0x00E06000 */
lbw_rng_start[9] = (0xFCFC9000 & 0x3FFFFFF) - 1; /* 0x00FC8FFF */
lbw_rng_end[9] = (0xFFFFFFFE & 0x3FFFFFF) + 1; /* 0x03FFFFFF */
for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++) {
WREG32(gaudi_rr_lbw_hit_aw_regs[i],
(1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
WREG32(gaudi_rr_lbw_hit_ar_regs[i],
(1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
}
for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++)
for (j = 0 ; j < GAUDI_NUMBER_OF_LBW_RANGES ; j++) {
WREG32(gaudi_rr_lbw_min_aw_regs[i] + (j << 2),
lbw_rng_start[j]);
WREG32(gaudi_rr_lbw_min_ar_regs[i] + (j << 2),
lbw_rng_start[j]);
WREG32(gaudi_rr_lbw_max_aw_regs[i] + (j << 2),
lbw_rng_end[j]);
WREG32(gaudi_rr_lbw_max_ar_regs[i] + (j << 2),
lbw_rng_end[j]);
}
}
static void gaudi_init_range_registers_hbw(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 dram_addr_lo = lower_32_bits(DRAM_PHYS_BASE);
u32 dram_addr_hi = upper_32_bits(DRAM_PHYS_BASE);
u32 sram_addr_lo = lower_32_bits(SRAM_BASE_ADDR);
u32 sram_addr_hi = upper_32_bits(SRAM_BASE_ADDR);
u32 scratch_addr_lo = lower_32_bits(PSOC_SCRATCHPAD_ADDR);
u32 scratch_addr_hi = upper_32_bits(PSOC_SCRATCHPAD_ADDR);
u32 pcie_fw_addr_lo = lower_32_bits(PCIE_FW_SRAM_ADDR);
u32 pcie_fw_addr_hi = upper_32_bits(PCIE_FW_SRAM_ADDR);
u32 spi_addr_lo = lower_32_bits(SPI_FLASH_BASE_ADDR);
u32 spi_addr_hi = upper_32_bits(SPI_FLASH_BASE_ADDR);
int i;
/* Configure HBW RR:
* 1st range is the DRAM (first 512MB)
* 2nd range is the 1st 128 bytes in SRAM (for tensor DMA). This area
* is defined as read-only for user
* 3rd range is the PSOC scratch-pad
* 4th range is the PCIe F/W SRAM area
* 5th range is the SPI FLASH area
* 6th range is the host
*/
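/* Hedged interpretation of the base/mask pairs below (not taken from
* documentation): a set mask bit appears to mean "this address bit must
* match the base". For the DRAM range, mask_low 0xE0000000 plus
* mask_high 0x3FFFF cover bits 49:29, leaving bits 28:0 free, i.e. the
* first 512MB (2^29 bytes) mentioned above. For the SRAM range,
* mask_low 0xFFFFFF80 leaves only bits 6:0 free, i.e. the first
* 128 bytes of SRAM.
*/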
for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) {
WREG32(gaudi_rr_hbw_hit_aw_regs[i], 0x1F);
WREG32(gaudi_rr_hbw_hit_ar_regs[i], 0x1D);
}
for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) {
WREG32(gaudi_rr_hbw_base_low_aw_regs[i], dram_addr_lo);
WREG32(gaudi_rr_hbw_base_low_ar_regs[i], dram_addr_lo);
WREG32(gaudi_rr_hbw_base_high_aw_regs[i], dram_addr_hi);
WREG32(gaudi_rr_hbw_base_high_ar_regs[i], dram_addr_hi);
WREG32(gaudi_rr_hbw_mask_low_aw_regs[i], 0xE0000000);
WREG32(gaudi_rr_hbw_mask_low_ar_regs[i], 0xE0000000);
WREG32(gaudi_rr_hbw_mask_high_aw_regs[i], 0x3FFFF);
WREG32(gaudi_rr_hbw_mask_high_ar_regs[i], 0x3FFFF);
WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 4, sram_addr_lo);
WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 4, sram_addr_hi);
WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 4, 0xFFFFFF80);
WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 4, 0x3FFFF);
WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 8, scratch_addr_lo);
WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 8, scratch_addr_lo);
WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 8, scratch_addr_hi);
WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 8, scratch_addr_hi);
WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 8, 0xFFFF0000);
WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 8, 0xFFFF0000);
WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 8, 0x3FFFF);
WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 8, 0x3FFFF);
WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 12, pcie_fw_addr_lo);
WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 12, pcie_fw_addr_lo);
WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 12, pcie_fw_addr_hi);
WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 12, pcie_fw_addr_hi);
WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 12, 0xFFFF8000);
WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 12, 0xFFFF8000);
WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 12, 0x3FFFF);
WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 12, 0x3FFFF);
WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 16, spi_addr_lo);
WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 16, spi_addr_lo);
WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 16, spi_addr_hi);
WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 16, spi_addr_hi);
WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 16, 0xFE000000);
WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 16, 0xFE000000);
WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 16, 0x3FFFF);
WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 16, 0x3FFFF);
if (gaudi->hw_cap_initialized & HW_CAP_MMU)
continue;
/* Protect HOST */
WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 20, 0);
WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 20, 0);
WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 20, 0);
WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 20, 0);
WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 20, 0);
WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 20, 0);
WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 20, 0xFFF80);
WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 20, 0xFFF80);
}
}
/**
* gaudi_init_security - Initialize security model
*
* @hdev: pointer to hl_device structure
*
* Initialize the security model of the device
* That includes range registers and a protection bit per register.
*
*/
void gaudi_init_security(struct hl_device *hdev)
{
/* Due to H/W erratum GAUDI0500, we need to override the default security
* property configuration of MME SBAB and ACC to be non-privileged and
* non-secured.
*/
if (!hdev->asic_prop.fw_security_enabled) {
WREG32(mmMME0_SBAB_PROT, 0x2);
WREG32(mmMME0_ACC_PROT, 0x2);
WREG32(mmMME1_SBAB_PROT, 0x2);
WREG32(mmMME1_ACC_PROT, 0x2);
WREG32(mmMME2_SBAB_PROT, 0x2);
WREG32(mmMME2_ACC_PROT, 0x2);
WREG32(mmMME3_SBAB_PROT, 0x2);
WREG32(mmMME3_ACC_PROT, 0x2);
/*
* On RAZWI, 0 will be returned from RR and 0xBABA0BAD from PB
*/
WREG32(0xC01B28, 0x1);
}
gaudi_init_range_registers_lbw(hdev);
gaudi_init_range_registers_hbw(hdev);
gaudi_init_protection_bits(hdev);
}
void gaudi_ack_protection_bits_errors(struct hl_device *hdev)
{
}
| linux-master | drivers/accel/habanalabs/gaudi/gaudi_security.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "gaudiP.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include "../include/hw_ip/mmu/mmu_v1_1.h"
#include "../include/gaudi/gaudi_masks.h"
#include "../include/gaudi/gaudi_fw_if.h"
#include "../include/gaudi/gaudi_reg_map.h"
#include "../include/gaudi/gaudi_async_ids_map_extended.h"
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
/*
* Gaudi security scheme:
*
* 1. Host is protected by:
* - Range registers
* - MMU
*
* 2. DDR is protected by:
* - Range registers (protect the first 512MB)
*
* 3. Configuration is protected by:
* - Range registers
* - Protection bits
*
* MMU is always enabled.
*
* QMAN DMA channels 0,1 (PCI DMA):
* - DMA is not secured.
* - PQ and CQ are secured.
* - CP is secured: the driver needs to parse the CB, but WREG should be
*   allowed because of TDMA (tensor DMA). Hence, WREG is never secured.
*
* When the driver needs to use DMA it will check that Gaudi is idle, set DMA
* channel 0 to be secured, execute the DMA and change it back to not secured.
* Currently, the driver doesn't use the DMA while there are compute jobs
* running.
*
* The current use cases for the driver to use the DMA are:
* - Clear SRAM on context switch (happens on context switch when device is
* idle)
* - MMU page tables area clear (happens on init)
*
* QMAN DMA 2-7, TPC, MME, NIC:
* PQ is secured and is located on the Host (HBM CON TPC3 bug).
* CQ, CP and the engine are not secured.
*
*/
#define GAUDI_BOOT_FIT_FILE "habanalabs/gaudi/gaudi-boot-fit.itb"
#define GAUDI_LINUX_FW_FILE "habanalabs/gaudi/gaudi-fit.itb"
#define GAUDI_TPC_FW_FILE "habanalabs/gaudi/gaudi_tpc.bin"
#define GAUDI_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
#define GAUDI_RESET_TIMEOUT_MSEC 2000 /* 2000ms */
#define GAUDI_RESET_WAIT_MSEC 1 /* 1ms */
#define GAUDI_CPU_RESET_WAIT_MSEC 200 /* 200ms */
#define GAUDI_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
#define GAUDI_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
#define GAUDI_PLDM_HRESET_TIMEOUT_MSEC 20000 /* 20s */
#define GAUDI_PLDM_TEST_QUEUE_WAIT_USEC 1000000 /* 1s */
#define GAUDI_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI_WAIT_FOR_BL_TIMEOUT_USEC 15000000 /* 15s */
#define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9
#define GAUDI_MAX_STRING_LEN 20
#define GAUDI_CB_POOL_CB_CNT 512
#define GAUDI_CB_POOL_CB_SIZE 0x20000 /* 128KB */
#define GAUDI_ALLOC_CPU_MEM_RETRY_CNT 3
#define GAUDI_NUM_OF_TPC_INTR_CAUSE 20
#define GAUDI_NUM_OF_QM_ERR_CAUSE 16
#define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE 3
#define GAUDI_ARB_WDT_TIMEOUT 0xEE6b27FF /* 8 seconds */
#define HBM_SCRUBBING_TIMEOUT_US 1000000 /* 1s */
#define BIN_REG_STRING_SIZE sizeof("0b10101010101010101010101010101010")
#define MONITOR_SOB_STRING_SIZE 256
static u32 gaudi_stream_master[GAUDI_STREAM_MASTER_ARR_SIZE] = {
GAUDI_QUEUE_ID_DMA_0_0,
GAUDI_QUEUE_ID_DMA_0_1,
GAUDI_QUEUE_ID_DMA_0_2,
GAUDI_QUEUE_ID_DMA_0_3,
GAUDI_QUEUE_ID_DMA_1_0,
GAUDI_QUEUE_ID_DMA_1_1,
GAUDI_QUEUE_ID_DMA_1_2,
GAUDI_QUEUE_ID_DMA_1_3
};
static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = {
[GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0,
[GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1,
[GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2,
[GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3,
[GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4,
[GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_5,
[GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_6,
[GAUDI_HBM_DMA_6] = GAUDI_ENGINE_ID_DMA_7
};
static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = {
[0] = GAUDI_QUEUE_ID_DMA_0_0,
[1] = GAUDI_QUEUE_ID_DMA_0_1,
[2] = GAUDI_QUEUE_ID_DMA_0_2,
[3] = GAUDI_QUEUE_ID_DMA_0_3,
[4] = GAUDI_QUEUE_ID_DMA_1_0,
[5] = GAUDI_QUEUE_ID_DMA_1_1,
[6] = GAUDI_QUEUE_ID_DMA_1_2,
[7] = GAUDI_QUEUE_ID_DMA_1_3,
};
static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
[PACKET_WREG_32] = sizeof(struct packet_wreg32),
[PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk),
[PACKET_MSG_LONG] = sizeof(struct packet_msg_long),
[PACKET_MSG_SHORT] = sizeof(struct packet_msg_short),
[PACKET_CP_DMA] = sizeof(struct packet_cp_dma),
[PACKET_REPEAT] = sizeof(struct packet_repeat),
[PACKET_MSG_PROT] = sizeof(struct packet_msg_prot),
[PACKET_FENCE] = sizeof(struct packet_fence),
[PACKET_LIN_DMA] = sizeof(struct packet_lin_dma),
[PACKET_NOP] = sizeof(struct packet_nop),
[PACKET_STOP] = sizeof(struct packet_stop),
[PACKET_ARB_POINT] = sizeof(struct packet_arb_point),
[PACKET_WAIT] = sizeof(struct packet_wait),
[PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe)
};
static inline bool validate_packet_id(enum packet_id id)
{
switch (id) {
case PACKET_WREG_32:
case PACKET_WREG_BULK:
case PACKET_MSG_LONG:
case PACKET_MSG_SHORT:
case PACKET_CP_DMA:
case PACKET_REPEAT:
case PACKET_MSG_PROT:
case PACKET_FENCE:
case PACKET_LIN_DMA:
case PACKET_NOP:
case PACKET_STOP:
case PACKET_ARB_POINT:
case PACKET_WAIT:
case PACKET_LOAD_AND_EXE:
return true;
default:
return false;
}
}
static const char * const
gaudi_tpc_interrupts_cause[GAUDI_NUM_OF_TPC_INTR_CAUSE] = {
"tpc_address_exceed_slm",
"tpc_div_by_0",
"tpc_spu_mac_overflow",
"tpc_spu_addsub_overflow",
"tpc_spu_abs_overflow",
"tpc_spu_fp_dst_nan_inf",
"tpc_spu_fp_dst_denorm",
"tpc_vpu_mac_overflow",
"tpc_vpu_addsub_overflow",
"tpc_vpu_abs_overflow",
"tpc_vpu_fp_dst_nan_inf",
"tpc_vpu_fp_dst_denorm",
"tpc_assertions",
"tpc_illegal_instruction",
"tpc_pc_wrap_around",
"tpc_qm_sw_err",
"tpc_hbw_rresp_err",
"tpc_hbw_bresp_err",
"tpc_lbw_rresp_err",
"tpc_lbw_bresp_err"
};
static const char * const
gaudi_qman_error_cause[GAUDI_NUM_OF_QM_ERR_CAUSE] = {
"PQ AXI HBW error",
"CQ AXI HBW error",
"CP AXI HBW error",
"CP error due to undefined OPCODE",
"CP encountered STOP OPCODE",
"CP AXI LBW error",
"CP WRREG32 or WRBULK returned error",
"N/A",
"FENCE 0 inc over max value and clipped",
"FENCE 1 inc over max value and clipped",
"FENCE 2 inc over max value and clipped",
"FENCE 3 inc over max value and clipped",
"FENCE 0 dec under min value and clipped",
"FENCE 1 dec under min value and clipped",
"FENCE 2 dec under min value and clipped",
"FENCE 3 dec under min value and clipped"
};
static const char * const
gaudi_qman_arb_error_cause[GAUDI_NUM_OF_QM_ARB_ERR_CAUSE] = {
"Choice push while full error",
"Choice Q watchdog error",
"MSG AXI LBW returned with error"
};
static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_0 */
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_1 */
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_2 */
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_3 */
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_0 */
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_1 */
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_2 */
QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_3 */
QUEUE_TYPE_CPU, /* GAUDI_QUEUE_ID_CPU_PQ */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_3 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_0 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_1 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_2 */
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_3 */
};
static struct hl_hw_obj_name_entry gaudi_so_id_to_str[] = {
{ .id = 0, .name = "SYNC_OBJ_DMA_DOWN_FEEDBACK" },
{ .id = 1, .name = "SYNC_OBJ_DMA_UP_FEEDBACK" },
{ .id = 2, .name = "SYNC_OBJ_DMA_STATIC_DRAM_SRAM_FEEDBACK" },
{ .id = 3, .name = "SYNC_OBJ_DMA_SRAM_DRAM_FEEDBACK" },
{ .id = 4, .name = "SYNC_OBJ_FIRST_COMPUTE_FINISH" },
{ .id = 5, .name = "SYNC_OBJ_HOST_DRAM_DONE" },
{ .id = 6, .name = "SYNC_OBJ_DBG_CTR_DEPRECATED" },
{ .id = 7, .name = "SYNC_OBJ_DMA_ACTIVATIONS_DRAM_SRAM_FEEDBACK" },
{ .id = 8, .name = "SYNC_OBJ_ENGINE_SEM_MME_0" },
{ .id = 9, .name = "SYNC_OBJ_ENGINE_SEM_MME_1" },
{ .id = 10, .name = "SYNC_OBJ_ENGINE_SEM_TPC_0" },
{ .id = 11, .name = "SYNC_OBJ_ENGINE_SEM_TPC_1" },
{ .id = 12, .name = "SYNC_OBJ_ENGINE_SEM_TPC_2" },
{ .id = 13, .name = "SYNC_OBJ_ENGINE_SEM_TPC_3" },
{ .id = 14, .name = "SYNC_OBJ_ENGINE_SEM_TPC_4" },
{ .id = 15, .name = "SYNC_OBJ_ENGINE_SEM_TPC_5" },
{ .id = 16, .name = "SYNC_OBJ_ENGINE_SEM_TPC_6" },
{ .id = 17, .name = "SYNC_OBJ_ENGINE_SEM_TPC_7" },
{ .id = 18, .name = "SYNC_OBJ_ENGINE_SEM_DMA_1" },
{ .id = 19, .name = "SYNC_OBJ_ENGINE_SEM_DMA_2" },
{ .id = 20, .name = "SYNC_OBJ_ENGINE_SEM_DMA_3" },
{ .id = 21, .name = "SYNC_OBJ_ENGINE_SEM_DMA_4" },
{ .id = 22, .name = "SYNC_OBJ_ENGINE_SEM_DMA_5" },
{ .id = 23, .name = "SYNC_OBJ_ENGINE_SEM_DMA_6" },
{ .id = 24, .name = "SYNC_OBJ_ENGINE_SEM_DMA_7" },
{ .id = 25, .name = "SYNC_OBJ_DBG_CTR_0" },
{ .id = 26, .name = "SYNC_OBJ_DBG_CTR_1" },
};
static struct hl_hw_obj_name_entry gaudi_monitor_id_to_str[] = {
{ .id = 200, .name = "MON_OBJ_DMA_DOWN_FEEDBACK_RESET" },
{ .id = 201, .name = "MON_OBJ_DMA_UP_FEEDBACK_RESET" },
{ .id = 203, .name = "MON_OBJ_DRAM_TO_SRAM_QUEUE_FENCE" },
{ .id = 204, .name = "MON_OBJ_TPC_0_CLK_GATE" },
{ .id = 205, .name = "MON_OBJ_TPC_1_CLK_GATE" },
{ .id = 206, .name = "MON_OBJ_TPC_2_CLK_GATE" },
{ .id = 207, .name = "MON_OBJ_TPC_3_CLK_GATE" },
{ .id = 208, .name = "MON_OBJ_TPC_4_CLK_GATE" },
{ .id = 209, .name = "MON_OBJ_TPC_5_CLK_GATE" },
{ .id = 210, .name = "MON_OBJ_TPC_6_CLK_GATE" },
{ .id = 211, .name = "MON_OBJ_TPC_7_CLK_GATE" },
};
static s64 gaudi_state_dump_specs_props[] = {
[SP_SYNC_OBJ_BASE_ADDR] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0,
[SP_NEXT_SYNC_OBJ_ADDR] = NEXT_SYNC_OBJ_ADDR_INTERVAL,
[SP_SYNC_OBJ_AMOUNT] = NUM_OF_SOB_IN_BLOCK,
[SP_MON_OBJ_WR_ADDR_LOW] =
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0,
[SP_MON_OBJ_WR_ADDR_HIGH] =
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0,
[SP_MON_OBJ_WR_DATA] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_DATA_0,
[SP_MON_OBJ_ARM_DATA] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_ARM_0,
[SP_MON_OBJ_STATUS] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0,
[SP_MONITORS_AMOUNT] = NUM_OF_MONITORS_IN_BLOCK,
[SP_TPC0_CMDQ] = mmTPC0_QM_GLBL_CFG0,
[SP_TPC0_CFG_SO] = mmTPC0_CFG_QM_SYNC_OBJECT_ADDR,
[SP_NEXT_TPC] = mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0,
[SP_MME_CMDQ] = mmMME0_QM_GLBL_CFG0,
[SP_MME_CFG_SO] = mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL,
[SP_NEXT_MME] = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0,
[SP_DMA_CMDQ] = mmDMA0_QM_GLBL_CFG0,
[SP_DMA_CFG_SO] = mmDMA0_CORE_WR_COMP_ADDR_LO,
[SP_DMA_QUEUES_OFFSET] = mmDMA1_QM_GLBL_CFG0 - mmDMA0_QM_GLBL_CFG0,
[SP_NUM_OF_MME_ENGINES] = NUM_OF_MME_ENGINES,
[SP_SUB_MME_ENG_NUM] = NUM_OF_MME_SUB_ENGINES,
[SP_NUM_OF_DMA_ENGINES] = NUM_OF_DMA_ENGINES,
[SP_NUM_OF_TPC_ENGINES] = NUM_OF_TPC_ENGINES,
[SP_ENGINE_NUM_OF_QUEUES] = NUM_OF_QUEUES,
[SP_ENGINE_NUM_OF_STREAMS] = NUM_OF_STREAMS,
[SP_ENGINE_NUM_OF_FENCES] = NUM_OF_FENCES,
[SP_FENCE0_CNT_OFFSET] =
mmDMA0_QM_CP_FENCE0_CNT_0 - mmDMA0_QM_GLBL_CFG0,
[SP_FENCE0_RDATA_OFFSET] =
mmDMA0_QM_CP_FENCE0_RDATA_0 - mmDMA0_QM_GLBL_CFG0,
[SP_CP_STS_OFFSET] = mmDMA0_QM_CP_STS_0 - mmDMA0_QM_GLBL_CFG0,
[SP_NUM_CORES] = 1,
};
static const int gaudi_queue_id_to_engine_id[] = {
[GAUDI_QUEUE_ID_DMA_0_0...GAUDI_QUEUE_ID_DMA_0_3] = GAUDI_ENGINE_ID_DMA_0,
[GAUDI_QUEUE_ID_DMA_1_0...GAUDI_QUEUE_ID_DMA_1_3] = GAUDI_ENGINE_ID_DMA_1,
[GAUDI_QUEUE_ID_CPU_PQ] = GAUDI_ENGINE_ID_SIZE,
[GAUDI_QUEUE_ID_DMA_2_0...GAUDI_QUEUE_ID_DMA_2_3] = GAUDI_ENGINE_ID_DMA_2,
[GAUDI_QUEUE_ID_DMA_3_0...GAUDI_QUEUE_ID_DMA_3_3] = GAUDI_ENGINE_ID_DMA_3,
[GAUDI_QUEUE_ID_DMA_4_0...GAUDI_QUEUE_ID_DMA_4_3] = GAUDI_ENGINE_ID_DMA_4,
[GAUDI_QUEUE_ID_DMA_5_0...GAUDI_QUEUE_ID_DMA_5_3] = GAUDI_ENGINE_ID_DMA_5,
[GAUDI_QUEUE_ID_DMA_6_0...GAUDI_QUEUE_ID_DMA_6_3] = GAUDI_ENGINE_ID_DMA_6,
[GAUDI_QUEUE_ID_DMA_7_0...GAUDI_QUEUE_ID_DMA_7_3] = GAUDI_ENGINE_ID_DMA_7,
[GAUDI_QUEUE_ID_MME_0_0...GAUDI_QUEUE_ID_MME_0_3] = GAUDI_ENGINE_ID_MME_0,
[GAUDI_QUEUE_ID_MME_1_0...GAUDI_QUEUE_ID_MME_1_3] = GAUDI_ENGINE_ID_MME_2,
[GAUDI_QUEUE_ID_TPC_0_0...GAUDI_QUEUE_ID_TPC_0_3] = GAUDI_ENGINE_ID_TPC_0,
[GAUDI_QUEUE_ID_TPC_1_0...GAUDI_QUEUE_ID_TPC_1_3] = GAUDI_ENGINE_ID_TPC_1,
[GAUDI_QUEUE_ID_TPC_2_0...GAUDI_QUEUE_ID_TPC_2_3] = GAUDI_ENGINE_ID_TPC_2,
[GAUDI_QUEUE_ID_TPC_3_0...GAUDI_QUEUE_ID_TPC_3_3] = GAUDI_ENGINE_ID_TPC_3,
[GAUDI_QUEUE_ID_TPC_4_0...GAUDI_QUEUE_ID_TPC_4_3] = GAUDI_ENGINE_ID_TPC_4,
[GAUDI_QUEUE_ID_TPC_5_0...GAUDI_QUEUE_ID_TPC_5_3] = GAUDI_ENGINE_ID_TPC_5,
[GAUDI_QUEUE_ID_TPC_6_0...GAUDI_QUEUE_ID_TPC_6_3] = GAUDI_ENGINE_ID_TPC_6,
[GAUDI_QUEUE_ID_TPC_7_0...GAUDI_QUEUE_ID_TPC_7_3] = GAUDI_ENGINE_ID_TPC_7,
[GAUDI_QUEUE_ID_NIC_0_0...GAUDI_QUEUE_ID_NIC_0_3] = GAUDI_ENGINE_ID_NIC_0,
[GAUDI_QUEUE_ID_NIC_1_0...GAUDI_QUEUE_ID_NIC_1_3] = GAUDI_ENGINE_ID_NIC_1,
[GAUDI_QUEUE_ID_NIC_2_0...GAUDI_QUEUE_ID_NIC_2_3] = GAUDI_ENGINE_ID_NIC_2,
[GAUDI_QUEUE_ID_NIC_3_0...GAUDI_QUEUE_ID_NIC_3_3] = GAUDI_ENGINE_ID_NIC_3,
[GAUDI_QUEUE_ID_NIC_4_0...GAUDI_QUEUE_ID_NIC_4_3] = GAUDI_ENGINE_ID_NIC_4,
[GAUDI_QUEUE_ID_NIC_5_0...GAUDI_QUEUE_ID_NIC_5_3] = GAUDI_ENGINE_ID_NIC_5,
[GAUDI_QUEUE_ID_NIC_6_0...GAUDI_QUEUE_ID_NIC_6_3] = GAUDI_ENGINE_ID_NIC_6,
[GAUDI_QUEUE_ID_NIC_7_0...GAUDI_QUEUE_ID_NIC_7_3] = GAUDI_ENGINE_ID_NIC_7,
[GAUDI_QUEUE_ID_NIC_8_0...GAUDI_QUEUE_ID_NIC_8_3] = GAUDI_ENGINE_ID_NIC_8,
[GAUDI_QUEUE_ID_NIC_9_0...GAUDI_QUEUE_ID_NIC_9_3] = GAUDI_ENGINE_ID_NIC_9,
};
/* The order here is opposite to the order of the indexing in the h/w.
* i.e. SYNC_MGR_W_S is actually 0, SYNC_MGR_E_S is 1, etc.
*/
static const char * const gaudi_sync_manager_names[] = {
"SYNC_MGR_E_N",
"SYNC_MGR_W_N",
"SYNC_MGR_E_S",
"SYNC_MGR_W_S",
NULL
};
struct ecc_info_extract_params {
u64 block_address;
u32 num_memories;
bool derr;
};
static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
u64 phys_addr);
static int gaudi_send_job_on_qman0(struct hl_device *hdev,
struct hl_cs_job *job);
static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
u32 size, u64 val);
static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
u32 num_regs, u32 val);
static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
u32 tpc_id);
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev);
static int gaudi_cpucp_info_get(struct hl_device *hdev);
static void gaudi_disable_clock_gating(struct hl_device *hdev);
static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
u32 size, bool eb);
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
static inline enum hl_collective_mode
get_collective_mode(struct hl_device *hdev, u32 queue_id)
{
if (gaudi_queue_type[queue_id] == QUEUE_TYPE_EXT)
return HL_COLLECTIVE_MASTER;
if (queue_id >= GAUDI_QUEUE_ID_DMA_5_0 &&
queue_id <= GAUDI_QUEUE_ID_DMA_5_3)
return HL_COLLECTIVE_SLAVE;
if (queue_id >= GAUDI_QUEUE_ID_TPC_7_0 &&
queue_id <= GAUDI_QUEUE_ID_TPC_7_3)
return HL_COLLECTIVE_SLAVE;
if (queue_id >= GAUDI_QUEUE_ID_NIC_0_0 &&
queue_id <= GAUDI_QUEUE_ID_NIC_9_3)
return HL_COLLECTIVE_SLAVE;
return HL_COLLECTIVE_NOT_SUPPORTED;
}
static inline void set_default_power_values(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
if (hdev->card_type == cpucp_card_type_pmc) {
prop->max_power_default = MAX_POWER_DEFAULT_PMC;
if (prop->fw_security_enabled)
prop->dc_power_default = DC_POWER_DEFAULT_PMC_SEC;
else
prop->dc_power_default = DC_POWER_DEFAULT_PMC;
} else {
prop->max_power_default = MAX_POWER_DEFAULT_PCI;
prop->dc_power_default = DC_POWER_DEFAULT_PCI;
}
}
static int gaudi_set_fixed_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 num_sync_stream_queues = 0;
int i;
prop->max_queues = GAUDI_QUEUE_ID_SIZE;
prop->hw_queues_props = kcalloc(prop->max_queues,
sizeof(struct hw_queue_properties),
GFP_KERNEL);
if (!prop->hw_queues_props)
return -ENOMEM;
for (i = 0 ; i < prop->max_queues ; i++) {
if (gaudi_queue_type[i] == QUEUE_TYPE_EXT) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
prop->hw_queues_props[i].driver_only = 0;
prop->hw_queues_props[i].supports_sync_stream = 1;
prop->hw_queues_props[i].cb_alloc_flags =
CB_ALLOC_KERNEL;
num_sync_stream_queues++;
} else if (gaudi_queue_type[i] == QUEUE_TYPE_CPU) {
prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
prop->hw_queues_props[i].driver_only = 1;
prop->hw_queues_props[i].supports_sync_stream = 0;
prop->hw_queues_props[i].cb_alloc_flags =
CB_ALLOC_KERNEL;
} else if (gaudi_queue_type[i] == QUEUE_TYPE_INT) {
prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
prop->hw_queues_props[i].driver_only = 0;
prop->hw_queues_props[i].supports_sync_stream = 0;
prop->hw_queues_props[i].cb_alloc_flags =
CB_ALLOC_USER;
}
prop->hw_queues_props[i].collective_mode =
get_collective_mode(hdev, i);
}
prop->cache_line_size = DEVICE_CACHE_LINE_SIZE;
prop->cfg_base_address = CFG_BASE;
prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
prop->host_base_address = HOST_PHYS_BASE;
prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->completion_mode = HL_COMPLETION_MODE_JOB;
prop->collective_first_sob = 0;
prop->collective_first_mon = 0;
/* 2 SOBs per internal queue stream are reserved for collective */
prop->sync_stream_first_sob =
ALIGN(NUMBER_OF_SOBS_IN_GRP, HL_MAX_SOBS_PER_MONITOR)
* QMAN_STREAMS * HL_RSVD_SOBS;
/* 1 monitor per internal queue stream is reserved for collective.
* 2 monitors per external queue stream are reserved for collective.
*/
prop->sync_stream_first_mon =
(NUMBER_OF_COLLECTIVE_QUEUES * QMAN_STREAMS) +
(NUMBER_OF_EXT_HW_QUEUES * 2);
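/* Worked example with hypothetical constants (the real values live in the
* gaudi headers and may differ): if NUMBER_OF_SOBS_IN_GRP were 11 and
* HL_MAX_SOBS_PER_MONITOR were 8, then ALIGN(11, 8) = 16, so with
* QMAN_STREAMS = 4 and HL_RSVD_SOBS = 2 the sync_stream_first_sob
* calculation above would reserve SOBs 0..127 for collective use.
*/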
prop->dram_base_address = DRAM_PHYS_BASE;
prop->dram_size = GAUDI_HBM_SIZE_32GB;
prop->dram_end_address = prop->dram_base_address + prop->dram_size;
prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
prop->sram_base_address = SRAM_BASE_ADDR;
prop->sram_size = SRAM_SIZE;
prop->sram_end_address = prop->sram_base_address + prop->sram_size;
prop->sram_user_base_address =
prop->sram_base_address + SRAM_USER_BASE_OFFSET;
prop->mmu_cache_mng_addr = MMU_CACHE_MNG_ADDR;
prop->mmu_cache_mng_size = MMU_CACHE_MNG_SIZE;
prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
if (hdev->pldm)
prop->mmu_pgt_size = 0x800000; /* 8MB */
else
prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
prop->mmu_pte_size = HL_PTE_SIZE;
prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
prop->device_mem_alloc_default_page_size = prop->dram_page_size;
prop->dram_supports_virtual_memory = false;
prop->pmmu.hop_shifts[MMU_HOP0] = MMU_V1_1_HOP0_SHIFT;
prop->pmmu.hop_shifts[MMU_HOP1] = MMU_V1_1_HOP1_SHIFT;
prop->pmmu.hop_shifts[MMU_HOP2] = MMU_V1_1_HOP2_SHIFT;
prop->pmmu.hop_shifts[MMU_HOP3] = MMU_V1_1_HOP3_SHIFT;
prop->pmmu.hop_shifts[MMU_HOP4] = MMU_V1_1_HOP4_SHIFT;
prop->pmmu.hop_masks[MMU_HOP0] = MMU_V1_1_HOP0_MASK;
prop->pmmu.hop_masks[MMU_HOP1] = MMU_V1_1_HOP1_MASK;
prop->pmmu.hop_masks[MMU_HOP2] = MMU_V1_1_HOP2_MASK;
prop->pmmu.hop_masks[MMU_HOP3] = MMU_V1_1_HOP3_MASK;
prop->pmmu.hop_masks[MMU_HOP4] = MMU_V1_1_HOP4_MASK;
prop->pmmu.start_addr = VA_HOST_SPACE_START;
prop->pmmu.end_addr =
(VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1;
prop->pmmu.page_size = PAGE_SIZE_4KB;
prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
prop->pmmu.last_mask = LAST_MASK;
/* TODO: will be duplicated until per-MMU props are implemented */
prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
/* PMMU and HPMMU are the same except for the page size */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
/* shifts and masks are the same in PMMU and DMMU */
memcpy(&prop->dmmu, &prop->pmmu, sizeof(prop->pmmu));
prop->dmmu.start_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2);
prop->dmmu.end_addr = VA_HOST_SPACE_END;
prop->dmmu.page_size = PAGE_SIZE_2MB;
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
prop->num_of_events = GAUDI_EVENT_SIZE;
prop->max_num_of_engines = GAUDI_ENGINE_ID_SIZE;
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
set_default_power_values(hdev);
prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT;
prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE;
prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
prop->max_pending_cs = GAUDI_MAX_PENDING_CS;
prop->first_available_user_sob[HL_GAUDI_WS_DCORE] =
prop->sync_stream_first_sob +
(num_sync_stream_queues * HL_RSVD_SOBS);
prop->first_available_user_mon[HL_GAUDI_WS_DCORE] =
prop->sync_stream_first_mon +
(num_sync_stream_queues * HL_RSVD_MONS);
prop->first_available_user_interrupt = USHRT_MAX;
prop->tpc_interrupt_id = USHRT_MAX;
/* single msi */
prop->eq_interrupt_id = 0;
for (i = 0 ; i < HL_MAX_DCORES ; i++)
prop->first_available_cq[i] = USHRT_MAX;
prop->fw_cpu_boot_dev_sts0_valid = false;
prop->fw_cpu_boot_dev_sts1_valid = false;
prop->hard_reset_done_by_fw = false;
prop->gic_interrupts_enable = true;
prop->server_type = HL_SERVER_TYPE_UNKNOWN;
prop->clk_pll_index = HL_GAUDI_MME_PLL;
prop->max_freq_value = GAUDI_MAX_CLK_FREQ;
prop->use_get_power_for_reset_history = true;
prop->configurable_stop_on_err = true;
prop->set_max_power_on_device_init = true;
prop->dma_mask = 48;
prop->hbw_flush_reg = mmPCIE_WRAP_RR_ELBI_RD_SEC_REG_CTRL;
return 0;
}
static int gaudi_pci_bars_map(struct hl_device *hdev)
{
static const char * const name[] = {"SRAM", "CFG", "HBM"};
bool is_wc[3] = {false, false, true};
int rc;
rc = hl_pci_bars_map(hdev, name, is_wc);
if (rc)
return rc;
hdev->rmmio = hdev->pcie_bar[CFG_BAR_ID] +
(CFG_BASE - SPI_FLASH_BASE_ADDR);
return 0;
}
static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct hl_inbound_pci_region pci_region;
u64 old_addr = addr;
int rc;
if ((gaudi) && (gaudi->hbm_bar_cur_addr == addr))
return old_addr;
if (hdev->asic_prop.iatu_done_by_fw)
return U64_MAX;
/* Inbound Region 2 - Bar 4 - Point to HBM */
pci_region.mode = PCI_BAR_MATCH_MODE;
pci_region.bar = HBM_BAR_ID;
pci_region.addr = addr;
rc = hl_pci_set_inbound_region(hdev, 2, &pci_region);
if (rc)
return U64_MAX;
if (gaudi) {
old_addr = gaudi->hbm_bar_cur_addr;
gaudi->hbm_bar_cur_addr = addr;
}
return old_addr;
}
static int gaudi_init_iatu(struct hl_device *hdev)
{
struct hl_inbound_pci_region inbound_region;
struct hl_outbound_pci_region outbound_region;
int rc;
if (hdev->asic_prop.iatu_done_by_fw)
return 0;
/* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */
inbound_region.mode = PCI_BAR_MATCH_MODE;
inbound_region.bar = SRAM_BAR_ID;
inbound_region.addr = SRAM_BASE_ADDR;
rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
if (rc)
goto done;
/* Inbound Region 1 - Bar 2 - Point to SPI FLASH */
inbound_region.mode = PCI_BAR_MATCH_MODE;
inbound_region.bar = CFG_BAR_ID;
inbound_region.addr = SPI_FLASH_BASE_ADDR;
rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
if (rc)
goto done;
/* Inbound Region 2 - Bar 4 - Point to HBM */
inbound_region.mode = PCI_BAR_MATCH_MODE;
inbound_region.bar = HBM_BAR_ID;
inbound_region.addr = DRAM_PHYS_BASE;
rc = hl_pci_set_inbound_region(hdev, 2, &inbound_region);
if (rc)
goto done;
/* Outbound Region 0 - Point to Host */
outbound_region.addr = HOST_PHYS_BASE;
outbound_region.size = HOST_PHYS_SIZE;
rc = hl_pci_set_outbound_region(hdev, &outbound_region);
done:
return rc;
}
static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev)
{
return RREG32(mmHW_STATE);
}
static int gaudi_early_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_dev *pdev = hdev->pdev;
resource_size_t pci_bar_size;
u32 fw_boot_status;
int rc;
rc = gaudi_set_fixed_properties(hdev);
if (rc) {
dev_err(hdev->dev, "Failed setting fixed properties\n");
return rc;
}
/* Check BAR sizes */
pci_bar_size = pci_resource_len(pdev, SRAM_BAR_ID);
if (pci_bar_size != SRAM_BAR_SIZE) {
dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
SRAM_BAR_ID, &pci_bar_size, SRAM_BAR_SIZE);
rc = -ENODEV;
goto free_queue_props;
}
pci_bar_size = pci_resource_len(pdev, CFG_BAR_ID);
if (pci_bar_size != CFG_BAR_SIZE) {
dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
CFG_BAR_ID, &pci_bar_size, CFG_BAR_SIZE);
rc = -ENODEV;
goto free_queue_props;
}
prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
hdev->dram_pci_bar_start = pci_resource_start(pdev, HBM_BAR_ID);
/* If FW security is enabled at this point, it means there is no access to ELBI */
if (hdev->asic_prop.fw_security_enabled) {
hdev->asic_prop.iatu_done_by_fw = true;
/*
* The GIC security bit can ONLY be set by CPUCP, so at this stage the
* decision can only be taken based on PCI ID security.
*/
hdev->asic_prop.gic_interrupts_enable = false;
goto pci_init;
}
rc = hl_pci_elbi_read(hdev, CFG_BASE + mmCPU_BOOT_DEV_STS0,
&fw_boot_status);
if (rc)
goto free_queue_props;
/* Check whether FW is configuring iATU */
if ((fw_boot_status & CPU_BOOT_DEV_STS0_ENABLED) &&
(fw_boot_status & CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN))
hdev->asic_prop.iatu_done_by_fw = true;
pci_init:
rc = hl_pci_init(hdev);
if (rc)
goto free_queue_props;
/* Before continuing with the initialization, we need to read the preboot
* version to determine whether we are running with security-enabled firmware
*/
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
/* we are already on failure flow, so don't check if hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
rc = hdev->asic_funcs->hw_fini(hdev, true, false);
if (rc) {
dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
goto pci_fini;
}
}
return 0;
pci_fini:
hl_pci_fini(hdev);
free_queue_props:
kfree(hdev->asic_prop.hw_queues_props);
return rc;
}
static int gaudi_early_fini(struct hl_device *hdev)
{
kfree(hdev->asic_prop.hw_queues_props);
hl_pci_fini(hdev);
return 0;
}
/**
* gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
*
* @hdev: pointer to hl_device structure
*
*/
static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
struct asic_fixed_properties *prop = &hdev->asic_prop;
u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
int rc;
if ((hdev->fw_components & FW_TYPE_LINUX) &&
(prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PLL_INFO_EN)) {
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI_CPU_PLL, pll_freq_arr);
if (rc)
return rc;
freq = pll_freq_arr[2];
} else {
/* Backward compatibility */
div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
nr = RREG32(mmPSOC_CPU_PLL_NR);
nf = RREG32(mmPSOC_CPU_PLL_NF);
od = RREG32(mmPSOC_CPU_PLL_OD);
if (div_sel == DIV_SEL_REF_CLK ||
div_sel == DIV_SEL_DIVIDED_REF) {
if (div_sel == DIV_SEL_REF_CLK)
freq = PLL_REF_CLK;
else
freq = PLL_REF_CLK / (div_fctr + 1);
} else if (div_sel == DIV_SEL_PLL_CLK ||
div_sel == DIV_SEL_DIVIDED_PLL) {
pll_clk = PLL_REF_CLK * (nf + 1) /
((nr + 1) * (od + 1));
if (div_sel == DIV_SEL_PLL_CLK)
freq = pll_clk;
else
freq = pll_clk / (div_fctr + 1);
} else {
dev_warn(hdev->dev, "Received invalid div select value: %#x", div_sel);
freq = 0;
}
}
prop->psoc_timestamp_frequency = freq;
prop->psoc_pci_pll_nr = nr;
prop->psoc_pci_pll_nf = nf;
prop->psoc_pci_pll_od = od;
prop->psoc_pci_pll_div_factor = div_fctr;
return 0;
}
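/*
* Worked example of the legacy PLL formula above, using hypothetical
* register values (not taken from real hardware): if PLL_REF_CLK were
* 50 MHz and the PLL registers read nr = 0, nf = 35, od = 1, then
* pll_clk = 50 * (35 + 1) / ((0 + 1) * (1 + 1)) = 900 MHz, and with
* div_sel == DIV_SEL_DIVIDED_PLL and div_fctr = 2 the reported frequency
* would be 900 / (2 + 1) = 300 MHz.
*/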
static int _gaudi_init_tpc_mem(struct hl_device *hdev,
dma_addr_t tpc_kernel_src_addr, u32 tpc_kernel_size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct packet_lin_dma *init_tpc_mem_pkt;
struct hl_cs_job *job;
struct hl_cb *cb;
u64 dst_addr;
u32 cb_size, ctl;
u8 tpc_id;
int rc;
cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
if (!cb)
return -EFAULT;
init_tpc_mem_pkt = cb->kernel_address;
cb_size = sizeof(*init_tpc_mem_pkt);
memset(init_tpc_mem_pkt, 0, cb_size);
init_tpc_mem_pkt->tsize = cpu_to_le32(tpc_kernel_size);
ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA);
ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_LIN_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
init_tpc_mem_pkt->ctl = cpu_to_le32(ctl);
init_tpc_mem_pkt->src_addr = cpu_to_le64(tpc_kernel_src_addr);
/* TPC_CMD is configured with I$ prefetch enabled, so address should be aligned to 8KB */
dst_addr = FIELD_PREP(GAUDI_PKT_LIN_DMA_DST_ADDR_MASK,
round_up(prop->sram_user_base_address, SZ_8K));
init_tpc_mem_pkt->dst_addr |= cpu_to_le64(dst_addr);
job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
if (!job) {
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
goto release_cb;
}
job->id = 0;
job->user_cb = cb;
atomic_inc(&job->user_cb->cs_cnt);
job->user_cb_size = cb_size;
job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
job->patched_cb = job->user_cb;
job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot);
hl_debugfs_add_job(hdev, job);
rc = gaudi_send_job_on_qman0(hdev, job);
if (rc)
goto free_job;
for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
rc = gaudi_run_tpc_kernel(hdev, dst_addr, tpc_id);
if (rc)
break;
}
free_job:
hl_userptr_delete_list(hdev, &job->userptr_list);
hl_debugfs_remove_job(hdev, job);
kfree(job);
atomic_dec(&cb->cs_cnt);
release_cb:
hl_cb_put(cb);
hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
return rc;
}
/*
* gaudi_init_tpc_mem() - Initialize TPC memories.
* @hdev: Pointer to hl_device structure.
*
* Copy TPC kernel fw from firmware file and run it to initialize TPC memories.
*
* Return: 0 for success, negative value for error.
*/
static int gaudi_init_tpc_mem(struct hl_device *hdev)
{
const struct firmware *fw;
size_t fw_size;
void *cpu_addr;
dma_addr_t dma_handle;
int rc, count = 5;
again:
rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
if (rc == -EINTR && count-- > 0) {
msleep(50);
goto again;
}
if (rc) {
dev_err(hdev->dev, "Failed to load firmware file %s\n",
GAUDI_TPC_FW_FILE);
goto out;
}
fw_size = fw->size;
cpu_addr = hl_asic_dma_alloc_coherent(hdev, fw_size, &dma_handle, GFP_KERNEL | __GFP_ZERO);
if (!cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate %zu of dma memory for TPC kernel\n",
fw_size);
rc = -ENOMEM;
goto out;
}
memcpy(cpu_addr, fw->data, fw_size);
rc = _gaudi_init_tpc_mem(hdev, dma_handle, fw_size);
hl_asic_dma_free_coherent(hdev, fw->size, cpu_addr, dma_handle);
out:
release_firmware(fw);
return rc;
}
static void gaudi_collective_map_sobs(struct hl_device *hdev, u32 stream)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct gaudi_collective_properties *prop = &gaudi->collective_props;
struct hl_hw_queue *q;
u32 i, sob_id, sob_group_id, queue_id;
/* Iterate through SOB groups and assign a SOB for each slave queue */
sob_group_id =
stream * HL_RSVD_SOBS + prop->curr_sob_group_idx[stream];
sob_id = prop->hw_sob_group[sob_group_id].base_sob_id;
queue_id = GAUDI_QUEUE_ID_NIC_0_0 + stream;
for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) {
q = &hdev->kernel_queues[queue_id + (4 * i)];
q->sync_stream_prop.collective_sob_id = sob_id + i;
}
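/* The (4 * i) stride assumes 4 streams per NIC engine, matching the
* GAUDI_QUEUE_ID_NIC_x_0..3 layout in gaudi_queue_type. For example, for
* stream 1 and i = 2 this selects GAUDI_QUEUE_ID_NIC_0_0 + 1 + 8, i.e. the
* second stream of NIC engine 2.
*/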
/* Both DMA5 and TPC7 use the same resources since only a single
* engine needs to participate in the reduction process.
*/
queue_id = GAUDI_QUEUE_ID_DMA_5_0 + stream;
q = &hdev->kernel_queues[queue_id];
q->sync_stream_prop.collective_sob_id =
sob_id + NIC_NUMBER_OF_ENGINES;
queue_id = GAUDI_QUEUE_ID_TPC_7_0 + stream;
q = &hdev->kernel_queues[queue_id];
q->sync_stream_prop.collective_sob_id =
sob_id + NIC_NUMBER_OF_ENGINES;
}
static void gaudi_sob_group_hw_reset(struct kref *ref)
{
struct gaudi_hw_sob_group *hw_sob_group =
container_of(ref, struct gaudi_hw_sob_group, kref);
struct hl_device *hdev = hw_sob_group->hdev;
int i;
for (i = 0 ; i < NUMBER_OF_SOBS_IN_GRP ; i++)
WREG32((mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
(hw_sob_group->base_sob_id * 4) + (i * 4)), 0);
kref_init(&hw_sob_group->kref);
}
static void gaudi_sob_group_reset_error(struct kref *ref)
{
struct gaudi_hw_sob_group *hw_sob_group =
container_of(ref, struct gaudi_hw_sob_group, kref);
struct hl_device *hdev = hw_sob_group->hdev;
dev_crit(hdev->dev,
"SOB release shouldn't be called here, base_sob_id: %d\n",
hw_sob_group->base_sob_id);
}
static void gaudi_collective_mstr_sob_mask_set(struct gaudi_device *gaudi)
{
struct gaudi_collective_properties *prop;
int i;
prop = &gaudi->collective_props;
memset(prop->mstr_sob_mask, 0, sizeof(prop->mstr_sob_mask));
for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++)
if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i))
prop->mstr_sob_mask[i / HL_MAX_SOBS_PER_MONITOR] |=
BIT(i % HL_MAX_SOBS_PER_MONITOR);
/* Set collective engine bit */
prop->mstr_sob_mask[i / HL_MAX_SOBS_PER_MONITOR] |=
BIT(i % HL_MAX_SOBS_PER_MONITOR);
}
static int gaudi_collective_init(struct hl_device *hdev)
{
u32 i, sob_id, reserved_sobs_per_group;
struct gaudi_collective_properties *prop;
struct gaudi_device *gaudi;
gaudi = hdev->asic_specific;
prop = &gaudi->collective_props;
sob_id = hdev->asic_prop.collective_first_sob;
/* First sob in group must be aligned to HL_MAX_SOBS_PER_MONITOR */
reserved_sobs_per_group =
ALIGN(NUMBER_OF_SOBS_IN_GRP, HL_MAX_SOBS_PER_MONITOR);
/* Init SOB groups */
for (i = 0 ; i < NUM_SOB_GROUPS; i++) {
prop->hw_sob_group[i].hdev = hdev;
prop->hw_sob_group[i].base_sob_id = sob_id;
sob_id += reserved_sobs_per_group;
gaudi_sob_group_hw_reset(&prop->hw_sob_group[i].kref);
}
for (i = 0 ; i < QMAN_STREAMS; i++) {
prop->next_sob_group_val[i] = 1;
prop->curr_sob_group_idx[i] = 0;
gaudi_collective_map_sobs(hdev, i);
}
gaudi_collective_mstr_sob_mask_set(gaudi);
return 0;
}
static void gaudi_reset_sob_group(struct hl_device *hdev, u16 sob_group)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct gaudi_collective_properties *cprop = &gaudi->collective_props;
kref_put(&cprop->hw_sob_group[sob_group].kref,
gaudi_sob_group_hw_reset);
}
static void gaudi_collective_master_init_job(struct hl_device *hdev,
struct hl_cs_job *job, u32 stream, u32 sob_group_offset)
{
u32 master_sob_base, master_monitor, queue_id, cb_size = 0;
struct gaudi_collective_properties *cprop;
struct hl_gen_wait_properties wait_prop;
struct hl_sync_stream_properties *prop;
struct gaudi_device *gaudi;
gaudi = hdev->asic_specific;
cprop = &gaudi->collective_props;
queue_id = job->hw_queue_id;
prop = &hdev->kernel_queues[queue_id].sync_stream_prop;
master_sob_base =
cprop->hw_sob_group[sob_group_offset].base_sob_id;
master_monitor = prop->collective_mstr_mon_id[0];
cprop->hw_sob_group[sob_group_offset].queue_id = queue_id;
dev_dbg(hdev->dev,
"Generate master wait CBs, sob %d (mask %#x), val:0x%x, mon %u, q %d\n",
master_sob_base, cprop->mstr_sob_mask[0],
cprop->next_sob_group_val[stream],
master_monitor, queue_id);
wait_prop.data = (void *) job->patched_cb;
wait_prop.sob_base = master_sob_base;
wait_prop.sob_mask = cprop->mstr_sob_mask[0];
wait_prop.sob_val = cprop->next_sob_group_val[stream];
wait_prop.mon_id = master_monitor;
wait_prop.q_idx = queue_id;
wait_prop.size = cb_size;
cb_size += gaudi_gen_wait_cb(hdev, &wait_prop);
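/* The first master monitor has just been armed on the lower half of the
 * SOB group (mstr_sob_mask[0], i.e. NICs 0-7 per the layout described in
 * gaudi_collective_wait_create_jobs()). Advance the SOB base by
 * HL_MAX_SOBS_PER_MONITOR and arm the second master monitor on the
 * remaining engines (mstr_sob_mask[1]).
 */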
master_sob_base += HL_MAX_SOBS_PER_MONITOR;
master_monitor = prop->collective_mstr_mon_id[1];
dev_dbg(hdev->dev,
"Generate master wait CBs, sob %d (mask %#x), val:0x%x, mon %u, q %d\n",
master_sob_base, cprop->mstr_sob_mask[1],
cprop->next_sob_group_val[stream],
master_monitor, queue_id);
wait_prop.sob_base = master_sob_base;
wait_prop.sob_mask = cprop->mstr_sob_mask[1];
wait_prop.mon_id = master_monitor;
wait_prop.size = cb_size;
cb_size += gaudi_gen_wait_cb(hdev, &wait_prop);
}
static void gaudi_collective_slave_init_job(struct hl_device *hdev,
struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
struct hl_gen_wait_properties wait_prop;
struct hl_sync_stream_properties *prop;
u32 queue_id, cb_size = 0;
queue_id = job->hw_queue_id;
prop = &hdev->kernel_queues[queue_id].sync_stream_prop;
if (job->cs->encaps_signals) {
/* use the encaps signal handle stored earlier in the flow
 * and set the SOB information from the encaps
 * signals handle
 */
hl_hw_queue_encaps_sig_set_sob_info(hdev, job->cs, job,
cs_cmpl);
dev_dbg(hdev->dev, "collective wait: Sequence %llu found, sob_id: %u, wait for sob_val: %u\n",
job->cs->sequence,
cs_cmpl->hw_sob->sob_id,
cs_cmpl->sob_val);
}
/* Add to wait CBs using slave monitor */
wait_prop.data = (void *) job->user_cb;
wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
wait_prop.sob_mask = 0x1;
wait_prop.sob_val = cs_cmpl->sob_val;
wait_prop.mon_id = prop->collective_slave_mon_id;
wait_prop.q_idx = queue_id;
wait_prop.size = cb_size;
dev_dbg(hdev->dev,
"Generate slave wait CB, sob %d, val:%x, mon %d, q %d\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
prop->collective_slave_mon_id, queue_id);
cb_size += gaudi_gen_wait_cb(hdev, &wait_prop);
dev_dbg(hdev->dev,
"generate signal CB, sob_id: %d, sob val: 1, q_idx: %d\n",
prop->collective_sob_id, queue_id);
cb_size += gaudi_gen_signal_cb(hdev, job->user_cb,
prop->collective_sob_id, cb_size, false);
}
static int gaudi_collective_wait_init_cs(struct hl_cs *cs)
{
struct hl_cs_compl *signal_cs_cmpl =
container_of(cs->signal_fence, struct hl_cs_compl, base_fence);
struct hl_cs_compl *cs_cmpl =
container_of(cs->fence, struct hl_cs_compl, base_fence);
struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
struct gaudi_collective_properties *cprop;
u32 stream, queue_id, sob_group_offset;
struct gaudi_device *gaudi;
struct hl_device *hdev;
struct hl_cs_job *job;
struct hl_ctx *ctx;
ctx = cs->ctx;
hdev = ctx->hdev;
gaudi = hdev->asic_specific;
cprop = &gaudi->collective_props;
if (cs->encaps_signals) {
cs_cmpl->hw_sob = handle->hw_sob;
/* at this checkpoint we only need the hw_sob pointer
 * for the completion check before starting to go over the
 * master/slave jobs. The sob_value will be taken later on
 * in gaudi_collective_slave_init_job, depending on each
 * job's wait offset value.
 */
cs_cmpl->sob_val = 0;
} else {
/* copy the SOB id and value of the signal CS */
cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
}
/* Check again whether the signal CS has already completed.
 * If it has, don't send any wait CS since the hw_sob
 * could already be in reset. If the signal has not completed,
 * take a refcount on the hw_sob to prevent the SOB from being
 * reset while the wait CS is not yet submitted.
 * Note that this check is protected by two locks:
 * the hw queue lock and the completion object lock. The same
 * completion object lock also protects the hw_sob reset handler
 * function, while the hw queue lock keeps the hw_sob refcount
 * value, which is changed by the signal/wait flows, from going
 * out of sync.
 */
spin_lock(&signal_cs_cmpl->lock);
if (completion_done(&cs->signal_fence->completion)) {
spin_unlock(&signal_cs_cmpl->lock);
return -EINVAL;
}
/* Increment kref since all slave queues are now waiting on it */
kref_get(&cs_cmpl->hw_sob->kref);
spin_unlock(&signal_cs_cmpl->lock);
/* Calculate the stream from collective master queue (1st job) */
job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node);
stream = job->hw_queue_id % 4;
sob_group_offset =
stream * HL_RSVD_SOBS + cprop->curr_sob_group_idx[stream];
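/* Each stream owns HL_RSVD_SOBS consecutive SOB groups; pick the group
 * that is currently active for this stream (curr_sob_group_idx wraps
 * around in the handling further below).
 */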
list_for_each_entry(job, &cs->job_list, cs_node) {
queue_id = job->hw_queue_id;
if (hdev->kernel_queues[queue_id].collective_mode ==
HL_COLLECTIVE_MASTER)
gaudi_collective_master_init_job(hdev, job, stream,
sob_group_offset);
else
gaudi_collective_slave_init_job(hdev, job, cs_cmpl);
}
cs_cmpl->sob_group = sob_group_offset;
/* Handle sob group kref and wraparound */
kref_get(&cprop->hw_sob_group[sob_group_offset].kref);
cprop->next_sob_group_val[stream]++;
if (cprop->next_sob_group_val[stream] == HL_MAX_SOB_VAL) {
/*
* Decrement as we reached the max value.
* The release function won't be called here as we've
* just incremented the refcount.
*/
kref_put(&cprop->hw_sob_group[sob_group_offset].kref,
gaudi_sob_group_reset_error);
cprop->next_sob_group_val[stream] = 1;
/* only two SOB groups per stream are currently in use */
cprop->curr_sob_group_idx[stream] =
(cprop->curr_sob_group_idx[stream] + 1) &
(HL_RSVD_SOBS - 1);
gaudi_collective_map_sobs(hdev, stream);
dev_dbg(hdev->dev, "switched to SOB group %d, stream: %d\n",
cprop->curr_sob_group_idx[stream], stream);
}
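/* Full memory barrier: make sure the SOB group bookkeeping above is
 * visible before the signal fence is released below.
 */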
mb();
hl_fence_put(cs->signal_fence);
cs->signal_fence = NULL;
return 0;
}
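/* Worked example for the helper below (illustrative only, assuming a
 * 128-byte device cache line and a 16-byte packet_msg_prot, which may
 * differ from the real values): the helper returns how many extra bytes
 * the patched CB needs for the two msg_prot packets, padding up to the
 * next cache-line boundary when the packets would otherwise spill over
 * it. For user_cb_size = 90, 90 + 32 <= 128, so only the 32 packet bytes
 * are added; for user_cb_size = 120, 120 + 32 > 128, so 8 padding bytes
 * up to the 128-byte boundary plus the 32 packet bytes (40 in total) are
 * added.
 */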
static u32 gaudi_get_patched_cb_extra_size(u32 user_cb_size)
{
u32 cacheline_end, additional_commands;
cacheline_end = round_up(user_cb_size, DEVICE_CACHE_LINE_SIZE);
additional_commands = sizeof(struct packet_msg_prot) * 2;
if (user_cb_size + additional_commands > cacheline_end)
return cacheline_end - user_cb_size + additional_commands;
else
return additional_commands;
}
static int gaudi_collective_wait_create_job(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs,
enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id,
u32 encaps_signal_offset)
{
struct hw_queue_properties *hw_queue_prop;
struct hl_cs_counters_atomic *cntr;
struct hl_cs_job *job;
struct hl_cb *cb;
u32 cb_size;
bool patched_cb;
cntr = &hdev->aggregated_cs_counters;
if (mode == HL_COLLECTIVE_MASTER) {
/* CB size of collective master queue contains
* 4 msg short packets for monitor 1 configuration
* 1 fence packet
* 4 msg short packets for monitor 2 configuration
* 1 fence packet
* 2 msg prot packets for completion and MSI
*/
cb_size = sizeof(struct packet_msg_short) * 8 +
sizeof(struct packet_fence) * 2 +
sizeof(struct packet_msg_prot) * 2;
patched_cb = true;
} else {
/* CB size of collective slave queues contains
* 4 msg short packets for monitor configuration
* 1 fence packet
* 1 additional msg short packet for sob signal
*/
cb_size = sizeof(struct packet_msg_short) * 5 +
sizeof(struct packet_fence);
patched_cb = false;
}
hw_queue_prop = &hdev->asic_prop.hw_queues_props[queue_id];
job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true);
if (!job) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
dev_err(hdev->dev, "Failed to allocate a new job\n");
return -ENOMEM;
}
/* Allocate internal mapped CB for non-patched CBs */
cb = hl_cb_kernel_create(hdev, cb_size, !patched_cb);
if (!cb) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
kfree(job);
return -EFAULT;
}
job->id = 0;
job->cs = cs;
job->user_cb = cb;
atomic_inc(&job->user_cb->cs_cnt);
job->user_cb_size = cb_size;
job->hw_queue_id = queue_id;
/* since it's guaranteed to have only one chunk in the collective wait
 * cs, we can use this chunk to set the encapsulated signal offset
 * in the jobs.
 */
if (cs->encaps_signals)
job->encaps_sig_wait_offset = encaps_signal_offset;
/*
 * No need for parsing, the user CB is the patched CB.
 * We call hl_cb_destroy() for two reasons - we don't need
 * the CB in the CB idr anymore and to decrement its refcount as
 * it was incremented inside hl_cb_kernel_create().
 */
if (patched_cb)
job->patched_cb = job->user_cb;
else
job->patched_cb = NULL;
job->job_cb_size = job->user_cb_size;
hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
/* increment refcount as for external queues we get completion */
if (hw_queue_prop->type == QUEUE_TYPE_EXT)
cs_get(cs);
cs->jobs_in_queue_cnt[job->hw_queue_id]++;
list_add_tail(&job->cs_node, &cs->job_list);
hl_debugfs_add_job(hdev, job);
return 0;
}
static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs,
u32 wait_queue_id, u32 collective_engine_id,
u32 encaps_signal_offset)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct hw_queue_properties *hw_queue_prop;
u32 queue_id, collective_queue, num_jobs;
u32 stream, nic_queue, nic_idx = 0;
bool skip;
int i, rc = 0;
/* Verify wait queue id is configured as master */
hw_queue_prop = &hdev->asic_prop.hw_queues_props[wait_queue_id];
if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
dev_err(hdev->dev,
"Queue %d is not configured as collective master\n",
wait_queue_id);
return -EINVAL;
}
/* Verify engine id is supported */
if (collective_engine_id != GAUDI_ENGINE_ID_DMA_5 &&
collective_engine_id != GAUDI_ENGINE_ID_TPC_7) {
dev_err(hdev->dev,
"Collective wait does not support engine %u\n",
collective_engine_id);
return -EINVAL;
}
stream = wait_queue_id % 4;
if (collective_engine_id == GAUDI_ENGINE_ID_DMA_5)
collective_queue = GAUDI_QUEUE_ID_DMA_5_0 + stream;
else
collective_queue = GAUDI_QUEUE_ID_TPC_7_0 + stream;
num_jobs = NUMBER_OF_SOBS_IN_GRP + 1;
nic_queue = GAUDI_QUEUE_ID_NIC_0_0 + stream;
/* First job goes to the collective master queue, it will wait for
 * the collective slave queues to finish execution.
 * The synchronization is done using two monitors:
 * First monitor for NICs 0-7, second monitor for NICs 8-9 and the
 * reduction engine (DMA5/TPC7).
 *
 * The rest of the jobs go to the collective slave queues, which will
 * all wait for the user to signal sob 'cs_cmpl->sob_val'.
 */
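/* Iteration layout below: i == 0 creates the master job on wait_queue_id,
 * the next NIC_NUMBER_OF_ENGINES iterations create slave jobs on the NIC
 * queues of this stream (disabled NICs are skipped), and the last
 * iteration creates the slave job on the reduction engine queue
 * (DMA5/TPC7).
 */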
for (i = 0 ; i < num_jobs ; i++) {
if (i == 0) {
queue_id = wait_queue_id;
rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
HL_COLLECTIVE_MASTER, queue_id,
wait_queue_id, encaps_signal_offset);
} else {
if (nic_idx < NIC_NUMBER_OF_ENGINES) {
if (gaudi->hw_cap_initialized &
BIT(HW_CAP_NIC_SHIFT + nic_idx))
skip = false;
else
skip = true;
queue_id = nic_queue;
nic_queue += 4;
nic_idx++;
if (skip)
continue;
} else {
queue_id = collective_queue;
}
rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
HL_COLLECTIVE_SLAVE, queue_id,
wait_queue_id, encaps_signal_offset);
}
if (rc)
return rc;
}
return rc;
}
static int gaudi_late_init(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
int rc;
rc = gaudi->cpucp_info_get(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to get cpucp info\n");
return rc;
}
if ((hdev->card_type == cpucp_card_type_pci) &&
(hdev->nic_ports_mask & 0x3)) {
dev_info(hdev->dev,
"PCI card detected, only 8 ports are enabled\n");
hdev->nic_ports_mask &= ~0x3;
/* Stop and disable unused NIC QMANs */
WREG32(mmNIC0_QM0_GLBL_CFG1, NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
WREG32(mmNIC0_QM1_GLBL_CFG1, NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
WREG32(mmNIC0_QM0_GLBL_CFG0, 0);
WREG32(mmNIC0_QM1_GLBL_CFG0, 0);
gaudi->hw_cap_initialized &= ~(HW_CAP_NIC0 | HW_CAP_NIC1);
}
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0);
if (rc) {
dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
return rc;
}
/* Scrub both SRAM and DRAM */
rc = hdev->asic_funcs->scrub_device_mem(hdev);
if (rc)
goto disable_pci_access;
rc = gaudi_fetch_psoc_frequency(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to fetch psoc frequency\n");
goto disable_pci_access;
}
rc = gaudi_mmu_clear_pgt_range(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
goto disable_pci_access;
}
rc = gaudi_init_tpc_mem(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to initialize TPC memories\n");
goto disable_pci_access;
}
rc = gaudi_collective_init(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to init collective\n");
goto disable_pci_access;
}
/* We only support a single ASID for the user, so for the sake of optimization, just
* initialize the ASID one time during device initialization with the fixed value of 1
*/
gaudi_mmu_prepare(hdev, 1);
hl_fw_set_pll_profile(hdev);
return 0;
disable_pci_access:
hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
return rc;
}
static void gaudi_late_fini(struct hl_device *hdev)
{
hl_hwmon_release_resources(hdev);
}
static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
{
dma_addr_t dma_addr_arr[GAUDI_ALLOC_CPU_MEM_RETRY_CNT] = {}, end_addr;
void *virt_addr_arr[GAUDI_ALLOC_CPU_MEM_RETRY_CNT] = {};
int i, j, rc = 0;
/*
 * The device CPU works with 40-bit addresses, while bit 39 must be set
 * to '1' when accessing the host.
 * Bits 49:39 of the full host address are saved for a later
 * configuration of the HW to perform extension to 50 bits.
 * Because there is a single HW register that holds the extension bits,
 * these bits must be identical across the entire allocated range.
 */
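/* Note: because a single extension value covers bits 49:39, an allocation
 * is only usable if it does not straddle a 2^39-byte (512 GB) aligned
 * boundary. Hence the retry loop below, which keeps the first allocation
 * whose start and end addresses share the same MSBs and frees the rest.
 */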
for (i = 0 ; i < GAUDI_ALLOC_CPU_MEM_RETRY_CNT ; i++) {
virt_addr_arr[i] = hl_asic_dma_alloc_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
&dma_addr_arr[i],
GFP_KERNEL | __GFP_ZERO);
if (!virt_addr_arr[i]) {
rc = -ENOMEM;
goto free_dma_mem_arr;
}
end_addr = dma_addr_arr[i] + HL_CPU_ACCESSIBLE_MEM_SIZE - 1;
if (GAUDI_CPU_PCI_MSB_ADDR(dma_addr_arr[i]) ==
GAUDI_CPU_PCI_MSB_ADDR(end_addr))
break;
}
if (i == GAUDI_ALLOC_CPU_MEM_RETRY_CNT) {
dev_err(hdev->dev,
	"MSBs of CPU accessible DMA memory are not identical across the allocated range\n");
rc = -EFAULT;
goto free_dma_mem_arr;
}
hdev->cpu_accessible_dma_mem = virt_addr_arr[i];
hdev->cpu_accessible_dma_address = dma_addr_arr[i];
hdev->cpu_pci_msb_addr =
GAUDI_CPU_PCI_MSB_ADDR(hdev->cpu_accessible_dma_address);
if (!hdev->asic_prop.fw_security_enabled)
GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address);
free_dma_mem_arr:
for (j = 0 ; j < i ; j++)
hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, virt_addr_arr[j],
dma_addr_arr[j]);
return rc;
}
static void gaudi_free_internal_qmans_pq_mem(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct gaudi_internal_qman_info *q;
u32 i;
for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
q = &gaudi->internal_qmans[i];
if (!q->pq_kernel_addr)
continue;
hl_asic_dma_free_coherent(hdev, q->pq_size, q->pq_kernel_addr, q->pq_dma_addr);
}
}
static int gaudi_alloc_internal_qmans_pq_mem(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct gaudi_internal_qman_info *q;
int rc, i;
for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
if (gaudi_queue_type[i] != QUEUE_TYPE_INT)
continue;
q = &gaudi->internal_qmans[i];
switch (i) {
case GAUDI_QUEUE_ID_DMA_2_0 ... GAUDI_QUEUE_ID_DMA_7_3:
q->pq_size = HBM_DMA_QMAN_SIZE_IN_BYTES;
break;
case GAUDI_QUEUE_ID_MME_0_0 ... GAUDI_QUEUE_ID_MME_1_3:
q->pq_size = MME_QMAN_SIZE_IN_BYTES;
break;
case GAUDI_QUEUE_ID_TPC_0_0 ... GAUDI_QUEUE_ID_TPC_7_3:
q->pq_size = TPC_QMAN_SIZE_IN_BYTES;
break;
case GAUDI_QUEUE_ID_NIC_0_0 ... GAUDI_QUEUE_ID_NIC_9_3:
q->pq_size = NIC_QMAN_SIZE_IN_BYTES;
break;
default:
dev_err(hdev->dev, "Bad internal queue index %d", i);
rc = -EINVAL;
goto free_internal_qmans_pq_mem;
}
q->pq_kernel_addr = hl_asic_dma_alloc_coherent(hdev, q->pq_size, &q->pq_dma_addr,
GFP_KERNEL | __GFP_ZERO);
if (!q->pq_kernel_addr) {
rc = -ENOMEM;
goto free_internal_qmans_pq_mem;
}
}
return 0;
free_internal_qmans_pq_mem:
gaudi_free_internal_qmans_pq_mem(hdev);
return rc;
}
static void gaudi_set_pci_memory_regions(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_mem_region *region;
/* CFG */
region = &hdev->pci_mem_region[PCI_REGION_CFG];
region->region_base = CFG_BASE;
region->region_size = CFG_SIZE;
region->offset_in_bar = CFG_BASE - SPI_FLASH_BASE_ADDR;
region->bar_size = CFG_BAR_SIZE;
region->bar_id = CFG_BAR_ID;
region->used = 1;
/* SRAM */
region = &hdev->pci_mem_region[PCI_REGION_SRAM];
region->region_base = SRAM_BASE_ADDR;
region->region_size = SRAM_SIZE;
region->offset_in_bar = 0;
region->bar_size = SRAM_BAR_SIZE;
region->bar_id = SRAM_BAR_ID;
region->used = 1;
/* DRAM */
region = &hdev->pci_mem_region[PCI_REGION_DRAM];
region->region_base = DRAM_PHYS_BASE;
region->region_size = hdev->asic_prop.dram_size;
region->offset_in_bar = 0;
region->bar_size = prop->dram_pci_bar_size;
region->bar_id = HBM_BAR_ID;
region->used = 1;
/* SP SRAM */
region = &hdev->pci_mem_region[PCI_REGION_SP_SRAM];
region->region_base = PSOC_SCRATCHPAD_ADDR;
region->region_size = PSOC_SCRATCHPAD_SIZE;
region->offset_in_bar = PSOC_SCRATCHPAD_ADDR - SPI_FLASH_BASE_ADDR;
region->bar_size = CFG_BAR_SIZE;
region->bar_id = CFG_BAR_ID;
region->used = 1;
}
static int gaudi_sw_init(struct hl_device *hdev)
{
struct gaudi_device *gaudi;
u32 i, event_id = 0;
int rc;
/* Allocate device structure */
gaudi = kzalloc(sizeof(*gaudi), GFP_KERNEL);
if (!gaudi)
return -ENOMEM;
for (i = 0 ; i < ARRAY_SIZE(gaudi_irq_map_table) ; i++) {
if (gaudi_irq_map_table[i].valid) {
if (event_id == GAUDI_EVENT_SIZE) {
dev_err(hdev->dev,
"Event array exceeds the limit of %u events\n",
GAUDI_EVENT_SIZE);
rc = -EINVAL;
goto free_gaudi_device;
}
gaudi->events[event_id++] =
gaudi_irq_map_table[i].fc_id;
}
}
gaudi->cpucp_info_get = gaudi_cpucp_info_get;
hdev->asic_specific = gaudi;
/* Create DMA pool for small allocations */
hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
&hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0);
if (!hdev->dma_pool) {
dev_err(hdev->dev, "failed to create DMA pool\n");
rc = -ENOMEM;
goto free_gaudi_device;
}
rc = gaudi_alloc_cpu_accessible_dma_mem(hdev);
if (rc)
goto free_dma_pool;
hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
if (!hdev->cpu_accessible_dma_pool) {
dev_err(hdev->dev,
"Failed to create CPU accessible DMA pool\n");
rc = -ENOMEM;
goto free_cpu_dma_mem;
}
rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
(uintptr_t) hdev->cpu_accessible_dma_mem,
HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
if (rc) {
dev_err(hdev->dev,
"Failed to add memory to CPU accessible DMA pool\n");
rc = -EFAULT;
goto free_cpu_accessible_dma_pool;
}
rc = gaudi_alloc_internal_qmans_pq_mem(hdev);
if (rc)
goto free_cpu_accessible_dma_pool;
spin_lock_init(&gaudi->hw_queues_lock);
hdev->supports_sync_stream = true;
hdev->supports_coresight = true;
hdev->supports_staged_submission = true;
hdev->supports_wait_for_multi_cs = true;
hdev->asic_funcs->set_pci_memory_regions(hdev);
hdev->stream_master_qid_arr =
hdev->asic_funcs->get_stream_master_qid_arr();
hdev->stream_master_qid_arr_size = GAUDI_STREAM_MASTER_ARR_SIZE;
return 0;
free_cpu_accessible_dma_pool:
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_dma_mem:
if (!hdev->asic_prop.fw_security_enabled)
GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
hdev->cpu_pci_msb_addr);
hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
hdev->cpu_accessible_dma_address);
free_dma_pool:
dma_pool_destroy(hdev->dma_pool);
free_gaudi_device:
kfree(gaudi);
return rc;
}
static int gaudi_sw_fini(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
gaudi_free_internal_qmans_pq_mem(hdev);
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
if (!hdev->asic_prop.fw_security_enabled)
GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
hdev->cpu_pci_msb_addr);
hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
hdev->cpu_accessible_dma_address);
dma_pool_destroy(hdev->dma_pool);
kfree(gaudi);
return 0;
}
static irqreturn_t gaudi_irq_handler_single(int irq, void *arg)
{
struct hl_device *hdev = arg;
int i;
if (hdev->disabled)
return IRQ_HANDLED;
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_irq_handler_cq(irq, &hdev->completion_queue[i]);
hl_irq_handler_eq(irq, &hdev->event_queue);
return IRQ_HANDLED;
}
/*
* For backward compatibility, new MSI interrupts should be set after the
* existing CPU and NIC interrupts.
*/
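/* Illustrative mapping (the numeric values here are assumptions for the
 * example only): if GAUDI_EVENT_QUEUE_MSI_IDX were 8 and
 * NIC_NUMBER_OF_ENGINES were 10, then nr 0-7 (or the CPU EQ) would map
 * directly to MSI vectors 0-7 (or 8), while a new interrupt with nr 9
 * would map to vector 9 + 10 + 1 = 20, i.e. after the CPU EQ and NIC
 * vectors.
 */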
static int gaudi_pci_irq_vector(struct hl_device *hdev, unsigned int nr,
bool cpu_eq)
{
int msi_vec;
if ((nr != GAUDI_EVENT_QUEUE_MSI_IDX) && (cpu_eq))
dev_crit(hdev->dev, "CPU EQ must use IRQ %d\n",
GAUDI_EVENT_QUEUE_MSI_IDX);
msi_vec = ((nr < GAUDI_EVENT_QUEUE_MSI_IDX) || (cpu_eq)) ? nr :
(nr + NIC_NUMBER_OF_ENGINES + 1);
return pci_irq_vector(hdev->pdev, msi_vec);
}
static int gaudi_enable_msi_single(struct hl_device *hdev)
{
int rc, irq;
dev_dbg(hdev->dev, "Working in single MSI IRQ mode\n");
irq = gaudi_pci_irq_vector(hdev, 0, false);
rc = request_irq(irq, gaudi_irq_handler_single, 0,
"gaudi single msi", hdev);
if (rc)
dev_err(hdev->dev,
"Failed to request single MSI IRQ\n");
return rc;
}
static int gaudi_enable_msi(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
int rc;
if (gaudi->hw_cap_initialized & HW_CAP_MSI)
return 0;
rc = pci_alloc_irq_vectors(hdev->pdev, 1, 1, PCI_IRQ_MSI);
if (rc < 0) {
dev_err(hdev->dev, "MSI: Failed to enable support %d\n", rc);
return rc;
}
rc = gaudi_enable_msi_single(hdev);
if (rc)
goto free_pci_irq_vectors;
gaudi->hw_cap_initialized |= HW_CAP_MSI;
return 0;
free_pci_irq_vectors:
pci_free_irq_vectors(hdev->pdev);
return rc;
}
static void gaudi_sync_irqs(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
return;
/* Wait for all pending IRQs to be finished */
synchronize_irq(gaudi_pci_irq_vector(hdev, 0, false));
}
static void gaudi_disable_msi(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
return;
gaudi_sync_irqs(hdev);
free_irq(gaudi_pci_irq_vector(hdev, 0, false), hdev);
pci_free_irq_vectors(hdev->pdev);
gaudi->hw_cap_initialized &= ~HW_CAP_MSI;
}
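/* Enable the SRAM scrambler on all NIF/SIF routers and DMA_IF down
 * channels. The register writes are skipped if firmware security is
 * enabled or if the firmware already enabled the scrambler
 * (SRAM_SCR_EN status bit).
 */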
static void gaudi_init_scrambler_sram(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (hdev->asic_prop.fw_security_enabled)
return;
if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_SRAM_SCR_EN)
return;
if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER)
return;
WREG32(mmNIF_RTR_CTRL_0_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_1_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_2_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_3_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_4_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_5_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_6_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_7_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_0_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_1_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_2_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_3_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_4_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_5_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_6_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_7_SCRAM_SRAM_EN,
1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_N_DOWN_CH0_SCRAM_SRAM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_N_DOWN_CH1_SCRAM_SRAM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_S_DOWN_CH0_SCRAM_SRAM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_S_DOWN_CH1_SCRAM_SRAM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_N_DOWN_CH0_SCRAM_SRAM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_N_DOWN_CH1_SCRAM_SRAM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_S_DOWN_CH0_SCRAM_SRAM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_S_DOWN_CH1_SCRAM_SRAM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
gaudi->hw_cap_initialized |= HW_CAP_SRAM_SCRAMBLER;
}
static void gaudi_init_scrambler_hbm(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (hdev->asic_prop.fw_security_enabled)
return;
if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_DRAM_SCR_EN)
return;
if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER)
return;
WREG32(mmNIF_RTR_CTRL_0_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_1_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_2_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_3_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_4_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_5_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_6_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_7_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_0_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_1_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_2_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_3_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_4_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_5_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_6_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_7_SCRAM_HBM_EN,
1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_N_DOWN_CH0_SCRAM_HBM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_N_DOWN_CH1_SCRAM_HBM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_S_DOWN_CH0_SCRAM_HBM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_S_DOWN_CH1_SCRAM_HBM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_N_DOWN_CH0_SCRAM_HBM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_N_DOWN_CH1_SCRAM_HBM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_S_DOWN_CH0_SCRAM_HBM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_S_DOWN_CH1_SCRAM_HBM_EN,
1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
gaudi->hw_cap_initialized |= HW_CAP_HBM_SCRAMBLER;
}
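/* Program the end-to-end credit sizes (HBM/PCI, read and write) for every
 * SIF/NIF router and DMA_IF down channel and then enable E2E credits.
 * The register writes are skipped if firmware security is enabled or if
 * the firmware already configured them (E2E_CRED_EN status bit).
 */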
static void gaudi_init_e2e(struct hl_device *hdev)
{
if (hdev->asic_prop.fw_security_enabled)
return;
if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_E2E_CRED_EN)
return;
WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 247 >> 3);
WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 785 >> 3);
WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 49);
WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_RD_SIZE, 101);
WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_WR_SIZE, 275 >> 3);
WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_RD_SIZE, 614 >> 3);
WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_WR_SIZE, 1);
WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_RD_SIZE, 39);
WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_WR_SIZE, 1);
WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_RD_SIZE, 1);
WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_WR_SIZE, 1);
WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_RD_SIZE, 32);
WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_WR_SIZE, 176 >> 3);
WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_RD_SIZE, 32 >> 3);
WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_WR_SIZE, 19);
WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_RD_SIZE, 32);
WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_WR_SIZE, 176 >> 3);
WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_RD_SIZE, 32 >> 3);
WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_WR_SIZE, 19);
WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_RD_SIZE, 32);
WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_WR_SIZE, 1);
WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_RD_SIZE, 1);
WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_WR_SIZE, 1);
WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_RD_SIZE, 32);
WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_WR_SIZE, 275 >> 3);
WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_RD_SIZE, 614 >> 3);
WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_WR_SIZE, 1);
WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_RD_SIZE, 39);
WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_WR_SIZE, 297 >> 3);
WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_RD_SIZE, 908 >> 3);
WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_WR_SIZE, 19);
WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_RD_SIZE, 19);
WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 318 >> 3);
WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 956 >> 3);
WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 79);
WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_RD_SIZE, 163);
WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_WR_SIZE, 275 >> 3);
WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_RD_SIZE, 614 >> 3);
WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_WR_SIZE, 1);
WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_RD_SIZE, 39);
WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_WR_SIZE, 1);
WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_RD_SIZE, 1);
WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_WR_SIZE, 1);
WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_RD_SIZE, 32);
WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_WR_SIZE, 176 >> 3);
WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_RD_SIZE, 32 >> 3);
WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_WR_SIZE, 19);
WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_RD_SIZE, 32);
WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_WR_SIZE, 176 >> 3);
WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_RD_SIZE, 32 >> 3);
WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_WR_SIZE, 19);
WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_RD_SIZE, 32);
WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_WR_SIZE, 1);
WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_RD_SIZE, 1);
WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_WR_SIZE, 1);
WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_RD_SIZE, 32);
WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_WR_SIZE, 275 >> 3);
WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_RD_SIZE, 614 >> 3);
WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_WR_SIZE, 1);
WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_RD_SIZE, 39);
WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_WR_SIZE, 318 >> 3);
WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_RD_SIZE, 956 >> 3);
WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_WR_SIZE, 79);
WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_RD_SIZE, 79);
WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_RD_SIZE, 338);
WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_RD_SIZE, 338);
WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_RD_SIZE, 338);
WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_RD_SIZE, 338);
WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_RD_SIZE, 338);
WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_RD_SIZE, 338);
WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_RD_SIZE, 338);
WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_RD_SIZE, 338);
WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_EN,
1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_EN,
1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_EN,
1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_EN,
1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_EN,
1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_EN,
1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_EN,
1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_EN,
1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_EN,
1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_EN,
1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_EN,
1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_EN,
1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_EN,
1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_EN,
1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_EN,
1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_EN,
1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_EN,
1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_EN,
1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
}
static void gaudi_init_hbm_cred(struct hl_device *hdev)
{
u32 hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;
if (hdev->asic_prop.fw_security_enabled)
return;
if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_HBM_CRED_EN)
return;
hbm0_wr = 0x33333333;
hbm0_rd = 0x77777777;
hbm1_wr = 0x55555555;
hbm1_rd = 0xDDDDDDDD;
WREG32(mmDMA_IF_E_N_HBM0_WR_CRED_CNT, hbm0_wr);
WREG32(mmDMA_IF_E_N_HBM1_WR_CRED_CNT, hbm1_wr);
WREG32(mmDMA_IF_E_N_HBM0_RD_CRED_CNT, hbm0_rd);
WREG32(mmDMA_IF_E_N_HBM1_RD_CRED_CNT, hbm1_rd);
WREG32(mmDMA_IF_E_S_HBM0_WR_CRED_CNT, hbm0_wr);
WREG32(mmDMA_IF_E_S_HBM1_WR_CRED_CNT, hbm1_wr);
WREG32(mmDMA_IF_E_S_HBM0_RD_CRED_CNT, hbm0_rd);
WREG32(mmDMA_IF_E_S_HBM1_RD_CRED_CNT, hbm1_rd);
WREG32(mmDMA_IF_W_N_HBM0_WR_CRED_CNT, hbm0_wr);
WREG32(mmDMA_IF_W_N_HBM1_WR_CRED_CNT, hbm1_wr);
WREG32(mmDMA_IF_W_N_HBM0_RD_CRED_CNT, hbm0_rd);
WREG32(mmDMA_IF_W_N_HBM1_RD_CRED_CNT, hbm1_rd);
WREG32(mmDMA_IF_W_S_HBM0_WR_CRED_CNT, hbm0_wr);
WREG32(mmDMA_IF_W_S_HBM1_WR_CRED_CNT, hbm1_wr);
WREG32(mmDMA_IF_W_S_HBM0_RD_CRED_CNT, hbm0_rd);
WREG32(mmDMA_IF_W_S_HBM1_RD_CRED_CNT, hbm1_rd);
WREG32(mmDMA_IF_E_N_HBM_CRED_EN_0,
(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
WREG32(mmDMA_IF_E_S_HBM_CRED_EN_0,
(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
WREG32(mmDMA_IF_W_N_HBM_CRED_EN_0,
(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
WREG32(mmDMA_IF_W_S_HBM_CRED_EN_0,
(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
WREG32(mmDMA_IF_E_N_HBM_CRED_EN_1,
(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
WREG32(mmDMA_IF_E_S_HBM_CRED_EN_1,
(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
WREG32(mmDMA_IF_W_N_HBM_CRED_EN_1,
(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
WREG32(mmDMA_IF_W_S_HBM_CRED_EN_1,
(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
}
static void gaudi_init_golden_registers(struct hl_device *hdev)
{
u32 tpc_offset;
int tpc_id, i;
gaudi_init_e2e(hdev);
gaudi_init_hbm_cred(hdev);
for (tpc_id = 0, tpc_offset = 0;
tpc_id < TPC_NUMBER_OF_ENGINES;
tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
/* Mask all arithmetic interrupts from TPC */
WREG32(mmTPC0_CFG_TPC_INTR_MASK + tpc_offset, 0x8FFE);
/* Set 16 cache lines */
WREG32_FIELD(TPC0_CFG_MSS_CONFIG, tpc_offset,
ICACHE_FETCH_LINE_NUM, 2);
}
/* Make sure 1st 128 bytes in SRAM are 0 for Tensor DMA */
for (i = 0 ; i < 128 ; i += 8)
writeq(0, hdev->pcie_bar[SRAM_BAR_ID] + i);
WREG32(mmMME0_CTRL_EUS_ROLLUP_CNT_ADD, 3);
WREG32(mmMME1_CTRL_EUS_ROLLUP_CNT_ADD, 3);
WREG32(mmMME2_CTRL_EUS_ROLLUP_CNT_ADD, 3);
WREG32(mmMME3_CTRL_EUS_ROLLUP_CNT_ADD, 3);
}
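/* Program one PCI DMA QMAN stream: PQ base/size and producer/consumer
 * indices, LDMA offsets and the four CP_MSG_BASE monitor/SOB address
 * pairs. The per-QMAN configuration (RAZWI error reporting address/data,
 * arbitration error messages and watchdog timeout) is done only for
 * qman_id 0.
 */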
static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
int qman_id, dma_addr_t qman_pq_addr)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
u32 q_off, dma_qm_offset;
u32 dma_qm_err_cfg, irq_handler_offset;
dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
mtr_base_en_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
mtr_base_en_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
so_base_en_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
so_base_en_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
mtr_base_ws_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
mtr_base_ws_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
so_base_ws_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
so_base_ws_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
q_off = dma_qm_offset + qman_id * 4;
WREG32(mmDMA0_QM_PQ_BASE_LO_0 + q_off, lower_32_bits(qman_pq_addr));
WREG32(mmDMA0_QM_PQ_BASE_HI_0 + q_off, upper_32_bits(qman_pq_addr));
WREG32(mmDMA0_QM_PQ_SIZE_0 + q_off, ilog2(HL_QUEUE_LENGTH));
WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0);
WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0);
WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, QMAN_LDMA_SIZE_OFFSET);
WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
QMAN_LDMA_SRC_OFFSET);
WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
QMAN_LDMA_DST_OFFSET);
WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);
WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off, mtr_base_ws_lo);
WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off, mtr_base_ws_hi);
WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo);
WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);
WREG32(mmDMA0_QM_CP_BARRIER_CFG_0 + q_off, 0x100);
/* The following configuration is needed only once per QMAN */
if (qman_id == 0) {
irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl);
/* Configure RAZWI IRQ */
dma_qm_err_cfg = PCI_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
if (hdev->stop_on_err)
dma_qm_err_cfg |=
PCI_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg);
WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset,
lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset,
upper_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset,
gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id +
dma_id);
WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
QM_ARB_ERR_MSG_EN_MASK);
/* Set timeout to maximum */
WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
QMAN_EXTERNAL_MAKE_TRUSTED);
WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0);
}
}
static void gaudi_init_dma_core(struct hl_device *hdev, int dma_id)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 dma_err_cfg = 1 << DMA0_CORE_ERR_CFG_ERR_MSG_EN_SHIFT;
u32 dma_offset = dma_id * DMA_CORE_OFFSET;
u32 irq_handler_offset;
/* Set to maximum possible according to physical size */
WREG32(mmDMA0_CORE_RD_MAX_OUTSTAND + dma_offset, 0);
WREG32(mmDMA0_CORE_RD_MAX_SIZE + dma_offset, 0);
/* WA for H/W bug H3-2116 */
WREG32(mmDMA0_CORE_LBW_MAX_OUTSTAND + dma_offset, 15);
/* The STOP_ON bit implies the operation gets no completion in case of RAZWI */
if (hdev->stop_on_err)
dma_err_cfg |= 1 << DMA0_CORE_ERR_CFG_STOP_ON_ERR_SHIFT;
WREG32(mmDMA0_CORE_ERR_CFG + dma_offset, dma_err_cfg);
irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
le32_to_cpu(dyn_regs->gic_dma_core_irq_ctrl);
WREG32(mmDMA0_CORE_ERRMSG_ADDR_LO + dma_offset,
lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmDMA0_CORE_ERRMSG_ADDR_HI + dma_offset,
upper_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmDMA0_CORE_ERRMSG_WDATA + dma_offset,
gaudi_irq_map_table[GAUDI_EVENT_DMA0_CORE].cpu_id + dma_id);
WREG32(mmDMA0_CORE_PROT + dma_offset,
1 << DMA0_CORE_PROT_ERR_VAL_SHIFT);
/* If the channel is secured, it should be in MMU bypass mode */
WREG32(mmDMA0_CORE_SECURE_PROPS + dma_offset,
1 << DMA0_CORE_SECURE_PROPS_MMBP_SHIFT);
WREG32(mmDMA0_CORE_CFG_0 + dma_offset, 1 << DMA0_CORE_CFG_0_EN_SHIFT);
}
static void gaudi_enable_qman(struct hl_device *hdev, int dma_id,
u32 enable_mask)
{
u32 dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_GLBL_CFG0 + dma_qm_offset, enable_mask);
}
static void gaudi_init_pci_dma_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct hl_hw_queue *q;
int i, j, dma_id, cpu_skip, nic_skip, cq_id = 0, q_idx, msi_vec = 0;
if (gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)
return;
for (i = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) {
dma_id = gaudi_dma_assignment[i];
/*
 * For queues after the CPU Q, we need to add 1 to get the correct
 * queue index. In addition, we need to add the CPU EQ and NIC IRQs in
 * order to get the correct MSI register.
 */
if (dma_id > 1) {
cpu_skip = 1;
nic_skip = NIC_NUMBER_OF_ENGINES;
} else {
cpu_skip = 0;
nic_skip = 0;
}
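/* The kernel queue index is 4 * dma_id + stream, shifted by one for
 * channels placed after the CPU queue; e.g. for dma_id 2 and stream 0
 * this selects kernel queue 4 * 2 + 0 + 1 = 9.
 */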
for (j = 0 ; j < QMAN_STREAMS ; j++) {
q_idx = 4 * dma_id + j + cpu_skip;
q = &hdev->kernel_queues[q_idx];
q->cq_id = cq_id++;
q->msi_vec = nic_skip + cpu_skip + msi_vec++;
gaudi_init_pci_dma_qman(hdev, dma_id, j,
q->bus_address);
}
gaudi_init_dma_core(hdev, dma_id);
gaudi_enable_qman(hdev, dma_id, PCI_DMA_QMAN_ENABLE);
}
gaudi->hw_cap_initialized |= HW_CAP_PCI_DMA;
}
static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
int qman_id, u64 qman_base_addr)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
u32 dma_qm_err_cfg, irq_handler_offset;
u32 q_off, dma_qm_offset;
dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
mtr_base_en_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
mtr_base_en_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
so_base_en_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
so_base_en_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
mtr_base_ws_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
mtr_base_ws_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
so_base_ws_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
so_base_ws_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
q_off = dma_qm_offset + qman_id * 4;
if (qman_id < 4) {
WREG32(mmDMA0_QM_PQ_BASE_LO_0 + q_off,
lower_32_bits(qman_base_addr));
WREG32(mmDMA0_QM_PQ_BASE_HI_0 + q_off,
upper_32_bits(qman_base_addr));
WREG32(mmDMA0_QM_PQ_SIZE_0 + q_off, ilog2(HBM_DMA_QMAN_LENGTH));
WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0);
WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0);
WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
QMAN_CPDMA_SIZE_OFFSET);
WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
QMAN_CPDMA_SRC_OFFSET);
WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
QMAN_CPDMA_DST_OFFSET);
} else {
irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl);
WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
QMAN_LDMA_SIZE_OFFSET);
WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
QMAN_LDMA_SRC_OFFSET);
WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
QMAN_LDMA_DST_OFFSET);
/* Configure RAZWI IRQ */
dma_qm_err_cfg = HBM_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
if (hdev->stop_on_err)
dma_qm_err_cfg |=
HBM_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg);
WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset,
lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset,
upper_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset,
gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id +
dma_id);
WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
QM_ARB_ERR_MSG_EN_MASK);
/* Set timeout to maximum */
WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0);
WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
QMAN_INTERNAL_MAKE_TRUSTED);
}
WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);
/* Configure DMA5 CP_MSG_BASE 2/3 for sync stream collective */
if (gaudi_dma_assignment[dma_id] == GAUDI_ENGINE_ID_DMA_5) {
WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off,
mtr_base_ws_lo);
WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off,
mtr_base_ws_hi);
WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off,
so_base_ws_lo);
WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off,
so_base_ws_hi);
}
}
static void gaudi_init_hbm_dma_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct gaudi_internal_qman_info *q;
u64 qman_base_addr;
int i, j, dma_id, internal_q_index;
if (gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)
return;
for (i = 0 ; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) {
dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_1 + i];
for (j = 0 ; j < QMAN_STREAMS ; j++) {
/*
 * Add the CPU queue in order to get the correct queue
 * number, as all internal queues are placed after it
 */
internal_q_index = dma_id * QMAN_STREAMS + j + 1;
q = &gaudi->internal_qmans[internal_q_index];
qman_base_addr = (u64) q->pq_dma_addr;
gaudi_init_hbm_dma_qman(hdev, dma_id, j,
qman_base_addr);
}
/* Initializing lower CP for HBM DMA QMAN */
gaudi_init_hbm_dma_qman(hdev, dma_id, 4, 0);
gaudi_init_dma_core(hdev, dma_id);
gaudi_enable_qman(hdev, dma_id, HBM_DMA_QMAN_ENABLE);
}
gaudi->hw_cap_initialized |= HW_CAP_HBM_DMA;
}
static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset,
int qman_id, u64 qman_base_addr)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 mtr_base_lo, mtr_base_hi;
u32 so_base_lo, so_base_hi;
u32 irq_handler_offset;
u32 q_off, mme_id;
u32 mme_qm_err_cfg;
mtr_base_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
mtr_base_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
so_base_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
so_base_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
q_off = mme_offset + qman_id * 4;
if (qman_id < 4) {
WREG32(mmMME0_QM_PQ_BASE_LO_0 + q_off,
lower_32_bits(qman_base_addr));
WREG32(mmMME0_QM_PQ_BASE_HI_0 + q_off,
upper_32_bits(qman_base_addr));
WREG32(mmMME0_QM_PQ_SIZE_0 + q_off, ilog2(MME_QMAN_LENGTH));
WREG32(mmMME0_QM_PQ_PI_0 + q_off, 0);
WREG32(mmMME0_QM_PQ_CI_0 + q_off, 0);
WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
QMAN_CPDMA_SIZE_OFFSET);
WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
QMAN_CPDMA_SRC_OFFSET);
WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
QMAN_CPDMA_DST_OFFSET);
} else {
irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
le32_to_cpu(dyn_regs->gic_mme_qm_irq_ctrl);
WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
QMAN_LDMA_SIZE_OFFSET);
WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
QMAN_LDMA_SRC_OFFSET);
WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
QMAN_LDMA_DST_OFFSET);
/* Configure RAZWI IRQ */
mme_id = mme_offset /
(mmMME1_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0) / 2;
mme_qm_err_cfg = MME_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
if (hdev->stop_on_err)
mme_qm_err_cfg |=
MME_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
WREG32(mmMME0_QM_GLBL_ERR_CFG + mme_offset, mme_qm_err_cfg);
WREG32(mmMME0_QM_GLBL_ERR_ADDR_LO + mme_offset,
lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmMME0_QM_GLBL_ERR_ADDR_HI + mme_offset,
upper_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmMME0_QM_GLBL_ERR_WDATA + mme_offset,
gaudi_irq_map_table[GAUDI_EVENT_MME0_QM].cpu_id +
mme_id);
WREG32(mmMME0_QM_ARB_ERR_MSG_EN + mme_offset,
QM_ARB_ERR_MSG_EN_MASK);
/* Set timeout to maximum */
WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmMME0_QM_GLBL_CFG1 + mme_offset, 0);
WREG32(mmMME0_QM_GLBL_PROT + mme_offset,
QMAN_INTERNAL_MAKE_TRUSTED);
}
WREG32(mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo);
WREG32(mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi);
WREG32(mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo);
WREG32(mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi);
}
static void gaudi_init_mme_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct gaudi_internal_qman_info *q;
u64 qman_base_addr;
u32 mme_offset;
int i, internal_q_index;
if (gaudi->hw_cap_initialized & HW_CAP_MME)
return;
/*
* map GAUDI_QUEUE_ID_MME_0_X to the N_W_MME (mmMME2_QM_BASE)
* and GAUDI_QUEUE_ID_MME_1_X to the S_W_MME (mmMME0_QM_BASE)
*/
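/* The first four streams (MME_0_0..MME_0_3) are therefore programmed at
 * the MME2 QMAN offset; once stream index 3 is done, mme_offset is reset
 * to 0 so the remaining streams (MME_1_0..MME_1_3) program the MME0 QMAN.
 */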
mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0;
for (i = 0 ; i < MME_NUMBER_OF_QMANS ; i++) {
internal_q_index = GAUDI_QUEUE_ID_MME_0_0 + i;
q = &gaudi->internal_qmans[internal_q_index];
qman_base_addr = (u64) q->pq_dma_addr;
gaudi_init_mme_qman(hdev, mme_offset, (i & 0x3),
qman_base_addr);
if (i == 3)
mme_offset = 0;
}
/* Initializing lower CP for MME QMANs */
mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0;
gaudi_init_mme_qman(hdev, mme_offset, 4, 0);
gaudi_init_mme_qman(hdev, 0, 4, 0);
WREG32(mmMME2_QM_GLBL_CFG0, QMAN_MME_ENABLE);
WREG32(mmMME0_QM_GLBL_CFG0, QMAN_MME_ENABLE);
gaudi->hw_cap_initialized |= HW_CAP_MME;
}
static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
int qman_id, u64 qman_base_addr)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
u32 tpc_qm_err_cfg, irq_handler_offset;
u32 q_off, tpc_id;
mtr_base_en_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
mtr_base_en_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
so_base_en_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
so_base_en_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
mtr_base_ws_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
mtr_base_ws_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
so_base_ws_lo = lower_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
so_base_ws_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
q_off = tpc_offset + qman_id * 4;
tpc_id = tpc_offset /
(mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0);
if (qman_id < 4) {
WREG32(mmTPC0_QM_PQ_BASE_LO_0 + q_off,
lower_32_bits(qman_base_addr));
WREG32(mmTPC0_QM_PQ_BASE_HI_0 + q_off,
upper_32_bits(qman_base_addr));
WREG32(mmTPC0_QM_PQ_SIZE_0 + q_off, ilog2(TPC_QMAN_LENGTH));
WREG32(mmTPC0_QM_PQ_PI_0 + q_off, 0);
WREG32(mmTPC0_QM_PQ_CI_0 + q_off, 0);
WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
QMAN_CPDMA_SIZE_OFFSET);
WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
QMAN_CPDMA_SRC_OFFSET);
WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
QMAN_CPDMA_DST_OFFSET);
} else {
irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
le32_to_cpu(dyn_regs->gic_tpc_qm_irq_ctrl);
WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
QMAN_LDMA_SIZE_OFFSET);
WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
QMAN_LDMA_SRC_OFFSET);
WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
QMAN_LDMA_DST_OFFSET);
/* Configure RAZWI IRQ */
tpc_qm_err_cfg = TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
if (hdev->stop_on_err)
tpc_qm_err_cfg |=
TPC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
WREG32(mmTPC0_QM_GLBL_ERR_CFG + tpc_offset, tpc_qm_err_cfg);
WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + tpc_offset,
lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + tpc_offset,
upper_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmTPC0_QM_GLBL_ERR_WDATA + tpc_offset,
gaudi_irq_map_table[GAUDI_EVENT_TPC0_QM].cpu_id +
tpc_id);
WREG32(mmTPC0_QM_ARB_ERR_MSG_EN + tpc_offset,
QM_ARB_ERR_MSG_EN_MASK);
/* Set timeout to maximum */
WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmTPC0_QM_GLBL_CFG1 + tpc_offset, 0);
WREG32(mmTPC0_QM_GLBL_PROT + tpc_offset,
QMAN_INTERNAL_MAKE_TRUSTED);
}
WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);
/* Configure TPC7 CP_MSG_BASE 2/3 for sync stream collective */
if (tpc_id == 6) {
WREG32(mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off,
mtr_base_ws_lo);
WREG32(mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off,
mtr_base_ws_hi);
WREG32(mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off,
so_base_ws_lo);
WREG32(mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off,
so_base_ws_hi);
}
}
static void gaudi_init_tpc_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct gaudi_internal_qman_info *q;
u64 qman_base_addr;
u32 so_base_hi, tpc_offset = 0;
u32 tpc_delta = mmTPC1_CFG_SM_BASE_ADDRESS_HIGH -
mmTPC0_CFG_SM_BASE_ADDRESS_HIGH;
int i, tpc_id, internal_q_index;
if (gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)
return;
so_base_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
for (i = 0 ; i < QMAN_STREAMS ; i++) {
internal_q_index = GAUDI_QUEUE_ID_TPC_0_0 +
tpc_id * QMAN_STREAMS + i;
q = &gaudi->internal_qmans[internal_q_index];
qman_base_addr = (u64) q->pq_dma_addr;
gaudi_init_tpc_qman(hdev, tpc_offset, i,
qman_base_addr);
if (i == 3) {
/* Initializing lower CP for TPC QMAN */
gaudi_init_tpc_qman(hdev, tpc_offset, 4, 0);
/* Enable the QMAN and TPC channel */
WREG32(mmTPC0_QM_GLBL_CFG0 + tpc_offset,
QMAN_TPC_ENABLE);
}
}
WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + tpc_id * tpc_delta,
so_base_hi);
tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;
gaudi->hw_cap_initialized |=
FIELD_PREP(HW_CAP_TPC_MASK, 1 << tpc_id);
}
}
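/* Configure a single NIC QMAN stream: PQ base and size, CP LDMA offsets and
 * message base addresses. For stream 0 only, also configure the RAZWI error
 * reporting, arbitration and protection registers.
 */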
static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset,
int qman_id, u64 qman_base_addr, int nic_id)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
u32 nic_qm_err_cfg, irq_handler_offset;
u32 q_off;
mtr_base_en_lo = lower_32_bits((CFG_BASE & U32_MAX) +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
mtr_base_en_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
so_base_en_lo = lower_32_bits((CFG_BASE & U32_MAX) +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
so_base_en_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
mtr_base_ws_lo = lower_32_bits((CFG_BASE & U32_MAX) +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
mtr_base_ws_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
so_base_ws_lo = lower_32_bits((CFG_BASE & U32_MAX) +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
so_base_ws_hi = upper_32_bits(CFG_BASE +
mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
q_off = nic_offset + qman_id * 4;
WREG32(mmNIC0_QM0_PQ_BASE_LO_0 + q_off, lower_32_bits(qman_base_addr));
WREG32(mmNIC0_QM0_PQ_BASE_HI_0 + q_off, upper_32_bits(qman_base_addr));
WREG32(mmNIC0_QM0_PQ_SIZE_0 + q_off, ilog2(NIC_QMAN_LENGTH));
WREG32(mmNIC0_QM0_PQ_PI_0 + q_off, 0);
WREG32(mmNIC0_QM0_PQ_CI_0 + q_off, 0);
WREG32(mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0 + q_off,
QMAN_LDMA_SIZE_OFFSET);
WREG32(mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
QMAN_LDMA_SRC_OFFSET);
WREG32(mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
QMAN_LDMA_DST_OFFSET);
WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);
/* Configure NIC CP_MSG_BASE 2/3 for sync stream collective */
WREG32(mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0 + q_off, mtr_base_ws_lo);
WREG32(mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0 + q_off, mtr_base_ws_hi);
WREG32(mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo);
WREG32(mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);
if (qman_id == 0) {
irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
le32_to_cpu(dyn_regs->gic_nic_qm_irq_ctrl);
/* Configure RAZWI IRQ */
nic_qm_err_cfg = NIC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
if (hdev->stop_on_err)
nic_qm_err_cfg |=
NIC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
WREG32(mmNIC0_QM0_GLBL_ERR_CFG + nic_offset, nic_qm_err_cfg);
WREG32(mmNIC0_QM0_GLBL_ERR_ADDR_LO + nic_offset,
lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmNIC0_QM0_GLBL_ERR_ADDR_HI + nic_offset,
upper_32_bits(CFG_BASE + irq_handler_offset));
WREG32(mmNIC0_QM0_GLBL_ERR_WDATA + nic_offset,
gaudi_irq_map_table[GAUDI_EVENT_NIC0_QM0].cpu_id +
nic_id);
WREG32(mmNIC0_QM0_ARB_ERR_MSG_EN + nic_offset,
QM_ARB_ERR_MSG_EN_MASK);
/* Set timeout to maximum */
WREG32(mmNIC0_QM0_ARB_SLV_CHOISE_WDT + nic_offset, GAUDI_ARB_WDT_TIMEOUT);
WREG32(mmNIC0_QM0_GLBL_CFG1 + nic_offset, 0);
WREG32(mmNIC0_QM0_GLBL_PROT + nic_offset,
QMAN_INTERNAL_MAKE_TRUSTED);
}
}
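/* Initialize the QMANs of all NIC engines that are enabled in nic_ports_mask
 * and mark the matching HW_CAP_NIC bits as initialized.
 */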
static void gaudi_init_nic_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct gaudi_internal_qman_info *q;
u64 qman_base_addr;
u32 nic_offset = 0;
u32 nic_delta_between_qmans =
mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
u32 nic_delta_between_nics =
mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
int i, nic_id, internal_q_index;
if (!hdev->nic_ports_mask)
return;
if (gaudi->hw_cap_initialized & HW_CAP_NIC_MASK)
return;
dev_dbg(hdev->dev, "Initializing NIC QMANs\n");
for (nic_id = 0 ; nic_id < NIC_NUMBER_OF_ENGINES ; nic_id++) {
if (!(hdev->nic_ports_mask & (1 << nic_id))) {
nic_offset += nic_delta_between_qmans;
if (nic_id & 1) {
nic_offset -= (nic_delta_between_qmans * 2);
nic_offset += nic_delta_between_nics;
}
continue;
}
for (i = 0 ; i < QMAN_STREAMS ; i++) {
internal_q_index = GAUDI_QUEUE_ID_NIC_0_0 +
nic_id * QMAN_STREAMS + i;
q = &gaudi->internal_qmans[internal_q_index];
qman_base_addr = (u64) q->pq_dma_addr;
gaudi_init_nic_qman(hdev, nic_offset, (i & 0x3),
qman_base_addr, nic_id);
}
/* Enable the QMAN */
WREG32(mmNIC0_QM0_GLBL_CFG0 + nic_offset, NIC_QMAN_ENABLE);
nic_offset += nic_delta_between_qmans;
if (nic_id & 1) {
nic_offset -= (nic_delta_between_qmans * 2);
nic_offset += nic_delta_between_nics;
}
gaudi->hw_cap_initialized |= 1 << (HW_CAP_NIC_SHIFT + nic_id);
}
}
static void gaudi_disable_pci_dma_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
return;
WREG32(mmDMA0_QM_GLBL_CFG0, 0);
WREG32(mmDMA1_QM_GLBL_CFG0, 0);
WREG32(mmDMA5_QM_GLBL_CFG0, 0);
}
static void gaudi_disable_hbm_dma_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
return;
WREG32(mmDMA2_QM_GLBL_CFG0, 0);
WREG32(mmDMA3_QM_GLBL_CFG0, 0);
WREG32(mmDMA4_QM_GLBL_CFG0, 0);
WREG32(mmDMA6_QM_GLBL_CFG0, 0);
WREG32(mmDMA7_QM_GLBL_CFG0, 0);
}
static void gaudi_disable_mme_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
return;
WREG32(mmMME2_QM_GLBL_CFG0, 0);
WREG32(mmMME0_QM_GLBL_CFG0, 0);
}
static void gaudi_disable_tpc_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 tpc_offset = 0;
int tpc_id;
if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
return;
for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
WREG32(mmTPC0_QM_GLBL_CFG0 + tpc_offset, 0);
tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;
}
}
static void gaudi_disable_nic_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 nic_mask, nic_offset = 0;
u32 nic_delta_between_qmans =
mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
u32 nic_delta_between_nics =
mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
int nic_id;
for (nic_id = 0 ; nic_id < NIC_NUMBER_OF_ENGINES ; nic_id++) {
nic_mask = 1 << (HW_CAP_NIC_SHIFT + nic_id);
if (gaudi->hw_cap_initialized & nic_mask)
WREG32(mmNIC0_QM0_GLBL_CFG0 + nic_offset, 0);
nic_offset += nic_delta_between_qmans;
if (nic_id & 1) {
nic_offset -= (nic_delta_between_qmans * 2);
nic_offset += nic_delta_between_nics;
}
}
}
static void gaudi_stop_pci_dma_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
return;
/* Stop upper CPs of QMANs 0.0 to 1.3 and 5.0 to 5.3 */
WREG32(mmDMA0_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmDMA1_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmDMA5_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
}
static void gaudi_stop_hbm_dma_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
return;
/* Stop CPs of HBM DMA QMANs */
WREG32(mmDMA2_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmDMA3_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmDMA4_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmDMA6_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmDMA7_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
}
static void gaudi_stop_mme_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
return;
/* Stop CPs of MME QMANs */
WREG32(mmMME2_QM_GLBL_CFG1, 0x1F << MME0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmMME0_QM_GLBL_CFG1, 0x1F << MME0_QM_GLBL_CFG1_CP_STOP_SHIFT);
}
static void gaudi_stop_tpc_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
return;
WREG32(mmTPC0_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmTPC1_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmTPC2_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmTPC3_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmTPC4_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmTPC5_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmTPC6_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
WREG32(mmTPC7_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
}
static void gaudi_stop_nic_qmans(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
/* Stop upper CPs of QMANs */
if (gaudi->hw_cap_initialized & HW_CAP_NIC0)
WREG32(mmNIC0_QM0_GLBL_CFG1,
NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
if (gaudi->hw_cap_initialized & HW_CAP_NIC1)
WREG32(mmNIC0_QM1_GLBL_CFG1,
NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
if (gaudi->hw_cap_initialized & HW_CAP_NIC2)
WREG32(mmNIC1_QM0_GLBL_CFG1,
NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
if (gaudi->hw_cap_initialized & HW_CAP_NIC3)
WREG32(mmNIC1_QM1_GLBL_CFG1,
NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
if (gaudi->hw_cap_initialized & HW_CAP_NIC4)
WREG32(mmNIC2_QM0_GLBL_CFG1,
NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
if (gaudi->hw_cap_initialized & HW_CAP_NIC5)
WREG32(mmNIC2_QM1_GLBL_CFG1,
NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
if (gaudi->hw_cap_initialized & HW_CAP_NIC6)
WREG32(mmNIC3_QM0_GLBL_CFG1,
NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
if (gaudi->hw_cap_initialized & HW_CAP_NIC7)
WREG32(mmNIC3_QM1_GLBL_CFG1,
NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
if (gaudi->hw_cap_initialized & HW_CAP_NIC8)
WREG32(mmNIC4_QM0_GLBL_CFG1,
NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
if (gaudi->hw_cap_initialized & HW_CAP_NIC9)
WREG32(mmNIC4_QM1_GLBL_CFG1,
NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
}
static void gaudi_pci_dma_stall(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
return;
WREG32(mmDMA0_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
WREG32(mmDMA1_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
WREG32(mmDMA5_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
}
static void gaudi_hbm_dma_stall(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
return;
WREG32(mmDMA2_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
WREG32(mmDMA3_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
WREG32(mmDMA4_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
WREG32(mmDMA6_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
WREG32(mmDMA7_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
}
static void gaudi_mme_stall(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
return;
/* WA for H3-1800 bug: do ACC and SBAB writes twice */
WREG32(mmMME0_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
WREG32(mmMME0_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
WREG32(mmMME0_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
WREG32(mmMME0_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
WREG32(mmMME1_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
WREG32(mmMME1_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
WREG32(mmMME1_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
WREG32(mmMME1_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
WREG32(mmMME2_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
WREG32(mmMME2_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
WREG32(mmMME2_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
WREG32(mmMME2_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
WREG32(mmMME3_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
WREG32(mmMME3_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
WREG32(mmMME3_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
WREG32(mmMME3_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
}
static void gaudi_tpc_stall(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
return;
WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
}
static void gaudi_disable_clock_gating(struct hl_device *hdev)
{
u32 qman_offset;
int i;
if (hdev->asic_prop.fw_security_enabled)
return;
for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
WREG32(mmDMA0_QM_CGM_CFG + qman_offset, 0);
WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, 0);
qman_offset += (mmDMA1_QM_CGM_CFG - mmDMA0_QM_CGM_CFG);
}
WREG32(mmMME0_QM_CGM_CFG, 0);
WREG32(mmMME0_QM_CGM_CFG1, 0);
WREG32(mmMME2_QM_CGM_CFG, 0);
WREG32(mmMME2_QM_CGM_CFG1, 0);
for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
WREG32(mmTPC0_QM_CGM_CFG + qman_offset, 0);
WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset, 0);
qman_offset += (mmTPC1_QM_CGM_CFG - mmTPC0_QM_CGM_CFG);
}
}
static void gaudi_enable_timestamp(struct hl_device *hdev)
{
/* Disable the timestamp counter */
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
/* Zero the lower/upper parts of the 64-bit counter */
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
/* Enable the counter */
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
}
static void gaudi_disable_timestamp(struct hl_device *hdev)
{
/* Disable the timestamp counter */
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
}
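/* Stop and disable all engines before reset: stop the QMANs, stall the engine
 * cores, disable the QMANs and the timestamp counter, and finally disable MSI.
 * When the reset is performed by firmware, only MSI is disabled.
 */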
static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
u32 wait_timeout_ms;
if (hdev->pldm)
wait_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
else
wait_timeout_ms = GAUDI_RESET_WAIT_MSEC;
if (fw_reset)
goto skip_engines;
gaudi_stop_nic_qmans(hdev);
gaudi_stop_mme_qmans(hdev);
gaudi_stop_tpc_qmans(hdev);
gaudi_stop_hbm_dma_qmans(hdev);
gaudi_stop_pci_dma_qmans(hdev);
msleep(wait_timeout_ms);
gaudi_pci_dma_stall(hdev);
gaudi_hbm_dma_stall(hdev);
gaudi_tpc_stall(hdev);
gaudi_mme_stall(hdev);
msleep(wait_timeout_ms);
gaudi_disable_nic_qmans(hdev);
gaudi_disable_mme_qmans(hdev);
gaudi_disable_tpc_qmans(hdev);
gaudi_disable_hbm_dma_qmans(hdev);
gaudi_disable_pci_dma_qmans(hdev);
gaudi_disable_timestamp(hdev);
skip_engines:
gaudi_disable_msi(hdev);
}
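/* Initialize the device MMU: program the hop0 address of every ASID, set up
 * the STLB cache invalidation registers, invalidate the MMU cache and enable
 * the MMU.
 */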
static int gaudi_mmu_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi_device *gaudi = hdev->asic_specific;
u64 hop0_addr;
int rc, i;
if (gaudi->hw_cap_initialized & HW_CAP_MMU)
return 0;
for (i = 0 ; i < prop->max_asid ; i++) {
hop0_addr = prop->mmu_pgt_addr +
(i * prop->mmu_hop_table_size);
rc = gaudi_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
if (rc) {
dev_err(hdev->dev,
"failed to set hop0 addr for asid %d\n", i);
return rc;
}
}
	/* Init the MMU cache management page */
WREG32(mmSTLB_CACHE_INV_BASE_39_8, prop->mmu_cache_mng_addr >> 8);
WREG32(mmSTLB_CACHE_INV_BASE_49_40, prop->mmu_cache_mng_addr >> 40);
/* mem cache invalidation */
WREG32(mmSTLB_MEM_CACHE_INVALIDATION, 1);
rc = hl_mmu_invalidate_cache(hdev, true, 0);
if (rc)
return rc;
WREG32(mmMMU_UP_MMU_ENABLE, 1);
WREG32(mmMMU_UP_SPI_MASK, 0xF);
WREG32(mmSTLB_HOP_CONFIGURATION, 0x30440);
/*
* The H/W expects the first PI after init to be 1. After wraparound
* we'll write 0.
*/
gaudi->mmu_cache_inv_pi = 1;
gaudi->hw_cap_initialized |= HW_CAP_MMU;
return 0;
}
static int gaudi_load_firmware_to_device(struct hl_device *hdev)
{
void __iomem *dst;
dst = hdev->pcie_bar[HBM_BAR_ID] + LINUX_FW_OFFSET;
return hl_fw_load_fw_to_device(hdev, GAUDI_LINUX_FW_FILE, dst, 0, 0);
}
static int gaudi_load_boot_fit_to_device(struct hl_device *hdev)
{
void __iomem *dst;
dst = hdev->pcie_bar[SRAM_BAR_ID] + BOOT_FIT_SRAM_OFFSET;
return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst, 0, 0);
}
static void gaudi_init_dynamic_firmware_loader(struct hl_device *hdev)
{
struct dynamic_fw_load_mgr *dynamic_loader;
struct cpu_dyn_regs *dyn_regs;
dynamic_loader = &hdev->fw_loader.dynamic_loader;
	/*
	 * Update initial values for a few specific dynamic registers. Before
	 * the first descriptor is read from the FW, these values must be
	 * hard-coded. In later protocol stages they are updated automatically
	 * from the FW descriptor, so the data there is always up-to-date.
	 */
dyn_regs = &dynamic_loader->comm_desc.cpu_dyn_regs;
dyn_regs->kmd_msg_to_cpu =
cpu_to_le32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU);
dyn_regs->cpu_cmd_status_to_host =
cpu_to_le32(mmCPU_CMD_STATUS_TO_HOST);
dynamic_loader->wait_for_bl_timeout = GAUDI_WAIT_FOR_BL_TIMEOUT_USEC;
}
static void gaudi_init_static_firmware_loader(struct hl_device *hdev)
{
struct static_fw_load_mgr *static_loader;
static_loader = &hdev->fw_loader.static_loader;
static_loader->preboot_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
static_loader->boot_fit_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
static_loader->kmd_msg_to_cpu_reg = mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU;
static_loader->cpu_cmd_status_to_host_reg = mmCPU_CMD_STATUS_TO_HOST;
static_loader->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
static_loader->cpu_boot_dev_status0_reg = mmCPU_BOOT_DEV_STS0;
static_loader->cpu_boot_dev_status1_reg = mmCPU_BOOT_DEV_STS1;
static_loader->boot_err0_reg = mmCPU_BOOT_ERR0;
static_loader->boot_err1_reg = mmCPU_BOOT_ERR1;
static_loader->preboot_version_offset_reg = mmPREBOOT_VER_OFFSET;
static_loader->boot_fit_version_offset_reg = mmUBOOT_VER_OFFSET;
static_loader->sram_offset_mask = ~(lower_32_bits(SRAM_BASE_ADDR));
static_loader->cpu_reset_wait_msec = hdev->pldm ?
GAUDI_PLDM_RESET_WAIT_MSEC :
GAUDI_CPU_RESET_WAIT_MSEC;
}
static void gaudi_init_firmware_preload_params(struct hl_device *hdev)
{
struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
pre_fw_load->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
pre_fw_load->sts_boot_dev_sts0_reg = mmCPU_BOOT_DEV_STS0;
pre_fw_load->sts_boot_dev_sts1_reg = mmCPU_BOOT_DEV_STS1;
pre_fw_load->boot_err0_reg = mmCPU_BOOT_ERR0;
pre_fw_load->boot_err1_reg = mmCPU_BOOT_ERR1;
pre_fw_load->wait_for_preboot_timeout = GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC;
}
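/* Fill the common firmware loader fields and initialize either the dynamic or
 * the static loader, depending on the dynamic_fw_load property.
 */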
static void gaudi_init_firmware_loader(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
/* fill common fields */
fw_loader->fw_comp_loaded = FW_TYPE_NONE;
fw_loader->boot_fit_img.image_name = GAUDI_BOOT_FIT_FILE;
fw_loader->linux_img.image_name = GAUDI_LINUX_FW_FILE;
fw_loader->cpu_timeout = GAUDI_CPU_TIMEOUT_USEC;
fw_loader->boot_fit_timeout = GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC;
fw_loader->skip_bmc = !hdev->bmc_enable;
fw_loader->sram_bar_id = SRAM_BAR_ID;
fw_loader->dram_bar_id = HBM_BAR_ID;
if (prop->dynamic_fw_load)
gaudi_init_dynamic_firmware_loader(hdev);
else
gaudi_init_static_firmware_loader(hdev);
}
static int gaudi_init_cpu(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
int rc;
if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
return 0;
if (gaudi->hw_cap_initialized & HW_CAP_CPU)
return 0;
	/*
	 * The device CPU works with 40-bit addresses.
	 * This register sets the extension to 50 bits.
	 */
if (!hdev->asic_prop.fw_security_enabled)
WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr);
rc = hl_fw_init_cpu(hdev);
if (rc)
return rc;
gaudi->hw_cap_initialized |= HW_CAP_CPU;
return 0;
}
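/* Configure the device CPU queues (PQ, EQ and the CPU-accessible region),
 * notify the device CPU and wait for it to report it is ready for the host.
 */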
static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi_device *gaudi = hdev->asic_specific;
u32 status, irq_handler_offset;
struct hl_eq *eq;
struct hl_hw_queue *cpu_pq =
&hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
int err;
if (!hdev->cpu_queues_enable)
return 0;
if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q)
return 0;
eq = &hdev->event_queue;
WREG32(mmCPU_IF_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
WREG32(mmCPU_IF_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
WREG32(mmCPU_IF_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
WREG32(mmCPU_IF_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
WREG32(mmCPU_IF_CQ_BASE_ADDR_LOW,
lower_32_bits(hdev->cpu_accessible_dma_address));
WREG32(mmCPU_IF_CQ_BASE_ADDR_HIGH,
upper_32_bits(hdev->cpu_accessible_dma_address));
WREG32(mmCPU_IF_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
WREG32(mmCPU_IF_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
WREG32(mmCPU_IF_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
/* Used for EQ CI */
WREG32(mmCPU_IF_EQ_RD_OFFS, 0);
WREG32(mmCPU_IF_PF_PQ_PI, 0);
WREG32(mmCPU_IF_QUEUE_INIT, PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI);
irq_handler_offset = prop->gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
le32_to_cpu(dyn_regs->gic_host_pi_upd_irq);
WREG32(irq_handler_offset,
gaudi_irq_map_table[GAUDI_EVENT_PI_UPDATE].cpu_id);
err = hl_poll_timeout(
hdev,
mmCPU_IF_QUEUE_INIT,
status,
(status == PQ_INIT_STATUS_READY_FOR_HOST),
1000,
cpu_timeout);
if (err) {
dev_err(hdev->dev,
"Failed to communicate with Device CPU (CPU-CP timeout)\n");
return -EIO;
}
/* update FW application security bits */
if (prop->fw_cpu_boot_dev_sts0_valid)
prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0);
if (prop->fw_cpu_boot_dev_sts1_valid)
prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1);
gaudi->hw_cap_initialized |= HW_CAP_CPU_Q;
return 0;
}
static void gaudi_pre_hw_init(struct hl_device *hdev)
{
/* Perform read from the device to make sure device is up */
RREG32(mmHW_STATE);
if (!hdev->asic_prop.fw_security_enabled) {
/* Set the access through PCI bars (Linux driver only) as
* secured
*/
WREG32(mmPCIE_WRAP_LBW_PROT_OVR,
(PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK |
PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK));
/* Perform read to flush the waiting writes to ensure
* configuration was set in the device
*/
RREG32(mmPCIE_WRAP_LBW_PROT_OVR);
}
/*
* Let's mark in the H/W that we have reached this point. We check
* this value in the reset_before_init function to understand whether
* we need to reset the chip before doing H/W init. This register is
* cleared by the H/W upon H/W reset
*/
WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
}
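/* Main H/W initialization flow: map the HBM BAR to the DRAM base, bring up the
 * device CPU, initialize the scramblers, golden registers, MMU, security and
 * all QMANs, then enable MSI and the CPU queues.
 */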
static int gaudi_hw_init(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
int rc;
gaudi_pre_hw_init(hdev);
/* If iATU is done by FW, the HBM bar ALWAYS points to DRAM_PHYS_BASE.
* So we set it here and if anyone tries to move it later to
* a different address, there will be an error
*/
if (hdev->asic_prop.iatu_done_by_fw)
gaudi->hbm_bar_cur_addr = DRAM_PHYS_BASE;
	/*
	 * Before pushing u-boot/Linux to the device, the HBM BAR must be set
	 * to the DRAM base address.
	 */
if (gaudi_set_hbm_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
dev_err(hdev->dev,
"failed to map HBM bar to DRAM base address\n");
return -EIO;
}
rc = gaudi_init_cpu(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize CPU\n");
return rc;
}
/* In case the clock gating was enabled in preboot we need to disable
* it here before touching the MME/TPC registers.
*/
gaudi_disable_clock_gating(hdev);
/* SRAM scrambler must be initialized after CPU is running from HBM */
gaudi_init_scrambler_sram(hdev);
/* This is here just in case we are working without CPU */
gaudi_init_scrambler_hbm(hdev);
gaudi_init_golden_registers(hdev);
rc = gaudi_mmu_init(hdev);
if (rc)
return rc;
gaudi_init_security(hdev);
gaudi_init_pci_dma_qmans(hdev);
gaudi_init_hbm_dma_qmans(hdev);
gaudi_init_mme_qmans(hdev);
gaudi_init_tpc_qmans(hdev);
gaudi_init_nic_qmans(hdev);
gaudi_enable_timestamp(hdev);
/* MSI must be enabled before CPU queues and NIC are initialized */
rc = gaudi_enable_msi(hdev);
if (rc)
goto disable_queues;
/* must be called after MSI was enabled */
rc = gaudi_init_cpu_queues(hdev, GAUDI_CPU_TIMEOUT_USEC);
if (rc) {
dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
rc);
goto disable_msi;
}
/* Perform read from the device to flush all configuration */
RREG32(mmHW_STATE);
return 0;
disable_msi:
gaudi_disable_msi(hdev);
disable_queues:
gaudi_disable_mme_qmans(hdev);
gaudi_disable_pci_dma_qmans(hdev);
return rc;
}
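/* Perform a hard reset, either by the driver or by the firmware, wait for it
 * to complete and clear the relevant hw_cap_initialized bits.
 */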
static int gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 status, reset_timeout_ms, cpu_timeout_ms, irq_handler_offset;
struct gaudi_device *gaudi = hdev->asic_specific;
bool driver_performs_reset;
if (!hard_reset) {
dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n");
return 0;
}
if (hdev->pldm) {
reset_timeout_ms = GAUDI_PLDM_HRESET_TIMEOUT_MSEC;
cpu_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
} else {
reset_timeout_ms = GAUDI_RESET_TIMEOUT_MSEC;
cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
}
if (fw_reset) {
dev_dbg(hdev->dev,
"Firmware performs HARD reset, going to wait %dms\n",
reset_timeout_ms);
goto skip_reset;
}
driver_performs_reset = !!(!hdev->asic_prop.fw_security_enabled &&
!hdev->asic_prop.hard_reset_done_by_fw);
/* Set device to handle FLR by H/W as we will put the device CPU to
* halt mode
*/
if (driver_performs_reset)
WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK |
PCIE_AUX_FLR_CTRL_INT_MASK_MASK));
	/* If Linux is loaded on the device CPU, we need to communicate with it
	 * via the GIC. Otherwise, we need to use COMMS or, in case of old
	 * F/Ws, the MSG_TO_CPU registers.
	 */
if (hdev->fw_loader.fw_comp_loaded & FW_TYPE_LINUX) {
irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
le32_to_cpu(dyn_regs->gic_host_halt_irq);
WREG32(irq_handler_offset,
gaudi_irq_map_table[GAUDI_EVENT_HALT_MACHINE].cpu_id);
		/* This is a hail-mary attempt to revive the card on the small chance that
		 * the f/w has experienced a watchdog event, which caused it to return to
		 * preboot. In that case, triggering a reset through the GIC won't help; we
		 * need to trigger the reset as if Linux wasn't loaded.
		 *
		 * We do it only if the reset cause was a heartbeat (HB) failure, because
		 * that is the indication of such an event.
		 *
		 * If the watchdog hasn't expired but we still got an HB failure, this does
		 * no damage.
		 */
if (hdev->reset_info.curr_reset_cause == HL_RESET_CAUSE_HEARTBEAT) {
if (hdev->asic_prop.hard_reset_done_by_fw)
hl_fw_ask_hard_reset_without_linux(hdev);
else
hl_fw_ask_halt_machine_without_linux(hdev);
}
} else {
if (hdev->asic_prop.hard_reset_done_by_fw)
hl_fw_ask_hard_reset_without_linux(hdev);
else
hl_fw_ask_halt_machine_without_linux(hdev);
}
if (driver_performs_reset) {
/* Configure the reset registers. Must be done as early as
* possible in case we fail during H/W initialization
*/
WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H,
(CFG_RST_H_DMA_MASK |
CFG_RST_H_MME_MASK |
CFG_RST_H_SM_MASK |
CFG_RST_H_TPC_7_MASK));
WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK);
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H,
(CFG_RST_H_HBM_MASK |
CFG_RST_H_TPC_7_MASK |
CFG_RST_H_NIC_MASK |
CFG_RST_H_SM_MASK |
CFG_RST_H_DMA_MASK |
CFG_RST_H_MME_MASK |
CFG_RST_H_CPU_MASK |
CFG_RST_H_MMU_MASK));
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L,
(CFG_RST_L_IF_MASK |
CFG_RST_L_PSOC_MASK |
CFG_RST_L_TPC_MASK));
msleep(cpu_timeout_ms);
/* Tell ASIC not to re-initialize PCIe */
WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);
/* Restart BTL/BLR upon hard-reset */
WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
dev_dbg(hdev->dev,
"Issued HARD reset command, going to wait %dms\n",
reset_timeout_ms);
} else {
dev_dbg(hdev->dev,
"Firmware performs HARD reset, going to wait %dms\n",
reset_timeout_ms);
}
skip_reset:
/*
* After hard reset, we can't poll the BTM_FSM register because the PSOC
* itself is in reset. Need to wait until the reset is deasserted
*/
msleep(reset_timeout_ms);
status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK) {
dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", status);
return -ETIMEDOUT;
}
if (gaudi) {
gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | HW_CAP_HBM |
HW_CAP_PCI_DMA | HW_CAP_MME | HW_CAP_TPC_MASK |
HW_CAP_HBM_DMA | HW_CAP_PLL | HW_CAP_NIC_MASK |
HW_CAP_MMU | HW_CAP_SRAM_SCRAMBLER |
HW_CAP_HBM_SCRAMBLER);
memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));
hdev->device_cpu_is_halted = false;
}
return 0;
}
static int gaudi_suspend(struct hl_device *hdev)
{
int rc;
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
return rc;
}
static int gaudi_resume(struct hl_device *hdev)
{
return gaudi_init_iatu(hdev);
}
static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int rc;
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
return rc;
}
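/* Ring the doorbell of the given H/W queue by writing the new PI value to the
 * matching QMAN PQ_PI register. For the CPU queue, also notify the device CPU
 * through its PI-update interrupt.
 */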
static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 db_reg_offset, db_value, dma_qm_offset, q_off, irq_handler_offset;
struct gaudi_device *gaudi = hdev->asic_specific;
bool invalid_queue = false;
int dma_id;
switch (hw_queue_id) {
case GAUDI_QUEUE_ID_DMA_0_0...GAUDI_QUEUE_ID_DMA_0_3:
dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_1];
dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
q_off = dma_qm_offset + (hw_queue_id & 0x3) * 4;
db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_DMA_1_0...GAUDI_QUEUE_ID_DMA_1_3:
dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_2];
dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
q_off = dma_qm_offset + (hw_queue_id & 0x3) * 4;
db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_DMA_2_0...GAUDI_QUEUE_ID_DMA_2_3:
dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_1];
dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_DMA_3_0...GAUDI_QUEUE_ID_DMA_3_3:
dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_2];
dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_DMA_4_0...GAUDI_QUEUE_ID_DMA_4_3:
dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_3];
dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_DMA_5_0...GAUDI_QUEUE_ID_DMA_5_3:
dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_4];
dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_DMA_6_0...GAUDI_QUEUE_ID_DMA_6_3:
dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_5];
dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_DMA_7_0...GAUDI_QUEUE_ID_DMA_7_3:
dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_6];
dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_CPU_PQ:
if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q)
db_reg_offset = mmCPU_IF_PF_PQ_PI;
else
invalid_queue = true;
break;
case GAUDI_QUEUE_ID_MME_0_0:
db_reg_offset = mmMME2_QM_PQ_PI_0;
break;
case GAUDI_QUEUE_ID_MME_0_1:
db_reg_offset = mmMME2_QM_PQ_PI_1;
break;
case GAUDI_QUEUE_ID_MME_0_2:
db_reg_offset = mmMME2_QM_PQ_PI_2;
break;
case GAUDI_QUEUE_ID_MME_0_3:
db_reg_offset = mmMME2_QM_PQ_PI_3;
break;
case GAUDI_QUEUE_ID_MME_1_0:
db_reg_offset = mmMME0_QM_PQ_PI_0;
break;
case GAUDI_QUEUE_ID_MME_1_1:
db_reg_offset = mmMME0_QM_PQ_PI_1;
break;
case GAUDI_QUEUE_ID_MME_1_2:
db_reg_offset = mmMME0_QM_PQ_PI_2;
break;
case GAUDI_QUEUE_ID_MME_1_3:
db_reg_offset = mmMME0_QM_PQ_PI_3;
break;
case GAUDI_QUEUE_ID_TPC_0_0:
db_reg_offset = mmTPC0_QM_PQ_PI_0;
break;
case GAUDI_QUEUE_ID_TPC_0_1:
db_reg_offset = mmTPC0_QM_PQ_PI_1;
break;
case GAUDI_QUEUE_ID_TPC_0_2:
db_reg_offset = mmTPC0_QM_PQ_PI_2;
break;
case GAUDI_QUEUE_ID_TPC_0_3:
db_reg_offset = mmTPC0_QM_PQ_PI_3;
break;
case GAUDI_QUEUE_ID_TPC_1_0:
db_reg_offset = mmTPC1_QM_PQ_PI_0;
break;
case GAUDI_QUEUE_ID_TPC_1_1:
db_reg_offset = mmTPC1_QM_PQ_PI_1;
break;
case GAUDI_QUEUE_ID_TPC_1_2:
db_reg_offset = mmTPC1_QM_PQ_PI_2;
break;
case GAUDI_QUEUE_ID_TPC_1_3:
db_reg_offset = mmTPC1_QM_PQ_PI_3;
break;
case GAUDI_QUEUE_ID_TPC_2_0:
db_reg_offset = mmTPC2_QM_PQ_PI_0;
break;
case GAUDI_QUEUE_ID_TPC_2_1:
db_reg_offset = mmTPC2_QM_PQ_PI_1;
break;
case GAUDI_QUEUE_ID_TPC_2_2:
db_reg_offset = mmTPC2_QM_PQ_PI_2;
break;
case GAUDI_QUEUE_ID_TPC_2_3:
db_reg_offset = mmTPC2_QM_PQ_PI_3;
break;
case GAUDI_QUEUE_ID_TPC_3_0:
db_reg_offset = mmTPC3_QM_PQ_PI_0;
break;
case GAUDI_QUEUE_ID_TPC_3_1:
db_reg_offset = mmTPC3_QM_PQ_PI_1;
break;
case GAUDI_QUEUE_ID_TPC_3_2:
db_reg_offset = mmTPC3_QM_PQ_PI_2;
break;
case GAUDI_QUEUE_ID_TPC_3_3:
db_reg_offset = mmTPC3_QM_PQ_PI_3;
break;
case GAUDI_QUEUE_ID_TPC_4_0:
db_reg_offset = mmTPC4_QM_PQ_PI_0;
break;
case GAUDI_QUEUE_ID_TPC_4_1:
db_reg_offset = mmTPC4_QM_PQ_PI_1;
break;
case GAUDI_QUEUE_ID_TPC_4_2:
db_reg_offset = mmTPC4_QM_PQ_PI_2;
break;
case GAUDI_QUEUE_ID_TPC_4_3:
db_reg_offset = mmTPC4_QM_PQ_PI_3;
break;
case GAUDI_QUEUE_ID_TPC_5_0:
db_reg_offset = mmTPC5_QM_PQ_PI_0;
break;
case GAUDI_QUEUE_ID_TPC_5_1:
db_reg_offset = mmTPC5_QM_PQ_PI_1;
break;
case GAUDI_QUEUE_ID_TPC_5_2:
db_reg_offset = mmTPC5_QM_PQ_PI_2;
break;
case GAUDI_QUEUE_ID_TPC_5_3:
db_reg_offset = mmTPC5_QM_PQ_PI_3;
break;
case GAUDI_QUEUE_ID_TPC_6_0:
db_reg_offset = mmTPC6_QM_PQ_PI_0;
break;
case GAUDI_QUEUE_ID_TPC_6_1:
db_reg_offset = mmTPC6_QM_PQ_PI_1;
break;
case GAUDI_QUEUE_ID_TPC_6_2:
db_reg_offset = mmTPC6_QM_PQ_PI_2;
break;
case GAUDI_QUEUE_ID_TPC_6_3:
db_reg_offset = mmTPC6_QM_PQ_PI_3;
break;
case GAUDI_QUEUE_ID_TPC_7_0:
db_reg_offset = mmTPC7_QM_PQ_PI_0;
break;
case GAUDI_QUEUE_ID_TPC_7_1:
db_reg_offset = mmTPC7_QM_PQ_PI_1;
break;
case GAUDI_QUEUE_ID_TPC_7_2:
db_reg_offset = mmTPC7_QM_PQ_PI_2;
break;
case GAUDI_QUEUE_ID_TPC_7_3:
db_reg_offset = mmTPC7_QM_PQ_PI_3;
break;
case GAUDI_QUEUE_ID_NIC_0_0...GAUDI_QUEUE_ID_NIC_0_3:
if (!(gaudi->hw_cap_initialized & HW_CAP_NIC0))
invalid_queue = true;
q_off = ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmNIC0_QM0_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_NIC_1_0...GAUDI_QUEUE_ID_NIC_1_3:
if (!(gaudi->hw_cap_initialized & HW_CAP_NIC1))
invalid_queue = true;
q_off = ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmNIC0_QM1_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_NIC_2_0...GAUDI_QUEUE_ID_NIC_2_3:
if (!(gaudi->hw_cap_initialized & HW_CAP_NIC2))
invalid_queue = true;
q_off = ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmNIC1_QM0_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_NIC_3_0...GAUDI_QUEUE_ID_NIC_3_3:
if (!(gaudi->hw_cap_initialized & HW_CAP_NIC3))
invalid_queue = true;
q_off = ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmNIC1_QM1_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_NIC_4_0...GAUDI_QUEUE_ID_NIC_4_3:
if (!(gaudi->hw_cap_initialized & HW_CAP_NIC4))
invalid_queue = true;
q_off = ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmNIC2_QM0_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_NIC_5_0...GAUDI_QUEUE_ID_NIC_5_3:
if (!(gaudi->hw_cap_initialized & HW_CAP_NIC5))
invalid_queue = true;
q_off = ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmNIC2_QM1_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_NIC_6_0...GAUDI_QUEUE_ID_NIC_6_3:
if (!(gaudi->hw_cap_initialized & HW_CAP_NIC6))
invalid_queue = true;
q_off = ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmNIC3_QM0_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_NIC_7_0...GAUDI_QUEUE_ID_NIC_7_3:
if (!(gaudi->hw_cap_initialized & HW_CAP_NIC7))
invalid_queue = true;
q_off = ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmNIC3_QM1_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_NIC_8_0...GAUDI_QUEUE_ID_NIC_8_3:
if (!(gaudi->hw_cap_initialized & HW_CAP_NIC8))
invalid_queue = true;
q_off = ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmNIC4_QM0_PQ_PI_0 + q_off;
break;
case GAUDI_QUEUE_ID_NIC_9_0...GAUDI_QUEUE_ID_NIC_9_3:
if (!(gaudi->hw_cap_initialized & HW_CAP_NIC9))
invalid_queue = true;
q_off = ((hw_queue_id - 1) & 0x3) * 4;
db_reg_offset = mmNIC4_QM1_PQ_PI_0 + q_off;
break;
default:
invalid_queue = true;
}
if (invalid_queue) {
/* Should never get here */
dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
hw_queue_id);
return;
}
db_value = pi;
/* ring the doorbell */
WREG32(db_reg_offset, db_value);
if (hw_queue_id == GAUDI_QUEUE_ID_CPU_PQ) {
/* make sure device CPU will read latest data from host */
mb();
irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
le32_to_cpu(dyn_regs->gic_host_pi_upd_irq);
WREG32(irq_handler_offset,
gaudi_irq_map_table[GAUDI_EVENT_PI_UPDATE].cpu_id);
}
}
static void gaudi_pqe_write(struct hl_device *hdev, __le64 *pqe,
struct hl_bd *bd)
{
__le64 *pbd = (__le64 *) bd;
	/* The QMANs are on host memory, so a simple copy suffices */
pqe[0] = pbd[0];
pqe[1] = pbd[1];
}
static void *gaudi_dma_alloc_coherent(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
dma_handle, flags);
/* Shift to the device's base physical address of host memory */
if (kernel_addr)
*dma_handle += HOST_PHYS_BASE;
return kernel_addr;
}
static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
	/* Subtract the device's base physical address of host memory */
dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;
dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
}
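/* Scrub the device HBM with the given value by issuing memset transactions on
 * all DMA channels in parallel, up to 2GB per channel at a time, and polling
 * each channel for completion.
 */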
static int gaudi_scrub_device_dram(struct hl_device *hdev, u64 val)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 cur_addr = prop->dram_user_base_address;
u32 chunk_size, busy;
int rc, dma_id;
while (cur_addr < prop->dram_end_address) {
for (dma_id = 0 ; dma_id < DMA_NUMBER_OF_CHANNELS ; dma_id++) {
u32 dma_offset = dma_id * DMA_CORE_OFFSET;
chunk_size =
min((u64)SZ_2G, prop->dram_end_address - cur_addr);
dev_dbg(hdev->dev,
"Doing HBM scrubbing for 0x%09llx - 0x%09llx\n",
cur_addr, cur_addr + chunk_size);
WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset,
lower_32_bits(val));
WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset,
upper_32_bits(val));
WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset,
lower_32_bits(cur_addr));
WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset,
upper_32_bits(cur_addr));
WREG32(mmDMA0_CORE_DST_TSIZE_0 + dma_offset,
chunk_size);
WREG32(mmDMA0_CORE_COMMIT + dma_offset,
((1 << DMA0_CORE_COMMIT_LIN_SHIFT) |
(1 << DMA0_CORE_COMMIT_MEM_SET_SHIFT)));
cur_addr += chunk_size;
if (cur_addr == prop->dram_end_address)
break;
}
for (dma_id = 0 ; dma_id < DMA_NUMBER_OF_CHANNELS ; dma_id++) {
u32 dma_offset = dma_id * DMA_CORE_OFFSET;
rc = hl_poll_timeout(
hdev,
mmDMA0_CORE_STS0 + dma_offset,
busy,
((busy & DMA0_CORE_STS0_BUSY_MASK) == 0),
1000,
HBM_SCRUBBING_TIMEOUT_US);
if (rc) {
dev_err(hdev->dev,
"DMA Timeout during HBM scrubbing of DMA #%d\n",
dma_id);
return -EIO;
}
}
}
return 0;
}
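/* Scrub the device memory: wait for the device to become idle, then clear the
 * user SRAM and the HBM with the configured scrub value.
 */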
static int gaudi_scrub_device_mem(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 wait_to_idle_time = hdev->pdev ? HBM_SCRUBBING_TIMEOUT_US :
min_t(u64, HBM_SCRUBBING_TIMEOUT_US * 10, HL_SIM_MAX_TIMEOUT_US);
u64 addr, size, val = hdev->memory_scrub_val;
ktime_t timeout;
int rc = 0;
if (!hdev->memory_scrub)
return 0;
timeout = ktime_add_us(ktime_get(), wait_to_idle_time);
while (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) {
if (ktime_compare(ktime_get(), timeout) > 0) {
dev_err(hdev->dev, "waiting for idle timeout\n");
return -ETIMEDOUT;
}
usleep_range((1000 >> 2) + 1, 1000);
}
/* Scrub SRAM */
addr = prop->sram_user_base_address;
size = hdev->pldm ? 0x10000 : prop->sram_size - SRAM_USER_BASE_OFFSET;
dev_dbg(hdev->dev, "Scrubbing SRAM: 0x%09llx - 0x%09llx val: 0x%llx\n",
addr, addr + size, val);
rc = gaudi_memset_device_memory(hdev, addr, size, val);
if (rc) {
dev_err(hdev->dev, "Failed to clear SRAM (%d)\n", rc);
return rc;
}
/* Scrub HBM using all DMA channels in parallel */
rc = gaudi_scrub_device_dram(hdev, val);
if (rc) {
dev_err(hdev->dev, "Failed to clear HBM (%d)\n", rc);
return rc;
}
return 0;
}
static void *gaudi_get_int_queue_base(struct hl_device *hdev,
u32 queue_id, dma_addr_t *dma_handle,
u16 *queue_len)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct gaudi_internal_qman_info *q;
if (queue_id >= GAUDI_QUEUE_ID_SIZE ||
gaudi_queue_type[queue_id] != QUEUE_TYPE_INT) {
dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
return NULL;
}
q = &gaudi->internal_qmans[queue_id];
*dma_handle = q->pq_dma_addr;
*queue_len = q->pq_size / QMAN_PQ_ENTRY_SIZE;
return q->pq_kernel_addr;
}
static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg,
u16 len, u32 timeout, u64 *result)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) {
if (result)
*result = 0;
return 0;
}
if (!timeout)
timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC;
return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len,
timeout, result);
}
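/* Sanity-test an external H/W queue by sending a MSG_PROT fence packet and
 * polling host memory for the fence value to be written back.
 */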
static int gaudi_test_queue(struct hl_device *hdev, u32 hw_queue_id)
{
struct packet_msg_prot *fence_pkt;
dma_addr_t pkt_dma_addr;
u32 fence_val, tmp, timeout_usec;
dma_addr_t fence_dma_addr;
u32 *fence_ptr;
int rc;
if (hdev->pldm)
timeout_usec = GAUDI_PLDM_TEST_QUEUE_WAIT_USEC;
else
timeout_usec = GAUDI_TEST_QUEUE_WAIT_USEC;
fence_val = GAUDI_QMAN0_FENCE_VAL;
fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
"Failed to allocate memory for H/W queue %d testing\n",
hw_queue_id);
return -ENOMEM;
}
*fence_ptr = 0;
fence_pkt = hl_asic_dma_pool_zalloc(hdev, sizeof(struct packet_msg_prot), GFP_KERNEL,
&pkt_dma_addr);
if (!fence_pkt) {
dev_err(hdev->dev,
"Failed to allocate packet for H/W queue %d testing\n",
hw_queue_id);
rc = -ENOMEM;
goto free_fence_ptr;
}
tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
fence_pkt->ctl = cpu_to_le32(tmp);
fence_pkt->value = cpu_to_le32(fence_val);
fence_pkt->addr = cpu_to_le64(fence_dma_addr);
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
sizeof(struct packet_msg_prot),
pkt_dma_addr);
if (rc) {
dev_err(hdev->dev,
"Failed to send fence packet to H/W queue %d\n",
hw_queue_id);
goto free_pkt;
}
rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
1000, timeout_usec, true);
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
if (rc == -ETIMEDOUT) {
dev_err(hdev->dev,
"H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
rc = -EIO;
}
free_pkt:
hl_asic_dma_pool_free(hdev, (void *) fence_pkt, pkt_dma_addr);
free_fence_ptr:
hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
return rc;
}
static int gaudi_test_cpu_queue(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
	/*
	 * Check the capability here because send_cpu_message() won't update
	 * the result value if the capability isn't set.
	 */
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
return hl_fw_test_cpu_queue(hdev);
}
static int gaudi_test_queues(struct hl_device *hdev)
{
int i, rc, ret_val = 0;
for (i = 0 ; i < hdev->asic_prop.max_queues ; i++) {
if (hdev->asic_prop.hw_queues_props[i].type == QUEUE_TYPE_EXT) {
rc = gaudi_test_queue(hdev, i);
if (rc)
ret_val = -EINVAL;
}
}
rc = gaudi_test_cpu_queue(hdev);
if (rc)
ret_val = -EINVAL;
return ret_val;
}
static void *gaudi_dma_pool_zalloc(struct hl_device *hdev, size_t size,
gfp_t mem_flags, dma_addr_t *dma_handle)
{
void *kernel_addr;
if (size > GAUDI_DMA_POOL_BLK_SIZE)
return NULL;
kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
/* Shift to the device's base physical address of host memory */
if (kernel_addr)
*dma_handle += HOST_PHYS_BASE;
return kernel_addr;
}
static void gaudi_dma_pool_free(struct hl_device *hdev, void *vaddr,
dma_addr_t dma_addr)
{
	/* Subtract the device's base physical address of host memory */
dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;
dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
}
static void *gaudi_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
size_t size, dma_addr_t *dma_handle)
{
return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
}
static void gaudi_cpu_accessible_dma_pool_free(struct hl_device *hdev,
size_t size, void *vaddr)
{
hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
}
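/* Compute the size of the LIN_DMA packets needed to cover a scatter-gather
 * table, merging contiguous DMA entries up to DMA_MAX_TRANSFER_SIZE.
 */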
static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
{
struct scatterlist *sg, *sg_next_iter;
u32 count, dma_desc_cnt;
u64 len, len_next;
dma_addr_t addr, addr_next;
dma_desc_cnt = 0;
for_each_sgtable_dma_sg(sgt, sg, count) {
len = sg_dma_len(sg);
addr = sg_dma_address(sg);
if (len == 0)
break;
while ((count + 1) < sgt->nents) {
sg_next_iter = sg_next(sg);
len_next = sg_dma_len(sg_next_iter);
addr_next = sg_dma_address(sg_next_iter);
if (len_next == 0)
break;
if ((addr + len == addr_next) &&
(len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
len += len_next;
count++;
sg = sg_next_iter;
} else {
break;
}
}
dma_desc_cnt++;
}
return dma_desc_cnt * sizeof(struct packet_lin_dma);
}
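/* Pin the user buffer referenced by a LIN_DMA packet (unless it is already
 * pinned), DMA-map it and add the resulting descriptor list size to the
 * patched CB size.
 */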
static int gaudi_pin_memory_before_cs(struct hl_device *hdev,
struct hl_cs_parser *parser,
struct packet_lin_dma *user_dma_pkt,
u64 addr, enum dma_data_direction dir)
{
struct hl_userptr *userptr;
int rc;
if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
parser->job_userptr_list, &userptr))
goto already_pinned;
userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
if (!userptr)
return -ENOMEM;
rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
userptr);
if (rc)
goto free_userptr;
list_add_tail(&userptr->job_node, parser->job_userptr_list);
rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir);
if (rc) {
dev_err(hdev->dev, "failed to map sgt with DMA region\n");
goto unpin_memory;
}
userptr->dma_mapped = true;
userptr->dir = dir;
already_pinned:
parser->patched_cb_size +=
gaudi_get_dma_desc_list_size(hdev, userptr->sgt);
return 0;
unpin_memory:
list_del(&userptr->job_node);
hl_unpin_host_memory(hdev, userptr);
free_userptr:
kfree(userptr);
return rc;
}
static int gaudi_validate_dma_pkt_host(struct hl_device *hdev,
struct hl_cs_parser *parser,
struct packet_lin_dma *user_dma_pkt,
bool src_in_host)
{
enum dma_data_direction dir;
bool skip_host_mem_pin = false, user_memset;
u64 addr;
int rc = 0;
user_memset = (le32_to_cpu(user_dma_pkt->ctl) &
GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
if (src_in_host) {
if (user_memset)
skip_host_mem_pin = true;
dev_dbg(hdev->dev, "DMA direction is HOST --> DEVICE\n");
dir = DMA_TO_DEVICE;
addr = le64_to_cpu(user_dma_pkt->src_addr);
} else {
dev_dbg(hdev->dev, "DMA direction is DEVICE --> HOST\n");
dir = DMA_FROM_DEVICE;
addr = (le64_to_cpu(user_dma_pkt->dst_addr) &
GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
}
if (skip_host_mem_pin)
parser->patched_cb_size += sizeof(*user_dma_pkt);
else
rc = gaudi_pin_memory_before_cs(hdev, parser, user_dma_pkt,
addr, dir);
return rc;
}
static int gaudi_validate_dma_pkt_no_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser,
struct packet_lin_dma *user_dma_pkt)
{
bool src_in_host = false;
u64 dst_addr = (le64_to_cpu(user_dma_pkt->dst_addr) &
GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
dev_dbg(hdev->dev, "DMA packet details:\n");
dev_dbg(hdev->dev, "source == 0x%llx\n",
le64_to_cpu(user_dma_pkt->src_addr));
dev_dbg(hdev->dev, "destination == 0x%llx\n", dst_addr);
dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
/*
* Special handling for DMA with size 0. Bypass all validations
* because no transactions will be done except for WR_COMP, which
* is not a security issue
*/
if (!le32_to_cpu(user_dma_pkt->tsize)) {
parser->patched_cb_size += sizeof(*user_dma_pkt);
return 0;
}
if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3)
src_in_host = true;
return gaudi_validate_dma_pkt_host(hdev, parser, user_dma_pkt,
src_in_host);
}
static int gaudi_validate_load_and_exe_pkt(struct hl_device *hdev,
struct hl_cs_parser *parser,
struct packet_load_and_exe *user_pkt)
{
u32 cfg;
cfg = le32_to_cpu(user_pkt->cfg);
if (cfg & GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK) {
dev_err(hdev->dev,
"User not allowed to use Load and Execute\n");
return -EPERM;
}
parser->patched_cb_size += sizeof(struct packet_load_and_exe);
return 0;
}
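/* Walk the user CB packet by packet, reject packet types the user is not
 * allowed to submit and compute the required size of the patched CB.
 */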
static int gaudi_validate_cb(struct hl_device *hdev,
struct hl_cs_parser *parser, bool is_mmu)
{
u32 cb_parsed_length = 0;
int rc = 0;
parser->patched_cb_size = 0;
	/* user_cb_size is greater than 0, so the loop always executes */
while (cb_parsed_length < parser->user_cb_size) {
enum packet_id pkt_id;
u16 pkt_size;
struct gaudi_packet *user_pkt;
user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
pkt_id = (enum packet_id) (
(le64_to_cpu(user_pkt->header) &
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
if (!validate_packet_id(pkt_id)) {
dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
rc = -EINVAL;
break;
}
pkt_size = gaudi_packet_sizes[pkt_id];
cb_parsed_length += pkt_size;
if (cb_parsed_length > parser->user_cb_size) {
dev_err(hdev->dev,
"packet 0x%x is out of CB boundary\n", pkt_id);
rc = -EINVAL;
break;
}
switch (pkt_id) {
case PACKET_MSG_PROT:
dev_err(hdev->dev,
"User not allowed to use MSG_PROT\n");
rc = -EPERM;
break;
case PACKET_CP_DMA:
dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
rc = -EPERM;
break;
case PACKET_STOP:
dev_err(hdev->dev, "User not allowed to use STOP\n");
rc = -EPERM;
break;
case PACKET_WREG_BULK:
dev_err(hdev->dev,
"User not allowed to use WREG_BULK\n");
rc = -EPERM;
break;
case PACKET_LOAD_AND_EXE:
rc = gaudi_validate_load_and_exe_pkt(hdev, parser,
(struct packet_load_and_exe *) user_pkt);
break;
case PACKET_LIN_DMA:
parser->contains_dma_pkt = true;
if (is_mmu)
parser->patched_cb_size += pkt_size;
else
rc = gaudi_validate_dma_pkt_no_mmu(hdev, parser,
(struct packet_lin_dma *) user_pkt);
break;
case PACKET_WREG_32:
case PACKET_MSG_LONG:
case PACKET_MSG_SHORT:
case PACKET_REPEAT:
case PACKET_FENCE:
case PACKET_NOP:
case PACKET_ARB_POINT:
parser->patched_cb_size += pkt_size;
break;
default:
dev_err(hdev->dev, "Invalid packet header 0x%x\n",
pkt_id);
rc = -EINVAL;
break;
}
if (rc)
break;
}
	/*
	 * The new CB should have space at the end for 2 MSG_PROT packets and
	 * optional NOP padding:
	 * 1. Optional NOP padding for cacheline alignment
	 * 2. A packet that acts as a completion packet
	 * 3. A packet that generates an MSI interrupt
	 */
if (parser->completion)
parser->patched_cb_size += gaudi_get_patched_cb_extra_size(
parser->patched_cb_size);
return rc;
}
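/* Translate a user LIN_DMA packet into one or more LIN_DMA packets that use
 * the DMA addresses of the pinned host memory, keeping the user's WR_COMP
 * setting only on the last generated packet.
 */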
static int gaudi_patch_dma_packet(struct hl_device *hdev,
struct hl_cs_parser *parser,
struct packet_lin_dma *user_dma_pkt,
struct packet_lin_dma *new_dma_pkt,
u32 *new_dma_pkt_size)
{
struct hl_userptr *userptr;
struct scatterlist *sg, *sg_next_iter;
u32 count, dma_desc_cnt, user_wrcomp_en_mask, ctl;
u64 len, len_next;
dma_addr_t dma_addr, dma_addr_next;
u64 device_memory_addr, addr;
enum dma_data_direction dir;
struct sg_table *sgt;
bool src_in_host = false;
bool skip_host_mem_pin = false;
bool user_memset;
ctl = le32_to_cpu(user_dma_pkt->ctl);
if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3)
src_in_host = true;
user_memset = (ctl & GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
if (src_in_host) {
addr = le64_to_cpu(user_dma_pkt->src_addr);
device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
dir = DMA_TO_DEVICE;
if (user_memset)
skip_host_mem_pin = true;
} else {
addr = le64_to_cpu(user_dma_pkt->dst_addr);
device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
dir = DMA_FROM_DEVICE;
}
if ((!skip_host_mem_pin) &&
(!hl_userptr_is_pinned(hdev, addr,
le32_to_cpu(user_dma_pkt->tsize),
parser->job_userptr_list, &userptr))) {
dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
addr, user_dma_pkt->tsize);
return -EFAULT;
}
if ((user_memset) && (dir == DMA_TO_DEVICE)) {
memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
*new_dma_pkt_size = sizeof(*user_dma_pkt);
return 0;
}
user_wrcomp_en_mask = ctl & GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK;
sgt = userptr->sgt;
dma_desc_cnt = 0;
for_each_sgtable_dma_sg(sgt, sg, count) {
len = sg_dma_len(sg);
dma_addr = sg_dma_address(sg);
if (len == 0)
break;
while ((count + 1) < sgt->nents) {
sg_next_iter = sg_next(sg);
len_next = sg_dma_len(sg_next_iter);
dma_addr_next = sg_dma_address(sg_next_iter);
if (len_next == 0)
break;
if ((dma_addr + len == dma_addr_next) &&
(len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
len += len_next;
count++;
sg = sg_next_iter;
} else {
break;
}
}
ctl = le32_to_cpu(user_dma_pkt->ctl);
if (likely(dma_desc_cnt))
ctl &= ~GAUDI_PKT_CTL_EB_MASK;
ctl &= ~GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK;
new_dma_pkt->ctl = cpu_to_le32(ctl);
new_dma_pkt->tsize = cpu_to_le32(len);
if (dir == DMA_TO_DEVICE) {
new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
} else {
new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
}
if (!user_memset)
device_memory_addr += len;
dma_desc_cnt++;
new_dma_pkt++;
}
if (!dma_desc_cnt) {
dev_err(hdev->dev,
"Error of 0 SG entries when patching DMA packet\n");
return -EFAULT;
}
/* Fix the last dma packet - wrcomp must be as user set it */
new_dma_pkt--;
new_dma_pkt->ctl |= cpu_to_le32(user_wrcomp_en_mask);
*new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
return 0;
}
static int gaudi_patch_cb(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
u32 cb_parsed_length = 0;
u32 cb_patched_cur_length = 0;
int rc = 0;
	/* user_cb_size is greater than 0, so the loop always executes */
while (cb_parsed_length < parser->user_cb_size) {
enum packet_id pkt_id;
u16 pkt_size;
u32 new_pkt_size = 0;
struct gaudi_packet *user_pkt, *kernel_pkt;
user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
kernel_pkt = parser->patched_cb->kernel_address +
cb_patched_cur_length;
pkt_id = (enum packet_id) (
(le64_to_cpu(user_pkt->header) &
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
if (!validate_packet_id(pkt_id)) {
dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
rc = -EINVAL;
break;
}
pkt_size = gaudi_packet_sizes[pkt_id];
cb_parsed_length += pkt_size;
if (cb_parsed_length > parser->user_cb_size) {
dev_err(hdev->dev,
"packet 0x%x is out of CB boundary\n", pkt_id);
rc = -EINVAL;
break;
}
switch (pkt_id) {
case PACKET_LIN_DMA:
rc = gaudi_patch_dma_packet(hdev, parser,
(struct packet_lin_dma *) user_pkt,
(struct packet_lin_dma *) kernel_pkt,
&new_pkt_size);
cb_patched_cur_length += new_pkt_size;
break;
case PACKET_MSG_PROT:
dev_err(hdev->dev,
"User not allowed to use MSG_PROT\n");
rc = -EPERM;
break;
case PACKET_CP_DMA:
dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
rc = -EPERM;
break;
case PACKET_STOP:
dev_err(hdev->dev, "User not allowed to use STOP\n");
rc = -EPERM;
break;
case PACKET_WREG_32:
case PACKET_WREG_BULK:
case PACKET_MSG_LONG:
case PACKET_MSG_SHORT:
case PACKET_REPEAT:
case PACKET_FENCE:
case PACKET_NOP:
case PACKET_ARB_POINT:
case PACKET_LOAD_AND_EXE:
memcpy(kernel_pkt, user_pkt, pkt_size);
cb_patched_cur_length += pkt_size;
break;
default:
dev_err(hdev->dev, "Invalid packet header 0x%x\n",
pkt_id);
rc = -EINVAL;
break;
}
if (rc)
break;
}
return rc;
}
static int gaudi_parse_cb_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
u64 handle;
u32 patched_cb_size;
struct hl_cb *user_cb;
int rc;
/*
* The new CB should have space at the end for:
* 1. Optional NOP padding for cacheline alignment
* 2. A MSG_PROT packet that will act as a completion packet
* 3. A MSG_PROT packet that will generate the MSI interrupt
*/
if (parser->completion)
parser->patched_cb_size = parser->user_cb_size +
gaudi_get_patched_cb_extra_size(parser->user_cb_size);
else
parser->patched_cb_size = parser->user_cb_size;
rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
&handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n",
rc);
return rc;
}
parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
/* hl_cb_get should never fail */
if (!parser->patched_cb) {
dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
rc = -EFAULT;
goto out;
}
/*
* We are protected from overflow because the check
* "parser->user_cb_size <= parser->user_cb->size" was done in get_cb_from_cs_chunk()
* in the common code. That check is done only if is_kernel_allocated_cb is true.
*
* There is no option to reach here without going through that check because:
* 1. validate_queue_index() assigns true to is_kernel_allocated_cb for any submission to
* an external queue.
* 2. For Gaudi, we only parse CBs that were submitted to the external queues.
*/
memcpy(parser->patched_cb->kernel_address,
parser->user_cb->kernel_address,
parser->user_cb_size);
patched_cb_size = parser->patched_cb_size;
/* Validate patched CB instead of user CB */
user_cb = parser->user_cb;
parser->user_cb = parser->patched_cb;
rc = gaudi_validate_cb(hdev, parser, true);
parser->user_cb = user_cb;
if (rc) {
hl_cb_put(parser->patched_cb);
goto out;
}
if (patched_cb_size != parser->patched_cb_size) {
dev_err(hdev->dev, "user CB size mismatch\n");
hl_cb_put(parser->patched_cb);
rc = -EINVAL;
goto out;
}
out:
/*
* Always call cb destroy here because we still have 1 reference
* to it by calling cb_get earlier. After the job is completed,
* cb_put will release it, but here we want to remove it from the
* idr
*/
hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
return rc;
}
static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
u64 handle;
int rc;
rc = gaudi_validate_cb(hdev, parser, false);
if (rc)
goto free_userptr;
rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
&handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
goto free_userptr;
}
parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
/* hl_cb_get should never fail here */
if (!parser->patched_cb) {
dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
rc = -EFAULT;
goto out;
}
rc = gaudi_patch_cb(hdev, parser);
if (rc)
hl_cb_put(parser->patched_cb);
out:
/*
* Always call cb destroy here because we still have 1 reference
* to it by calling cb_get earlier. After the job is completed,
* cb_put will release it, but here we want to remove it from the
* idr
*/
hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
free_userptr:
if (rc)
hl_userptr_delete_list(hdev, parser->job_userptr_list);
return rc;
}
static int gaudi_parse_cb_no_ext_queue(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
struct gaudi_device *gaudi = hdev->asic_specific;
u32 nic_queue_offset, nic_mask_q_id;
if ((parser->hw_queue_id >= GAUDI_QUEUE_ID_NIC_0_0) &&
(parser->hw_queue_id <= GAUDI_QUEUE_ID_NIC_9_3)) {
nic_queue_offset = parser->hw_queue_id - GAUDI_QUEUE_ID_NIC_0_0;
nic_mask_q_id = 1 << (HW_CAP_NIC_SHIFT + (nic_queue_offset >> 2));
if (!(gaudi->hw_cap_initialized & nic_mask_q_id)) {
dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id);
return -EINVAL;
}
}
/* For internal queue jobs just check if CB address is valid */
if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
parser->user_cb_size,
asic_prop->sram_user_base_address,
asic_prop->sram_end_address))
return 0;
if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
parser->user_cb_size,
asic_prop->dram_user_base_address,
asic_prop->dram_end_address))
return 0;
/* PMMU and HPMMU addresses are equal, check only one of them */
if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
parser->user_cb_size,
asic_prop->pmmu.start_addr,
asic_prop->pmmu.end_addr))
return 0;
dev_err(hdev->dev,
"CB address 0x%px + 0x%x for internal QMAN is not valid\n",
parser->user_cb, parser->user_cb_size);
return -EFAULT;
}
static int gaudi_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (parser->queue_type == QUEUE_TYPE_INT)
return gaudi_parse_cb_no_ext_queue(hdev, parser);
if (gaudi->hw_cap_initialized & HW_CAP_MMU)
return gaudi_parse_cb_mmu(hdev, parser);
else
return gaudi_parse_cb_no_mmu(hdev, parser);
}
static void gaudi_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address,
u32 len, u32 original_len, u64 cq_addr, u32 cq_val,
u32 msi_vec, bool eb)
{
struct packet_msg_prot *cq_pkt;
struct packet_nop *cq_padding;
u64 msi_addr;
u32 tmp;
cq_padding = kernel_address + original_len;
cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
while ((void *)cq_padding < (void *)cq_pkt) {
cq_padding->ctl = cpu_to_le32(FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_NOP));
cq_padding++;
}
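/*
 * Two MSG_PROT packets close the CB: the first writes cq_val to the
 * completion queue address, the second writes 1 to the MSI register to
 * raise the interrupt.
 */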
tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
if (eb)
tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
cq_pkt->ctl = cpu_to_le32(tmp);
cq_pkt->value = cpu_to_le32(cq_val);
cq_pkt->addr = cpu_to_le64(cq_addr);
cq_pkt++;
tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
cq_pkt->ctl = cpu_to_le32(tmp);
cq_pkt->value = cpu_to_le32(1);
msi_addr = hdev->pdev ? mmPCIE_CORE_MSI_REQ : mmPCIE_MSI_INTR_0 + msi_vec * 4;
cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr);
}
static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val)
{
WREG32(mmCPU_IF_EQ_RD_OFFS, val);
}
static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
u32 size, u64 val)
{
struct packet_lin_dma *lin_dma_pkt;
struct hl_cs_job *job;
u32 cb_size, ctl, err_cause;
struct hl_cb *cb;
int rc;
cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
if (!cb)
return -EFAULT;
lin_dma_pkt = cb->kernel_address;
memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
cb_size = sizeof(*lin_dma_pkt);
ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA);
ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_LIN_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
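/*
 * In memset mode the fill value goes into the packet's source-address
 * field; the destination holds the device address to be filled.
 */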
lin_dma_pkt->ctl = cpu_to_le32(ctl);
lin_dma_pkt->src_addr = cpu_to_le64(val);
lin_dma_pkt->dst_addr = cpu_to_le64(addr);
lin_dma_pkt->tsize = cpu_to_le32(size);
job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
if (!job) {
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
goto release_cb;
}
/* Verify DMA is OK */
err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE);
if (err_cause && !hdev->init_done) {
dev_dbg(hdev->dev,
"Clearing DMA0 engine from errors (cause 0x%x)\n",
err_cause);
WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause);
}
job->id = 0;
job->user_cb = cb;
atomic_inc(&job->user_cb->cs_cnt);
job->user_cb_size = cb_size;
job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
job->patched_cb = job->user_cb;
job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot);
hl_debugfs_add_job(hdev, job);
rc = gaudi_send_job_on_qman0(hdev, job);
hl_debugfs_remove_job(hdev, job);
kfree(job);
atomic_dec(&cb->cs_cnt);
/* Verify DMA is OK */
err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE);
if (err_cause) {
dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause);
rc = -EIO;
if (!hdev->init_done) {
dev_dbg(hdev->dev,
"Clearing DMA0 engine from errors (cause 0x%x)\n",
err_cause);
WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause);
}
}
release_cb:
hl_cb_put(cb);
hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
return rc;
}
static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
u32 num_regs, u32 val)
{
struct packet_msg_long *pkt;
struct hl_cs_job *job;
u32 cb_size, ctl;
struct hl_cb *cb;
int i, rc;
cb_size = (sizeof(*pkt) * num_regs) + sizeof(struct packet_msg_prot);
if (cb_size > SZ_2M) {
dev_err(hdev->dev, "CB size must be smaller than %uMB", SZ_2M);
return -ENOMEM;
}
cb = hl_cb_kernel_create(hdev, cb_size, false);
if (!cb)
return -EFAULT;
pkt = cb->kernel_address;
ctl = FIELD_PREP(GAUDI_PKT_LONG_CTL_OP_MASK, 0); /* write the value */
ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_LONG);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
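/* One MSG_LONG packet per register, each writing the same value */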
for (i = 0; i < num_regs ; i++, pkt++) {
pkt->ctl = cpu_to_le32(ctl);
pkt->value = cpu_to_le32(val);
pkt->addr = cpu_to_le64(reg_base + (i * 4));
}
job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
if (!job) {
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
goto release_cb;
}
job->id = 0;
job->user_cb = cb;
atomic_inc(&job->user_cb->cs_cnt);
job->user_cb_size = cb_size;
job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
job->patched_cb = job->user_cb;
job->job_cb_size = cb_size;
hl_debugfs_add_job(hdev, job);
rc = gaudi_send_job_on_qman0(hdev, job);
hl_debugfs_remove_job(hdev, job);
kfree(job);
atomic_dec(&cb->cs_cnt);
release_cb:
hl_cb_put(cb);
hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
return rc;
}
static int gaudi_restore_sm_registers(struct hl_device *hdev)
{
u64 base_addr;
u32 num_regs;
int rc;
base_addr = CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0;
num_regs = NUM_OF_SOB_IN_BLOCK;
rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
if (rc) {
dev_err(hdev->dev, "failed resetting SM registers");
return -ENOMEM;
}
base_addr = CFG_BASE + mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0;
num_regs = NUM_OF_SOB_IN_BLOCK;
rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
if (rc) {
dev_err(hdev->dev, "failed resetting SM registers");
return -ENOMEM;
}
base_addr = CFG_BASE + mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0;
num_regs = NUM_OF_SOB_IN_BLOCK;
rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
if (rc) {
dev_err(hdev->dev, "failed resetting SM registers");
return -ENOMEM;
}
base_addr = CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0;
num_regs = NUM_OF_MONITORS_IN_BLOCK;
rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
if (rc) {
dev_err(hdev->dev, "failed resetting SM registers");
return -ENOMEM;
}
base_addr = CFG_BASE + mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0;
num_regs = NUM_OF_MONITORS_IN_BLOCK;
rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
if (rc) {
dev_err(hdev->dev, "failed resetting SM registers");
return -ENOMEM;
}
base_addr = CFG_BASE + mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0;
num_regs = NUM_OF_MONITORS_IN_BLOCK;
rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
if (rc) {
dev_err(hdev->dev, "failed resetting SM registers");
return -ENOMEM;
}
base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
(GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT * 4);
num_regs = NUM_OF_SOB_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT;
rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
if (rc) {
dev_err(hdev->dev, "failed resetting SM registers");
return -ENOMEM;
}
base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 +
(GAUDI_FIRST_AVAILABLE_W_S_MONITOR * 4);
num_regs = NUM_OF_MONITORS_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_MONITOR;
rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
if (rc) {
dev_err(hdev->dev, "failed resetting SM registers");
return -ENOMEM;
}
return 0;
}
static void gaudi_restore_dma_registers(struct hl_device *hdev)
{
u32 sob_delta = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 -
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0;
int i;
for (i = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
u64 sob_addr = CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 +
(i * sob_delta);
u32 dma_offset = i * DMA_CORE_OFFSET;
WREG32(mmDMA0_CORE_WR_COMP_ADDR_LO + dma_offset,
lower_32_bits(sob_addr));
WREG32(mmDMA0_CORE_WR_COMP_ADDR_HI + dma_offset,
upper_32_bits(sob_addr));
WREG32(mmDMA0_CORE_WR_COMP_WDATA + dma_offset, 0x80000001);
/* For DMAs 2-7, need to restore WR_AWUSER_31_11 as it can be
* modified by the user for SRAM reduction
*/
if (i > 1)
WREG32(mmDMA0_CORE_WR_AWUSER_31_11 + dma_offset,
0x00000001);
}
}
static void gaudi_restore_qm_registers(struct hl_device *hdev)
{
u32 qman_offset;
int i;
for (i = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
qman_offset = i * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_ARB_CFG_0 + qman_offset, 0);
}
for (i = 0 ; i < MME_NUMBER_OF_MASTER_ENGINES ; i++) {
qman_offset = i * (mmMME2_QM_BASE - mmMME0_QM_BASE);
WREG32(mmMME0_QM_ARB_CFG_0 + qman_offset, 0);
}
for (i = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
qman_offset = i * TPC_QMAN_OFFSET;
WREG32(mmTPC0_QM_ARB_CFG_0 + qman_offset, 0);
}
for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) {
qman_offset = (i >> 1) * NIC_MACRO_QMAN_OFFSET +
(i & 0x1) * NIC_ENGINE_QMAN_OFFSET;
WREG32(mmNIC0_QM0_ARB_CFG_0 + qman_offset, 0);
}
}
static int gaudi_restore_user_registers(struct hl_device *hdev)
{
int rc;
rc = gaudi_restore_sm_registers(hdev);
if (rc)
return rc;
gaudi_restore_dma_registers(hdev);
gaudi_restore_qm_registers(hdev);
return 0;
}
static int gaudi_context_switch(struct hl_device *hdev, u32 asid)
{
return 0;
}
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev)
{
u32 size = hdev->asic_prop.mmu_pgt_size +
hdev->asic_prop.mmu_cache_mng_size;
struct gaudi_device *gaudi = hdev->asic_specific;
u64 addr = hdev->asic_prop.mmu_pgt_addr;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
return 0;
return gaudi_memset_device_memory(hdev, addr, size, 0);
}
static void gaudi_restore_phase_topology(struct hl_device *hdev)
{
}
static int gaudi_dma_core_transfer(struct hl_device *hdev, int dma_id, u64 addr,
u32 size_to_dma, dma_addr_t dma_addr)
{
u32 err_cause, val;
u64 dma_offset;
int rc;
dma_offset = dma_id * DMA_CORE_OFFSET;
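/* Program the DMA core registers directly and commit a linear transfer */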
WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, lower_32_bits(addr));
WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, upper_32_bits(addr));
WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset, lower_32_bits(dma_addr));
WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset, upper_32_bits(dma_addr));
WREG32(mmDMA0_CORE_DST_TSIZE_0 + dma_offset, size_to_dma);
WREG32(mmDMA0_CORE_COMMIT + dma_offset,
(1 << DMA0_CORE_COMMIT_LIN_SHIFT));
rc = hl_poll_timeout(
hdev,
mmDMA0_CORE_STS0 + dma_offset,
val,
((val & DMA0_CORE_STS0_BUSY_MASK) == 0),
0,
1000000);
if (rc) {
dev_err(hdev->dev,
"DMA %d timed-out during reading of 0x%llx\n",
dma_id, addr);
return -EIO;
}
/* Verify DMA is OK */
err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset);
if (err_cause) {
dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause);
dev_dbg(hdev->dev,
"Clearing DMA0 engine from errors (cause 0x%x)\n",
err_cause);
WREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset, err_cause);
return -EIO;
}
return 0;
}
static int gaudi_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
void *blob_addr)
{
u32 dma_core_sts0, err_cause, cfg1, size_left, pos, size_to_dma;
u32 qm_glbl_sts0, qm_cgm_sts;
u64 dma_offset, qm_offset;
dma_addr_t dma_addr;
void *kernel_addr;
bool is_eng_idle;
int rc = 0, dma_id;
kernel_addr = hl_asic_dma_alloc_coherent(hdev, SZ_2M, &dma_addr, GFP_KERNEL | __GFP_ZERO);
if (!kernel_addr)
return -ENOMEM;
hdev->asic_funcs->hw_queues_lock(hdev);
dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_1];
dma_offset = dma_id * DMA_CORE_OFFSET;
qm_offset = dma_id * DMA_QMAN_OFFSET;
dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset);
qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + qm_offset);
qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + qm_offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
IS_DMA_IDLE(dma_core_sts0);
if (!is_eng_idle) {
dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_2];
dma_offset = dma_id * DMA_CORE_OFFSET;
qm_offset = dma_id * DMA_QMAN_OFFSET;
dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset);
qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + qm_offset);
qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + qm_offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
IS_DMA_IDLE(dma_core_sts0);
if (!is_eng_idle) {
dev_err_ratelimited(hdev->dev,
"Can't read via DMA because it is BUSY\n");
rc = -EAGAIN;
goto out;
}
}
cfg1 = RREG32(mmDMA0_QM_GLBL_CFG1 + qm_offset);
WREG32(mmDMA0_QM_GLBL_CFG1 + qm_offset,
0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
/* TODO: remove this by mapping the DMA temporary buffer to the MMU
* using the compute ctx ASID, if one exists. If not, use the kernel
* ctx ASID
*/
WREG32_OR(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_VAL_SHIFT));
/* Verify DMA is OK */
err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset);
if (err_cause) {
dev_dbg(hdev->dev,
"Clearing DMA0 engine from errors (cause 0x%x)\n",
err_cause);
WREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset, err_cause);
}
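/*
 * Transfer the region in chunks of up to 2MB: each chunk is DMA'd into
 * the coherent bounce buffer and then copied out to blob_addr.
 */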
pos = 0;
size_left = size;
size_to_dma = SZ_2M;
while (size_left > 0) {
if (size_left < SZ_2M)
size_to_dma = size_left;
rc = gaudi_dma_core_transfer(hdev, dma_id, addr, size_to_dma,
dma_addr);
if (rc)
break;
memcpy(blob_addr + pos, kernel_addr, size_to_dma);
if (size_left <= SZ_2M)
break;
pos += SZ_2M;
addr += SZ_2M;
size_left -= SZ_2M;
}
/* TODO: remove this by mapping the DMA temporary buffer to the MMU
* using the compute ctx ASID, if one exists. If not, use the kernel
* ctx ASID
*/
WREG32_AND(mmDMA0_CORE_PROT + dma_offset,
~BIT(DMA0_CORE_PROT_VAL_SHIFT));
WREG32(mmDMA0_QM_GLBL_CFG1 + qm_offset, cfg1);
out:
hdev->asic_funcs->hw_queues_unlock(hdev);
hl_asic_dma_free_coherent(hdev, SZ_2M, kernel_addr, dma_addr);
return rc;
}
static u64 gaudi_read_pte(struct hl_device *hdev, u64 addr)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (hdev->reset_info.hard_reset_pending)
return U64_MAX;
return readq(hdev->pcie_bar[HBM_BAR_ID] +
(addr - gaudi->hbm_bar_cur_addr));
}
static void gaudi_write_pte(struct hl_device *hdev, u64 addr, u64 val)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (hdev->reset_info.hard_reset_pending)
return;
writeq(val, hdev->pcie_bar[HBM_BAR_ID] +
(addr - gaudi->hbm_bar_cur_addr));
}
void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
{
/* mask to zero the MMBP and ASID bits */
WREG32_AND(reg, ~0x7FF);
WREG32_OR(reg, asid);
}
static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
return;
if (asid & ~DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK) {
dev_crit(hdev->dev, "asid %u is too big\n", asid);
return;
}
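/* Program the ASID into the MMU attribute registers of every initiator */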
gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA0_CORE_NON_SECURE_PROPS, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA1_CORE_NON_SECURE_PROPS, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA2_CORE_NON_SECURE_PROPS, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA3_CORE_NON_SECURE_PROPS, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA4_CORE_NON_SECURE_PROPS, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA5_CORE_NON_SECURE_PROPS, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA6_CORE_NON_SECURE_PROPS, asid);
gaudi_mmu_prepare_reg(hdev, mmDMA7_CORE_NON_SECURE_PROPS, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC0_CFG_ARUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC0_CFG_AWUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC1_CFG_ARUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC1_CFG_AWUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC2_CFG_ARUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC2_CFG_AWUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC3_CFG_ARUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC3_CFG_AWUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC4_CFG_ARUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC4_CFG_AWUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC5_CFG_ARUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC5_CFG_AWUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC6_CFG_ARUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC6_CFG_AWUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC7_CFG_ARUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmTPC7_CFG_AWUSER_LO, asid);
gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_0, asid);
gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_1, asid);
gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_2, asid);
gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_3, asid);
gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_4, asid);
gaudi_mmu_prepare_reg(hdev, mmMME0_SBAB_ARUSER0, asid);
gaudi_mmu_prepare_reg(hdev, mmMME0_SBAB_ARUSER1, asid);
gaudi_mmu_prepare_reg(hdev, mmMME1_SBAB_ARUSER0, asid);
gaudi_mmu_prepare_reg(hdev, mmMME1_SBAB_ARUSER1, asid);
gaudi_mmu_prepare_reg(hdev, mmMME2_SBAB_ARUSER0, asid);
gaudi_mmu_prepare_reg(hdev, mmMME2_SBAB_ARUSER1, asid);
gaudi_mmu_prepare_reg(hdev, mmMME3_SBAB_ARUSER0, asid);
gaudi_mmu_prepare_reg(hdev, mmMME3_SBAB_ARUSER1, asid);
gaudi_mmu_prepare_reg(hdev, mmMME0_ACC_WBC, asid);
gaudi_mmu_prepare_reg(hdev, mmMME1_ACC_WBC, asid);
gaudi_mmu_prepare_reg(hdev, mmMME2_ACC_WBC, asid);
gaudi_mmu_prepare_reg(hdev, mmMME3_ACC_WBC, asid);
if (gaudi->hw_cap_initialized & HW_CAP_NIC0) {
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_1,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_2,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_3,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_4,
asid);
}
if (gaudi->hw_cap_initialized & HW_CAP_NIC1) {
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_1,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_2,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_3,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_4,
asid);
}
if (gaudi->hw_cap_initialized & HW_CAP_NIC2) {
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_1,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_2,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_3,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_4,
asid);
}
if (gaudi->hw_cap_initialized & HW_CAP_NIC3) {
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_1,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_2,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_3,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_4,
asid);
}
if (gaudi->hw_cap_initialized & HW_CAP_NIC4) {
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_1,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_2,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_3,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_4,
asid);
}
if (gaudi->hw_cap_initialized & HW_CAP_NIC5) {
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_1,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_2,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_3,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_4,
asid);
}
if (gaudi->hw_cap_initialized & HW_CAP_NIC6) {
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_1,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_2,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_3,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_4,
asid);
}
if (gaudi->hw_cap_initialized & HW_CAP_NIC7) {
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_1,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_2,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_3,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_4,
asid);
}
if (gaudi->hw_cap_initialized & HW_CAP_NIC8) {
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_1,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_2,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_3,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_4,
asid);
}
if (gaudi->hw_cap_initialized & HW_CAP_NIC9) {
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_0,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_1,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_2,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_3,
asid);
gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_4,
asid);
}
gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
}
static int gaudi_send_job_on_qman0(struct hl_device *hdev,
struct hl_cs_job *job)
{
struct packet_msg_prot *fence_pkt;
u32 *fence_ptr;
dma_addr_t fence_dma_addr;
struct hl_cb *cb;
u32 tmp, timeout, dma_offset;
int rc;
if (hdev->pldm)
timeout = GAUDI_PLDM_QMAN0_TIMEOUT_USEC;
else
timeout = HL_DEVICE_TIMEOUT_USEC;
fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
"Failed to allocate fence memory for QMAN0\n");
return -ENOMEM;
}
cb = job->patched_cb;
fence_pkt = cb->kernel_address +
job->job_cb_size - sizeof(struct packet_msg_prot);
tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
fence_pkt->ctl = cpu_to_le32(tmp);
fence_pkt->value = cpu_to_le32(GAUDI_QMAN0_FENCE_VAL);
fence_pkt->addr = cpu_to_le64(fence_dma_addr);
dma_offset = gaudi_dma_assignment[GAUDI_PCI_DMA_1] * DMA_CORE_OFFSET;
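/*
 * Set the DMA core protection VAL bit (together with ERR_VAL) for the
 * duration of the job; only ERR_VAL is restored after the fence poll.
 */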
WREG32(mmDMA0_CORE_PROT + dma_offset,
BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT) | BIT(DMA0_CORE_PROT_VAL_SHIFT));
rc = hl_hw_queue_send_cb_no_cmpl(hdev, GAUDI_QUEUE_ID_DMA_0_0,
job->job_cb_size, cb->bus_address);
if (rc) {
dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
goto free_fence_ptr;
}
rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
(tmp == GAUDI_QMAN0_FENCE_VAL), 1000,
timeout, true);
hl_hw_queue_inc_ci_kernel(hdev, GAUDI_QUEUE_ID_DMA_0_0);
if (rc == -ETIMEDOUT) {
dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
goto free_fence_ptr;
}
free_fence_ptr:
WREG32(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT));
hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
return rc;
}
static void gaudi_get_event_desc(u16 event_type, char *desc, size_t size)
{
if (event_type >= GAUDI_EVENT_SIZE)
goto event_not_supported;
if (!gaudi_irq_map_table[event_type].valid)
goto event_not_supported;
snprintf(desc, size, "%s", gaudi_irq_map_table[event_type].name);
return;
event_not_supported:
snprintf(desc, size, "N/A");
}
static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev, u32 x_y,
bool is_write, u16 *engine_id_1,
u16 *engine_id_2)
{
u32 dma_id[2], dma_offset, err_cause[2], mask, i;
mask = is_write ? DMA0_CORE_ERR_CAUSE_HBW_WR_ERR_MASK :
DMA0_CORE_ERR_CAUSE_HBW_RD_ERR_MASK;
switch (x_y) {
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
dma_id[0] = 0;
dma_id[1] = 2;
break;
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
dma_id[0] = 1;
dma_id[1] = 3;
break;
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
dma_id[0] = 4;
dma_id[1] = 6;
break;
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
dma_id[0] = 5;
dma_id[1] = 7;
break;
default:
goto unknown_initiator;
}
for (i = 0 ; i < 2 ; i++) {
dma_offset = dma_id[i] * DMA_CORE_OFFSET;
err_cause[i] = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset);
}
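/*
 * Each DMA_IF is shared by two DMA engines; use the per-engine ERR_CAUSE
 * bits to determine which of the pair actually triggered the RAZWI.
 */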
switch (x_y) {
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
*engine_id_1 = GAUDI_ENGINE_ID_DMA_0;
return "DMA0";
} else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
*engine_id_1 = GAUDI_ENGINE_ID_DMA_2;
return "DMA2";
} else {
*engine_id_1 = GAUDI_ENGINE_ID_DMA_0;
*engine_id_2 = GAUDI_ENGINE_ID_DMA_2;
return "DMA0 or DMA2";
}
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
*engine_id_1 = GAUDI_ENGINE_ID_DMA_1;
return "DMA1";
} else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
*engine_id_1 = GAUDI_ENGINE_ID_DMA_3;
return "DMA3";
} else {
*engine_id_1 = GAUDI_ENGINE_ID_DMA_1;
*engine_id_2 = GAUDI_ENGINE_ID_DMA_3;
return "DMA1 or DMA3";
}
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
*engine_id_1 = GAUDI_ENGINE_ID_DMA_4;
return "DMA4";
} else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
*engine_id_1 = GAUDI_ENGINE_ID_DMA_6;
return "DMA6";
} else {
*engine_id_1 = GAUDI_ENGINE_ID_DMA_4;
*engine_id_2 = GAUDI_ENGINE_ID_DMA_6;
return "DMA4 or DMA6";
}
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
*engine_id_1 = GAUDI_ENGINE_ID_DMA_5;
return "DMA5";
} else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
*engine_id_1 = GAUDI_ENGINE_ID_DMA_7;
return "DMA7";
} else {
*engine_id_1 = GAUDI_ENGINE_ID_DMA_5;
*engine_id_2 = GAUDI_ENGINE_ID_DMA_7;
return "DMA5 or DMA7";
}
}
unknown_initiator:
return "unknown initiator";
}
static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, bool is_write,
u16 *engine_id_1, u16 *engine_id_2)
{
u32 val, x_y, axi_id;
val = is_write ? RREG32(mmMMU_UP_RAZWI_WRITE_ID) :
RREG32(mmMMU_UP_RAZWI_READ_ID);
x_y = val & ((RAZWI_INITIATOR_Y_MASK << RAZWI_INITIATOR_Y_SHIFT) |
(RAZWI_INITIATOR_X_MASK << RAZWI_INITIATOR_X_SHIFT));
axi_id = val & (RAZWI_INITIATOR_AXI_ID_MASK <<
RAZWI_INITIATOR_AXI_ID_SHIFT);
switch (x_y) {
case RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0:
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
*engine_id_1 = GAUDI_ENGINE_ID_TPC_0;
return "TPC0";
}
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) {
*engine_id_1 = GAUDI_ENGINE_ID_NIC_0;
return "NIC0";
}
break;
case RAZWI_INITIATOR_ID_X_Y_TPC1:
*engine_id_1 = GAUDI_ENGINE_ID_TPC_1;
return "TPC1";
case RAZWI_INITIATOR_ID_X_Y_MME0_0:
case RAZWI_INITIATOR_ID_X_Y_MME0_1:
*engine_id_1 = GAUDI_ENGINE_ID_MME_0;
return "MME0";
case RAZWI_INITIATOR_ID_X_Y_MME1_0:
case RAZWI_INITIATOR_ID_X_Y_MME1_1:
*engine_id_1 = GAUDI_ENGINE_ID_MME_1;
return "MME1";
case RAZWI_INITIATOR_ID_X_Y_TPC2:
*engine_id_1 = GAUDI_ENGINE_ID_TPC_2;
return "TPC2";
case RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC:
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
*engine_id_1 = GAUDI_ENGINE_ID_TPC_3;
return "TPC3";
}
/* PCI, CPU and PSOC do not have an engine id */
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PCI))
return "PCI";
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_CPU))
return "CPU";
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PSOC))
return "PSOC";
break;
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
return gaudi_get_razwi_initiator_dma_name(hdev, x_y, is_write,
engine_id_1, engine_id_2);
case RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2:
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
*engine_id_1 = GAUDI_ENGINE_ID_TPC_4;
return "TPC4";
}
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) {
*engine_id_1 = GAUDI_ENGINE_ID_NIC_1;
return "NIC1";
}
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) {
*engine_id_1 = GAUDI_ENGINE_ID_NIC_2;
return "NIC2";
}
break;
case RAZWI_INITIATOR_ID_X_Y_TPC5:
*engine_id_1 = GAUDI_ENGINE_ID_TPC_5;
return "TPC5";
case RAZWI_INITIATOR_ID_X_Y_MME2_0:
case RAZWI_INITIATOR_ID_X_Y_MME2_1:
*engine_id_1 = GAUDI_ENGINE_ID_MME_2;
return "MME2";
case RAZWI_INITIATOR_ID_X_Y_MME3_0:
case RAZWI_INITIATOR_ID_X_Y_MME3_1:
*engine_id_1 = GAUDI_ENGINE_ID_MME_3;
return "MME3";
case RAZWI_INITIATOR_ID_X_Y_TPC6:
*engine_id_1 = GAUDI_ENGINE_ID_TPC_6;
return "TPC6";
case RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5:
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
*engine_id_1 = GAUDI_ENGINE_ID_TPC_7;
return "TPC7";
}
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) {
*engine_id_1 = GAUDI_ENGINE_ID_NIC_4;
return "NIC4";
}
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) {
*engine_id_1 = GAUDI_ENGINE_ID_NIC_5;
return "NIC5";
}
break;
default:
break;
}
dev_err(hdev->dev,
"Unknown RAZWI initiator ID 0x%x [Y=%d, X=%d, AXI_ID=%d]\n",
val,
(val >> RAZWI_INITIATOR_Y_SHIFT) & RAZWI_INITIATOR_Y_MASK,
(val >> RAZWI_INITIATOR_X_SHIFT) & RAZWI_INITIATOR_X_MASK,
(val >> RAZWI_INITIATOR_AXI_ID_SHIFT) &
RAZWI_INITIATOR_AXI_ID_MASK);
return "unknown initiator";
}
static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u16 *engine_id_1,
u16 *engine_id_2, bool *is_read, bool *is_write)
{
if (RREG32(mmMMU_UP_RAZWI_WRITE_VLD)) {
dev_err_ratelimited(hdev->dev,
"RAZWI event caused by illegal write of %s\n",
gaudi_get_razwi_initiator_name(hdev, true, engine_id_1, engine_id_2));
WREG32(mmMMU_UP_RAZWI_WRITE_VLD, 0);
*is_write = true;
}
if (RREG32(mmMMU_UP_RAZWI_READ_VLD)) {
dev_err_ratelimited(hdev->dev,
"RAZWI event caused by illegal read of %s\n",
gaudi_get_razwi_initiator_name(hdev, false, engine_id_1, engine_id_2));
WREG32(mmMMU_UP_RAZWI_READ_VLD, 0);
*is_read = true;
}
}
static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr, u64 *event_mask)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 val;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
return;
val = RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE);
if (val & MMU_UP_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
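/*
 * Assemble the faulting VA: bits 49:32 come from the capture register,
 * bits 31:0 from the dedicated VA register.
 */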
*addr = val & MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
*addr <<= 32;
*addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA);
dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", *addr);
hl_handle_page_fault(hdev, *addr, 0, true, event_mask);
WREG32(mmMMU_UP_PAGE_ERROR_CAPTURE, 0);
}
val = RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE);
if (val & MMU_UP_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK) {
*addr = val & MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK;
*addr <<= 32;
*addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA);
dev_err_ratelimited(hdev->dev, "MMU access error on va 0x%llx\n", *addr);
WREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE, 0);
}
}
/*
* +-------------------+------------------------------------------------------+
* | Configuration Reg | Description |
* | Address | |
* +-------------------+------------------------------------------------------+
* | 0xF30 - 0xF3F |ECC single error indication (1 bit per memory wrapper)|
* | |0xF30 memory wrappers 31:0 (MSB to LSB) |
* | |0xF34 memory wrappers 63:32 |
* | |0xF38 memory wrappers 95:64 |
* | |0xF3C memory wrappers 127:96 |
* +-------------------+------------------------------------------------------+
* | 0xF40 - 0xF4F |ECC double error indication (1 bit per memory wrapper)|
* | |0xF40 memory wrappers 31:0 (MSB to LSB) |
* | |0xF44 memory wrappers 63:32 |
* | |0xF48 memory wrappers 95:64 |
* | |0xF4C memory wrappers 127:96 |
* +-------------------+------------------------------------------------------+
*/
static int gaudi_extract_ecc_info(struct hl_device *hdev,
struct ecc_info_extract_params *params, u64 *ecc_address,
u64 *ecc_syndrom, u8 *memory_wrapper_idx)
{
u32 i, num_mem_regs, reg, err_bit;
u64 err_addr, err_word = 0;
num_mem_regs = DIV_ROUND_UP(params->num_memories, 32);
if (params->block_address >= CFG_BASE)
params->block_address -= CFG_BASE;
if (params->derr)
err_addr = params->block_address + GAUDI_ECC_DERR0_OFFSET;
else
err_addr = params->block_address + GAUDI_ECC_SERR0_OFFSET;
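/*
 * Scan the single- or double-error indication registers (see the table
 * above); each set bit corresponds to one memory wrapper.
 */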
/* Set invalid wrapper index */
*memory_wrapper_idx = 0xFF;
/* Iterate through memory wrappers, a single bit must be set */
for (i = 0 ; i < num_mem_regs ; i++) {
err_word = RREG32(err_addr + i * 4);
if (err_word) {
err_bit = __ffs(err_word);
*memory_wrapper_idx = err_bit + (32 * i);
break;
}
}
if (*memory_wrapper_idx == 0xFF) {
dev_err(hdev->dev, "ECC error information cannot be found\n");
return -EINVAL;
}
WREG32(params->block_address + GAUDI_ECC_MEM_SEL_OFFSET,
*memory_wrapper_idx);
*ecc_address =
RREG32(params->block_address + GAUDI_ECC_ADDRESS_OFFSET);
*ecc_syndrom =
RREG32(params->block_address + GAUDI_ECC_SYNDROME_OFFSET);
/* Clear error indication */
reg = RREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET);
if (params->derr)
reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_DERR_MASK, 1);
else
reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_SERR_MASK, 1);
WREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET, reg);
return 0;
}
/*
* gaudi_queue_idx_dec - decrement queue index (pi/ci) and handle wrap
*
* @idx: the current pi/ci value
* @q_len: the queue length (power of 2)
*
* @return the cyclically decremented index
*/
static inline u32 gaudi_queue_idx_dec(u32 idx, u32 q_len)
{
u32 mask = q_len - 1;
/*
* modular decrement is equivalent to adding (q_len - 1);
* we then take the LSBs to make sure the value stays in the
* range [0, q_len - 1]
*/
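/*
 * Example: with q_len = 8 the mask is 0x7, so idx = 0 decrements to
 * (0 + 7) & 0x7 = 7 and idx = 3 decrements to (3 + 7) & 0x7 = 2.
 */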
return (idx + q_len - 1) & mask;
}
/**
* gaudi_handle_sw_config_stream_data - print SW config stream data
*
* @hdev: pointer to the habanalabs device structure
* @stream: the QMAN's stream
* @qman_base: base address of QMAN registers block
* @event_mask: mask of the last events that occurred
*/
static void gaudi_handle_sw_config_stream_data(struct hl_device *hdev, u32 stream,
u64 qman_base, u64 event_mask)
{
u64 cq_ptr_lo, cq_ptr_hi, cq_tsize, cq_ptr;
u32 cq_ptr_lo_off, size;
cq_ptr_lo_off = mmTPC0_QM_CQ_PTR_LO_1 - mmTPC0_QM_CQ_PTR_LO_0;
cq_ptr_lo = qman_base + (mmTPC0_QM_CQ_PTR_LO_0 - mmTPC0_QM_BASE) +
stream * cq_ptr_lo_off;
cq_ptr_hi = cq_ptr_lo +
(mmTPC0_QM_CQ_PTR_HI_0 - mmTPC0_QM_CQ_PTR_LO_0);
cq_tsize = cq_ptr_lo +
(mmTPC0_QM_CQ_TSIZE_0 - mmTPC0_QM_CQ_PTR_LO_0);
cq_ptr = (((u64) RREG32(cq_ptr_hi)) << 32) | RREG32(cq_ptr_lo);
size = RREG32(cq_tsize);
dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %u\n",
stream, cq_ptr, size);
if (event_mask & HL_NOTIFIER_EVENT_UNDEFINED_OPCODE) {
hdev->captured_err_info.undef_opcode.cq_addr = cq_ptr;
hdev->captured_err_info.undef_opcode.cq_size = size;
hdev->captured_err_info.undef_opcode.stream_id = stream;
}
}
/**
* gaudi_handle_last_pqes_on_err - print last PQEs on error
*
* @hdev: pointer to the habanalabs device structure
* @qid_base: first QID of the QMAN (out of 4 streams)
* @stream: the QMAN's stream
* @qman_base: base address of QMAN registers block
* @event_mask: mask of the last events that occurred
* @pr_sw_conf: if true print the SW config stream data (CQ PTR and SIZE)
*/
static void gaudi_handle_last_pqes_on_err(struct hl_device *hdev, u32 qid_base,
u32 stream, u64 qman_base,
u64 event_mask,
bool pr_sw_conf)
{
u32 ci, qm_ci_stream_off, queue_len;
struct hl_hw_queue *q;
u64 pq_ci, addr[PQ_FETCHER_CACHE_SIZE];
int i;
q = &hdev->kernel_queues[qid_base + stream];
qm_ci_stream_off = mmTPC0_QM_PQ_CI_1 - mmTPC0_QM_PQ_CI_0;
pq_ci = qman_base + (mmTPC0_QM_PQ_CI_0 - mmTPC0_QM_BASE) +
stream * qm_ci_stream_off;
queue_len = (q->queue_type == QUEUE_TYPE_INT) ?
q->int_queue_len : HL_QUEUE_LENGTH;
hdev->asic_funcs->hw_queues_lock(hdev);
if (pr_sw_conf)
gaudi_handle_sw_config_stream_data(hdev, stream, qman_base, event_mask);
ci = RREG32(pq_ci);
/* we should start printing from ci - 1 */
ci = gaudi_queue_idx_dec(ci, queue_len);
memset(addr, 0, sizeof(addr));
for (i = 0; i < PQ_FETCHER_CACHE_SIZE; i++) {
struct hl_bd *bd;
u32 len;
bd = q->kernel_address;
bd += ci;
len = le32_to_cpu(bd->len);
/* len 0 means an uninitialized entry - break */
if (!len)
break;
addr[i] = le64_to_cpu(bd->ptr);
dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %u\n",
stream, ci, addr[i], len);
/* get previous ci, wrap if needed */
ci = gaudi_queue_idx_dec(ci, queue_len);
}
if (event_mask & HL_NOTIFIER_EVENT_UNDEFINED_OPCODE) {
struct undefined_opcode_info *undef_opcode = &hdev->captured_err_info.undef_opcode;
u32 arr_idx = undef_opcode->cb_addr_streams_len;
if (arr_idx == 0) {
undef_opcode->timestamp = ktime_get();
undef_opcode->engine_id = gaudi_queue_id_to_engine_id[qid_base];
}
memcpy(undef_opcode->cb_addr_streams[arr_idx], addr, sizeof(addr));
undef_opcode->cb_addr_streams_len++;
}
hdev->asic_funcs->hw_queues_unlock(hdev);
}
/**
* handle_qman_data_on_err - extract QMAN data on error
*
* @hdev: pointer to the habanalabs device structure
* @qid_base: first QID of the QMAN (out of 4 streams)
* @stream: the QMAN's stream
* @qman_base: base address of QMAN registers block
* @event_mask: mask of the last events that occurred
*
* This function attempts to extract as much data as possible on a QMAN error.
* For an upper CP, print the SW config stream data and the last 8 PQEs.
* For the lower CP, print the SW config data and the last PQEs of all 4 upper CPs.
*/
static void handle_qman_data_on_err(struct hl_device *hdev, u32 qid_base,
u32 stream, u64 qman_base, u64 event_mask)
{
u32 i;
if (stream != QMAN_STREAMS) {
gaudi_handle_last_pqes_on_err(hdev, qid_base, stream,
qman_base, event_mask, true);
return;
}
/* handle Lower-CP */
gaudi_handle_sw_config_stream_data(hdev, stream, qman_base, event_mask);
for (i = 0; i < QMAN_STREAMS; i++)
gaudi_handle_last_pqes_on_err(hdev, qid_base, i,
qman_base, event_mask, false);
}
static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
const char *qm_name,
u64 qman_base,
u32 qid_base,
u64 *event_mask)
{
u32 i, j, glbl_sts_val, arb_err_val, glbl_sts_clr_val;
u64 glbl_sts_addr, arb_err_addr;
char reg_desc[32];
glbl_sts_addr = qman_base + (mmTPC0_QM_GLBL_STS1_0 - mmTPC0_QM_BASE);
arb_err_addr = qman_base + (mmTPC0_QM_ARB_ERR_CAUSE - mmTPC0_QM_BASE);
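/*
 * The GLBL_STS1 registers of the four streams and of the lower CP sit at
 * consecutive 4-byte offsets, hence the "+ 4 * i" reads below.
 */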
/* Iterate through all stream GLBL_STS1 registers + Lower CP */
for (i = 0 ; i < QMAN_STREAMS + 1 ; i++) {
glbl_sts_clr_val = 0;
glbl_sts_val = RREG32(glbl_sts_addr + 4 * i);
if (!glbl_sts_val)
continue;
if (i == QMAN_STREAMS)
snprintf(reg_desc, ARRAY_SIZE(reg_desc), "LowerCP");
else
snprintf(reg_desc, ARRAY_SIZE(reg_desc), "stream%u", i);
for (j = 0 ; j < GAUDI_NUM_OF_QM_ERR_CAUSE ; j++) {
if (glbl_sts_val & BIT(j)) {
dev_err_ratelimited(hdev->dev,
"%s %s. err cause: %s\n",
qm_name, reg_desc,
gaudi_qman_error_cause[j]);
glbl_sts_clr_val |= BIT(j);
}
}
/* check for undefined opcode */
if (glbl_sts_val & TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK &&
hdev->captured_err_info.undef_opcode.write_enable) {
memset(&hdev->captured_err_info.undef_opcode, 0,
sizeof(hdev->captured_err_info.undef_opcode));
hdev->captured_err_info.undef_opcode.write_enable = false;
*event_mask |= HL_NOTIFIER_EVENT_UNDEFINED_OPCODE;
}
/* Write 1 clear errors */
if (!hdev->stop_on_err)
WREG32(glbl_sts_addr + 4 * i, glbl_sts_clr_val);
else
handle_qman_data_on_err(hdev, qid_base, i, qman_base, *event_mask);
}
arb_err_val = RREG32(arb_err_addr);
if (!arb_err_val)
return;
for (j = 0 ; j < GAUDI_NUM_OF_QM_ARB_ERR_CAUSE ; j++) {
if (arb_err_val & BIT(j)) {
dev_err_ratelimited(hdev->dev,
"%s ARB_ERR. err cause: %s\n",
qm_name,
gaudi_qman_arb_error_cause[j]);
}
}
}
static void gaudi_print_sm_sei_info(struct hl_device *hdev, u16 event_type,
struct hl_eq_sm_sei_data *sei_data)
{
u32 index = event_type - GAUDI_EVENT_DMA_IF_SEI_0;
/* Flip the bits as the enum is ordered in the opposite way */
index = (index ^ 0x3) & 0x3;
switch (sei_data->sei_cause) {
case SM_SEI_SO_OVERFLOW:
dev_err_ratelimited(hdev->dev,
"%s SEI Error: SOB Group %u overflow/underflow",
gaudi_sync_manager_names[index],
le32_to_cpu(sei_data->sei_log));
break;
case SM_SEI_LBW_4B_UNALIGNED:
dev_err_ratelimited(hdev->dev,
"%s SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x",
gaudi_sync_manager_names[index],
le32_to_cpu(sei_data->sei_log));
break;
case SM_SEI_AXI_RESPONSE_ERR:
dev_err_ratelimited(hdev->dev,
"%s SEI Error: AXI ID %u response error",
gaudi_sync_manager_names[index],
le32_to_cpu(sei_data->sei_log));
break;
default:
dev_err_ratelimited(hdev->dev, "Unknown SM SEI cause %u",
le32_to_cpu(sei_data->sei_log));
break;
}
}
static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
struct hl_eq_ecc_data *ecc_data)
{
struct ecc_info_extract_params params;
u64 ecc_address = 0, ecc_syndrom = 0;
u8 index, memory_wrapper_idx = 0;
bool extract_info_from_fw;
int rc;
if (hdev->asic_prop.fw_security_enabled) {
extract_info_from_fw = true;
goto extract_ecc_info;
}
switch (event_type) {
case GAUDI_EVENT_PCIE_CORE_SERR ... GAUDI_EVENT_PCIE_PHY_DERR:
case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_MMU_DERR:
extract_info_from_fw = true;
break;
case GAUDI_EVENT_TPC0_SERR ... GAUDI_EVENT_TPC7_SERR:
index = event_type - GAUDI_EVENT_TPC0_SERR;
params.block_address = mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
params.num_memories = 90;
params.derr = false;
extract_info_from_fw = false;
break;
case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
index = event_type - GAUDI_EVENT_TPC0_DERR;
params.block_address =
mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
params.num_memories = 90;
params.derr = true;
extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_ACC_SERR:
case GAUDI_EVENT_MME1_ACC_SERR:
case GAUDI_EVENT_MME2_ACC_SERR:
case GAUDI_EVENT_MME3_ACC_SERR:
index = (event_type - GAUDI_EVENT_MME0_ACC_SERR) / 4;
params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
params.num_memories = 128;
params.derr = false;
extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_ACC_DERR:
case GAUDI_EVENT_MME1_ACC_DERR:
case GAUDI_EVENT_MME2_ACC_DERR:
case GAUDI_EVENT_MME3_ACC_DERR:
index = (event_type - GAUDI_EVENT_MME0_ACC_DERR) / 4;
params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
params.num_memories = 128;
params.derr = true;
extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_SBAB_SERR:
case GAUDI_EVENT_MME1_SBAB_SERR:
case GAUDI_EVENT_MME2_SBAB_SERR:
case GAUDI_EVENT_MME3_SBAB_SERR:
index = (event_type - GAUDI_EVENT_MME0_SBAB_SERR) / 4;
params.block_address =
mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
params.num_memories = 33;
params.derr = false;
extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_SBAB_DERR:
case GAUDI_EVENT_MME1_SBAB_DERR:
case GAUDI_EVENT_MME2_SBAB_DERR:
case GAUDI_EVENT_MME3_SBAB_DERR:
index = (event_type - GAUDI_EVENT_MME0_SBAB_DERR) / 4;
params.block_address =
mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
params.num_memories = 33;
params.derr = true;
extract_info_from_fw = false;
break;
default:
return;
}
extract_ecc_info:
if (extract_info_from_fw) {
ecc_address = le64_to_cpu(ecc_data->ecc_address);
ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
memory_wrapper_idx = ecc_data->memory_wrapper_idx;
} else {
rc = gaudi_extract_ecc_info(hdev, ¶ms, &ecc_address,
&ecc_syndrom, &memory_wrapper_idx);
if (rc)
return;
}
dev_err(hdev->dev,
"ECC error detected. address: %#llx. Syndrom: %#llx. block id %u\n",
ecc_address, ecc_syndrom, memory_wrapper_idx);
}
static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
u64 qman_base;
char desc[32];
u32 qid_base;
u8 index;
switch (event_type) {
case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
index = event_type - GAUDI_EVENT_TPC0_QM;
qid_base = GAUDI_QUEUE_ID_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmTPC0_QM_BASE + index * TPC_QMAN_OFFSET;
snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC_QM", index);
break;
case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
if (event_type == GAUDI_EVENT_MME0_QM) {
index = 0;
qid_base = GAUDI_QUEUE_ID_MME_0_0;
} else { /* event_type == GAUDI_EVENT_MME2_QM */
index = 2;
qid_base = GAUDI_QUEUE_ID_MME_1_0;
}
qman_base = mmMME0_QM_BASE + index * MME_QMAN_OFFSET;
snprintf(desc, ARRAY_SIZE(desc), "%s%d", "MME_QM", index);
break;
case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
index = event_type - GAUDI_EVENT_DMA0_QM;
qid_base = GAUDI_QUEUE_ID_DMA_0_0 + index * QMAN_STREAMS;
/* skip GAUDI_QUEUE_ID_CPU_PQ if necessary */
if (index > 1)
qid_base++;
qman_base = mmDMA0_QM_BASE + index * DMA_QMAN_OFFSET;
snprintf(desc, ARRAY_SIZE(desc), "%s%d", "DMA_QM", index);
break;
case GAUDI_EVENT_NIC0_QM0:
qid_base = GAUDI_QUEUE_ID_NIC_0_0;
qman_base = mmNIC0_QM0_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC0_QM0");
break;
case GAUDI_EVENT_NIC0_QM1:
qid_base = GAUDI_QUEUE_ID_NIC_1_0;
qman_base = mmNIC0_QM1_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC0_QM1");
break;
case GAUDI_EVENT_NIC1_QM0:
qid_base = GAUDI_QUEUE_ID_NIC_2_0;
qman_base = mmNIC1_QM0_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC1_QM0");
break;
case GAUDI_EVENT_NIC1_QM1:
qid_base = GAUDI_QUEUE_ID_NIC_3_0;
qman_base = mmNIC1_QM1_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC1_QM1");
break;
case GAUDI_EVENT_NIC2_QM0:
qid_base = GAUDI_QUEUE_ID_NIC_4_0;
qman_base = mmNIC2_QM0_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC2_QM0");
break;
case GAUDI_EVENT_NIC2_QM1:
qid_base = GAUDI_QUEUE_ID_NIC_5_0;
qman_base = mmNIC2_QM1_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC2_QM1");
break;
case GAUDI_EVENT_NIC3_QM0:
qid_base = GAUDI_QUEUE_ID_NIC_6_0;
qman_base = mmNIC3_QM0_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC3_QM0");
break;
case GAUDI_EVENT_NIC3_QM1:
qid_base = GAUDI_QUEUE_ID_NIC_7_0;
qman_base = mmNIC3_QM1_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC3_QM1");
break;
case GAUDI_EVENT_NIC4_QM0:
qid_base = GAUDI_QUEUE_ID_NIC_8_0;
qman_base = mmNIC4_QM0_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC4_QM0");
break;
case GAUDI_EVENT_NIC4_QM1:
qid_base = GAUDI_QUEUE_ID_NIC_9_0;
qman_base = mmNIC4_QM1_BASE;
snprintf(desc, ARRAY_SIZE(desc), "NIC4_QM1");
break;
default:
return;
}
gaudi_handle_qman_err_generic(hdev, desc, qman_base, qid_base, event_mask);
}
static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
bool check_razwi, u64 *event_mask)
{
bool is_read = false, is_write = false;
u16 engine_id[2], num_of_razwi_eng = 0;
char desc[64] = "";
u64 razwi_addr = 0;
u8 razwi_flags = 0;
	/*
	 * Initialize the engine IDs as invalid by default; they get valid
	 * values only if the RAZWI was initiated by an engine that reports
	 * an engine ID.
	 */
engine_id[0] = HL_RAZWI_NA_ENG_ID;
engine_id[1] = HL_RAZWI_NA_ENG_ID;
gaudi_get_event_desc(event_type, desc, sizeof(desc));
dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
if (check_razwi) {
gaudi_print_and_get_razwi_info(hdev, &engine_id[0], &engine_id[1], &is_read,
&is_write);
gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, event_mask);
if (is_read)
razwi_flags |= HL_RAZWI_READ;
if (is_write)
razwi_flags |= HL_RAZWI_WRITE;
if (engine_id[0] != HL_RAZWI_NA_ENG_ID) {
if (engine_id[1] != HL_RAZWI_NA_ENG_ID)
num_of_razwi_eng = 2;
else
num_of_razwi_eng = 1;
}
if (razwi_flags)
hl_handle_razwi(hdev, razwi_addr, engine_id, num_of_razwi_eng,
razwi_flags, event_mask);
}
}
static void gaudi_print_out_of_sync_info(struct hl_device *hdev,
struct cpucp_pkt_sync_err *sync_err)
{
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
}
static void gaudi_print_fw_alive_info(struct hl_device *hdev,
struct hl_eq_fw_alive *fw_alive)
{
dev_err(hdev->dev,
"FW alive report: severity=%s, process_id=%u, thread_id=%u, uptime=%llu seconds\n",
(fw_alive->severity == FW_ALIVE_SEVERITY_MINOR) ? "Minor" : "Critical",
le32_to_cpu(fw_alive->process_id),
le32_to_cpu(fw_alive->thread_id),
le64_to_cpu(fw_alive->uptime_seconds));
}
static void gaudi_print_nic_axi_irq_info(struct hl_device *hdev, u16 event_type,
void *data)
{
char desc[64] = "", *type;
struct eq_nic_sei_event *eq_nic_sei = data;
u16 nic_id = event_type - GAUDI_EVENT_NIC_SEI_0;
switch (eq_nic_sei->axi_error_cause) {
case RXB:
type = "RXB";
break;
case RXE:
type = "RXE";
break;
case TXS:
type = "TXS";
break;
case TXE:
type = "TXE";
break;
case QPC_RESP:
type = "QPC_RESP";
break;
case NON_AXI_ERR:
type = "NON_AXI_ERR";
break;
case TMR:
type = "TMR";
break;
default:
dev_err(hdev->dev, "unknown NIC AXI cause %d\n",
eq_nic_sei->axi_error_cause);
type = "N/A";
break;
}
snprintf(desc, sizeof(desc), "NIC%d_%s%d", nic_id, type,
eq_nic_sei->id);
dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
}
static int gaudi_compute_reset_late_init(struct hl_device *hdev)
{
/* GAUDI doesn't support any reset except hard-reset */
return -EPERM;
}
static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
struct hl_eq_hbm_ecc_data *hbm_ecc_data)
{
u32 base, val, val2, wr_par, rd_par, ca_par, derr, serr, type, ch;
int rc = 0;
if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_HBM_ECC_EN) {
if (!hbm_ecc_data) {
dev_err(hdev->dev, "No FW ECC data");
return 0;
}
wr_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK,
le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
rd_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK,
le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
ca_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK,
le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
derr = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_DERR_MASK,
le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
serr = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_SERR_MASK,
le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
type = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK,
le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
ch = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK,
le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
dev_err(hdev->dev,
"HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
device, ch, wr_par, rd_par, ca_par, serr, derr);
dev_err(hdev->dev,
"HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%u, SEC_CNT=%d, DEC_CNT=%d\n",
device, ch, hbm_ecc_data->first_addr, type,
hbm_ecc_data->sec_cont_cnt, hbm_ecc_data->sec_cnt,
hbm_ecc_data->dec_cnt);
return 0;
}
if (hdev->asic_prop.fw_security_enabled) {
dev_info(hdev->dev, "Cannot access MC regs for ECC data while security is enabled\n");
return 0;
}
base = GAUDI_HBM_CFG_BASE + device * GAUDI_HBM_CFG_OFFSET;
for (ch = 0 ; ch < GAUDI_HBM_CHANNELS ; ch++) {
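		/* Each channel register block reports two pseudo-channels:
		 * the 0x06x registers cover pseudo-channel (ch * 2) and the
		 * 0x07x registers cover pseudo-channel (ch * 2 + 1).
		 */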
val = RREG32_MASK(base + ch * 0x1000 + 0x06C, 0x0000FFFF);
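		/* OR the two status bytes together so any asserted interrupt
		 * bit is detected
		 */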
val = (val & 0xFF) | ((val >> 8) & 0xFF);
if (val) {
rc = -EIO;
dev_err(hdev->dev,
"HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
device, ch * 2, val & 0x1, (val >> 1) & 0x1,
(val >> 2) & 0x1, (val >> 3) & 0x1,
(val >> 4) & 0x1);
val2 = RREG32(base + ch * 0x1000 + 0x060);
dev_err(hdev->dev,
"HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DEC_CNT=%d\n",
device, ch * 2,
RREG32(base + ch * 0x1000 + 0x064),
(val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10,
(val2 & 0xFF0000) >> 16,
(val2 & 0xFF000000) >> 24);
}
val = RREG32_MASK(base + ch * 0x1000 + 0x07C, 0x0000FFFF);
val = (val & 0xFF) | ((val >> 8) & 0xFF);
if (val) {
rc = -EIO;
dev_err(hdev->dev,
"HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
device, ch * 2 + 1, val & 0x1, (val >> 1) & 0x1,
(val >> 2) & 0x1, (val >> 3) & 0x1,
(val >> 4) & 0x1);
val2 = RREG32(base + ch * 0x1000 + 0x070);
dev_err(hdev->dev,
"HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DEC_CNT=%d\n",
device, ch * 2 + 1,
RREG32(base + ch * 0x1000 + 0x074),
(val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10,
(val2 & 0xFF0000) >> 16,
(val2 & 0xFF000000) >> 24);
}
/* Clear interrupts */
RMWREG32(base + (ch * 0x1000) + 0x060, 0x1C8, 0x1FF);
RMWREG32(base + (ch * 0x1000) + 0x070, 0x1C8, 0x1FF);
WREG32(base + (ch * 0x1000) + 0x06C, 0x1F1F);
WREG32(base + (ch * 0x1000) + 0x07C, 0x1F1F);
RMWREG32(base + (ch * 0x1000) + 0x060, 0x0, 0xF);
RMWREG32(base + (ch * 0x1000) + 0x070, 0x0, 0xF);
}
val = RREG32(base + 0x8F30);
val2 = RREG32(base + 0x8F34);
if (val | val2) {
rc = -EIO;
dev_err(hdev->dev,
"HBM %d MC SRAM SERR info: Reg 0x8F30=0x%x, Reg 0x8F34=0x%x\n",
device, val, val2);
}
val = RREG32(base + 0x8F40);
val2 = RREG32(base + 0x8F44);
if (val | val2) {
rc = -EIO;
dev_err(hdev->dev,
"HBM %d MC SRAM DERR info: Reg 0x8F40=0x%x, Reg 0x8F44=0x%x\n",
device, val, val2);
}
return rc;
}
static int gaudi_hbm_event_to_dev(u16 hbm_event_type)
{
switch (hbm_event_type) {
case GAUDI_EVENT_HBM0_SPI_0:
case GAUDI_EVENT_HBM0_SPI_1:
return 0;
case GAUDI_EVENT_HBM1_SPI_0:
case GAUDI_EVENT_HBM1_SPI_1:
return 1;
case GAUDI_EVENT_HBM2_SPI_0:
case GAUDI_EVENT_HBM2_SPI_1:
return 2;
case GAUDI_EVENT_HBM3_SPI_0:
case GAUDI_EVENT_HBM3_SPI_1:
return 3;
default:
break;
}
/* Should never happen */
return 0;
}
static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id,
char *interrupt_name)
{
u32 tpc_offset = tpc_id * TPC_CFG_OFFSET, tpc_interrupts_cause, i;
bool soft_reset_required = false;
tpc_interrupts_cause = RREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset) &
TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK;
for (i = 0 ; i < GAUDI_NUM_OF_TPC_INTR_CAUSE ; i++)
if (tpc_interrupts_cause & BIT(i)) {
dev_err_ratelimited(hdev->dev,
"TPC%d_%s interrupt cause: %s\n",
tpc_id, interrupt_name,
gaudi_tpc_interrupts_cause[i]);
/* If this is QM error, we need to soft-reset */
if (i == 15)
soft_reset_required = true;
}
/* Clear interrupts */
WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0);
return soft_reset_required;
}
static int tpc_dec_event_to_tpc_id(u16 tpc_dec_event_type)
{
return (tpc_dec_event_type - GAUDI_EVENT_TPC0_DEC) >> 1;
}
static int tpc_krn_event_to_tpc_id(u16 tpc_dec_event_type)
{
return (tpc_dec_event_type - GAUDI_EVENT_TPC0_KRN_ERR) / 6;
}
static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
ktime_t zero_time = ktime_set(0, 0);
mutex_lock(&hdev->clk_throttling.lock);
switch (event_type) {
case GAUDI_EVENT_FIX_POWER_ENV_S:
hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER;
hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to power consumption\n");
break;
case GAUDI_EVENT_FIX_POWER_ENV_E:
hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
dev_info_ratelimited(hdev->dev,
"Power envelop is safe, back to optimal clock\n");
break;
case GAUDI_EVENT_FIX_THERMAL_ENV_S:
hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to overheating\n");
break;
case GAUDI_EVENT_FIX_THERMAL_ENV_E:
hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
dev_info_ratelimited(hdev->dev,
"Thermal envelop is safe, back to optimal clock\n");
break;
default:
dev_err(hdev->dev, "Received invalid clock change event %d\n",
event_type);
break;
}
mutex_unlock(&hdev->clk_throttling.lock);
}
static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct hl_info_fw_err_info fw_err_info;
u64 data = le64_to_cpu(eq_entry->data[0]), event_mask = 0;
u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
u32 fw_fatal_err_flag = 0, flags = 0;
u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
>> EQ_CTL_EVENT_TYPE_SHIFT);
bool reset_required, reset_direct = false;
u8 cause;
int rc;
if (event_type >= GAUDI_EVENT_SIZE) {
dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
event_type, GAUDI_EVENT_SIZE - 1);
return;
}
gaudi->events_stat[event_type]++;
gaudi->events_stat_aggregate[event_type]++;
switch (event_type) {
case GAUDI_EVENT_PCIE_CORE_DERR:
case GAUDI_EVENT_PCIE_IF_DERR:
case GAUDI_EVENT_PCIE_PHY_DERR:
case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
case GAUDI_EVENT_MME0_ACC_DERR:
case GAUDI_EVENT_MME0_SBAB_DERR:
case GAUDI_EVENT_MME1_ACC_DERR:
case GAUDI_EVENT_MME1_SBAB_DERR:
case GAUDI_EVENT_MME2_ACC_DERR:
case GAUDI_EVENT_MME2_SBAB_DERR:
case GAUDI_EVENT_MME3_ACC_DERR:
case GAUDI_EVENT_MME3_SBAB_DERR:
case GAUDI_EVENT_DMA0_DERR_ECC ... GAUDI_EVENT_DMA7_DERR_ECC:
fallthrough;
case GAUDI_EVENT_CPU_IF_ECC_DERR:
case GAUDI_EVENT_PSOC_MEM_DERR:
case GAUDI_EVENT_PSOC_CORESIGHT_DERR:
case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR:
case GAUDI_EVENT_NIC0_DERR ... GAUDI_EVENT_NIC4_DERR:
case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
case GAUDI_EVENT_MMU_DERR:
case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR:
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
goto reset_device;
case GAUDI_EVENT_GIC500:
case GAUDI_EVENT_AXI_ECC:
case GAUDI_EVENT_L2_RAM_ECC:
case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_0:
case GAUDI_EVENT_HBM1_SPI_0:
case GAUDI_EVENT_HBM2_SPI_0:
case GAUDI_EVENT_HBM3_SPI_0:
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_hbm_read_interrupts(hdev,
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_1:
case GAUDI_EVENT_HBM1_SPI_1:
case GAUDI_EVENT_HBM2_SPI_1:
case GAUDI_EVENT_HBM3_SPI_1:
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_hbm_read_interrupts(hdev,
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI_EVENT_TPC0_DEC:
case GAUDI_EVENT_TPC1_DEC:
case GAUDI_EVENT_TPC2_DEC:
case GAUDI_EVENT_TPC3_DEC:
case GAUDI_EVENT_TPC4_DEC:
case GAUDI_EVENT_TPC5_DEC:
case GAUDI_EVENT_TPC6_DEC:
case GAUDI_EVENT_TPC7_DEC:
		/* On a TPC DEC event, notify about a possible TPC assertion.
		 * There is no dedicated assertion event yet, so the FW reports
		 * it as a TPC DEC event. The SW upper layer inspects an
		 * internal mapped area to determine whether the event is a TPC
		 * assertion or a "real" TPC DEC.
		 */
event_mask |= HL_NOTIFIER_EVENT_TPC_ASSERT;
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
reset_required = gaudi_tpc_read_interrupts(hdev,
tpc_dec_event_to_tpc_id(event_type),
"AXI_SLV_DEC_Error");
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
if (reset_required) {
dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
reset_direct = true;
goto reset_device;
} else {
hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
}
break;
case GAUDI_EVENT_TPC0_KRN_ERR:
case GAUDI_EVENT_TPC1_KRN_ERR:
case GAUDI_EVENT_TPC2_KRN_ERR:
case GAUDI_EVENT_TPC3_KRN_ERR:
case GAUDI_EVENT_TPC4_KRN_ERR:
case GAUDI_EVENT_TPC5_KRN_ERR:
case GAUDI_EVENT_TPC6_KRN_ERR:
case GAUDI_EVENT_TPC7_KRN_ERR:
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
reset_required = gaudi_tpc_read_interrupts(hdev,
tpc_krn_event_to_tpc_id(event_type),
"KRN_ERR");
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
if (reset_required) {
dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
reset_direct = true;
goto reset_device;
} else {
hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
}
break;
case GAUDI_EVENT_PCIE_CORE_SERR:
case GAUDI_EVENT_PCIE_IF_SERR:
case GAUDI_EVENT_PCIE_PHY_SERR:
case GAUDI_EVENT_TPC0_SERR ... GAUDI_EVENT_TPC7_SERR:
case GAUDI_EVENT_MME0_ACC_SERR:
case GAUDI_EVENT_MME0_SBAB_SERR:
case GAUDI_EVENT_MME1_ACC_SERR:
case GAUDI_EVENT_MME1_SBAB_SERR:
case GAUDI_EVENT_MME2_ACC_SERR:
case GAUDI_EVENT_MME2_SBAB_SERR:
case GAUDI_EVENT_MME3_ACC_SERR:
case GAUDI_EVENT_MME3_SBAB_SERR:
case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_DMA7_SERR_ECC:
case GAUDI_EVENT_CPU_IF_ECC_SERR:
case GAUDI_EVENT_PSOC_MEM_SERR:
case GAUDI_EVENT_PSOC_CORESIGHT_SERR:
case GAUDI_EVENT_SRAM0_SERR ... GAUDI_EVENT_SRAM28_SERR:
case GAUDI_EVENT_NIC0_SERR ... GAUDI_EVENT_NIC4_SERR:
case GAUDI_EVENT_DMA_IF0_SERR ... GAUDI_EVENT_DMA_IF3_SERR:
case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
fallthrough;
case GAUDI_EVENT_MMU_SERR:
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI_EVENT_PCIE_DEC:
case GAUDI_EVENT_CPU_AXI_SPLITTER:
case GAUDI_EVENT_PSOC_AXI_DEC:
case GAUDI_EVENT_PSOC_PRSTN_FALL:
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI_EVENT_MMU_PAGE_FAULT:
case GAUDI_EVENT_MMU_WR_PERM:
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI_EVENT_MME0_WBC_RSP:
case GAUDI_EVENT_MME0_SBAB0_RSP:
case GAUDI_EVENT_MME1_WBC_RSP:
case GAUDI_EVENT_MME1_SBAB0_RSP:
case GAUDI_EVENT_MME2_WBC_RSP:
case GAUDI_EVENT_MME2_SBAB0_RSP:
case GAUDI_EVENT_MME3_WBC_RSP:
case GAUDI_EVENT_MME3_SBAB0_RSP:
case GAUDI_EVENT_RAZWI_OR_ADC:
case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
fallthrough;
case GAUDI_EVENT_NIC0_QM0:
case GAUDI_EVENT_NIC0_QM1:
case GAUDI_EVENT_NIC1_QM0:
case GAUDI_EVENT_NIC1_QM1:
case GAUDI_EVENT_NIC2_QM0:
case GAUDI_EVENT_NIC2_QM1:
case GAUDI_EVENT_NIC3_QM0:
case GAUDI_EVENT_NIC3_QM1:
case GAUDI_EVENT_NIC4_QM0:
case GAUDI_EVENT_NIC4_QM1:
case GAUDI_EVENT_DMA0_CORE ... GAUDI_EVENT_DMA7_CORE:
case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
gaudi_handle_qman_err(hdev, event_type, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
event_mask |= (HL_NOTIFIER_EVENT_USER_ENGINE_ERR | HL_NOTIFIER_EVENT_DEVICE_RESET);
break;
case GAUDI_EVENT_RAZWI_OR_ADC_SW:
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
goto reset_device;
case GAUDI_EVENT_TPC0_BMON_SPMU:
case GAUDI_EVENT_TPC1_BMON_SPMU:
case GAUDI_EVENT_TPC2_BMON_SPMU:
case GAUDI_EVENT_TPC3_BMON_SPMU:
case GAUDI_EVENT_TPC4_BMON_SPMU:
case GAUDI_EVENT_TPC5_BMON_SPMU:
case GAUDI_EVENT_TPC6_BMON_SPMU:
case GAUDI_EVENT_TPC7_BMON_SPMU:
case GAUDI_EVENT_DMA_BM_CH0 ... GAUDI_EVENT_DMA_BM_CH7:
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI_EVENT_NIC_SEI_0 ... GAUDI_EVENT_NIC_SEI_4:
gaudi_print_nic_axi_irq_info(hdev, event_type, &data);
hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI_EVENT_DMA_IF_SEI_0 ... GAUDI_EVENT_DMA_IF_SEI_3:
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_print_sm_sei_info(hdev, event_type,
&eq_entry->sm_sei_data);
rc = hl_state_dump(hdev);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
if (rc)
dev_err(hdev->dev,
"Error during system state dump %d\n", rc);
hl_fw_unmask_irq(hdev, event_type);
break;
case GAUDI_EVENT_STATUS_NIC0_ENG0 ... GAUDI_EVENT_STATUS_NIC4_ENG1:
break;
case GAUDI_EVENT_FIX_POWER_ENV_S ... GAUDI_EVENT_FIX_THERMAL_ENV_E:
gaudi_print_clk_change_info(hdev, event_type, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
break;
case GAUDI_EVENT_PSOC_GPIO_U16_0:
cause = le64_to_cpu(eq_entry->data[0]) & 0xFF;
dev_err(hdev->dev,
"Received high temp H/W interrupt %d (cause %d)\n",
event_type, cause);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI_EVENT_DEV_RESET_REQ:
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
case GAUDI_EVENT_PKT_QUEUE_OUT_SYNC:
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
case GAUDI_EVENT_FW_ALIVE_S:
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive);
fw_err_info.err_type = HL_INFO_FW_REPORTED_ERR;
fw_err_info.event_id = event_type;
fw_err_info.event_mask = &event_mask;
hl_handle_fw_err(hdev, &fw_err_info);
goto reset_device;
default:
dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
event_type);
break;
}
if (event_mask)
hl_notifier_event_send_all(hdev, event_mask);
return;
reset_device:
reset_required = true;
if (hdev->asic_prop.fw_security_enabled && !reset_direct) {
flags = HL_DRV_RESET_HARD | HL_DRV_RESET_BYPASS_REQ_TO_FW | fw_fatal_err_flag;
		/* notify that the device is unavailable while the reset is triggered by FW */
event_mask |= (HL_NOTIFIER_EVENT_DEVICE_RESET |
HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE);
} else if (hdev->hard_reset_on_fw_events) {
flags = HL_DRV_RESET_HARD | HL_DRV_RESET_DELAY | fw_fatal_err_flag;
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
} else {
reset_required = false;
}
if (reset_required) {
/* escalate general hw errors to critical/fatal error */
if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR)
hl_handle_critical_hw_err(hdev, event_type, &event_mask);
hl_device_cond_reset(hdev, flags, event_mask);
} else {
hl_fw_unmask_irq(hdev, event_type);
		/* The notification about the event must still be sent even though no reset is executed */
if (event_mask)
hl_notifier_event_send_all(hdev, event_mask);
}
}
static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (aggregate) {
*size = (u32) sizeof(gaudi->events_stat_aggregate);
return gaudi->events_stat_aggregate;
}
*size = (u32) sizeof(gaudi->events_stat);
return gaudi->events_stat;
}
static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 status, timeout_usec;
int rc;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
hdev->reset_info.hard_reset_pending)
return 0;
if (hdev->pldm)
timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
else
timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
/* L0 & L1 invalidation */
WREG32(mmSTLB_INV_PS, 3);
WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++);
WREG32(mmSTLB_INV_PS, 2);
rc = hl_poll_timeout(
hdev,
mmSTLB_INV_PS,
status,
!status,
1000,
timeout_usec);
WREG32(mmSTLB_INV_SET, 0);
return rc;
}
static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
bool is_hard, u32 flags,
u32 asid, u64 va, u64 size)
{
/* Treat as invalidate all because there is no range invalidation
* in Gaudi
*/
return hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
}
static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid, u64 phys_addr)
{
u32 status, timeout_usec;
int rc;
if (hdev->pldm)
timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
else
timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
WREG32(MMU_ASID, asid);
WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
WREG32(MMU_BUSY, 0x80000000);
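	/* Setting the busy bit kicks off the hop0 update in HW;
	 * poll below until HW clears it.
	 */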
rc = hl_poll_timeout(
hdev,
MMU_BUSY,
status,
!(status & 0x80000000),
1000,
timeout_usec);
if (rc) {
dev_err(hdev->dev,
"Timeout during MMU hop0 config of asid %d\n", asid);
return rc;
}
return 0;
}
static int gaudi_send_heartbeat(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
return hl_fw_send_heartbeat(hdev);
}
static int gaudi_cpucp_info_get(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0,
mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
mmCPU_BOOT_ERR1);
if (rc)
return rc;
if (!strlen(prop->cpucp_info.card_name))
strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
hdev->card_type = le32_to_cpu(hdev->asic_prop.cpucp_info.card_type);
set_default_power_values(hdev);
return 0;
}
static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
struct engines_data *e)
{
struct gaudi_device *gaudi = hdev->asic_specific;
const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n";
const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n";
const char *nic_fmt = "%-5d%-9s%#-14x%#x\n";
unsigned long *mask = (unsigned long *)mask_arr;
u32 qm_glbl_sts0, qm_cgm_sts, dma_core_sts0, tpc_cfg_sts, mme_arch_sts;
bool is_idle = true, is_eng_idle, is_slave;
u64 offset;
int i, dma_id, port;
if (e)
hl_engine_data_sprintf(e,
"\nDMA is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_STS0\n"
"--- ------- ------------ ---------- -------------\n");
for (i = 0 ; i < DMA_NUMBER_OF_CHNLS ; i++) {
dma_id = gaudi_dma_assignment[i];
offset = dma_id * DMA_QMAN_OFFSET;
qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + offset);
qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + offset);
dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
IS_DMA_IDLE(dma_core_sts0);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_DMA_0 + dma_id, mask);
if (e)
hl_engine_data_sprintf(e, fmt, dma_id,
is_eng_idle ? "Y" : "N", qm_glbl_sts0,
qm_cgm_sts, dma_core_sts0);
}
if (e)
hl_engine_data_sprintf(e,
"\nTPC is_idle QM_GLBL_STS0 QM_CGM_STS CFG_STATUS\n"
"--- ------- ------------ ---------- ----------\n");
for (i = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
offset = i * TPC_QMAN_OFFSET;
qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + offset);
qm_cgm_sts = RREG32(mmTPC0_QM_CGM_STS + offset);
tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
IS_TPC_IDLE(tpc_cfg_sts);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_TPC_0 + i, mask);
if (e)
hl_engine_data_sprintf(e, fmt, i,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
}
if (e)
hl_engine_data_sprintf(e,
"\nMME is_idle QM_GLBL_STS0 QM_CGM_STS ARCH_STATUS\n"
"--- ------- ------------ ---------- -----------\n");
for (i = 0 ; i < MME_NUMBER_OF_ENGINES ; i++) {
offset = i * MME_QMAN_OFFSET;
mme_arch_sts = RREG32(mmMME0_CTRL_ARCH_STATUS + offset);
is_eng_idle = IS_MME_IDLE(mme_arch_sts);
/* MME 1 & 3 are slaves, no need to check their QMANs */
is_slave = i % 2;
if (!is_slave) {
qm_glbl_sts0 = RREG32(mmMME0_QM_GLBL_STS0 + offset);
qm_cgm_sts = RREG32(mmMME0_QM_CGM_STS + offset);
is_eng_idle &= IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
}
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_MME_0 + i, mask);
if (e) {
if (!is_slave)
hl_engine_data_sprintf(e, fmt, i,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts, mme_arch_sts);
else
hl_engine_data_sprintf(e, mme_slave_fmt, i,
is_eng_idle ? "Y" : "N", "-",
"-", mme_arch_sts);
}
}
if (e)
hl_engine_data_sprintf(e,
"\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n"
"--- ------- ------------ ----------\n");
for (i = 0 ; i < (NIC_NUMBER_OF_ENGINES / 2) ; i++) {
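		/* Each iteration covers one NIC macro, i.e. two ports that
		 * share the macro's QM0 and QM1
		 */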
offset = i * NIC_MACRO_QMAN_OFFSET;
port = 2 * i;
if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + port)) {
qm_glbl_sts0 = RREG32(mmNIC0_QM0_GLBL_STS0 + offset);
qm_cgm_sts = RREG32(mmNIC0_QM0_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask);
if (e)
hl_engine_data_sprintf(e, nic_fmt, port,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts);
}
port = 2 * i + 1;
if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + port)) {
qm_glbl_sts0 = RREG32(mmNIC0_QM1_GLBL_STS0 + offset);
qm_cgm_sts = RREG32(mmNIC0_QM1_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask);
if (e)
hl_engine_data_sprintf(e, nic_fmt, port,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts);
}
}
if (e)
hl_engine_data_sprintf(e, "\n");
return is_idle;
}
static void gaudi_hw_queues_lock(struct hl_device *hdev)
__acquires(&gaudi->hw_queues_lock)
{
struct gaudi_device *gaudi = hdev->asic_specific;
spin_lock(&gaudi->hw_queues_lock);
}
static void gaudi_hw_queues_unlock(struct hl_device *hdev)
__releases(&gaudi->hw_queues_lock)
{
struct gaudi_device *gaudi = hdev->asic_specific;
spin_unlock(&gaudi->hw_queues_lock);
}
static u32 gaudi_get_pci_id(struct hl_device *hdev)
{
return hdev->pdev->device;
}
static int gaudi_get_eeprom_data(struct hl_device *hdev, void *data,
size_t max_size)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
return hl_fw_get_eeprom_data(hdev, data, max_size);
}
static int gaudi_get_monitor_dump(struct hl_device *hdev, void *data)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
return hl_fw_get_monitor_dump(hdev, data);
}
/*
* this function should be used only during initialization and/or after reset,
* when there are no active users.
*/
static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel, u32 tpc_id)
{
u64 kernel_timeout;
u32 status, offset;
int rc;
offset = tpc_id * (mmTPC1_CFG_STATUS - mmTPC0_CFG_STATUS);
if (hdev->pldm)
kernel_timeout = GAUDI_PLDM_TPC_KERNEL_WAIT_USEC;
else
kernel_timeout = HL_DEVICE_TIMEOUT_USEC;
WREG32(mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW + offset,
lower_32_bits(tpc_kernel));
WREG32(mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH + offset,
upper_32_bits(tpc_kernel));
WREG32(mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW + offset,
lower_32_bits(tpc_kernel));
WREG32(mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH + offset,
upper_32_bits(tpc_kernel));
/* set a valid LUT pointer, content is of no significance */
WREG32(mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_LO + offset,
lower_32_bits(tpc_kernel));
WREG32(mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_HI + offset,
upper_32_bits(tpc_kernel));
WREG32(mmTPC0_CFG_QM_SYNC_OBJECT_ADDR + offset,
lower_32_bits(CFG_BASE +
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0));
WREG32(mmTPC0_CFG_TPC_CMD + offset,
(1 << TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT |
1 << TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT));
/* wait a bit for the engine to start executing */
usleep_range(1000, 1500);
	/* wait until the icache invalidate/prefetch command has completed */
rc = hl_poll_timeout(
hdev,
mmTPC0_CFG_STATUS + offset,
status,
(status & TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK) ==
TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK,
1000,
kernel_timeout);
if (rc) {
dev_err(hdev->dev,
"Timeout while waiting for TPC%d icache prefetch\n",
tpc_id);
return -EIO;
}
WREG32(mmTPC0_CFG_TPC_EXECUTE + offset,
1 << TPC0_CFG_TPC_EXECUTE_V_SHIFT);
/* wait a bit for the engine to start executing */
usleep_range(1000, 1500);
/* wait until engine has finished executing */
rc = hl_poll_timeout(
hdev,
mmTPC0_CFG_STATUS + offset,
status,
(status & TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK) ==
TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK,
1000,
kernel_timeout);
if (rc) {
dev_err(hdev->dev,
"Timeout while waiting for TPC%d vector pipe\n",
tpc_id);
return -EIO;
}
rc = hl_poll_timeout(
hdev,
mmTPC0_CFG_WQ_INFLIGHT_CNTR + offset,
status,
(status == 0),
1000,
kernel_timeout);
if (rc) {
dev_err(hdev->dev,
"Timeout while waiting for TPC%d kernel to execute\n",
tpc_id);
return -EIO;
}
return 0;
}
static int gaudi_internal_cb_pool_init(struct hl_device *hdev,
struct hl_ctx *ctx)
{
struct gaudi_device *gaudi = hdev->asic_specific;
int min_alloc_order, rc, collective_cb_size;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
return 0;
hdev->internal_cb_pool_virt_addr = hl_asic_dma_alloc_coherent(hdev,
HOST_SPACE_INTERNAL_CB_SZ,
&hdev->internal_cb_pool_dma_addr,
GFP_KERNEL | __GFP_ZERO);
if (!hdev->internal_cb_pool_virt_addr)
return -ENOMEM;
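	/* Size the pool's minimum allocation order to fit one collective
	 * wait CB: 5 msg-short packets plus a fence packet.
	 */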
collective_cb_size = sizeof(struct packet_msg_short) * 5 +
sizeof(struct packet_fence);
min_alloc_order = ilog2(collective_cb_size);
hdev->internal_cb_pool = gen_pool_create(min_alloc_order, -1);
if (!hdev->internal_cb_pool) {
dev_err(hdev->dev,
"Failed to create internal CB pool\n");
rc = -ENOMEM;
goto free_internal_cb_pool;
}
rc = gen_pool_add(hdev->internal_cb_pool,
(uintptr_t) hdev->internal_cb_pool_virt_addr,
HOST_SPACE_INTERNAL_CB_SZ, -1);
if (rc) {
dev_err(hdev->dev,
"Failed to add memory to internal CB pool\n");
rc = -EFAULT;
goto destroy_internal_cb_pool;
}
hdev->internal_cb_va_base = hl_reserve_va_block(hdev, ctx,
HL_VA_RANGE_TYPE_HOST, HOST_SPACE_INTERNAL_CB_SZ,
HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
if (!hdev->internal_cb_va_base) {
rc = -ENOMEM;
goto destroy_internal_cb_pool;
}
mutex_lock(&hdev->mmu_lock);
rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base,
hdev->internal_cb_pool_dma_addr,
HOST_SPACE_INTERNAL_CB_SZ);
if (rc)
goto unreserve_internal_cb_pool;
rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
if (rc)
goto unmap_internal_cb_pool;
mutex_unlock(&hdev->mmu_lock);
return 0;
unmap_internal_cb_pool:
hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
unreserve_internal_cb_pool:
mutex_unlock(&hdev->mmu_lock);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
destroy_internal_cb_pool:
gen_pool_destroy(hdev->internal_cb_pool);
free_internal_cb_pool:
hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr,
hdev->internal_cb_pool_dma_addr);
return rc;
}
static void gaudi_internal_cb_pool_fini(struct hl_device *hdev,
struct hl_ctx *ctx)
{
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
return;
mutex_lock(&hdev->mmu_lock);
hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
mutex_unlock(&hdev->mmu_lock);
gen_pool_destroy(hdev->internal_cb_pool);
hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr,
hdev->internal_cb_pool_dma_addr);
}
static int gaudi_ctx_init(struct hl_ctx *ctx)
{
int rc;
if (ctx->asid == HL_KERNEL_ASID_ID)
return 0;
rc = gaudi_internal_cb_pool_init(ctx->hdev, ctx);
if (rc)
return rc;
rc = gaudi_restore_user_registers(ctx->hdev);
if (rc)
gaudi_internal_cb_pool_fini(ctx->hdev, ctx);
return rc;
}
static void gaudi_ctx_fini(struct hl_ctx *ctx)
{
if (ctx->asid == HL_KERNEL_ASID_ID)
return;
gaudi_internal_cb_pool_fini(ctx->hdev, ctx);
}
static int gaudi_pre_schedule_cs(struct hl_cs *cs)
{
return 0;
}
static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
{
return gaudi_cq_assignment[cq_idx];
}
static u32 gaudi_get_signal_cb_size(struct hl_device *hdev)
{
return sizeof(struct packet_msg_short) +
sizeof(struct packet_msg_prot) * 2;
}
static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
{
return sizeof(struct packet_msg_short) * 4 +
sizeof(struct packet_fence) +
sizeof(struct packet_msg_prot) * 2;
}
static u32 gaudi_get_sob_addr(struct hl_device *hdev, u32 sob_id)
{
return mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + (sob_id * 4);
}
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
u32 size, bool eb)
{
struct hl_cb *cb = (struct hl_cb *) data;
struct packet_msg_short *pkt;
u32 value, ctl, pkt_size = sizeof(*pkt);
pkt = cb->kernel_address + size;
memset(pkt, 0, pkt_size);
/* Inc by 1, Mode ADD */
value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK, 1);
value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_MOD_MASK, 1);
ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, sob_id * 4);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, eb);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
return size + pkt_size;
}
static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value,
u16 addr)
{
u32 ctl, pkt_size = sizeof(*pkt);
memset(pkt, 0, pkt_size);
ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 0); /* last pkt MB */
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
return pkt_size;
}
static u32 gaudi_add_arm_monitor_pkt(struct hl_device *hdev,
struct packet_msg_short *pkt, u16 sob_base, u8 sob_mask,
u16 sob_val, u16 mon_id)
{
u64 monitor_base;
u32 ctl, value, pkt_size = sizeof(*pkt);
u16 msg_addr_offset;
u8 mask;
if (hl_gen_sob_mask(sob_base, sob_mask, &mask)) {
dev_err(hdev->dev,
"sob_base %u (mask %#x) is not valid\n",
sob_base, sob_mask);
return 0;
}
/*
* monitor_base should be the content of the base0 address registers,
* so it will be added to the msg short offsets
*/
monitor_base = mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0;
msg_addr_offset =
(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) -
monitor_base;
memset(pkt, 0, pkt_size);
/* Monitor config packet: bind the monitor to a sync object */
value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_base / 8);
value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_MASK, sob_val);
value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MODE_MASK,
			0); /* GREATER OR EQUAL */
value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MASK_MASK, mask);
ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, msg_addr_offset);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
return pkt_size;
}
static u32 gaudi_add_fence_pkt(struct packet_fence *pkt)
{
u32 ctl, cfg, pkt_size = sizeof(*pkt);
memset(pkt, 0, pkt_size);
cfg = FIELD_PREP(GAUDI_PKT_FENCE_CFG_DEC_VAL_MASK, 1);
cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK, 1);
cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_ID_MASK, 2);
ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_FENCE);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
pkt->cfg = cpu_to_le32(cfg);
pkt->ctl = cpu_to_le32(ctl);
return pkt_size;
}
static int gaudi_get_fence_addr(struct hl_device *hdev, u32 queue_id, u64 *addr)
{
u32 offset, nic_index;
switch (queue_id) {
case GAUDI_QUEUE_ID_DMA_0_0:
offset = mmDMA0_QM_CP_FENCE2_RDATA_0;
break;
case GAUDI_QUEUE_ID_DMA_0_1:
offset = mmDMA0_QM_CP_FENCE2_RDATA_1;
break;
case GAUDI_QUEUE_ID_DMA_0_2:
offset = mmDMA0_QM_CP_FENCE2_RDATA_2;
break;
case GAUDI_QUEUE_ID_DMA_0_3:
offset = mmDMA0_QM_CP_FENCE2_RDATA_3;
break;
case GAUDI_QUEUE_ID_DMA_1_0:
offset = mmDMA1_QM_CP_FENCE2_RDATA_0;
break;
case GAUDI_QUEUE_ID_DMA_1_1:
offset = mmDMA1_QM_CP_FENCE2_RDATA_1;
break;
case GAUDI_QUEUE_ID_DMA_1_2:
offset = mmDMA1_QM_CP_FENCE2_RDATA_2;
break;
case GAUDI_QUEUE_ID_DMA_1_3:
offset = mmDMA1_QM_CP_FENCE2_RDATA_3;
break;
case GAUDI_QUEUE_ID_DMA_5_0:
offset = mmDMA5_QM_CP_FENCE2_RDATA_0;
break;
case GAUDI_QUEUE_ID_DMA_5_1:
offset = mmDMA5_QM_CP_FENCE2_RDATA_1;
break;
case GAUDI_QUEUE_ID_DMA_5_2:
offset = mmDMA5_QM_CP_FENCE2_RDATA_2;
break;
case GAUDI_QUEUE_ID_DMA_5_3:
offset = mmDMA5_QM_CP_FENCE2_RDATA_3;
break;
case GAUDI_QUEUE_ID_TPC_7_0:
offset = mmTPC7_QM_CP_FENCE2_RDATA_0;
break;
case GAUDI_QUEUE_ID_TPC_7_1:
offset = mmTPC7_QM_CP_FENCE2_RDATA_1;
break;
case GAUDI_QUEUE_ID_TPC_7_2:
offset = mmTPC7_QM_CP_FENCE2_RDATA_2;
break;
case GAUDI_QUEUE_ID_TPC_7_3:
offset = mmTPC7_QM_CP_FENCE2_RDATA_3;
break;
case GAUDI_QUEUE_ID_NIC_0_0:
case GAUDI_QUEUE_ID_NIC_1_0:
case GAUDI_QUEUE_ID_NIC_2_0:
case GAUDI_QUEUE_ID_NIC_3_0:
case GAUDI_QUEUE_ID_NIC_4_0:
case GAUDI_QUEUE_ID_NIC_5_0:
case GAUDI_QUEUE_ID_NIC_6_0:
case GAUDI_QUEUE_ID_NIC_7_0:
case GAUDI_QUEUE_ID_NIC_8_0:
case GAUDI_QUEUE_ID_NIC_9_0:
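		/* Each NIC macro hosts two QMANs: nic_index / 2 selects the
		 * macro and nic_index % 2 selects the QMAN within it.
		 */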
nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2;
offset = mmNIC0_QM0_CP_FENCE2_RDATA_0 +
(nic_index >> 1) * NIC_MACRO_QMAN_OFFSET +
(nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET;
break;
case GAUDI_QUEUE_ID_NIC_0_1:
case GAUDI_QUEUE_ID_NIC_1_1:
case GAUDI_QUEUE_ID_NIC_2_1:
case GAUDI_QUEUE_ID_NIC_3_1:
case GAUDI_QUEUE_ID_NIC_4_1:
case GAUDI_QUEUE_ID_NIC_5_1:
case GAUDI_QUEUE_ID_NIC_6_1:
case GAUDI_QUEUE_ID_NIC_7_1:
case GAUDI_QUEUE_ID_NIC_8_1:
case GAUDI_QUEUE_ID_NIC_9_1:
nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_1) >> 2;
offset = mmNIC0_QM0_CP_FENCE2_RDATA_1 +
(nic_index >> 1) * NIC_MACRO_QMAN_OFFSET +
(nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET;
break;
case GAUDI_QUEUE_ID_NIC_0_2:
case GAUDI_QUEUE_ID_NIC_1_2:
case GAUDI_QUEUE_ID_NIC_2_2:
case GAUDI_QUEUE_ID_NIC_3_2:
case GAUDI_QUEUE_ID_NIC_4_2:
case GAUDI_QUEUE_ID_NIC_5_2:
case GAUDI_QUEUE_ID_NIC_6_2:
case GAUDI_QUEUE_ID_NIC_7_2:
case GAUDI_QUEUE_ID_NIC_8_2:
case GAUDI_QUEUE_ID_NIC_9_2:
nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_2) >> 2;
offset = mmNIC0_QM0_CP_FENCE2_RDATA_2 +
(nic_index >> 1) * NIC_MACRO_QMAN_OFFSET +
(nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET;
break;
case GAUDI_QUEUE_ID_NIC_0_3:
case GAUDI_QUEUE_ID_NIC_1_3:
case GAUDI_QUEUE_ID_NIC_2_3:
case GAUDI_QUEUE_ID_NIC_3_3:
case GAUDI_QUEUE_ID_NIC_4_3:
case GAUDI_QUEUE_ID_NIC_5_3:
case GAUDI_QUEUE_ID_NIC_6_3:
case GAUDI_QUEUE_ID_NIC_7_3:
case GAUDI_QUEUE_ID_NIC_8_3:
case GAUDI_QUEUE_ID_NIC_9_3:
nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_3) >> 2;
offset = mmNIC0_QM0_CP_FENCE2_RDATA_3 +
(nic_index >> 1) * NIC_MACRO_QMAN_OFFSET +
(nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET;
break;
default:
return -EINVAL;
}
*addr = CFG_BASE + offset;
return 0;
}
static u32 gaudi_add_mon_pkts(void *buf, u16 mon_id, u64 fence_addr)
{
u64 monitor_base;
u32 size = 0;
u16 msg_addr_offset;
/*
* monitor_base should be the content of the base0 address registers,
* so it will be added to the msg short offsets
*/
monitor_base = mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0;
/* First monitor config packet: low address of the sync */
msg_addr_offset =
(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_id * 4) -
monitor_base;
size += gaudi_add_mon_msg_short(buf + size, (u32) fence_addr,
msg_addr_offset);
/* Second monitor config packet: high address of the sync */
msg_addr_offset =
(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_id * 4) -
monitor_base;
size += gaudi_add_mon_msg_short(buf + size, (u32) (fence_addr >> 32),
msg_addr_offset);
/*
* Third monitor config packet: the payload, i.e. what to write when the
* sync triggers
*/
msg_addr_offset =
(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_id * 4) -
monitor_base;
size += gaudi_add_mon_msg_short(buf + size, 1, msg_addr_offset);
return size;
}
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
struct hl_gen_wait_properties *prop)
{
struct hl_cb *cb = (struct hl_cb *) prop->data;
void *buf = cb->kernel_address;
u64 fence_addr = 0;
u32 size = prop->size;
if (gaudi_get_fence_addr(hdev, prop->q_idx, &fence_addr)) {
dev_crit(hdev->dev, "wrong queue id %d for wait packet\n",
prop->q_idx);
return 0;
}
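	/* Build the wait CB: three monitor setup packets (address low/high
	 * and payload), one monitor ARM packet and one fence packet.
	 */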
size += gaudi_add_mon_pkts(buf + size, prop->mon_id, fence_addr);
size += gaudi_add_arm_monitor_pkt(hdev, buf + size, prop->sob_base,
prop->sob_mask, prop->sob_val, prop->mon_id);
size += gaudi_add_fence_pkt(buf + size);
return size;
}
static void gaudi_reset_sob(struct hl_device *hdev, void *data)
{
struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;
dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx,
hw_sob->sob_id);
WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
hw_sob->sob_id * 4, 0);
kref_init(&hw_sob->kref);
}
static u64 gaudi_get_device_time(struct hl_device *hdev)
{
u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
}
static int gaudi_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
u32 *block_size, u32 *block_id)
{
return -EPERM;
}
static int gaudi_block_mmap(struct hl_device *hdev,
struct vm_area_struct *vma,
u32 block_id, u32 block_size)
{
return -EPERM;
}
static void gaudi_enable_events_from_fw(struct hl_device *hdev)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
le32_to_cpu(dyn_regs->gic_host_ints_irq);
WREG32(irq_handler_offset,
gaudi_irq_map_table[GAUDI_EVENT_INTS_REGISTER].cpu_id);
}
static int gaudi_ack_mmu_page_fault_or_access_error(struct hl_device *hdev, u64 mmu_cap_mask)
{
return -EINVAL;
}
static int gaudi_map_pll_idx_to_fw_idx(u32 pll_idx)
{
switch (pll_idx) {
case HL_GAUDI_CPU_PLL: return CPU_PLL;
case HL_GAUDI_PCI_PLL: return PCI_PLL;
case HL_GAUDI_NIC_PLL: return NIC_PLL;
case HL_GAUDI_DMA_PLL: return DMA_PLL;
case HL_GAUDI_MESH_PLL: return MESH_PLL;
case HL_GAUDI_MME_PLL: return MME_PLL;
case HL_GAUDI_TPC_PLL: return TPC_PLL;
case HL_GAUDI_IF_PLL: return IF_PLL;
case HL_GAUDI_SRAM_PLL: return SRAM_PLL;
case HL_GAUDI_HBM_PLL: return HBM_PLL;
default: return -EINVAL;
}
}
static int gaudi_add_sync_to_engine_map_entry(
struct hl_sync_to_engine_map *map, u32 reg_value,
enum hl_sync_engine_type engine_type, u32 engine_id)
{
struct hl_sync_to_engine_map_entry *entry;
	/* The register value represents a partial address of the sync object
	 * and is used as a unique identifier. For that purpose we need to
	 * clear the CFG base bits from the value.
	 */
if (reg_value == 0 || reg_value == 0xffffffff)
return 0;
reg_value -= lower_32_bits(CFG_BASE);
/* create a new hash entry */
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->engine_type = engine_type;
entry->engine_id = engine_id;
entry->sync_id = reg_value;
hash_add(map->tb, &entry->node, reg_value);
return 0;
}
static int gaudi_gen_sync_to_engine_map(struct hl_device *hdev,
struct hl_sync_to_engine_map *map)
{
struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
int i, j, rc;
u32 reg_value;
/* Iterate over TPC engines */
for (i = 0; i < sds->props[SP_NUM_OF_TPC_ENGINES]; ++i) {
reg_value = RREG32(sds->props[SP_TPC0_CFG_SO] +
sds->props[SP_NEXT_TPC] * i);
rc = gaudi_add_sync_to_engine_map_entry(map, reg_value,
ENGINE_TPC, i);
if (rc)
goto free_sync_to_engine_map;
}
/* Iterate over MME engines */
for (i = 0; i < sds->props[SP_NUM_OF_MME_ENGINES]; ++i) {
for (j = 0; j < sds->props[SP_SUB_MME_ENG_NUM]; ++j) {
reg_value = RREG32(sds->props[SP_MME_CFG_SO] +
sds->props[SP_NEXT_MME] * i +
j * sizeof(u32));
rc = gaudi_add_sync_to_engine_map_entry(
map, reg_value, ENGINE_MME,
i * sds->props[SP_SUB_MME_ENG_NUM] + j);
if (rc)
goto free_sync_to_engine_map;
}
}
/* Iterate over DMA engines */
for (i = 0; i < sds->props[SP_NUM_OF_DMA_ENGINES]; ++i) {
reg_value = RREG32(sds->props[SP_DMA_CFG_SO] +
sds->props[SP_DMA_QUEUES_OFFSET] * i);
rc = gaudi_add_sync_to_engine_map_entry(map, reg_value,
ENGINE_DMA, i);
if (rc)
goto free_sync_to_engine_map;
}
return 0;
free_sync_to_engine_map:
hl_state_dump_free_sync_to_engine_map(map);
return rc;
}
static int gaudi_monitor_valid(struct hl_mon_state_dump *mon)
{
return FIELD_GET(
SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_VALID_MASK,
mon->status);
}
static void gaudi_fill_sobs_from_mon(char *sobs, struct hl_mon_state_dump *mon)
{
const size_t max_write = 10;
u32 gid, mask, sob;
int i, offset;
	/* A sync object ID is calculated as
	 * (group_id * MONITOR_MAX_SOBS + the index of each cleared bit in the mask)
	 */
gid = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK,
mon->arm_data);
mask = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK,
mon->arm_data);
for (i = 0, offset = 0; mask && offset < MONITOR_SOB_STRING_SIZE -
max_write; mask >>= 1, i++) {
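		/* A cleared bit in the mask means the corresponding SOB in
		 * the group is monitored
		 */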
if (!(mask & 1)) {
sob = gid * MONITOR_MAX_SOBS + i;
if (offset > 0)
offset += snprintf(sobs + offset, max_write,
", ");
offset += snprintf(sobs + offset, max_write, "%u", sob);
}
}
}
static int gaudi_print_single_monitor(char **buf, size_t *size, size_t *offset,
struct hl_device *hdev,
struct hl_mon_state_dump *mon)
{
const char *name;
char scratch_buf1[BIN_REG_STRING_SIZE],
scratch_buf2[BIN_REG_STRING_SIZE];
char monitored_sobs[MONITOR_SOB_STRING_SIZE] = {0};
name = hl_state_dump_get_monitor_name(hdev, mon);
if (!name)
name = "";
gaudi_fill_sobs_from_mon(monitored_sobs, mon);
return hl_snprintf_resize(
buf, size, offset,
"Mon id: %u%s, wait for group id: %u mask %s to reach val: %u and write %u to address 0x%llx. Pending: %s. Means sync objects [%s] are being monitored.",
mon->id, name,
FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK,
mon->arm_data),
hl_format_as_binary(
scratch_buf1, sizeof(scratch_buf1),
FIELD_GET(
SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK,
mon->arm_data)),
FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOD_MASK,
mon->arm_data),
mon->wr_data,
(((u64)mon->wr_addr_high) << 32) | mon->wr_addr_low,
hl_format_as_binary(
scratch_buf2, sizeof(scratch_buf2),
FIELD_GET(
SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_PENDING_MASK,
mon->status)),
monitored_sobs);
}
static int gaudi_print_fences_single_engine(
struct hl_device *hdev, u64 base_offset, u64 status_base_offset,
enum hl_sync_engine_type engine_type, u32 engine_id, char **buf,
size_t *size, size_t *offset)
{
struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
int rc = -ENOMEM, i;
u32 *statuses, *fences;
statuses = kcalloc(sds->props[SP_ENGINE_NUM_OF_QUEUES],
sizeof(*statuses), GFP_KERNEL);
if (!statuses)
goto out;
fences = kcalloc(sds->props[SP_ENGINE_NUM_OF_FENCES] *
sds->props[SP_ENGINE_NUM_OF_QUEUES],
sizeof(*fences), GFP_KERNEL);
if (!fences)
goto free_status;
for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES]; ++i)
statuses[i] = RREG32(status_base_offset + i * sizeof(u32));
for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES] *
sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i)
fences[i] = RREG32(base_offset + i * sizeof(u32));
/* The actual print */
for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i) {
u32 fence_id;
u64 fence_cnt, fence_rdata;
const char *engine_name;
if (!FIELD_GET(TPC0_QM_CP_STS_0_FENCE_IN_PROGRESS_MASK,
statuses[i]))
continue;
fence_id =
FIELD_GET(TPC0_QM_CP_STS_0_FENCE_ID_MASK, statuses[i]);
fence_cnt = base_offset + CFG_BASE +
sizeof(u32) *
(i + fence_id * sds->props[SP_ENGINE_NUM_OF_QUEUES]);
fence_rdata = fence_cnt - sds->props[SP_FENCE0_CNT_OFFSET] +
sds->props[SP_FENCE0_RDATA_OFFSET];
engine_name = hl_sync_engine_to_string(engine_type);
rc = hl_snprintf_resize(
buf, size, offset,
"%s%u, stream %u: fence id %u cnt = 0x%llx (%s%u_QM.CP_FENCE%u_CNT_%u) rdata = 0x%llx (%s%u_QM.CP_FENCE%u_RDATA_%u) value = %u, cp_status = %u\n",
engine_name, engine_id,
i, fence_id,
fence_cnt, engine_name, engine_id, fence_id, i,
fence_rdata, engine_name, engine_id, fence_id, i,
fences[fence_id],
statuses[i]);
if (rc)
goto free_fences;
}
rc = 0;
free_fences:
kfree(fences);
free_status:
kfree(statuses);
out:
return rc;
}
static struct hl_state_dump_specs_funcs gaudi_state_dump_funcs = {
.monitor_valid = gaudi_monitor_valid,
.print_single_monitor = gaudi_print_single_monitor,
.gen_sync_to_engine_map = gaudi_gen_sync_to_engine_map,
.print_fences_single_engine = gaudi_print_fences_single_engine,
};
static void gaudi_state_dump_init(struct hl_device *hdev)
{
struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
int i;
for (i = 0; i < ARRAY_SIZE(gaudi_so_id_to_str); ++i)
hash_add(sds->so_id_to_str_tb,
&gaudi_so_id_to_str[i].node,
gaudi_so_id_to_str[i].id);
for (i = 0; i < ARRAY_SIZE(gaudi_monitor_id_to_str); ++i)
hash_add(sds->monitor_id_to_str_tb,
&gaudi_monitor_id_to_str[i].node,
gaudi_monitor_id_to_str[i].id);
sds->props = gaudi_state_dump_specs_props;
sds->sync_namager_names = gaudi_sync_manager_names;
sds->funcs = gaudi_state_dump_funcs;
}
static u32 *gaudi_get_stream_master_qid_arr(void)
{
return gaudi_stream_master;
}
static int gaudi_set_dram_properties(struct hl_device *hdev)
{
return 0;
}
static int gaudi_set_binning_masks(struct hl_device *hdev)
{
return 0;
}
static void gaudi_check_if_razwi_happened(struct hl_device *hdev)
{
}
static ssize_t infineon_ver_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
struct cpucp_info *cpucp_info;
cpucp_info = &hdev->asic_prop.cpucp_info;
return sprintf(buf, "%#04x\n", le32_to_cpu(cpucp_info->infineon_version));
}
static DEVICE_ATTR_RO(infineon_ver);
static struct attribute *gaudi_vrm_dev_attrs[] = {
&dev_attr_infineon_ver.attr,
NULL,
};
static void gaudi_add_device_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp,
struct attribute_group *dev_vrm_attr_grp)
{
hl_sysfs_add_dev_clk_attr(hdev, dev_clk_attr_grp);
dev_vrm_attr_grp->attrs = gaudi_vrm_dev_attrs;
}
static int gaudi_send_device_activity(struct hl_device *hdev, bool open)
{
return 0;
}
static const struct hl_asic_funcs gaudi_funcs = {
.early_init = gaudi_early_init,
.early_fini = gaudi_early_fini,
.late_init = gaudi_late_init,
.late_fini = gaudi_late_fini,
.sw_init = gaudi_sw_init,
.sw_fini = gaudi_sw_fini,
.hw_init = gaudi_hw_init,
.hw_fini = gaudi_hw_fini,
.halt_engines = gaudi_halt_engines,
.suspend = gaudi_suspend,
.resume = gaudi_resume,
.mmap = gaudi_mmap,
.ring_doorbell = gaudi_ring_doorbell,
.pqe_write = gaudi_pqe_write,
.asic_dma_alloc_coherent = gaudi_dma_alloc_coherent,
.asic_dma_free_coherent = gaudi_dma_free_coherent,
.scrub_device_mem = gaudi_scrub_device_mem,
.scrub_device_dram = gaudi_scrub_device_dram,
.get_int_queue_base = gaudi_get_int_queue_base,
.test_queues = gaudi_test_queues,
.asic_dma_pool_zalloc = gaudi_dma_pool_zalloc,
.asic_dma_pool_free = gaudi_dma_pool_free,
.cpu_accessible_dma_pool_alloc = gaudi_cpu_accessible_dma_pool_alloc,
.cpu_accessible_dma_pool_free = gaudi_cpu_accessible_dma_pool_free,
.hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
.cs_parser = gaudi_cs_parser,
.asic_dma_map_sgtable = hl_dma_map_sgtable,
.add_end_of_cb_packets = gaudi_add_end_of_cb_packets,
.update_eq_ci = gaudi_update_eq_ci,
.context_switch = gaudi_context_switch,
.restore_phase_topology = gaudi_restore_phase_topology,
.debugfs_read_dma = gaudi_debugfs_read_dma,
.add_device_attr = gaudi_add_device_attr,
.handle_eqe = gaudi_handle_eqe,
.get_events_stat = gaudi_get_events_stat,
.read_pte = gaudi_read_pte,
.write_pte = gaudi_write_pte,
.mmu_invalidate_cache = gaudi_mmu_invalidate_cache,
.mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range,
.mmu_prefetch_cache_range = NULL,
.send_heartbeat = gaudi_send_heartbeat,
.debug_coresight = gaudi_debug_coresight,
.is_device_idle = gaudi_is_device_idle,
.compute_reset_late_init = gaudi_compute_reset_late_init,
.hw_queues_lock = gaudi_hw_queues_lock,
.hw_queues_unlock = gaudi_hw_queues_unlock,
.get_pci_id = gaudi_get_pci_id,
.get_eeprom_data = gaudi_get_eeprom_data,
.get_monitor_dump = gaudi_get_monitor_dump,
.send_cpu_message = gaudi_send_cpu_message,
.pci_bars_map = gaudi_pci_bars_map,
.init_iatu = gaudi_init_iatu,
.rreg = hl_rreg,
.wreg = hl_wreg,
.halt_coresight = gaudi_halt_coresight,
.ctx_init = gaudi_ctx_init,
.ctx_fini = gaudi_ctx_fini,
.pre_schedule_cs = gaudi_pre_schedule_cs,
.get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
.load_firmware_to_device = gaudi_load_firmware_to_device,
.load_boot_fit_to_device = gaudi_load_boot_fit_to_device,
.get_signal_cb_size = gaudi_get_signal_cb_size,
.get_wait_cb_size = gaudi_get_wait_cb_size,
.gen_signal_cb = gaudi_gen_signal_cb,
.gen_wait_cb = gaudi_gen_wait_cb,
.reset_sob = gaudi_reset_sob,
.reset_sob_group = gaudi_reset_sob_group,
.get_device_time = gaudi_get_device_time,
.pb_print_security_errors = NULL,
.collective_wait_init_cs = gaudi_collective_wait_init_cs,
.collective_wait_create_jobs = gaudi_collective_wait_create_jobs,
.get_dec_base_addr = NULL,
.scramble_addr = hl_mmu_scramble_addr,
.descramble_addr = hl_mmu_descramble_addr,
.ack_protection_bits_errors = gaudi_ack_protection_bits_errors,
.get_hw_block_id = gaudi_get_hw_block_id,
.hw_block_mmap = gaudi_block_mmap,
.enable_events_from_fw = gaudi_enable_events_from_fw,
.ack_mmu_errors = gaudi_ack_mmu_page_fault_or_access_error,
.map_pll_idx_to_fw_idx = gaudi_map_pll_idx_to_fw_idx,
.init_firmware_preload_params = gaudi_init_firmware_preload_params,
.init_firmware_loader = gaudi_init_firmware_loader,
.init_cpu_scrambler_dram = gaudi_init_scrambler_hbm,
.state_dump_init = gaudi_state_dump_init,
.get_sob_addr = gaudi_get_sob_addr,
.set_pci_memory_regions = gaudi_set_pci_memory_regions,
.get_stream_master_qid_arr = gaudi_get_stream_master_qid_arr,
.check_if_razwi_happened = gaudi_check_if_razwi_happened,
.mmu_get_real_page_size = hl_mmu_get_real_page_size,
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = gaudi_set_hbm_bar_base,
.send_device_activity = gaudi_send_device_activity,
.set_dram_properties = gaudi_set_dram_properties,
.set_binning_masks = gaudi_set_binning_masks,
};
/**
 * gaudi_set_asic_funcs - set GAUDI function pointers
 *
 * @hdev: pointer to hl_device structure
 */
void gaudi_set_asic_funcs(struct hl_device *hdev)
{
hdev->asic_funcs = &gaudi_funcs;
}
| linux-master | drivers/accel/habanalabs/gaudi/gaudi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "goyaP.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include "../include/hw_ip/mmu/mmu_v1_0.h"
#include "../include/goya/asic_reg/goya_masks.h"
#include "../include/goya/goya_reg_map.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
/*
* GOYA security scheme:
*
* 1. Host is protected by:
* - Range registers (When MMU is enabled, DMA RR does NOT protect host)
* - MMU
*
* 2. DRAM is protected by:
* - Range registers (protect the first 512MB)
* - MMU (isolation between users)
*
* 3. Configuration is protected by:
* - Range registers
* - Protection bits
*
* When MMU is disabled:
*
* QMAN DMA: PQ, CQ, CP, DMA are secured.
* PQ, CB and the data are on the host.
*
* QMAN TPC/MME:
* PQ, CQ and CP are not secured.
* PQ, CB and the data are on the SRAM/DRAM.
*
* Since QMAN DMA is secured, the driver is parsing the DMA CB:
* - checks DMA pointer
* - WREG, MSG_PROT are not allowed.
* - MSG_LONG/SHORT are allowed.
*
* A read/write transaction by the QMAN to a protected area will succeed if
* and only if the QMAN's CP is secured and MSG_PROT is used
*
*
* When MMU is enabled:
*
* QMAN DMA: PQ, CQ and CP are secured.
* MMU is set to bypass on the Secure props register of the QMAN.
* The reasons we don't enable MMU for PQ, CQ and CP are:
* - PQ entry is in kernel address space and the driver doesn't map it.
* - CP writes to MSIX register and to kernel address space (completion
* queue).
*
 * DMA is not secured, but because CP is secured, the driver still needs to
 * parse the CB; however, it doesn't need to check the DMA addresses.
*
* For QMAN DMA 0, DMA is also secured because only the driver uses this DMA and
* the driver doesn't map memory in MMU.
*
* QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
*
* DMA RR does NOT protect host because DMA is not secured
*
*/
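/*
 * Illustration only (not part of the driver): the CB parsing rule described
 * above -- WREG and MSG_PROT rejected, MSG_LONG/SHORT accepted -- boils down
 * to a per-opcode predicate like the sketch below. The real parser also
 * validates LIN_DMA addresses; treating WREG_BULK like WREG_32 here is an
 * assumption made only for this sketch.
 */
static inline bool __maybe_unused goya_example_pkt_allowed_in_user_cb(
						enum packet_id id)
{
	switch (id) {
	case PACKET_WREG_32:
	case PACKET_WREG_BULK:
	case PACKET_MSG_PROT:
		/* privileged packets, rejected when parsing a user DMA CB */
		return false;
	default:
		/* MSG_LONG/MSG_SHORT and the remaining opcodes pass this check */
		return true;
	}
}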
#define GOYA_BOOT_FIT_FILE "habanalabs/goya/goya-boot-fit.itb"
#define GOYA_LINUX_FW_FILE "habanalabs/goya/goya-fit.itb"
#define GOYA_MMU_REGS_NUM 63
#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
#define GOYA_RESET_TIMEOUT_MSEC 500 /* 500ms */
#define GOYA_PLDM_RESET_TIMEOUT_MSEC 20000 /* 20s */
#define GOYA_RESET_WAIT_MSEC 1 /* 1ms */
#define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */
#define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
#define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
#define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GOYA_WAIT_FOR_BL_TIMEOUT_USEC 15000000 /* 15s */
#define GOYA_QMAN0_FENCE_VAL 0xD169B243
#define GOYA_MAX_STRING_LEN 20
#define GOYA_CB_POOL_CB_CNT 512
#define GOYA_CB_POOL_CB_SIZE 0x20000 /* 128KB */
#define IS_QM_IDLE(engine, qm_glbl_sts0) \
(((qm_glbl_sts0) & engine##_QM_IDLE_MASK) == engine##_QM_IDLE_MASK)
#define IS_DMA_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(DMA, qm_glbl_sts0)
#define IS_TPC_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(TPC, qm_glbl_sts0)
#define IS_MME_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(MME, qm_glbl_sts0)
#define IS_CMDQ_IDLE(engine, cmdq_glbl_sts0) \
(((cmdq_glbl_sts0) & engine##_CMDQ_IDLE_MASK) == \
engine##_CMDQ_IDLE_MASK)
#define IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) \
IS_CMDQ_IDLE(TPC, cmdq_glbl_sts0)
#define IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) \
IS_CMDQ_IDLE(MME, cmdq_glbl_sts0)
#define IS_DMA_IDLE(dma_core_sts0) \
!((dma_core_sts0) & DMA_CH_0_STS0_DMA_BUSY_MASK)
#define IS_TPC_IDLE(tpc_cfg_sts) \
(((tpc_cfg_sts) & TPC_CFG_IDLE_MASK) == TPC_CFG_IDLE_MASK)
#define IS_MME_IDLE(mme_arch_sts) \
(((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
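/*
 * Illustrative sketch (not a driver API): the IS_*_IDLE macros above are
 * meant to be combined per engine, e.g. a DMA engine counts as idle only
 * when both its QMAN and its channel report idle, mirroring how the idle
 * check in this driver combines them.
 */
static inline bool __maybe_unused goya_example_dma_engine_idle(u32 qm_glbl_sts0,
							u32 dma_core_sts0)
{
	return IS_DMA_QM_IDLE(qm_glbl_sts0) && IS_DMA_IDLE(dma_core_sts0);
}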
static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
"goya cq 4", "goya cpu eq"
};
static u16 goya_packet_sizes[MAX_PACKET_ID] = {
[PACKET_WREG_32] = sizeof(struct packet_wreg32),
[PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk),
[PACKET_MSG_LONG] = sizeof(struct packet_msg_long),
[PACKET_MSG_SHORT] = sizeof(struct packet_msg_short),
[PACKET_CP_DMA] = sizeof(struct packet_cp_dma),
[PACKET_MSG_PROT] = sizeof(struct packet_msg_prot),
[PACKET_FENCE] = sizeof(struct packet_fence),
[PACKET_LIN_DMA] = sizeof(struct packet_lin_dma),
[PACKET_NOP] = sizeof(struct packet_nop),
[PACKET_STOP] = sizeof(struct packet_stop)
};
static inline bool validate_packet_id(enum packet_id id)
{
switch (id) {
case PACKET_WREG_32:
case PACKET_WREG_BULK:
case PACKET_MSG_LONG:
case PACKET_MSG_SHORT:
case PACKET_CP_DMA:
case PACKET_MSG_PROT:
case PACKET_FENCE:
case PACKET_LIN_DMA:
case PACKET_NOP:
case PACKET_STOP:
return true;
default:
return false;
}
}
static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
mmTPC0_QM_GLBL_SECURE_PROPS,
mmTPC0_QM_GLBL_NON_SECURE_PROPS,
mmTPC0_CMDQ_GLBL_SECURE_PROPS,
mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
mmTPC0_CFG_ARUSER,
mmTPC0_CFG_AWUSER,
mmTPC1_QM_GLBL_SECURE_PROPS,
mmTPC1_QM_GLBL_NON_SECURE_PROPS,
mmTPC1_CMDQ_GLBL_SECURE_PROPS,
mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
mmTPC1_CFG_ARUSER,
mmTPC1_CFG_AWUSER,
mmTPC2_QM_GLBL_SECURE_PROPS,
mmTPC2_QM_GLBL_NON_SECURE_PROPS,
mmTPC2_CMDQ_GLBL_SECURE_PROPS,
mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
mmTPC2_CFG_ARUSER,
mmTPC2_CFG_AWUSER,
mmTPC3_QM_GLBL_SECURE_PROPS,
mmTPC3_QM_GLBL_NON_SECURE_PROPS,
mmTPC3_CMDQ_GLBL_SECURE_PROPS,
mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
mmTPC3_CFG_ARUSER,
mmTPC3_CFG_AWUSER,
mmTPC4_QM_GLBL_SECURE_PROPS,
mmTPC4_QM_GLBL_NON_SECURE_PROPS,
mmTPC4_CMDQ_GLBL_SECURE_PROPS,
mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
mmTPC4_CFG_ARUSER,
mmTPC4_CFG_AWUSER,
mmTPC5_QM_GLBL_SECURE_PROPS,
mmTPC5_QM_GLBL_NON_SECURE_PROPS,
mmTPC5_CMDQ_GLBL_SECURE_PROPS,
mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
mmTPC5_CFG_ARUSER,
mmTPC5_CFG_AWUSER,
mmTPC6_QM_GLBL_SECURE_PROPS,
mmTPC6_QM_GLBL_NON_SECURE_PROPS,
mmTPC6_CMDQ_GLBL_SECURE_PROPS,
mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
mmTPC6_CFG_ARUSER,
mmTPC6_CFG_AWUSER,
mmTPC7_QM_GLBL_SECURE_PROPS,
mmTPC7_QM_GLBL_NON_SECURE_PROPS,
mmTPC7_CMDQ_GLBL_SECURE_PROPS,
mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
mmTPC7_CFG_ARUSER,
mmTPC7_CFG_AWUSER,
mmMME_QM_GLBL_SECURE_PROPS,
mmMME_QM_GLBL_NON_SECURE_PROPS,
mmMME_CMDQ_GLBL_SECURE_PROPS,
mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
mmMME_SBA_CONTROL_DATA,
mmMME_SBB_CONTROL_DATA,
mmMME_SBC_CONTROL_DATA,
mmMME_WBC_CONTROL_DATA,
mmPCIE_WRAP_PSOC_ARUSER,
mmPCIE_WRAP_PSOC_AWUSER
};
static u32 goya_all_events[] = {
GOYA_ASYNC_EVENT_ID_PCIE_IF,
GOYA_ASYNC_EVENT_ID_TPC0_ECC,
GOYA_ASYNC_EVENT_ID_TPC1_ECC,
GOYA_ASYNC_EVENT_ID_TPC2_ECC,
GOYA_ASYNC_EVENT_ID_TPC3_ECC,
GOYA_ASYNC_EVENT_ID_TPC4_ECC,
GOYA_ASYNC_EVENT_ID_TPC5_ECC,
GOYA_ASYNC_EVENT_ID_TPC6_ECC,
GOYA_ASYNC_EVENT_ID_TPC7_ECC,
GOYA_ASYNC_EVENT_ID_MME_ECC,
GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
GOYA_ASYNC_EVENT_ID_MMU_ECC,
GOYA_ASYNC_EVENT_ID_DMA_MACRO,
GOYA_ASYNC_EVENT_ID_DMA_ECC,
GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
GOYA_ASYNC_EVENT_ID_PSOC_MEM,
GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
GOYA_ASYNC_EVENT_ID_SRAM0,
GOYA_ASYNC_EVENT_ID_SRAM1,
GOYA_ASYNC_EVENT_ID_SRAM2,
GOYA_ASYNC_EVENT_ID_SRAM3,
GOYA_ASYNC_EVENT_ID_SRAM4,
GOYA_ASYNC_EVENT_ID_SRAM5,
GOYA_ASYNC_EVENT_ID_SRAM6,
GOYA_ASYNC_EVENT_ID_SRAM7,
GOYA_ASYNC_EVENT_ID_SRAM8,
GOYA_ASYNC_EVENT_ID_SRAM9,
GOYA_ASYNC_EVENT_ID_SRAM10,
GOYA_ASYNC_EVENT_ID_SRAM11,
GOYA_ASYNC_EVENT_ID_SRAM12,
GOYA_ASYNC_EVENT_ID_SRAM13,
GOYA_ASYNC_EVENT_ID_SRAM14,
GOYA_ASYNC_EVENT_ID_SRAM15,
GOYA_ASYNC_EVENT_ID_SRAM16,
GOYA_ASYNC_EVENT_ID_SRAM17,
GOYA_ASYNC_EVENT_ID_SRAM18,
GOYA_ASYNC_EVENT_ID_SRAM19,
GOYA_ASYNC_EVENT_ID_SRAM20,
GOYA_ASYNC_EVENT_ID_SRAM21,
GOYA_ASYNC_EVENT_ID_SRAM22,
GOYA_ASYNC_EVENT_ID_SRAM23,
GOYA_ASYNC_EVENT_ID_SRAM24,
GOYA_ASYNC_EVENT_ID_SRAM25,
GOYA_ASYNC_EVENT_ID_SRAM26,
GOYA_ASYNC_EVENT_ID_SRAM27,
GOYA_ASYNC_EVENT_ID_SRAM28,
GOYA_ASYNC_EVENT_ID_SRAM29,
GOYA_ASYNC_EVENT_ID_GIC500,
GOYA_ASYNC_EVENT_ID_PLL0,
GOYA_ASYNC_EVENT_ID_PLL1,
GOYA_ASYNC_EVENT_ID_PLL3,
GOYA_ASYNC_EVENT_ID_PLL4,
GOYA_ASYNC_EVENT_ID_PLL5,
GOYA_ASYNC_EVENT_ID_PLL6,
GOYA_ASYNC_EVENT_ID_AXI_ECC,
GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
GOYA_ASYNC_EVENT_ID_PCIE_DEC,
GOYA_ASYNC_EVENT_ID_TPC0_DEC,
GOYA_ASYNC_EVENT_ID_TPC1_DEC,
GOYA_ASYNC_EVENT_ID_TPC2_DEC,
GOYA_ASYNC_EVENT_ID_TPC3_DEC,
GOYA_ASYNC_EVENT_ID_TPC4_DEC,
GOYA_ASYNC_EVENT_ID_TPC5_DEC,
GOYA_ASYNC_EVENT_ID_TPC6_DEC,
GOYA_ASYNC_EVENT_ID_TPC7_DEC,
GOYA_ASYNC_EVENT_ID_MME_WACS,
GOYA_ASYNC_EVENT_ID_MME_WACSD,
GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
GOYA_ASYNC_EVENT_ID_PSOC,
GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
GOYA_ASYNC_EVENT_ID_TPC0_QM,
GOYA_ASYNC_EVENT_ID_TPC1_QM,
GOYA_ASYNC_EVENT_ID_TPC2_QM,
GOYA_ASYNC_EVENT_ID_TPC3_QM,
GOYA_ASYNC_EVENT_ID_TPC4_QM,
GOYA_ASYNC_EVENT_ID_TPC5_QM,
GOYA_ASYNC_EVENT_ID_TPC6_QM,
GOYA_ASYNC_EVENT_ID_TPC7_QM,
GOYA_ASYNC_EVENT_ID_MME_QM,
GOYA_ASYNC_EVENT_ID_MME_CMDQ,
GOYA_ASYNC_EVENT_ID_DMA0_QM,
GOYA_ASYNC_EVENT_ID_DMA1_QM,
GOYA_ASYNC_EVENT_ID_DMA2_QM,
GOYA_ASYNC_EVENT_ID_DMA3_QM,
GOYA_ASYNC_EVENT_ID_DMA4_QM,
GOYA_ASYNC_EVENT_ID_DMA0_CH,
GOYA_ASYNC_EVENT_ID_DMA1_CH,
GOYA_ASYNC_EVENT_ID_DMA2_CH,
GOYA_ASYNC_EVENT_ID_DMA3_CH,
GOYA_ASYNC_EVENT_ID_DMA4_CH,
GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
GOYA_ASYNC_EVENT_ID_DMA_BM_CH4,
GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S,
GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E,
GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S,
GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E
};
static s64 goya_state_dump_specs_props[SP_MAX] = {0};
static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
int goya_set_fixed_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int i;
prop->max_queues = GOYA_QUEUE_ID_SIZE;
prop->hw_queues_props = kcalloc(prop->max_queues,
sizeof(struct hw_queue_properties),
GFP_KERNEL);
if (!prop->hw_queues_props)
return -ENOMEM;
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
prop->hw_queues_props[i].driver_only = 0;
prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
prop->hw_queues_props[i].driver_only = 1;
prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
NUMBER_OF_INT_HW_QUEUES; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
prop->hw_queues_props[i].driver_only = 0;
prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_USER;
}
prop->cfg_base_address = CFG_BASE;
prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
prop->host_base_address = HOST_PHYS_BASE;
prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->completion_mode = HL_COMPLETION_MODE_JOB;
prop->dram_base_address = DRAM_PHYS_BASE;
prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
prop->dram_end_address = prop->dram_base_address + prop->dram_size;
prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
prop->sram_base_address = SRAM_BASE_ADDR;
prop->sram_size = SRAM_SIZE;
prop->sram_end_address = prop->sram_base_address + prop->sram_size;
prop->sram_user_base_address = prop->sram_base_address +
SRAM_USER_BASE_OFFSET;
prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
if (hdev->pldm)
prop->mmu_pgt_size = 0x800000; /* 8MB */
else
prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
prop->mmu_pte_size = HL_PTE_SIZE;
prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
prop->device_mem_alloc_default_page_size = prop->dram_page_size;
prop->dram_supports_virtual_memory = true;
prop->dmmu.hop_shifts[MMU_HOP0] = MMU_V1_0_HOP0_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP1] = MMU_V1_0_HOP1_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP2] = MMU_V1_0_HOP2_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP3] = MMU_V1_0_HOP3_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP4] = MMU_V1_0_HOP4_SHIFT;
prop->dmmu.hop_masks[MMU_HOP0] = MMU_V1_0_HOP0_MASK;
prop->dmmu.hop_masks[MMU_HOP1] = MMU_V1_0_HOP1_MASK;
prop->dmmu.hop_masks[MMU_HOP2] = MMU_V1_0_HOP2_MASK;
prop->dmmu.hop_masks[MMU_HOP3] = MMU_V1_0_HOP3_MASK;
prop->dmmu.hop_masks[MMU_HOP4] = MMU_V1_0_HOP4_MASK;
prop->dmmu.start_addr = VA_DDR_SPACE_START;
prop->dmmu.end_addr = VA_DDR_SPACE_END;
prop->dmmu.page_size = PAGE_SIZE_2MB;
prop->dmmu.num_hops = MMU_ARCH_5_HOPS;
prop->dmmu.last_mask = LAST_MASK;
/* TODO: will be duplicated until implementing per-MMU props */
prop->dmmu.hop_table_size = prop->mmu_hop_table_size;
prop->dmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
/* shifts and masks are the same in PMMU and DMMU */
memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
prop->pmmu.start_addr = VA_HOST_SPACE_START;
prop->pmmu.end_addr = VA_HOST_SPACE_END;
prop->pmmu.page_size = PAGE_SIZE_4KB;
prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
prop->pmmu.last_mask = LAST_MASK;
/* TODO: will be duplicated until implementing per-MMU props */
prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
/* PMMU and HPMMU are the same except of page size */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
prop->dram_size_for_default_page_mapping = VA_DDR_SPACE_END;
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
prop->high_pll = PLL_HIGH_DEFAULT;
prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
prop->max_power_default = MAX_POWER_DEFAULT;
prop->dc_power_default = DC_POWER_DEFAULT;
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
strncpy(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
prop->max_pending_cs = GOYA_MAX_PENDING_CS;
prop->first_available_user_interrupt = USHRT_MAX;
prop->tpc_interrupt_id = USHRT_MAX;
prop->eq_interrupt_id = GOYA_EVENT_QUEUE_MSIX_IDX;
for (i = 0 ; i < HL_MAX_DCORES ; i++)
prop->first_available_cq[i] = USHRT_MAX;
prop->fw_cpu_boot_dev_sts0_valid = false;
prop->fw_cpu_boot_dev_sts1_valid = false;
prop->hard_reset_done_by_fw = false;
prop->gic_interrupts_enable = true;
prop->server_type = HL_SERVER_TYPE_UNKNOWN;
prop->clk_pll_index = HL_GOYA_MME_PLL;
prop->use_get_power_for_reset_history = true;
prop->configurable_stop_on_err = true;
prop->set_max_power_on_device_init = true;
prop->dma_mask = 48;
return 0;
}
/*
* goya_pci_bars_map - Map PCI BARS of Goya device
*
* @hdev: pointer to hl_device structure
*
* Request PCI regions and map them to kernel virtual addresses.
* Returns 0 on success
*
*/
static int goya_pci_bars_map(struct hl_device *hdev)
{
static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"};
bool is_wc[3] = {false, false, true};
int rc;
rc = hl_pci_bars_map(hdev, name, is_wc);
if (rc)
return rc;
hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
(CFG_BASE - SRAM_BASE_ADDR);
return 0;
}
static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
{
struct goya_device *goya = hdev->asic_specific;
struct hl_inbound_pci_region pci_region;
u64 old_addr = addr;
int rc;
if ((goya) && (goya->ddr_bar_cur_addr == addr))
return old_addr;
/* Inbound Region 1 - Bar 4 - Point to DDR */
pci_region.mode = PCI_BAR_MATCH_MODE;
pci_region.bar = DDR_BAR_ID;
pci_region.addr = addr;
rc = hl_pci_set_inbound_region(hdev, 1, &pci_region);
if (rc)
return U64_MAX;
if (goya) {
old_addr = goya->ddr_bar_cur_addr;
goya->ddr_bar_cur_addr = addr;
}
return old_addr;
}
/*
* goya_init_iatu - Initialize the iATU unit inside the PCI controller
*
* @hdev: pointer to hl_device structure
*
* This is needed in case the firmware doesn't initialize the iATU
*
*/
static int goya_init_iatu(struct hl_device *hdev)
{
struct hl_inbound_pci_region inbound_region;
struct hl_outbound_pci_region outbound_region;
int rc;
if (hdev->asic_prop.iatu_done_by_fw)
return 0;
/* Inbound Region 0 - Bar 0 - Point to SRAM and CFG */
inbound_region.mode = PCI_BAR_MATCH_MODE;
inbound_region.bar = SRAM_CFG_BAR_ID;
inbound_region.addr = SRAM_BASE_ADDR;
rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
if (rc)
goto done;
/* Inbound Region 1 - Bar 4 - Point to DDR */
inbound_region.mode = PCI_BAR_MATCH_MODE;
inbound_region.bar = DDR_BAR_ID;
inbound_region.addr = DRAM_PHYS_BASE;
rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
if (rc)
goto done;
/* Outbound Region 0 - Point to Host */
outbound_region.addr = HOST_PHYS_BASE;
outbound_region.size = HOST_PHYS_SIZE;
rc = hl_pci_set_outbound_region(hdev, &outbound_region);
done:
return rc;
}
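/*
 * Note on the regions above (inferred from this setup, not from a HW spec):
 * in PCI_BAR_MATCH_MODE a host access that hits a BAR is translated to the
 * programmed device address plus the offset within that BAR, so inbound
 * region 0 exposes SRAM/CFG through BAR 0 and inbound region 1 exposes DRAM
 * through BAR 4, while the outbound region lets the device reach host memory
 * starting at HOST_PHYS_BASE.
 */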
static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
{
return RREG32(mmHW_STATE);
}
/*
* goya_early_init - GOYA early initialization code
*
* @hdev: pointer to hl_device structure
*
* Verify PCI bars
* Set DMA masks
* PCI controller initialization
* Map PCI bars
*
*/
static int goya_early_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_dev *pdev = hdev->pdev;
resource_size_t pci_bar_size;
u32 fw_boot_status, val;
int rc;
rc = goya_set_fixed_properties(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to get fixed properties\n");
return rc;
}
/* Check BAR sizes */
pci_bar_size = pci_resource_len(pdev, SRAM_CFG_BAR_ID);
if (pci_bar_size != CFG_BAR_SIZE) {
dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
SRAM_CFG_BAR_ID, &pci_bar_size, CFG_BAR_SIZE);
rc = -ENODEV;
goto free_queue_props;
}
pci_bar_size = pci_resource_len(pdev, MSIX_BAR_ID);
if (pci_bar_size != MSIX_BAR_SIZE) {
dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
MSIX_BAR_ID, &pci_bar_size, MSIX_BAR_SIZE);
rc = -ENODEV;
goto free_queue_props;
}
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
hdev->dram_pci_bar_start = pci_resource_start(pdev, DDR_BAR_ID);
/* If FW security is enabled at this point it means no access to ELBI */
if (hdev->asic_prop.fw_security_enabled) {
hdev->asic_prop.iatu_done_by_fw = true;
goto pci_init;
}
rc = hl_pci_elbi_read(hdev, CFG_BASE + mmCPU_BOOT_DEV_STS0,
&fw_boot_status);
if (rc)
goto free_queue_props;
/* Check whether FW is configuring iATU */
if ((fw_boot_status & CPU_BOOT_DEV_STS0_ENABLED) &&
(fw_boot_status & CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN))
hdev->asic_prop.iatu_done_by_fw = true;
pci_init:
rc = hl_pci_init(hdev);
if (rc)
goto free_queue_props;
	/* Before continuing with the initialization, we need to read the preboot
	 * version to determine whether we are running with security-enabled firmware
	 */
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
			/* we are already in the failure flow, so don't check if hw_fini fails */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
rc = hdev->asic_funcs->hw_fini(hdev, true, false);
if (rc) {
dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
goto pci_fini;
}
}
if (!hdev->pldm) {
val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
dev_warn(hdev->dev,
"PCI strap is not configured correctly, PCI bus errors may occur\n");
}
return 0;
pci_fini:
hl_pci_fini(hdev);
free_queue_props:
kfree(hdev->asic_prop.hw_queues_props);
return rc;
}
/*
* goya_early_fini - GOYA early finalization code
*
* @hdev: pointer to hl_device structure
*
* Unmap PCI bars
*
*/
static int goya_early_fini(struct hl_device *hdev)
{
kfree(hdev->asic_prop.hw_queues_props);
hl_pci_fini(hdev);
return 0;
}
static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
{
/* mask to zero the MMBP and ASID bits */
WREG32_AND(reg, ~0x7FF);
WREG32_OR(reg, asid);
}
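/*
 * Worked example for the helper above: for asid = 3 it first clears
 * bits [10:0] (the 0x7FF mask, i.e. the ASID field plus the MMBP bit) and
 * then ORs in 3, leaving MMBP cleared and ASID = 3 for that engine. The
 * exact bit layout is inferred from the mask and is an assumption.
 */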
static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
{
struct goya_device *goya = hdev->asic_specific;
if (!(goya->hw_cap_initialized & HW_CAP_MMU))
return;
if (secure)
WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
else
WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
RREG32(mmDMA_QM_0_GLBL_PROT);
}
/*
* goya_fetch_psoc_frequency - Fetch PSOC frequency values
*
* @hdev: pointer to hl_device structure
*
*/
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
int rc;
if (hdev->asic_prop.fw_security_enabled) {
struct goya_device *goya = hdev->asic_specific;
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
return;
rc = hl_fw_cpucp_pll_info_get(hdev, HL_GOYA_PCI_PLL,
pll_freq_arr);
if (rc)
return;
freq = pll_freq_arr[1];
} else {
div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
nr = RREG32(mmPSOC_PCI_PLL_NR);
nf = RREG32(mmPSOC_PCI_PLL_NF);
od = RREG32(mmPSOC_PCI_PLL_OD);
if (div_sel == DIV_SEL_REF_CLK ||
div_sel == DIV_SEL_DIVIDED_REF) {
if (div_sel == DIV_SEL_REF_CLK)
freq = PLL_REF_CLK;
else
freq = PLL_REF_CLK / (div_fctr + 1);
} else if (div_sel == DIV_SEL_PLL_CLK ||
div_sel == DIV_SEL_DIVIDED_PLL) {
pll_clk = PLL_REF_CLK * (nf + 1) /
((nr + 1) * (od + 1));
if (div_sel == DIV_SEL_PLL_CLK)
freq = pll_clk;
else
freq = pll_clk / (div_fctr + 1);
} else {
dev_warn(hdev->dev,
"Received invalid div select value: %d",
div_sel);
freq = 0;
}
}
prop->psoc_timestamp_frequency = freq;
prop->psoc_pci_pll_nr = nr;
prop->psoc_pci_pll_nf = nf;
prop->psoc_pci_pll_od = od;
prop->psoc_pci_pll_div_factor = div_fctr;
}
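/*
 * Worked example for the PLL formula above (the 50 MHz reference value is
 * an assumption for illustration, not taken from this file): with nr = 0,
 * nf = 39, od = 1 and DIV_SEL_DIVIDED_PLL with div_fctr = 3:
 *	pll_clk = 50 MHz * (39 + 1) / ((0 + 1) * (1 + 1)) = 1000 MHz
 *	freq    = pll_clk / (3 + 1)                        = 250 MHz
 */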
/*
* goya_set_frequency - set the frequency of the device
*
* @hdev: pointer to habanalabs device structure
* @freq: the new frequency value
*
 * Change the frequency if needed. This function provides no concurrency
 * protection, so the calling function is assumed to serialize calls that
 * might race with different values (see the usage sketch after this function).
 *
 * Returns 0 if no change was made, otherwise returns 1
*/
int goya_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
{
struct goya_device *goya = hdev->asic_specific;
if ((goya->pm_mng_profile == PM_MANUAL) ||
(goya->curr_pll_profile == freq))
return 0;
dev_dbg(hdev->dev, "Changing device frequency to %s\n",
freq == PLL_HIGH ? "high" : "low");
goya_set_pll_profile(hdev, freq);
goya->curr_pll_profile = freq;
return 1;
}
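/*
 * Usage sketch (illustration only, not a driver entry point): callers are
 * expected to provide their own serialization, e.g. by holding
 * hdev->fpriv_list_lock as the low-frequency job below does.
 */
static void __maybe_unused goya_example_raise_frequency(struct hl_device *hdev)
{
	mutex_lock(&hdev->fpriv_list_lock);
	if (goya_set_frequency(hdev, PLL_HIGH))
		dev_dbg(hdev->dev, "PLL profile switched to high\n");
	mutex_unlock(&hdev->fpriv_list_lock);
}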
static void goya_set_freq_to_low_job(struct work_struct *work)
{
struct goya_work_freq *goya_work = container_of(work,
struct goya_work_freq,
work_freq.work);
struct hl_device *hdev = goya_work->hdev;
mutex_lock(&hdev->fpriv_list_lock);
if (!hdev->is_compute_ctx_active)
goya_set_frequency(hdev, PLL_LOW);
mutex_unlock(&hdev->fpriv_list_lock);
schedule_delayed_work(&goya_work->work_freq,
usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
}
int goya_late_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct goya_device *goya = hdev->asic_specific;
int rc;
goya_fetch_psoc_frequency(hdev);
rc = goya_mmu_clear_pgt_range(hdev);
if (rc) {
dev_err(hdev->dev,
"Failed to clear MMU page tables range %d\n", rc);
return rc;
}
rc = goya_mmu_set_dram_default_page(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to set DRAM default page %d\n", rc);
return rc;
}
rc = goya_mmu_add_mappings_for_device_cpu(hdev);
if (rc)
return rc;
rc = goya_init_cpu_queues(hdev);
if (rc)
return rc;
rc = goya_test_cpu_queue(hdev);
if (rc)
return rc;
rc = goya_cpucp_info_get(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to get cpucp info %d\n", rc);
return rc;
}
	/* Now that the DRAM size is known from the ASIC properties, configure
	 * the DMA_IF DDR wrap protection (which is in the MMU block)
	 * accordingly. The programmed value is the log2 of the DRAM size.
	 */
WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
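	/*
	 * Example values (not read from this file): for an 8 GB DRAM,
	 * ilog2(8ULL * 1024 * 1024 * 1024) == 33, so 33 would be the value
	 * programmed into MMU_LOG2_DDR_SIZE above.
	 */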
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0);
if (rc) {
dev_err(hdev->dev,
"Failed to enable PCI access from CPU %d\n", rc);
return rc;
}
/* force setting to low frequency */
goya->curr_pll_profile = PLL_LOW;
goya->pm_mng_profile = PM_AUTO;
goya_set_pll_profile(hdev, PLL_LOW);
schedule_delayed_work(&goya->goya_work->work_freq,
usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
return 0;
}
/*
* goya_late_fini - GOYA late tear-down code
*
* @hdev: pointer to hl_device structure
*
 * Free the structures allocated for the sensors
*/
void goya_late_fini(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
cancel_delayed_work_sync(&goya->goya_work->work_freq);
hl_hwmon_release_resources(hdev);
}
static void goya_set_pci_memory_regions(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_mem_region *region;
/* CFG */
region = &hdev->pci_mem_region[PCI_REGION_CFG];
region->region_base = CFG_BASE;
region->region_size = CFG_SIZE;
region->offset_in_bar = CFG_BASE - SRAM_BASE_ADDR;
region->bar_size = CFG_BAR_SIZE;
region->bar_id = SRAM_CFG_BAR_ID;
region->used = 1;
/* SRAM */
region = &hdev->pci_mem_region[PCI_REGION_SRAM];
region->region_base = SRAM_BASE_ADDR;
region->region_size = SRAM_SIZE;
region->offset_in_bar = 0;
region->bar_size = CFG_BAR_SIZE;
region->bar_id = SRAM_CFG_BAR_ID;
region->used = 1;
/* DRAM */
region = &hdev->pci_mem_region[PCI_REGION_DRAM];
region->region_base = DRAM_PHYS_BASE;
region->region_size = hdev->asic_prop.dram_size;
region->offset_in_bar = 0;
region->bar_size = prop->dram_pci_bar_size;
region->bar_id = DDR_BAR_ID;
region->used = 1;
}
/*
* goya_sw_init - Goya software initialization code
*
* @hdev: pointer to hl_device structure
*
*/
static int goya_sw_init(struct hl_device *hdev)
{
struct goya_device *goya;
int rc;
/* Allocate device structure */
goya = kzalloc(sizeof(*goya), GFP_KERNEL);
if (!goya)
return -ENOMEM;
/* according to goya_init_iatu */
goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;
goya->mme_clk = GOYA_PLL_FREQ_LOW;
goya->tpc_clk = GOYA_PLL_FREQ_LOW;
goya->ic_clk = GOYA_PLL_FREQ_LOW;
hdev->asic_specific = goya;
/* Create DMA pool for small allocations */
hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
&hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
if (!hdev->dma_pool) {
dev_err(hdev->dev, "failed to create DMA pool\n");
rc = -ENOMEM;
goto free_goya_device;
}
hdev->cpu_accessible_dma_mem = hl_asic_dma_alloc_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
&hdev->cpu_accessible_dma_address,
GFP_KERNEL | __GFP_ZERO);
if (!hdev->cpu_accessible_dma_mem) {
rc = -ENOMEM;
goto free_dma_pool;
}
dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n",
&hdev->cpu_accessible_dma_address);
hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
if (!hdev->cpu_accessible_dma_pool) {
dev_err(hdev->dev,
"Failed to create CPU accessible DMA pool\n");
rc = -ENOMEM;
goto free_cpu_dma_mem;
}
rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
(uintptr_t) hdev->cpu_accessible_dma_mem,
HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
if (rc) {
dev_err(hdev->dev,
"Failed to add memory to CPU accessible DMA pool\n");
rc = -EFAULT;
goto free_cpu_accessible_dma_pool;
}
spin_lock_init(&goya->hw_queues_lock);
hdev->supports_coresight = true;
hdev->asic_prop.supports_compute_reset = true;
hdev->asic_prop.allow_inference_soft_reset = true;
hdev->supports_wait_for_multi_cs = false;
hdev->supports_ctx_switch = true;
hdev->asic_funcs->set_pci_memory_regions(hdev);
goya->goya_work = kmalloc(sizeof(struct goya_work_freq), GFP_KERNEL);
if (!goya->goya_work) {
rc = -ENOMEM;
goto free_cpu_accessible_dma_pool;
}
goya->goya_work->hdev = hdev;
INIT_DELAYED_WORK(&goya->goya_work->work_freq, goya_set_freq_to_low_job);
return 0;
free_cpu_accessible_dma_pool:
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_dma_mem:
hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
hdev->cpu_accessible_dma_address);
free_dma_pool:
dma_pool_destroy(hdev->dma_pool);
free_goya_device:
kfree(goya);
return rc;
}
/*
* goya_sw_fini - Goya software tear-down code
*
* @hdev: pointer to hl_device structure
*
*/
static int goya_sw_fini(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
hdev->cpu_accessible_dma_address);
dma_pool_destroy(hdev->dma_pool);
kfree(goya->goya_work);
kfree(goya);
return 0;
}
static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
dma_addr_t bus_address)
{
struct goya_device *goya = hdev->asic_specific;
u32 mtr_base_lo, mtr_base_hi;
u32 so_base_lo, so_base_hi;
u32 gic_base_lo, gic_base_hi;
u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);
u32 dma_err_cfg = QMAN_DMA_ERR_MSG_EN;
mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
gic_base_lo =
lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
gic_base_hi =
upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));
WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);
WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);
	/* PQ has a buffer of 2 cache lines, while CQ has 8 lines
	 * (see the encoding note after this function)
	 */
WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);
if (goya->hw_cap_initialized & HW_CAP_MMU)
WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
else
WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);
if (hdev->stop_on_err)
dma_err_cfg |= 1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT;
WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, dma_err_cfg);
WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
}
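/*
 * The CFG1 values written in goya_init_dma_qman() above pack the cache-line
 * count into both 16-bit halves of the register (0x00020002 for the 2-line
 * PQ buffer, 0x00080008 for the 8-line CQ). The helper below only
 * illustrates that encoding; it is not a driver API and the meaning of the
 * two halves is an assumption.
 */
static inline u32 __maybe_unused goya_example_cfg1_lines(u32 lines)
{
	return (lines << 16) | lines;
}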
static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
{
u32 gic_base_lo, gic_base_hi;
u64 sob_addr;
u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);
gic_base_lo =
lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
gic_base_hi =
upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);
if (dma_id)
sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
(dma_id - 1) * 4;
else
sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
}
/*
* goya_init_dma_qmans - Initialize QMAN DMA registers
*
* @hdev: pointer to hl_device structure
*
* Initialize the H/W registers of the QMAN DMA channels
*
*/
void goya_init_dma_qmans(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
struct hl_hw_queue *q;
int i;
if (goya->hw_cap_initialized & HW_CAP_DMA)
return;
q = &hdev->kernel_queues[0];
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
q->cq_id = q->msi_vec = i;
goya_init_dma_qman(hdev, i, q->bus_address);
goya_init_dma_ch(hdev, i);
}
goya->hw_cap_initialized |= HW_CAP_DMA;
}
/*
* goya_disable_external_queues - Disable external queues
*
* @hdev: pointer to hl_device structure
*
*/
static void goya_disable_external_queues(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
if (!(goya->hw_cap_initialized & HW_CAP_DMA))
return;
WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
}
static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
u32 cp_sts_reg, u32 glbl_sts0_reg)
{
int rc;
u32 status;
	/* use the values of TPC0 as they are all the same */
WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
status = RREG32(cp_sts_reg);
if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
rc = hl_poll_timeout(
hdev,
cp_sts_reg,
status,
!(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
1000,
QMAN_FENCE_TIMEOUT_USEC);
		/* if the QMAN is stuck in a fence, there is no need to check for stop */
if (rc)
return 0;
}
rc = hl_poll_timeout(
hdev,
glbl_sts0_reg,
status,
(status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
1000,
QMAN_STOP_TIMEOUT_USEC);
if (rc) {
dev_err(hdev->dev,
"Timeout while waiting for QMAN to stop\n");
return -EINVAL;
}
return 0;
}
/*
* goya_stop_external_queues - Stop external queues
*
* @hdev: pointer to hl_device structure
*
* Returns 0 on success
*
*/
static int goya_stop_external_queues(struct hl_device *hdev)
{
int rc, retval = 0;
struct goya_device *goya = hdev->asic_specific;
if (!(goya->hw_cap_initialized & HW_CAP_DMA))
return retval;
rc = goya_stop_queue(hdev,
mmDMA_QM_0_GLBL_CFG1,
mmDMA_QM_0_CP_STS,
mmDMA_QM_0_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmDMA_QM_1_GLBL_CFG1,
mmDMA_QM_1_CP_STS,
mmDMA_QM_1_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmDMA_QM_2_GLBL_CFG1,
mmDMA_QM_2_CP_STS,
mmDMA_QM_2_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmDMA_QM_3_GLBL_CFG1,
mmDMA_QM_3_CP_STS,
mmDMA_QM_3_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmDMA_QM_4_GLBL_CFG1,
mmDMA_QM_4_CP_STS,
mmDMA_QM_4_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
retval = -EIO;
}
return retval;
}
/*
* goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
*
* @hdev: pointer to hl_device structure
*
* Returns 0 on success
*
*/
int goya_init_cpu_queues(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_eq *eq;
u32 status;
struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
int err;
if (!hdev->cpu_queues_enable)
return 0;
if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
return 0;
eq = &hdev->event_queue;
WREG32(mmCPU_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
WREG32(mmCPU_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
WREG32(mmCPU_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
WREG32(mmCPU_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
WREG32(mmCPU_CQ_BASE_ADDR_LOW,
lower_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
WREG32(mmCPU_CQ_BASE_ADDR_HIGH,
upper_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
WREG32(mmCPU_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
WREG32(mmCPU_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
WREG32(mmCPU_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
/* Used for EQ CI */
WREG32(mmCPU_EQ_CI, 0);
WREG32(mmCPU_IF_PF_PQ_PI, 0);
WREG32(mmCPU_PQ_INIT_STATUS, PQ_INIT_STATUS_READY_FOR_CP);
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GOYA_ASYNC_EVENT_ID_PI_UPDATE);
err = hl_poll_timeout(
hdev,
mmCPU_PQ_INIT_STATUS,
status,
(status == PQ_INIT_STATUS_READY_FOR_HOST),
1000,
GOYA_CPU_TIMEOUT_USEC);
if (err) {
dev_err(hdev->dev,
"Failed to setup communication with device CPU\n");
return -EIO;
}
/* update FW application security bits */
if (prop->fw_cpu_boot_dev_sts0_valid)
prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0);
if (prop->fw_cpu_boot_dev_sts1_valid)
prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1);
goya->hw_cap_initialized |= HW_CAP_CPU_Q;
return 0;
}
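/*
 * Summary of the handshake above: the driver programs the PQ, EQ and CQ
 * base addresses and lengths, marks PQ_INIT_STATUS as ready-for-CP, kicks
 * the device CPU through the GIC with the PI_UPDATE event, and then polls
 * until the CPU firmware flips the status to ready-for-host.
 */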
static void goya_set_pll_refclk(struct hl_device *hdev)
{
WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);
WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
WREG32(mmIC_PLL_DIV_SEL_3, 0x0);
WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
WREG32(mmMC_PLL_DIV_SEL_3, 0x0);
WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);
WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);
WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);
WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
}
static void goya_disable_clk_rlx(struct hl_device *hdev)
{
WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
}
static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
{
u64 tpc_eml_address;
u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
int err, slm_index;
tpc_offset = tpc_id * 0x40000;
tpc_eml_offset = tpc_id * 0x200000;
tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
tpc_slm_offset = tpc_eml_address + 0x100000;
/*
	 * Workaround for Bug H2 #2443:
* "TPC SB is not initialized on chip reset"
*/
val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
tpc_id);
WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);
WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);
WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);
err = hl_poll_timeout(
hdev,
mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
val,
(val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
1000,
HL_DEVICE_TIMEOUT_USEC);
if (err)
dev_err(hdev->dev,
"Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);
WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);
msleep(GOYA_RESET_WAIT_MSEC);
WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));
msleep(GOYA_RESET_WAIT_MSEC);
for (slm_index = 0 ; slm_index < 256 ; slm_index++)
WREG32(tpc_slm_offset + (slm_index << 2), 0);
val = RREG32(tpc_slm_offset);
}
static void goya_tpc_mbist_workaround(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
int i;
if (hdev->pldm)
return;
if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
return;
/* Workaround for H2 #2443 */
for (i = 0 ; i < TPC_MAX_NUM ; i++)
_goya_tpc_mbist_workaround(hdev, i);
goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
}
/*
* goya_init_golden_registers - Initialize golden registers
*
* @hdev: pointer to hl_device structure
*
* Initialize the H/W registers of the device
*
*/
static void goya_init_golden_registers(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
u32 polynom[10], tpc_intr_mask, offset;
int i;
if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
return;
polynom[0] = 0x00020080;
polynom[1] = 0x00401000;
polynom[2] = 0x00200800;
polynom[3] = 0x00002000;
polynom[4] = 0x00080200;
polynom[5] = 0x00040100;
polynom[6] = 0x00100400;
polynom[7] = 0x00004000;
polynom[8] = 0x00010000;
polynom[9] = 0x00008000;
/* Mask all arithmetic interrupts from TPC */
tpc_intr_mask = 0x7FFF;
for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);
WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);
WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);
WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);
WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
}
WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
WREG32(mmMME_AGU, 0x0f0f0f10);
WREG32(mmMME_SEI_MASK, ~0x0);
WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);
WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);
WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);
WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);
WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);
WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
}
for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
WREG32(mmMME1_RTR_SCRAMB_EN + offset,
1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
}
for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
/*
		 * Workaround for Bug H2 #2441:
* "ST.NOP set trace event illegal opcode"
*/
WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);
WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
WREG32_FIELD(TPC0_CFG_MSS_CONFIG, offset,
ICACHE_FETCH_LINE_NUM, 2);
}
WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
/*
* Workaround for H2 #HW-23 bug
* Set DMA max outstanding read requests to 240 on DMA CH 1.
* This limitation is still large enough to not affect Gen4 bandwidth.
	 * We only need to limit that DMA channel because the user can only read
	 * from the host using DMA CH 1
*/
WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
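	/*
	 * The low byte 0xF0 of the value above is 240 decimal, matching the
	 * outstanding-read limit described in the comment; the meaning of the
	 * remaining fields is not documented here.
	 */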
WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
goya->hw_cap_initialized |= HW_CAP_GOLDEN;
}
static void goya_init_mme_qman(struct hl_device *hdev)
{
u32 mtr_base_lo, mtr_base_hi;
u32 so_base_lo, so_base_hi;
u32 gic_base_lo, gic_base_hi;
u64 qman_base_addr;
mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
gic_base_lo =
lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
gic_base_hi =
upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
qman_base_addr = hdev->asic_prop.sram_base_address +
MME_QMAN_BASE_OFFSET;
WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
WREG32(mmMME_QM_PQ_PI, 0);
WREG32(mmMME_QM_PQ_CI, 0);
WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);
WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);
/* QMAN CQ has 8 cache lines */
WREG32(mmMME_QM_CQ_CFG1, 0x00080008);
WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);
WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);
WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);
WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);
WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
}
static void goya_init_mme_cmdq(struct hl_device *hdev)
{
u32 mtr_base_lo, mtr_base_hi;
u32 so_base_lo, so_base_hi;
u32 gic_base_lo, gic_base_hi;
mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
gic_base_lo =
lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
gic_base_hi =
upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);
/* CMDQ CQ has 20 cache lines */
WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);
WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);
WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);
WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);
WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);
WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
}
void goya_init_mme_qmans(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
u32 so_base_lo, so_base_hi;
if (goya->hw_cap_initialized & HW_CAP_MME)
return;
so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);
goya_init_mme_qman(hdev);
goya_init_mme_cmdq(hdev);
goya->hw_cap_initialized |= HW_CAP_MME;
}
static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
{
u32 mtr_base_lo, mtr_base_hi;
u32 so_base_lo, so_base_hi;
u32 gic_base_lo, gic_base_hi;
u64 qman_base_addr;
u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
gic_base_lo =
lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
gic_base_hi =
upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
}
static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
{
u32 mtr_base_lo, mtr_base_hi;
u32 so_base_lo, so_base_hi;
u32 gic_base_lo, gic_base_hi;
u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
gic_base_lo =
lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
gic_base_hi =
upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
}
void goya_init_tpc_qmans(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
u32 so_base_lo, so_base_hi;
u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
int i;
if (goya->hw_cap_initialized & HW_CAP_TPC)
return;
so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
for (i = 0 ; i < TPC_MAX_NUM ; i++) {
WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
so_base_lo);
WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
so_base_hi);
}
goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
for (i = 0 ; i < TPC_MAX_NUM ; i++)
goya_init_tpc_cmdq(hdev, i);
goya->hw_cap_initialized |= HW_CAP_TPC;
}
/*
* goya_disable_internal_queues - Disable internal queues
*
* @hdev: pointer to hl_device structure
*
*/
static void goya_disable_internal_queues(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
if (!(goya->hw_cap_initialized & HW_CAP_MME))
goto disable_tpc;
WREG32(mmMME_QM_GLBL_CFG0, 0);
WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
disable_tpc:
if (!(goya->hw_cap_initialized & HW_CAP_TPC))
return;
WREG32(mmTPC0_QM_GLBL_CFG0, 0);
WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
WREG32(mmTPC1_QM_GLBL_CFG0, 0);
WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
WREG32(mmTPC2_QM_GLBL_CFG0, 0);
WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
WREG32(mmTPC3_QM_GLBL_CFG0, 0);
WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
WREG32(mmTPC4_QM_GLBL_CFG0, 0);
WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
WREG32(mmTPC5_QM_GLBL_CFG0, 0);
WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
WREG32(mmTPC6_QM_GLBL_CFG0, 0);
WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
WREG32(mmTPC7_QM_GLBL_CFG0, 0);
WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
}
/*
* goya_stop_internal_queues - Stop internal queues
*
* @hdev: pointer to hl_device structure
*
* Returns 0 on success
*
*/
static int goya_stop_internal_queues(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
int rc, retval = 0;
if (!(goya->hw_cap_initialized & HW_CAP_MME))
goto stop_tpc;
/*
* Each queue (QMAN) is a separate H/W logic, so each QMAN can be
* stopped independently, and a failure to stop one of them does NOT
* prevent us from trying to stop the others.
*/
rc = goya_stop_queue(hdev,
mmMME_QM_GLBL_CFG1,
mmMME_QM_CP_STS,
mmMME_QM_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop MME QMAN\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmMME_CMDQ_GLBL_CFG1,
mmMME_CMDQ_CP_STS,
mmMME_CMDQ_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop MME CMDQ\n");
retval = -EIO;
}
stop_tpc:
if (!(goya->hw_cap_initialized & HW_CAP_TPC))
return retval;
rc = goya_stop_queue(hdev,
mmTPC0_QM_GLBL_CFG1,
mmTPC0_QM_CP_STS,
mmTPC0_QM_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC0_CMDQ_GLBL_CFG1,
mmTPC0_CMDQ_CP_STS,
mmTPC0_CMDQ_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC1_QM_GLBL_CFG1,
mmTPC1_QM_CP_STS,
mmTPC1_QM_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC1_CMDQ_GLBL_CFG1,
mmTPC1_CMDQ_CP_STS,
mmTPC1_CMDQ_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC2_QM_GLBL_CFG1,
mmTPC2_QM_CP_STS,
mmTPC2_QM_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC2_CMDQ_GLBL_CFG1,
mmTPC2_CMDQ_CP_STS,
mmTPC2_CMDQ_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC3_QM_GLBL_CFG1,
mmTPC3_QM_CP_STS,
mmTPC3_QM_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC3_CMDQ_GLBL_CFG1,
mmTPC3_CMDQ_CP_STS,
mmTPC3_CMDQ_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC4_QM_GLBL_CFG1,
mmTPC4_QM_CP_STS,
mmTPC4_QM_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC4_CMDQ_GLBL_CFG1,
mmTPC4_CMDQ_CP_STS,
mmTPC4_CMDQ_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC5_QM_GLBL_CFG1,
mmTPC5_QM_CP_STS,
mmTPC5_QM_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC5_CMDQ_GLBL_CFG1,
mmTPC5_CMDQ_CP_STS,
mmTPC5_CMDQ_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC6_QM_GLBL_CFG1,
mmTPC6_QM_CP_STS,
mmTPC6_QM_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC6_CMDQ_GLBL_CFG1,
mmTPC6_CMDQ_CP_STS,
mmTPC6_CMDQ_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC7_QM_GLBL_CFG1,
mmTPC7_QM_CP_STS,
mmTPC7_QM_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
retval = -EIO;
}
rc = goya_stop_queue(hdev,
mmTPC7_CMDQ_GLBL_CFG1,
mmTPC7_CMDQ_CP_STS,
mmTPC7_CMDQ_GLBL_STS0);
if (rc) {
dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
retval = -EIO;
}
return retval;
}
static void goya_dma_stall(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
if (!(goya->hw_cap_initialized & HW_CAP_DMA))
return;
WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
}
static void goya_tpc_stall(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
if (!(goya->hw_cap_initialized & HW_CAP_TPC))
return;
WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
}
static void goya_mme_stall(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
if (!(goya->hw_cap_initialized & HW_CAP_MME))
return;
WREG32(mmMME_STALL, 0xFFFFFFFF);
}
static int goya_enable_msix(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
int cq_cnt = hdev->asic_prop.completion_queues_count;
int rc, i, irq_cnt_init, irq;
if (goya->hw_cap_initialized & HW_CAP_MSIX)
return 0;
rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
if (rc < 0) {
dev_err(hdev->dev,
"MSI-X: Failed to enable support -- %d/%d\n",
GOYA_MSIX_ENTRIES, rc);
return rc;
}
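/*
 * One MSI-X vector is requested per completion queue; a separate vector
 * (GOYA_EVENT_QUEUE_MSIX_IDX) serves the event queue.
 */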
for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
irq = pci_irq_vector(hdev->pdev, i);
rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
&hdev->completion_queue[i]);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_irqs;
}
}
irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
rc = request_irq(irq, hl_irq_handler_eq, 0,
goya_irq_name[GOYA_EVENT_QUEUE_MSIX_IDX],
&hdev->event_queue);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_irqs;
}
goya->hw_cap_initialized |= HW_CAP_MSIX;
return 0;
free_irqs:
for (i = 0 ; i < irq_cnt_init ; i++)
free_irq(pci_irq_vector(hdev->pdev, i),
&hdev->completion_queue[i]);
pci_free_irq_vectors(hdev->pdev);
return rc;
}
static void goya_sync_irqs(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
int i;
if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
return;
/* Wait for all pending IRQ handlers to finish */
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
synchronize_irq(pci_irq_vector(hdev->pdev, i));
synchronize_irq(pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX));
}
static void goya_disable_msix(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
int i, irq;
if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
return;
goya_sync_irqs(hdev);
irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
free_irq(irq, &hdev->event_queue);
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
irq = pci_irq_vector(hdev->pdev, i);
free_irq(irq, &hdev->completion_queue[i]);
}
pci_free_irq_vectors(hdev->pdev);
goya->hw_cap_initialized &= ~HW_CAP_MSIX;
}
static void goya_enable_timestamp(struct hl_device *hdev)
{
/* Disable the timestamp counter */
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
/* Zero the lower/upper parts of the 64-bit counter */
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
/* Enable the counter */
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
}
static void goya_disable_timestamp(struct hl_device *hdev)
{
/* Disable the timestamp counter */
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
}
static void goya_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
u32 wait_timeout_ms;
if (hdev->pldm)
wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
else
wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
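/*
 * Halt order: stop the external and internal queues, wait, stall the
 * DMA/TPC/MME engines, wait again, then disable the queues and the
 * timestamp counter.
 */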
goya_stop_external_queues(hdev);
goya_stop_internal_queues(hdev);
msleep(wait_timeout_ms);
goya_dma_stall(hdev);
goya_tpc_stall(hdev);
goya_mme_stall(hdev);
msleep(wait_timeout_ms);
goya_disable_external_queues(hdev);
goya_disable_internal_queues(hdev);
goya_disable_timestamp(hdev);
if (hard_reset) {
goya_disable_msix(hdev);
goya_mmu_remove_device_cpu_mappings(hdev);
} else {
goya_sync_irqs(hdev);
}
}
/*
* goya_load_firmware_to_device() - Load LINUX FW code to device.
* @hdev: Pointer to hl_device structure.
*
* Copy LINUX fw code from firmware file to HBM BAR.
*
* Return: 0 on success, non-zero for failure.
*/
static int goya_load_firmware_to_device(struct hl_device *hdev)
{
void __iomem *dst;
dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst, 0, 0);
}
/*
* goya_load_boot_fit_to_device() - Load boot fit to device.
* @hdev: Pointer to hl_device structure.
*
* Copy boot fit file to SRAM BAR.
*
* Return: 0 on success, non-zero for failure.
*/
static int goya_load_boot_fit_to_device(struct hl_device *hdev)
{
void __iomem *dst;
dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + BOOT_FIT_SRAM_OFFSET;
return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst, 0, 0);
}
static void goya_init_dynamic_firmware_loader(struct hl_device *hdev)
{
struct dynamic_fw_load_mgr *dynamic_loader;
struct cpu_dyn_regs *dyn_regs;
dynamic_loader = &hdev->fw_loader.dynamic_loader;
/*
* Here we set initial values for a few specific dynamic registers;
* before the first descriptor is read from the FW, those values have to
* be hard-coded. In later stages of the protocol they are updated
* automatically from the FW descriptor, so the data there is always
* up-to-date.
*/
dyn_regs = &dynamic_loader->comm_desc.cpu_dyn_regs;
dyn_regs->kmd_msg_to_cpu =
cpu_to_le32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU);
dyn_regs->cpu_cmd_status_to_host =
cpu_to_le32(mmCPU_CMD_STATUS_TO_HOST);
dynamic_loader->wait_for_bl_timeout = GOYA_WAIT_FOR_BL_TIMEOUT_USEC;
}
static void goya_init_static_firmware_loader(struct hl_device *hdev)
{
struct static_fw_load_mgr *static_loader;
static_loader = &hdev->fw_loader.static_loader;
static_loader->preboot_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
static_loader->boot_fit_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
static_loader->kmd_msg_to_cpu_reg = mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU;
static_loader->cpu_cmd_status_to_host_reg = mmCPU_CMD_STATUS_TO_HOST;
static_loader->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
static_loader->cpu_boot_dev_status0_reg = mmCPU_BOOT_DEV_STS0;
static_loader->cpu_boot_dev_status1_reg = mmCPU_BOOT_DEV_STS1;
static_loader->boot_err0_reg = mmCPU_BOOT_ERR0;
static_loader->boot_err1_reg = mmCPU_BOOT_ERR1;
static_loader->preboot_version_offset_reg = mmPREBOOT_VER_OFFSET;
static_loader->boot_fit_version_offset_reg = mmUBOOT_VER_OFFSET;
static_loader->sram_offset_mask = ~(lower_32_bits(SRAM_BASE_ADDR));
}
static void goya_init_firmware_preload_params(struct hl_device *hdev)
{
struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
pre_fw_load->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
pre_fw_load->sts_boot_dev_sts0_reg = mmCPU_BOOT_DEV_STS0;
pre_fw_load->sts_boot_dev_sts1_reg = mmCPU_BOOT_DEV_STS1;
pre_fw_load->boot_err0_reg = mmCPU_BOOT_ERR0;
pre_fw_load->boot_err1_reg = mmCPU_BOOT_ERR1;
pre_fw_load->wait_for_preboot_timeout = GOYA_BOOT_FIT_REQ_TIMEOUT_USEC;
}
static void goya_init_firmware_loader(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
/* fill common fields */
fw_loader->fw_comp_loaded = FW_TYPE_NONE;
fw_loader->boot_fit_img.image_name = GOYA_BOOT_FIT_FILE;
fw_loader->linux_img.image_name = GOYA_LINUX_FW_FILE;
fw_loader->cpu_timeout = GOYA_CPU_TIMEOUT_USEC;
fw_loader->boot_fit_timeout = GOYA_BOOT_FIT_REQ_TIMEOUT_USEC;
fw_loader->skip_bmc = false;
fw_loader->sram_bar_id = SRAM_CFG_BAR_ID;
fw_loader->dram_bar_id = DDR_BAR_ID;
if (prop->dynamic_fw_load)
goya_init_dynamic_firmware_loader(hdev);
else
goya_init_static_firmware_loader(hdev);
}
static int goya_init_cpu(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
int rc;
if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
return 0;
if (goya->hw_cap_initialized & HW_CAP_CPU)
return 0;
/*
* Before pushing u-boot/linux to the device, we need to set the DDR BAR
* to the base address of the DRAM
*/
if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
dev_err(hdev->dev,
"failed to map DDR bar to DRAM base address\n");
return -EIO;
}
rc = hl_fw_init_cpu(hdev);
if (rc)
return rc;
goya->hw_cap_initialized |= HW_CAP_CPU;
return 0;
}
static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
u64 phys_addr)
{
u32 status, timeout_usec;
int rc;
if (hdev->pldm)
timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
else
timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
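/*
 * Program the hop0 page-table physical address for this ASID and set
 * the busy bit (bit 31); the H/W clears it when the update is done,
 * which is what the poll below waits for.
 */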
WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
rc = hl_poll_timeout(
hdev,
MMU_ASID_BUSY,
status,
!(status & 0x80000000),
1000,
timeout_usec);
if (rc) {
dev_err(hdev->dev,
"Timeout during MMU hop0 config of asid %d\n", asid);
return rc;
}
return 0;
}
int goya_mmu_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct goya_device *goya = hdev->asic_specific;
u64 hop0_addr;
int rc, i;
if (goya->hw_cap_initialized & HW_CAP_MMU)
return 0;
hdev->dram_default_page_mapping = true;
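/*
 * Hop0 page tables are laid out consecutively starting at mmu_pgt_addr,
 * one table of mmu_hop_table_size bytes per ASID.
 */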
for (i = 0 ; i < prop->max_asid ; i++) {
hop0_addr = prop->mmu_pgt_addr +
(i * prop->mmu_hop_table_size);
rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
if (rc) {
dev_err(hdev->dev,
"failed to set hop0 addr for asid %d\n", i);
goto err;
}
}
goya->hw_cap_initialized |= HW_CAP_MMU;
/* init MMU cache manage page */
WREG32(mmSTLB_CACHE_INV_BASE_39_8,
lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
/* Remove follower feature due to performance bug */
WREG32_AND(mmSTLB_STLB_FEATURE_EN,
(~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR | MMU_OP_PHYS_PACK);
WREG32(mmMMU_MMU_ENABLE, 1);
WREG32(mmMMU_SPI_MASK, 0xF);
return 0;
err:
return rc;
}
/*
* goya_hw_init - Goya hardware initialization code
*
* @hdev: pointer to hl_device structure
*
* Returns 0 on success
*
*/
static int goya_hw_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
/* Perform read from the device to make sure device is up */
RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
/*
* Let's mark in the H/W that we have reached this point. We check
* this value in the reset_before_init function to understand whether
* we need to reset the chip before doing H/W init. This register is
* cleared by the H/W upon H/W reset
*/
WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
rc = goya_init_cpu(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize CPU\n");
return rc;
}
goya_tpc_mbist_workaround(hdev);
goya_init_golden_registers(hdev);
/*
* After CPU initialization is finished, change DDR bar mapping inside
* iATU to point to the start address of the MMU page tables
*/
if (goya_set_ddr_bar_base(hdev, (MMU_PAGE_TABLES_ADDR &
~(prop->dram_pci_bar_size - 0x1ull))) == U64_MAX) {
dev_err(hdev->dev,
"failed to map DDR bar to MMU page tables\n");
return -EIO;
}
rc = goya_mmu_init(hdev);
if (rc)
return rc;
goya_init_security(hdev);
goya_init_dma_qmans(hdev);
goya_init_mme_qmans(hdev);
goya_init_tpc_qmans(hdev);
goya_enable_timestamp(hdev);
/* MSI-X must be enabled before CPU queues are initialized */
rc = goya_enable_msix(hdev);
if (rc)
goto disable_queues;
/* Perform read from the device to flush all MSI-X configuration */
RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
return 0;
disable_queues:
goya_disable_internal_queues(hdev);
goya_disable_external_queues(hdev);
return rc;
}
static int goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct goya_device *goya = hdev->asic_specific;
u32 reset_timeout_ms, cpu_timeout_ms, status;
if (hdev->pldm) {
reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
} else {
reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
}
if (hard_reset) {
/* We don't know what state the CPU is in, so make sure it is
* stopped by any means necessary
*/
WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
msleep(cpu_timeout_ms);
goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
goya_disable_clk_rlx(hdev);
goya_set_pll_refclk(hdev);
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
dev_dbg(hdev->dev,
"Issued HARD reset command, going to wait %dms\n",
reset_timeout_ms);
} else {
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
dev_dbg(hdev->dev,
"Issued SOFT reset command, going to wait %dms\n",
reset_timeout_ms);
}
/*
* After a hard reset we can't poll the BTM_FSM register because the
* PSOC itself is in reset. In either type of reset we need to wait
* until the reset is deasserted before reading it.
*/
msleep(reset_timeout_ms);
status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK) {
dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", status);
return -ETIMEDOUT;
}
if (!hard_reset && goya) {
goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
HW_CAP_GOLDEN | HW_CAP_TPC);
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GOYA_ASYNC_EVENT_ID_SOFT_RESET);
return 0;
}
/* Chicken bit to re-initiate boot sequencer flow */
WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
/* Move boot manager FSM to pre boot sequencer init state */
WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
if (goya) {
goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
HW_CAP_DDR_0 | HW_CAP_DDR_1 |
HW_CAP_DMA | HW_CAP_MME |
HW_CAP_MMU | HW_CAP_TPC_MBIST |
HW_CAP_GOLDEN | HW_CAP_TPC);
memset(goya->events_stat, 0, sizeof(goya->events_stat));
}
return 0;
}
int goya_suspend(struct hl_device *hdev)
{
int rc;
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
return rc;
}
int goya_resume(struct hl_device *hdev)
{
return goya_init_iatu(hdev);
}
static int goya_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int rc;
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
return rc;
}
void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
{
u32 db_reg_offset, db_value;
switch (hw_queue_id) {
case GOYA_QUEUE_ID_DMA_0:
db_reg_offset = mmDMA_QM_0_PQ_PI;
break;
case GOYA_QUEUE_ID_DMA_1:
db_reg_offset = mmDMA_QM_1_PQ_PI;
break;
case GOYA_QUEUE_ID_DMA_2:
db_reg_offset = mmDMA_QM_2_PQ_PI;
break;
case GOYA_QUEUE_ID_DMA_3:
db_reg_offset = mmDMA_QM_3_PQ_PI;
break;
case GOYA_QUEUE_ID_DMA_4:
db_reg_offset = mmDMA_QM_4_PQ_PI;
break;
case GOYA_QUEUE_ID_CPU_PQ:
db_reg_offset = mmCPU_IF_PF_PQ_PI;
break;
case GOYA_QUEUE_ID_MME:
db_reg_offset = mmMME_QM_PQ_PI;
break;
case GOYA_QUEUE_ID_TPC0:
db_reg_offset = mmTPC0_QM_PQ_PI;
break;
case GOYA_QUEUE_ID_TPC1:
db_reg_offset = mmTPC1_QM_PQ_PI;
break;
case GOYA_QUEUE_ID_TPC2:
db_reg_offset = mmTPC2_QM_PQ_PI;
break;
case GOYA_QUEUE_ID_TPC3:
db_reg_offset = mmTPC3_QM_PQ_PI;
break;
case GOYA_QUEUE_ID_TPC4:
db_reg_offset = mmTPC4_QM_PQ_PI;
break;
case GOYA_QUEUE_ID_TPC5:
db_reg_offset = mmTPC5_QM_PQ_PI;
break;
case GOYA_QUEUE_ID_TPC6:
db_reg_offset = mmTPC6_QM_PQ_PI;
break;
case GOYA_QUEUE_ID_TPC7:
db_reg_offset = mmTPC7_QM_PQ_PI;
break;
default:
/* Should never get here */
dev_err(hdev->dev, "H/W queue %d is invalid. Can't set pi\n",
hw_queue_id);
return;
}
db_value = pi;
/* ring the doorbell */
WREG32(db_reg_offset, db_value);
if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ) {
/* make sure device CPU will read latest data from host */
mb();
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GOYA_ASYNC_EVENT_ID_PI_UPDATE);
}
}
void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
{
/* The QMANs are on the SRAM so need to copy to IO space */
memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
}
static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
dma_handle, flags);
/* Shift to the device's base physical address of host memory */
if (kernel_addr)
*dma_handle += HOST_PHYS_BASE;
return kernel_addr;
}
static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
/* Cancel the device's base physical address of host memory */
dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;
dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
}
int goya_scrub_device_mem(struct hl_device *hdev)
{
return 0;
}
void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
dma_addr_t *dma_handle, u16 *queue_len)
{
void *base;
u32 offset;
*dma_handle = hdev->asic_prop.sram_base_address;
base = (__force void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
switch (queue_id) {
case GOYA_QUEUE_ID_MME:
offset = MME_QMAN_BASE_OFFSET;
*queue_len = MME_QMAN_LENGTH;
break;
case GOYA_QUEUE_ID_TPC0:
offset = TPC0_QMAN_BASE_OFFSET;
*queue_len = TPC_QMAN_LENGTH;
break;
case GOYA_QUEUE_ID_TPC1:
offset = TPC1_QMAN_BASE_OFFSET;
*queue_len = TPC_QMAN_LENGTH;
break;
case GOYA_QUEUE_ID_TPC2:
offset = TPC2_QMAN_BASE_OFFSET;
*queue_len = TPC_QMAN_LENGTH;
break;
case GOYA_QUEUE_ID_TPC3:
offset = TPC3_QMAN_BASE_OFFSET;
*queue_len = TPC_QMAN_LENGTH;
break;
case GOYA_QUEUE_ID_TPC4:
offset = TPC4_QMAN_BASE_OFFSET;
*queue_len = TPC_QMAN_LENGTH;
break;
case GOYA_QUEUE_ID_TPC5:
offset = TPC5_QMAN_BASE_OFFSET;
*queue_len = TPC_QMAN_LENGTH;
break;
case GOYA_QUEUE_ID_TPC6:
offset = TPC6_QMAN_BASE_OFFSET;
*queue_len = TPC_QMAN_LENGTH;
break;
case GOYA_QUEUE_ID_TPC7:
offset = TPC7_QMAN_BASE_OFFSET;
*queue_len = TPC_QMAN_LENGTH;
break;
default:
dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
return NULL;
}
base += offset;
*dma_handle += offset;
return base;
}
static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
{
struct packet_msg_prot *fence_pkt;
u32 *fence_ptr;
dma_addr_t fence_dma_addr;
struct hl_cb *cb;
u32 tmp, timeout;
int rc;
if (hdev->pldm)
timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
else
timeout = HL_DEVICE_TIMEOUT_USEC;
if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) {
dev_err_ratelimited(hdev->dev,
"Can't send driver job on QMAN0 because the device is not idle\n");
return -EBUSY;
}
fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
"Failed to allocate fence memory for QMAN0\n");
return -ENOMEM;
}
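/*
 * Fill the MSG_PROT slot at the tail of the patched CB so the QMAN
 * writes GOYA_QMAN0_FENCE_VAL to the fence buffer when the job is done;
 * the poll below waits for that value.
 */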
goya_qman0_set_security(hdev, true);
cb = job->patched_cb;
fence_pkt = cb->kernel_address +
job->job_cb_size - sizeof(struct packet_msg_prot);
tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
(1 << GOYA_PKT_CTL_EB_SHIFT) |
(1 << GOYA_PKT_CTL_MB_SHIFT);
fence_pkt->ctl = cpu_to_le32(tmp);
fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
fence_pkt->addr = cpu_to_le64(fence_dma_addr);
rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
job->job_cb_size, cb->bus_address);
if (rc) {
dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
goto free_fence_ptr;
}
rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
(tmp == GOYA_QMAN0_FENCE_VAL), 1000,
timeout, true);
hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
if (rc == -ETIMEDOUT) {
dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
goto free_fence_ptr;
}
free_fence_ptr:
hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
goya_qman0_set_security(hdev, false);
return rc;
}
int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
u32 timeout, u64 *result)
{
struct goya_device *goya = hdev->asic_specific;
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
if (result)
*result = 0;
return 0;
}
if (!timeout)
timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC;
return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
timeout, result);
}
int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
{
struct packet_msg_prot *fence_pkt;
dma_addr_t pkt_dma_addr;
u32 fence_val, tmp;
dma_addr_t fence_dma_addr;
u32 *fence_ptr;
int rc;
fence_val = GOYA_QMAN0_FENCE_VAL;
fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
"Failed to allocate memory for H/W queue %d testing\n",
hw_queue_id);
return -ENOMEM;
}
*fence_ptr = 0;
fence_pkt = hl_asic_dma_pool_zalloc(hdev, sizeof(struct packet_msg_prot), GFP_KERNEL,
&pkt_dma_addr);
if (!fence_pkt) {
dev_err(hdev->dev,
"Failed to allocate packet for H/W queue %d testing\n",
hw_queue_id);
rc = -ENOMEM;
goto free_fence_ptr;
}
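/*
 * The test sends a single MSG_PROT packet that writes fence_val to
 * fence_ptr; the queue passes if the value shows up before the timeout.
 */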
tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
(1 << GOYA_PKT_CTL_EB_SHIFT) |
(1 << GOYA_PKT_CTL_MB_SHIFT);
fence_pkt->ctl = cpu_to_le32(tmp);
fence_pkt->value = cpu_to_le32(fence_val);
fence_pkt->addr = cpu_to_le64(fence_dma_addr);
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
sizeof(struct packet_msg_prot),
pkt_dma_addr);
if (rc) {
dev_err(hdev->dev,
"Failed to send fence packet to H/W queue %d\n",
hw_queue_id);
goto free_pkt;
}
rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
1000, GOYA_TEST_QUEUE_WAIT_USEC, true);
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
if (rc == -ETIMEDOUT) {
dev_err(hdev->dev,
"H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
rc = -EIO;
}
free_pkt:
hl_asic_dma_pool_free(hdev, (void *) fence_pkt, pkt_dma_addr);
free_fence_ptr:
hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
return rc;
}
int goya_test_cpu_queue(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
/*
* Check the capability here because send_cpu_message() won't update the
* result value if the capability isn't set
*/
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
return hl_fw_test_cpu_queue(hdev);
}
int goya_test_queues(struct hl_device *hdev)
{
int i, rc, ret_val = 0;
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
rc = goya_test_queue(hdev, i);
if (rc)
ret_val = -EINVAL;
}
return ret_val;
}
static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
gfp_t mem_flags, dma_addr_t *dma_handle)
{
void *kernel_addr;
if (size > GOYA_DMA_POOL_BLK_SIZE)
return NULL;
kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
/* Shift to the device's base physical address of host memory */
if (kernel_addr)
*dma_handle += HOST_PHYS_BASE;
return kernel_addr;
}
static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
dma_addr_t dma_addr)
{
/* Cancel the device's base physical address of host memory */
dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;
dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
}
void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle)
{
void *vaddr;
vaddr = hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
*dma_handle = (*dma_handle) - hdev->cpu_accessible_dma_address +
VA_CPU_ACCESSIBLE_MEM_ADDR;
return vaddr;
}
void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr)
{
hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
}
u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
{
struct scatterlist *sg, *sg_next_iter;
u32 count, dma_desc_cnt;
u64 len, len_next;
dma_addr_t addr, addr_next;
dma_desc_cnt = 0;
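/*
 * Walk the DMA-mapped SG list and merge physically contiguous entries
 * as long as the merged chunk fits in DMA_MAX_TRANSFER_SIZE; each
 * resulting chunk requires one LIN_DMA packet.
 */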
for_each_sgtable_dma_sg(sgt, sg, count) {
len = sg_dma_len(sg);
addr = sg_dma_address(sg);
if (len == 0)
break;
while ((count + 1) < sgt->nents) {
sg_next_iter = sg_next(sg);
len_next = sg_dma_len(sg_next_iter);
addr_next = sg_dma_address(sg_next_iter);
if (len_next == 0)
break;
if ((addr + len == addr_next) &&
(len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
len += len_next;
count++;
sg = sg_next_iter;
} else {
break;
}
}
dma_desc_cnt++;
}
return dma_desc_cnt * sizeof(struct packet_lin_dma);
}
static int goya_pin_memory_before_cs(struct hl_device *hdev,
struct hl_cs_parser *parser,
struct packet_lin_dma *user_dma_pkt,
u64 addr, enum dma_data_direction dir)
{
struct hl_userptr *userptr;
int rc;
if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
parser->job_userptr_list, &userptr))
goto already_pinned;
userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
if (!userptr)
return -ENOMEM;
rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
userptr);
if (rc)
goto free_userptr;
list_add_tail(&userptr->job_node, parser->job_userptr_list);
rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir);
if (rc) {
dev_err(hdev->dev, "failed to map sgt with DMA region\n");
goto unpin_memory;
}
userptr->dma_mapped = true;
userptr->dir = dir;
already_pinned:
parser->patched_cb_size +=
goya_get_dma_desc_list_size(hdev, userptr->sgt);
return 0;
unpin_memory:
list_del(&userptr->job_node);
hl_unpin_host_memory(hdev, userptr);
free_userptr:
kfree(userptr);
return rc;
}
static int goya_validate_dma_pkt_host(struct hl_device *hdev,
struct hl_cs_parser *parser,
struct packet_lin_dma *user_dma_pkt)
{
u64 device_memory_addr, addr;
enum dma_data_direction dir;
enum hl_goya_dma_direction user_dir;
bool sram_addr = true;
bool skip_host_mem_pin = false;
bool user_memset;
u32 ctl;
int rc = 0;
ctl = le32_to_cpu(user_dma_pkt->ctl);
user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
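/*
 * A host-to-device memset has no host buffer to pin, so
 * skip_host_mem_pin is set for that case below.
 */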
switch (user_dir) {
case HL_DMA_HOST_TO_DRAM:
dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
dir = DMA_TO_DEVICE;
sram_addr = false;
addr = le64_to_cpu(user_dma_pkt->src_addr);
device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
if (user_memset)
skip_host_mem_pin = true;
break;
case HL_DMA_DRAM_TO_HOST:
dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
dir = DMA_FROM_DEVICE;
sram_addr = false;
addr = le64_to_cpu(user_dma_pkt->dst_addr);
device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
break;
case HL_DMA_HOST_TO_SRAM:
dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
dir = DMA_TO_DEVICE;
addr = le64_to_cpu(user_dma_pkt->src_addr);
device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
if (user_memset)
skip_host_mem_pin = true;
break;
case HL_DMA_SRAM_TO_HOST:
dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
dir = DMA_FROM_DEVICE;
addr = le64_to_cpu(user_dma_pkt->dst_addr);
device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
break;
default:
dev_err(hdev->dev, "DMA direction %d is unsupported/undefined\n", user_dir);
return -EFAULT;
}
if (sram_addr) {
if (!hl_mem_area_inside_range(device_memory_addr,
le32_to_cpu(user_dma_pkt->tsize),
hdev->asic_prop.sram_user_base_address,
hdev->asic_prop.sram_end_address)) {
dev_err(hdev->dev,
"SRAM address 0x%llx + 0x%x is invalid\n",
device_memory_addr,
user_dma_pkt->tsize);
return -EFAULT;
}
} else {
if (!hl_mem_area_inside_range(device_memory_addr,
le32_to_cpu(user_dma_pkt->tsize),
hdev->asic_prop.dram_user_base_address,
hdev->asic_prop.dram_end_address)) {
dev_err(hdev->dev,
"DRAM address 0x%llx + 0x%x is invalid\n",
device_memory_addr,
user_dma_pkt->tsize);
return -EFAULT;
}
}
if (skip_host_mem_pin)
parser->patched_cb_size += sizeof(*user_dma_pkt);
else {
if ((dir == DMA_TO_DEVICE) &&
(parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
dev_err(hdev->dev,
"Can't DMA from host on queue other then 1\n");
return -EFAULT;
}
rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
addr, dir);
}
return rc;
}
static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
struct hl_cs_parser *parser,
struct packet_lin_dma *user_dma_pkt)
{
u64 sram_memory_addr, dram_memory_addr;
enum hl_goya_dma_direction user_dir;
u32 ctl;
ctl = le32_to_cpu(user_dma_pkt->ctl);
user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
if (user_dir == HL_DMA_DRAM_TO_SRAM) {
dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
} else {
dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
}
if (!hl_mem_area_inside_range(sram_memory_addr,
le32_to_cpu(user_dma_pkt->tsize),
hdev->asic_prop.sram_user_base_address,
hdev->asic_prop.sram_end_address)) {
dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
sram_memory_addr, user_dma_pkt->tsize);
return -EFAULT;
}
if (!hl_mem_area_inside_range(dram_memory_addr,
le32_to_cpu(user_dma_pkt->tsize),
hdev->asic_prop.dram_user_base_address,
hdev->asic_prop.dram_end_address)) {
dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
dram_memory_addr, user_dma_pkt->tsize);
return -EFAULT;
}
parser->patched_cb_size += sizeof(*user_dma_pkt);
return 0;
}
static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser,
struct packet_lin_dma *user_dma_pkt)
{
enum hl_goya_dma_direction user_dir;
u32 ctl;
int rc;
dev_dbg(hdev->dev, "DMA packet details:\n");
dev_dbg(hdev->dev, "source == 0x%llx\n",
le64_to_cpu(user_dma_pkt->src_addr));
dev_dbg(hdev->dev, "destination == 0x%llx\n",
le64_to_cpu(user_dma_pkt->dst_addr));
dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
ctl = le32_to_cpu(user_dma_pkt->ctl);
user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
/*
* Special handling for DMA with size 0. The H/W has a bug where
* this can cause the QMAN DMA to get stuck, so block it here.
*/
if (user_dma_pkt->tsize == 0) {
dev_err(hdev->dev,
"Got DMA with size 0, might reset the device\n");
return -EINVAL;
}
if ((user_dir == HL_DMA_DRAM_TO_SRAM) || (user_dir == HL_DMA_SRAM_TO_DRAM))
rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
else
rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
return rc;
}
static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser,
struct packet_lin_dma *user_dma_pkt)
{
dev_dbg(hdev->dev, "DMA packet details:\n");
dev_dbg(hdev->dev, "source == 0x%llx\n",
le64_to_cpu(user_dma_pkt->src_addr));
dev_dbg(hdev->dev, "destination == 0x%llx\n",
le64_to_cpu(user_dma_pkt->dst_addr));
dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
/*
* WA for HW-23.
* We can't allow user to read from Host using QMANs other than 1.
* PMMU and HPMMU addresses are equal, check only one of them.
*/
if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
le32_to_cpu(user_dma_pkt->tsize),
hdev->asic_prop.pmmu.start_addr,
hdev->asic_prop.pmmu.end_addr)) {
dev_err(hdev->dev,
"Can't DMA from host on queue other then 1\n");
return -EFAULT;
}
if (user_dma_pkt->tsize == 0) {
dev_err(hdev->dev,
"Got DMA with size 0, might reset the device\n");
return -EINVAL;
}
parser->patched_cb_size += sizeof(*user_dma_pkt);
return 0;
}
static int goya_validate_wreg32(struct hl_device *hdev,
struct hl_cs_parser *parser,
struct packet_wreg32 *wreg_pkt)
{
struct goya_device *goya = hdev->asic_specific;
u32 sob_start_addr, sob_end_addr;
u16 reg_offset;
reg_offset = le32_to_cpu(wreg_pkt->ctl) &
GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
dev_dbg(hdev->dev, "WREG32 packet details:\n");
dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
dev_dbg(hdev->dev, "value == 0x%x\n",
le32_to_cpu(wreg_pkt->value));
if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
reg_offset);
return -EPERM;
}
/*
* With the MMU, DMA channels are not secured, so it doesn't matter
* where the WR COMP will be written to because it will go out with the
* non-secured property anyway
*/
if (goya->hw_cap_initialized & HW_CAP_MMU)
return 0;
sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
(le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {
dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
wreg_pkt->value);
return -EPERM;
}
return 0;
}
static int goya_validate_cb(struct hl_device *hdev,
struct hl_cs_parser *parser, bool is_mmu)
{
u32 cb_parsed_length = 0;
int rc = 0;
parser->patched_cb_size = 0;
/* user_cb_size is more than 0 so the loop will always be executed */
while (cb_parsed_length < parser->user_cb_size) {
enum packet_id pkt_id;
u16 pkt_size;
struct goya_packet *user_pkt;
user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
pkt_id = (enum packet_id) (
(le64_to_cpu(user_pkt->header) &
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
if (!validate_packet_id(pkt_id)) {
dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
rc = -EINVAL;
break;
}
pkt_size = goya_packet_sizes[pkt_id];
cb_parsed_length += pkt_size;
if (cb_parsed_length > parser->user_cb_size) {
dev_err(hdev->dev,
"packet 0x%x is out of CB boundary\n", pkt_id);
rc = -EINVAL;
break;
}
switch (pkt_id) {
case PACKET_WREG_32:
/*
* Although it is validated again after the copy in patch_cb(), we
* need to validate it here as well because patch_cb() is not
* called in the MMU path while this function is.
*/
rc = goya_validate_wreg32(hdev,
parser, (struct packet_wreg32 *) user_pkt);
parser->patched_cb_size += pkt_size;
break;
case PACKET_WREG_BULK:
dev_err(hdev->dev,
"User not allowed to use WREG_BULK\n");
rc = -EPERM;
break;
case PACKET_MSG_PROT:
dev_err(hdev->dev,
"User not allowed to use MSG_PROT\n");
rc = -EPERM;
break;
case PACKET_CP_DMA:
dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
rc = -EPERM;
break;
case PACKET_STOP:
dev_err(hdev->dev, "User not allowed to use STOP\n");
rc = -EPERM;
break;
case PACKET_LIN_DMA:
if (is_mmu)
rc = goya_validate_dma_pkt_mmu(hdev, parser,
(struct packet_lin_dma *) user_pkt);
else
rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
(struct packet_lin_dma *) user_pkt);
break;
case PACKET_MSG_LONG:
case PACKET_MSG_SHORT:
case PACKET_FENCE:
case PACKET_NOP:
parser->patched_cb_size += pkt_size;
break;
default:
dev_err(hdev->dev, "Invalid packet header 0x%x\n",
pkt_id);
rc = -EINVAL;
break;
}
if (rc)
break;
}
/*
* The new CB should have space at the end for two MSG_PROT packets:
* 1. A packet that will act as a completion packet
* 2. A packet that will generate MSI-X interrupt
*/
parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
return rc;
}
static int goya_patch_dma_packet(struct hl_device *hdev,
struct hl_cs_parser *parser,
struct packet_lin_dma *user_dma_pkt,
struct packet_lin_dma *new_dma_pkt,
u32 *new_dma_pkt_size)
{
struct hl_userptr *userptr;
struct scatterlist *sg, *sg_next_iter;
u32 count, dma_desc_cnt;
u64 len, len_next;
dma_addr_t dma_addr, dma_addr_next;
enum hl_goya_dma_direction user_dir;
u64 device_memory_addr, addr;
enum dma_data_direction dir;
struct sg_table *sgt;
bool skip_host_mem_pin = false;
bool user_memset;
u32 user_rdcomp_mask, user_wrcomp_mask, ctl;
ctl = le32_to_cpu(user_dma_pkt->ctl);
user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
if ((user_dir == HL_DMA_DRAM_TO_SRAM) || (user_dir == HL_DMA_SRAM_TO_DRAM) ||
(user_dma_pkt->tsize == 0)) {
memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
*new_dma_pkt_size = sizeof(*new_dma_pkt);
return 0;
}
if ((user_dir == HL_DMA_HOST_TO_DRAM) || (user_dir == HL_DMA_HOST_TO_SRAM)) {
addr = le64_to_cpu(user_dma_pkt->src_addr);
device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
dir = DMA_TO_DEVICE;
if (user_memset)
skip_host_mem_pin = true;
} else {
addr = le64_to_cpu(user_dma_pkt->dst_addr);
device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
dir = DMA_FROM_DEVICE;
}
if ((!skip_host_mem_pin) &&
(hl_userptr_is_pinned(hdev, addr,
le32_to_cpu(user_dma_pkt->tsize),
parser->job_userptr_list, &userptr) == false)) {
dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
addr, user_dma_pkt->tsize);
return -EFAULT;
}
if ((user_memset) && (dir == DMA_TO_DEVICE)) {
memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
*new_dma_pkt_size = sizeof(*user_dma_pkt);
return 0;
}
user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;
user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;
sgt = userptr->sgt;
dma_desc_cnt = 0;
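/*
 * Expand the single user LIN_DMA packet into one packet per merged SG
 * chunk, using the same merging rule as goya_get_dma_desc_list_size().
 */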
for_each_sgtable_dma_sg(sgt, sg, count) {
len = sg_dma_len(sg);
dma_addr = sg_dma_address(sg);
if (len == 0)
break;
while ((count + 1) < sgt->nents) {
sg_next_iter = sg_next(sg);
len_next = sg_dma_len(sg_next_iter);
dma_addr_next = sg_dma_address(sg_next_iter);
if (len_next == 0)
break;
if ((dma_addr + len == dma_addr_next) &&
(len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
len += len_next;
count++;
sg = sg_next_iter;
} else {
break;
}
}
ctl = le32_to_cpu(user_dma_pkt->ctl);
if (likely(dma_desc_cnt))
ctl &= ~GOYA_PKT_CTL_EB_MASK;
ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
new_dma_pkt->ctl = cpu_to_le32(ctl);
new_dma_pkt->tsize = cpu_to_le32((u32) len);
if (dir == DMA_TO_DEVICE) {
new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
} else {
new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
}
if (!user_memset)
device_memory_addr += len;
dma_desc_cnt++;
new_dma_pkt++;
}
if (!dma_desc_cnt) {
dev_err(hdev->dev,
"Error of 0 SG entries when patching DMA packet\n");
return -EFAULT;
}
/* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
new_dma_pkt--;
new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);
*new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
return 0;
}
static int goya_patch_cb(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
u32 cb_parsed_length = 0;
u32 cb_patched_cur_length = 0;
int rc = 0;
/* user_cb_size is more than 0 so the loop will always be executed */
while (cb_parsed_length < parser->user_cb_size) {
enum packet_id pkt_id;
u16 pkt_size;
u32 new_pkt_size = 0;
struct goya_packet *user_pkt, *kernel_pkt;
user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
kernel_pkt = parser->patched_cb->kernel_address +
cb_patched_cur_length;
pkt_id = (enum packet_id) (
(le64_to_cpu(user_pkt->header) &
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
if (!validate_packet_id(pkt_id)) {
dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
rc = -EINVAL;
break;
}
pkt_size = goya_packet_sizes[pkt_id];
cb_parsed_length += pkt_size;
if (cb_parsed_length > parser->user_cb_size) {
dev_err(hdev->dev,
"packet 0x%x is out of CB boundary\n", pkt_id);
rc = -EINVAL;
break;
}
switch (pkt_id) {
case PACKET_LIN_DMA:
rc = goya_patch_dma_packet(hdev, parser,
(struct packet_lin_dma *) user_pkt,
(struct packet_lin_dma *) kernel_pkt,
&new_pkt_size);
cb_patched_cur_length += new_pkt_size;
break;
case PACKET_WREG_32:
memcpy(kernel_pkt, user_pkt, pkt_size);
cb_patched_cur_length += pkt_size;
rc = goya_validate_wreg32(hdev, parser,
(struct packet_wreg32 *) kernel_pkt);
break;
case PACKET_WREG_BULK:
dev_err(hdev->dev,
"User not allowed to use WREG_BULK\n");
rc = -EPERM;
break;
case PACKET_MSG_PROT:
dev_err(hdev->dev,
"User not allowed to use MSG_PROT\n");
rc = -EPERM;
break;
case PACKET_CP_DMA:
dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
rc = -EPERM;
break;
case PACKET_STOP:
dev_err(hdev->dev, "User not allowed to use STOP\n");
rc = -EPERM;
break;
case PACKET_MSG_LONG:
case PACKET_MSG_SHORT:
case PACKET_FENCE:
case PACKET_NOP:
memcpy(kernel_pkt, user_pkt, pkt_size);
cb_patched_cur_length += pkt_size;
break;
default:
dev_err(hdev->dev, "Invalid packet header 0x%x\n",
pkt_id);
rc = -EINVAL;
break;
}
if (rc)
break;
}
return rc;
}
static int goya_parse_cb_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
u64 handle;
u32 patched_cb_size;
struct hl_cb *user_cb;
int rc;
/*
* The new CB should have space at the end for two MSG_PROT packets:
* 1. A packet that will act as a completion packet
* 2. A packet that will generate MSI-X interrupt
*/
parser->patched_cb_size = parser->user_cb_size +
sizeof(struct packet_msg_prot) * 2;
rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
&handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n",
rc);
return rc;
}
parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
/* hl_cb_get should never fail here */
if (!parser->patched_cb) {
dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
rc = -EFAULT;
goto out;
}
/*
* The check that parser->user_cb_size <= parser->user_cb->size was done
* in validate_queue_index().
*/
memcpy(parser->patched_cb->kernel_address,
parser->user_cb->kernel_address,
parser->user_cb_size);
patched_cb_size = parser->patched_cb_size;
/* validate patched CB instead of user CB */
user_cb = parser->user_cb;
parser->user_cb = parser->patched_cb;
rc = goya_validate_cb(hdev, parser, true);
parser->user_cb = user_cb;
if (rc) {
hl_cb_put(parser->patched_cb);
goto out;
}
if (patched_cb_size != parser->patched_cb_size) {
dev_err(hdev->dev, "user CB size mismatch\n");
hl_cb_put(parser->patched_cb);
rc = -EINVAL;
goto out;
}
out:
/*
* Always call cb destroy here because we still hold one reference
* to it from the earlier cb_get call. After the job is completed,
* cb_put will release it, but here we want to remove it from the
* idr
*/
hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
return rc;
}
static int goya_parse_cb_no_mmu(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
u64 handle;
int rc;
rc = goya_validate_cb(hdev, parser, false);
if (rc)
goto free_userptr;
rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
parser->patched_cb_size, false, false,
&handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
goto free_userptr;
}
parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
/* hl_cb_get should never fail here */
if (!parser->patched_cb) {
dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
rc = -EFAULT;
goto out;
}
rc = goya_patch_cb(hdev, parser);
if (rc)
hl_cb_put(parser->patched_cb);
out:
/*
* Always call cb destroy here because we still hold one reference
* to it from the earlier cb_get call. After the job is completed,
* cb_put will release it, but here we want to remove it from the
* idr
*/
hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
free_userptr:
if (rc)
hl_userptr_delete_list(hdev, parser->job_userptr_list);
return rc;
}
static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
struct hl_cs_parser *parser)
{
struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
struct goya_device *goya = hdev->asic_specific;
if (goya->hw_cap_initialized & HW_CAP_MMU)
return 0;
/* For internal queue jobs, just check if CB address is valid */
if (hl_mem_area_inside_range(
(u64) (uintptr_t) parser->user_cb,
parser->user_cb_size,
asic_prop->sram_user_base_address,
asic_prop->sram_end_address))
return 0;
if (hl_mem_area_inside_range(
(u64) (uintptr_t) parser->user_cb,
parser->user_cb_size,
asic_prop->dram_user_base_address,
asic_prop->dram_end_address))
return 0;
dev_err(hdev->dev,
"Internal CB address 0x%px + 0x%x is not in SRAM nor in DRAM\n",
parser->user_cb, parser->user_cb_size);
return -EFAULT;
}
int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
{
struct goya_device *goya = hdev->asic_specific;
if (parser->queue_type == QUEUE_TYPE_INT)
return goya_parse_cb_no_ext_queue(hdev, parser);
if (goya->hw_cap_initialized & HW_CAP_MMU)
return goya_parse_cb_mmu(hdev, parser);
else
return goya_parse_cb_no_mmu(hdev, parser);
}
void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address,
u32 len, u32 original_len, u64 cq_addr, u32 cq_val,
u32 msix_vec, bool eb)
{
struct packet_msg_prot *cq_pkt;
u32 tmp;
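/*
 * Fill the two MSG_PROT packets at the tail of the CB: the first writes
 * cq_val to the completion queue address, the second writes the MSI-X
 * vector number to the PCIe doorbell register to raise the interrupt.
 */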
cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
(1 << GOYA_PKT_CTL_EB_SHIFT) |
(1 << GOYA_PKT_CTL_MB_SHIFT);
cq_pkt->ctl = cpu_to_le32(tmp);
cq_pkt->value = cpu_to_le32(cq_val);
cq_pkt->addr = cpu_to_le64(cq_addr);
cq_pkt++;
tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
(1 << GOYA_PKT_CTL_MB_SHIFT);
cq_pkt->ctl = cpu_to_le32(tmp);
cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
}
void goya_update_eq_ci(struct hl_device *hdev, u32 val)
{
WREG32(mmCPU_EQ_CI, val);
}
void goya_restore_phase_topology(struct hl_device *hdev)
{
}
static void goya_clear_sm_regs(struct hl_device *hdev)
{
int i, num_of_sob_in_longs, num_of_mon_in_longs;
num_of_sob_in_longs =
((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
num_of_mon_in_longs =
((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
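/* Zero all sync objects and monitor status registers, 4 bytes at a time */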
for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
/* Flush all WREG to prevent race */
i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
}
static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, void *blob_addr)
{
dev_err(hdev->dev, "Reading via DMA is unimplemented yet\n");
return -EPERM;
}
static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
{
struct goya_device *goya = hdev->asic_specific;
if (hdev->reset_info.hard_reset_pending)
return U64_MAX;
return readq(hdev->pcie_bar[DDR_BAR_ID] +
(addr - goya->ddr_bar_cur_addr));
}
static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
{
struct goya_device *goya = hdev->asic_specific;
if (hdev->reset_info.hard_reset_pending)
return;
writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
(addr - goya->ddr_bar_cur_addr));
}
static const char *_goya_get_event_desc(u16 event_type)
{
switch (event_type) {
case GOYA_ASYNC_EVENT_ID_PCIE_IF:
return "PCIe_if";
case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
return "TPC%d_ecc";
case GOYA_ASYNC_EVENT_ID_MME_ECC:
return "MME_ecc";
case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
return "MME_ecc_ext";
case GOYA_ASYNC_EVENT_ID_MMU_ECC:
return "MMU_ecc";
case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
return "DMA_macro";
case GOYA_ASYNC_EVENT_ID_DMA_ECC:
return "DMA_ecc";
case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
return "CPU_if_ecc";
case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
return "PSOC_mem";
case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
return "PSOC_coresight";
case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
return "SRAM%d";
case GOYA_ASYNC_EVENT_ID_GIC500:
return "GIC500";
case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
return "PLL%d";
case GOYA_ASYNC_EVENT_ID_AXI_ECC:
return "AXI_ecc";
case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
return "L2_ram_ecc";
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
return "PSOC_gpio_05_sw_reset";
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
return "PSOC_gpio_10_vrhot_icrit";
case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
return "PCIe_dec";
case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
return "TPC%d_dec";
case GOYA_ASYNC_EVENT_ID_MME_WACS:
return "MME_wacs";
case GOYA_ASYNC_EVENT_ID_MME_WACSD:
return "MME_wacsd";
case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
return "CPU_axi_splitter";
case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
return "PSOC_axi_dec";
case GOYA_ASYNC_EVENT_ID_PSOC:
return "PSOC";
case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
return "TPC%d_krn_err";
case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
return "TPC%d_cq";
case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
return "TPC%d_qm";
case GOYA_ASYNC_EVENT_ID_MME_QM:
return "MME_qm";
case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
return "MME_cq";
case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
return "DMA%d_qm";
case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
return "DMA%d_ch";
case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
return "TPC%d_bmon_spmu";
case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
return "DMA_bm_ch%d";
case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
return "POWER_ENV_S";
case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
return "POWER_ENV_E";
case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
return "THERMAL_ENV_S";
case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
return "THERMAL_ENV_E";
case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
return "QUEUE_OUT_OF_SYNC";
default:
return "N/A";
}
}
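/*
 * goya_get_event_desc() - format a human-readable description of an async
 * event into @desc. For per-engine events the instance index is derived
 * from the event ID spacing (e.g. TPC ECC IDs are 3 apart, KRN_ERR and
 * BMON_SPMU IDs are 10 apart) and substituted into the format string
 * returned by _goya_get_event_desc().
 */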
static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
{
u8 index;
switch (event_type) {
case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_ECC) / 3;
snprintf(desc, size, _goya_get_event_desc(event_type), index);
break;
case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
index = event_type - GOYA_ASYNC_EVENT_ID_SRAM0;
snprintf(desc, size, _goya_get_event_desc(event_type), index);
break;
case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
index = event_type - GOYA_ASYNC_EVENT_ID_PLL0;
snprintf(desc, size, _goya_get_event_desc(event_type), index);
break;
case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
snprintf(desc, size, _goya_get_event_desc(event_type), index);
break;
case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
snprintf(desc, size, _goya_get_event_desc(event_type), index);
break;
case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
snprintf(desc, size, _goya_get_event_desc(event_type), index);
break;
case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
snprintf(desc, size, _goya_get_event_desc(event_type), index);
break;
case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
snprintf(desc, size, _goya_get_event_desc(event_type), index);
break;
case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
snprintf(desc, size, _goya_get_event_desc(event_type), index);
break;
case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU) / 10;
snprintf(desc, size, _goya_get_event_desc(event_type), index);
break;
case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
index = event_type - GOYA_ASYNC_EVENT_ID_DMA_BM_CH0;
snprintf(desc, size, _goya_get_event_desc(event_type), index);
break;
case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
snprintf(desc, size, _goya_get_event_desc(event_type));
break;
default:
snprintf(desc, size, _goya_get_event_desc(event_type));
break;
}
}
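/*
 * goya_print_razwi_info() - report illegal LBW/HBW accesses captured by
 * the DMA macro RAZWI logic. Each valid capture is logged (rate-limited)
 * and its valid bit is cleared so the next violation can be latched.
 */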
static void goya_print_razwi_info(struct hl_device *hdev)
{
if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
dev_err_ratelimited(hdev->dev, "Illegal write to LBW\n");
WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
}
if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
dev_err_ratelimited(hdev->dev, "Illegal read from LBW\n");
WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
}
if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
dev_err_ratelimited(hdev->dev, "Illegal write to HBW\n");
WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
}
if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
dev_err_ratelimited(hdev->dev, "Illegal read from HBW\n");
WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
}
}
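/*
 * goya_print_mmu_error_info() - if the MMU latched a page fault,
 * reconstruct the faulting virtual address (upper bits from the capture
 * register, lower 32 bits from the VA register), log it and clear the
 * capture entry.
 */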
static void goya_print_mmu_error_info(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
u64 addr;
u32 val;
if (!(goya->hw_cap_initialized & HW_CAP_MMU))
return;
val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
addr <<= 32;
addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n",
addr);
WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
}
}
static void goya_print_out_of_sync_info(struct hl_device *hdev,
struct cpucp_pkt_sync_err *sync_err)
{
struct hl_hw_queue *q = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
}
static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
bool razwi)
{
char desc[20] = "";
goya_get_event_desc(event_type, desc, sizeof(desc));
dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
if (razwi) {
goya_print_razwi_info(hdev);
goya_print_mmu_error_info(hdev);
}
}
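/*
 * goya_unmask_irq_arr() - ask the device CPU (CPU-CP) to re-enable a whole
 * array of event interrupts using a single UNMASK_RAZWI_IRQ_ARRAY packet.
 * The IRQ array is converted to little-endian and the packet is padded to
 * an 8-byte boundary before being sent.
 */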
static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
size_t irq_arr_size)
{
struct cpucp_unmask_irq_arr_packet *pkt;
size_t total_pkt_size;
u64 result;
int rc;
int irq_num_entries, irq_arr_index;
__le32 *goya_irq_arr;
total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
irq_arr_size;
/* data should be aligned to 8 bytes so the CPU-CP can copy it */
total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
/* total_pkt_size is cast to u16 later on */
if (total_pkt_size > USHRT_MAX) {
dev_err(hdev->dev, "too many elements in IRQ array\n");
return -EINVAL;
}
pkt = kzalloc(total_pkt_size, GFP_KERNEL);
if (!pkt)
return -ENOMEM;
irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
pkt->length = cpu_to_le32(irq_num_entries);
/* We must perform any necessary endianness conversion on the irq
 * array being passed to the goya hardware
 */
for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
irq_arr_index < irq_num_entries ; irq_arr_index++)
goya_irq_arr[irq_arr_index] =
cpu_to_le32(irq_arr[irq_arr_index]);
pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
total_pkt_size, 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask IRQ array\n");
kfree(pkt);
return rc;
}
static int goya_compute_reset_late_init(struct hl_device *hdev)
{
/*
* Unmask all IRQs since some could have been received
* during the soft reset
*/
return goya_unmask_irq_arr(hdev, goya_all_events,
sizeof(goya_all_events));
}
static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
{
struct cpucp_packet pkt;
u64 result;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(event_type);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
return rc;
}
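/*
 * goya_print_clk_change_info() - track clock-throttling events reported by
 * the firmware. Updates the current/aggregated throttling reason bits and
 * the start/end timestamps for the power and thermal envelopes, and logs a
 * rate-limited message for each transition.
 */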
static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type)
{
ktime_t zero_time = ktime_set(0, 0);
mutex_lock(&hdev->clk_throttling.lock);
switch (event_type) {
case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER;
hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to power consumption\n");
break;
case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
dev_info_ratelimited(hdev->dev,
"Power envelop is safe, back to optimal clock\n");
break;
case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to overheating\n");
break;
case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
dev_info_ratelimited(hdev->dev,
"Thermal envelop is safe, back to optimal clock\n");
break;
default:
dev_err(hdev->dev, "Received invalid clock change event %d\n",
event_type);
break;
}
mutex_unlock(&hdev->clk_throttling.lock);
}
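/*
 * goya_handle_eqe() - main handler for event-queue entries coming from the
 * device CPU. Decodes the event type, updates the event statistics, prints
 * the interrupt information and then either triggers a hard reset (for
 * fatal ECC/reset events) or unmasks the interrupt so it can fire again.
 */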
void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
>> EQ_CTL_EVENT_TYPE_SHIFT);
struct goya_device *goya = hdev->asic_specific;
if (event_type >= GOYA_ASYNC_EVENT_ID_SIZE) {
dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
event_type, GOYA_ASYNC_EVENT_ID_SIZE - 1);
return;
}
goya->events_stat[event_type]++;
goya->events_stat_aggregate[event_type]++;
switch (event_type) {
case GOYA_ASYNC_EVENT_ID_PCIE_IF:
case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
case GOYA_ASYNC_EVENT_ID_MME_ECC:
case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
case GOYA_ASYNC_EVENT_ID_MMU_ECC:
case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
case GOYA_ASYNC_EVENT_ID_DMA_ECC:
case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
case GOYA_ASYNC_EVENT_ID_GIC500:
case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
case GOYA_ASYNC_EVENT_ID_AXI_ECC:
case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
goya_print_irq_info(hdev, event_type, false);
if (hdev->hard_reset_on_fw_events)
hl_device_reset(hdev, (HL_DRV_RESET_HARD |
HL_DRV_RESET_FW_FATAL_ERR));
break;
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
goya_print_irq_info(hdev, event_type, false);
if (hdev->hard_reset_on_fw_events)
hl_device_reset(hdev, HL_DRV_RESET_HARD);
break;
case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
case GOYA_ASYNC_EVENT_ID_MME_WACS:
case GOYA_ASYNC_EVENT_ID_MME_WACSD:
case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
case GOYA_ASYNC_EVENT_ID_PSOC:
case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
case GOYA_ASYNC_EVENT_ID_MME_QM:
case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
goya_print_irq_info(hdev, event_type, true);
goya_unmask_irq(hdev, event_type);
break;
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
goya_print_irq_info(hdev, event_type, false);
goya_unmask_irq(hdev, event_type);
break;
case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
goya_print_clk_change_info(hdev, event_type);
goya_unmask_irq(hdev, event_type);
break;
case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
goya_print_irq_info(hdev, event_type, false);
goya_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
if (hdev->hard_reset_on_fw_events)
hl_device_reset(hdev, HL_DRV_RESET_HARD);
else
hl_fw_unmask_irq(hdev, event_type);
break;
default:
dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
event_type);
break;
}
}
void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
{
struct goya_device *goya = hdev->asic_specific;
if (aggregate) {
*size = (u32) sizeof(goya->events_stat_aggregate);
return goya->events_stat_aggregate;
}
*size = (u32) sizeof(goya->events_stat);
return goya->events_stat;
}
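/*
 * goya_memset_device_memory() - fill a DRAM/SRAM region with @val using
 * the DMA engine. Builds a kernel command buffer of LIN_DMA memset packets
 * (each covering at most 2GB), reserves room for a trailing msg_prot
 * packet, and submits the buffer as a job on QMAN0.
 */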
static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
u64 val, bool is_dram)
{
struct packet_lin_dma *lin_dma_pkt;
struct hl_cs_job *job;
u32 cb_size, ctl;
struct hl_cb *cb;
int rc, lin_dma_pkts_cnt;
lin_dma_pkts_cnt = DIV_ROUND_UP_ULL(size, SZ_2G);
cb_size = lin_dma_pkts_cnt * sizeof(struct packet_lin_dma) +
sizeof(struct packet_msg_prot);
cb = hl_cb_kernel_create(hdev, cb_size, false);
if (!cb)
return -ENOMEM;
lin_dma_pkt = cb->kernel_address;
do {
memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
(1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
(1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
(1 << GOYA_PKT_CTL_RB_SHIFT) |
(1 << GOYA_PKT_CTL_MB_SHIFT));
ctl |= (is_dram ? HL_DMA_HOST_TO_DRAM : HL_DMA_HOST_TO_SRAM) <<
GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
lin_dma_pkt->ctl = cpu_to_le32(ctl);
lin_dma_pkt->src_addr = cpu_to_le64(val);
lin_dma_pkt->dst_addr = cpu_to_le64(addr);
if (lin_dma_pkts_cnt > 1)
lin_dma_pkt->tsize = cpu_to_le32(SZ_2G);
else
lin_dma_pkt->tsize = cpu_to_le32(size);
size -= SZ_2G;
addr += SZ_2G;
lin_dma_pkt++;
} while (--lin_dma_pkts_cnt);
job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
if (!job) {
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
goto release_cb;
}
job->id = 0;
job->user_cb = cb;
atomic_inc(&job->user_cb->cs_cnt);
job->user_cb_size = cb_size;
job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
job->patched_cb = job->user_cb;
job->job_cb_size = job->user_cb_size;
hl_debugfs_add_job(hdev, job);
rc = goya_send_job_on_qman0(hdev, job);
hl_debugfs_remove_job(hdev, job);
kfree(job);
atomic_dec(&cb->cs_cnt);
release_cb:
hl_cb_put(cb);
hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
return rc;
}
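/*
 * goya_context_switch() - prepare the device for a new context: scrub the
 * SRAM with a known pattern and restore the user-configurable DMA
 * write-completion addresses, the TPC PLL relax register and the sync
 * manager registers to their default values.
 */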
int goya_context_switch(struct hl_device *hdev, u32 asid)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 addr = prop->sram_base_address, sob_addr;
u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
u64 val = 0x7777777777777777ull;
int rc, dma_id;
u32 channel_off = mmDMA_CH_1_WR_COMP_ADDR_LO -
mmDMA_CH_0_WR_COMP_ADDR_LO;
rc = goya_memset_device_memory(hdev, addr, size, val, false);
if (rc) {
dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
return rc;
}
/* we need to reset registers that the user is allowed to change */
sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO, lower_32_bits(sob_addr));
for (dma_id = 1 ; dma_id < NUMBER_OF_EXT_HW_QUEUES ; dma_id++) {
sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
(dma_id - 1) * 4;
WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + channel_off * dma_id,
lower_32_bits(sob_addr));
}
WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
goya_clear_sm_regs(hdev);
return 0;
}
static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct goya_device *goya = hdev->asic_specific;
u64 addr = prop->mmu_pgt_addr;
u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
MMU_CACHE_MNG_SIZE;
if (!(goya->hw_cap_initialized & HW_CAP_MMU))
return 0;
return goya_memset_device_memory(hdev, addr, size, 0, true);
}
static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
u64 val = 0x9999999999999999ull;
if (!(goya->hw_cap_initialized & HW_CAP_MMU))
return 0;
return goya_memset_device_memory(hdev, addr, size, val, true);
}
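/*
 * goya_mmu_add_mappings_for_device_cpu() - create the kernel-context MMU
 * mappings used by the device CPU: the CPU FW image is mapped with 2MB
 * pages, and the CPU-accessible DMA region is mapped with a single 2MB
 * page when suitably aligned or with 4KB pages otherwise. Finally the
 * CPU_IF user override is enabled so CPU accesses go through the MMU.
 */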
static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct goya_device *goya = hdev->asic_specific;
s64 off, cpu_off;
int rc;
if (!(goya->hw_cap_initialized & HW_CAP_MMU))
return 0;
for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) {
rc = hl_mmu_map_page(hdev->kernel_ctx,
prop->dram_base_address + off,
prop->dram_base_address + off, PAGE_SIZE_2MB,
(off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE);
if (rc) {
dev_err(hdev->dev, "Map failed for address 0x%llx\n",
prop->dram_base_address + off);
goto unmap;
}
}
if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
rc = hl_mmu_map_page(hdev->kernel_ctx,
VA_CPU_ACCESSIBLE_MEM_ADDR,
hdev->cpu_accessible_dma_address,
PAGE_SIZE_2MB, true);
if (rc) {
dev_err(hdev->dev,
"Map failed for CPU accessible memory\n");
off -= PAGE_SIZE_2MB;
goto unmap;
}
} else {
for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) {
rc = hl_mmu_map_page(hdev->kernel_ctx,
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
hdev->cpu_accessible_dma_address + cpu_off,
PAGE_SIZE_4KB, true);
if (rc) {
dev_err(hdev->dev,
"Map failed for CPU accessible memory\n");
cpu_off -= PAGE_SIZE_4KB;
goto unmap_cpu;
}
}
}
goya_mmu_prepare_reg(hdev, mmCPU_IF_ARUSER_OVR, HL_KERNEL_ASID_ID);
goya_mmu_prepare_reg(hdev, mmCPU_IF_AWUSER_OVR, HL_KERNEL_ASID_ID);
WREG32(mmCPU_IF_ARUSER_OVR_EN, 0x7FF);
WREG32(mmCPU_IF_AWUSER_OVR_EN, 0x7FF);
/* Make sure configuration is flushed to device */
RREG32(mmCPU_IF_AWUSER_OVR_EN);
goya->device_cpu_mmu_mappings_done = true;
return 0;
unmap_cpu:
for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB)
if (hl_mmu_unmap_page(hdev->kernel_ctx,
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
PAGE_SIZE_4KB, true))
dev_warn_ratelimited(hdev->dev,
"failed to unmap address 0x%llx\n",
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
unmap:
for (; off >= 0 ; off -= PAGE_SIZE_2MB)
if (hl_mmu_unmap_page(hdev->kernel_ctx,
prop->dram_base_address + off, PAGE_SIZE_2MB,
true))
dev_warn_ratelimited(hdev->dev,
"failed to unmap address 0x%llx\n",
prop->dram_base_address + off);
return rc;
}
void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct goya_device *goya = hdev->asic_specific;
u32 off, cpu_off;
if (!(goya->hw_cap_initialized & HW_CAP_MMU))
return;
if (!goya->device_cpu_mmu_mappings_done)
return;
WREG32(mmCPU_IF_ARUSER_OVR_EN, 0);
WREG32(mmCPU_IF_AWUSER_OVR_EN, 0);
if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
if (hl_mmu_unmap_page(hdev->kernel_ctx,
VA_CPU_ACCESSIBLE_MEM_ADDR,
PAGE_SIZE_2MB, true))
dev_warn(hdev->dev,
"Failed to unmap CPU accessible memory\n");
} else {
for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB)
if (hl_mmu_unmap_page(hdev->kernel_ctx,
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
PAGE_SIZE_4KB,
(cpu_off + PAGE_SIZE_4KB) >= SZ_2M))
dev_warn_ratelimited(hdev->dev,
"failed to unmap address 0x%llx\n",
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
}
for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB)
if (hl_mmu_unmap_page(hdev->kernel_ctx,
prop->dram_base_address + off, PAGE_SIZE_2MB,
(off + PAGE_SIZE_2MB) >= CPU_FW_IMAGE_SIZE))
dev_warn_ratelimited(hdev->dev,
"Failed to unmap address 0x%llx\n",
prop->dram_base_address + off);
goya->device_cpu_mmu_mappings_done = false;
}
static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
{
struct goya_device *goya = hdev->asic_specific;
int i;
if (!(goya->hw_cap_initialized & HW_CAP_MMU))
return;
if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
dev_crit(hdev->dev, "asid %u is too big\n", asid);
return;
}
/* zero the MMBP and ASID bits and then set the ASID */
for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++)
goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
}
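/*
 * goya_mmu_invalidate_cache() - full (L0 + L1) MMU cache invalidation.
 * Kicks STLB_INV_ALL_START and polls until the bit self-clears, using a
 * longer timeout on the Palladium (pldm) platform.
 */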
static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
u32 flags)
{
struct goya_device *goya = hdev->asic_specific;
u32 status, timeout_usec;
int rc;
if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
hdev->reset_info.hard_reset_pending)
return 0;
/* no need for L1-only invalidation in Goya */
if (!is_hard)
return 0;
if (hdev->pldm)
timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
else
timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
/* L0 & L1 invalidation */
WREG32(mmSTLB_INV_ALL_START, 1);
rc = hl_poll_timeout(
hdev,
mmSTLB_INV_ALL_START,
status,
!status,
1000,
timeout_usec);
return rc;
}
static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
bool is_hard, u32 flags,
u32 asid, u64 va, u64 size)
{
/* Treat as invalidate all because there is no range invalidation
* in Goya
*/
return hl_mmu_invalidate_cache(hdev, is_hard, flags);
}
int goya_send_heartbeat(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
return hl_fw_send_heartbeat(hdev);
}
int goya_cpucp_info_get(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 dram_size;
int rc;
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0,
mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
mmCPU_BOOT_ERR1);
if (rc)
return rc;
dram_size = le64_to_cpu(prop->cpucp_info.dram_size);
if (dram_size) {
if ((!is_power_of_2(dram_size)) ||
(dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
dev_err(hdev->dev,
"F/W reported invalid DRAM size %llu. Trying to use default size\n",
dram_size);
dram_size = DRAM_PHYS_DEFAULT_SIZE;
}
prop->dram_size = dram_size;
prop->dram_end_address = prop->dram_base_address + dram_size;
}
if (!strlen(prop->cpucp_info.card_name))
strncpy(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
return 0;
}
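/*
 * goya_is_device_idle() - check the QM/CMDQ/core status registers of the
 * DMA channels, the TPCs and the MME. Busy engines are marked in @mask_arr
 * and, when @e is provided, a human-readable status table is appended to
 * the engines data buffer.
 */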
static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
struct engines_data *e)
{
const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n";
const char *dma_fmt = "%-5d%-9s%#-14x%#x\n";
unsigned long *mask = (unsigned long *)mask_arr;
u32 qm_glbl_sts0, cmdq_glbl_sts0, dma_core_sts0, tpc_cfg_sts,
mme_arch_sts;
bool is_idle = true, is_eng_idle;
u64 offset;
int i;
if (e)
hl_engine_data_sprintf(e, "\nDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0\n"
"--- ------- ------------ -------------\n");
offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
for (i = 0 ; i < DMA_MAX_NUM ; i++) {
qm_glbl_sts0 = RREG32(mmDMA_QM_0_GLBL_STS0 + i * offset);
dma_core_sts0 = RREG32(mmDMA_CH_0_STS0 + i * offset);
is_eng_idle = IS_DMA_QM_IDLE(qm_glbl_sts0) &&
IS_DMA_IDLE(dma_core_sts0);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(GOYA_ENGINE_ID_DMA_0 + i, mask);
if (e)
hl_engine_data_sprintf(e, dma_fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, dma_core_sts0);
}
if (e)
hl_engine_data_sprintf(e,
"\nTPC is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 CFG_STATUS\n"
"--- ------- ------------ -------------- ----------\n");
offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
for (i = 0 ; i < TPC_MAX_NUM ; i++) {
qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + i * offset);
cmdq_glbl_sts0 = RREG32(mmTPC0_CMDQ_GLBL_STS0 + i * offset);
tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + i * offset);
is_eng_idle = IS_TPC_QM_IDLE(qm_glbl_sts0) &&
IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) &&
IS_TPC_IDLE(tpc_cfg_sts);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(GOYA_ENGINE_ID_TPC_0 + i, mask);
if (e)
hl_engine_data_sprintf(e, fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
}
if (e)
hl_engine_data_sprintf(e,
"\nMME is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 ARCH_STATUS\n"
"--- ------- ------------ -------------- -----------\n");
qm_glbl_sts0 = RREG32(mmMME_QM_GLBL_STS0);
cmdq_glbl_sts0 = RREG32(mmMME_CMDQ_GLBL_STS0);
mme_arch_sts = RREG32(mmMME_ARCH_STATUS);
is_eng_idle = IS_MME_QM_IDLE(qm_glbl_sts0) &&
IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) &&
IS_MME_IDLE(mme_arch_sts);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(GOYA_ENGINE_ID_MME_0, mask);
if (e) {
hl_engine_data_sprintf(e, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
cmdq_glbl_sts0, mme_arch_sts);
hl_engine_data_sprintf(e, "\n");
}
return is_idle;
}
static void goya_hw_queues_lock(struct hl_device *hdev)
__acquires(&goya->hw_queues_lock)
{
struct goya_device *goya = hdev->asic_specific;
spin_lock(&goya->hw_queues_lock);
}
static void goya_hw_queues_unlock(struct hl_device *hdev)
__releases(&goya->hw_queues_lock)
{
struct goya_device *goya = hdev->asic_specific;
spin_unlock(&goya->hw_queues_lock);
}
static u32 goya_get_pci_id(struct hl_device *hdev)
{
return hdev->pdev->device;
}
static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
size_t max_size)
{
struct goya_device *goya = hdev->asic_specific;
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
return hl_fw_get_eeprom_data(hdev, data, max_size);
}
static void goya_cpu_init_scrambler_dram(struct hl_device *hdev)
{
}
static int goya_ctx_init(struct hl_ctx *ctx)
{
if (ctx->asid != HL_KERNEL_ASID_ID)
goya_mmu_prepare(ctx->hdev, ctx->asid);
return 0;
}
static int goya_pre_schedule_cs(struct hl_cs *cs)
{
return 0;
}
u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
{
return cq_idx;
}
static u32 goya_get_signal_cb_size(struct hl_device *hdev)
{
return 0;
}
static u32 goya_get_wait_cb_size(struct hl_device *hdev)
{
return 0;
}
static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
u32 size, bool eb)
{
return 0;
}
static u32 goya_gen_wait_cb(struct hl_device *hdev,
struct hl_gen_wait_properties *prop)
{
return 0;
}
static void goya_reset_sob(struct hl_device *hdev, void *data)
{
}
static void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group)
{
}
u64 goya_get_device_time(struct hl_device *hdev)
{
u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
}
static int goya_collective_wait_init_cs(struct hl_cs *cs)
{
return 0;
}
static int goya_collective_wait_create_jobs(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
u32 collective_engine_id, u32 encaps_signal_offset)
{
return -EINVAL;
}
static void goya_ctx_fini(struct hl_ctx *ctx)
{
}
static int goya_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
u32 *block_size, u32 *block_id)
{
return -EPERM;
}
static int goya_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
u32 block_id, u32 block_size)
{
return -EPERM;
}
static void goya_enable_events_from_fw(struct hl_device *hdev)
{
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
}
static int goya_ack_mmu_page_fault_or_access_error(struct hl_device *hdev, u64 mmu_cap_mask)
{
return -EINVAL;
}
static int goya_map_pll_idx_to_fw_idx(u32 pll_idx)
{
switch (pll_idx) {
case HL_GOYA_CPU_PLL: return CPU_PLL;
case HL_GOYA_PCI_PLL: return PCI_PLL;
case HL_GOYA_MME_PLL: return MME_PLL;
case HL_GOYA_TPC_PLL: return TPC_PLL;
case HL_GOYA_IC_PLL: return IC_PLL;
case HL_GOYA_MC_PLL: return MC_PLL;
case HL_GOYA_EMMC_PLL: return EMMC_PLL;
default: return -EINVAL;
}
}
static int goya_gen_sync_to_engine_map(struct hl_device *hdev,
struct hl_sync_to_engine_map *map)
{
/* Not implemented */
return 0;
}
static int goya_monitor_valid(struct hl_mon_state_dump *mon)
{
/* Not implemented */
return 0;
}
static int goya_print_single_monitor(char **buf, size_t *size, size_t *offset,
struct hl_device *hdev,
struct hl_mon_state_dump *mon)
{
/* Not implemented */
return 0;
}
static int goya_print_fences_single_engine(
struct hl_device *hdev, u64 base_offset, u64 status_base_offset,
enum hl_sync_engine_type engine_type, u32 engine_id, char **buf,
size_t *size, size_t *offset)
{
/* Not implemented */
return 0;
}
static struct hl_state_dump_specs_funcs goya_state_dump_funcs = {
.monitor_valid = goya_monitor_valid,
.print_single_monitor = goya_print_single_monitor,
.gen_sync_to_engine_map = goya_gen_sync_to_engine_map,
.print_fences_single_engine = goya_print_fences_single_engine,
};
static void goya_state_dump_init(struct hl_device *hdev)
{
/* Not implemented */
hdev->state_dump_specs.props = goya_state_dump_specs_props;
hdev->state_dump_specs.funcs = goya_state_dump_funcs;
}
static u32 goya_get_sob_addr(struct hl_device *hdev, u32 sob_id)
{
return 0;
}
static u32 *goya_get_stream_master_qid_arr(void)
{
return NULL;
}
static int goya_get_monitor_dump(struct hl_device *hdev, void *data)
{
return -EOPNOTSUPP;
}
static void goya_check_if_razwi_happened(struct hl_device *hdev)
{
}
static int goya_scrub_device_dram(struct hl_device *hdev, u64 val)
{
return -EOPNOTSUPP;
}
static int goya_set_dram_properties(struct hl_device *hdev)
{
return 0;
}
static int goya_set_binning_masks(struct hl_device *hdev)
{
return 0;
}
static int goya_send_device_activity(struct hl_device *hdev, bool open)
{
return 0;
}
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
.late_init = goya_late_init,
.late_fini = goya_late_fini,
.sw_init = goya_sw_init,
.sw_fini = goya_sw_fini,
.hw_init = goya_hw_init,
.hw_fini = goya_hw_fini,
.halt_engines = goya_halt_engines,
.suspend = goya_suspend,
.resume = goya_resume,
.mmap = goya_mmap,
.ring_doorbell = goya_ring_doorbell,
.pqe_write = goya_pqe_write,
.asic_dma_alloc_coherent = goya_dma_alloc_coherent,
.asic_dma_free_coherent = goya_dma_free_coherent,
.scrub_device_mem = goya_scrub_device_mem,
.scrub_device_dram = goya_scrub_device_dram,
.get_int_queue_base = goya_get_int_queue_base,
.test_queues = goya_test_queues,
.asic_dma_pool_zalloc = goya_dma_pool_zalloc,
.asic_dma_pool_free = goya_dma_pool_free,
.cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
.cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
.hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
.cs_parser = goya_cs_parser,
.asic_dma_map_sgtable = hl_dma_map_sgtable,
.add_end_of_cb_packets = goya_add_end_of_cb_packets,
.update_eq_ci = goya_update_eq_ci,
.context_switch = goya_context_switch,
.restore_phase_topology = goya_restore_phase_topology,
.debugfs_read_dma = goya_debugfs_read_dma,
.add_device_attr = goya_add_device_attr,
.handle_eqe = goya_handle_eqe,
.get_events_stat = goya_get_events_stat,
.read_pte = goya_read_pte,
.write_pte = goya_write_pte,
.mmu_invalidate_cache = goya_mmu_invalidate_cache,
.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
.mmu_prefetch_cache_range = NULL,
.send_heartbeat = goya_send_heartbeat,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,
.compute_reset_late_init = goya_compute_reset_late_init,
.hw_queues_lock = goya_hw_queues_lock,
.hw_queues_unlock = goya_hw_queues_unlock,
.get_pci_id = goya_get_pci_id,
.get_eeprom_data = goya_get_eeprom_data,
.get_monitor_dump = goya_get_monitor_dump,
.send_cpu_message = goya_send_cpu_message,
.pci_bars_map = goya_pci_bars_map,
.init_iatu = goya_init_iatu,
.rreg = hl_rreg,
.wreg = hl_wreg,
.halt_coresight = goya_halt_coresight,
.ctx_init = goya_ctx_init,
.ctx_fini = goya_ctx_fini,
.pre_schedule_cs = goya_pre_schedule_cs,
.get_queue_id_for_cq = goya_get_queue_id_for_cq,
.load_firmware_to_device = goya_load_firmware_to_device,
.load_boot_fit_to_device = goya_load_boot_fit_to_device,
.get_signal_cb_size = goya_get_signal_cb_size,
.get_wait_cb_size = goya_get_wait_cb_size,
.gen_signal_cb = goya_gen_signal_cb,
.gen_wait_cb = goya_gen_wait_cb,
.reset_sob = goya_reset_sob,
.reset_sob_group = goya_reset_sob_group,
.get_device_time = goya_get_device_time,
.pb_print_security_errors = NULL,
.collective_wait_init_cs = goya_collective_wait_init_cs,
.collective_wait_create_jobs = goya_collective_wait_create_jobs,
.get_dec_base_addr = NULL,
.scramble_addr = hl_mmu_scramble_addr,
.descramble_addr = hl_mmu_descramble_addr,
.ack_protection_bits_errors = goya_ack_protection_bits_errors,
.get_hw_block_id = goya_get_hw_block_id,
.hw_block_mmap = goya_block_mmap,
.enable_events_from_fw = goya_enable_events_from_fw,
.ack_mmu_errors = goya_ack_mmu_page_fault_or_access_error,
.map_pll_idx_to_fw_idx = goya_map_pll_idx_to_fw_idx,
.init_firmware_preload_params = goya_init_firmware_preload_params,
.init_firmware_loader = goya_init_firmware_loader,
.init_cpu_scrambler_dram = goya_cpu_init_scrambler_dram,
.state_dump_init = goya_state_dump_init,
.get_sob_addr = &goya_get_sob_addr,
.set_pci_memory_regions = goya_set_pci_memory_regions,
.get_stream_master_qid_arr = goya_get_stream_master_qid_arr,
.check_if_razwi_happened = goya_check_if_razwi_happened,
.mmu_get_real_page_size = hl_mmu_get_real_page_size,
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = goya_set_ddr_bar_base,
.send_device_activity = goya_send_device_activity,
.set_dram_properties = goya_set_dram_properties,
.set_binning_masks = goya_set_binning_masks,
};
/*
 * goya_set_asic_funcs - set Goya function pointers
 *
 * @hdev: pointer to hl_device structure
 *
 */
void goya_set_asic_funcs(struct hl_device *hdev)
{
hdev->asic_funcs = &goya_funcs;
}
| linux-master | drivers/accel/habanalabs/goya/goya.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2019 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "goyaP.h"
#include "../include/goya/goya_coresight.h"
#include "../include/goya/asic_reg/goya_regs.h"
#include "../include/goya/asic_reg/goya_masks.h"
#include <uapi/drm/habanalabs_accel.h>
#define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 100)
#define SPMU_SECTION_SIZE DMA_CH_0_CS_SPMU_MAX_OFFSET
#define SPMU_EVENT_TYPES_OFFSET 0x400
#define SPMU_MAX_COUNTERS 6
static u64 debug_stm_regs[GOYA_STM_LAST + 1] = {
[GOYA_STM_CPU] = mmCPU_STM_BASE,
[GOYA_STM_DMA_CH_0_CS] = mmDMA_CH_0_CS_STM_BASE,
[GOYA_STM_DMA_CH_1_CS] = mmDMA_CH_1_CS_STM_BASE,
[GOYA_STM_DMA_CH_2_CS] = mmDMA_CH_2_CS_STM_BASE,
[GOYA_STM_DMA_CH_3_CS] = mmDMA_CH_3_CS_STM_BASE,
[GOYA_STM_DMA_CH_4_CS] = mmDMA_CH_4_CS_STM_BASE,
[GOYA_STM_DMA_MACRO_CS] = mmDMA_MACRO_CS_STM_BASE,
[GOYA_STM_MME1_SBA] = mmMME1_SBA_STM_BASE,
[GOYA_STM_MME3_SBB] = mmMME3_SBB_STM_BASE,
[GOYA_STM_MME4_WACS2] = mmMME4_WACS2_STM_BASE,
[GOYA_STM_MME4_WACS] = mmMME4_WACS_STM_BASE,
[GOYA_STM_MMU_CS] = mmMMU_CS_STM_BASE,
[GOYA_STM_PCIE] = mmPCIE_STM_BASE,
[GOYA_STM_PSOC] = mmPSOC_STM_BASE,
[GOYA_STM_TPC0_EML] = mmTPC0_EML_STM_BASE,
[GOYA_STM_TPC1_EML] = mmTPC1_EML_STM_BASE,
[GOYA_STM_TPC2_EML] = mmTPC2_EML_STM_BASE,
[GOYA_STM_TPC3_EML] = mmTPC3_EML_STM_BASE,
[GOYA_STM_TPC4_EML] = mmTPC4_EML_STM_BASE,
[GOYA_STM_TPC5_EML] = mmTPC5_EML_STM_BASE,
[GOYA_STM_TPC6_EML] = mmTPC6_EML_STM_BASE,
[GOYA_STM_TPC7_EML] = mmTPC7_EML_STM_BASE
};
static u64 debug_etf_regs[GOYA_ETF_LAST + 1] = {
[GOYA_ETF_CPU_0] = mmCPU_ETF_0_BASE,
[GOYA_ETF_CPU_1] = mmCPU_ETF_1_BASE,
[GOYA_ETF_CPU_TRACE] = mmCPU_ETF_TRACE_BASE,
[GOYA_ETF_DMA_CH_0_CS] = mmDMA_CH_0_CS_ETF_BASE,
[GOYA_ETF_DMA_CH_1_CS] = mmDMA_CH_1_CS_ETF_BASE,
[GOYA_ETF_DMA_CH_2_CS] = mmDMA_CH_2_CS_ETF_BASE,
[GOYA_ETF_DMA_CH_3_CS] = mmDMA_CH_3_CS_ETF_BASE,
[GOYA_ETF_DMA_CH_4_CS] = mmDMA_CH_4_CS_ETF_BASE,
[GOYA_ETF_DMA_MACRO_CS] = mmDMA_MACRO_CS_ETF_BASE,
[GOYA_ETF_MME1_SBA] = mmMME1_SBA_ETF_BASE,
[GOYA_ETF_MME3_SBB] = mmMME3_SBB_ETF_BASE,
[GOYA_ETF_MME4_WACS2] = mmMME4_WACS2_ETF_BASE,
[GOYA_ETF_MME4_WACS] = mmMME4_WACS_ETF_BASE,
[GOYA_ETF_MMU_CS] = mmMMU_CS_ETF_BASE,
[GOYA_ETF_PCIE] = mmPCIE_ETF_BASE,
[GOYA_ETF_PSOC] = mmPSOC_ETF_BASE,
[GOYA_ETF_TPC0_EML] = mmTPC0_EML_ETF_BASE,
[GOYA_ETF_TPC1_EML] = mmTPC1_EML_ETF_BASE,
[GOYA_ETF_TPC2_EML] = mmTPC2_EML_ETF_BASE,
[GOYA_ETF_TPC3_EML] = mmTPC3_EML_ETF_BASE,
[GOYA_ETF_TPC4_EML] = mmTPC4_EML_ETF_BASE,
[GOYA_ETF_TPC5_EML] = mmTPC5_EML_ETF_BASE,
[GOYA_ETF_TPC6_EML] = mmTPC6_EML_ETF_BASE,
[GOYA_ETF_TPC7_EML] = mmTPC7_EML_ETF_BASE
};
static u64 debug_funnel_regs[GOYA_FUNNEL_LAST + 1] = {
[GOYA_FUNNEL_CPU] = mmCPU_FUNNEL_BASE,
[GOYA_FUNNEL_DMA_CH_6_1] = mmDMA_CH_FUNNEL_6_1_BASE,
[GOYA_FUNNEL_DMA_MACRO_3_1] = mmDMA_MACRO_FUNNEL_3_1_BASE,
[GOYA_FUNNEL_MME0_RTR] = mmMME0_RTR_FUNNEL_BASE,
[GOYA_FUNNEL_MME1_RTR] = mmMME1_RTR_FUNNEL_BASE,
[GOYA_FUNNEL_MME2_RTR] = mmMME2_RTR_FUNNEL_BASE,
[GOYA_FUNNEL_MME3_RTR] = mmMME3_RTR_FUNNEL_BASE,
[GOYA_FUNNEL_MME4_RTR] = mmMME4_RTR_FUNNEL_BASE,
[GOYA_FUNNEL_MME5_RTR] = mmMME5_RTR_FUNNEL_BASE,
[GOYA_FUNNEL_PCIE] = mmPCIE_FUNNEL_BASE,
[GOYA_FUNNEL_PSOC] = mmPSOC_FUNNEL_BASE,
[GOYA_FUNNEL_TPC0_EML] = mmTPC0_EML_FUNNEL_BASE,
[GOYA_FUNNEL_TPC1_EML] = mmTPC1_EML_FUNNEL_BASE,
[GOYA_FUNNEL_TPC1_RTR] = mmTPC1_RTR_FUNNEL_BASE,
[GOYA_FUNNEL_TPC2_EML] = mmTPC2_EML_FUNNEL_BASE,
[GOYA_FUNNEL_TPC2_RTR] = mmTPC2_RTR_FUNNEL_BASE,
[GOYA_FUNNEL_TPC3_EML] = mmTPC3_EML_FUNNEL_BASE,
[GOYA_FUNNEL_TPC3_RTR] = mmTPC3_RTR_FUNNEL_BASE,
[GOYA_FUNNEL_TPC4_EML] = mmTPC4_EML_FUNNEL_BASE,
[GOYA_FUNNEL_TPC4_RTR] = mmTPC4_RTR_FUNNEL_BASE,
[GOYA_FUNNEL_TPC5_EML] = mmTPC5_EML_FUNNEL_BASE,
[GOYA_FUNNEL_TPC5_RTR] = mmTPC5_RTR_FUNNEL_BASE,
[GOYA_FUNNEL_TPC6_EML] = mmTPC6_EML_FUNNEL_BASE,
[GOYA_FUNNEL_TPC6_RTR] = mmTPC6_RTR_FUNNEL_BASE,
[GOYA_FUNNEL_TPC7_EML] = mmTPC7_EML_FUNNEL_BASE
};
static u64 debug_bmon_regs[GOYA_BMON_LAST + 1] = {
[GOYA_BMON_CPU_RD] = mmCPU_RD_BMON_BASE,
[GOYA_BMON_CPU_WR] = mmCPU_WR_BMON_BASE,
[GOYA_BMON_DMA_CH_0_0] = mmDMA_CH_0_BMON_0_BASE,
[GOYA_BMON_DMA_CH_0_1] = mmDMA_CH_0_BMON_1_BASE,
[GOYA_BMON_DMA_CH_1_0] = mmDMA_CH_1_BMON_0_BASE,
[GOYA_BMON_DMA_CH_1_1] = mmDMA_CH_1_BMON_1_BASE,
[GOYA_BMON_DMA_CH_2_0] = mmDMA_CH_2_BMON_0_BASE,
[GOYA_BMON_DMA_CH_2_1] = mmDMA_CH_2_BMON_1_BASE,
[GOYA_BMON_DMA_CH_3_0] = mmDMA_CH_3_BMON_0_BASE,
[GOYA_BMON_DMA_CH_3_1] = mmDMA_CH_3_BMON_1_BASE,
[GOYA_BMON_DMA_CH_4_0] = mmDMA_CH_4_BMON_0_BASE,
[GOYA_BMON_DMA_CH_4_1] = mmDMA_CH_4_BMON_1_BASE,
[GOYA_BMON_DMA_MACRO_0] = mmDMA_MACRO_BMON_0_BASE,
[GOYA_BMON_DMA_MACRO_1] = mmDMA_MACRO_BMON_1_BASE,
[GOYA_BMON_DMA_MACRO_2] = mmDMA_MACRO_BMON_2_BASE,
[GOYA_BMON_DMA_MACRO_3] = mmDMA_MACRO_BMON_3_BASE,
[GOYA_BMON_DMA_MACRO_4] = mmDMA_MACRO_BMON_4_BASE,
[GOYA_BMON_DMA_MACRO_5] = mmDMA_MACRO_BMON_5_BASE,
[GOYA_BMON_DMA_MACRO_6] = mmDMA_MACRO_BMON_6_BASE,
[GOYA_BMON_DMA_MACRO_7] = mmDMA_MACRO_BMON_7_BASE,
[GOYA_BMON_MME1_SBA_0] = mmMME1_SBA_BMON0_BASE,
[GOYA_BMON_MME1_SBA_1] = mmMME1_SBA_BMON1_BASE,
[GOYA_BMON_MME3_SBB_0] = mmMME3_SBB_BMON0_BASE,
[GOYA_BMON_MME3_SBB_1] = mmMME3_SBB_BMON1_BASE,
[GOYA_BMON_MME4_WACS2_0] = mmMME4_WACS2_BMON0_BASE,
[GOYA_BMON_MME4_WACS2_1] = mmMME4_WACS2_BMON1_BASE,
[GOYA_BMON_MME4_WACS2_2] = mmMME4_WACS2_BMON2_BASE,
[GOYA_BMON_MME4_WACS_0] = mmMME4_WACS_BMON0_BASE,
[GOYA_BMON_MME4_WACS_1] = mmMME4_WACS_BMON1_BASE,
[GOYA_BMON_MME4_WACS_2] = mmMME4_WACS_BMON2_BASE,
[GOYA_BMON_MME4_WACS_3] = mmMME4_WACS_BMON3_BASE,
[GOYA_BMON_MME4_WACS_4] = mmMME4_WACS_BMON4_BASE,
[GOYA_BMON_MME4_WACS_5] = mmMME4_WACS_BMON5_BASE,
[GOYA_BMON_MME4_WACS_6] = mmMME4_WACS_BMON6_BASE,
[GOYA_BMON_MMU_0] = mmMMU_BMON_0_BASE,
[GOYA_BMON_MMU_1] = mmMMU_BMON_1_BASE,
[GOYA_BMON_PCIE_MSTR_RD] = mmPCIE_BMON_MSTR_RD_BASE,
[GOYA_BMON_PCIE_MSTR_WR] = mmPCIE_BMON_MSTR_WR_BASE,
[GOYA_BMON_PCIE_SLV_RD] = mmPCIE_BMON_SLV_RD_BASE,
[GOYA_BMON_PCIE_SLV_WR] = mmPCIE_BMON_SLV_WR_BASE,
[GOYA_BMON_TPC0_EML_0] = mmTPC0_EML_BUSMON_0_BASE,
[GOYA_BMON_TPC0_EML_1] = mmTPC0_EML_BUSMON_1_BASE,
[GOYA_BMON_TPC0_EML_2] = mmTPC0_EML_BUSMON_2_BASE,
[GOYA_BMON_TPC0_EML_3] = mmTPC0_EML_BUSMON_3_BASE,
[GOYA_BMON_TPC1_EML_0] = mmTPC1_EML_BUSMON_0_BASE,
[GOYA_BMON_TPC1_EML_1] = mmTPC1_EML_BUSMON_1_BASE,
[GOYA_BMON_TPC1_EML_2] = mmTPC1_EML_BUSMON_2_BASE,
[GOYA_BMON_TPC1_EML_3] = mmTPC1_EML_BUSMON_3_BASE,
[GOYA_BMON_TPC2_EML_0] = mmTPC2_EML_BUSMON_0_BASE,
[GOYA_BMON_TPC2_EML_1] = mmTPC2_EML_BUSMON_1_BASE,
[GOYA_BMON_TPC2_EML_2] = mmTPC2_EML_BUSMON_2_BASE,
[GOYA_BMON_TPC2_EML_3] = mmTPC2_EML_BUSMON_3_BASE,
[GOYA_BMON_TPC3_EML_0] = mmTPC3_EML_BUSMON_0_BASE,
[GOYA_BMON_TPC3_EML_1] = mmTPC3_EML_BUSMON_1_BASE,
[GOYA_BMON_TPC3_EML_2] = mmTPC3_EML_BUSMON_2_BASE,
[GOYA_BMON_TPC3_EML_3] = mmTPC3_EML_BUSMON_3_BASE,
[GOYA_BMON_TPC4_EML_0] = mmTPC4_EML_BUSMON_0_BASE,
[GOYA_BMON_TPC4_EML_1] = mmTPC4_EML_BUSMON_1_BASE,
[GOYA_BMON_TPC4_EML_2] = mmTPC4_EML_BUSMON_2_BASE,
[GOYA_BMON_TPC4_EML_3] = mmTPC4_EML_BUSMON_3_BASE,
[GOYA_BMON_TPC5_EML_0] = mmTPC5_EML_BUSMON_0_BASE,
[GOYA_BMON_TPC5_EML_1] = mmTPC5_EML_BUSMON_1_BASE,
[GOYA_BMON_TPC5_EML_2] = mmTPC5_EML_BUSMON_2_BASE,
[GOYA_BMON_TPC5_EML_3] = mmTPC5_EML_BUSMON_3_BASE,
[GOYA_BMON_TPC6_EML_0] = mmTPC6_EML_BUSMON_0_BASE,
[GOYA_BMON_TPC6_EML_1] = mmTPC6_EML_BUSMON_1_BASE,
[GOYA_BMON_TPC6_EML_2] = mmTPC6_EML_BUSMON_2_BASE,
[GOYA_BMON_TPC6_EML_3] = mmTPC6_EML_BUSMON_3_BASE,
[GOYA_BMON_TPC7_EML_0] = mmTPC7_EML_BUSMON_0_BASE,
[GOYA_BMON_TPC7_EML_1] = mmTPC7_EML_BUSMON_1_BASE,
[GOYA_BMON_TPC7_EML_2] = mmTPC7_EML_BUSMON_2_BASE,
[GOYA_BMON_TPC7_EML_3] = mmTPC7_EML_BUSMON_3_BASE
};
static u64 debug_spmu_regs[GOYA_SPMU_LAST + 1] = {
[GOYA_SPMU_DMA_CH_0_CS] = mmDMA_CH_0_CS_SPMU_BASE,
[GOYA_SPMU_DMA_CH_1_CS] = mmDMA_CH_1_CS_SPMU_BASE,
[GOYA_SPMU_DMA_CH_2_CS] = mmDMA_CH_2_CS_SPMU_BASE,
[GOYA_SPMU_DMA_CH_3_CS] = mmDMA_CH_3_CS_SPMU_BASE,
[GOYA_SPMU_DMA_CH_4_CS] = mmDMA_CH_4_CS_SPMU_BASE,
[GOYA_SPMU_DMA_MACRO_CS] = mmDMA_MACRO_CS_SPMU_BASE,
[GOYA_SPMU_MME1_SBA] = mmMME1_SBA_SPMU_BASE,
[GOYA_SPMU_MME3_SBB] = mmMME3_SBB_SPMU_BASE,
[GOYA_SPMU_MME4_WACS2] = mmMME4_WACS2_SPMU_BASE,
[GOYA_SPMU_MME4_WACS] = mmMME4_WACS_SPMU_BASE,
[GOYA_SPMU_MMU_CS] = mmMMU_CS_SPMU_BASE,
[GOYA_SPMU_PCIE] = mmPCIE_SPMU_BASE,
[GOYA_SPMU_TPC0_EML] = mmTPC0_EML_SPMU_BASE,
[GOYA_SPMU_TPC1_EML] = mmTPC1_EML_SPMU_BASE,
[GOYA_SPMU_TPC2_EML] = mmTPC2_EML_SPMU_BASE,
[GOYA_SPMU_TPC3_EML] = mmTPC3_EML_SPMU_BASE,
[GOYA_SPMU_TPC4_EML] = mmTPC4_EML_SPMU_BASE,
[GOYA_SPMU_TPC5_EML] = mmTPC5_EML_SPMU_BASE,
[GOYA_SPMU_TPC6_EML] = mmTPC6_EML_SPMU_BASE,
[GOYA_SPMU_TPC7_EML] = mmTPC7_EML_SPMU_BASE
};
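/*
 * goya_coresight_timeout() - poll a coresight register until bit @position
 * becomes set (@up == true) or cleared (@up == false), with a longer
 * timeout on the Palladium (pldm) platform.
 */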
static int goya_coresight_timeout(struct hl_device *hdev, u64 addr,
int position, bool up)
{
int rc;
u32 val, timeout_usec;
if (hdev->pldm)
timeout_usec = GOYA_PLDM_CORESIGHT_TIMEOUT_USEC;
else
timeout_usec = CORESIGHT_TIMEOUT_USEC;
rc = hl_poll_timeout(
hdev,
addr,
val,
up ? val & BIT(position) : !(val & BIT(position)),
1000,
timeout_usec);
if (rc) {
dev_err(hdev->dev,
"Timeout while waiting for coresight, addr: 0x%llx, position: %d, up: %d\n",
addr, position, up);
return -EFAULT;
}
return 0;
}
static int goya_config_stm(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_stm *input;
u64 base_reg;
u32 frequency;
int rc;
if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
dev_err(hdev->dev, "Invalid register index in STM\n");
return -EINVAL;
}
base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
WREG32(base_reg + 0xE80, 0x80004);
WREG32(base_reg + 0xD64, 7);
WREG32(base_reg + 0xD60, 0);
WREG32(base_reg + 0xD00, lower_32_bits(input->he_mask));
WREG32(base_reg + 0xD20, lower_32_bits(input->sp_mask));
WREG32(base_reg + 0xD60, 1);
WREG32(base_reg + 0xD00, upper_32_bits(input->he_mask));
WREG32(base_reg + 0xD20, upper_32_bits(input->sp_mask));
WREG32(base_reg + 0xE70, 0x10);
WREG32(base_reg + 0xE60, 0);
WREG32(base_reg + 0xE64, 0x420000);
WREG32(base_reg + 0xE00, 0xFFFFFFFF);
WREG32(base_reg + 0xE20, 0xFFFFFFFF);
WREG32(base_reg + 0xEF4, input->id);
WREG32(base_reg + 0xDF4, 0x80);
frequency = hdev->asic_prop.psoc_timestamp_frequency;
if (frequency == 0)
frequency = input->frequency;
WREG32(base_reg + 0xE8C, frequency);
WREG32(base_reg + 0xE90, 0x7FF);
WREG32(base_reg + 0xE80, 0x27 | (input->id << 16));
} else {
WREG32(base_reg + 0xE80, 4);
WREG32(base_reg + 0xD64, 0);
WREG32(base_reg + 0xD60, 1);
WREG32(base_reg + 0xD00, 0);
WREG32(base_reg + 0xD20, 0);
WREG32(base_reg + 0xD60, 0);
WREG32(base_reg + 0xE20, 0);
WREG32(base_reg + 0xE00, 0);
WREG32(base_reg + 0xDF4, 0x80);
WREG32(base_reg + 0xE70, 0);
WREG32(base_reg + 0xE60, 0);
WREG32(base_reg + 0xE64, 0);
WREG32(base_reg + 0xE8C, 0);
rc = goya_coresight_timeout(hdev, base_reg + 0xE80, 23, false);
if (rc) {
dev_err(hdev->dev,
"Failed to disable STM on timeout, error %d\n",
rc);
return rc;
}
WREG32(base_reg + 0xE80, 4);
}
return 0;
}
static int goya_config_etf(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_etf *input;
u64 base_reg;
u32 val;
int rc;
if (params->reg_idx >= ARRAY_SIZE(debug_etf_regs)) {
dev_err(hdev->dev, "Invalid register index in ETF\n");
return -EINVAL;
}
base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
val = RREG32(base_reg + 0x304);
val |= 0x1000;
WREG32(base_reg + 0x304, val);
val |= 0x40;
WREG32(base_reg + 0x304, val);
rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false);
if (rc) {
dev_err(hdev->dev,
"Failed to %s ETF on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true);
if (rc) {
dev_err(hdev->dev,
"Failed to %s ETF on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
WREG32(base_reg + 0x20, 0);
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
WREG32(base_reg + 0x34, 0x3FFC);
WREG32(base_reg + 0x28, input->sink_mode);
WREG32(base_reg + 0x304, 0x4001);
WREG32(base_reg + 0x308, 0xA);
WREG32(base_reg + 0x20, 1);
} else {
WREG32(base_reg + 0x34, 0);
WREG32(base_reg + 0x28, 0);
WREG32(base_reg + 0x304, 0);
}
return 0;
}
static int goya_etr_validate_address(struct hl_device *hdev, u64 addr,
u64 size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 range_start, range_end;
if (addr > (addr + size)) {
dev_err(hdev->dev,
"ETR buffer size %llu overflow\n", size);
return false;
}
range_start = prop->dmmu.start_addr;
range_end = prop->dmmu.end_addr;
return hl_mem_area_inside_range(addr, size, range_start, range_end);
}
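/*
 * goya_config_etr() - configure the PSOC ETR trace sink. On enable, the
 * user-supplied buffer is validated against the device MMU DRAM range and
 * programmed as the trace destination; on disable, the final write pointer
 * (RWP/RWPHI) is optionally returned to the caller.
 */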
static int goya_config_etr(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_etr *input;
u32 val;
int rc;
WREG32(mmPSOC_ETR_LAR, CORESIGHT_UNLOCK);
val = RREG32(mmPSOC_ETR_FFCR);
val |= 0x1000;
WREG32(mmPSOC_ETR_FFCR, val);
val |= 0x40;
WREG32(mmPSOC_ETR_FFCR, val);
rc = goya_coresight_timeout(hdev, mmPSOC_ETR_FFCR, 6, false);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
rc = goya_coresight_timeout(hdev, mmPSOC_ETR_STS, 2, true);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
WREG32(mmPSOC_ETR_CTL, 0);
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
if (input->buffer_size == 0) {
dev_err(hdev->dev,
"ETR buffer size should be bigger than 0\n");
return -EINVAL;
}
if (!goya_etr_validate_address(hdev,
input->buffer_address, input->buffer_size)) {
dev_err(hdev->dev, "buffer address is not valid\n");
return -EINVAL;
}
WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
WREG32(mmPSOC_ETR_MODE, input->sink_mode);
if (!hdev->asic_prop.fw_security_enabled) {
/* make ETR not privileged */
val = FIELD_PREP(PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK, 0);
/* make ETR non-secured (inverted logic) */
val |= FIELD_PREP(PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK, 1);
/* burst size 8 */
val |= FIELD_PREP(PSOC_ETR_AXICTL_WRBURSTLEN_MASK, 7);
WREG32(mmPSOC_ETR_AXICTL, val);
}
WREG32(mmPSOC_ETR_DBALO,
lower_32_bits(input->buffer_address));
WREG32(mmPSOC_ETR_DBAHI,
upper_32_bits(input->buffer_address));
WREG32(mmPSOC_ETR_FFCR, 3);
WREG32(mmPSOC_ETR_PSCR, 0xA);
WREG32(mmPSOC_ETR_CTL, 1);
} else {
WREG32(mmPSOC_ETR_BUFWM, 0);
WREG32(mmPSOC_ETR_RSZ, 0x400);
WREG32(mmPSOC_ETR_DBALO, 0);
WREG32(mmPSOC_ETR_DBAHI, 0);
WREG32(mmPSOC_ETR_PSCR, 0);
WREG32(mmPSOC_ETR_MODE, 0);
WREG32(mmPSOC_ETR_FFCR, 0);
if (params->output_size >= sizeof(u64)) {
u32 rwp, rwphi;
/*
* The trace buffer address is 40 bits wide. The end of
* the buffer is set in the RWP register (lower 32
* bits), and in the RWPHI register (upper 8 bits).
*/
rwp = RREG32(mmPSOC_ETR_RWP);
rwphi = RREG32(mmPSOC_ETR_RWPHI) & 0xff;
*(u64 *) params->output = ((u64) rwphi << 32) | rwp;
}
}
return 0;
}
static int goya_config_funnel(struct hl_device *hdev,
struct hl_debug_params *params)
{
u64 base_reg;
if (params->reg_idx >= ARRAY_SIZE(debug_funnel_regs)) {
dev_err(hdev->dev, "Invalid register index in FUNNEL\n");
return -EINVAL;
}
base_reg = debug_funnel_regs[params->reg_idx] - CFG_BASE;
WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
WREG32(base_reg, params->enable ? 0x33F : 0);
return 0;
}
static int goya_config_bmon(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_bmon *input;
u64 base_reg;
u32 pcie_base = 0;
if (params->reg_idx >= ARRAY_SIZE(debug_bmon_regs)) {
dev_err(hdev->dev, "Invalid register index in BMON\n");
return -EINVAL;
}
base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
WREG32(base_reg + 0x104, 1);
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
WREG32(base_reg + 0x200, lower_32_bits(input->start_addr0));
WREG32(base_reg + 0x204, upper_32_bits(input->start_addr0));
WREG32(base_reg + 0x208, lower_32_bits(input->addr_mask0));
WREG32(base_reg + 0x20C, upper_32_bits(input->addr_mask0));
WREG32(base_reg + 0x240, lower_32_bits(input->start_addr1));
WREG32(base_reg + 0x244, upper_32_bits(input->start_addr1));
WREG32(base_reg + 0x248, lower_32_bits(input->addr_mask1));
WREG32(base_reg + 0x24C, upper_32_bits(input->addr_mask1));
WREG32(base_reg + 0x224, 0);
WREG32(base_reg + 0x234, 0);
WREG32(base_reg + 0x30C, input->bw_win);
WREG32(base_reg + 0x308, input->win_capture);
/* Workaround for a PCIe IF BMON bug */
if (params->reg_idx != GOYA_BMON_PCIE_MSTR_RD &&
params->reg_idx != GOYA_BMON_PCIE_MSTR_WR &&
params->reg_idx != GOYA_BMON_PCIE_SLV_RD &&
params->reg_idx != GOYA_BMON_PCIE_SLV_WR)
pcie_base = 0xA000000;
WREG32(base_reg + 0x700, pcie_base | 0xB00 | (input->id << 12));
WREG32(base_reg + 0x708, pcie_base | 0xA00 | (input->id << 12));
WREG32(base_reg + 0x70C, pcie_base | 0xC00 | (input->id << 12));
WREG32(base_reg + 0x100, 0x11);
WREG32(base_reg + 0x304, 0x1);
} else {
WREG32(base_reg + 0x200, 0);
WREG32(base_reg + 0x204, 0);
WREG32(base_reg + 0x208, 0xFFFFFFFF);
WREG32(base_reg + 0x20C, 0xFFFFFFFF);
WREG32(base_reg + 0x240, 0);
WREG32(base_reg + 0x244, 0);
WREG32(base_reg + 0x248, 0xFFFFFFFF);
WREG32(base_reg + 0x24C, 0xFFFFFFFF);
WREG32(base_reg + 0x224, 0xFFFFFFFF);
WREG32(base_reg + 0x234, 0x1070F);
WREG32(base_reg + 0x30C, 0);
WREG32(base_reg + 0x308, 0xFFFF);
WREG32(base_reg + 0x700, 0xA000B00);
WREG32(base_reg + 0x708, 0xA000A00);
WREG32(base_reg + 0x70C, 0xA000C00);
WREG32(base_reg + 0x100, 1);
WREG32(base_reg + 0x304, 0);
WREG32(base_reg + 0x104, 0);
}
return 0;
}
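/*
 * goya_config_spmu() - configure a System Performance Monitoring Unit.
 * On enable, up to SPMU_MAX_COUNTERS event types are programmed; on
 * disable, the counter values, the overflow status and the 64-bit cycle
 * counter are read back into the caller's output array.
 */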
static int goya_config_spmu(struct hl_device *hdev,
struct hl_debug_params *params)
{
u64 base_reg;
struct hl_debug_params_spmu *input = params->input;
u64 *output;
u32 output_arr_len;
u32 events_num;
u32 overflow_idx;
u32 cycle_cnt_idx;
int i;
if (params->reg_idx >= ARRAY_SIZE(debug_spmu_regs)) {
dev_err(hdev->dev, "Invalid register index in SPMU\n");
return -EINVAL;
}
base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
if (input->event_types_num < 3) {
dev_err(hdev->dev,
"not enough event types values for SPMU enable\n");
return -EINVAL;
}
if (input->event_types_num > SPMU_MAX_COUNTERS) {
dev_err(hdev->dev,
"too many event types values for SPMU enable\n");
return -EINVAL;
}
WREG32(base_reg + 0xE04, 0x41013046);
WREG32(base_reg + 0xE04, 0x41013040);
for (i = 0 ; i < input->event_types_num ; i++)
WREG32(base_reg + SPMU_EVENT_TYPES_OFFSET + i * 4,
input->event_types[i]);
WREG32(base_reg + 0xE04, 0x41013041);
WREG32(base_reg + 0xC00, 0x8000003F);
} else {
output = params->output;
output_arr_len = params->output_size / 8;
events_num = output_arr_len - 2;
overflow_idx = output_arr_len - 2;
cycle_cnt_idx = output_arr_len - 1;
if (!output)
return -EINVAL;
if (output_arr_len < 3) {
dev_err(hdev->dev,
"not enough values for SPMU disable\n");
return -EINVAL;
}
if (events_num > SPMU_MAX_COUNTERS) {
dev_err(hdev->dev,
"too many events values for SPMU disable\n");
return -EINVAL;
}
WREG32(base_reg + 0xE04, 0x41013040);
for (i = 0 ; i < events_num ; i++)
output[i] = RREG32(base_reg + i * 8);
output[overflow_idx] = RREG32(base_reg + 0xCC0);
output[cycle_cnt_idx] = RREG32(base_reg + 0xFC);
output[cycle_cnt_idx] <<= 32;
output[cycle_cnt_idx] |= RREG32(base_reg + 0xF8);
WREG32(base_reg + 0xCC0, 0);
}
return 0;
}
int goya_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data)
{
struct hl_debug_params *params = data;
int rc = 0;
switch (params->op) {
case HL_DEBUG_OP_STM:
rc = goya_config_stm(hdev, params);
break;
case HL_DEBUG_OP_ETF:
rc = goya_config_etf(hdev, params);
break;
case HL_DEBUG_OP_ETR:
rc = goya_config_etr(hdev, params);
break;
case HL_DEBUG_OP_FUNNEL:
rc = goya_config_funnel(hdev, params);
break;
case HL_DEBUG_OP_BMON:
rc = goya_config_bmon(hdev, params);
break;
case HL_DEBUG_OP_SPMU:
rc = goya_config_spmu(hdev, params);
break;
case HL_DEBUG_OP_TIMESTAMP:
/* Do nothing as this opcode is deprecated */
break;
default:
dev_err(hdev->dev, "Unknown coresight id %d\n", params->op);
return -EINVAL;
}
/* Perform read from the device to flush all configuration */
RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
return rc;
}
void goya_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_debug_params params = {};
int i, rc;
for (i = GOYA_ETF_FIRST ; i <= GOYA_ETF_LAST ; i++) {
params.reg_idx = i;
rc = goya_config_etf(hdev, &params);
if (rc)
dev_err(hdev->dev, "halt ETF failed, %d/%d\n", rc, i);
}
rc = goya_config_etr(hdev, &params);
if (rc)
dev_err(hdev->dev, "halt ETR failed, %d\n", rc);
}
| linux-master | drivers/accel/habanalabs/goya/goya_coresight.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "goyaP.h"
void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
{
struct goya_device *goya = hdev->asic_specific;
if (!hdev->pdev)
return;
switch (freq) {
case PLL_HIGH:
hl_fw_set_frequency(hdev, HL_GOYA_MME_PLL, hdev->high_pll);
hl_fw_set_frequency(hdev, HL_GOYA_TPC_PLL, hdev->high_pll);
hl_fw_set_frequency(hdev, HL_GOYA_IC_PLL, hdev->high_pll);
break;
case PLL_LOW:
hl_fw_set_frequency(hdev, HL_GOYA_MME_PLL, GOYA_PLL_FREQ_LOW);
hl_fw_set_frequency(hdev, HL_GOYA_TPC_PLL, GOYA_PLL_FREQ_LOW);
hl_fw_set_frequency(hdev, HL_GOYA_IC_PLL, GOYA_PLL_FREQ_LOW);
break;
case PLL_LAST:
hl_fw_set_frequency(hdev, HL_GOYA_MME_PLL, goya->mme_clk);
hl_fw_set_frequency(hdev, HL_GOYA_TPC_PLL, goya->tpc_clk);
hl_fw_set_frequency(hdev, HL_GOYA_IC_PLL, goya->ic_clk);
break;
default:
dev_err(hdev->dev, "unknown frequency setting\n");
}
}
static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
long value;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
value = hl_fw_get_frequency(hdev, HL_GOYA_MME_PLL, false);
if (value < 0)
return value;
return sprintf(buf, "%lu\n", value);
}
static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct hl_device *hdev = dev_get_drvdata(dev);
struct goya_device *goya = hdev->asic_specific;
int rc;
long value;
if (!hl_device_operational(hdev, NULL)) {
count = -ENODEV;
goto fail;
}
if (goya->pm_mng_profile == PM_AUTO) {
count = -EPERM;
goto fail;
}
rc = kstrtoul(buf, 0, &value);
if (rc) {
count = -EINVAL;
goto fail;
}
hl_fw_set_frequency(hdev, HL_GOYA_MME_PLL, value);
goya->mme_clk = value;
fail:
return count;
}
static ssize_t tpc_clk_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
long value;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
value = hl_fw_get_frequency(hdev, HL_GOYA_TPC_PLL, false);
if (value < 0)
return value;
return sprintf(buf, "%lu\n", value);
}
static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct hl_device *hdev = dev_get_drvdata(dev);
struct goya_device *goya = hdev->asic_specific;
int rc;
long value;
if (!hl_device_operational(hdev, NULL)) {
count = -ENODEV;
goto fail;
}
if (goya->pm_mng_profile == PM_AUTO) {
count = -EPERM;
goto fail;
}
rc = kstrtoul(buf, 0, &value);
if (rc) {
count = -EINVAL;
goto fail;
}
hl_fw_set_frequency(hdev, HL_GOYA_TPC_PLL, value);
goya->tpc_clk = value;
fail:
return count;
}
static ssize_t ic_clk_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
long value;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
value = hl_fw_get_frequency(hdev, HL_GOYA_IC_PLL, false);
if (value < 0)
return value;
return sprintf(buf, "%lu\n", value);
}
static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct hl_device *hdev = dev_get_drvdata(dev);
struct goya_device *goya = hdev->asic_specific;
int rc;
long value;
if (!hl_device_operational(hdev, NULL)) {
count = -ENODEV;
goto fail;
}
if (goya->pm_mng_profile == PM_AUTO) {
count = -EPERM;
goto fail;
}
rc = kstrtoul(buf, 0, &value);
if (rc) {
count = -EINVAL;
goto fail;
}
hl_fw_set_frequency(hdev, HL_GOYA_IC_PLL, value);
goya->ic_clk = value;
fail:
return count;
}
static ssize_t mme_clk_curr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
long value;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
value = hl_fw_get_frequency(hdev, HL_GOYA_MME_PLL, true);
if (value < 0)
return value;
return sprintf(buf, "%lu\n", value);
}
static ssize_t tpc_clk_curr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
long value;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
value = hl_fw_get_frequency(hdev, HL_GOYA_TPC_PLL, true);
if (value < 0)
return value;
return sprintf(buf, "%lu\n", value);
}
static ssize_t ic_clk_curr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
long value;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
value = hl_fw_get_frequency(hdev, HL_GOYA_IC_PLL, true);
if (value < 0)
return value;
return sprintf(buf, "%lu\n", value);
}
static ssize_t pm_mng_profile_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
struct goya_device *goya = hdev->asic_specific;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
return sprintf(buf, "%s\n",
(goya->pm_mng_profile == PM_AUTO) ? "auto" :
(goya->pm_mng_profile == PM_MANUAL) ? "manual" :
"unknown");
}
static ssize_t pm_mng_profile_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct hl_device *hdev = dev_get_drvdata(dev);
struct goya_device *goya = hdev->asic_specific;
if (!hl_device_operational(hdev, NULL)) {
count = -ENODEV;
goto out;
}
mutex_lock(&hdev->fpriv_list_lock);
if (hdev->is_compute_ctx_active) {
dev_err(hdev->dev,
"Can't change PM profile while compute context is opened on the device\n");
count = -EPERM;
goto unlock_mutex;
}
if (strncmp("auto", buf, strlen("auto")) == 0) {
/* Make sure we are in LOW PLL when changing modes */
if (goya->pm_mng_profile == PM_MANUAL) {
goya->curr_pll_profile = PLL_HIGH;
goya->pm_mng_profile = PM_AUTO;
goya_set_frequency(hdev, PLL_LOW);
}
} else if (strncmp("manual", buf, strlen("manual")) == 0) {
if (goya->pm_mng_profile == PM_AUTO) {
/* Must release the lock because the work thread also
* takes this lock. But before we release it, set
* the mode to manual so nothing will change if a user
* suddenly opens the device
*/
goya->pm_mng_profile = PM_MANUAL;
mutex_unlock(&hdev->fpriv_list_lock);
/* Flush the current work so we can return to the user
* knowing that he is the only one changing frequencies
*/
if (goya->goya_work)
flush_delayed_work(&goya->goya_work->work_freq);
return count;
}
} else {
dev_err(hdev->dev, "value should be auto or manual\n");
count = -EINVAL;
}
unlock_mutex:
mutex_unlock(&hdev->fpriv_list_lock);
out:
return count;
}
static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
return sprintf(buf, "%u\n", hdev->high_pll);
}
static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct hl_device *hdev = dev_get_drvdata(dev);
long value;
int rc;
if (!hl_device_operational(hdev, NULL)) {
count = -ENODEV;
goto out;
}
rc = kstrtoul(buf, 0, &value);
if (rc) {
count = -EINVAL;
goto out;
}
hdev->high_pll = value;
out:
return count;
}
static DEVICE_ATTR_RW(high_pll);
static DEVICE_ATTR_RW(ic_clk);
static DEVICE_ATTR_RO(ic_clk_curr);
static DEVICE_ATTR_RW(mme_clk);
static DEVICE_ATTR_RO(mme_clk_curr);
static DEVICE_ATTR_RW(pm_mng_profile);
static DEVICE_ATTR_RW(tpc_clk);
static DEVICE_ATTR_RO(tpc_clk_curr);
static struct attribute *goya_clk_dev_attrs[] = {
&dev_attr_high_pll.attr,
&dev_attr_ic_clk.attr,
&dev_attr_ic_clk_curr.attr,
&dev_attr_mme_clk.attr,
&dev_attr_mme_clk_curr.attr,
&dev_attr_pm_mng_profile.attr,
&dev_attr_tpc_clk.attr,
&dev_attr_tpc_clk_curr.attr,
NULL,
};
static ssize_t infineon_ver_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
struct cpucp_info *cpucp_info;
cpucp_info = &hdev->asic_prop.cpucp_info;
return sprintf(buf, "%#04x\n", le32_to_cpu(cpucp_info->infineon_version));
}
static DEVICE_ATTR_RO(infineon_ver);
static struct attribute *goya_vrm_dev_attrs[] = {
&dev_attr_infineon_ver.attr,
NULL,
};
void goya_add_device_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp,
struct attribute_group *dev_vrm_attr_grp)
{
dev_clk_attr_grp->attrs = goya_clk_dev_attrs;
dev_vrm_attr_grp->attrs = goya_vrm_dev_attrs;
}
| linux-master | drivers/accel/habanalabs/goya/goya_hwmgr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2019 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "goyaP.h"
#include "../include/goya/asic_reg/goya_regs.h"
/*
 * goya_pb_set_block - set the given block as protected
 *
 * @hdev: pointer to hl_device structure
 * @base: block base address
 *
 */
static void goya_pb_set_block(struct hl_device *hdev, u64 base)
{
u32 pb_addr = base - CFG_BASE + PROT_BITS_OFFS;
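/*
 * Walk the protection-bits words at the end of the block's 4KB page
 * (starting at PROT_BITS_OFFS) and clear every 32-bit word up to the
 * next 4KB boundary, i.e. mark every register in the block as
 * protected.
 */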
while (pb_addr & 0xFFF) {
WREG32(pb_addr, 0);
pb_addr += 4;
}
}
static void goya_init_mme_protection_bits(struct hl_device *hdev)
{
u32 pb_addr, mask;
u8 word_offset;
/* TODO: change to real reg name when Soc Online is updated */
u64 mmMME_SBB_POWER_ECO1 = 0xDFF60,
mmMME_SBB_POWER_ECO2 = 0xDFF64;
goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_0_BASE);
goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_1_BASE);
goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_2_BASE);
goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_3_BASE);
goya_pb_set_block(hdev, mmSBA_ECC_MEM_BASE);
goya_pb_set_block(hdev, mmSBB_ECC_MEM_BASE);
goya_pb_set_block(hdev, mmMME1_RTR_BASE);
goya_pb_set_block(hdev, mmMME1_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmMME1_WR_REGULATOR_BASE);
goya_pb_set_block(hdev, mmMME2_RTR_BASE);
goya_pb_set_block(hdev, mmMME2_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmMME2_WR_REGULATOR_BASE);
goya_pb_set_block(hdev, mmMME3_RTR_BASE);
goya_pb_set_block(hdev, mmMME3_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmMME3_WR_REGULATOR_BASE);
goya_pb_set_block(hdev, mmMME4_RTR_BASE);
goya_pb_set_block(hdev, mmMME4_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmMME4_WR_REGULATOR_BASE);
goya_pb_set_block(hdev, mmMME5_RTR_BASE);
goya_pb_set_block(hdev, mmMME5_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmMME5_WR_REGULATOR_BASE);
goya_pb_set_block(hdev, mmMME6_RTR_BASE);
goya_pb_set_block(hdev, mmMME6_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmMME6_WR_REGULATOR_BASE);
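/*
 * Per-register protection: each 32-bit protection word covers a
 * 128-byte window of registers, one bit per register at position
 * ((reg & 0x7F) >> 2), and word_offset selects that word within the
 * page's protection-bits area. Writing ~mask clears the bits of the
 * registers listed below, protecting them by the same convention
 * goya_pb_set_block() uses for whole blocks, while leaving all other
 * bits in the word set. The same scheme repeats for the DMA and TPC
 * blocks further down.
 */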
pb_addr = (mmMME_DUMMY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME_DUMMY & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmMME_DUMMY & 0x7F) >> 2);
mask |= 1 << ((mmMME_RESET & 0x7F) >> 2);
mask |= 1 << ((mmMME_STALL & 0x7F) >> 2);
mask |= 1 << ((mmMME_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmMME_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmMME_DBGMEM_ADD & 0x7F) >> 2);
mask |= 1 << ((mmMME_DBGMEM_DATA_WR & 0x7F) >> 2);
mask |= 1 << ((mmMME_DBGMEM_DATA_RD & 0x7F) >> 2);
mask |= 1 << ((mmMME_DBGMEM_CTRL & 0x7F) >> 2);
mask |= 1 << ((mmMME_DBGMEM_RC & 0x7F) >> 2);
mask |= 1 << ((mmMME_LOG_SHADOW & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME_STORE_MAX_CREDIT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME_STORE_MAX_CREDIT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmMME_STORE_MAX_CREDIT & 0x7F) >> 2);
mask |= 1 << ((mmMME_AGU & 0x7F) >> 2);
mask |= 1 << ((mmMME_SBA & 0x7F) >> 2);
mask |= 1 << ((mmMME_SBB & 0x7F) >> 2);
mask |= 1 << ((mmMME_SBC & 0x7F) >> 2);
mask |= 1 << ((mmMME_WBC & 0x7F) >> 2);
mask |= 1 << ((mmMME_SBA_CONTROL_DATA & 0x7F) >> 2);
mask |= 1 << ((mmMME_SBB_CONTROL_DATA & 0x7F) >> 2);
mask |= 1 << ((mmMME_SBC_CONTROL_DATA & 0x7F) >> 2);
mask |= 1 << ((mmMME_WBC_CONTROL_DATA & 0x7F) >> 2);
mask |= 1 << ((mmMME_TE & 0x7F) >> 2);
mask |= 1 << ((mmMME_TE2DEC & 0x7F) >> 2);
mask |= 1 << ((mmMME_REI_STATUS & 0x7F) >> 2);
mask |= 1 << ((mmMME_REI_MASK & 0x7F) >> 2);
mask |= 1 << ((mmMME_SEI_STATUS & 0x7F) >> 2);
mask |= 1 << ((mmMME_SEI_MASK & 0x7F) >> 2);
mask |= 1 << ((mmMME_SPI_STATUS & 0x7F) >> 2);
mask |= 1 << ((mmMME_SPI_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmMME_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmMME_QM_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmMME_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME_QM_CP_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME_QM_CP_STS & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmMME_QM_CP_STS & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_CURRENT_INST_LO & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_CURRENT_INST_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_BARRIER_CFG & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CP_DBG_0 & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_BUF_ADDR & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_PQ_BUF_RDATA & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_BUF_ADDR & 0x7F) >> 2);
mask |= 1 << ((mmMME_QM_CQ_BUF_RDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmMME_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_GLBL_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmMME_CMDQ_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME_CMDQ_CQ_IFIFO_CNT &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmMME_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_STS & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1 << ((mmMME_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CP_DBG_0 & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
mask |= 1 << ((mmMME_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME_SBB_POWER_ECO1 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME_SBB_POWER_ECO1 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmMME_SBB_POWER_ECO1 & 0x7F) >> 2);
mask |= 1 << ((mmMME_SBB_POWER_ECO2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
}
static void goya_init_dma_protection_bits(struct hl_device *hdev)
{
u32 pb_addr, mask;
u8 word_offset;
goya_pb_set_block(hdev, mmDMA_NRTR_BASE);
goya_pb_set_block(hdev, mmDMA_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmDMA_WR_REGULATOR_BASE);
pb_addr = (mmDMA_QM_0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_0_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA_QM_0_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_0_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_0_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA_QM_0_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_0_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_0_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_0_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
goya_pb_set_block(hdev, mmDMA_CH_0_BASE);
pb_addr = (mmDMA_QM_1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_1_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA_QM_1_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_1_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_1_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA_QM_1_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_1_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_1_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_1_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
goya_pb_set_block(hdev, mmDMA_CH_1_BASE);
pb_addr = (mmDMA_QM_2_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_2_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_2_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA_QM_2_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_2_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_2_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA_QM_2_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_2_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_2_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_2_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
goya_pb_set_block(hdev, mmDMA_CH_2_BASE);
pb_addr = (mmDMA_QM_3_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_3_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_3_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA_QM_3_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_3_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_3_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA_QM_3_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_3_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_3_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_3_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
goya_pb_set_block(hdev, mmDMA_CH_3_BASE);
pb_addr = (mmDMA_QM_4_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_4_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_4_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA_QM_4_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_4_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_4_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA_QM_4_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA_QM_4_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmDMA_QM_4_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmDMA_QM_4_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
goya_pb_set_block(hdev, mmDMA_CH_4_BASE);
}
static void goya_init_tpc_protection_bits(struct hl_device *hdev)
{
u32 pb_addr, mask;
u8 word_offset;
goya_pb_set_block(hdev, mmTPC0_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC0_WR_REGULATOR_BASE);
pb_addr = (mmTPC0_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC0_CFG_SEMAPHORE & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC0_CFG_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_AWUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CFG_FUNC_MBIST_CNTRL &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC0_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC0_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC0_QM_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC0_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC0_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_GLBL_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC0_CMDQ_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC0_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1 << ((mmTPC0_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CP_DBG_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
goya_pb_set_block(hdev, mmTPC1_RTR_BASE);
goya_pb_set_block(hdev, mmTPC1_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC1_WR_REGULATOR_BASE);
pb_addr = (mmTPC1_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC1_CFG_SEMAPHORE & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC1_CFG_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_AWUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CFG_FUNC_MBIST_CNTRL & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1 << ((mmTPC1_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC1_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC1_QM_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC1_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC1_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_GLBL_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC1_CMDQ_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC1_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1 << ((mmTPC1_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CP_DBG_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
goya_pb_set_block(hdev, mmTPC2_RTR_BASE);
goya_pb_set_block(hdev, mmTPC2_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC2_WR_REGULATOR_BASE);
pb_addr = (mmTPC2_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC2_CFG_SEMAPHORE & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC2_CFG_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_AWUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CFG_FUNC_MBIST_CNTRL & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1 << ((mmTPC2_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC2_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC2_QM_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC2_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC2_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_GLBL_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC2_CMDQ_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC2_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1 << ((mmTPC2_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CP_DBG_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
goya_pb_set_block(hdev, mmTPC3_RTR_BASE);
goya_pb_set_block(hdev, mmTPC3_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC3_WR_REGULATOR_BASE);
pb_addr = (mmTPC3_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC3_CFG_SEMAPHORE & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC3_CFG_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_AWUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CFG_FUNC_MBIST_CNTRL &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC3_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC3_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC3_QM_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC3_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC3_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_GLBL_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC3_CMDQ_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC3_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1 << ((mmTPC3_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CP_DBG_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
goya_pb_set_block(hdev, mmTPC4_RTR_BASE);
goya_pb_set_block(hdev, mmTPC4_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC4_WR_REGULATOR_BASE);
pb_addr = (mmTPC4_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC4_CFG_SEMAPHORE & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC4_CFG_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_AWUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CFG_FUNC_MBIST_CNTRL &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC4_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC4_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC4_QM_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC4_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC4_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_GLBL_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC4_CMDQ_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC4_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1 << ((mmTPC4_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CP_DBG_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
goya_pb_set_block(hdev, mmTPC5_RTR_BASE);
goya_pb_set_block(hdev, mmTPC5_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC5_WR_REGULATOR_BASE);
pb_addr = (mmTPC5_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC5_CFG_SEMAPHORE & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC5_CFG_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_AWUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CFG_FUNC_MBIST_CNTRL &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC5_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC5_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC5_QM_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC5_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC5_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_GLBL_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC5_CMDQ_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC5_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1 << ((mmTPC5_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CP_DBG_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
goya_pb_set_block(hdev, mmTPC6_RTR_BASE);
goya_pb_set_block(hdev, mmTPC6_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC6_WR_REGULATOR_BASE);
pb_addr = (mmTPC6_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC6_CFG_SEMAPHORE & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC6_CFG_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_AWUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CFG_FUNC_MBIST_CNTRL &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC6_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC6_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC6_QM_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC6_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC6_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_GLBL_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC6_CMDQ_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC6_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1 << ((mmTPC6_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CP_DBG_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
goya_pb_set_block(hdev, mmTPC7_NRTR_BASE);
goya_pb_set_block(hdev, mmTPC7_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC7_WR_REGULATOR_BASE);
pb_addr = (mmTPC7_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC7_CFG_SEMAPHORE & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC7_CFG_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_AWUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CFG_FUNC_MBIST_CNTRL &
PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC7_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC7_QM_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_GLBL_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_SIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_PI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_CI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_ARUSER & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC7_QM_PQ_PUSH0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_PUSH1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_PUSH2 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_PUSH3 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_TSIZE & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_CTL & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC7_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC7_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_GLBL_PROT & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_GLBL_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_GLBL_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC7_CMDQ_CQ_CFG0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_CFG1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_ARUSER & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_STS0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_STS1 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC7_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_STS & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
<< 2;
mask = 1 << ((mmTPC7_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CP_DBG_0 & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
}
/*
* goya_init_protection_bits - Initialize protection bits for specific registers
*
* @hdev: pointer to hl_device structure
*
* All protection bits are 1 by default, which means the registers are not
* protected. Each bit that belongs to a protected register must be cleared
* (set to 0).
*
*/
static void goya_init_protection_bits(struct hl_device *hdev)
{
/*
* In each 4KB block of registers, the last 128 bytes are protection
* bits - a total of 1024 bits, one for each register. Each bit
* corresponds to a specific register, following the order of the
* registers in the block.
* So, to calculate the bit that corresponds to a given register, we
* need to calculate its word offset inside the protection area and
* then the exact bit inside that word (each word is 4 bytes).
*
* Register address:
*
* 31                 12 11           7   6           2  1      0
* -----------------------------------------------------------------
* |      Don't        |    word       |  bit location  |    0    |
* |      care         |   offset      |  inside word   |         |
* -----------------------------------------------------------------
*
	 * Bits 7-11 represent the word offset inside the 128 bytes.
	 * Bits 2-6 represent the bit location inside the word.
*/
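	/*
	 * Illustrative example (hypothetical block offset, not a real Goya
	 * register): assuming PROT_BITS_OFFS selects bits 7-11 (the last 128
	 * bytes of the 4K block), a register at offset 0x12C gives
	 *   word_offset = ((0x12C & PROT_BITS_OFFS) >> 7) << 2 = 0x8
	 *   bit         = (0x12C & 0x7F) >> 2 = 11
	 * so clearing bit 11 of the 32-bit word at pb_addr + 0x8 protects it,
	 * which is the WREG32(pb_addr + word_offset, ~mask) pattern used
	 * throughout this file.
	 */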
u32 pb_addr, mask;
u8 word_offset;
goya_pb_set_block(hdev, mmPCI_NRTR_BASE);
goya_pb_set_block(hdev, mmPCI_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmPCI_WR_REGULATOR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y0_X0_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y0_X0_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y0_X1_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y0_X1_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y0_X2_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y0_X2_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y0_X3_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y0_X3_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y0_X4_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y0_X4_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y1_X0_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y1_X0_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y1_X1_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y1_X1_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y1_X2_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y1_X2_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y1_X3_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y1_X3_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y1_X4_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y1_X4_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y2_X0_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y2_X0_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y2_X1_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y2_X1_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y2_X2_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y2_X2_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y2_X3_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y2_X3_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y2_X4_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y2_X4_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y3_X0_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y3_X0_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y3_X1_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y3_X1_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y3_X2_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y3_X2_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y3_X3_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y3_X3_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y3_X4_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y3_X4_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y4_X0_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y4_X0_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y4_X1_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y4_X1_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y4_X2_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y4_X2_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y4_X3_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y4_X3_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y4_X4_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y4_X4_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y5_X0_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y5_X0_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y5_X1_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y5_X1_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y5_X2_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y5_X2_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y5_X3_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y5_X3_RTR_BASE);
goya_pb_set_block(hdev, mmSRAM_Y5_X4_BANK_BASE);
goya_pb_set_block(hdev, mmSRAM_Y5_X4_RTR_BASE);
goya_pb_set_block(hdev, mmPCIE_WRAP_BASE);
goya_pb_set_block(hdev, mmPCIE_CORE_BASE);
goya_pb_set_block(hdev, mmPCIE_DB_CFG_BASE);
goya_pb_set_block(hdev, mmPCIE_DB_CMD_BASE);
goya_pb_set_block(hdev, mmPCIE_AUX_BASE);
goya_pb_set_block(hdev, mmPCIE_DB_RSV_BASE);
goya_pb_set_block(hdev, mmPCIE_PHY_BASE);
goya_pb_set_block(hdev, mmTPC0_NRTR_BASE);
goya_pb_set_block(hdev, mmTPC_PLL_BASE);
pb_addr = (mmTPC_PLL_CLK_RLX_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC_PLL_CLK_RLX_0 & PROT_BITS_OFFS) >> 7) << 2;
mask = 1 << ((mmTPC_PLL_CLK_RLX_0 & 0x7C) >> 2);
WREG32(pb_addr + word_offset, mask);
goya_init_mme_protection_bits(hdev);
goya_init_dma_protection_bits(hdev);
goya_init_tpc_protection_bits(hdev);
}
/*
* goya_init_security - Initialize security model
*
* @hdev: pointer to hl_device structure
*
 * Initialize the security model of the device. That includes range
 * registers and a protection bit per register.
*
*/
void goya_init_security(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
u32 dram_addr_lo = lower_32_bits(DRAM_PHYS_BASE);
u32 dram_addr_hi = upper_32_bits(DRAM_PHYS_BASE);
u32 lbw_rng0_base = 0xFC440000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng0_mask = 0xFFFF0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng1_base = 0xFC480000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng1_mask = 0xFFF80000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng2_base = 0xFC600000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng2_mask = 0xFFE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng3_base = 0xFC800000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng3_mask = 0xFFF00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng4_base = 0xFCC02000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng4_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng5_base = 0xFCC40000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng5_mask = 0xFFFF8000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng6_base = 0xFCC48000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng6_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng7_base = 0xFCC4A000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng7_mask = 0xFFFFE000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng8_base = 0xFCC4C000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng8_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng9_base = 0xFCC50000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng9_mask = 0xFFFF0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng10_base = 0xFCC60000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng10_mask = 0xFFFE0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng11_base = 0xFCE02000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng11_mask = 0xFFFFE000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng12_base = 0xFE484000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng12_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng13_base = 0xFEC43000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
u32 lbw_rng13_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
WREG32(mmDMA_MACRO_LBW_RANGE_HIT_BLOCK, 0xFFFF);
WREG32(mmDMA_MACRO_HBW_RANGE_HIT_BLOCK, 0xFF);
if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
WREG32(mmDMA_MACRO_HBW_RANGE_HIT_BLOCK, 0xFE);
/* Protect HOST */
WREG32(mmDMA_MACRO_HBW_RANGE_BASE_31_0_0, 0);
WREG32(mmDMA_MACRO_HBW_RANGE_BASE_49_32_0, 0);
WREG32(mmDMA_MACRO_HBW_RANGE_MASK_31_0_0, 0);
WREG32(mmDMA_MACRO_HBW_RANGE_MASK_49_32_0, 0xFFF80);
}
/*
* Protect DDR @
* DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
* The mask protects the first 512MB
*/
WREG32(mmDMA_MACRO_HBW_RANGE_BASE_31_0_1, dram_addr_lo);
WREG32(mmDMA_MACRO_HBW_RANGE_BASE_49_32_1, dram_addr_hi);
WREG32(mmDMA_MACRO_HBW_RANGE_MASK_31_0_1, 0xE0000000);
WREG32(mmDMA_MACRO_HBW_RANGE_MASK_49_32_1, 0x3FFFF);
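	/*
	 * As an illustration of the 512MB figure above: the low mask
	 * 0xE0000000 together with the high mask 0x3FFFF covers address bits
	 * 29-49, so the range compare works at a 2^29 = 512MB granularity
	 * around DRAM_PHYS_BASE.
	 */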
/* Protect registers */
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmDMA_MACRO_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmDMA_MACRO_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmMME1_RTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmMME2_RTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmMME3_RTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmMME4_RTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmMME5_RTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmMME6_RTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmMME1_RTR_HBW_RANGE_HIT, 0xFE);
WREG32(mmMME2_RTR_HBW_RANGE_HIT, 0xFE);
WREG32(mmMME3_RTR_HBW_RANGE_HIT, 0xFE);
WREG32(mmMME4_RTR_HBW_RANGE_HIT, 0xFE);
WREG32(mmMME5_RTR_HBW_RANGE_HIT, 0xFE);
WREG32(mmMME6_RTR_HBW_RANGE_HIT, 0xFE);
/* Protect HOST */
WREG32(mmMME1_RTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmMME1_RTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmMME1_RTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmMME1_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
WREG32(mmMME2_RTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmMME2_RTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmMME2_RTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmMME2_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
WREG32(mmMME3_RTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmMME3_RTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmMME3_RTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmMME3_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
WREG32(mmMME4_RTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmMME4_RTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmMME4_RTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmMME4_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
WREG32(mmMME5_RTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmMME5_RTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmMME5_RTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmMME5_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
WREG32(mmMME6_RTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmMME6_RTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmMME6_RTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmMME6_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
/*
* Protect DDR @
* DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
* The mask protects the first 512MB
*/
WREG32(mmMME1_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmMME1_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmMME1_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmMME1_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmMME2_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmMME2_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmMME2_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmMME2_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmMME3_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmMME3_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmMME3_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmMME3_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmMME4_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmMME4_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmMME4_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmMME4_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmMME5_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmMME5_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmMME5_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmMME5_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmMME6_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmMME6_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmMME6_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmMME6_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmMME1_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmMME1_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmMME2_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmMME2_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmMME3_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmMME3_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmMME4_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmMME4_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmMME5_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmMME5_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmMME6_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmMME6_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmTPC0_NRTR_HBW_RANGE_HIT, 0xFE);
/* Protect HOST */
WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_H_0, 0xFFF80);
/*
* Protect DDR @
* DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
* The mask protects the first 512MB
*/
WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmTPC1_RTR_HBW_RANGE_HIT, 0xFE);
/* Protect HOST */
WREG32(mmTPC1_RTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmTPC1_RTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmTPC1_RTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmTPC1_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
/*
* Protect DDR @
* DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
* The mask protects the first 512MB
*/
WREG32(mmTPC1_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmTPC1_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmTPC1_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmTPC1_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmTPC1_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmTPC2_RTR_HBW_RANGE_HIT, 0xFE);
/* Protect HOST */
WREG32(mmTPC2_RTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmTPC2_RTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmTPC2_RTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmTPC2_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
/*
* Protect DDR @
* DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
* The mask protects the first 512MB
*/
WREG32(mmTPC2_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmTPC2_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmTPC2_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmTPC2_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmTPC2_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmTPC3_RTR_HBW_RANGE_HIT, 0xFE);
/* Protect HOST */
WREG32(mmTPC3_RTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmTPC3_RTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmTPC3_RTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmTPC3_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
/*
* Protect DDR @
* DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
* The mask protects the first 512MB
*/
WREG32(mmTPC3_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmTPC3_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmTPC3_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmTPC3_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmTPC3_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmTPC3_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmTPC4_RTR_HBW_RANGE_HIT, 0xFE);
/* Protect HOST */
WREG32(mmTPC4_RTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmTPC4_RTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmTPC4_RTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmTPC4_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
/*
* Protect DDR @
* DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
* The mask protects the first 512MB
*/
WREG32(mmTPC4_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmTPC4_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmTPC4_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmTPC4_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmTPC4_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmTPC5_RTR_HBW_RANGE_HIT, 0xFE);
/* Protect HOST */
WREG32(mmTPC5_RTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmTPC5_RTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmTPC5_RTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmTPC5_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
/*
* Protect DDR @
* DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
* The mask protects the first 512MB
*/
WREG32(mmTPC5_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmTPC5_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmTPC5_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmTPC5_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmTPC5_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmTPC5_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmTPC6_RTR_HBW_RANGE_HIT, 0xFE);
/* Protect HOST */
WREG32(mmTPC6_RTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmTPC6_RTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmTPC6_RTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmTPC6_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
/*
* Protect DDR @
* DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
* The mask protects the first 512MB
*/
WREG32(mmTPC6_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmTPC6_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmTPC6_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmTPC6_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmTPC6_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmTPC6_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_HIT, 0xFFFF);
WREG32(mmTPC7_NRTR_HBW_RANGE_HIT, 0xFE);
/* Protect HOST */
WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_L_0, 0);
WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_H_0, 0);
WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_L_0, 0);
WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_H_0, 0xFFF80);
/*
* Protect DDR @
* DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
* The mask protects the first 512MB
*/
WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_L_1, 0xE0000000);
WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_0, lbw_rng0_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_1, lbw_rng1_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_2, lbw_rng2_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_3, lbw_rng3_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_4, lbw_rng4_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_5, lbw_rng5_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_6, lbw_rng6_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_7, lbw_rng7_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_8, lbw_rng8_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_9, lbw_rng9_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_10, lbw_rng10_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_11, lbw_rng11_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_12, lbw_rng12_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_13, lbw_rng13_base);
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
goya_init_protection_bits(hdev);
}
void goya_ack_protection_bits_errors(struct hl_device *hdev)
{
}
| linux-master | drivers/accel/habanalabs/goya/goya_security.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2019 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
#include <linux/slab.h>
/*
 * hl_hw_queue_add_ptr - add a value to a pi or ci and handle wraparound
*
* @ptr: the current pi/ci value
* @val: the amount to add
*
 * Add val to ptr. The result wraps around at twice the queue length.
*/
inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
{
ptr += val;
ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
return ptr;
}
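/*
 * Illustrative note (hypothetical length): if HL_QUEUE_LENGTH were 256, the
 * pointers would advance modulo 512, e.g. hl_hw_queue_add_ptr(510, 4) == 2.
 * Keeping pi/ci in a range of twice the queue length lets queue_free_slots()
 * below distinguish a completely full queue from an empty one.
 */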
static inline int queue_ci_get(atomic_t *ci, u32 queue_len)
{
return atomic_read(ci) & ((queue_len << 1) - 1);
}
static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
{
int delta = (q->pi - queue_ci_get(&q->ci, queue_len));
if (delta >= 0)
return (queue_len - delta);
else
return (abs(delta) - queue_len);
}
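/*
 * Worked example for queue_free_slots() (hypothetical values): with a
 * queue_len of 256, pi = 10 and ci = 300 give delta = -290, i.e.
 * 290 - 256 = 34 free slots; pi == ci means all queue_len slots are free,
 * and a delta of queue_len means the queue is full.
 */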
void hl_hw_queue_update_ci(struct hl_cs *cs)
{
struct hl_device *hdev = cs->ctx->hdev;
struct hl_hw_queue *q;
int i;
if (hdev->disabled)
return;
q = &hdev->kernel_queues[0];
/* There are no internal queues if H/W queues are being used */
if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW)
return;
	/* We must increment CI for every queue that will never get a
	 * completion. There are 2 scenarios in which this can happen:
	 * 1. All queues of a non-completion CS will never get a completion.
	 * 2. Internal queues never get a completion.
	 */
for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
if (!cs_needs_completion(cs) || q->queue_type == QUEUE_TYPE_INT)
atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);
}
}
/*
* hl_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
* H/W queue.
* @hdev: pointer to habanalabs device structure
* @q: pointer to habanalabs queue structure
* @ctl: BD's control word
* @len: BD's length
* @ptr: BD's pointer
*
* This function assumes there is enough space on the queue to submit a new
 * BD to it. It initializes the next BD and calls the device-specific
 * function to set the pi (and ring the doorbell).
*
* This function must be called when the scheduler mutex is taken
*
*/
void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
u32 ctl, u32 len, u64 ptr)
{
struct hl_bd *bd;
bd = q->kernel_address;
bd += hl_pi_2_offset(q->pi);
bd->ctl = cpu_to_le32(ctl);
bd->len = cpu_to_le32(len);
bd->ptr = cpu_to_le64(ptr);
q->pi = hl_queue_inc_ptr(q->pi);
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
/*
* ext_queue_sanity_checks - perform some sanity checks on external queue
*
* @hdev : pointer to hl_device structure
* @q : pointer to hl_hw_queue structure
* @num_of_entries : how many entries to check for space
* @reserve_cq_entry : whether to reserve an entry in the cq
*
* H/W queues spinlock should be taken before calling this function
*
* Perform the following:
* - Make sure we have enough space in the h/w queue
* - Make sure we have enough space in the completion queue
* - Reserve space in the completion queue (needs to be reversed if there
* is a failure down the road before the actual submission of work). Only
* do this action if reserve_cq_entry is true
*
*/
static int ext_queue_sanity_checks(struct hl_device *hdev,
struct hl_hw_queue *q, int num_of_entries,
bool reserve_cq_entry)
{
atomic_t *free_slots =
&hdev->completion_queue[q->cq_id].free_slots_cnt;
int free_slots_cnt;
/* Check we have enough space in the queue */
free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);
if (free_slots_cnt < num_of_entries) {
dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
q->hw_queue_id, num_of_entries);
return -EAGAIN;
}
if (reserve_cq_entry) {
		/*
		 * Check we have enough space in the completion queue.
		 * Decrement the free-slots counter by num_of_entries; if the
		 * result is negative, the CQ doesn't have room for the new
		 * CBs (we would never get an ack on their completion), so
		 * restore the counter and fail.
		 * atomic_add_negative() returns true if the new value is
		 * negative.
		 */
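		/*
		 * For example (hypothetical values): if free_slots is 3 and
		 * num_of_entries is 5, the atomic add yields -2, so the 5 is
		 * added back and -EAGAIN is returned; with free_slots of 8
		 * the result is 3 and the reservation stands.
		 */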
if (atomic_add_negative(num_of_entries * -1, free_slots)) {
dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
num_of_entries, q->hw_queue_id);
atomic_add(num_of_entries, free_slots);
return -EAGAIN;
}
}
return 0;
}
/*
* int_queue_sanity_checks - perform some sanity checks on internal queue
*
* @hdev : pointer to hl_device structure
* @q : pointer to hl_hw_queue structure
* @num_of_entries : how many entries to check for space
*
* H/W queues spinlock should be taken before calling this function
*
* Perform the following:
* - Make sure we have enough space in the h/w queue
*
*/
static int int_queue_sanity_checks(struct hl_device *hdev,
struct hl_hw_queue *q,
int num_of_entries)
{
int free_slots_cnt;
if (num_of_entries > q->int_queue_len) {
dev_err(hdev->dev,
"Cannot populate queue %u with %u jobs\n",
q->hw_queue_id, num_of_entries);
return -ENOMEM;
}
/* Check we have enough space in the queue */
free_slots_cnt = queue_free_slots(q, q->int_queue_len);
if (free_slots_cnt < num_of_entries) {
dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
q->hw_queue_id, num_of_entries);
return -EAGAIN;
}
return 0;
}
/*
* hw_queue_sanity_checks() - Make sure we have enough space in the h/w queue
* @hdev: Pointer to hl_device structure.
* @q: Pointer to hl_hw_queue structure.
* @num_of_entries: How many entries to check for space.
*
* Notice: We do not reserve queue entries so this function mustn't be called
* more than once per CS for the same queue
*
*/
static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
int num_of_entries)
{
int free_slots_cnt;
/* Check we have enough space in the queue */
free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);
if (free_slots_cnt < num_of_entries) {
dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
q->hw_queue_id, num_of_entries);
return -EAGAIN;
}
return 0;
}
/*
* hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
*
* @hdev: pointer to hl_device structure
 * @hw_queue_id: id of the destination H/W queue
* @cb_size: size of CB
* @cb_ptr: pointer to CB location
*
 * This function sends a single CB that must NOT generate a completion entry.
 * Sending CPU messages can instead be done via 'hl_hw_queue_submit_bd()'.
*/
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
u32 cb_size, u64 cb_ptr)
{
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
int rc = 0;
hdev->asic_funcs->hw_queues_lock(hdev);
if (hdev->disabled) {
rc = -EPERM;
goto out;
}
	/*
	 * For queues of the H/W type, hl_hw_queue_send_cb_no_cmpl() is called
	 * only during the init phase, when the queues are empty and being
	 * tested, so there is no need for sanity checks.
	 */
if (q->queue_type != QUEUE_TYPE_HW) {
rc = ext_queue_sanity_checks(hdev, q, 1, false);
if (rc)
goto out;
}
hl_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
out:
hdev->asic_funcs->hw_queues_unlock(hdev);
return rc;
}
/*
* ext_queue_schedule_job - submit a JOB to an external queue
*
* @job: pointer to the job that needs to be submitted to the queue
*
* This function must be called when the scheduler mutex is taken
*
*/
static void ext_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
struct hl_cq_entry cq_pkt;
struct hl_cq *cq;
u64 cq_addr;
struct hl_cb *cb;
u32 ctl;
u32 len;
u64 ptr;
/*
* Update the JOB ID inside the BD CTL so the device would know what
* to write in the completion queue
*/
ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);
cb = job->patched_cb;
len = job->job_cb_size;
ptr = cb->bus_address;
	/* Skip the completion flow in case this is a non-completion CS */
if (!cs_needs_completion(job->cs))
goto submit_bd;
cq_pkt.data = cpu_to_le32(
((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
& CQ_ENTRY_SHADOW_INDEX_MASK) |
FIELD_PREP(CQ_ENTRY_SHADOW_INDEX_VALID_MASK, 1) |
FIELD_PREP(CQ_ENTRY_READY_MASK, 1));
/*
* No need to protect pi_offset because scheduling to the
* H/W queues is done under the scheduler mutex
*
* No need to check if CQ is full because it was already
* checked in ext_queue_sanity_checks
*/
cq = &hdev->completion_queue[q->cq_id];
cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);
hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
job->user_cb_size,
cq_addr,
le32_to_cpu(cq_pkt.data),
q->msi_vec,
job->contains_dma_pkt);
q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
cq->pi = hl_cq_inc_ptr(cq->pi);
submit_bd:
hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
/*
* int_queue_schedule_job - submit a JOB to an internal queue
*
* @job: pointer to the job that needs to be submitted to the queue
*
* This function must be called when the scheduler mutex is taken
*
*/
static void int_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
struct hl_bd bd;
__le64 *pi;
bd.ctl = 0;
bd.len = cpu_to_le32(job->job_cb_size);
if (job->is_kernel_allocated_cb)
		/* bus_address is actually an MMU-mapped address
		 * allocated from an internal pool
		 */
bd.ptr = cpu_to_le64(job->user_cb->bus_address);
else
bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);
pi = q->kernel_address + (q->pi & (q->int_queue_len - 1)) * sizeof(bd);
q->pi++;
q->pi &= ((q->int_queue_len << 1) - 1);
hdev->asic_funcs->pqe_write(hdev, pi, &bd);
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
/*
* hw_queue_schedule_job - submit a JOB to a H/W queue
*
* @job: pointer to the job that needs to be submitted to the queue
*
* This function must be called when the scheduler mutex is taken
*
*/
static void hw_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
u64 ptr;
u32 offset, ctl, len;
/*
* Upon PQE completion, COMP_DATA is used as the write data to the
* completion queue (QMAN HBW message), and COMP_OFFSET is used as the
* write address offset in the SM block (QMAN LBW message).
* The write address offset is calculated as "COMP_OFFSET << 2".
*/
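	/*
	 * For example (hypothetical offset): a COMP_OFFSET of 5 makes the
	 * QMAN LBW write land at SM block base + (5 << 2) = base + 20, i.e.
	 * the sixth 32-bit slot in the SM block.
	 */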
offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1);
ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);
len = job->job_cb_size;
	/*
	 * A patched CB is created only if a user CB was allocated by the
	 * driver and the MMU is disabled. If the MMU is enabled, the user CB
	 * should be used instead. If the user CB wasn't allocated by the
	 * driver, assume that it holds an address.
	 */
if (job->patched_cb)
ptr = job->patched_cb->bus_address;
else if (job->is_kernel_allocated_cb)
ptr = job->user_cb->bus_address;
else
ptr = (u64) (uintptr_t) job->user_cb;
hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
static int init_signal_cs(struct hl_device *hdev,
struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
struct hl_sync_stream_properties *prop;
struct hl_hw_sob *hw_sob;
u32 q_idx;
int rc = 0;
q_idx = job->hw_queue_id;
prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
hw_sob = &prop->hw_sob[prop->curr_sob_offset];
cs_cmpl->hw_sob = hw_sob;
cs_cmpl->sob_val = prop->next_sob_val;
dev_dbg(hdev->dev,
"generate signal CB, sob_id: %d, sob val: %u, q_idx: %d, seq: %llu\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx,
cs_cmpl->cs_seq);
	/* We set an EB since we must make sure all operations are done
* when sending the signal
*/
hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
cs_cmpl->hw_sob->sob_id, 0, true);
rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1,
false);
job->cs->sob_addr_offset = hw_sob->sob_addr;
job->cs->initial_sob_count = prop->next_sob_val - 1;
return rc;
}
void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
struct hl_cs *cs, struct hl_cs_job *job,
struct hl_cs_compl *cs_cmpl)
{
struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
u32 offset = 0;
cs_cmpl->hw_sob = handle->hw_sob;
	/* Note that encaps_sig_wait_offset was validated earlier in the flow
	 * against an offset value that exceeds the max reserved signal count.
	 * Always decrement the offset by 1: when the user sets offset 1, for
	 * example, he means to wait only for the first signal, whose value is
	 * pre_sob_val; if he sets offset 2, the required value is
	 * (pre_sob_val + 1), and so on.
	 * If the user sets the wait offset to 0, treat it as a legacy wait CS
	 * and wait for the next signal.
	 */
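	/*
	 * For example (hypothetical values): with a pre_sob_val of 10, a wait
	 * offset of 1 yields sob_val 10, an offset of 3 yields sob_val 12,
	 * and an offset of 0 keeps sob_val at 10, i.e. the legacy behaviour
	 * described above.
	 */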
if (job->encaps_sig_wait_offset)
offset = job->encaps_sig_wait_offset - 1;
cs_cmpl->sob_val = handle->pre_sob_val + offset;
}
static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
struct hl_gen_wait_properties wait_prop;
struct hl_sync_stream_properties *prop;
struct hl_cs_compl *signal_cs_cmpl;
u32 q_idx;
q_idx = job->hw_queue_id;
prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
signal_cs_cmpl = container_of(cs->signal_fence,
struct hl_cs_compl,
base_fence);
if (cs->encaps_signals) {
/* use the encaps signal handle stored earlier in the flow
* and set the SOB information from the encaps
* signals handle
*/
hl_hw_queue_encaps_sig_set_sob_info(hdev, cs, job, cs_cmpl);
dev_dbg(hdev->dev, "Wait for encaps signals handle, qidx(%u), CS sequence(%llu), sob val: 0x%x, offset: %u\n",
cs->encaps_sig_hdl->q_idx,
cs->encaps_sig_hdl->cs_seq,
cs_cmpl->sob_val,
job->encaps_sig_wait_offset);
} else {
/* Copy the SOB id and value of the signal CS */
cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
}
	/* Check again if the signal CS has already completed.
	 * If it has, don't send any wait CS, since the hw_sob could already
	 * be in reset. If the signal has not completed, take a refcount on
	 * the hw_sob to prevent the SOB from being reset while the wait CS is
	 * not yet submitted.
	 * Note that this check is protected by two locks: the hw queue lock
	 * and the completion object lock. The same completion object lock
	 * also protects the hw_sob reset handler function, and the hw queue
	 * lock keeps the hw_sob refcount, which is changed by the signal/wait
	 * flows, from going out of sync.
	 */
spin_lock(&signal_cs_cmpl->lock);
if (completion_done(&cs->signal_fence->completion)) {
spin_unlock(&signal_cs_cmpl->lock);
return -EINVAL;
}
kref_get(&cs_cmpl->hw_sob->kref);
spin_unlock(&signal_cs_cmpl->lock);
dev_dbg(hdev->dev,
"generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d, seq: %llu\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
prop->base_mon_id, q_idx, cs->sequence);
wait_prop.data = (void *) job->patched_cb;
wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
wait_prop.sob_mask = 0x1;
wait_prop.sob_val = cs_cmpl->sob_val;
wait_prop.mon_id = prop->base_mon_id;
wait_prop.q_idx = q_idx;
wait_prop.size = 0;
hdev->asic_funcs->gen_wait_cb(hdev, &wait_prop);
mb();
hl_fence_put(cs->signal_fence);
cs->signal_fence = NULL;
return 0;
}
/*
* init_signal_wait_cs - initialize a signal/wait CS
* @cs: pointer to the signal/wait CS
*
* H/W queues spinlock should be taken before calling this function
*/
static int init_signal_wait_cs(struct hl_cs *cs)
{
struct hl_ctx *ctx = cs->ctx;
struct hl_device *hdev = ctx->hdev;
struct hl_cs_job *job;
struct hl_cs_compl *cs_cmpl =
container_of(cs->fence, struct hl_cs_compl, base_fence);
int rc = 0;
/* There is only one job in a signal/wait CS */
job = list_first_entry(&cs->job_list, struct hl_cs_job,
cs_node);
if (cs->type & CS_TYPE_SIGNAL)
rc = init_signal_cs(hdev, job, cs_cmpl);
else if (cs->type & CS_TYPE_WAIT)
rc = init_wait_cs(hdev, cs, job, cs_cmpl);
return rc;
}
static int encaps_sig_first_staged_cs_handler
(struct hl_device *hdev, struct hl_cs *cs)
{
struct hl_cs_compl *cs_cmpl =
container_of(cs->fence,
struct hl_cs_compl, base_fence);
struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
struct hl_encaps_signals_mgr *mgr;
int rc = 0;
mgr = &cs->ctx->sig_mgr;
spin_lock(&mgr->lock);
encaps_sig_hdl = idr_find(&mgr->handles, cs->encaps_sig_hdl_id);
if (encaps_sig_hdl) {
/*
* Set handler CS sequence,
* the CS which contains the encapsulated signals.
*/
encaps_sig_hdl->cs_seq = cs->sequence;
		/* Store the handle and set the encaps signal indication, to be
		 * used later in cs_do_release to put the last reference to
		 * the encaps signals handler.
		 */
cs_cmpl->encaps_signals = true;
cs_cmpl->encaps_sig_hdl = encaps_sig_hdl;
		/* Set the hw_sob pointer in the completion object, since it
		 * is used in the cs_do_release flow to put the refcount on
		 * the SOB.
		 */
cs_cmpl->hw_sob = encaps_sig_hdl->hw_sob;
cs_cmpl->sob_val = encaps_sig_hdl->pre_sob_val +
encaps_sig_hdl->count;
dev_dbg(hdev->dev, "CS seq (%llu) added to encaps signal handler id (%u), count(%u), qidx(%u), sob(%u), val(%u)\n",
cs->sequence, encaps_sig_hdl->id,
encaps_sig_hdl->count,
encaps_sig_hdl->q_idx,
cs_cmpl->hw_sob->sob_id,
cs_cmpl->sob_val);
} else {
dev_err(hdev->dev, "encaps handle id(%u) wasn't found!\n",
cs->encaps_sig_hdl_id);
rc = -EINVAL;
}
spin_unlock(&mgr->lock);
return rc;
}
/*
* hl_hw_queue_schedule_cs - schedule a command submission
* @cs: pointer to the CS
*/
int hl_hw_queue_schedule_cs(struct hl_cs *cs)
{
enum hl_device_status status;
struct hl_cs_counters_atomic *cntr;
struct hl_ctx *ctx = cs->ctx;
struct hl_device *hdev = ctx->hdev;
struct hl_cs_job *job, *tmp;
struct hl_hw_queue *q;
int rc = 0, i, cq_cnt;
bool first_entry;
u32 max_queues;
cntr = &hdev->aggregated_cs_counters;
hdev->asic_funcs->hw_queues_lock(hdev);
if (!hl_device_operational(hdev, &status)) {
atomic64_inc(&cntr->device_in_reset_drop_cnt);
atomic64_inc(&ctx->cs_counters.device_in_reset_drop_cnt);
dev_err(hdev->dev,
"device is %s, CS rejected!\n", hdev->status[status]);
rc = -EPERM;
goto out;
}
max_queues = hdev->asic_prop.max_queues;
q = &hdev->kernel_queues[0];
for (i = 0, cq_cnt = 0 ; i < max_queues ; i++, q++) {
if (cs->jobs_in_queue_cnt[i]) {
switch (q->queue_type) {
case QUEUE_TYPE_EXT:
rc = ext_queue_sanity_checks(hdev, q,
cs->jobs_in_queue_cnt[i],
cs_needs_completion(cs) ?
true : false);
break;
case QUEUE_TYPE_INT:
rc = int_queue_sanity_checks(hdev, q,
cs->jobs_in_queue_cnt[i]);
break;
case QUEUE_TYPE_HW:
rc = hw_queue_sanity_checks(hdev, q,
cs->jobs_in_queue_cnt[i]);
break;
default:
dev_err(hdev->dev, "Queue type %d is invalid\n",
q->queue_type);
rc = -EINVAL;
break;
}
if (rc) {
atomic64_inc(
&ctx->cs_counters.queue_full_drop_cnt);
atomic64_inc(&cntr->queue_full_drop_cnt);
goto unroll_cq_resv;
}
if (q->queue_type == QUEUE_TYPE_EXT)
cq_cnt++;
}
}
if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT)) {
rc = init_signal_wait_cs(cs);
if (rc)
goto unroll_cq_resv;
} else if (cs->type == CS_TYPE_COLLECTIVE_WAIT) {
rc = hdev->asic_funcs->collective_wait_init_cs(cs);
if (rc)
goto unroll_cq_resv;
}
rc = hdev->asic_funcs->pre_schedule_cs(cs);
if (rc) {
dev_err(hdev->dev,
"Failed in pre-submission operations of CS %d.%llu\n",
ctx->asid, cs->sequence);
goto unroll_cq_resv;
}
hdev->shadow_cs_queue[cs->sequence &
(hdev->asic_prop.max_pending_cs - 1)] = cs;
if (cs->encaps_signals && cs->staged_first) {
rc = encaps_sig_first_staged_cs_handler(hdev, cs);
if (rc)
goto unroll_cq_resv;
}
spin_lock(&hdev->cs_mirror_lock);
/* Verify staged CS exists and add to the staged list */
if (cs->staged_cs && !cs->staged_first) {
struct hl_cs *staged_cs;
staged_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
if (!staged_cs) {
dev_err(hdev->dev,
"Cannot find staged submission sequence %llu",
cs->staged_sequence);
rc = -EINVAL;
goto unlock_cs_mirror;
}
if (is_staged_cs_last_exists(hdev, staged_cs)) {
dev_err(hdev->dev,
"Staged submission sequence %llu already submitted",
cs->staged_sequence);
rc = -EINVAL;
goto unlock_cs_mirror;
}
list_add_tail(&cs->staged_cs_node, &staged_cs->staged_cs_node);
/* update stream map of the first CS */
if (hdev->supports_wait_for_multi_cs)
staged_cs->fence->stream_master_qid_map |=
cs->fence->stream_master_qid_map;
}
list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list);
/* Queue TDR if the CS is the first entry and if timeout is wanted */
first_entry = list_first_entry(&hdev->cs_mirror_list,
struct hl_cs, mirror_node) == cs;
if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
first_entry && cs_needs_timeout(cs)) {
cs->tdr_active = true;
schedule_delayed_work(&cs->work_tdr, cs->timeout_jiffies);
}
spin_unlock(&hdev->cs_mirror_lock);
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
switch (job->queue_type) {
case QUEUE_TYPE_EXT:
ext_queue_schedule_job(job);
break;
case QUEUE_TYPE_INT:
int_queue_schedule_job(job);
break;
case QUEUE_TYPE_HW:
hw_queue_schedule_job(job);
break;
default:
break;
}
cs->submitted = true;
goto out;
unlock_cs_mirror:
spin_unlock(&hdev->cs_mirror_lock);
unroll_cq_resv:
q = &hdev->kernel_queues[0];
for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) {
if ((q->queue_type == QUEUE_TYPE_EXT) &&
(cs->jobs_in_queue_cnt[i])) {
atomic_t *free_slots =
&hdev->completion_queue[i].free_slots_cnt;
atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
cq_cnt--;
}
}
out:
hdev->asic_funcs->hw_queues_unlock(hdev);
return rc;
}
/*
* hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
*
* @hdev: pointer to hl_device structure
* @hw_queue_id: which queue to increment its ci
*/
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
{
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
atomic_inc(&q->ci);
}
static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
bool is_cpu_queue)
{
void *p;
int rc;
if (is_cpu_queue)
p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address);
else
p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
GFP_KERNEL | __GFP_ZERO);
if (!p)
return -ENOMEM;
q->kernel_address = p;
q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH, sizeof(struct hl_cs_job *), GFP_KERNEL);
if (!q->shadow_queue) {
dev_err(hdev->dev,
"Failed to allocate shadow queue for H/W queue %d\n",
q->hw_queue_id);
rc = -ENOMEM;
goto free_queue;
}
/* Make sure read/write pointers are initialized to start of queue */
atomic_set(&q->ci, 0);
q->pi = 0;
return 0;
free_queue:
if (is_cpu_queue)
hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
else
hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
q->bus_address);
return rc;
}
static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
void *p;
p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
&q->bus_address, &q->int_queue_len);
if (!p) {
dev_err(hdev->dev,
"Failed to get base address for internal queue %d\n",
q->hw_queue_id);
return -EFAULT;
}
q->kernel_address = p;
q->pi = 0;
atomic_set(&q->ci, 0);
return 0;
}
static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
return ext_and_cpu_queue_init(hdev, q, true);
}
static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
return ext_and_cpu_queue_init(hdev, q, false);
}
static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
void *p;
p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
GFP_KERNEL | __GFP_ZERO);
if (!p)
return -ENOMEM;
q->kernel_address = p;
/* Make sure read/write pointers are initialized to start of queue */
atomic_set(&q->ci, 0);
q->pi = 0;
return 0;
}
static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
{
struct hl_sync_stream_properties *sync_stream_prop;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_hw_sob *hw_sob;
int sob, reserved_mon_idx, queue_idx;
sync_stream_prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
/* We use 'collective_mon_idx' as a running index in order to reserve
* monitors for collective master/slave queues.
* collective master queue gets 2 reserved monitors
* collective slave queue gets 1 reserved monitor
*/
if (hdev->kernel_queues[q_idx].collective_mode ==
HL_COLLECTIVE_MASTER) {
reserved_mon_idx = hdev->collective_mon_idx;
/* reserve the first monitor for collective master queue */
sync_stream_prop->collective_mstr_mon_id[0] =
prop->collective_first_mon + reserved_mon_idx;
/* reserve the second monitor for collective master queue */
sync_stream_prop->collective_mstr_mon_id[1] =
prop->collective_first_mon + reserved_mon_idx + 1;
hdev->collective_mon_idx += HL_COLLECTIVE_RSVD_MSTR_MONS;
} else if (hdev->kernel_queues[q_idx].collective_mode ==
HL_COLLECTIVE_SLAVE) {
reserved_mon_idx = hdev->collective_mon_idx++;
/* reserve a monitor for collective slave queue */
sync_stream_prop->collective_slave_mon_id =
prop->collective_first_mon + reserved_mon_idx;
}
if (!hdev->kernel_queues[q_idx].supports_sync_stream)
return;
queue_idx = hdev->sync_stream_queue_idx++;
sync_stream_prop->base_sob_id = prop->sync_stream_first_sob +
(queue_idx * HL_RSVD_SOBS);
sync_stream_prop->base_mon_id = prop->sync_stream_first_mon +
(queue_idx * HL_RSVD_MONS);
sync_stream_prop->next_sob_val = 1;
sync_stream_prop->curr_sob_offset = 0;
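	/* Initialize the SOBs reserved for this queue; the next expected SOB value starts at 1 */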
for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
hw_sob = &sync_stream_prop->hw_sob[sob];
hw_sob->hdev = hdev;
hw_sob->sob_id = sync_stream_prop->base_sob_id + sob;
hw_sob->sob_addr =
hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
hw_sob->q_idx = q_idx;
kref_init(&hw_sob->kref);
}
}
static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
{
struct hl_sync_stream_properties *prop =
&hdev->kernel_queues[q_idx].sync_stream_prop;
/*
* In case we got here due to a stuck CS, the refcnt might be bigger
* than 1 and therefore we reset it.
*/
kref_init(&prop->hw_sob[prop->curr_sob_offset].kref);
prop->curr_sob_offset = 0;
prop->next_sob_val = 1;
}
/*
* queue_init - main initialization function for H/W queue object
*
* @hdev: pointer to hl_device device structure
* @q: pointer to hl_hw_queue queue structure
* @hw_queue_id: The id of the H/W queue
*
* Allocate dma-able memory for the queue and initialize fields
* Returns 0 on success
*/
static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
u32 hw_queue_id)
{
int rc;
q->hw_queue_id = hw_queue_id;
switch (q->queue_type) {
case QUEUE_TYPE_EXT:
rc = ext_queue_init(hdev, q);
break;
case QUEUE_TYPE_INT:
rc = int_queue_init(hdev, q);
break;
case QUEUE_TYPE_CPU:
rc = cpu_queue_init(hdev, q);
break;
case QUEUE_TYPE_HW:
rc = hw_queue_init(hdev, q);
break;
case QUEUE_TYPE_NA:
q->valid = 0;
return 0;
default:
dev_crit(hdev->dev, "wrong queue type %d during init\n",
q->queue_type);
rc = -EINVAL;
break;
}
sync_stream_queue_init(hdev, q->hw_queue_id);
if (rc)
return rc;
q->valid = 1;
return 0;
}
/*
 * queue_fini - destroy queue
*
* @hdev: pointer to hl_device device structure
* @q: pointer to hl_hw_queue queue structure
*
* Free the queue memory
*/
static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
if (!q->valid)
return;
/*
* If we arrived here, there are no jobs waiting on this queue
* so we can safely remove it.
 * This is because this function can only be called when:
* 1. Either a context is deleted, which only can occur if all its
* jobs were finished
* 2. A context wasn't able to be created due to failure or timeout,
* which means there are no jobs on the queue yet
*
* The only exception are the queues of the kernel context, but
* if they are being destroyed, it means that the entire module is
* being removed. If the module is removed, it means there is no open
* user context. It also means that if a job was submitted by
* the kernel driver (e.g. context creation), the job itself was
* released by the kernel driver when a timeout occurred on its
* Completion. Thus, we don't need to release it again.
*/
if (q->queue_type == QUEUE_TYPE_INT)
return;
kfree(q->shadow_queue);
if (q->queue_type == QUEUE_TYPE_CPU)
hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
else
hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
q->bus_address);
}
int hl_hw_queues_create(struct hl_device *hdev)
{
struct asic_fixed_properties *asic = &hdev->asic_prop;
struct hl_hw_queue *q;
int i, rc, q_ready_cnt;
hdev->kernel_queues = kcalloc(asic->max_queues,
sizeof(*hdev->kernel_queues), GFP_KERNEL);
if (!hdev->kernel_queues) {
dev_err(hdev->dev, "Not enough memory for H/W queues\n");
return -ENOMEM;
}
/* Initialize the H/W queues */
for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
i < asic->max_queues ; i++, q_ready_cnt++, q++) {
q->queue_type = asic->hw_queues_props[i].type;
q->supports_sync_stream =
asic->hw_queues_props[i].supports_sync_stream;
q->collective_mode = asic->hw_queues_props[i].collective_mode;
rc = queue_init(hdev, q, i);
if (rc) {
dev_err(hdev->dev,
"failed to initialize queue %d\n", i);
goto release_queues;
}
}
return 0;
release_queues:
for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
queue_fini(hdev, q);
kfree(hdev->kernel_queues);
return rc;
}
void hl_hw_queues_destroy(struct hl_device *hdev)
{
struct hl_hw_queue *q;
u32 max_queues = hdev->asic_prop.max_queues;
int i;
for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++)
queue_fini(hdev, q);
kfree(hdev->kernel_queues);
}
void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
{
struct hl_hw_queue *q;
u32 max_queues = hdev->asic_prop.max_queues;
int i;
for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) {
if ((!q->valid) ||
((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
continue;
q->pi = 0;
atomic_set(&q->ci, 0);
if (q->supports_sync_stream)
sync_stream_queue_reset(hdev, q->hw_queue_id);
}
}
| linux-master | drivers/accel/habanalabs/common/hw_queue.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci-p2pdma.h>
MODULE_IMPORT_NS(DMA_BUF);
#define HL_MMU_DEBUG 0
/* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
#define DRAM_POOL_PAGE_SIZE SZ_8M
#define MEM_HANDLE_INVALID ULONG_MAX
static int allocate_timestamps_buffers(struct hl_fpriv *hpriv,
struct hl_mem_in *args, u64 *handle);
static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u32 *page_size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 psize;
/*
	 * For ASICs that support setting the allocation page size by the user, honor the
	 * user's choice only if it is not 0 (as 0 means taking the default page size)
*/
if (prop->supports_user_set_page_size && args->alloc.page_size) {
psize = args->alloc.page_size;
if (!is_power_of_2(psize)) {
dev_err(hdev->dev, "user page size (%#llx) is not power of 2\n", psize);
return -EINVAL;
}
} else {
psize = prop->device_mem_alloc_default_page_size;
}
*page_size = psize;
return 0;
}
/*
* The va ranges in context object contain a list with the available chunks of
* device virtual memory.
* There is one range for host allocations and one for DRAM allocations.
*
* On initialization each range contains one chunk of all of its available
* virtual range which is a half of the total device virtual range.
*
* On each mapping of physical pages, a suitable virtual range chunk (with a
* minimum size) is selected from the list. If the chunk size equals the
* requested size, the chunk is returned. Otherwise, the chunk is split into
* two chunks - one to return as result and a remainder to stay in the list.
*
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list and, if its edges match
 * the edges of the adjacent chunks (meaning a contiguous chunk can be created),
* the chunks are merged.
*
* On finish, the list is checked to have only one chunk of all the relevant
* virtual range (which is a half of the device total virtual range).
* If not (means not all mappings were unmapped), a warning is printed.
*/
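/*
 * Illustrative example (hypothetical addresses): a free chunk covering
 * [0x1000, 0x8fff] that serves a 0x2000-byte mapping is split into the
 * returned block [0x1000, 0x2fff] and a remainder [0x3000, 0x8fff] that stays
 * in the list; when that block is later unmapped, the chunks are merged back
 * into a single [0x1000, 0x8fff] chunk.
 */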
/*
* alloc_device_memory() - allocate device memory.
* @ctx: pointer to the context structure.
* @args: host parameters containing the requested size.
* @ret_handle: result handle.
*
* This function does the following:
* - Allocate the requested size rounded up to 'dram_page_size' pages.
* - Return unique handle for later map/unmap/free.
*/
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
u32 *ret_handle)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
u64 paddr = 0, total_size, num_pgs, i;
u32 num_curr_pgs, page_size;
bool contiguous;
int handle, rc;
num_curr_pgs = 0;
rc = set_alloc_page_size(hdev, args, &page_size);
if (rc)
return rc;
num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size);
total_size = num_pgs * page_size;
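	/* Illustrative example: a 9 MB request with a 2 MB page size is rounded
	 * up to num_pgs = 5 and total_size = 10 MB
	 */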
if (!total_size) {
dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
return -EINVAL;
}
contiguous = args->flags & HL_MEM_CONTIGUOUS;
if (contiguous) {
if (is_power_of_2(page_size))
paddr = (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool,
total_size, NULL, page_size);
else
paddr = gen_pool_alloc(vm->dram_pg_pool, total_size);
if (!paddr) {
dev_err(hdev->dev,
"Cannot allocate %llu contiguous pages with total size of %llu\n",
num_pgs, total_size);
return -ENOMEM;
}
}
phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
if (!phys_pg_pack) {
rc = -ENOMEM;
goto pages_pack_err;
}
phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
phys_pg_pack->asid = ctx->asid;
phys_pg_pack->npages = num_pgs;
phys_pg_pack->page_size = page_size;
phys_pg_pack->total_size = total_size;
phys_pg_pack->flags = args->flags;
phys_pg_pack->contiguous = contiguous;
phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
rc = -ENOMEM;
goto pages_arr_err;
}
if (phys_pg_pack->contiguous) {
for (i = 0 ; i < num_pgs ; i++)
phys_pg_pack->pages[i] = paddr + i * page_size;
} else {
for (i = 0 ; i < num_pgs ; i++) {
if (is_power_of_2(page_size))
phys_pg_pack->pages[i] =
(uintptr_t)gen_pool_dma_alloc_align(vm->dram_pg_pool,
page_size, NULL,
page_size);
else
phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool,
page_size);
if (!phys_pg_pack->pages[i]) {
dev_err(hdev->dev,
"Cannot allocate device memory (out of memory)\n");
rc = -ENOMEM;
goto page_err;
}
num_curr_pgs++;
}
}
spin_lock(&vm->idr_lock);
handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
GFP_ATOMIC);
spin_unlock(&vm->idr_lock);
if (handle < 0) {
dev_err(hdev->dev, "Failed to get handle for page\n");
rc = -EFAULT;
goto idr_err;
}
for (i = 0 ; i < num_pgs ; i++)
kref_get(&vm->dram_pg_pool_refcount);
phys_pg_pack->handle = handle;
atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
*ret_handle = handle;
return 0;
idr_err:
page_err:
if (!phys_pg_pack->contiguous)
for (i = 0 ; i < num_curr_pgs ; i++)
gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
page_size);
kvfree(phys_pg_pack->pages);
pages_arr_err:
kfree(phys_pg_pack);
pages_pack_err:
if (contiguous)
gen_pool_free(vm->dram_pg_pool, paddr, total_size);
return rc;
}
/**
* dma_map_host_va() - DMA mapping of the given host virtual address.
* @hdev: habanalabs device structure.
* @addr: the host virtual address of the memory area.
* @size: the size of the memory area.
* @p_userptr: pointer to result userptr structure.
*
* This function does the following:
* - Allocate userptr structure.
* - Pin the given host memory using the userptr structure.
* - Perform DMA mapping to have the DMA addresses of the pages.
*/
static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
struct hl_userptr **p_userptr)
{
struct hl_userptr *userptr;
int rc;
userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
if (!userptr) {
rc = -ENOMEM;
goto userptr_err;
}
rc = hl_pin_host_memory(hdev, addr, size, userptr);
if (rc)
goto pin_err;
userptr->dma_mapped = true;
userptr->dir = DMA_BIDIRECTIONAL;
userptr->vm_type = VM_TYPE_USERPTR;
*p_userptr = userptr;
rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
if (rc) {
dev_err(hdev->dev, "failed to map sgt with DMA region\n");
goto dma_map_err;
}
return 0;
dma_map_err:
hl_unpin_host_memory(hdev, userptr);
pin_err:
kfree(userptr);
userptr_err:
return rc;
}
/**
* dma_unmap_host_va() - DMA unmapping of the given host virtual address.
* @hdev: habanalabs device structure.
* @userptr: userptr to free.
*
* This function does the following:
* - Unpins the physical pages.
* - Frees the userptr structure.
*/
static void dma_unmap_host_va(struct hl_device *hdev,
struct hl_userptr *userptr)
{
hl_unpin_host_memory(hdev, userptr);
kfree(userptr);
}
/**
* dram_pg_pool_do_release() - free DRAM pages pool
* @ref: pointer to reference object.
*
* This function does the following:
* - Frees the idr structure of physical pages handles.
* - Frees the generic pool of DRAM physical pages.
*/
static void dram_pg_pool_do_release(struct kref *ref)
{
struct hl_vm *vm = container_of(ref, struct hl_vm,
dram_pg_pool_refcount);
/*
* free the idr here as only here we know for sure that there are no
* allocated physical pages and hence there are no handles in use
*/
idr_destroy(&vm->phys_pg_pack_handles);
gen_pool_destroy(vm->dram_pg_pool);
}
/**
* free_phys_pg_pack() - free physical page pack.
* @hdev: habanalabs device structure.
* @phys_pg_pack: physical page pack to free.
*
* This function does the following:
* - For DRAM memory only
* - iterate over the pack, free each physical block structure by
* returning it to the general pool.
* - Free the hl_vm_phys_pg_pack structure.
*/
static void free_phys_pg_pack(struct hl_device *hdev,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_vm *vm = &hdev->vm;
u64 i;
if (phys_pg_pack->created_from_userptr)
goto end;
if (phys_pg_pack->contiguous) {
gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
phys_pg_pack->total_size);
for (i = 0; i < phys_pg_pack->npages ; i++)
kref_put(&vm->dram_pg_pool_refcount,
dram_pg_pool_do_release);
} else {
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
gen_pool_free(vm->dram_pg_pool,
phys_pg_pack->pages[i],
phys_pg_pack->page_size);
kref_put(&vm->dram_pg_pool_refcount,
dram_pg_pool_do_release);
}
}
end:
kvfree(phys_pg_pack->pages);
kfree(phys_pg_pack);
return;
}
/**
* free_device_memory() - free device memory.
* @ctx: pointer to the context structure.
* @args: host parameters containing the requested size.
*
* This function does the following:
* - Free the device memory related to the given handle.
*/
static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
u32 handle = args->free.handle;
spin_lock(&vm->idr_lock);
phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
if (!phys_pg_pack) {
spin_unlock(&vm->idr_lock);
dev_err(hdev->dev, "free device memory failed, no match for handle %u\n", handle);
return -EINVAL;
}
if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
spin_unlock(&vm->idr_lock);
dev_err(hdev->dev, "handle %u is mapped, cannot free\n", handle);
return -EINVAL;
}
	/* The handle must be removed from the IDR before the physical pages are
	 * freed, as the pool's refcount is also what triggers the IDR destroy
	 */
idr_remove(&vm->phys_pg_pack_handles, handle);
spin_unlock(&vm->idr_lock);
atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
free_phys_pg_pack(hdev, phys_pg_pack);
return 0;
}
/**
* clear_va_list_locked() - free virtual addresses list.
* @hdev: habanalabs device structure.
* @va_list: list of virtual addresses to free.
*
* This function does the following:
* - Iterate over the list and free each virtual addresses block.
*
* This function should be called only when va_list lock is taken.
*/
static void clear_va_list_locked(struct hl_device *hdev,
struct list_head *va_list)
{
struct hl_vm_va_block *va_block, *tmp;
list_for_each_entry_safe(va_block, tmp, va_list, node) {
list_del(&va_block->node);
kfree(va_block);
}
}
/**
* print_va_list_locked() - print virtual addresses list.
* @hdev: habanalabs device structure.
* @va_list: list of virtual addresses to print.
*
* This function does the following:
* - Iterate over the list and print each virtual addresses block.
*
* This function should be called only when va_list lock is taken.
*/
static void print_va_list_locked(struct hl_device *hdev,
struct list_head *va_list)
{
#if HL_MMU_DEBUG
struct hl_vm_va_block *va_block;
dev_dbg(hdev->dev, "print va list:\n");
list_for_each_entry(va_block, va_list, node)
dev_dbg(hdev->dev,
"va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
va_block->start, va_block->end, va_block->size);
#endif
}
/**
* merge_va_blocks_locked() - merge a virtual block if possible.
* @hdev: pointer to the habanalabs device structure.
* @va_list: pointer to the virtual addresses block list.
* @va_block: virtual block to merge with adjacent blocks.
*
* This function does the following:
* - Merge the given blocks with the adjacent blocks if their virtual ranges
* create a contiguous virtual range.
*
 * This function should be called only when the va_list lock is taken.
*/
static void merge_va_blocks_locked(struct hl_device *hdev,
struct list_head *va_list, struct hl_vm_va_block *va_block)
{
struct hl_vm_va_block *prev, *next;
prev = list_prev_entry(va_block, node);
if (&prev->node != va_list && prev->end + 1 == va_block->start) {
prev->end = va_block->end;
prev->size = prev->end - prev->start + 1;
list_del(&va_block->node);
kfree(va_block);
va_block = prev;
}
next = list_next_entry(va_block, node);
if (&next->node != va_list && va_block->end + 1 == next->start) {
next->start = va_block->start;
next->size = next->end - next->start + 1;
list_del(&va_block->node);
kfree(va_block);
}
}
/**
* add_va_block_locked() - add a virtual block to the virtual addresses list.
* @hdev: pointer to the habanalabs device structure.
* @va_list: pointer to the virtual addresses block list.
* @start: start virtual address.
* @end: end virtual address.
*
* This function does the following:
* - Add the given block to the virtual blocks list and merge with other blocks
* if a contiguous virtual block can be created.
*
 * This function should be called only when the va_list lock is taken.
*/
static int add_va_block_locked(struct hl_device *hdev,
struct list_head *va_list, u64 start, u64 end)
{
struct hl_vm_va_block *va_block, *res = NULL;
u64 size = end - start + 1;
print_va_list_locked(hdev, va_list);
list_for_each_entry(va_block, va_list, node) {
/* TODO: remove upon matureness */
if (hl_mem_area_crosses_range(start, size, va_block->start,
va_block->end)) {
dev_err(hdev->dev,
"block crossing ranges at start 0x%llx, end 0x%llx\n",
va_block->start, va_block->end);
return -EINVAL;
}
if (va_block->end < start)
res = va_block;
}
va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
if (!va_block)
return -ENOMEM;
va_block->start = start;
va_block->end = end;
va_block->size = size;
if (!res)
list_add(&va_block->node, va_list);
else
list_add(&va_block->node, &res->node);
merge_va_blocks_locked(hdev, va_list, va_block);
print_va_list_locked(hdev, va_list);
return 0;
}
/**
* add_va_block() - wrapper for add_va_block_locked.
* @hdev: pointer to the habanalabs device structure.
* @va_range: pointer to the virtual addresses range object.
* @start: start virtual address.
* @end: end virtual address.
*
* This function does the following:
* - Takes the list lock and calls add_va_block_locked.
*/
static inline int add_va_block(struct hl_device *hdev,
struct hl_va_range *va_range, u64 start, u64 end)
{
int rc;
mutex_lock(&va_range->lock);
rc = add_va_block_locked(hdev, &va_range->list, start, end);
mutex_unlock(&va_range->lock);
return rc;
}
/**
 * is_hint_crossing_range() - check if a hint address crosses the specified reserved range.
* @range_type: virtual space range type.
* @start_addr: start virtual address.
* @size: block size.
* @prop: asic properties structure to retrieve reserved ranges from.
*/
static inline bool is_hint_crossing_range(enum hl_va_range_type range_type,
u64 start_addr, u32 size, struct asic_fixed_properties *prop) {
bool range_cross;
if (range_type == HL_VA_RANGE_TYPE_DRAM)
range_cross =
hl_mem_area_crosses_range(start_addr, size,
prop->hints_dram_reserved_va_range.start_addr,
prop->hints_dram_reserved_va_range.end_addr);
else if (range_type == HL_VA_RANGE_TYPE_HOST)
range_cross =
hl_mem_area_crosses_range(start_addr, size,
prop->hints_host_reserved_va_range.start_addr,
prop->hints_host_reserved_va_range.end_addr);
else
range_cross =
hl_mem_area_crosses_range(start_addr, size,
prop->hints_host_hpage_reserved_va_range.start_addr,
prop->hints_host_hpage_reserved_va_range.end_addr);
return range_cross;
}
/**
* get_va_block() - get a virtual block for the given size and alignment.
*
* @hdev: pointer to the habanalabs device structure.
* @va_range: pointer to the virtual addresses range.
* @size: requested block size.
* @hint_addr: hint for requested address by the user.
* @va_block_align: required alignment of the virtual block start address.
* @range_type: va range type (host, dram)
 * @flags: additional memory flags; currently only HL_MEM_FORCE_HINT is used
*
* This function does the following:
* - Iterate on the virtual block list to find a suitable virtual block for the
* given size, hint address and alignment.
* - Reserve the requested block and update the list.
* - Return the start address of the virtual block.
*/
static u64 get_va_block(struct hl_device *hdev,
struct hl_va_range *va_range,
u64 size, u64 hint_addr, u32 va_block_align,
enum hl_va_range_type range_type,
u32 flags)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
align_mask, reserved_valid_start = 0, reserved_valid_size = 0,
dram_hint_mask = prop->dram_hints_align_mask;
bool add_prev = false;
bool is_align_pow_2 = is_power_of_2(va_range->page_size);
bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
bool force_hint = flags & HL_MEM_FORCE_HINT;
int rc;
if (is_align_pow_2)
align_mask = ~((u64)va_block_align - 1);
else
/*
* with non-power-of-2 range we work only with page granularity
* and the start address is page aligned,
* so no need for alignment checking.
*/
size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
va_range->page_size;
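	/* For DRAM hint addresses, strip the ASIC-specific hint alignment bits
	 * before the page-granularity check below
	 */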
tmp_hint_addr = hint_addr & ~dram_hint_mask;
/* Check if we need to ignore hint address */
if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
(!is_align_pow_2 && is_hint_dram_addr &&
do_div(tmp_hint_addr, va_range->page_size))) {
if (force_hint) {
/* Hint must be respected, so here we just fail */
dev_err(hdev->dev,
"Hint address 0x%llx is not page aligned - cannot be respected\n",
hint_addr);
return 0;
}
dev_dbg(hdev->dev,
"Hint address 0x%llx will be ignored because it is not aligned\n",
hint_addr);
hint_addr = 0;
}
mutex_lock(&va_range->lock);
print_va_list_locked(hdev, &va_range->list);
list_for_each_entry(va_block, &va_range->list, node) {
/* Calc the first possible aligned addr */
valid_start = va_block->start;
if (is_align_pow_2 && (valid_start & (va_block_align - 1))) {
valid_start &= align_mask;
valid_start += va_block_align;
if (valid_start > va_block->end)
continue;
}
valid_size = va_block->end - valid_start + 1;
if (valid_size < size)
continue;
/*
* In case hint address is 0, and hints_range_reservation
* property enabled, then avoid allocating va blocks from the
* range reserved for hint addresses
*/
if (prop->hints_range_reservation && !hint_addr)
if (is_hint_crossing_range(range_type, valid_start,
size, prop))
continue;
/* Pick the minimal length block which has the required size */
if (!new_va_block || (valid_size < reserved_valid_size)) {
new_va_block = va_block;
reserved_valid_start = valid_start;
reserved_valid_size = valid_size;
}
if (hint_addr && hint_addr >= valid_start &&
(hint_addr + size) <= va_block->end) {
new_va_block = va_block;
reserved_valid_start = hint_addr;
reserved_valid_size = valid_size;
break;
}
}
if (!new_va_block) {
dev_err(hdev->dev, "no available va block for size %llu\n",
size);
goto out;
}
if (force_hint && reserved_valid_start != hint_addr) {
/* Hint address must be respected. If we are here - this means
* we could not respect it.
*/
dev_err(hdev->dev,
"Hint address 0x%llx could not be respected\n",
hint_addr);
reserved_valid_start = 0;
goto out;
}
/*
* Check if there is some leftover range due to reserving the new
* va block, then return it to the main virtual addresses list.
*/
if (reserved_valid_start > new_va_block->start) {
prev_start = new_va_block->start;
prev_end = reserved_valid_start - 1;
new_va_block->start = reserved_valid_start;
new_va_block->size = reserved_valid_size;
add_prev = true;
}
if (new_va_block->size > size) {
new_va_block->start += size;
new_va_block->size = new_va_block->end - new_va_block->start + 1;
} else {
list_del(&new_va_block->node);
kfree(new_va_block);
}
if (add_prev) {
rc = add_va_block_locked(hdev, &va_range->list, prev_start, prev_end);
if (rc) {
reserved_valid_start = 0;
goto out;
}
}
print_va_list_locked(hdev, &va_range->list);
out:
mutex_unlock(&va_range->lock);
return reserved_valid_start;
}
/*
* hl_reserve_va_block() - reserve a virtual block of a given size.
* @hdev: pointer to the habanalabs device structure.
* @ctx: current context
* @type: virtual addresses range type.
* @size: requested block size.
* @alignment: required alignment in bytes of the virtual block start address,
* 0 means no alignment.
*
* This function does the following:
* - Iterate on the virtual block list to find a suitable virtual block for the
* given size and alignment.
* - Reserve the requested block and update the list.
* - Return the start address of the virtual block.
*/
u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
enum hl_va_range_type type, u64 size, u32 alignment)
{
return get_va_block(hdev, ctx->va_range[type], size, 0,
max(alignment, ctx->va_range[type]->page_size),
type, 0);
}
/**
* hl_get_va_range_type() - get va_range type for the given address and size.
* @ctx: context to fetch va_range from.
* @address: the start address of the area we want to validate.
* @size: the size in bytes of the area we want to validate.
* @type: returned va_range type.
*
 * Return: 0 if the area is inside a valid range, -EINVAL otherwise.
*/
static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
enum hl_va_range_type *type)
{
int i;
for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) {
if (hl_mem_area_inside_range(address, size,
ctx->va_range[i]->start_addr,
ctx->va_range[i]->end_addr)) {
*type = i;
return 0;
}
}
return -EINVAL;
}
/**
* hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
* @hdev: pointer to the habanalabs device structure
* @ctx: pointer to the context structure.
* @start_addr: start virtual address.
* @size: number of bytes to unreserve.
*
* This function does the following:
* - Takes the list lock and calls add_va_block_locked.
*/
int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
u64 start_addr, u64 size)
{
enum hl_va_range_type type;
int rc;
rc = hl_get_va_range_type(ctx, start_addr, size, &type);
if (rc) {
dev_err(hdev->dev,
"cannot find va_range for va %#llx size %llu",
start_addr, size);
return rc;
}
rc = add_va_block(hdev, ctx->va_range[type], start_addr,
start_addr + size - 1);
if (rc)
dev_warn(hdev->dev,
"add va block failed for vaddr: 0x%llx\n", start_addr);
return rc;
}
/**
* init_phys_pg_pack_from_userptr() - initialize physical page pack from host
* memory
* @ctx: pointer to the context structure.
* @userptr: userptr to initialize from.
* @pphys_pg_pack: result pointer.
* @force_regular_page: tell the function to ignore huge page optimization,
* even if possible. Needed for cases where the device VA
* is allocated before we know the composition of the
* physical pages
*
* This function does the following:
* - Pin the physical pages related to the given virtual block.
* - Create a physical page pack from the physical pages related to the given
* virtual block.
*/
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
struct hl_userptr *userptr,
struct hl_vm_phys_pg_pack **pphys_pg_pack,
bool force_regular_page)
{
u32 npages, page_size = PAGE_SIZE,
huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
struct hl_vm_phys_pg_pack *phys_pg_pack;
bool first = true, is_huge_page_opt;
u64 page_mask, total_npages;
struct scatterlist *sg;
dma_addr_t dma_addr;
int rc, i, j;
phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
if (!phys_pg_pack)
return -ENOMEM;
phys_pg_pack->vm_type = userptr->vm_type;
phys_pg_pack->created_from_userptr = true;
phys_pg_pack->asid = ctx->asid;
atomic_set(&phys_pg_pack->mapping_cnt, 1);
is_huge_page_opt = (force_regular_page ? false : true);
	/* Only if all dma_addrs are aligned to 2MB and their
	 * sizes are at least 2MB can we use huge page mapping.
* We limit the 2MB optimization to this condition,
* since later on we acquire the related VA range as one
* consecutive block.
*/
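	/* Illustrative example: with 4 KB regular pages and a 2 MB huge page,
	 * pgs_in_huge_page is 512, so every SG entry must span a multiple of
	 * 512 pages and start on a 2 MB-aligned DMA address for the
	 * optimization to hold.
	 */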
total_npages = 0;
for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
npages = hl_get_sg_info(sg, &dma_addr);
total_npages += npages;
if ((npages % pgs_in_huge_page) ||
(dma_addr & (huge_page_size - 1)))
is_huge_page_opt = false;
}
if (is_huge_page_opt) {
page_size = huge_page_size;
do_div(total_npages, pgs_in_huge_page);
}
page_mask = ~(((u64) page_size) - 1);
phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
GFP_KERNEL);
if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
rc = -ENOMEM;
goto page_pack_arr_mem_err;
}
phys_pg_pack->npages = total_npages;
phys_pg_pack->page_size = page_size;
phys_pg_pack->total_size = total_npages * page_size;
j = 0;
for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
npages = hl_get_sg_info(sg, &dma_addr);
/* align down to physical page size and save the offset */
if (first) {
first = false;
phys_pg_pack->offset = dma_addr & (page_size - 1);
dma_addr &= page_mask;
}
while (npages) {
phys_pg_pack->pages[j++] = dma_addr;
dma_addr += page_size;
if (is_huge_page_opt)
npages -= pgs_in_huge_page;
else
npages--;
}
}
*pphys_pg_pack = phys_pg_pack;
return 0;
page_pack_arr_mem_err:
kfree(phys_pg_pack);
return rc;
}
/**
 * map_phys_pg_pack() - maps the physical page pack.
* @ctx: pointer to the context structure.
* @vaddr: start address of the virtual area to map from.
* @phys_pg_pack: the pack of physical pages to map to.
*
* This function does the following:
* - Maps each chunk of virtual memory to matching physical chunk.
* - Stores number of successful mappings in the given argument.
* - Returns 0 on success, error code otherwise.
*/
static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_device *hdev = ctx->hdev;
u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
u32 page_size = phys_pg_pack->page_size;
int rc = 0;
bool is_host_addr;
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
paddr = phys_pg_pack->pages[i];
rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size,
(i + 1) == phys_pg_pack->npages);
if (rc) {
dev_err(hdev->dev,
"map failed for handle %u, npages: %llu, mapped: %llu",
phys_pg_pack->handle, phys_pg_pack->npages,
mapped_pg_cnt);
goto err;
}
mapped_pg_cnt++;
next_vaddr += page_size;
}
return 0;
err:
is_host_addr = !hl_is_dram_va(hdev, vaddr);
next_vaddr = vaddr;
for (i = 0 ; i < mapped_pg_cnt ; i++) {
if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
(i + 1) == mapped_pg_cnt))
dev_warn_ratelimited(hdev->dev,
"failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
phys_pg_pack->handle, next_vaddr,
phys_pg_pack->pages[i], page_size);
next_vaddr += page_size;
/*
* unmapping on Palladium can be really long, so avoid a CPU
* soft lockup bug by sleeping a little between unmapping pages
*
* In addition, on host num of pages could be huge,
* because page size could be 4KB, so when unmapping host
* pages sleep every 32K pages to avoid soft lockup
*/
if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
usleep_range(50, 200);
}
return rc;
}
/**
* unmap_phys_pg_pack() - unmaps the physical page pack.
* @ctx: pointer to the context structure.
* @vaddr: start address of the virtual area to unmap.
* @phys_pg_pack: the pack of physical pages to unmap.
*/
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_device *hdev = ctx->hdev;
u64 next_vaddr, i;
bool is_host_addr;
u32 page_size;
is_host_addr = !hl_is_dram_va(hdev, vaddr);
page_size = phys_pg_pack->page_size;
next_vaddr = vaddr;
for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
(i + 1) == phys_pg_pack->npages))
dev_warn_ratelimited(hdev->dev,
"unmap failed for vaddr: 0x%llx\n", next_vaddr);
/*
* unmapping on Palladium can be really long, so avoid a CPU
* soft lockup bug by sleeping a little between unmapping pages
*
* In addition, on host num of pages could be huge,
* because page size could be 4KB, so when unmapping host
* pages sleep every 32K pages to avoid soft lockup
*/
if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
usleep_range(50, 200);
}
}
/**
* map_device_va() - map the given memory.
* @ctx: pointer to the context structure.
* @args: host parameters with handle/host virtual address.
* @device_addr: pointer to result device virtual address.
*
* This function does the following:
* - If given a physical device memory handle, map to a device virtual block
* and return the start address of this block.
* - If given a host virtual address and size, find the related physical pages,
* map a device virtual block to this pages and return the start address of
* this block.
*/
static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device_addr)
{
struct hl_vm_phys_pg_pack *phys_pg_pack;
enum hl_va_range_type va_range_type = 0;
struct hl_device *hdev = ctx->hdev;
struct hl_userptr *userptr = NULL;
u32 handle = 0, va_block_align;
struct hl_vm_hash_node *hnode;
struct hl_vm *vm = &hdev->vm;
struct hl_va_range *va_range;
bool is_userptr, do_prefetch;
u64 ret_vaddr, hint_addr;
enum vm_type *vm_type;
int rc;
/* set map flags */
is_userptr = args->flags & HL_MEM_USERPTR;
do_prefetch = hdev->supports_mmu_prefetch && (args->flags & HL_MEM_PREFETCH);
/* Assume failure */
*device_addr = 0;
if (is_userptr) {
u64 addr = args->map_host.host_virt_addr,
size = args->map_host.mem_size;
u32 page_size = hdev->asic_prop.pmmu.page_size,
huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
rc = dma_map_host_va(hdev, addr, size, &userptr);
if (rc)
return rc;
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
&phys_pg_pack, false);
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
addr);
goto init_page_pack_err;
}
vm_type = (enum vm_type *) userptr;
hint_addr = args->map_host.hint_addr;
handle = phys_pg_pack->handle;
/* get required alignment */
if (phys_pg_pack->page_size == page_size) {
va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
va_range_type = HL_VA_RANGE_TYPE_HOST;
/*
* huge page alignment may be needed in case of regular
* page mapping, depending on the host VA alignment
*/
if (addr & (huge_page_size - 1))
va_block_align = page_size;
else
va_block_align = huge_page_size;
} else {
/*
* huge page alignment is needed in case of huge page
* mapping
*/
va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
va_range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
va_block_align = huge_page_size;
}
} else {
handle = lower_32_bits(args->map_device.handle);
spin_lock(&vm->idr_lock);
phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
if (!phys_pg_pack) {
spin_unlock(&vm->idr_lock);
dev_err(hdev->dev,
"no match for handle %u\n", handle);
return -EINVAL;
}
/* increment now to avoid freeing device memory while mapping */
atomic_inc(&phys_pg_pack->mapping_cnt);
spin_unlock(&vm->idr_lock);
vm_type = (enum vm_type *) phys_pg_pack;
hint_addr = args->map_device.hint_addr;
/* DRAM VA alignment is the same as the MMU page size */
va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
va_range_type = HL_VA_RANGE_TYPE_DRAM;
va_block_align = hdev->asic_prop.dmmu.page_size;
}
/*
* relevant for mapping device physical memory only, as host memory is
* implicitly shared
*/
if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
phys_pg_pack->asid != ctx->asid) {
dev_err(hdev->dev,
"Failed to map memory, handle %u is not shared\n",
handle);
rc = -EPERM;
goto shared_err;
}
hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
if (!hnode) {
rc = -ENOMEM;
goto hnode_err;
}
if (hint_addr && phys_pg_pack->offset) {
if (args->flags & HL_MEM_FORCE_HINT) {
/* Fail if hint must be respected but it can't be */
dev_err(hdev->dev,
"Hint address 0x%llx cannot be respected because source memory is not aligned 0x%x\n",
hint_addr, phys_pg_pack->offset);
rc = -EINVAL;
goto va_block_err;
}
dev_dbg(hdev->dev,
"Hint address 0x%llx will be ignored because source memory is not aligned 0x%x\n",
hint_addr, phys_pg_pack->offset);
}
ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
hint_addr, va_block_align,
va_range_type, args->flags);
if (!ret_vaddr) {
dev_err(hdev->dev, "no available va block for handle %u\n",
handle);
rc = -ENOMEM;
goto va_block_err;
}
mutex_lock(&hdev->mmu_lock);
rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
if (rc) {
dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
mutex_unlock(&hdev->mmu_lock);
goto map_err;
}
rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
ctx->asid, ret_vaddr, phys_pg_pack->total_size);
mutex_unlock(&hdev->mmu_lock);
if (rc)
goto map_err;
/*
	 * Prefetch is done upon the user's request. It is performed in a WQ and so it can run
	 * outside the MMU lock; the operation itself is already protected by the MMU lock
*/
if (do_prefetch) {
rc = hl_mmu_prefetch_cache_range(ctx, *vm_type, ctx->asid, ret_vaddr,
phys_pg_pack->total_size);
if (rc)
goto map_err;
}
ret_vaddr += phys_pg_pack->offset;
hnode->ptr = vm_type;
hnode->vaddr = ret_vaddr;
hnode->handle = is_userptr ? MEM_HANDLE_INVALID : handle;
mutex_lock(&ctx->mem_hash_lock);
hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
mutex_unlock(&ctx->mem_hash_lock);
*device_addr = ret_vaddr;
if (is_userptr)
free_phys_pg_pack(hdev, phys_pg_pack);
return rc;
map_err:
if (add_va_block(hdev, va_range, ret_vaddr,
ret_vaddr + phys_pg_pack->total_size - 1))
dev_warn(hdev->dev,
"release va block failed for handle 0x%x, vaddr: 0x%llx\n",
handle, ret_vaddr);
va_block_err:
kfree(hnode);
hnode_err:
shared_err:
atomic_dec(&phys_pg_pack->mapping_cnt);
if (is_userptr)
free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err:
if (is_userptr)
dma_unmap_host_va(hdev, userptr);
return rc;
}
/* Should be called while the context's mem_hash_lock is taken */
static struct hl_vm_hash_node *get_vm_hash_node_locked(struct hl_ctx *ctx, u64 vaddr)
{
struct hl_vm_hash_node *hnode;
hash_for_each_possible(ctx->mem_hash, hnode, node, vaddr)
if (vaddr == hnode->vaddr)
return hnode;
return NULL;
}
/**
* unmap_device_va() - unmap the given device virtual address.
* @ctx: pointer to the context structure.
* @args: host parameters with device virtual address to unmap.
* @ctx_free: true if in context free flow, false otherwise.
*
* This function does the following:
* - unmap the physical pages related to the given virtual address.
* - return the device virtual block to the virtual block list.
*/
static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
bool ctx_free)
{
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
u64 vaddr = args->unmap.device_virt_addr;
struct asic_fixed_properties *prop;
struct hl_device *hdev = ctx->hdev;
struct hl_userptr *userptr = NULL;
struct hl_vm_hash_node *hnode;
struct hl_va_range *va_range;
enum vm_type *vm_type;
bool is_userptr;
int rc = 0;
prop = &hdev->asic_prop;
/* protect from double entrance */
mutex_lock(&ctx->mem_hash_lock);
hnode = get_vm_hash_node_locked(ctx, vaddr);
if (!hnode) {
mutex_unlock(&ctx->mem_hash_lock);
dev_err(hdev->dev, "unmap failed, no mem hnode for vaddr 0x%llx\n", vaddr);
return -EINVAL;
}
if (hnode->export_cnt) {
mutex_unlock(&ctx->mem_hash_lock);
dev_err(hdev->dev, "failed to unmap %#llx, memory is exported\n", vaddr);
return -EINVAL;
}
hash_del(&hnode->node);
mutex_unlock(&ctx->mem_hash_lock);
vm_type = hnode->ptr;
if (*vm_type == VM_TYPE_USERPTR) {
is_userptr = true;
userptr = hnode->ptr;
rc = init_phys_pg_pack_from_userptr(ctx, userptr, &phys_pg_pack,
false);
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
vaddr);
goto vm_type_err;
}
if (phys_pg_pack->page_size ==
hdev->asic_prop.pmmu.page_size)
va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
else
va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
} else if (*vm_type == VM_TYPE_PHYS_PACK) {
is_userptr = false;
va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
phys_pg_pack = hnode->ptr;
} else {
dev_warn(hdev->dev,
"unmap failed, unknown vm desc for vaddr 0x%llx\n",
vaddr);
rc = -EFAULT;
goto vm_type_err;
}
if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
rc = -EINVAL;
goto mapping_cnt_err;
}
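	/* Align the given VA back to the start of the mapping: for
	 * non-power-of-2 DRAM page sizes the rounding is done relative to the
	 * DRAM base address, otherwise a simple page-size mask is enough.
	 */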
if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size))
vaddr = prop->dram_base_address +
DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address,
phys_pg_pack->page_size) *
phys_pg_pack->page_size;
else
vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
mutex_lock(&hdev->mmu_lock);
unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
/*
* During context free this function is called in a loop to clean all
* the context mappings. Hence the cache invalidation can be called once
* at the loop end rather than for each iteration
*/
if (!ctx_free)
rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr,
phys_pg_pack->total_size);
mutex_unlock(&hdev->mmu_lock);
/*
* If the context is closing we don't need to check for the MMU cache
* invalidation return code and update the VA free list as in this flow
* we invalidate the MMU cache outside of this unmap function and the VA
* free list will be freed anyway.
*/
if (!ctx_free) {
int tmp_rc;
tmp_rc = add_va_block(hdev, va_range, vaddr,
vaddr + phys_pg_pack->total_size - 1);
if (tmp_rc) {
dev_warn(hdev->dev,
"add va block failed for vaddr: 0x%llx\n",
vaddr);
if (!rc)
rc = tmp_rc;
}
}
atomic_dec(&phys_pg_pack->mapping_cnt);
kfree(hnode);
if (is_userptr) {
free_phys_pg_pack(hdev, phys_pg_pack);
dma_unmap_host_va(hdev, userptr);
}
return rc;
mapping_cnt_err:
if (is_userptr)
free_phys_pg_pack(hdev, phys_pg_pack);
vm_type_err:
mutex_lock(&ctx->mem_hash_lock);
hash_add(ctx->mem_hash, &hnode->node, vaddr);
mutex_unlock(&ctx->mem_hash_lock);
return rc;
}
static int map_block(struct hl_device *hdev, u64 address, u64 *handle, u32 *size)
{
u32 block_id;
int rc;
*handle = 0;
if (size)
*size = 0;
rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);
if (rc)
return rc;
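	/* Encode the block id together with the block mmap type and shift by
	 * PAGE_SHIFT, since the handle is later passed as an mmap offset
	 * (vm_pgoff holds it in page units).
	 */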
*handle = block_id | HL_MMAP_TYPE_BLOCK;
*handle <<= PAGE_SHIFT;
return 0;
}
static void hw_block_vm_close(struct vm_area_struct *vma)
{
struct hl_vm_hw_block_list_node *lnode =
(struct hl_vm_hw_block_list_node *) vma->vm_private_data;
struct hl_ctx *ctx = lnode->ctx;
long new_mmap_size;
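	/* A partial munmap only shrinks the tracked size; the list node and the
	 * context reference are released once the whole block has been unmapped.
	 */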
new_mmap_size = lnode->mapped_size - (vma->vm_end - vma->vm_start);
if (new_mmap_size > 0) {
lnode->mapped_size = new_mmap_size;
return;
}
mutex_lock(&ctx->hw_block_list_lock);
list_del(&lnode->node);
mutex_unlock(&ctx->hw_block_list_lock);
hl_ctx_put(ctx);
kfree(lnode);
vma->vm_private_data = NULL;
}
static const struct vm_operations_struct hw_block_vm_ops = {
.close = hw_block_vm_close
};
/**
* hl_hw_block_mmap() - mmap a hw block to user.
* @hpriv: pointer to the private data of the fd
* @vma: pointer to vm_area_struct of the process
*
* Driver increments context reference for every HW block mapped in order
* to prevent user from closing FD without unmapping first
*/
int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
struct hl_vm_hw_block_list_node *lnode;
struct hl_device *hdev = hpriv->hdev;
struct hl_ctx *ctx = hpriv->ctx;
u32 block_id, block_size;
int rc;
/* We use the page offset to hold the block id and thus we need to clear
* it before doing the mmap itself
*/
block_id = vma->vm_pgoff;
vma->vm_pgoff = 0;
/* Driver only allows mapping of a complete HW block */
block_size = vma->vm_end - vma->vm_start;
if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) {
dev_err(hdev->dev,
"user pointer is invalid - 0x%lx\n",
vma->vm_start);
return -EINVAL;
}
lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);
if (!lnode)
return -ENOMEM;
rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
if (rc) {
kfree(lnode);
return rc;
}
hl_ctx_get(ctx);
lnode->ctx = ctx;
lnode->vaddr = vma->vm_start;
lnode->block_size = block_size;
lnode->mapped_size = lnode->block_size;
lnode->id = block_id;
vma->vm_private_data = lnode;
vma->vm_ops = &hw_block_vm_ops;
mutex_lock(&ctx->hw_block_list_lock);
list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
mutex_unlock(&ctx->hw_block_list_lock);
vma->vm_pgoff = block_id;
return 0;
}
static int set_dma_sg(struct scatterlist *sg, u64 bar_address, u64 chunk_size,
struct device *dev, enum dma_data_direction dir)
{
dma_addr_t addr;
int rc;
addr = dma_map_resource(dev, bar_address, chunk_size, dir,
DMA_ATTR_SKIP_CPU_SYNC);
rc = dma_mapping_error(dev, addr);
if (rc)
return rc;
sg_set_page(sg, NULL, chunk_size, 0);
sg_dma_address(sg) = addr;
sg_dma_len(sg) = chunk_size;
return 0;
}
static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
u64 page_size, u64 exported_size,
struct device *dev, enum dma_data_direction dir)
{
u64 chunk_size, bar_address, dma_max_seg_size, cur_size_to_export, cur_npages;
struct asic_fixed_properties *prop;
int rc, i, j, nents, cur_page;
struct scatterlist *sg;
struct sg_table *sgt;
prop = &hdev->asic_prop;
dma_max_seg_size = dma_get_max_seg_size(dev);
/* We would like to align the max segment size to PAGE_SIZE, so the
* SGL will contain aligned addresses that can be easily mapped to
* an MMU
*/
dma_max_seg_size = ALIGN_DOWN(dma_max_seg_size, PAGE_SIZE);
if (dma_max_seg_size < PAGE_SIZE) {
dev_err_ratelimited(hdev->dev,
"dma_max_seg_size %llu can't be smaller than PAGE_SIZE\n",
dma_max_seg_size);
return ERR_PTR(-EINVAL);
}
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
return ERR_PTR(-ENOMEM);
/* remove export size restrictions in case not explicitly defined */
cur_size_to_export = exported_size ? exported_size : (npages * page_size);
/* If the size of each page is larger than the dma max segment size,
* then we can't combine pages and the number of entries in the SGL
* will just be the
* <number of pages> * <chunks of max segment size in each page>
*/
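	/* Illustrative example (hypothetical sizes): a 48 MB device page with a
	 * 32 MB max segment size contributes DIV_ROUND_UP(48M, 32M) = 2 SGL entries.
	 */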
if (page_size > dma_max_seg_size) {
/* we should limit number of pages according to the exported size */
cur_npages = DIV_ROUND_UP_SECTOR_T(cur_size_to_export, page_size);
nents = cur_npages * DIV_ROUND_UP_SECTOR_T(page_size, dma_max_seg_size);
} else {
cur_npages = npages;
/* Get number of non-contiguous chunks */
for (i = 1, nents = 1, chunk_size = page_size ; i < cur_npages ; i++) {
if (pages[i - 1] + page_size != pages[i] ||
chunk_size + page_size > dma_max_seg_size) {
nents++;
chunk_size = page_size;
continue;
}
chunk_size += page_size;
}
}
rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
if (rc)
goto error_free;
cur_page = 0;
if (page_size > dma_max_seg_size) {
u64 size_left, cur_device_address = 0;
size_left = page_size;
/* Need to split each page into the number of chunks of
* dma_max_seg_size
*/
for_each_sgtable_dma_sg(sgt, sg, i) {
if (size_left == page_size)
cur_device_address =
pages[cur_page] - prop->dram_base_address;
else
cur_device_address += dma_max_seg_size;
/* make sure not to export over exported size */
chunk_size = min3(size_left, dma_max_seg_size, cur_size_to_export);
bar_address = hdev->dram_pci_bar_start + cur_device_address;
rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
if (rc)
goto error_unmap;
cur_size_to_export -= chunk_size;
if (size_left > dma_max_seg_size) {
size_left -= dma_max_seg_size;
} else {
cur_page++;
size_left = page_size;
}
}
} else {
/* Merge pages and put them into the scatterlist */
for_each_sgtable_dma_sg(sgt, sg, i) {
chunk_size = page_size;
for (j = cur_page + 1 ; j < cur_npages ; j++) {
if (pages[j - 1] + page_size != pages[j] ||
chunk_size + page_size > dma_max_seg_size)
break;
chunk_size += page_size;
}
bar_address = hdev->dram_pci_bar_start +
(pages[cur_page] - prop->dram_base_address);
/* make sure not to export over exported size */
chunk_size = min(chunk_size, cur_size_to_export);
rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
if (rc)
goto error_unmap;
cur_size_to_export -= chunk_size;
cur_page = j;
}
}
/* Because we are not going to include a CPU list we want to have some
* chance that other users will detect this by setting the orig_nents
* to 0 and using only nents (length of DMA list) when going over the
* sgl
*/
sgt->orig_nents = 0;
return sgt;
error_unmap:
for_each_sgtable_dma_sg(sgt, sg, i) {
if (!sg_dma_len(sg))
continue;
dma_unmap_resource(dev, sg_dma_address(sg),
sg_dma_len(sg), dir,
DMA_ATTR_SKIP_CPU_SYNC);
}
sg_free_table(sgt);
error_free:
kfree(sgt);
return ERR_PTR(rc);
}
static int hl_dmabuf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct hl_dmabuf_priv *hl_dmabuf;
struct hl_device *hdev;
int rc;
hl_dmabuf = dmabuf->priv;
hdev = hl_dmabuf->ctx->hdev;
rc = pci_p2pdma_distance(hdev->pdev, attachment->dev, true);
if (rc < 0)
attachment->peer2peer = false;
return 0;
}
static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
struct dma_buf *dma_buf = attachment->dmabuf;
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct hl_dmabuf_priv *hl_dmabuf;
struct hl_device *hdev;
struct sg_table *sgt;
hl_dmabuf = dma_buf->priv;
hdev = hl_dmabuf->ctx->hdev;
phys_pg_pack = hl_dmabuf->phys_pg_pack;
if (!attachment->peer2peer) {
dev_dbg(hdev->dev, "Failed to map dmabuf because p2p is disabled\n");
return ERR_PTR(-EPERM);
}
if (phys_pg_pack)
sgt = alloc_sgt_from_device_pages(hdev,
phys_pg_pack->pages,
phys_pg_pack->npages,
phys_pg_pack->page_size,
phys_pg_pack->exported_size,
attachment->dev,
dir);
else
sgt = alloc_sgt_from_device_pages(hdev,
&hl_dmabuf->device_address,
1,
hl_dmabuf->dmabuf->size,
0,
attachment->dev,
dir);
if (IS_ERR(sgt))
dev_err(hdev->dev, "failed (%ld) to initialize sgt for dmabuf\n", PTR_ERR(sgt));
return sgt;
}
static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
/* The memory behind the dma-buf has *always* resided on the device itself, i.e. it lives
* only in the 'device' domain (after all, it maps a PCI bar address which points to the
* device memory).
*
* Therefore, it was never in the 'CPU' domain and hence, there is no need to perform
* a sync of the memory to the CPU's cache, as it never resided inside that cache.
*/
for_each_sgtable_dma_sg(sgt, sg, i)
dma_unmap_resource(attachment->dev, sg_dma_address(sg),
sg_dma_len(sg), dir,
DMA_ATTR_SKIP_CPU_SYNC);
/* Need to restore orig_nents because sg_free_table use that field */
sgt->orig_nents = sgt->nents;
sg_free_table(sgt);
kfree(sgt);
}
static struct hl_vm_hash_node *memhash_node_export_get(struct hl_ctx *ctx, u64 addr)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm_hash_node *hnode;
/* get the memory handle */
mutex_lock(&ctx->mem_hash_lock);
hnode = get_vm_hash_node_locked(ctx, addr);
if (!hnode) {
mutex_unlock(&ctx->mem_hash_lock);
dev_dbg(hdev->dev, "map address %#llx not found\n", addr);
return ERR_PTR(-EINVAL);
}
if (upper_32_bits(hnode->handle)) {
mutex_unlock(&ctx->mem_hash_lock);
dev_dbg(hdev->dev, "invalid handle %#llx for map address %#llx\n",
hnode->handle, addr);
return ERR_PTR(-EINVAL);
}
/*
* node found, increase export count so this memory cannot be unmapped
* and the hash node cannot be deleted.
*/
hnode->export_cnt++;
mutex_unlock(&ctx->mem_hash_lock);
return hnode;
}
static void memhash_node_export_put(struct hl_ctx *ctx, struct hl_vm_hash_node *hnode)
{
mutex_lock(&ctx->mem_hash_lock);
hnode->export_cnt--;
mutex_unlock(&ctx->mem_hash_lock);
}
static void hl_release_dmabuf(struct dma_buf *dmabuf)
{
struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
struct hl_ctx *ctx;
if (!hl_dmabuf)
return;
ctx = hl_dmabuf->ctx;
if (hl_dmabuf->memhash_hnode)
memhash_node_export_put(ctx, hl_dmabuf->memhash_hnode);
atomic_dec(&ctx->hdev->dmabuf_export_cnt);
hl_ctx_put(ctx);
/* Paired with get_file() in export_dmabuf() */
fput(ctx->hpriv->filp);
kfree(hl_dmabuf);
}
static const struct dma_buf_ops habanalabs_dmabuf_ops = {
.attach = hl_dmabuf_attach,
.map_dma_buf = hl_map_dmabuf,
.unmap_dma_buf = hl_unmap_dmabuf,
.release = hl_release_dmabuf,
};
static int export_dmabuf(struct hl_ctx *ctx,
struct hl_dmabuf_priv *hl_dmabuf,
u64 total_size, int flags, int *dmabuf_fd)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct hl_device *hdev = ctx->hdev;
int rc, fd;
exp_info.ops = &habanalabs_dmabuf_ops;
exp_info.size = total_size;
exp_info.flags = flags;
exp_info.priv = hl_dmabuf;
hl_dmabuf->dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(hl_dmabuf->dmabuf)) {
dev_err(hdev->dev, "failed to export dma-buf\n");
return PTR_ERR(hl_dmabuf->dmabuf);
}
fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
if (fd < 0) {
dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
rc = fd;
goto err_dma_buf_put;
}
hl_dmabuf->ctx = ctx;
hl_ctx_get(hl_dmabuf->ctx);
atomic_inc(&ctx->hdev->dmabuf_export_cnt);
/* Get compute device file to enforce release order, such that all exported dma-buf will be
* released first and only then the compute device.
* Paired with fput() in hl_release_dmabuf().
*/
get_file(ctx->hpriv->filp);
*dmabuf_fd = fd;
return 0;
err_dma_buf_put:
hl_dmabuf->dmabuf->priv = NULL;
dma_buf_put(hl_dmabuf->dmabuf);
return rc;
}
static int validate_export_params_common(struct hl_device *hdev, u64 device_addr, u64 size)
{
if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
dev_dbg(hdev->dev,
"exported device memory address 0x%llx should be aligned to 0x%lx\n",
device_addr, PAGE_SIZE);
return -EINVAL;
}
if (size < PAGE_SIZE) {
dev_dbg(hdev->dev,
"exported device memory size %llu should be equal to or greater than %lu\n",
size, PAGE_SIZE);
return -EINVAL;
}
return 0;
}
static int validate_export_params_no_mmu(struct hl_device *hdev, u64 device_addr, u64 size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 bar_address;
int rc;
rc = validate_export_params_common(hdev, device_addr, size);
if (rc)
return rc;
if (device_addr < prop->dram_user_base_address ||
(device_addr + size) > prop->dram_end_address ||
(device_addr + size) < device_addr) {
dev_dbg(hdev->dev,
"DRAM memory range 0x%llx (+0x%llx) is outside of DRAM boundaries\n",
device_addr, size);
return -EINVAL;
}
bar_address = hdev->dram_pci_bar_start + (device_addr - prop->dram_base_address);
if ((bar_address + size) > (hdev->dram_pci_bar_start + prop->dram_pci_bar_size) ||
(bar_address + size) < bar_address) {
dev_dbg(hdev->dev,
"DRAM memory range 0x%llx (+0x%llx) is outside of PCI BAR boundaries\n",
device_addr, size);
return -EINVAL;
}
return 0;
}
static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 size, u64 offset,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 bar_address;
int i, rc;
rc = validate_export_params_common(hdev, device_addr, size);
if (rc)
return rc;
if ((offset + size) > phys_pg_pack->total_size) {
dev_dbg(hdev->dev, "offset %#llx and size %#llx exceed total map size %#llx\n",
offset, size, phys_pg_pack->total_size);
return -EINVAL;
}
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
bar_address = hdev->dram_pci_bar_start +
(phys_pg_pack->pages[i] - prop->dram_base_address);
if ((bar_address + phys_pg_pack->page_size) >
(hdev->dram_pci_bar_start + prop->dram_pci_bar_size) ||
(bar_address + phys_pg_pack->page_size) < bar_address) {
dev_dbg(hdev->dev,
"DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
phys_pg_pack->pages[i],
phys_pg_pack->page_size);
return -EINVAL;
}
}
return 0;
}
static struct hl_vm_phys_pg_pack *get_phys_pg_pack_from_hash_node(struct hl_device *hdev,
struct hl_vm_hash_node *hnode)
{
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct hl_vm *vm = &hdev->vm;
spin_lock(&vm->idr_lock);
phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) hnode->handle);
if (!phys_pg_pack) {
spin_unlock(&vm->idr_lock);
dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) hnode->handle);
return ERR_PTR(-EINVAL);
}
spin_unlock(&vm->idr_lock);
if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) {
dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", hnode->handle);
return ERR_PTR(-EINVAL);
}
return phys_pg_pack;
}
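/*
 * Illustrative sketch only, not part of the driver: the allocation-side
 * counterpart of the idr_find() above, i.e. how a new phys_pg_pack typically
 * receives a handle in vm->phys_pg_pack_handles. The real code lives in the
 * allocation path; the starting id and the GFP flag here are assumptions.
 */
static int example_alloc_pack_handle(struct hl_vm *vm, struct hl_vm_phys_pg_pack *pack)
{
	int handle;
	spin_lock(&vm->idr_lock);
	handle = idr_alloc(&vm->phys_pg_pack_handles, pack, 1, 0, GFP_ATOMIC);
	spin_unlock(&vm->idr_lock);
	/* Negative errno on failure, otherwise the handle handed back to the caller */
	return handle;
}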
/**
* export_dmabuf_from_addr() - export a dma-buf object for the given memory
* address and size.
* @ctx: pointer to the context structure.
* @addr: device address.
* @size: size of device memory to export.
* @offset: the offset into the buffer from which to start exporting
* @flags: DMA-BUF file/FD flags.
* @dmabuf_fd: pointer to result FD that represents the dma-buf object.
*
* Create and export a dma-buf object for an existing memory allocation inside
* the device memory, and return a FD which is associated with the dma-buf
* object.
*
* Return: 0 on success, non-zero for failure.
*/
static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 addr, u64 size, u64 offset,
int flags, int *dmabuf_fd)
{
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_vm_hash_node *hnode = NULL;
struct asic_fixed_properties *prop;
struct hl_dmabuf_priv *hl_dmabuf;
struct hl_device *hdev;
u64 export_addr;
int rc;
hdev = ctx->hdev;
prop = &hdev->asic_prop;
/* offset must be 0 in devices without virtual memory support */
if (!prop->dram_supports_virtual_memory && offset) {
dev_dbg(hdev->dev, "offset is not allowed in device without virtual memory\n");
return -EINVAL;
}
export_addr = addr + offset;
hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
if (!hl_dmabuf)
return -ENOMEM;
if (prop->dram_supports_virtual_memory) {
hnode = memhash_node_export_get(ctx, addr);
if (IS_ERR(hnode)) {
rc = PTR_ERR(hnode);
goto err_free_dmabuf_wrapper;
}
phys_pg_pack = get_phys_pg_pack_from_hash_node(hdev, hnode);
if (IS_ERR(phys_pg_pack)) {
rc = PTR_ERR(phys_pg_pack);
goto dec_memhash_export_cnt;
}
rc = validate_export_params(hdev, export_addr, size, offset, phys_pg_pack);
if (rc)
goto dec_memhash_export_cnt;
phys_pg_pack->exported_size = size;
hl_dmabuf->phys_pg_pack = phys_pg_pack;
hl_dmabuf->memhash_hnode = hnode;
} else {
rc = validate_export_params_no_mmu(hdev, export_addr, size);
if (rc)
goto err_free_dmabuf_wrapper;
}
hl_dmabuf->device_address = export_addr;
rc = export_dmabuf(ctx, hl_dmabuf, size, flags, dmabuf_fd);
if (rc)
goto dec_memhash_export_cnt;
return 0;
dec_memhash_export_cnt:
if (prop->dram_supports_virtual_memory)
memhash_node_export_put(ctx, hnode);
err_free_dmabuf_wrapper:
kfree(hl_dmabuf);
return rc;
}
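/*
 * Illustrative sketch only, not part of the driver: a minimal peer-device
 * importer for the FD produced by export_dmabuf_from_addr(). It assumes the
 * importer already has a suitable 'importer_dev' and that p2p access is
 * possible (otherwise hl_dmabuf_attach() rejects the attachment). Exact
 * dma-buf helper names differ between kernel versions; the _unlocked
 * variants are assumed here.
 */
static int example_peer_access_exported_fd(struct device *importer_dev, int fd)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dmabuf;
	struct sg_table *sgt;
	int rc = 0;
	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);
	attach = dma_buf_attach(dmabuf, importer_dev);
	if (IS_ERR(attach)) {
		rc = PTR_ERR(attach);
		goto put_buf;
	}
	/* This ends up in hl_map_dmabuf() which builds the sg_table */
	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		rc = PTR_ERR(sgt);
		goto detach;
	}
	/* ... program the peer DMA engine using sg_dma_address()/sg_dma_len() ... */
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
detach:
	dma_buf_detach(dmabuf, attach);
put_buf:
	dma_buf_put(dmabuf);
	return rc;
}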
static void ts_buff_release(struct hl_mmap_mem_buf *buf)
{
struct hl_ts_buff *ts_buff = buf->private;
vfree(ts_buff->kernel_buff_address);
vfree(ts_buff->user_buff_address);
kfree(ts_buff);
}
static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args)
{
struct hl_ts_buff *ts_buff = buf->private;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE);
return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
}
static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
struct hl_ts_buff *ts_buff = NULL;
u32 num_elements;
size_t size;
void *p;
num_elements = *(u32 *)args;
ts_buff = kzalloc(sizeof(*ts_buff), gfp);
if (!ts_buff)
return -ENOMEM;
/* Allocate the user buffer */
size = num_elements * sizeof(u64);
p = vmalloc_user(size);
if (!p)
goto free_mem;
ts_buff->user_buff_address = p;
buf->mappable_size = size;
/* Allocate the internal kernel buffer */
size = num_elements * sizeof(struct hl_user_pending_interrupt);
p = vzalloc(size);
if (!p)
goto free_user_buff;
ts_buff->kernel_buff_address = p;
ts_buff->kernel_buff_size = size;
buf->private = ts_buff;
return 0;
free_user_buff:
vfree(ts_buff->user_buff_address);
free_mem:
kfree(ts_buff);
return -ENOMEM;
}
static struct hl_mmap_mem_buf_behavior hl_ts_behavior = {
.topic = "TS",
.mem_id = HL_MMAP_TYPE_TS_BUFF,
.mmap = hl_ts_mmap,
.alloc = hl_ts_alloc_buf,
.release = ts_buff_release,
};
/**
* allocate_timestamps_buffers() - allocate timestamps buffers
* This function allocates a timestamps buffer that will later be mapped to the
* user so it can read the timestamps.
* In addition, it allocates an extra buffer for registration management:
* because registration must not fail on an out-of-memory condition, a pool of
* user interrupt nodes is prepared up front, and registration picks nodes from
* this pool instead of allocating them dynamically. It also adds a node to the
* mapping hash which is used to map the user timestamps buffer to the internal
* kernel timestamps buffer.
* @hpriv: pointer to the private data of the fd
* @args: ioctl input
* @handle: user timestamp buffer handle as an output
*/
static int allocate_timestamps_buffers(struct hl_fpriv *hpriv, struct hl_mem_in *args, u64 *handle)
{
struct hl_mem_mgr *mmg = &hpriv->mem_mgr;
struct hl_mmap_mem_buf *buf;
if (args->num_of_elements > TS_MAX_ELEMENTS_NUM) {
dev_err(mmg->dev, "Num of elements exceeds Max allowed number (0x%x > 0x%x)\n",
args->num_of_elements, TS_MAX_ELEMENTS_NUM);
return -EINVAL;
}
buf = hl_mmap_mem_buf_alloc(mmg, &hl_ts_behavior, GFP_KERNEL, &args->num_of_elements);
if (!buf)
return -ENOMEM;
*handle = buf->handle;
return 0;
}
int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
{
enum hl_device_status status;
union hl_mem_args *args = data;
struct hl_device *hdev = hpriv->hdev;
struct hl_ctx *ctx = hpriv->ctx;
u64 block_handle, device_addr = 0;
u32 handle = 0, block_size;
int rc, dmabuf_fd = -EBADF;
if (!hl_device_operational(hdev, &status)) {
dev_dbg_ratelimited(hdev->dev,
"Device is %s. Can't execute MEMORY IOCTL\n",
hdev->status[status]);
return -EBUSY;
}
switch (args->in.op) {
case HL_MEM_OP_ALLOC:
if (args->in.alloc.mem_size == 0) {
dev_err(hdev->dev,
"alloc size must be larger than 0\n");
rc = -EINVAL;
goto out;
}
/* If DRAM does not support virtual memory the driver won't
* handle the allocation/freeing of that memory. However, for
* system administration/monitoring purposes, the driver will
* keep track of the amount of DRAM memory that is allocated
* and freed by the user. Because this code totally relies on
* the user's input, the driver can't ensure the validity
* of this accounting.
*/
if (!hdev->asic_prop.dram_supports_virtual_memory) {
atomic64_add(args->in.alloc.mem_size,
&ctx->dram_phys_mem);
atomic64_add(args->in.alloc.mem_size,
&hdev->dram_used_mem);
dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
rc = 0;
memset(args, 0, sizeof(*args));
args->out.handle = 0;
goto out;
}
rc = alloc_device_memory(ctx, &args->in, &handle);
memset(args, 0, sizeof(*args));
args->out.handle = (__u64) handle;
break;
case HL_MEM_OP_FREE:
/* If DRAM does not support virtual memory the driver won't
* handle the allocation/freeing of that memory. However, for
* system administration/monitoring purposes, the driver will
* keep track of the amount of DRAM memory that is allocated
* and freed by the user. Because this code totally relies on
* the user's input, the driver can't ensure the validity
* of this accounting.
*/
if (!hdev->asic_prop.dram_supports_virtual_memory) {
atomic64_sub(args->in.alloc.mem_size,
&ctx->dram_phys_mem);
atomic64_sub(args->in.alloc.mem_size,
&hdev->dram_used_mem);
dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
rc = 0;
goto out;
}
rc = free_device_memory(ctx, &args->in);
break;
case HL_MEM_OP_MAP:
rc = map_device_va(ctx, &args->in, &device_addr);
memset(args, 0, sizeof(*args));
args->out.device_virt_addr = device_addr;
break;
case HL_MEM_OP_UNMAP:
rc = unmap_device_va(ctx, &args->in, false);
break;
case HL_MEM_OP_MAP_BLOCK:
rc = map_block(hdev, args->in.map_block.block_addr,
&block_handle, &block_size);
args->out.block_handle = block_handle;
args->out.block_size = block_size;
break;
case HL_MEM_OP_EXPORT_DMABUF_FD:
rc = export_dmabuf_from_addr(ctx,
args->in.export_dmabuf_fd.addr,
args->in.export_dmabuf_fd.mem_size,
args->in.export_dmabuf_fd.offset,
args->in.flags,
&dmabuf_fd);
memset(args, 0, sizeof(*args));
args->out.fd = dmabuf_fd;
break;
case HL_MEM_OP_TS_ALLOC:
rc = allocate_timestamps_buffers(hpriv, &args->in, &args->out.handle);
break;
default:
dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
rc = -EINVAL;
break;
}
out:
return rc;
}
static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
u32 npages, u64 start, u32 offset,
struct hl_userptr *userptr)
{
int rc;
if (!access_ok((void __user *) (uintptr_t) addr, size)) {
dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
return -EFAULT;
}
userptr->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!userptr->pages)
return -ENOMEM;
rc = pin_user_pages_fast(start, npages, FOLL_WRITE | FOLL_LONGTERM,
userptr->pages);
if (rc != npages) {
dev_err(hdev->dev,
"Failed (%d) to pin host memory with user ptr 0x%llx, size 0x%llx, npages %d\n",
rc, addr, size, npages);
if (rc < 0)
goto destroy_pages;
npages = rc;
rc = -EFAULT;
goto put_pages;
}
userptr->npages = npages;
rc = sg_alloc_table_from_pages(userptr->sgt,
userptr->pages,
npages, offset, size, GFP_KERNEL);
if (rc < 0) {
dev_err(hdev->dev, "failed to create SG table from pages\n");
goto put_pages;
}
return 0;
put_pages:
unpin_user_pages(userptr->pages, npages);
destroy_pages:
kvfree(userptr->pages);
return rc;
}
/**
* hl_pin_host_memory() - pins a chunk of host memory.
* @hdev: pointer to the habanalabs device structure.
* @addr: the host virtual address of the memory area.
* @size: the size of the memory area.
* @userptr: pointer to hl_userptr structure.
*
* This function does the following:
* - Pins the physical pages.
* - Creates an SG list from those pages.
*/
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
struct hl_userptr *userptr)
{
u64 start, end;
u32 npages, offset;
int rc;
if (!size) {
dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
return -EINVAL;
}
/*
* If the combination of the address and size requested for this memory
* region causes an integer overflow, return error.
*/
if (((addr + size) < addr) ||
PAGE_ALIGN(addr + size) < (addr + size)) {
dev_err(hdev->dev,
"user pointer 0x%llx + %llu causes integer overflow\n",
addr, size);
return -EINVAL;
}
userptr->pid = current->pid;
userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);
if (!userptr->sgt)
return -ENOMEM;
start = addr & PAGE_MASK;
offset = addr & ~PAGE_MASK;
end = PAGE_ALIGN(addr + size);
npages = (end - start) >> PAGE_SHIFT;
userptr->size = size;
userptr->addr = addr;
userptr->dma_mapped = false;
INIT_LIST_HEAD(&userptr->job_node);
rc = get_user_memory(hdev, addr, size, npages, start, offset,
userptr);
if (rc) {
dev_err(hdev->dev,
"failed to get user memory for address 0x%llx\n",
addr);
goto free_sgt;
}
hl_debugfs_add_userptr(hdev, userptr);
return 0;
free_sgt:
kfree(userptr->sgt);
return rc;
}
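/*
 * Illustrative sketch only, not part of the driver: the typical lifecycle
 * around hl_pin_host_memory()/hl_unpin_host_memory(). The DMA-mapping step
 * that normally happens in between (through the ASIC callbacks) is omitted,
 * and the function name is an assumption.
 */
static int example_pin_then_unpin(struct hl_device *hdev, u64 user_addr, u64 size)
{
	struct hl_userptr *userptr;
	int rc;
	userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
	if (!userptr)
		return -ENOMEM;
	rc = hl_pin_host_memory(hdev, user_addr, size, userptr);
	if (rc)
		goto free_userptr;
	/* ... map userptr->sgt for DMA and hand it to a job ... */
	hl_unpin_host_memory(hdev, userptr);
free_userptr:
	kfree(userptr);
	return rc;
}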
/*
* hl_unpin_host_memory - unpins a chunk of host memory.
* @hdev: pointer to the habanalabs device structure
* @userptr: pointer to hl_userptr structure
*
* This function does the following:
* - Unpins the physical pages related to the host memory
* - Frees the SG list
*/
void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
hl_debugfs_remove_userptr(hdev, userptr);
if (userptr->dma_mapped)
hdev->asic_funcs->hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir);
unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
kvfree(userptr->pages);
list_del(&userptr->job_node);
sg_free_table(userptr->sgt);
kfree(userptr->sgt);
}
/**
* hl_userptr_delete_list() - clear userptr list.
* @hdev: pointer to the habanalabs device structure.
* @userptr_list: pointer to the list to clear.
*
* This function does the following:
* - Iterates over the list and unpins the host memory and frees the userptr
* structure.
*/
void hl_userptr_delete_list(struct hl_device *hdev,
struct list_head *userptr_list)
{
struct hl_userptr *userptr, *tmp;
list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
hl_unpin_host_memory(hdev, userptr);
kfree(userptr);
}
INIT_LIST_HEAD(userptr_list);
}
/**
* hl_userptr_is_pinned() - returns whether the given userptr is pinned.
* @hdev: pointer to the habanalabs device structure.
* @addr: user address to check.
* @size: user block size to check.
* @userptr_list: pointer to the list to search in.
* @userptr: pointer to userptr to check.
*
* This function does the following:
* - Iterates over the list and checks if the given userptr is in it, which
*   means it is pinned. If so, returns true, otherwise returns false.
*/
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
u32 size, struct list_head *userptr_list,
struct hl_userptr **userptr)
{
list_for_each_entry((*userptr), userptr_list, job_node) {
if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
return true;
}
return false;
}
/**
* va_range_init() - initialize virtual addresses range.
* @hdev: pointer to the habanalabs device structure.
* @va_ranges: pointer to va_ranges array.
* @range_type: virtual address range type.
* @start: range start address, inclusive.
* @end: range end address, inclusive.
* @page_size: page size for this va_range.
*
* This function does the following:
* - Initializes the virtual addresses list of the given range with the given
* addresses.
*/
static int va_range_init(struct hl_device *hdev, struct hl_va_range **va_ranges,
enum hl_va_range_type range_type, u64 start,
u64 end, u32 page_size)
{
struct hl_va_range *va_range = va_ranges[range_type];
int rc;
INIT_LIST_HEAD(&va_range->list);
/*
* PAGE_SIZE alignment
* it is the caller's responsibility to align the addresses if the
* page size is not a power of 2
*/
if (is_power_of_2(page_size)) {
start = round_up(start, page_size);
/*
* The end of the range is inclusive, hence we need to align it
* to the end of the last full page in the range. For example if
* end = 0x3ff5 with page size 0x1000, we need to align it to
* 0x2fff. The remaining 0xff5 bytes do not form a full page.
*/
end = round_down(end + 1, page_size) - 1;
}
if (start >= end) {
dev_err(hdev->dev, "too small vm range for va list\n");
return -EFAULT;
}
rc = add_va_block(hdev, va_range, start, end);
if (rc) {
dev_err(hdev->dev, "Failed to init host va list\n");
return rc;
}
va_range->start_addr = start;
va_range->end_addr = end;
va_range->page_size = page_size;
return 0;
}
/**
* va_range_fini() - clear a virtual addresses range.
* @hdev: pointer to the habanalabs structure.
* @va_range: pointer to virtual addresses range.
*
* This function does the following:
* - Frees the virtual addresses block list and its lock.
*/
static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
{
mutex_lock(&va_range->lock);
clear_va_list_locked(hdev, &va_range->list);
mutex_unlock(&va_range->lock);
mutex_destroy(&va_range->lock);
kfree(va_range);
}
/**
* vm_ctx_init_with_ranges() - initialize virtual memory for context.
* @ctx: pointer to the habanalabs context structure.
* @host_range_start: host virtual addresses range start.
* @host_range_end: host virtual addresses range end.
* @host_page_size: host page size.
* @host_huge_range_start: host virtual addresses range start for memory
* allocated with huge pages.
* @host_huge_range_end: host virtual addresses range end for memory allocated
* with huge pages.
* @host_huge_page_size: host huge page size.
* @dram_range_start: dram virtual addresses range start.
* @dram_range_end: dram virtual addresses range end.
* @dram_page_size: dram page size.
*
* This function initializes the following:
* - MMU for context.
* - Virtual address to area descriptor hashtable.
* - Virtual block list of available virtual memory.
*/
static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
u64 host_range_start,
u64 host_range_end,
u32 host_page_size,
u64 host_huge_range_start,
u64 host_huge_range_end,
u32 host_huge_page_size,
u64 dram_range_start,
u64 dram_range_end,
u32 dram_page_size)
{
struct hl_device *hdev = ctx->hdev;
int i, rc;
for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) {
ctx->va_range[i] =
kzalloc(sizeof(struct hl_va_range), GFP_KERNEL);
if (!ctx->va_range[i]) {
rc = -ENOMEM;
goto free_va_range;
}
}
rc = hl_mmu_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
goto free_va_range;
}
mutex_init(&ctx->mem_hash_lock);
hash_init(ctx->mem_hash);
mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_HOST,
host_range_start, host_range_end, host_page_size);
if (rc) {
dev_err(hdev->dev, "failed to init host vm range\n");
goto mmu_ctx_fini;
}
if (hdev->pmmu_huge_range) {
mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
rc = va_range_init(hdev,
ctx->va_range, HL_VA_RANGE_TYPE_HOST_HUGE,
host_huge_range_start, host_huge_range_end,
host_huge_page_size);
if (rc) {
dev_err(hdev->dev,
"failed to init host huge vm range\n");
goto clear_host_va_range;
}
} else {
kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] =
ctx->va_range[HL_VA_RANGE_TYPE_HOST];
}
mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_DRAM,
dram_range_start, dram_range_end, dram_page_size);
if (rc) {
dev_err(hdev->dev, "failed to init dram vm range\n");
goto clear_host_huge_va_range;
}
hl_debugfs_add_ctx_mem_hash(hdev, ctx);
return 0;
clear_host_huge_va_range:
mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
if (hdev->pmmu_huge_range) {
mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
clear_va_list_locked(hdev,
&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list);
mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
}
clear_host_va_range:
if (hdev->pmmu_huge_range)
mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list);
mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
mmu_ctx_fini:
mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
mutex_destroy(&ctx->mem_hash_lock);
hl_mmu_ctx_fini(ctx);
free_va_range:
for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++)
kfree(ctx->va_range[i]);
return rc;
}
int hl_vm_ctx_init(struct hl_ctx *ctx)
{
struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
u64 host_range_start, host_range_end, host_huge_range_start,
host_huge_range_end, dram_range_start, dram_range_end;
u32 host_page_size, host_huge_page_size, dram_page_size;
atomic64_set(&ctx->dram_phys_mem, 0);
/*
* In case of DRAM mapping, the returned address is the physical
* address of the memory related to the given handle.
*/
if (ctx->hdev->mmu_disable)
return 0;
dram_range_start = prop->dmmu.start_addr;
dram_range_end = prop->dmmu.end_addr - 1;
dram_page_size = prop->dram_page_size ?
prop->dram_page_size : prop->dmmu.page_size;
host_range_start = prop->pmmu.start_addr;
host_range_end = prop->pmmu.end_addr - 1;
host_page_size = prop->pmmu.page_size;
host_huge_range_start = prop->pmmu_huge.start_addr;
host_huge_range_end = prop->pmmu_huge.end_addr - 1;
host_huge_page_size = prop->pmmu_huge.page_size;
return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
host_page_size, host_huge_range_start,
host_huge_range_end, host_huge_page_size,
dram_range_start, dram_range_end, dram_page_size);
}
/**
* hl_vm_ctx_fini() - virtual memory teardown of context.
* @ctx: pointer to the habanalabs context structure.
*
* This function performs teardown of the following:
* - Virtual block list of available virtual memory.
* - Virtual address to area descriptor hashtable.
* - MMU for context.
*
* In addition this function does the following:
* - Unmaps the existing hashtable nodes if the hashtable is not empty. The
* hashtable should be empty as no valid mappings should exist at this
* point.
* - Frees any existing physical page list from the idr which relates to the
* current context asid.
* - This function checks the virtual block list for correctness. At this point
* the list should contain one element which describes the whole virtual
* memory range of the context. Otherwise, a warning is printed.
*/
void hl_vm_ctx_fini(struct hl_ctx *ctx)
{
struct hl_vm_phys_pg_pack *phys_pg_list, *tmp_phys_node;
struct hl_device *hdev = ctx->hdev;
struct hl_vm_hash_node *hnode;
struct hl_vm *vm = &hdev->vm;
struct hlist_node *tmp_node;
struct list_head free_list;
struct hl_mem_in args;
int i;
if (hdev->mmu_disable)
return;
hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
/*
* Clearly something went wrong on hard reset so no point in printing
* another side effect error
*/
if (!hdev->reset_info.hard_reset_pending && !hash_empty(ctx->mem_hash))
dev_dbg(hdev->dev,
"user released device without removing its memory mappings\n");
hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
dev_dbg(hdev->dev,
"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
hnode->vaddr, ctx->asid);
args.unmap.device_virt_addr = hnode->vaddr;
unmap_device_va(ctx, &args, true);
}
mutex_lock(&hdev->mmu_lock);
/* invalidate the cache once after the unmapping loop */
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
hl_mmu_invalidate_cache(hdev, true, MMU_OP_PHYS_PACK);
mutex_unlock(&hdev->mmu_lock);
INIT_LIST_HEAD(&free_list);
spin_lock(&vm->idr_lock);
idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
if (phys_pg_list->asid == ctx->asid) {
dev_dbg(hdev->dev,
"page list 0x%px of asid %d is still alive\n",
phys_pg_list, ctx->asid);
atomic64_sub(phys_pg_list->total_size, &hdev->dram_used_mem);
idr_remove(&vm->phys_pg_pack_handles, i);
list_add(&phys_pg_list->node, &free_list);
}
spin_unlock(&vm->idr_lock);
list_for_each_entry_safe(phys_pg_list, tmp_phys_node, &free_list, node)
free_phys_pg_pack(hdev, phys_pg_list);
va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);
if (hdev->pmmu_huge_range)
va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
mutex_destroy(&ctx->mem_hash_lock);
hl_mmu_ctx_fini(ctx);
/* In this case we need to clear the global accounting of DRAM usage
* because the user notifies us about allocations. If the user is gone, all
* DRAM is available
*/
if (ctx->asid != HL_KERNEL_ASID_ID &&
!hdev->asic_prop.dram_supports_virtual_memory)
atomic64_set(&hdev->dram_used_mem, 0);
}
/**
* hl_vm_init() - initialize virtual memory module.
* @hdev: pointer to the habanalabs device structure.
*
* This function initializes the following:
* - MMU module.
* - DRAM physical pages pool of 2MB.
* - Idr for device memory allocation handles.
*/
int hl_vm_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_vm *vm = &hdev->vm;
int rc;
if (is_power_of_2(prop->dram_page_size))
vm->dram_pg_pool =
gen_pool_create(__ffs(prop->dram_page_size), -1);
else
vm->dram_pg_pool =
gen_pool_create(__ffs(DRAM_POOL_PAGE_SIZE), -1);
if (!vm->dram_pg_pool) {
dev_err(hdev->dev, "Failed to create dram page pool\n");
return -ENOMEM;
}
kref_init(&vm->dram_pg_pool_refcount);
rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
prop->dram_end_address - prop->dram_user_base_address,
-1);
if (rc) {
dev_err(hdev->dev,
"Failed to add memory to dram page pool %d\n", rc);
goto pool_add_err;
}
spin_lock_init(&vm->idr_lock);
idr_init(&vm->phys_pg_pack_handles);
atomic64_set(&hdev->dram_used_mem, 0);
vm->init_done = true;
return 0;
pool_add_err:
gen_pool_destroy(vm->dram_pg_pool);
return rc;
}
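/*
 * Illustrative sketch only, not part of the driver: how DRAM pages are
 * typically carved out of the gen_pool created in hl_vm_init(). The real
 * allocation path additionally tracks the pages in a hl_vm_phys_pg_pack and
 * updates the DRAM accounting; names and the rollback policy below are
 * assumptions.
 */
static int example_alloc_dram_pages(struct gen_pool *pool, u64 *pages, u32 npages, u32 page_size)
{
	u32 i;
	for (i = 0 ; i < npages ; i++) {
		/* gen_pool_alloc() returns 0 on failure */
		pages[i] = gen_pool_alloc(pool, page_size);
		if (!pages[i])
			goto rollback;
	}
	return 0;
rollback:
	while (i--)
		gen_pool_free(pool, pages[i], page_size);
	return -ENOMEM;
}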
/**
* hl_vm_fini() - virtual memory module teardown.
* @hdev: pointer to the habanalabs device structure.
*
* This function performs teardown of the following:
* - Idr for device memory allocation handles.
* - DRAM physical pages pool of 2MB.
* - MMU module.
*/
void hl_vm_fini(struct hl_device *hdev)
{
struct hl_vm *vm = &hdev->vm;
if (!vm->init_done)
return;
/*
* At this point all the contexts should be freed and hence no DRAM
* memory should be in use. Hence the DRAM pool should be freed here.
*/
if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
__func__);
vm->init_done = false;
}
/**
* hl_hw_block_mem_init() - HW block memory initialization.
* @ctx: pointer to the habanalabs context structure.
*
* This function initializes the HW block virtual mapped addresses list and
* its lock.
*/
void hl_hw_block_mem_init(struct hl_ctx *ctx)
{
mutex_init(&ctx->hw_block_list_lock);
INIT_LIST_HEAD(&ctx->hw_block_mem_list);
}
/**
* hl_hw_block_mem_fini() - HW block memory teardown.
* @ctx: pointer to the habanalabs context structure.
*
* This function clears the HW block virtual mapped addresses list and destroys
* its lock.
*/
void hl_hw_block_mem_fini(struct hl_ctx *ctx)
{
struct hl_vm_hw_block_list_node *lnode, *tmp;
if (!list_empty(&ctx->hw_block_mem_list))
dev_crit(ctx->hdev->dev, "HW block mem list isn't empty\n");
list_for_each_entry_safe(lnode, tmp, &ctx->hw_block_mem_list, node) {
list_del(&lnode->node);
kfree(lnode);
}
mutex_destroy(&ctx->hw_block_list_lock);
}
| linux-master | drivers/accel/habanalabs/common/memory.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
#include <linux/pci.h>
static ssize_t clk_max_freq_mhz_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
long value;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, false);
if (value < 0)
return value;
hdev->asic_prop.max_freq_value = value;
return sprintf(buf, "%lu\n", (value / 1000 / 1000));
}
static ssize_t clk_max_freq_mhz_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct hl_device *hdev = dev_get_drvdata(dev);
int rc;
u64 value;
if (!hl_device_operational(hdev, NULL)) {
count = -ENODEV;
goto fail;
}
rc = kstrtoull(buf, 0, &value);
if (rc) {
count = -EINVAL;
goto fail;
}
hdev->asic_prop.max_freq_value = value * 1000 * 1000;
hl_fw_set_frequency(hdev, hdev->asic_prop.clk_pll_index, hdev->asic_prop.max_freq_value);
fail:
return count;
}
static ssize_t clk_cur_freq_mhz_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
long value;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, true);
if (value < 0)
return value;
return sprintf(buf, "%lu\n", (value / 1000 / 1000));
}
static DEVICE_ATTR_RW(clk_max_freq_mhz);
static DEVICE_ATTR_RO(clk_cur_freq_mhz);
static struct attribute *hl_dev_clk_attrs[] = {
&dev_attr_clk_max_freq_mhz.attr,
&dev_attr_clk_cur_freq_mhz.attr,
NULL,
};
static ssize_t vrm_ver_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
struct cpucp_info *cpucp_info;
cpucp_info = &hdev->asic_prop.cpucp_info;
if (cpucp_info->infineon_second_stage_version)
return sprintf(buf, "%#04x %#04x\n", le32_to_cpu(cpucp_info->infineon_version),
le32_to_cpu(cpucp_info->infineon_second_stage_version));
else
return sprintf(buf, "%#04x\n", le32_to_cpu(cpucp_info->infineon_version));
}
static DEVICE_ATTR_RO(vrm_ver);
static struct attribute *hl_dev_vrm_attrs[] = {
&dev_attr_vrm_ver.attr,
NULL,
};
static ssize_t uboot_ver_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", hdev->asic_prop.uboot_ver);
}
static ssize_t armcp_kernel_ver_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s", hdev->asic_prop.cpucp_info.kernel_version);
}
static ssize_t armcp_ver_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", hdev->asic_prop.cpucp_info.cpucp_version);
}
static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "0x%08x\n",
le32_to_cpu(hdev->asic_prop.cpucp_info.cpld_version));
}
static ssize_t cpucp_kernel_ver_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s", hdev->asic_prop.cpucp_info.kernel_version);
}
static ssize_t cpucp_ver_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", hdev->asic_prop.cpucp_info.cpucp_version);
}
static ssize_t fuse_ver_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", hdev->asic_prop.cpucp_info.fuse_version);
}
static ssize_t thermal_ver_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s", hdev->asic_prop.cpucp_info.thermal_version);
}
static ssize_t fw_os_ver_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s", hdev->asic_prop.cpucp_info.fw_os_version);
}
static ssize_t preboot_btl_ver_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", hdev->asic_prop.preboot_ver);
}
static ssize_t soft_reset_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct hl_device *hdev = dev_get_drvdata(dev);
unsigned long value;
int rc;
rc = kstrtoul(buf, 0, &value);
if (rc) {
count = -EINVAL;
goto out;
}
if (!hdev->asic_prop.allow_inference_soft_reset) {
dev_err(hdev->dev, "Device does not support inference soft-reset\n");
goto out;
}
dev_warn(hdev->dev, "Inference Soft-Reset requested through sysfs\n");
hl_device_reset(hdev, 0);
out:
return count;
}
static ssize_t hard_reset_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct hl_device *hdev = dev_get_drvdata(dev);
unsigned long value;
int rc;
rc = kstrtoul(buf, 0, &value);
if (rc) {
count = -EINVAL;
goto out;
}
dev_warn(hdev->dev, "Hard-Reset requested through sysfs\n");
hl_device_reset(hdev, HL_DRV_RESET_HARD);
out:
return count;
}
static ssize_t device_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
char *str;
switch (hdev->asic_type) {
case ASIC_GOYA:
str = "GOYA";
break;
case ASIC_GAUDI:
str = "GAUDI";
break;
case ASIC_GAUDI_SEC:
str = "GAUDI SEC";
break;
case ASIC_GAUDI2:
str = "GAUDI2";
break;
case ASIC_GAUDI2B:
str = "GAUDI2B";
break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
return -EINVAL;
}
return sprintf(buf, "%s\n", str);
}
static ssize_t pci_addr_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%04x:%02x:%02x.%x\n",
pci_domain_nr(hdev->pdev->bus),
hdev->pdev->bus->number,
PCI_SLOT(hdev->pdev->devfn),
PCI_FUNC(hdev->pdev->devfn));
}
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
char str[HL_STR_MAX];
strscpy(str, hdev->status[hl_device_status(hdev)], HL_STR_MAX);
/* use uppercase for backward compatibility */
str[0] = 'A' + (str[0] - 'a');
return sprintf(buf, "%s\n", str);
}
static ssize_t soft_reset_cnt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->reset_info.compute_reset_cnt);
}
static ssize_t hard_reset_cnt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->reset_info.hard_reset_cnt);
}
static ssize_t max_power_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
long val;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
val = hl_fw_get_max_power(hdev);
if (val < 0)
return val;
return sprintf(buf, "%lu\n", val);
}
static ssize_t max_power_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct hl_device *hdev = dev_get_drvdata(dev);
unsigned long value;
int rc;
if (!hl_device_operational(hdev, NULL)) {
count = -ENODEV;
goto out;
}
rc = kstrtoul(buf, 0, &value);
if (rc) {
count = -EINVAL;
goto out;
}
hdev->max_power = value;
hl_fw_set_max_power(hdev);
out:
return count;
}
static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset,
size_t max_size)
{
struct device *dev = kobj_to_dev(kobj);
struct hl_device *hdev = dev_get_drvdata(dev);
char *data;
int rc;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
if (!max_size)
return -EINVAL;
data = kzalloc(max_size, GFP_KERNEL);
if (!data)
return -ENOMEM;
rc = hdev->asic_funcs->get_eeprom_data(hdev, data, max_size);
if (rc)
goto out;
memcpy(buf, data, max_size);
out:
kfree(data);
return max_size;
}
static ssize_t security_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->asic_prop.fw_security_enabled);
}
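/*
 * Illustrative sketch only, not part of the driver's sysfs ABI: the minimal
 * pattern for exposing one more read-only value. A real attribute would also
 * be added to hl_dev_attrs[] below; the 'open_count' name is hypothetical.
 */
static ssize_t open_count_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct hl_device *hdev = dev_get_drvdata(dev);
	return sprintf(buf, "%llu\n", (unsigned long long) hdev->open_counter);
}
static DEVICE_ATTR_RO(open_count);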
static DEVICE_ATTR_RO(armcp_kernel_ver);
static DEVICE_ATTR_RO(armcp_ver);
static DEVICE_ATTR_RO(cpld_ver);
static DEVICE_ATTR_RO(cpucp_kernel_ver);
static DEVICE_ATTR_RO(cpucp_ver);
static DEVICE_ATTR_RO(device_type);
static DEVICE_ATTR_RO(fuse_ver);
static DEVICE_ATTR_WO(hard_reset);
static DEVICE_ATTR_RO(hard_reset_cnt);
static DEVICE_ATTR_RW(max_power);
static DEVICE_ATTR_RO(pci_addr);
static DEVICE_ATTR_RO(preboot_btl_ver);
static DEVICE_ATTR_WO(soft_reset);
static DEVICE_ATTR_RO(soft_reset_cnt);
static DEVICE_ATTR_RO(status);
static DEVICE_ATTR_RO(thermal_ver);
static DEVICE_ATTR_RO(uboot_ver);
static DEVICE_ATTR_RO(fw_os_ver);
static DEVICE_ATTR_RO(security_enabled);
static struct bin_attribute bin_attr_eeprom = {
.attr = {.name = "eeprom", .mode = (0444)},
.size = PAGE_SIZE,
.read = eeprom_read_handler
};
static struct attribute *hl_dev_attrs[] = {
&dev_attr_armcp_kernel_ver.attr,
&dev_attr_armcp_ver.attr,
&dev_attr_cpld_ver.attr,
&dev_attr_cpucp_kernel_ver.attr,
&dev_attr_cpucp_ver.attr,
&dev_attr_device_type.attr,
&dev_attr_fuse_ver.attr,
&dev_attr_hard_reset.attr,
&dev_attr_hard_reset_cnt.attr,
&dev_attr_max_power.attr,
&dev_attr_pci_addr.attr,
&dev_attr_preboot_btl_ver.attr,
&dev_attr_status.attr,
&dev_attr_thermal_ver.attr,
&dev_attr_uboot_ver.attr,
&dev_attr_fw_os_ver.attr,
&dev_attr_security_enabled.attr,
NULL,
};
static struct bin_attribute *hl_dev_bin_attrs[] = {
&bin_attr_eeprom,
NULL
};
static struct attribute_group hl_dev_attr_group = {
.attrs = hl_dev_attrs,
.bin_attrs = hl_dev_bin_attrs,
};
static struct attribute_group hl_dev_clks_attr_group;
static struct attribute_group hl_dev_vrm_attr_group;
static const struct attribute_group *hl_dev_attr_groups[] = {
&hl_dev_attr_group,
&hl_dev_clks_attr_group,
&hl_dev_vrm_attr_group,
NULL,
};
static struct attribute *hl_dev_inference_attrs[] = {
&dev_attr_soft_reset.attr,
&dev_attr_soft_reset_cnt.attr,
NULL,
};
static struct attribute_group hl_dev_inference_attr_group = {
.attrs = hl_dev_inference_attrs,
};
static const struct attribute_group *hl_dev_inference_attr_groups[] = {
&hl_dev_inference_attr_group,
NULL,
};
void hl_sysfs_add_dev_clk_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp)
{
dev_clk_attr_grp->attrs = hl_dev_clk_attrs;
}
void hl_sysfs_add_dev_vrm_attr(struct hl_device *hdev, struct attribute_group *dev_vrm_attr_grp)
{
dev_vrm_attr_grp->attrs = hl_dev_vrm_attrs;
}
int hl_sysfs_init(struct hl_device *hdev)
{
int rc;
hdev->max_power = hdev->asic_prop.max_power_default;
hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group, &hl_dev_vrm_attr_group);
rc = device_add_groups(hdev->dev, hl_dev_attr_groups);
if (rc) {
dev_err(hdev->dev,
"Failed to add groups to device, error %d\n", rc);
return rc;
}
if (!hdev->asic_prop.allow_inference_soft_reset)
return 0;
rc = device_add_groups(hdev->dev, hl_dev_inference_attr_groups);
if (rc) {
dev_err(hdev->dev,
"Failed to add groups to device, error %d\n", rc);
goto remove_groups;
}
return 0;
remove_groups:
device_remove_groups(hdev->dev, hl_dev_attr_groups);
return rc;
}
void hl_sysfs_fini(struct hl_device *hdev)
{
device_remove_groups(hdev->dev, hl_dev_attr_groups);
if (!hdev->asic_prop.allow_inference_soft_reset)
return;
device_remove_groups(hdev->dev, hl_dev_inference_attr_groups);
}
| linux-master | drivers/accel/habanalabs/common/sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
#include <linux/slab.h>
/**
* struct hl_eqe_work - This structure is used to schedule work of EQ
* entry and cpucp_reset event
*
* @eq_work: workqueue object to run when EQ entry is received
* @hdev: pointer to device structure
* @eq_entry: copy of the EQ entry
*/
struct hl_eqe_work {
struct work_struct eq_work;
struct hl_device *hdev;
struct hl_eq_entry eq_entry;
};
/**
* hl_cq_inc_ptr - increment ci or pi of cq
*
* @ptr: the current ci or pi value of the completion queue
*
* Increment ptr by 1. If it reaches the number of completion queue
* entries, set it to 0
*/
inline u32 hl_cq_inc_ptr(u32 ptr)
{
ptr++;
if (unlikely(ptr == HL_CQ_LENGTH))
ptr = 0;
return ptr;
}
/**
* hl_eq_inc_ptr - increment ci of eq
*
* @ptr: the current ci value of the event queue
*
* Increment ptr by 1. If it reaches the number of event queue
* entries, set it to 0
*/
static inline u32 hl_eq_inc_ptr(u32 ptr)
{
ptr++;
if (unlikely(ptr == HL_EQ_LENGTH))
ptr = 0;
return ptr;
}
static void irq_handle_eqe(struct work_struct *work)
{
struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
eq_work);
struct hl_device *hdev = eqe_work->hdev;
hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);
kfree(eqe_work);
}
/**
* job_finish - queue job finish work
*
* @hdev: pointer to device structure
* @cs_seq: command submission sequence
* @cq: completion queue
* @timestamp: interrupt timestamp
*
*/
static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq, ktime_t timestamp)
{
struct hl_hw_queue *queue;
struct hl_cs_job *job;
queue = &hdev->kernel_queues[cq->hw_queue_id];
job = queue->shadow_queue[hl_pi_2_offset(cs_seq)];
job->timestamp = timestamp;
queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
atomic_inc(&queue->ci);
}
/**
* cs_finish - queue all cs jobs finish work
*
* @hdev: pointer to device structure
* @cs_seq: command submission sequence
* @timestamp: interrupt timestamp
*
*/
static void cs_finish(struct hl_device *hdev, u16 cs_seq, ktime_t timestamp)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_hw_queue *queue;
struct hl_cs *cs;
struct hl_cs_job *job;
cs = hdev->shadow_cs_queue[cs_seq & (prop->max_pending_cs - 1)];
if (!cs) {
dev_warn(hdev->dev,
"No pointer to CS in shadow array at index %d\n",
cs_seq);
return;
}
list_for_each_entry(job, &cs->job_list, cs_node) {
queue = &hdev->kernel_queues[job->hw_queue_id];
atomic_inc(&queue->ci);
}
cs->completion_timestamp = timestamp;
queue_work(hdev->cs_cmplt_wq, &cs->finish_work);
}
/**
* hl_irq_handler_cq - irq handler for completion queue
*
* @irq: irq number
* @arg: pointer to completion queue structure
*
*/
irqreturn_t hl_irq_handler_cq(int irq, void *arg)
{
struct hl_cq *cq = arg;
struct hl_device *hdev = cq->hdev;
bool shadow_index_valid, entry_ready;
u16 shadow_index;
struct hl_cq_entry *cq_entry, *cq_base;
ktime_t timestamp = ktime_get();
if (hdev->disabled) {
dev_dbg(hdev->dev,
"Device disabled but received IRQ %d for CQ %d\n",
irq, cq->hw_queue_id);
return IRQ_HANDLED;
}
cq_base = cq->kernel_address;
while (1) {
cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];
entry_ready = !!FIELD_GET(CQ_ENTRY_READY_MASK,
le32_to_cpu(cq_entry->data));
if (!entry_ready)
break;
/* Make sure we read CQ entry contents after we've
* checked the ownership bit.
*/
dma_rmb();
shadow_index_valid =
!!FIELD_GET(CQ_ENTRY_SHADOW_INDEX_VALID_MASK,
le32_to_cpu(cq_entry->data));
shadow_index = FIELD_GET(CQ_ENTRY_SHADOW_INDEX_MASK,
le32_to_cpu(cq_entry->data));
/*
* CQ interrupt handler has 2 modes of operation:
* 1. Interrupt per CS completion: (Single CQ for all queues)
* CQ entry represents a completed CS
*
* 2. Interrupt per CS job completion in queue: (CQ per queue)
* CQ entry represents a completed job in a certain queue
*/
if (shadow_index_valid && !hdev->disabled) {
if (hdev->asic_prop.completion_mode ==
HL_COMPLETION_MODE_CS)
cs_finish(hdev, shadow_index, timestamp);
else
job_finish(hdev, shadow_index, cq, timestamp);
}
/* Clear CQ entry ready bit */
cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
~CQ_ENTRY_READY_MASK);
cq->ci = hl_cq_inc_ptr(cq->ci);
/* Increment free slots */
atomic_inc(&cq->free_slots_cnt);
}
return IRQ_HANDLED;
}
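/*
 * Illustrative sketch only, not part of the driver: how a completion-queue
 * handler like the one above is typically tied to an MSI/MSI-X vector. The
 * vector index and the name string are assumptions; the real wiring is done
 * by the ASIC-specific init code.
 */
static int example_request_cq_irq(struct hl_device *hdev, struct hl_cq *cq, int vector_idx)
{
	int irq;
	irq = pci_irq_vector(hdev->pdev, vector_idx);
	if (irq < 0)
		return irq;
	return request_irq(irq, hl_irq_handler_cq, 0, "hl-cq", cq);
}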
/*
* hl_ts_free_objects - handler of the free objects workqueue.
* This function puts the refcount of the objects that the registration node
* took a refcount on.
* @work: workqueue object pointer
*/
static void hl_ts_free_objects(struct work_struct *work)
{
struct timestamp_reg_work_obj *job =
container_of(work, struct timestamp_reg_work_obj, free_obj);
struct timestamp_reg_free_node *free_obj, *temp_free_obj;
struct list_head *free_list_head = job->free_obj_head;
struct hl_device *hdev = job->hdev;
list_for_each_entry_safe(free_obj, temp_free_obj, free_list_head, free_objects_node) {
dev_dbg(hdev->dev, "About to put refcount to buf (%p) cq_cb(%p)\n",
free_obj->buf,
free_obj->cq_cb);
hl_mmap_mem_buf_put(free_obj->buf);
hl_cb_put(free_obj->cq_cb);
kfree(free_obj);
}
kfree(free_list_head);
kfree(job);
}
/*
* This function is called with the wait_list_lock spinlock taken.
* It sets the timestamp and deletes the registration node from the wait list.
* Since we are protected by the spinlock here, we cannot simply put the
* refcount of the objects at this point: the release function may be called,
* and it contains long logic (which might also sleep) that cannot run in irq
* context. So we fill a list with "put" job nodes and send that list to a
* dedicated workqueue which does the actual put.
*/
static int handle_registration_node(struct hl_device *hdev, struct hl_user_pending_interrupt *pend,
struct list_head **free_list, ktime_t now)
{
struct timestamp_reg_free_node *free_node;
u64 timestamp;
if (!(*free_list)) {
/* Alloc/Init the timestamp registration free objects list */
*free_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
if (!(*free_list))
return -ENOMEM;
INIT_LIST_HEAD(*free_list);
}
free_node = kmalloc(sizeof(*free_node), GFP_ATOMIC);
if (!free_node)
return -ENOMEM;
timestamp = ktime_to_ns(now);
*pend->ts_reg_info.timestamp_kernel_addr = timestamp;
dev_dbg(hdev->dev, "Timestamp is set to ts cb address (%p), ts: 0x%llx\n",
pend->ts_reg_info.timestamp_kernel_addr,
*(u64 *)pend->ts_reg_info.timestamp_kernel_addr);
list_del(&pend->wait_list_node);
/* Mark kernel CB node as free */
pend->ts_reg_info.in_use = 0;
/* Putting the refcount for ts_buff and cq_cb objects will be handled
* in workqueue context, just add job to free_list.
*/
free_node->buf = pend->ts_reg_info.buf;
free_node->cq_cb = pend->ts_reg_info.cq_cb;
list_add(&free_node->free_objects_node, *free_list);
return 0;
}
static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interrupt *intr)
{
struct hl_user_pending_interrupt *pend, *temp_pend;
struct list_head *ts_reg_free_list_head = NULL;
struct timestamp_reg_work_obj *job;
bool reg_node_handle_fail = false;
int rc;
/* For registration nodes:
* As part of handling the registration nodes, we should put a refcount on
* some objects. The problem is that we cannot do that under a spinlock or in
* irq handler context at all (since the release functions are long and might
* sleep), so we need to handle that part in workqueue context.
* To avoid dealing with a kmalloc failure, which would force us to roll back
* and move nodes hung on the free list back to the interrupt wait list, we
* always allocate the WQ job at the beginning.
*/
job = kmalloc(sizeof(*job), GFP_ATOMIC);
if (!job)
return;
spin_lock(&intr->wait_list_lock);
list_for_each_entry_safe(pend, temp_pend, &intr->wait_list_head, wait_list_node) {
if ((pend->cq_kernel_addr && *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
!pend->cq_kernel_addr) {
if (pend->ts_reg_info.buf) {
if (!reg_node_handle_fail) {
rc = handle_registration_node(hdev, pend,
&ts_reg_free_list_head, intr->timestamp);
if (rc)
reg_node_handle_fail = true;
}
} else {
/* Handle wait target value node */
pend->fence.timestamp = intr->timestamp;
complete_all(&pend->fence.completion);
}
}
}
spin_unlock(&intr->wait_list_lock);
if (ts_reg_free_list_head) {
INIT_WORK(&job->free_obj, hl_ts_free_objects);
job->free_obj_head = ts_reg_free_list_head;
job->hdev = hdev;
queue_work(hdev->ts_free_obj_wq, &job->free_obj);
} else {
kfree(job);
}
}
static void handle_tpc_interrupt(struct hl_device *hdev)
{
u64 event_mask;
u32 flags;
event_mask = HL_NOTIFIER_EVENT_TPC_ASSERT |
HL_NOTIFIER_EVENT_USER_ENGINE_ERR |
HL_NOTIFIER_EVENT_DEVICE_RESET;
flags = HL_DRV_RESET_DELAY;
dev_err_ratelimited(hdev->dev, "Received TPC assert\n");
hl_device_cond_reset(hdev, flags, event_mask);
}
static void handle_unexpected_user_interrupt(struct hl_device *hdev)
{
dev_err_ratelimited(hdev->dev, "Received unexpected user error interrupt\n");
}
/**
* hl_irq_handler_user_interrupt - irq handler for user interrupts
*
* @irq: irq number
* @arg: pointer to user interrupt structure
*
*/
irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
{
struct hl_user_interrupt *user_int = arg;
user_int->timestamp = ktime_get();
return IRQ_WAKE_THREAD;
}
/**
* hl_irq_user_interrupt_thread_handler - irq thread handler for user interrupts.
* This function is invoked by threaded irq mechanism
*
* @irq: irq number
* @arg: pointer to user interrupt structure
*
*/
irqreturn_t hl_irq_user_interrupt_thread_handler(int irq, void *arg)
{
struct hl_user_interrupt *user_int = arg;
struct hl_device *hdev = user_int->hdev;
switch (user_int->type) {
case HL_USR_INTERRUPT_CQ:
handle_user_interrupt(hdev, &hdev->common_user_cq_interrupt);
/* Handle user cq interrupt registered on this specific irq */
handle_user_interrupt(hdev, user_int);
break;
case HL_USR_INTERRUPT_DECODER:
handle_user_interrupt(hdev, &hdev->common_decoder_interrupt);
/* Handle decoder interrupt registered on this specific irq */
handle_user_interrupt(hdev, user_int);
break;
case HL_USR_INTERRUPT_TPC:
handle_tpc_interrupt(hdev);
break;
case HL_USR_INTERRUPT_UNEXPECTED:
handle_unexpected_user_interrupt(hdev);
break;
default:
break;
}
return IRQ_HANDLED;
}
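/*
 * Illustrative sketch only, not part of the driver: how the hard handler and
 * the threaded handler above are typically registered together. The irq
 * number is assumed to be already resolved and the name string is an
 * assumption; the real registration lives in the ASIC-specific code.
 */
static int example_request_user_interrupt_irq(int irq, struct hl_user_interrupt *user_int)
{
	/*
	 * hl_irq_handler_user_interrupt() only records the timestamp and
	 * returns IRQ_WAKE_THREAD, so the actual handling runs in
	 * hl_irq_user_interrupt_thread_handler() in thread context.
	 */
	return request_threaded_irq(irq, hl_irq_handler_user_interrupt,
					hl_irq_user_interrupt_thread_handler, 0,
					"hl-user-interrupt", user_int);
}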
/**
* hl_irq_handler_eq - irq handler for event queue
*
* @irq: irq number
* @arg: pointer to event queue structure
*
*/
irqreturn_t hl_irq_handler_eq(int irq, void *arg)
{
struct hl_eq *eq = arg;
struct hl_device *hdev = eq->hdev;
struct hl_eq_entry *eq_entry;
struct hl_eq_entry *eq_base;
struct hl_eqe_work *handle_eqe_work;
bool entry_ready;
u32 cur_eqe, ctl;
u16 cur_eqe_index, event_type;
eq_base = eq->kernel_address;
while (1) {
cur_eqe = le32_to_cpu(eq_base[eq->ci].hdr.ctl);
entry_ready = !!FIELD_GET(EQ_CTL_READY_MASK, cur_eqe);
if (!entry_ready)
break;
cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe);
if ((hdev->event_queue.check_eqe_index) &&
(((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK) != cur_eqe_index)) {
dev_err(hdev->dev,
"EQE %#x in queue is ready but index does not match %d!=%d",
cur_eqe,
((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
cur_eqe_index);
break;
}
eq->prev_eqe_index++;
eq_entry = &eq_base[eq->ci];
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
*/
dma_rmb();
if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
ctl = le32_to_cpu(eq_entry->hdr.ctl);
event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT);
dev_warn(hdev->dev,
"Device disabled but received an EQ event (%u)\n", event_type);
goto skip_irq;
}
handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
if (handle_eqe_work) {
INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
handle_eqe_work->hdev = hdev;
memcpy(&handle_eqe_work->eq_entry, eq_entry,
sizeof(*eq_entry));
queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
}
skip_irq:
/* Clear EQ entry ready bit */
eq_entry->hdr.ctl =
cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
~EQ_CTL_READY_MASK);
eq->ci = hl_eq_inc_ptr(eq->ci);
hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
}
return IRQ_HANDLED;
}
/**
* hl_irq_handler_dec_abnrm - Decoder error interrupt handler
* @irq: IRQ number
* @arg: pointer to decoder structure.
*/
irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg)
{
struct hl_dec *dec = arg;
schedule_work(&dec->abnrm_intr_work);
return IRQ_HANDLED;
}
/**
* hl_cq_init - main initialization function for an cq object
*
* @hdev: pointer to device structure
* @q: pointer to cq structure
* @hw_queue_id: The H/W queue ID this completion queue belongs to
* HL_INVALID_QUEUE if cq is not attached to any specific queue
*
* Allocate dma-able memory for the completion queue and initialize fields
* Returns 0 on success
*/
int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
void *p;
p = hl_asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES, &q->bus_address,
GFP_KERNEL | __GFP_ZERO);
if (!p)
return -ENOMEM;
q->hdev = hdev;
q->kernel_address = p;
q->hw_queue_id = hw_queue_id;
q->ci = 0;
q->pi = 0;
atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);
return 0;
}
/**
* hl_cq_fini - destroy completion queue
*
* @hdev: pointer to device structure
* @q: pointer to cq structure
*
* Free the completion queue memory
*/
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
hl_asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES, q->kernel_address, q->bus_address);
}
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
{
q->ci = 0;
q->pi = 0;
atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);
/*
* It's not enough to just reset the PI/CI because the H/W may have
* written valid completion entries before it was halted and therefore
* we need to clean the actual queues so we won't process old entries
* when the device is operational again
*/
memset(q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}
/**
* hl_eq_init - main initialization function for an event queue object
*
* @hdev: pointer to device structure
* @q: pointer to eq structure
*
* Allocate dma-able memory for the event queue and initialize fields
* Returns 0 on success
*/
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
void *p;
p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_EQ_SIZE_IN_BYTES, &q->bus_address);
if (!p)
return -ENOMEM;
q->hdev = hdev;
q->kernel_address = p;
q->ci = 0;
q->prev_eqe_index = 0;
return 0;
}
/**
* hl_eq_fini - destroy event queue
*
* @hdev: pointer to device structure
* @q: pointer to eq structure
*
* Free the event queue memory
*/
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
flush_workqueue(hdev->eq_wq);
hl_cpu_accessible_dma_pool_free(hdev, HL_EQ_SIZE_IN_BYTES, q->kernel_address);
}
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
{
q->ci = 0;
q->prev_eqe_index = 0;
/*
* It's not enough to just reset the PI/CI because the H/W may have
* written valid completion entries before it was halted and therefore
* we need to clean the actual queues so we won't process old entries
* when the device is operational again
*/
memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
}
| linux-master | drivers/accel/habanalabs/common/irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
#define pr_fmt(fmt) "habanalabs: " fmt
#include "habanalabs.h"
#include "../include/hw_ip/pci/pci_general.h"
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#define CREATE_TRACE_POINTS
#include <trace/events/habanalabs.h>
#define HL_DRIVER_AUTHOR "HabanaLabs Kernel Driver Team"
#define HL_DRIVER_DESC "Driver for HabanaLabs's AI Accelerators"
MODULE_AUTHOR(HL_DRIVER_AUTHOR);
MODULE_DESCRIPTION(HL_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
static int hl_major;
static struct class *hl_class;
static DEFINE_IDR(hl_devs_idr);
static DEFINE_MUTEX(hl_devs_idr_lock);
#define HL_DEFAULT_TIMEOUT_LOCKED 30 /* 30 seconds */
#define GAUDI_DEFAULT_TIMEOUT_LOCKED 600 /* 10 minutes */
static int timeout_locked = HL_DEFAULT_TIMEOUT_LOCKED;
static int reset_on_lockup = 1;
static int memory_scrub;
static ulong boot_error_status_mask = ULONG_MAX;
module_param(timeout_locked, int, 0444);
MODULE_PARM_DESC(timeout_locked,
"Device lockup timeout in seconds (0 = disabled, default 30s)");
module_param(reset_on_lockup, int, 0444);
MODULE_PARM_DESC(reset_on_lockup,
"Do device reset on lockup (0 = no, 1 = yes, default yes)");
module_param(memory_scrub, int, 0444);
MODULE_PARM_DESC(memory_scrub,
"Scrub device memory in various states (0 = no, 1 = yes, default no)");
module_param(boot_error_status_mask, ulong, 0444);
MODULE_PARM_DESC(boot_error_status_mask,
"Mask of the error status during device CPU boot (If bitX is cleared then error X is masked. Default all 1's)");
#define PCI_IDS_GOYA 0x0001
#define PCI_IDS_GAUDI 0x1000
#define PCI_IDS_GAUDI_SEC 0x1010
#define PCI_IDS_GAUDI2 0x1020
static const struct pci_device_id ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI_SEC), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI2), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);
/*
* get_asic_type - translate device id to asic type
*
* @hdev: pointer to habanalabs device structure.
*
* Translate device id and revision id to asic type.
* In case of an unidentified device, return ASIC_INVALID
*/
static enum hl_asic_type get_asic_type(struct hl_device *hdev)
{
struct pci_dev *pdev = hdev->pdev;
enum hl_asic_type asic_type = ASIC_INVALID;
switch (pdev->device) {
case PCI_IDS_GOYA:
asic_type = ASIC_GOYA;
break;
case PCI_IDS_GAUDI:
asic_type = ASIC_GAUDI;
break;
case PCI_IDS_GAUDI_SEC:
asic_type = ASIC_GAUDI_SEC;
break;
case PCI_IDS_GAUDI2:
switch (pdev->revision) {
case REV_ID_A:
asic_type = ASIC_GAUDI2;
break;
case REV_ID_B:
asic_type = ASIC_GAUDI2B;
break;
default:
break;
}
break;
default:
break;
}
return asic_type;
}
static bool is_asic_secured(enum hl_asic_type asic_type)
{
switch (asic_type) {
case ASIC_GAUDI_SEC:
return true;
default:
return false;
}
}
/*
* hl_device_open - open function for habanalabs device
*
* @inode: pointer to inode structure
* @filp: pointer to file structure
*
* Called when a process opens a habanalabs device.
*/
int hl_device_open(struct inode *inode, struct file *filp)
{
enum hl_device_status status;
struct hl_device *hdev;
struct hl_fpriv *hpriv;
int rc;
mutex_lock(&hl_devs_idr_lock);
hdev = idr_find(&hl_devs_idr, iminor(inode));
mutex_unlock(&hl_devs_idr_lock);
if (!hdev) {
pr_err("Couldn't find device %d:%d\n",
imajor(inode), iminor(inode));
return -ENXIO;
}
hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
hpriv->hdev = hdev;
filp->private_data = hpriv;
hpriv->filp = filp;
mutex_init(&hpriv->notifier_event.lock);
mutex_init(&hpriv->restore_phase_mutex);
mutex_init(&hpriv->ctx_lock);
kref_init(&hpriv->refcount);
nonseekable_open(inode, filp);
hl_ctx_mgr_init(&hpriv->ctx_mgr);
hl_mem_mgr_init(hpriv->hdev->dev, &hpriv->mem_mgr);
hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
mutex_lock(&hdev->fpriv_list_lock);
if (!hl_device_operational(hdev, &status)) {
dev_dbg_ratelimited(hdev->dev,
"Can't open %s because it is %s\n",
dev_name(hdev->dev), hdev->status[status]);
if (status == HL_DEVICE_STATUS_IN_RESET ||
status == HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE)
rc = -EAGAIN;
else
rc = -EPERM;
goto out_err;
}
if (hdev->is_in_dram_scrub) {
dev_dbg_ratelimited(hdev->dev,
"Can't open %s during dram scrub\n",
dev_name(hdev->dev));
rc = -EAGAIN;
goto out_err;
}
if (hdev->compute_ctx_in_release) {
dev_dbg_ratelimited(hdev->dev,
"Can't open %s because another user is still releasing it\n",
dev_name(hdev->dev));
rc = -EAGAIN;
goto out_err;
}
if (hdev->is_compute_ctx_active) {
dev_dbg_ratelimited(hdev->dev,
"Can't open %s because another user is working on it\n",
dev_name(hdev->dev));
rc = -EBUSY;
goto out_err;
}
rc = hl_ctx_create(hdev, hpriv);
if (rc) {
dev_err(hdev->dev, "Failed to create context %d\n", rc);
goto out_err;
}
list_add(&hpriv->dev_node, &hdev->fpriv_list);
mutex_unlock(&hdev->fpriv_list_lock);
hdev->asic_funcs->send_device_activity(hdev, true);
hl_debugfs_add_file(hpriv);
hl_enable_err_info_capture(&hdev->captured_err_info);
hdev->open_counter++;
hdev->last_successful_open_jif = jiffies;
hdev->last_successful_open_ktime = ktime_get();
return 0;
out_err:
mutex_unlock(&hdev->fpriv_list_lock);
hl_mem_mgr_fini(&hpriv->mem_mgr);
hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
filp->private_data = NULL;
mutex_destroy(&hpriv->ctx_lock);
mutex_destroy(&hpriv->restore_phase_mutex);
mutex_destroy(&hpriv->notifier_event.lock);
put_pid(hpriv->taskpid);
kfree(hpriv);
return rc;
}
int hl_device_open_ctrl(struct inode *inode, struct file *filp)
{
struct hl_device *hdev;
struct hl_fpriv *hpriv;
int rc;
mutex_lock(&hl_devs_idr_lock);
hdev = idr_find(&hl_devs_idr, iminor(inode));
mutex_unlock(&hl_devs_idr_lock);
if (!hdev) {
pr_err("Couldn't find device %d:%d\n",
imajor(inode), iminor(inode));
return -ENXIO;
}
hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
/* Prevent other routines from reading partial hpriv data by
* initializing hpriv fields before inserting it into the list
*/
hpriv->hdev = hdev;
filp->private_data = hpriv;
hpriv->filp = filp;
mutex_init(&hpriv->notifier_event.lock);
nonseekable_open(inode, filp);
hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
mutex_lock(&hdev->fpriv_ctrl_list_lock);
if (!hl_ctrl_device_operational(hdev, NULL)) {
dev_dbg_ratelimited(hdev->dev_ctrl,
"Can't open %s because it is disabled\n",
dev_name(hdev->dev_ctrl));
rc = -EPERM;
goto out_err;
}
list_add(&hpriv->dev_node, &hdev->fpriv_ctrl_list);
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
return 0;
out_err:
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
filp->private_data = NULL;
put_pid(hpriv->taskpid);
kfree(hpriv);
return rc;
}
static void set_driver_behavior_per_device(struct hl_device *hdev)
{
hdev->nic_ports_mask = 0;
hdev->fw_components = FW_TYPE_ALL_TYPES;
hdev->cpu_queues_enable = 1;
hdev->pldm = 0;
hdev->hard_reset_on_fw_events = 1;
hdev->bmc_enable = 1;
hdev->reset_on_preboot_fail = 1;
hdev->heartbeat = 1;
}
static void copy_kernel_module_params_to_device(struct hl_device *hdev)
{
hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
hdev->major = hl_major;
hdev->hclass = hl_class;
hdev->memory_scrub = memory_scrub;
hdev->reset_on_lockup = reset_on_lockup;
hdev->boot_error_status_mask = boot_error_status_mask;
}
static void fixup_device_params_per_asic(struct hl_device *hdev, int timeout)
{
switch (hdev->asic_type) {
case ASIC_GAUDI:
case ASIC_GAUDI_SEC:
/* If the user didn't request a timeout different from the default one,
* use the Gaudi-specific default timeout instead
*/
if (timeout == HL_DEFAULT_TIMEOUT_LOCKED)
hdev->timeout_jiffies = msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED *
MSEC_PER_SEC);
hdev->reset_upon_device_release = 0;
break;
case ASIC_GOYA:
hdev->reset_upon_device_release = 0;
break;
default:
hdev->reset_upon_device_release = 1;
break;
}
}
static int fixup_device_params(struct hl_device *hdev)
{
int tmp_timeout;
tmp_timeout = timeout_locked;
hdev->fw_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
hdev->fw_comms_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
if (tmp_timeout)
hdev->timeout_jiffies = msecs_to_jiffies(tmp_timeout * MSEC_PER_SEC);
else
hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
hdev->stop_on_err = true;
hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
/* Enable only after the initialization of the device */
hdev->disabled = true;
if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU) &&
(hdev->fw_components & ~FW_TYPE_PREBOOT_CPU)) {
pr_err("Preboot must be set along with other components");
return -EINVAL;
}
/* If CPU queues not enabled, no way to do heartbeat */
if (!hdev->cpu_queues_enable)
hdev->heartbeat = 0;
fixup_device_params_per_asic(hdev, tmp_timeout);
return 0;
}
/**
* create_hdev - create habanalabs device instance
*
* @dev: will hold the pointer to the new habanalabs device structure
* @pdev: pointer to the pci device
*
* Allocate memory for habanalabs device and initialize basic fields
* Identify the ASIC type
* Allocate ID (minor) for the device (only for real devices)
*/
static int create_hdev(struct hl_device **dev, struct pci_dev *pdev)
{
int main_id, ctrl_id = 0, rc = 0;
struct hl_device *hdev;
*dev = NULL;
hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
if (!hdev)
return -ENOMEM;
/* Will be NULL in case of simulator device */
hdev->pdev = pdev;
/* Assign status description string */
strncpy(hdev->status[HL_DEVICE_STATUS_OPERATIONAL], "operational", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET], "in reset", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION], "disabled", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET], "needs reset", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_IN_DEVICE_CREATION],
"in device creation", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE],
"in reset after device release", HL_STR_MAX);
/* First, we must find out which ASIC we are handling. This is needed
* to configure the behavior of the driver (kernel parameters)
*/
hdev->asic_type = get_asic_type(hdev);
if (hdev->asic_type == ASIC_INVALID) {
dev_err(&pdev->dev, "Unsupported ASIC\n");
rc = -ENODEV;
goto free_hdev;
}
copy_kernel_module_params_to_device(hdev);
set_driver_behavior_per_device(hdev);
fixup_device_params(hdev);
mutex_lock(&hl_devs_idr_lock);
/* Always save 2 numbers, 1 for main device and 1 for control.
* They must be consecutive
*/
main_id = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS, GFP_KERNEL);
if (main_id >= 0)
ctrl_id = idr_alloc(&hl_devs_idr, hdev, main_id + 1,
main_id + 2, GFP_KERNEL);
mutex_unlock(&hl_devs_idr_lock);
if ((main_id < 0) || (ctrl_id < 0)) {
if ((main_id == -ENOSPC) || (ctrl_id == -ENOSPC))
pr_err("too many devices in the system\n");
if (main_id >= 0) {
mutex_lock(&hl_devs_idr_lock);
idr_remove(&hl_devs_idr, main_id);
mutex_unlock(&hl_devs_idr_lock);
}
rc = -EBUSY;
goto free_hdev;
}
hdev->id = main_id;
hdev->id_control = ctrl_id;
*dev = hdev;
return 0;
free_hdev:
kfree(hdev);
return rc;
}
/*
* destroy_hdev - destroy habanalabs device instance
*
* @dev: pointer to the habanalabs device structure
*
*/
static void destroy_hdev(struct hl_device *hdev)
{
/* Remove device from the device list */
mutex_lock(&hl_devs_idr_lock);
idr_remove(&hl_devs_idr, hdev->id);
idr_remove(&hl_devs_idr, hdev->id_control);
mutex_unlock(&hl_devs_idr_lock);
kfree(hdev);
}
static int hl_pmops_suspend(struct device *dev)
{
struct hl_device *hdev = dev_get_drvdata(dev);
pr_debug("Going to suspend PCI device\n");
if (!hdev) {
pr_err("device pointer is NULL in suspend\n");
return 0;
}
return hl_device_suspend(hdev);
}
static int hl_pmops_resume(struct device *dev)
{
struct hl_device *hdev = dev_get_drvdata(dev);
pr_debug("Going to resume PCI device\n");
if (!hdev) {
pr_err("device pointer is NULL in resume\n");
return 0;
}
return hl_device_resume(hdev);
}
/**
* hl_pci_probe - probe PCI habanalabs devices
*
* @pdev: pointer to pci device
* @id: pointer to pci device id structure
*
* Standard PCI probe function for habanalabs device.
* Create a new habanalabs device and initialize it according to the
* device's type
*/
static int hl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hl_device *hdev;
int rc;
dev_info(&pdev->dev, HL_NAME
" device found [%04x:%04x] (rev %x)\n",
(int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
rc = create_hdev(&hdev, pdev);
if (rc)
return rc;
pci_set_drvdata(pdev, hdev);
rc = hl_device_init(hdev);
if (rc) {
dev_err(&pdev->dev, "Fatal error during habanalabs device init\n");
rc = -ENODEV;
goto disable_device;
}
return 0;
disable_device:
pci_set_drvdata(pdev, NULL);
destroy_hdev(hdev);
return rc;
}
/*
* hl_pci_remove - remove PCI habanalabs devices
*
* @pdev: pointer to pci device
*
* Standard PCI remove function for habanalabs device
*/
static void hl_pci_remove(struct pci_dev *pdev)
{
struct hl_device *hdev;
hdev = pci_get_drvdata(pdev);
if (!hdev)
return;
hl_device_fini(hdev);
pci_set_drvdata(pdev, NULL);
destroy_hdev(hdev);
}
/**
* hl_pci_err_detected - a PCI bus error detected on this device
*
* @pdev: pointer to pci device
* @state: PCI error type
*
* Called by the PCI subsystem whenever a non-correctable
* PCI bus error is detected
*/
static pci_ers_result_t
hl_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
struct hl_device *hdev = pci_get_drvdata(pdev);
enum pci_ers_result result;
switch (state) {
case pci_channel_io_normal:
dev_warn(hdev->dev, "PCI normal state error detected\n");
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
dev_warn(hdev->dev, "PCI frozen state error detected\n");
result = PCI_ERS_RESULT_NEED_RESET;
break;
case pci_channel_io_perm_failure:
dev_warn(hdev->dev, "PCI failure state error detected\n");
result = PCI_ERS_RESULT_DISCONNECT;
break;
default:
result = PCI_ERS_RESULT_NONE;
}
hdev->asic_funcs->halt_engines(hdev, true, false);
return result;
}
/**
* hl_pci_err_resume - resume after a PCI slot reset
*
* @pdev: pointer to pci device
*
*/
static void hl_pci_err_resume(struct pci_dev *pdev)
{
struct hl_device *hdev = pci_get_drvdata(pdev);
dev_warn(hdev->dev, "Resuming device after PCI slot reset\n");
hl_device_resume(hdev);
}
/**
* hl_pci_err_slot_reset - a PCI slot reset has just happened
*
* @pdev: pointer to pci device
*
* Determine if the driver can recover from the PCI slot reset
*/
static pci_ers_result_t hl_pci_err_slot_reset(struct pci_dev *pdev)
{
struct hl_device *hdev = pci_get_drvdata(pdev);
dev_warn(hdev->dev, "PCI slot reset detected\n");
return PCI_ERS_RESULT_RECOVERED;
}
static const struct dev_pm_ops hl_pm_ops = {
.suspend = hl_pmops_suspend,
.resume = hl_pmops_resume,
};
static const struct pci_error_handlers hl_pci_err_handler = {
.error_detected = hl_pci_err_detected,
.slot_reset = hl_pci_err_slot_reset,
.resume = hl_pci_err_resume,
};
static struct pci_driver hl_pci_driver = {
.name = HL_NAME,
.id_table = ids,
.probe = hl_pci_probe,
.remove = hl_pci_remove,
.shutdown = hl_pci_remove,
.driver = {
.name = HL_NAME,
.pm = &hl_pm_ops,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.err_handler = &hl_pci_err_handler,
};
/*
* hl_init - Initialize the habanalabs kernel driver
*/
static int __init hl_init(void)
{
int rc;
dev_t dev;
pr_info("loading driver\n");
rc = alloc_chrdev_region(&dev, 0, HL_MAX_MINORS, HL_NAME);
if (rc < 0) {
pr_err("unable to get major\n");
return rc;
}
hl_major = MAJOR(dev);
hl_class = class_create(HL_NAME);
if (IS_ERR(hl_class)) {
pr_err("failed to allocate class\n");
rc = PTR_ERR(hl_class);
goto remove_major;
}
hl_debugfs_init();
rc = pci_register_driver(&hl_pci_driver);
if (rc) {
pr_err("failed to register pci device\n");
goto remove_debugfs;
}
pr_debug("driver loaded\n");
return 0;
remove_debugfs:
hl_debugfs_fini();
class_destroy(hl_class);
remove_major:
unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS);
return rc;
}
/*
* hl_exit - Release all resources of the habanalabs kernel driver
*/
static void __exit hl_exit(void)
{
pci_unregister_driver(&hl_pci_driver);
/*
* Removing debugfs must be after all devices or simulator devices
* have been removed because otherwise we get a bug in the
* debugfs module for referencing NULL objects
*/
hl_debugfs_fini();
class_destroy(hl_class);
unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS);
idr_destroy(&hl_devs_idr);
pr_debug("driver removed\n");
}
module_init(hl_init);
module_exit(hl_exit);
| linux-master | drivers/accel/habanalabs/common/habanalabs_drv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#define MMU_ADDR_BUF_SIZE 40
#define MMU_ASID_BUF_SIZE 10
#define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
#define I2C_MAX_TRANSACTION_LEN 8
static struct dentry *hl_debug_root;
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
u8 i2c_reg, u8 i2c_len, u64 *val)
{
struct cpucp_packet pkt;
int rc;
if (!hl_device_operational(hdev, NULL))
return -EBUSY;
if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
dev_err(hdev->dev, "I2C transaction length %u, exceeds maximum of %u\n",
i2c_len, I2C_MAX_TRANSACTION_LEN);
return -EINVAL;
}
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.i2c_bus = i2c_bus;
pkt.i2c_addr = i2c_addr;
pkt.i2c_reg = i2c_reg;
pkt.i2c_len = i2c_len;
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, val);
if (rc)
dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
return rc;
}
static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
u8 i2c_reg, u8 i2c_len, u64 val)
{
struct cpucp_packet pkt;
int rc;
if (!hl_device_operational(hdev, NULL))
return -EBUSY;
if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
dev_err(hdev->dev, "I2C transaction length %u, exceeds maximum of %u\n",
i2c_len, I2C_MAX_TRANSACTION_LEN);
return -EINVAL;
}
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.i2c_bus = i2c_bus;
pkt.i2c_addr = i2c_addr;
pkt.i2c_reg = i2c_reg;
pkt.i2c_len = i2c_len;
pkt.value = cpu_to_le64(val);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
return rc;
}
static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
{
struct cpucp_packet pkt;
int rc;
if (!hl_device_operational(hdev, NULL))
return;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_LED_SET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.led_index = cpu_to_le32(led);
pkt.value = cpu_to_le64(state);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
}
static int command_buffers_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_cb *cb;
bool first = true;
spin_lock(&dev_entry->cb_spinlock);
list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
if (first) {
first = false;
seq_puts(s, "\n");
seq_puts(s, " CB ID CTX ID CB size CB RefCnt mmap? CS counter\n");
seq_puts(s, "---------------------------------------------------------------\n");
}
seq_printf(s,
" %03llu %d 0x%08x %d %d %d\n",
cb->buf->handle, cb->ctx->asid, cb->size,
kref_read(&cb->buf->refcount),
atomic_read(&cb->buf->mmap), atomic_read(&cb->cs_cnt));
}
spin_unlock(&dev_entry->cb_spinlock);
if (!first)
seq_puts(s, "\n");
return 0;
}
static int command_submission_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_cs *cs;
bool first = true;
spin_lock(&dev_entry->cs_spinlock);
list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
if (first) {
first = false;
seq_puts(s, "\n");
seq_puts(s, " CS ID CS TYPE CTX ASID CS RefCnt Submitted Completed\n");
seq_puts(s, "----------------------------------------------------------------\n");
}
seq_printf(s,
" %llu %d %d %d %d %d\n",
cs->sequence, cs->type, cs->ctx->asid,
kref_read(&cs->refcount),
cs->submitted, cs->completed);
}
spin_unlock(&dev_entry->cs_spinlock);
if (!first)
seq_puts(s, "\n");
return 0;
}
static int command_submission_jobs_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_cs_job *job;
bool first = true;
spin_lock(&dev_entry->cs_job_spinlock);
list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
if (first) {
first = false;
seq_puts(s, "\n");
seq_puts(s, " JOB ID CS ID CS TYPE CTX ASID JOB RefCnt H/W Queue\n");
seq_puts(s, "---------------------------------------------------------------\n");
}
if (job->cs)
seq_printf(s,
" %02d %llu %d %d %d %d\n",
job->id, job->cs->sequence, job->cs->type,
job->cs->ctx->asid, kref_read(&job->refcount),
job->hw_queue_id);
else
seq_printf(s,
" %02d 0 0 %d %d %d\n",
job->id, HL_KERNEL_ASID_ID,
kref_read(&job->refcount), job->hw_queue_id);
}
spin_unlock(&dev_entry->cs_job_spinlock);
if (!first)
seq_puts(s, "\n");
return 0;
}
static int userptr_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_userptr *userptr;
char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
"DMA_FROM_DEVICE", "DMA_NONE"};
bool first = true;
spin_lock(&dev_entry->userptr_spinlock);
list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
if (first) {
first = false;
seq_puts(s, "\n");
seq_puts(s, " pid user virtual address size dma dir\n");
seq_puts(s, "----------------------------------------------------------\n");
}
seq_printf(s, " %-7d 0x%-14llx %-10llu %-30s\n",
userptr->pid, userptr->addr, userptr->size,
dma_dir[userptr->dir]);
}
spin_unlock(&dev_entry->userptr_spinlock);
if (!first)
seq_puts(s, "\n");
return 0;
}
static int vm_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_vm_hw_block_list_node *lnode;
struct hl_ctx *ctx;
struct hl_vm *vm;
struct hl_vm_hash_node *hnode;
struct hl_userptr *userptr;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_va_range *va_range;
struct hl_vm_va_block *va_block;
enum vm_type *vm_type;
bool once = true;
u64 j;
int i;
mutex_lock(&dev_entry->ctx_mem_hash_mutex);
list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
once = false;
seq_puts(s, "\n\n----------------------------------------------------");
seq_puts(s, "\n----------------------------------------------------\n\n");
seq_printf(s, "ctx asid: %u\n", ctx->asid);
seq_puts(s, "\nmappings:\n\n");
seq_puts(s, " virtual address size handle\n");
seq_puts(s, "----------------------------------------------------\n");
mutex_lock(&ctx->mem_hash_lock);
hash_for_each(ctx->mem_hash, i, hnode, node) {
vm_type = hnode->ptr;
if (*vm_type == VM_TYPE_USERPTR) {
userptr = hnode->ptr;
seq_printf(s,
" 0x%-14llx %-10llu\n",
hnode->vaddr, userptr->size);
} else {
phys_pg_pack = hnode->ptr;
seq_printf(s,
" 0x%-14llx %-10llu %-4u\n",
hnode->vaddr, phys_pg_pack->total_size,
phys_pg_pack->handle);
}
}
mutex_unlock(&ctx->mem_hash_lock);
if (ctx->asid != HL_KERNEL_ASID_ID &&
!list_empty(&ctx->hw_block_mem_list)) {
seq_puts(s, "\nhw_block mappings:\n\n");
seq_puts(s,
" virtual address block size mapped size HW block id\n");
seq_puts(s,
"---------------------------------------------------------------\n");
mutex_lock(&ctx->hw_block_list_lock);
list_for_each_entry(lnode, &ctx->hw_block_mem_list, node) {
seq_printf(s,
" 0x%-14lx %-6u %-6u %-9u\n",
lnode->vaddr, lnode->block_size, lnode->mapped_size,
lnode->id);
}
mutex_unlock(&ctx->hw_block_list_lock);
}
vm = &ctx->hdev->vm;
spin_lock(&vm->idr_lock);
if (!idr_is_empty(&vm->phys_pg_pack_handles))
seq_puts(s, "\n\nallocations:\n");
idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
if (phys_pg_pack->asid != ctx->asid)
continue;
seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
seq_printf(s, "page size: %u\n\n",
phys_pg_pack->page_size);
seq_puts(s, " physical address\n");
seq_puts(s, "---------------------\n");
for (j = 0 ; j < phys_pg_pack->npages ; j++) {
seq_printf(s, " 0x%-14llx\n",
phys_pg_pack->pages[j]);
}
}
spin_unlock(&vm->idr_lock);
}
mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
ctx = hl_get_compute_ctx(dev_entry->hdev);
if (ctx) {
seq_puts(s, "\nVA ranges:\n\n");
for (i = HL_VA_RANGE_TYPE_HOST ; i < HL_VA_RANGE_TYPE_MAX ; ++i) {
va_range = ctx->va_range[i];
seq_printf(s, " va_range %d\n", i);
seq_puts(s, "---------------------\n");
mutex_lock(&va_range->lock);
list_for_each_entry(va_block, &va_range->list, node) {
seq_printf(s, "%#16llx - %#16llx (%#llx)\n",
va_block->start, va_block->end,
va_block->size);
}
mutex_unlock(&va_range->lock);
seq_puts(s, "\n");
}
hl_ctx_put(ctx);
}
if (!once)
seq_puts(s, "\n");
return 0;
}
static int userptr_lookup_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct scatterlist *sg;
struct hl_userptr *userptr;
bool first = true;
u64 total_npages, npages, sg_start, sg_end;
dma_addr_t dma_addr;
int i;
spin_lock(&dev_entry->userptr_spinlock);
list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
if (dev_entry->userptr_lookup >= userptr->addr &&
dev_entry->userptr_lookup < userptr->addr + userptr->size) {
total_npages = 0;
for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
npages = hl_get_sg_info(sg, &dma_addr);
sg_start = userptr->addr +
total_npages * PAGE_SIZE;
sg_end = userptr->addr +
(total_npages + npages) * PAGE_SIZE;
if (dev_entry->userptr_lookup >= sg_start &&
dev_entry->userptr_lookup < sg_end) {
dma_addr += (dev_entry->userptr_lookup -
sg_start);
if (first) {
first = false;
seq_puts(s, "\n");
seq_puts(s, " user virtual address dma address pid region start region size\n");
seq_puts(s, "---------------------------------------------------------------------------------------\n");
}
seq_printf(s, " 0x%-18llx 0x%-16llx %-8u 0x%-16llx %-12llu\n",
dev_entry->userptr_lookup,
(u64)dma_addr, userptr->pid,
userptr->addr, userptr->size);
}
total_npages += npages;
}
}
}
spin_unlock(&dev_entry->userptr_spinlock);
if (!first)
seq_puts(s, "\n");
return 0;
}
static ssize_t userptr_lookup_write(struct file *file, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct seq_file *s = file->private_data;
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
ssize_t rc;
u64 value;
rc = kstrtoull_from_user(buf, count, 16, &value);
if (rc)
return rc;
dev_entry->userptr_lookup = value;
return count;
}
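/*
 * Usage sketch, assuming the debugfs dir /sys/kernel/debug/habanalabs/hl0
 * (the directory name comes from dev_name() and may differ): write a user
 * virtual address in hex, then read the node back to see the matching
 * pinned region:
 *
 *	echo 0x7f0000001000 > /sys/kernel/debug/habanalabs/hl0/userptr_lookup
 *	cat /sys/kernel/debug/habanalabs/hl0/userptr_lookup
 */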
static int mmu_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
struct hl_ctx *ctx;
struct hl_mmu_hop_info hops_info = {0};
u64 virt_addr = dev_entry->mmu_addr, phys_addr;
int i;
if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
ctx = hdev->kernel_ctx;
else
ctx = hl_get_compute_ctx(hdev);
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
return 0;
}
if (hl_mmu_get_tlb_info(ctx, virt_addr, &hops_info)) {
dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
virt_addr);
goto put_ctx;
}
hl_mmu_va_to_pa(ctx, virt_addr, &phys_addr);
if (hops_info.scrambled_vaddr &&
(dev_entry->mmu_addr != hops_info.scrambled_vaddr))
seq_printf(s,
"asid: %u, virt_addr: 0x%llx, scrambled virt_addr: 0x%llx,\nphys_addr: 0x%llx, scrambled_phys_addr: 0x%llx\n",
dev_entry->mmu_asid, dev_entry->mmu_addr,
hops_info.scrambled_vaddr,
hops_info.unscrambled_paddr, phys_addr);
else
seq_printf(s,
"asid: %u, virt_addr: 0x%llx, phys_addr: 0x%llx\n",
dev_entry->mmu_asid, dev_entry->mmu_addr, phys_addr);
for (i = 0 ; i < hops_info.used_hops ; i++) {
seq_printf(s, "hop%d_addr: 0x%llx\n",
i, hops_info.hop_info[i].hop_addr);
seq_printf(s, "hop%d_pte_addr: 0x%llx\n",
i, hops_info.hop_info[i].hop_pte_addr);
seq_printf(s, "hop%d_pte: 0x%llx\n",
i, hops_info.hop_info[i].hop_pte_val);
}
put_ctx:
if (dev_entry->mmu_asid != HL_KERNEL_ASID_ID)
hl_ctx_put(ctx);
return 0;
}
static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct seq_file *s = file->private_data;
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
char kbuf[MMU_KBUF_SIZE];
char *c;
ssize_t rc;
if (count > sizeof(kbuf) - 1)
goto err;
if (copy_from_user(kbuf, buf, count))
goto err;
kbuf[count] = 0;
c = strchr(kbuf, ' ');
if (!c)
goto err;
*c = '\0';
rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
if (rc)
goto err;
if (strncmp(c+1, "0x", 2))
goto err;
rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
if (rc)
goto err;
return count;
err:
dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");
return -EINVAL;
}
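/*
 * Usage sketch matching the usage string above, assuming the debugfs dir
 * /sys/kernel/debug/habanalabs/hl0: select the ASID and the (hex) virtual
 * address, then read the node to dump the translation hops:
 *
 *	echo "0 0x20000000" > /sys/kernel/debug/habanalabs/hl0/mmu
 *	cat /sys/kernel/debug/habanalabs/hl0/mmu
 */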
static int mmu_ack_error(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
int rc;
if (!dev_entry->mmu_cap_mask) {
dev_err(hdev->dev, "mmu_cap_mask is not set\n");
goto err;
}
rc = hdev->asic_funcs->ack_mmu_errors(hdev, dev_entry->mmu_cap_mask);
if (rc)
goto err;
return 0;
err:
return -EINVAL;
}
static ssize_t mmu_ack_error_value_write(struct file *file,
const char __user *buf,
size_t count, loff_t *f_pos)
{
struct seq_file *s = file->private_data;
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
char kbuf[MMU_KBUF_SIZE];
ssize_t rc;
if (count > sizeof(kbuf) - 1)
goto err;
if (copy_from_user(kbuf, buf, count))
goto err;
kbuf[count] = 0;
if (strncmp(kbuf, "0x", 2))
goto err;
rc = kstrtoull(kbuf, 16, &dev_entry->mmu_cap_mask);
if (rc)
goto err;
return count;
err:
dev_err(hdev->dev, "usage: echo <0xmmu_cap_mask > > mmu_error\n");
return -EINVAL;
}
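/*
 * Usage sketch, assuming the same debugfs dir and an illustrative mask
 * value: the mask must be given in hex, then reading mmu_error acks the
 * MMU errors it covers:
 *
 *	echo 0x3 > /sys/kernel/debug/habanalabs/hl0/mmu_error
 *	cat /sys/kernel/debug/habanalabs/hl0/mmu_error
 */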
static int engines_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
struct engines_data eng_data;
if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev,
"Can't check device idle during reset\n");
return 0;
}
eng_data.actual_size = 0;
eng_data.allocated_buf_size = HL_ENGINES_DATA_MAX_SIZE;
eng_data.buf = vmalloc(eng_data.allocated_buf_size);
if (!eng_data.buf)
return -ENOMEM;
hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);
if (eng_data.actual_size > eng_data.allocated_buf_size) {
dev_err(hdev->dev,
"Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
eng_data.actual_size, eng_data.allocated_buf_size);
vfree(eng_data.buf);
return -ENOMEM;
}
seq_write(s, eng_data.buf, eng_data.actual_size);
vfree(eng_data.buf);
return 0;
}
static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u64 val = hdev->memory_scrub_val;
int rc;
if (!hl_device_operational(hdev, NULL)) {
dev_warn_ratelimited(hdev->dev, "Can't scrub memory, device is not operational\n");
return -EIO;
}
mutex_lock(&hdev->fpriv_list_lock);
if (hdev->is_compute_ctx_active) {
mutex_unlock(&hdev->fpriv_list_lock);
dev_err(hdev->dev, "can't scrub dram, context exist\n");
return -EBUSY;
}
hdev->is_in_dram_scrub = true;
mutex_unlock(&hdev->fpriv_list_lock);
rc = hdev->asic_funcs->scrub_device_dram(hdev, val);
mutex_lock(&hdev->fpriv_list_lock);
hdev->is_in_dram_scrub = false;
mutex_unlock(&hdev->fpriv_list_lock);
if (rc)
return rc;
return count;
}
static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
if (prop->dram_supports_virtual_memory &&
(addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
return true;
if (addr >= prop->pmmu.start_addr &&
addr < prop->pmmu.end_addr)
return true;
if (addr >= prop->pmmu_huge.start_addr &&
addr < prop->pmmu_huge.end_addr)
return true;
return false;
}
static bool hl_is_device_internal_memory_va(struct hl_device *hdev, u64 addr,
u32 size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 dram_start_addr, dram_end_addr;
if (prop->dram_supports_virtual_memory) {
dram_start_addr = prop->dmmu.start_addr;
dram_end_addr = prop->dmmu.end_addr;
} else {
dram_start_addr = prop->dram_base_address;
dram_end_addr = prop->dram_end_address;
}
if (hl_mem_area_inside_range(addr, size, dram_start_addr,
dram_end_addr))
return true;
if (hl_mem_area_inside_range(addr, size, prop->sram_base_address,
prop->sram_end_address))
return true;
return false;
}
static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
u64 *phys_addr)
{
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct hl_ctx *ctx;
struct hl_vm_hash_node *hnode;
u64 end_address, range_size;
struct hl_userptr *userptr;
enum vm_type *vm_type;
bool valid = false;
int i, rc = 0;
ctx = hl_get_compute_ctx(hdev);
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
return -EINVAL;
}
/* Verify address is mapped */
mutex_lock(&ctx->mem_hash_lock);
hash_for_each(ctx->mem_hash, i, hnode, node) {
vm_type = hnode->ptr;
if (*vm_type == VM_TYPE_USERPTR) {
userptr = hnode->ptr;
range_size = userptr->size;
} else {
phys_pg_pack = hnode->ptr;
range_size = phys_pg_pack->total_size;
}
end_address = virt_addr + size;
if ((virt_addr >= hnode->vaddr) &&
(end_address <= hnode->vaddr + range_size)) {
valid = true;
break;
}
}
mutex_unlock(&ctx->mem_hash_lock);
if (!valid) {
dev_err(hdev->dev,
"virt addr 0x%llx is not mapped\n",
virt_addr);
rc = -EINVAL;
goto put_ctx;
}
rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr);
if (rc) {
dev_err(hdev->dev,
"virt addr 0x%llx is not mapped to phys addr\n",
virt_addr);
rc = -EINVAL;
}
put_ctx:
hl_ctx_put(ctx);
return rc;
}
static int hl_access_dev_mem_by_region(struct hl_device *hdev, u64 addr,
u64 *val, enum debugfs_access_type acc_type, bool *found)
{
size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
sizeof(u64) : sizeof(u32);
struct pci_mem_region *mem_reg;
int i;
for (i = 0; i < PCI_REGION_NUMBER; i++) {
mem_reg = &hdev->pci_mem_region[i];
if (!mem_reg->used)
continue;
if (addr >= mem_reg->region_base &&
addr <= mem_reg->region_base + mem_reg->region_size - acc_size) {
*found = true;
return hdev->asic_funcs->access_dev_mem(hdev, i, addr, val, acc_type);
}
}
return 0;
}
static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 offset = prop->device_dma_offset_for_host_access;
switch (acc_type) {
case DEBUGFS_READ32:
*val = *(u32 *) phys_to_virt(addr - offset);
break;
case DEBUGFS_WRITE32:
*(u32 *) phys_to_virt(addr - offset) = *val;
break;
case DEBUGFS_READ64:
*val = *(u64 *) phys_to_virt(addr - offset);
break;
case DEBUGFS_WRITE64:
*(u64 *) phys_to_virt(addr - offset) = *val;
break;
default:
dev_err(hdev->dev, "hostmem access-type %d id not supported\n", acc_type);
break;
}
}
static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type)
{
size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
sizeof(u64) : sizeof(u32);
u64 host_start = hdev->asic_prop.host_base_address;
u64 host_end = hdev->asic_prop.host_end_address;
bool user_address, found = false;
int rc;
user_address = hl_is_device_va(hdev, addr);
if (user_address) {
rc = device_va_to_pa(hdev, addr, acc_size, &addr);
if (rc)
return rc;
}
rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
if (rc) {
dev_err(hdev->dev,
"Failed reading addr %#llx from dev mem (%d)\n",
addr, rc);
return rc;
}
if (found)
return 0;
if (!user_address || device_iommu_mapped(&hdev->pdev->dev)) {
rc = -EINVAL;
goto err;
}
if (addr >= host_start && addr <= host_end - acc_size) {
hl_access_host_mem(hdev, addr, val, acc_type);
} else {
rc = -EINVAL;
goto err;
}
return 0;
err:
dev_err(hdev->dev, "invalid addr %#llx\n", addr);
return rc;
}
static ssize_t hl_data_read32(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u64 value64, addr = entry->addr;
char tmp_buf[32];
ssize_t rc;
u32 val;
if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
return 0;
}
if (*ppos)
return 0;
rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_READ32);
if (rc)
return rc;
val = value64; /* downcast back to 32 */
sprintf(tmp_buf, "0x%08x\n", val);
return simple_read_from_buffer(buf, count, ppos, tmp_buf,
strlen(tmp_buf));
}
static ssize_t hl_data_write32(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u64 value64, addr = entry->addr;
u32 value;
ssize_t rc;
if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
return 0;
}
rc = kstrtouint_from_user(buf, count, 16, &value);
if (rc)
return rc;
value64 = value;
rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_WRITE32);
if (rc)
return rc;
return count;
}
static ssize_t hl_data_read64(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u64 addr = entry->addr;
char tmp_buf[32];
ssize_t rc;
u64 val;
if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
return 0;
}
if (*ppos)
return 0;
rc = hl_access_mem(hdev, addr, &val, DEBUGFS_READ64);
if (rc)
return rc;
sprintf(tmp_buf, "0x%016llx\n", val);
return simple_read_from_buffer(buf, count, ppos, tmp_buf,
strlen(tmp_buf));
}
static ssize_t hl_data_write64(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u64 addr = entry->addr;
u64 value;
ssize_t rc;
if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
return 0;
}
rc = kstrtoull_from_user(buf, count, 16, &value);
if (rc)
return rc;
rc = hl_access_mem(hdev, addr, &value, DEBUGFS_WRITE64);
if (rc)
return rc;
return count;
}
static ssize_t hl_dma_size_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u64 addr = entry->addr;
ssize_t rc;
u32 size;
if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't DMA during reset\n");
return 0;
}
rc = kstrtouint_from_user(buf, count, 16, &size);
if (rc)
return rc;
if (!size) {
dev_err(hdev->dev, "DMA read failed. size can't be 0\n");
return -EINVAL;
}
if (size > SZ_128M) {
dev_err(hdev->dev,
"DMA read failed. size can't be larger than 128MB\n");
return -EINVAL;
}
if (!hl_is_device_internal_memory_va(hdev, addr, size)) {
dev_err(hdev->dev,
"DMA read failed. Invalid 0x%010llx + 0x%08x\n",
addr, size);
return -EINVAL;
}
/* Free the previous allocation, if there was any */
entry->data_dma_blob_desc.size = 0;
vfree(entry->data_dma_blob_desc.data);
entry->data_dma_blob_desc.data = vmalloc(size);
if (!entry->data_dma_blob_desc.data)
return -ENOMEM;
rc = hdev->asic_funcs->debugfs_read_dma(hdev, addr, size,
entry->data_dma_blob_desc.data);
if (rc) {
dev_err(hdev->dev, "Failed to DMA from 0x%010llx\n", addr);
vfree(entry->data_dma_blob_desc.data);
entry->data_dma_blob_desc.data = NULL;
return -EIO;
}
entry->data_dma_blob_desc.size = size;
return count;
}
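/*
 * Usage sketch, assuming the debugfs dir /sys/kernel/debug/habanalabs/hl0
 * and illustrative addresses: program the device address via "addr",
 * trigger the DMA by writing the size (hex), then read the "data_dma" blob:
 *
 *	echo 0x20000000 > /sys/kernel/debug/habanalabs/hl0/addr
 *	echo 0x1000 > /sys/kernel/debug/habanalabs/hl0/dma_size
 *	xxd /sys/kernel/debug/habanalabs/hl0/data_dma | head
 */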
static ssize_t hl_monitor_dump_trigger(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u32 size, trig;
ssize_t rc;
if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't dump monitors during reset\n");
return 0;
}
rc = kstrtouint_from_user(buf, count, 10, &trig);
if (rc)
return rc;
if (trig != 1) {
dev_err(hdev->dev, "Must write 1 to trigger monitor dump\n");
return -EINVAL;
}
size = sizeof(struct cpucp_monitor_dump);
/* Free the previous allocation, if there was any */
entry->mon_dump_blob_desc.size = 0;
vfree(entry->mon_dump_blob_desc.data);
entry->mon_dump_blob_desc.data = vmalloc(size);
if (!entry->mon_dump_blob_desc.data)
return -ENOMEM;
rc = hdev->asic_funcs->get_monitor_dump(hdev, entry->mon_dump_blob_desc.data);
if (rc) {
dev_err(hdev->dev, "Failed to dump monitors\n");
vfree(entry->mon_dump_blob_desc.data);
entry->mon_dump_blob_desc.data = NULL;
return -EIO;
}
entry->mon_dump_blob_desc.size = size;
return count;
}
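/*
 * Usage sketch, assuming the same debugfs dir: writing 1 triggers the dump,
 * which is then exposed through the "monitor_dump" blob:
 *
 *	echo 1 > /sys/kernel/debug/habanalabs/hl0/monitor_dump_trig
 *	cp /sys/kernel/debug/habanalabs/hl0/monitor_dump /tmp/mon_dump.bin
 */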
static ssize_t hl_get_power_state(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
char tmp_buf[200];
int i;
if (*ppos)
return 0;
if (hdev->pdev->current_state == PCI_D0)
i = 1;
else if (hdev->pdev->current_state == PCI_D3hot)
i = 2;
else
i = 3;
sprintf(tmp_buf,
"current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
return simple_read_from_buffer(buf, count, ppos, tmp_buf,
strlen(tmp_buf));
}
static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u32 value;
ssize_t rc;
rc = kstrtouint_from_user(buf, count, 10, &value);
if (rc)
return rc;
if (value == 1) {
pci_set_power_state(hdev->pdev, PCI_D0);
pci_restore_state(hdev->pdev);
rc = pci_enable_device(hdev->pdev);
if (rc < 0)
return rc;
} else if (value == 2) {
pci_save_state(hdev->pdev);
pci_disable_device(hdev->pdev);
pci_set_power_state(hdev->pdev, PCI_D3hot);
} else {
dev_dbg(hdev->dev, "invalid power state value %u\n", value);
return -EINVAL;
}
return count;
}
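/*
 * Usage sketch, assuming the same debugfs dir: 1 moves the device to D0,
 * 2 to D3hot (the node itself is created write-only further below):
 *
 *	echo 2 > /sys/kernel/debug/habanalabs/hl0/set_power_state
 *	echo 1 > /sys/kernel/debug/habanalabs/hl0/set_power_state
 */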
static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
char tmp_buf[32];
u64 val;
ssize_t rc;
if (*ppos)
return 0;
rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
entry->i2c_reg, entry->i2c_len, &val);
if (rc) {
dev_err(hdev->dev,
"Failed to read from I2C bus %d, addr %d, reg %d, len %d\n",
entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
return rc;
}
sprintf(tmp_buf, "%#02llx\n", val);
rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
strlen(tmp_buf));
return rc;
}
static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u64 value;
ssize_t rc;
rc = kstrtou64_from_user(buf, count, 16, &value);
if (rc)
return rc;
rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
entry->i2c_reg, entry->i2c_len, value);
if (rc) {
dev_err(hdev->dev,
"Failed to write %#02llx to I2C bus %d, addr %d, reg %d, len %d\n",
value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
return rc;
}
return count;
}
static ssize_t hl_led0_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u32 value;
ssize_t rc;
rc = kstrtouint_from_user(buf, count, 10, &value);
if (rc)
return rc;
value = value ? 1 : 0;
hl_debugfs_led_set(hdev, 0, value);
return count;
}
static ssize_t hl_led1_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u32 value;
ssize_t rc;
rc = kstrtouint_from_user(buf, count, 10, &value);
if (rc)
return rc;
value = value ? 1 : 0;
hl_debugfs_led_set(hdev, 1, value);
return count;
}
static ssize_t hl_led2_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u32 value;
ssize_t rc;
rc = kstrtouint_from_user(buf, count, 10, &value);
if (rc)
return rc;
value = value ? 1 : 0;
hl_debugfs_led_set(hdev, 2, value);
return count;
}
static ssize_t hl_device_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
static const char *help =
"Valid values: disable, enable, suspend, resume, cpu_timeout\n";
return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
}
static ssize_t hl_device_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
char data[30] = {0};
/* don't allow partial writes */
if (*ppos != 0)
return 0;
simple_write_to_buffer(data, 29, ppos, buf, count);
if (strncmp("disable", data, strlen("disable")) == 0) {
hdev->disabled = true;
} else if (strncmp("enable", data, strlen("enable")) == 0) {
hdev->disabled = false;
} else if (strncmp("suspend", data, strlen("suspend")) == 0) {
hdev->asic_funcs->suspend(hdev);
} else if (strncmp("resume", data, strlen("resume")) == 0) {
hdev->asic_funcs->resume(hdev);
} else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) {
hdev->device_cpu_disabled = true;
} else {
dev_err(hdev->dev,
"Valid values: disable, enable, suspend, resume, cpu_timeout\n");
count = -EINVAL;
}
return count;
}
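/*
 * Usage sketch, assuming the same debugfs dir: the accepted commands match
 * the help text returned by hl_device_read(), e.g.:
 *
 *	echo disable > /sys/kernel/debug/habanalabs/hl0/device
 *	echo enable > /sys/kernel/debug/habanalabs/hl0/device
 */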
static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
return 0;
}
static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
return count;
}
static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
char tmp_buf[200];
ssize_t rc;
if (!hdev->asic_prop.configurable_stop_on_err)
return -EOPNOTSUPP;
if (*ppos)
return 0;
sprintf(tmp_buf, "%d\n", hdev->stop_on_err);
rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
strlen(tmp_buf) + 1);
return rc;
}
static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u32 value;
ssize_t rc;
if (!hdev->asic_prop.configurable_stop_on_err)
return -EOPNOTSUPP;
if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev,
"Can't change stop on error during reset\n");
return 0;
}
rc = kstrtouint_from_user(buf, count, 10, &value);
if (rc)
return rc;
hdev->stop_on_err = value ? 1 : 0;
hl_device_reset(hdev, 0);
return count;
}
static ssize_t hl_security_violations_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
hdev->asic_funcs->ack_protection_bits_errors(hdev);
return 0;
}
static ssize_t hl_state_dump_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
ssize_t rc;
down_read(&entry->state_dump_sem);
if (!entry->state_dump[entry->state_dump_head])
rc = 0;
else
rc = simple_read_from_buffer(
buf, count, ppos,
entry->state_dump[entry->state_dump_head],
strlen(entry->state_dump[entry->state_dump_head]));
up_read(&entry->state_dump_sem);
return rc;
}
static ssize_t hl_state_dump_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
ssize_t rc;
u32 size;
int i;
rc = kstrtouint_from_user(buf, count, 10, &size);
if (rc)
return rc;
if (size <= 0 || size >= ARRAY_SIZE(entry->state_dump)) {
dev_err(hdev->dev, "Invalid number of dumps to skip\n");
return -EINVAL;
}
if (entry->state_dump[entry->state_dump_head]) {
down_write(&entry->state_dump_sem);
for (i = 0; i < size; ++i) {
vfree(entry->state_dump[entry->state_dump_head]);
entry->state_dump[entry->state_dump_head] = NULL;
if (entry->state_dump_head > 0)
entry->state_dump_head--;
else
entry->state_dump_head =
ARRAY_SIZE(entry->state_dump) - 1;
}
up_write(&entry->state_dump_sem);
}
return count;
}
static ssize_t hl_timeout_locked_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
char tmp_buf[200];
ssize_t rc;
if (*ppos)
return 0;
sprintf(tmp_buf, "%d\n",
jiffies_to_msecs(hdev->timeout_jiffies) / 1000);
rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
strlen(tmp_buf) + 1);
return rc;
}
static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u32 value;
ssize_t rc;
rc = kstrtouint_from_user(buf, count, 10, &value);
if (rc)
return rc;
if (value)
hdev->timeout_jiffies = msecs_to_jiffies(value * 1000);
else
hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
return count;
}
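/*
 * Usage sketch, assuming the same debugfs dir: the value is in seconds and
 * 0 disables the lockup timeout entirely:
 *
 *	cat /sys/kernel/debug/habanalabs/hl0/timeout_locked
 *	echo 120 > /sys/kernel/debug/habanalabs/hl0/timeout_locked
 */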
static ssize_t hl_check_razwi_happened(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
hdev->asic_funcs->check_if_razwi_happened(hdev);
return 0;
}
static const struct file_operations hl_mem_scrub_fops = {
.owner = THIS_MODULE,
.write = hl_memory_scrub,
};
static const struct file_operations hl_data32b_fops = {
.owner = THIS_MODULE,
.read = hl_data_read32,
.write = hl_data_write32
};
static const struct file_operations hl_data64b_fops = {
.owner = THIS_MODULE,
.read = hl_data_read64,
.write = hl_data_write64
};
static const struct file_operations hl_dma_size_fops = {
.owner = THIS_MODULE,
.write = hl_dma_size_write
};
static const struct file_operations hl_monitor_dump_fops = {
.owner = THIS_MODULE,
.write = hl_monitor_dump_trigger
};
static const struct file_operations hl_i2c_data_fops = {
.owner = THIS_MODULE,
.read = hl_i2c_data_read,
.write = hl_i2c_data_write
};
static const struct file_operations hl_power_fops = {
.owner = THIS_MODULE,
.read = hl_get_power_state,
.write = hl_set_power_state
};
static const struct file_operations hl_led0_fops = {
.owner = THIS_MODULE,
.write = hl_led0_write
};
static const struct file_operations hl_led1_fops = {
.owner = THIS_MODULE,
.write = hl_led1_write
};
static const struct file_operations hl_led2_fops = {
.owner = THIS_MODULE,
.write = hl_led2_write
};
static const struct file_operations hl_device_fops = {
.owner = THIS_MODULE,
.read = hl_device_read,
.write = hl_device_write
};
static const struct file_operations hl_clk_gate_fops = {
.owner = THIS_MODULE,
.read = hl_clk_gate_read,
.write = hl_clk_gate_write
};
static const struct file_operations hl_stop_on_err_fops = {
.owner = THIS_MODULE,
.read = hl_stop_on_err_read,
.write = hl_stop_on_err_write
};
static const struct file_operations hl_security_violations_fops = {
.owner = THIS_MODULE,
.read = hl_security_violations_read
};
static const struct file_operations hl_state_dump_fops = {
.owner = THIS_MODULE,
.read = hl_state_dump_read,
.write = hl_state_dump_write
};
static const struct file_operations hl_timeout_locked_fops = {
.owner = THIS_MODULE,
.read = hl_timeout_locked_read,
.write = hl_timeout_locked_write
};
static const struct file_operations hl_razwi_check_fops = {
.owner = THIS_MODULE,
.read = hl_check_razwi_happened
};
static const struct hl_info_list hl_debugfs_list[] = {
{"command_buffers", command_buffers_show, NULL},
{"command_submission", command_submission_show, NULL},
{"command_submission_jobs", command_submission_jobs_show, NULL},
{"userptr", userptr_show, NULL},
{"vm", vm_show, NULL},
{"userptr_lookup", userptr_lookup_show, userptr_lookup_write},
{"mmu", mmu_show, mmu_asid_va_write},
{"mmu_error", mmu_ack_error, mmu_ack_error_value_write},
{"engines", engines_show, NULL},
};
static int hl_debugfs_open(struct inode *inode, struct file *file)
{
struct hl_debugfs_entry *node = inode->i_private;
return single_open(file, node->info_ent->show, node);
}
static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct hl_debugfs_entry *node = file->f_inode->i_private;
if (node->info_ent->write)
return node->info_ent->write(file, buf, count, f_pos);
else
return -EINVAL;
}
static const struct file_operations hl_debugfs_fops = {
.owner = THIS_MODULE,
.open = hl_debugfs_open,
.read = seq_read,
.write = hl_debugfs_write,
.llseek = seq_lseek,
.release = single_release,
};
static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry, struct dentry *root)
{
debugfs_create_u8("i2c_bus",
0644,
root,
&dev_entry->i2c_bus);
debugfs_create_u8("i2c_addr",
0644,
root,
&dev_entry->i2c_addr);
debugfs_create_u8("i2c_reg",
0644,
root,
&dev_entry->i2c_reg);
debugfs_create_u8("i2c_len",
0644,
root,
&dev_entry->i2c_len);
debugfs_create_file("i2c_data",
0644,
root,
dev_entry,
&hl_i2c_data_fops);
debugfs_create_file("led0",
0200,
root,
dev_entry,
&hl_led0_fops);
debugfs_create_file("led1",
0200,
root,
dev_entry,
&hl_led1_fops);
debugfs_create_file("led2",
0200,
root,
dev_entry,
&hl_led2_fops);
}
static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_entry *dev_entry,
struct dentry *root)
{
int count = ARRAY_SIZE(hl_debugfs_list);
struct hl_debugfs_entry *entry;
int i;
debugfs_create_x64("memory_scrub_val",
0644,
root,
&hdev->memory_scrub_val);
debugfs_create_file("memory_scrub",
0200,
root,
dev_entry,
&hl_mem_scrub_fops);
debugfs_create_x64("addr",
0644,
root,
&dev_entry->addr);
debugfs_create_file("data32",
0644,
root,
dev_entry,
&hl_data32b_fops);
debugfs_create_file("data64",
0644,
root,
dev_entry,
&hl_data64b_fops);
debugfs_create_file("set_power_state",
0200,
root,
dev_entry,
&hl_power_fops);
debugfs_create_file("device",
0200,
root,
dev_entry,
&hl_device_fops);
debugfs_create_file("clk_gate",
0200,
root,
dev_entry,
&hl_clk_gate_fops);
debugfs_create_file("stop_on_err",
0644,
root,
dev_entry,
&hl_stop_on_err_fops);
debugfs_create_file("dump_security_violations",
0644,
root,
dev_entry,
&hl_security_violations_fops);
debugfs_create_file("dump_razwi_events",
0644,
root,
dev_entry,
&hl_razwi_check_fops);
debugfs_create_file("dma_size",
0200,
root,
dev_entry,
&hl_dma_size_fops);
debugfs_create_blob("data_dma",
0400,
root,
&dev_entry->data_dma_blob_desc);
debugfs_create_file("monitor_dump_trig",
0200,
root,
dev_entry,
&hl_monitor_dump_fops);
debugfs_create_blob("monitor_dump",
0400,
root,
&dev_entry->mon_dump_blob_desc);
debugfs_create_x8("skip_reset_on_timeout",
0644,
root,
&hdev->reset_info.skip_reset_on_timeout);
debugfs_create_file("state_dump",
0600,
root,
dev_entry,
&hl_state_dump_fops);
debugfs_create_file("timeout_locked",
0644,
root,
dev_entry,
&hl_timeout_locked_fops);
debugfs_create_u32("device_release_watchdog_timeout",
0644,
root,
&hdev->device_release_watchdog_timeout_sec);
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
0444,
root,
entry,
&hl_debugfs_fops);
entry->info_ent = &hl_debugfs_list[i];
entry->dev_entry = dev_entry;
}
}
int hl_debugfs_device_init(struct hl_device *hdev)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
int count = ARRAY_SIZE(hl_debugfs_list);
dev_entry->hdev = hdev;
dev_entry->entry_arr = kmalloc_array(count, sizeof(struct hl_debugfs_entry), GFP_KERNEL);
if (!dev_entry->entry_arr)
return -ENOMEM;
dev_entry->data_dma_blob_desc.size = 0;
dev_entry->data_dma_blob_desc.data = NULL;
dev_entry->mon_dump_blob_desc.size = 0;
dev_entry->mon_dump_blob_desc.data = NULL;
INIT_LIST_HEAD(&dev_entry->file_list);
INIT_LIST_HEAD(&dev_entry->cb_list);
INIT_LIST_HEAD(&dev_entry->cs_list);
INIT_LIST_HEAD(&dev_entry->cs_job_list);
INIT_LIST_HEAD(&dev_entry->userptr_list);
INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
mutex_init(&dev_entry->file_mutex);
init_rwsem(&dev_entry->state_dump_sem);
spin_lock_init(&dev_entry->cb_spinlock);
spin_lock_init(&dev_entry->cs_spinlock);
spin_lock_init(&dev_entry->cs_job_spinlock);
spin_lock_init(&dev_entry->userptr_spinlock);
mutex_init(&dev_entry->ctx_mem_hash_mutex);
return 0;
}
void hl_debugfs_device_fini(struct hl_device *hdev)
{
struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
int i;
mutex_destroy(&entry->ctx_mem_hash_mutex);
mutex_destroy(&entry->file_mutex);
vfree(entry->data_dma_blob_desc.data);
vfree(entry->mon_dump_blob_desc.data);
for (i = 0; i < ARRAY_SIZE(entry->state_dump); ++i)
vfree(entry->state_dump[i]);
kfree(entry->entry_arr);
}
void hl_debugfs_add_device(struct hl_device *hdev)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
dev_entry->root = debugfs_create_dir(dev_name(hdev->dev), hl_debug_root);
add_files_to_device(hdev, dev_entry, dev_entry->root);
if (!hdev->asic_prop.fw_security_enabled)
add_secured_nodes(dev_entry, dev_entry->root);
}
void hl_debugfs_remove_device(struct hl_device *hdev)
{
struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
debugfs_remove_recursive(entry->root);
}
void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
mutex_lock(&dev_entry->file_mutex);
list_add(&hpriv->debugfs_list, &dev_entry->file_list);
mutex_unlock(&dev_entry->file_mutex);
}
void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
{
struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
mutex_lock(&dev_entry->file_mutex);
list_del(&hpriv->debugfs_list);
mutex_unlock(&dev_entry->file_mutex);
}
void hl_debugfs_add_cb(struct hl_cb *cb)
{
struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
spin_lock(&dev_entry->cb_spinlock);
list_add(&cb->debugfs_list, &dev_entry->cb_list);
spin_unlock(&dev_entry->cb_spinlock);
}
void hl_debugfs_remove_cb(struct hl_cb *cb)
{
struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
spin_lock(&dev_entry->cb_spinlock);
list_del(&cb->debugfs_list);
spin_unlock(&dev_entry->cb_spinlock);
}
void hl_debugfs_add_cs(struct hl_cs *cs)
{
struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
spin_lock(&dev_entry->cs_spinlock);
list_add(&cs->debugfs_list, &dev_entry->cs_list);
spin_unlock(&dev_entry->cs_spinlock);
}
void hl_debugfs_remove_cs(struct hl_cs *cs)
{
struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
spin_lock(&dev_entry->cs_spinlock);
list_del(&cs->debugfs_list);
spin_unlock(&dev_entry->cs_spinlock);
}
void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
spin_lock(&dev_entry->cs_job_spinlock);
list_add(&job->debugfs_list, &dev_entry->cs_job_list);
spin_unlock(&dev_entry->cs_job_spinlock);
}
void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
spin_lock(&dev_entry->cs_job_spinlock);
list_del(&job->debugfs_list);
spin_unlock(&dev_entry->cs_job_spinlock);
}
void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
spin_lock(&dev_entry->userptr_spinlock);
list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
spin_unlock(&dev_entry->userptr_spinlock);
}
void hl_debugfs_remove_userptr(struct hl_device *hdev,
struct hl_userptr *userptr)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
spin_lock(&dev_entry->userptr_spinlock);
list_del(&userptr->debugfs_list);
spin_unlock(&dev_entry->userptr_spinlock);
}
void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
mutex_lock(&dev_entry->ctx_mem_hash_mutex);
list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
mutex_lock(&dev_entry->ctx_mem_hash_mutex);
list_del(&ctx->debugfs_list);
mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}
/**
* hl_debugfs_set_state_dump - register state dump making it accessible via
* debugfs
* @hdev: pointer to the device structure
* @data: the actual dump data
* @length: the length of the data
*/
void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
unsigned long length)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
down_write(&dev_entry->state_dump_sem);
dev_entry->state_dump_head = (dev_entry->state_dump_head + 1) %
ARRAY_SIZE(dev_entry->state_dump);
vfree(dev_entry->state_dump[dev_entry->state_dump_head]);
dev_entry->state_dump[dev_entry->state_dump_head] = data;
up_write(&dev_entry->state_dump_sem);
}
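/*
 * Illustrative sketch (not part of the driver): state dumps are kept in a
 * small circular buffer, so repeatedly registering dumps eventually replaces
 * the oldest entry. Assuming ARRAY_SIZE(dev_entry->state_dump) == 2:
 *
 *	hl_debugfs_set_state_dump(hdev, dump_a, len_a);	head -> dump_a
 *	hl_debugfs_set_state_dump(hdev, dump_b, len_b);	head -> dump_b
 *	hl_debugfs_set_state_dump(hdev, dump_c, len_c);	dump_a is vfree()d
 *
 * The dump buffers are expected to come from vmalloc(), as the replaced
 * entry is released with vfree().
 */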
void __init hl_debugfs_init(void)
{
hl_debug_root = debugfs_create_dir("habanalabs", NULL);
}
void hl_debugfs_fini(void)
{
debugfs_remove_recursive(hl_debug_root);
}
| linux-master | drivers/accel/habanalabs/common/debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
/**
* hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
* the buffer descriptor.
*
* @mmg: parent unified memory manager
* @handle: requested buffer handle
*
* Find the buffer in the store and return a pointer to its descriptor.
* Increase buffer refcount. If not found - return NULL.
*/
struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle)
{
struct hl_mmap_mem_buf *buf;
spin_lock(&mmg->lock);
buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
if (!buf) {
spin_unlock(&mmg->lock);
dev_dbg(mmg->dev, "Buff get failed, no match to handle %#llx\n", handle);
return NULL;
}
kref_get(&buf->refcount);
spin_unlock(&mmg->lock);
return buf;
}
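/*
 * Illustrative usage sketch (not part of the driver): a typical get/put
 * pairing around a handle lookup; "mmg" and "handle" are assumed to come
 * from the caller's context.
 *
 *	struct hl_mmap_mem_buf *buf;
 *
 *	buf = hl_mmap_mem_buf_get(mmg, handle);
 *	if (!buf)
 *		return -EINVAL;
 *	... use buf->mappable_size, buf->behavior, etc. ...
 *	hl_mmap_mem_buf_put(buf);
 */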
/**
* hl_mmap_mem_buf_destroy - destroy the unused buffer
*
* @buf: memory manager buffer descriptor
*
* Internal function, used as a final step of buffer release. Shall be invoked
* only when the buffer is no longer in use (removed from idr). Will call the
* release callback (if applicable), and free the memory.
*/
static void hl_mmap_mem_buf_destroy(struct hl_mmap_mem_buf *buf)
{
if (buf->behavior->release)
buf->behavior->release(buf);
kfree(buf);
}
/**
* hl_mmap_mem_buf_release - release buffer
*
* @kref: kref that reached 0.
*
* Internal function, used as a kref release callback, when the last user of
* the buffer is released. Shall be called from an interrupt context.
*/
static void hl_mmap_mem_buf_release(struct kref *kref)
{
struct hl_mmap_mem_buf *buf =
container_of(kref, struct hl_mmap_mem_buf, refcount);
spin_lock(&buf->mmg->lock);
idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
spin_unlock(&buf->mmg->lock);
hl_mmap_mem_buf_destroy(buf);
}
/**
* hl_mmap_mem_buf_remove_idr_locked - remove handle from idr
*
* @kref: kref that reached 0.
*
* Internal function, used for kref put by handle. Assumes mmg lock is taken.
* Will remove the buffer from idr, without destroying it.
*/
static void hl_mmap_mem_buf_remove_idr_locked(struct kref *kref)
{
struct hl_mmap_mem_buf *buf =
container_of(kref, struct hl_mmap_mem_buf, refcount);
idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
}
/**
* hl_mmap_mem_buf_put - decrease the reference to the buffer
*
* @buf: memory manager buffer descriptor
*
* Decrease the reference to the buffer, and release it if it was the last one.
* Shall be called from an interrupt context.
*/
int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf)
{
return kref_put(&buf->refcount, hl_mmap_mem_buf_release);
}
/**
* hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the
* given handle.
*
* @mmg: parent unified memory manager
* @handle: requested buffer handle
*
* Decrease the reference to the buffer, and release it if it was the last one.
* Shall not be called from an interrupt context. Return -EINVAL if handle was
* not found, else return the put outcome (0 or 1).
*/
int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
{
struct hl_mmap_mem_buf *buf;
spin_lock(&mmg->lock);
buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
if (!buf) {
spin_unlock(&mmg->lock);
dev_dbg(mmg->dev,
"Buff put failed, no match to handle %#llx\n", handle);
return -EINVAL;
}
if (kref_put(&buf->refcount, hl_mmap_mem_buf_remove_idr_locked)) {
spin_unlock(&mmg->lock);
hl_mmap_mem_buf_destroy(buf);
return 1;
}
spin_unlock(&mmg->lock);
return 0;
}
/**
* hl_mmap_mem_buf_alloc - allocate a new mappable buffer
*
* @mmg: parent unified memory manager
* @behavior: behavior object describing this buffer's polymorphic behavior
* @gfp: gfp flags to use for the memory allocations
* @args: additional args passed to behavior->alloc
*
* Allocate and register a new memory buffer inside the given memory manager.
* Return the pointer to the new buffer on success or NULL on failure.
*/
struct hl_mmap_mem_buf *
hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
void *args)
{
struct hl_mmap_mem_buf *buf;
int rc;
buf = kzalloc(sizeof(*buf), gfp);
if (!buf)
return NULL;
spin_lock(&mmg->lock);
rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
spin_unlock(&mmg->lock);
if (rc < 0) {
dev_err(mmg->dev,
"%s: Failed to allocate IDR for a new buffer, rc=%d\n",
behavior->topic, rc);
goto free_buf;
}
buf->mmg = mmg;
buf->behavior = behavior;
buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);
kref_init(&buf->refcount);
rc = buf->behavior->alloc(buf, gfp, args);
if (rc) {
dev_err(mmg->dev, "%s: Failure in buffer alloc callback %d\n",
behavior->topic, rc);
goto remove_idr;
}
return buf;
remove_idr:
spin_lock(&mmg->lock);
idr_remove(&mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
spin_unlock(&mmg->lock);
free_buf:
kfree(buf);
return NULL;
}
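/*
 * Illustrative sketch (not part of the driver): allocating a buffer with a
 * hypothetical behavior object. The field names follow what this file uses
 * (topic, mem_id, alloc, release, mmap); MY_MEM_ID, the my_* callbacks and
 * my_args are assumptions for the example only.
 *
 *	static struct hl_mmap_mem_buf_behavior my_behavior = {
 *		.topic = "MYBUF",
 *		.mem_id = MY_MEM_ID,
 *		.alloc = my_alloc_cb,
 *		.release = my_release_cb,
 *		.mmap = my_mmap_cb,
 *	};
 *
 *	buf = hl_mmap_mem_buf_alloc(mmg, &my_behavior, GFP_KERNEL, &my_args);
 *	if (!buf)
 *		return -ENOMEM;
 *	buf->handle now encodes the IDR value and mem_id, shifted by PAGE_SHIFT.
 */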
/**
* hl_mmap_mem_buf_vm_close - handle mmap close
*
* @vma: the vma object for which mmap was closed.
*
* Put the memory buffer if it is no longer mapped.
*/
static void hl_mmap_mem_buf_vm_close(struct vm_area_struct *vma)
{
struct hl_mmap_mem_buf *buf =
(struct hl_mmap_mem_buf *)vma->vm_private_data;
long new_mmap_size;
new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start);
if (new_mmap_size > 0) {
buf->real_mapped_size = new_mmap_size;
return;
}
atomic_set(&buf->mmap, 0);
hl_mmap_mem_buf_put(buf);
vma->vm_private_data = NULL;
}
static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = {
.close = hl_mmap_mem_buf_vm_close
};
/**
* hl_mem_mgr_mmap - map the given buffer to the user
*
* @mmg: unified memory manager
* @vma: the vma object to map the buffer into.
* @args: additional args passed to behavior->mmap
*
* Map the buffer specified by the vma->vm_pgoff to the given vma.
*/
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
void *args)
{
struct hl_mmap_mem_buf *buf;
u64 user_mem_size;
u64 handle;
int rc;
/* The page offset holds the buffer handle (IDR value), so it must be
* cleared before doing the mmap itself
*/
handle = vma->vm_pgoff << PAGE_SHIFT;
vma->vm_pgoff = 0;
/* Reference was taken here */
buf = hl_mmap_mem_buf_get(mmg, handle);
if (!buf) {
dev_err(mmg->dev,
"Memory mmap failed, no match to handle %#llx\n", handle);
return -EINVAL;
}
/* Validation check */
user_mem_size = vma->vm_end - vma->vm_start;
if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) {
dev_err(mmg->dev,
"%s: Memory mmap failed, mmap VM size 0x%llx != 0x%llx allocated physical mem size\n",
buf->behavior->topic, user_mem_size, buf->mappable_size);
rc = -EINVAL;
goto put_mem;
}
#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
user_mem_size)) {
#else
if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
user_mem_size)) {
#endif
dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
buf->behavior->topic, vma->vm_start);
rc = -EINVAL;
goto put_mem;
}
if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
dev_err(mmg->dev,
"%s, Memory mmap failed, already mapped to user\n",
buf->behavior->topic);
rc = -EINVAL;
goto put_mem;
}
vma->vm_ops = &hl_mmap_mem_buf_vm_ops;
/* Note: We're transferring the memory reference to vma->vm_private_data here. */
vma->vm_private_data = buf;
rc = buf->behavior->mmap(buf, vma, args);
if (rc) {
atomic_set(&buf->mmap, 0);
goto put_mem;
}
buf->real_mapped_size = buf->mappable_size;
vma->vm_pgoff = handle >> PAGE_SHIFT;
return 0;
put_mem:
hl_mmap_mem_buf_put(buf);
return rc;
}
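/*
 * Illustrative sketch (not part of the driver): the buffer handle doubles as
 * the mmap offset, so a user-space mapping of such a buffer would look
 * roughly like the call below ("fd" being the device file descriptor, and
 * "handle"/"size" values previously returned to the user are assumptions):
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, (off_t)handle);
 *
 * The kernel side then recovers the handle from vma->vm_pgoff, looks the
 * buffer up and delegates the actual mapping to behavior->mmap().
 */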
/**
* hl_mem_mgr_init - initialize unified memory manager
*
* @dev: owner device pointer
* @mmg: structure to initialize
*
* Initialize an instance of unified memory manager
*/
void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
{
mmg->dev = dev;
spin_lock_init(&mmg->lock);
idr_init(&mmg->handles);
}
/**
* hl_mem_mgr_fini - release unified memory manager
*
* @mmg: parent unified memory manager
*
* Release the unified memory manager. Shall be called from an interrupt context.
*/
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
{
struct hl_mmap_mem_buf *buf;
struct idr *idp;
const char *topic;
u32 id;
idp = &mmg->handles;
idr_for_each_entry(idp, buf, id) {
topic = buf->behavior->topic;
if (hl_mmap_mem_buf_put(buf) != 1)
dev_err(mmg->dev,
"%s: Buff handle %u for CTX is still alive\n",
topic, id);
}
}
/**
* hl_mem_mgr_idr_destroy() - destroy memory manager IDR.
* @mmg: parent unified memory manager
*
* Destroy the memory manager IDR.
* Shall be called when IDR is empty and no memory buffers are in use.
*/
void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg)
{
if (!idr_is_empty(&mmg->handles))
dev_crit(mmg->dev, "memory manager IDR is destroyed while it is not empty!\n");
idr_destroy(&mmg->handles);
}
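/*
 * Illustrative lifecycle sketch (not part of the driver), assuming a single
 * owner tearing the manager down after all users are gone:
 *
 *	hl_mem_mgr_init(dev, &mmg);
 *	... hl_mmap_mem_buf_alloc() / hl_mem_mgr_mmap() / *_put() ...
 *	hl_mem_mgr_fini(&mmg);		force-release leftover buffers
 *	hl_mem_mgr_idr_destroy(&mmg);	only once the IDR is empty
 */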
| linux-master | drivers/accel/habanalabs/common/memory_mgr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2019 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#define HWMON_NR_SENSOR_TYPES (hwmon_max)
#ifdef _HAS_HWMON_HWMON_T_ENABLE
static u32 fixup_flags_legacy_fw(struct hl_device *hdev, enum hwmon_sensor_types type,
u32 cpucp_flags)
{
u32 flags;
switch (type) {
case hwmon_temp:
flags = (cpucp_flags << 1) | HWMON_T_ENABLE;
break;
case hwmon_in:
flags = (cpucp_flags << 1) | HWMON_I_ENABLE;
break;
case hwmon_curr:
flags = (cpucp_flags << 1) | HWMON_C_ENABLE;
break;
case hwmon_fan:
flags = (cpucp_flags << 1) | HWMON_F_ENABLE;
break;
case hwmon_power:
flags = (cpucp_flags << 1) | HWMON_P_ENABLE;
break;
case hwmon_pwm:
/* enable bit was here from day 1, so no need to adjust */
flags = cpucp_flags;
break;
default:
dev_err(hdev->dev, "unsupported h/w sensor type %d\n", type);
flags = cpucp_flags;
break;
}
return flags;
}
static u32 fixup_attr_legacy_fw(u32 attr)
{
return (attr - 1);
}
#else
static u32 fixup_flags_legacy_fw(struct hl_device *hdev, enum hwmon_sensor_types type,
u32 cpucp_flags)
{
return cpucp_flags;
}
static u32 fixup_attr_legacy_fw(u32 attr)
{
return attr;
}
#endif /* !_HAS_HWMON_HWMON_T_ENABLE */
static u32 adjust_hwmon_flags(struct hl_device *hdev, enum hwmon_sensor_types type, u32 cpucp_flags)
{
u32 flags, cpucp_input_val;
bool use_cpucp_enum;
use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false;
/* If the f/w is using its own enum, check whether the property values are
* aligned with the hwmon values. If not, adjust them to the new format used
* in the kernel since 5.6 (enum values were incremented by 1 when the new
* enable value was added).
*/
if (use_cpucp_enum) {
switch (type) {
case hwmon_temp:
cpucp_input_val = cpucp_temp_input;
if (cpucp_input_val == hwmon_temp_input)
flags = cpucp_flags;
else
flags = (cpucp_flags << 1) | HWMON_T_ENABLE;
break;
case hwmon_in:
cpucp_input_val = cpucp_in_input;
if (cpucp_input_val == hwmon_in_input)
flags = cpucp_flags;
else
flags = (cpucp_flags << 1) | HWMON_I_ENABLE;
break;
case hwmon_curr:
cpucp_input_val = cpucp_curr_input;
if (cpucp_input_val == hwmon_curr_input)
flags = cpucp_flags;
else
flags = (cpucp_flags << 1) | HWMON_C_ENABLE;
break;
case hwmon_fan:
cpucp_input_val = cpucp_fan_input;
if (cpucp_input_val == hwmon_fan_input)
flags = cpucp_flags;
else
flags = (cpucp_flags << 1) | HWMON_F_ENABLE;
break;
case hwmon_pwm:
/* enable bit was here from day 1, so no need to adjust */
flags = cpucp_flags;
break;
case hwmon_power:
cpucp_input_val = CPUCP_POWER_INPUT;
if (cpucp_input_val == hwmon_power_input)
flags = cpucp_flags;
else
flags = (cpucp_flags << 1) | HWMON_P_ENABLE;
break;
default:
dev_err(hdev->dev, "unsupported h/w sensor type %d\n", type);
flags = cpucp_flags;
break;
}
} else {
flags = fixup_flags_legacy_fw(hdev, type, cpucp_flags);
}
return flags;
}
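/*
 * Illustrative example (not part of the driver) of the flag adjustment for a
 * legacy f/w enum, assuming HWMON_T_ENABLE is BIT(0) and using an arbitrary
 * flag mask:
 *
 *	cpucp_flags = 0x5;			two legacy attr bits set
 *	flags = (0x5 << 1) | HWMON_T_ENABLE;	== 0xb: legacy bits moved up,
 *						enable bit added
 */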
int hl_build_hwmon_channel_info(struct hl_device *hdev, struct cpucp_sensor *sensors_arr)
{
u32 num_sensors_for_type, flags, num_active_sensor_types = 0, arr_size = 0, *curr_arr;
u32 sensors_by_type_next_index[HWMON_NR_SENSOR_TYPES] = {0};
u32 *sensors_by_type[HWMON_NR_SENSOR_TYPES] = {NULL};
struct hwmon_channel_info **channels_info;
u32 counts[HWMON_NR_SENSOR_TYPES] = {0};
enum hwmon_sensor_types type;
int rc, i, j;
for (i = 0 ; i < CPUCP_MAX_SENSORS ; i++) {
type = le32_to_cpu(sensors_arr[i].type);
if ((type == 0) && (sensors_arr[i].flags == 0))
break;
if (type >= HWMON_NR_SENSOR_TYPES) {
dev_err(hdev->dev, "Got wrong sensor type %d from device\n", type);
return -EINVAL;
}
counts[type]++;
arr_size++;
}
for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) {
if (counts[i] == 0)
continue;
num_sensors_for_type = counts[i] + 1;
dev_dbg(hdev->dev, "num_sensors_for_type %d = %d\n", i, num_sensors_for_type);
curr_arr = kcalloc(num_sensors_for_type, sizeof(*curr_arr), GFP_KERNEL);
if (!curr_arr) {
rc = -ENOMEM;
goto sensors_type_err;
}
num_active_sensor_types++;
sensors_by_type[i] = curr_arr;
}
for (i = 0 ; i < arr_size ; i++) {
type = le32_to_cpu(sensors_arr[i].type);
curr_arr = sensors_by_type[type];
flags = adjust_hwmon_flags(hdev, type, le32_to_cpu(sensors_arr[i].flags));
curr_arr[sensors_by_type_next_index[type]++] = flags;
}
channels_info = kcalloc(num_active_sensor_types + 1, sizeof(struct hwmon_channel_info *),
GFP_KERNEL);
if (!channels_info) {
rc = -ENOMEM;
goto channels_info_array_err;
}
for (i = 0 ; i < num_active_sensor_types ; i++) {
channels_info[i] = kzalloc(sizeof(*channels_info[i]), GFP_KERNEL);
if (!channels_info[i]) {
rc = -ENOMEM;
goto channel_info_err;
}
}
for (i = 0, j = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) {
if (!sensors_by_type[i])
continue;
channels_info[j]->type = i;
channels_info[j]->config = sensors_by_type[i];
j++;
}
hdev->hl_chip_info->info = (const struct hwmon_channel_info **)channels_info;
return 0;
channel_info_err:
for (i = 0 ; i < num_active_sensor_types ; i++) {
if (channels_info[i]) {
kfree(channels_info[i]->config);
kfree(channels_info[i]);
}
}
kfree(channels_info);
channels_info_array_err:
sensors_type_err:
for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++)
kfree(sensors_by_type[i]);
return rc;
}
static int hl_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
struct hl_device *hdev = dev_get_drvdata(dev);
bool use_cpucp_enum;
u32 cpucp_attr;
int rc;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false;
switch (type) {
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
cpucp_attr = cpucp_temp_input;
break;
case hwmon_temp_max:
cpucp_attr = cpucp_temp_max;
break;
case hwmon_temp_crit:
cpucp_attr = cpucp_temp_crit;
break;
case hwmon_temp_max_hyst:
cpucp_attr = cpucp_temp_max_hyst;
break;
case hwmon_temp_crit_hyst:
cpucp_attr = cpucp_temp_crit_hyst;
break;
case hwmon_temp_offset:
cpucp_attr = cpucp_temp_offset;
break;
case hwmon_temp_highest:
cpucp_attr = cpucp_temp_highest;
break;
default:
return -EINVAL;
}
if (use_cpucp_enum)
rc = hl_get_temperature(hdev, channel, cpucp_attr, val);
else
rc = hl_get_temperature(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_in:
switch (attr) {
case hwmon_in_input:
cpucp_attr = cpucp_in_input;
break;
case hwmon_in_min:
cpucp_attr = cpucp_in_min;
break;
case hwmon_in_max:
cpucp_attr = cpucp_in_max;
break;
case hwmon_in_highest:
cpucp_attr = cpucp_in_highest;
break;
default:
return -EINVAL;
}
if (use_cpucp_enum)
rc = hl_get_voltage(hdev, channel, cpucp_attr, val);
else
rc = hl_get_voltage(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_curr:
switch (attr) {
case hwmon_curr_input:
cpucp_attr = cpucp_curr_input;
break;
case hwmon_curr_min:
cpucp_attr = cpucp_curr_min;
break;
case hwmon_curr_max:
cpucp_attr = cpucp_curr_max;
break;
case hwmon_curr_highest:
cpucp_attr = cpucp_curr_highest;
break;
default:
return -EINVAL;
}
if (use_cpucp_enum)
rc = hl_get_current(hdev, channel, cpucp_attr, val);
else
rc = hl_get_current(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_fan:
switch (attr) {
case hwmon_fan_input:
cpucp_attr = cpucp_fan_input;
break;
case hwmon_fan_min:
cpucp_attr = cpucp_fan_min;
break;
case hwmon_fan_max:
cpucp_attr = cpucp_fan_max;
break;
default:
return -EINVAL;
}
if (use_cpucp_enum)
rc = hl_get_fan_speed(hdev, channel, cpucp_attr, val);
else
rc = hl_get_fan_speed(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_pwm:
switch (attr) {
case hwmon_pwm_input:
cpucp_attr = cpucp_pwm_input;
break;
case hwmon_pwm_enable:
cpucp_attr = cpucp_pwm_enable;
break;
default:
return -EINVAL;
}
if (use_cpucp_enum)
rc = hl_get_pwm_info(hdev, channel, cpucp_attr, val);
else
/* no need for fixup as pwm was aligned from day 1 */
rc = hl_get_pwm_info(hdev, channel, attr, val);
break;
case hwmon_power:
switch (attr) {
case hwmon_power_input:
cpucp_attr = CPUCP_POWER_INPUT;
break;
case hwmon_power_input_highest:
cpucp_attr = CPUCP_POWER_INPUT_HIGHEST;
break;
default:
return -EINVAL;
}
if (use_cpucp_enum)
rc = hl_get_power(hdev, channel, cpucp_attr, val);
else
rc = hl_get_power(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
default:
return -EINVAL;
}
return rc;
}
static int hl_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
struct hl_device *hdev = dev_get_drvdata(dev);
u32 cpucp_attr;
bool use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
switch (type) {
case hwmon_temp:
switch (attr) {
case hwmon_temp_offset:
cpucp_attr = cpucp_temp_offset;
break;
case hwmon_temp_reset_history:
cpucp_attr = cpucp_temp_reset_history;
break;
default:
return -EINVAL;
}
if (use_cpucp_enum)
hl_set_temperature(hdev, channel, cpucp_attr, val);
else
hl_set_temperature(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_pwm:
switch (attr) {
case hwmon_pwm_input:
cpucp_attr = cpucp_pwm_input;
break;
case hwmon_pwm_enable:
cpucp_attr = cpucp_pwm_enable;
break;
default:
return -EINVAL;
}
if (use_cpucp_enum)
hl_set_pwm_info(hdev, channel, cpucp_attr, val);
else
/* no need for fixup as pwm was aligned from day 1 */
hl_set_pwm_info(hdev, channel, attr, val);
break;
case hwmon_in:
switch (attr) {
case hwmon_in_reset_history:
cpucp_attr = cpucp_in_reset_history;
break;
default:
return -EINVAL;
}
if (use_cpucp_enum)
hl_set_voltage(hdev, channel, cpucp_attr, val);
else
hl_set_voltage(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_curr:
switch (attr) {
case hwmon_curr_reset_history:
cpucp_attr = cpucp_curr_reset_history;
break;
default:
return -EINVAL;
}
if (use_cpucp_enum)
hl_set_current(hdev, channel, cpucp_attr, val);
else
hl_set_current(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_power:
switch (attr) {
case hwmon_power_reset_history:
cpucp_attr = CPUCP_POWER_RESET_INPUT_HISTORY;
break;
default:
return -EINVAL;
}
if (use_cpucp_enum)
hl_set_power(hdev, channel, cpucp_attr, val);
else
hl_set_power(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
default:
return -EINVAL;
}
return 0;
}
static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
switch (type) {
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
case hwmon_temp_max:
case hwmon_temp_max_hyst:
case hwmon_temp_crit:
case hwmon_temp_crit_hyst:
case hwmon_temp_highest:
return 0444;
case hwmon_temp_offset:
return 0644;
case hwmon_temp_reset_history:
return 0200;
}
break;
case hwmon_in:
switch (attr) {
case hwmon_in_input:
case hwmon_in_min:
case hwmon_in_max:
case hwmon_in_highest:
return 0444;
case hwmon_in_reset_history:
return 0200;
}
break;
case hwmon_curr:
switch (attr) {
case hwmon_curr_input:
case hwmon_curr_min:
case hwmon_curr_max:
case hwmon_curr_highest:
return 0444;
case hwmon_curr_reset_history:
return 0200;
}
break;
case hwmon_fan:
switch (attr) {
case hwmon_fan_input:
case hwmon_fan_min:
case hwmon_fan_max:
return 0444;
}
break;
case hwmon_pwm:
switch (attr) {
case hwmon_pwm_input:
case hwmon_pwm_enable:
return 0644;
}
break;
case hwmon_power:
switch (attr) {
case hwmon_power_input:
case hwmon_power_input_highest:
return 0444;
case hwmon_power_reset_history:
return 0200;
}
break;
default:
break;
}
return 0;
}
static const struct hwmon_ops hl_hwmon_ops = {
.is_visible = hl_is_visible,
.read = hl_read,
.write = hl_write
};
int hl_get_temperature(struct hl_device *hdev,
int sensor_index, u32 attr, long *value)
{
struct cpucp_packet pkt;
u64 result;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEMPERATURE_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
dev_dbg(hdev->dev, "get temp, ctl 0x%x, sensor %d, type %d\n",
pkt.ctl, pkt.sensor_index, pkt.type);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
*value = (long) result;
if (rc) {
dev_err(hdev->dev,
"Failed to get temperature from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
}
return rc;
}
int hl_set_temperature(struct hl_device *hdev,
int sensor_index, u32 attr, long value)
{
struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEMPERATURE_SET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);
if (rc)
dev_err(hdev->dev,
"Failed to set temperature of sensor %d, error %d\n",
sensor_index, rc);
return rc;
}
int hl_get_voltage(struct hl_device *hdev,
int sensor_index, u32 attr, long *value)
{
struct cpucp_packet pkt;
u64 result;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_VOLTAGE_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
*value = (long) result;
if (rc) {
dev_err(hdev->dev,
"Failed to get voltage from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
}
return rc;
}
int hl_get_current(struct hl_device *hdev,
int sensor_index, u32 attr, long *value)
{
struct cpucp_packet pkt;
u64 result;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_CURRENT_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
*value = (long) result;
if (rc) {
dev_err(hdev->dev,
"Failed to get current from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
}
return rc;
}
int hl_get_fan_speed(struct hl_device *hdev,
int sensor_index, u32 attr, long *value)
{
struct cpucp_packet pkt;
u64 result;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_FAN_SPEED_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
*value = (long) result;
if (rc) {
dev_err(hdev->dev,
"Failed to get fan speed from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
}
return rc;
}
int hl_get_pwm_info(struct hl_device *hdev,
int sensor_index, u32 attr, long *value)
{
struct cpucp_packet pkt;
u64 result;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_PWM_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
*value = (long) result;
if (rc) {
dev_err(hdev->dev,
"Failed to get pwm info from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
}
return rc;
}
void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
long value)
{
struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_PWM_SET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);
if (rc)
dev_err(hdev->dev,
"Failed to set pwm info to sensor %d, error %d\n",
sensor_index, rc);
}
int hl_set_voltage(struct hl_device *hdev,
int sensor_index, u32 attr, long value)
{
struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_VOLTAGE_SET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);
if (rc)
dev_err(hdev->dev,
"Failed to set voltage of sensor %d, error %d\n",
sensor_index, rc);
return rc;
}
int hl_set_current(struct hl_device *hdev,
int sensor_index, u32 attr, long value)
{
struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_CURRENT_SET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);
if (rc)
dev_err(hdev->dev,
"Failed to set current of sensor %d, error %d\n",
sensor_index, rc);
return rc;
}
int hl_set_power(struct hl_device *hdev,
int sensor_index, u32 attr, long value)
{
struct cpucp_packet pkt;
struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
memset(&pkt, 0, sizeof(pkt));
if (prop->use_get_power_for_reset_history)
pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
else
pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_SET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);
if (rc)
dev_err(hdev->dev,
"Failed to set power of sensor %d, error %d\n",
sensor_index, rc);
return rc;
}
int hl_get_power(struct hl_device *hdev,
int sensor_index, u32 attr, long *value)
{
struct cpucp_packet pkt;
u64 result;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
*value = (long) result;
if (rc) {
dev_err(hdev->dev,
"Failed to get power of sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
}
return rc;
}
int hl_hwmon_init(struct hl_device *hdev)
{
struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
if ((hdev->hwmon_initialized) || !(hdev->cpu_queues_enable))
return 0;
if (hdev->hl_chip_info->info) {
hdev->hl_chip_info->ops = &hl_hwmon_ops;
hdev->hwmon_dev = hwmon_device_register_with_info(dev,
prop->cpucp_info.card_name, hdev,
hdev->hl_chip_info, NULL);
if (IS_ERR(hdev->hwmon_dev)) {
rc = PTR_ERR(hdev->hwmon_dev);
dev_err(hdev->dev,
"Unable to register hwmon device: %d\n", rc);
return rc;
}
dev_info(hdev->dev, "%s: add sensors information\n",
dev_name(hdev->hwmon_dev));
hdev->hwmon_initialized = true;
} else {
dev_info(hdev->dev, "no available sensors\n");
}
return 0;
}
void hl_hwmon_fini(struct hl_device *hdev)
{
if (!hdev->hwmon_initialized)
return;
hwmon_device_unregister(hdev->hwmon_dev);
}
void hl_hwmon_release_resources(struct hl_device *hdev)
{
const struct hwmon_channel_info * const *channel_info_arr;
int i = 0;
if (!hdev->hl_chip_info->info)
return;
channel_info_arr = hdev->hl_chip_info->info;
while (channel_info_arr[i]) {
kfree(channel_info_arr[i]->config);
kfree(channel_info_arr[i]);
i++;
}
kfree(channel_info_arr);
hdev->hl_chip_info->info = NULL;
}
| linux-master | drivers/accel/habanalabs/common/hwmon.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include <linux/vmalloc.h>
#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
/**
* hl_format_as_binary - helper function, format an integer as binary
* using supplied scratch buffer
* @buf: the buffer to use
* @buf_len: buffer capacity
* @n: number to format
*
* Returns pointer to buffer
*/
char *hl_format_as_binary(char *buf, size_t buf_len, u32 n)
{
int i;
u32 bit;
bool leading0 = true;
char *wrptr = buf;
if (buf_len > 0 && buf_len < 3) {
*wrptr = '\0';
return buf;
}
wrptr[0] = '0';
wrptr[1] = 'b';
wrptr += 2;
/* Remove 3 characters from length for '0b' and '\0' termination */
buf_len -= 3;
for (i = 0; i < sizeof(n) * BITS_PER_BYTE && buf_len; ++i, n <<= 1) {
/* Writing bit calculation in one line would cause a false
* positive static code analysis error, so splitting.
*/
bit = n & (1 << (sizeof(n) * BITS_PER_BYTE - 1));
bit = !!bit;
leading0 &= !bit;
if (!leading0) {
*wrptr = '0' + bit;
++wrptr;
}
}
*wrptr = '\0';
return buf;
}
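/*
 * Illustrative usage sketch (not part of the driver):
 *
 *	char scratch[40];
 *
 *	pr_info("mask: %s\n", hl_format_as_binary(scratch, sizeof(scratch), 10));
 *
 * would print "mask: 0b1010"; a zero input yields just "0b" because leading
 * zeroes are skipped.
 */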
/**
* resize_to_fit - helper function, resize buffer to fit given amount of data
* @buf: destination buffer double pointer
* @size: pointer to the size container
* @desired_size: size the buffer must contain
*
* Returns 0 on success or error code on failure.
* On success, the size of buffer is at least desired_size. Buffer is allocated
* via vmalloc and must be freed with vfree.
*/
static int resize_to_fit(char **buf, size_t *size, size_t desired_size)
{
char *resized_buf;
size_t new_size;
if (*size >= desired_size)
return 0;
/* Not enough space to print all, have to resize */
new_size = max_t(size_t, PAGE_SIZE, round_up(desired_size, PAGE_SIZE));
resized_buf = vmalloc(new_size);
if (!resized_buf)
return -ENOMEM;
memcpy(resized_buf, *buf, *size);
vfree(*buf);
*buf = resized_buf;
*size = new_size;
return 1;
}
/**
* hl_snprintf_resize() - print formatted data to buffer, resize as needed
* @buf: buffer double pointer, to be written to and resized, must be either
* NULL or allocated with vmalloc.
* @size: current size of the buffer
* @offset: current offset to write to
* @format: format of the data
*
* This function writes formatted data into the buffer. If the buffer is not
* large enough, it is resized using vmalloc. Size may be modified if the
* buffer was resized; offset is advanced by the number of bytes written,
* not including the terminating character.
*
* Returns 0 on success or error code on failure
*
* Note that the buffer has to be manually released using vfree.
*/
int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
const char *format, ...)
{
va_list args;
size_t length;
int rc;
if (*buf == NULL && (*size != 0 || *offset != 0))
return -EINVAL;
va_start(args, format);
length = vsnprintf(*buf + *offset, *size - *offset, format, args);
va_end(args);
rc = resize_to_fit(buf, size, *offset + length + 1);
if (rc < 0)
return rc;
else if (rc > 0) {
/* Resize was needed, write again */
va_start(args, format);
length = vsnprintf(*buf + *offset, *size - *offset, format,
args);
va_end(args);
}
*offset += length;
return 0;
}
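/*
 * Illustrative usage sketch (not part of the driver): the buffer starts out
 * NULL/zero-sized and grows on demand; the caller owns the final vmalloc()ed
 * buffer. This mirrors how hl_state_dump() below builds its output.
 *
 *	char *buf = NULL;
 *	size_t size = 0, offset = 0;
 *	int rc;
 *
 *	rc = hl_snprintf_resize(&buf, &size, &offset, "line %d\n", 1);
 *	if (!rc)
 *		rc = hl_snprintf_resize(&buf, &size, &offset, "line %d\n", 2);
 *	...
 *	vfree(buf);
 */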
/**
* hl_sync_engine_to_string - convert engine type enum to string literal
* @engine_type: engine type (TPC/MME/DMA)
*
* Return the resolved string literal
*/
const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type)
{
switch (engine_type) {
case ENGINE_DMA:
return "DMA";
case ENGINE_MME:
return "MME";
case ENGINE_TPC:
return "TPC";
}
return "Invalid Engine Type";
}
/**
* hl_print_resize_sync_engine - helper function, format engine name and ID
* using hl_snprintf_resize
* @buf: destination buffer double pointer to be used with hl_snprintf_resize
* @size: pointer to the size container
* @offset: pointer to the offset container
* @engine_type: engine type (TPC/MME/DMA)
* @engine_id: engine numerical id
*
* Returns 0 on success or error code on failure
*/
static int hl_print_resize_sync_engine(char **buf, size_t *size, size_t *offset,
enum hl_sync_engine_type engine_type,
u32 engine_id)
{
return hl_snprintf_resize(buf, size, offset, "%s%u",
hl_sync_engine_to_string(engine_type), engine_id);
}
/**
* hl_state_dump_get_sync_name - transform sync object id to name if available
* @hdev: pointer to the device
* @sync_id: sync object id
*
* Returns a name literal or NULL if not resolved.
* Note: returning NULL shall not be considered as a failure, as not all
* sync objects are named.
*/
const char *hl_state_dump_get_sync_name(struct hl_device *hdev, u32 sync_id)
{
struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
struct hl_hw_obj_name_entry *entry;
hash_for_each_possible(sds->so_id_to_str_tb, entry,
node, sync_id)
if (sync_id == entry->id)
return entry->name;
return NULL;
}
/**
* hl_state_dump_get_monitor_name - transform monitor object dump to monitor
* name if available
* @hdev: pointer to the device
* @mon: monitor state dump
*
* Returns a name literal or NULL if not resolved.
* Note: returning NULL shall not be considered as a failure, as not all
* monitors are named.
*/
const char *hl_state_dump_get_monitor_name(struct hl_device *hdev,
struct hl_mon_state_dump *mon)
{
struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
struct hl_hw_obj_name_entry *entry;
hash_for_each_possible(sds->monitor_id_to_str_tb,
entry, node, mon->id)
if (mon->id == entry->id)
return entry->name;
return NULL;
}
/**
* hl_state_dump_free_sync_to_engine_map - free sync object to engine map
* @map: sync object to engine map
*
* Note: generic free implementation, the allocation is implemented per ASIC.
*/
void hl_state_dump_free_sync_to_engine_map(struct hl_sync_to_engine_map *map)
{
struct hl_sync_to_engine_map_entry *entry;
struct hlist_node *tmp_node;
int i;
hash_for_each_safe(map->tb, i, tmp_node, entry, node) {
hash_del(&entry->node);
kfree(entry);
}
}
/**
* hl_state_dump_get_sync_to_engine - transform sync_id to
* hl_sync_to_engine_map_entry if available for current id
* @map: sync object to engine map
* @sync_id: sync object id
*
* Returns the translation entry if found or NULL if not.
* Note: a returned NULL shall not be considered a failure, as the map is
* best effort and does not cover all possible sync ids.
*/
static struct hl_sync_to_engine_map_entry *
hl_state_dump_get_sync_to_engine(struct hl_sync_to_engine_map *map, u32 sync_id)
{
struct hl_sync_to_engine_map_entry *entry;
hash_for_each_possible(map->tb, entry, node, sync_id)
if (entry->sync_id == sync_id)
return entry;
return NULL;
}
/**
* hl_state_dump_read_sync_objects - read sync objects array
* @hdev: pointer to the device
* @index: sync manager block index starting with E_N
*
* Returns array of size SP_SYNC_OBJ_AMOUNT on success or NULL on failure
*/
static u32 *hl_state_dump_read_sync_objects(struct hl_device *hdev, u32 index)
{
struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
u32 *sync_objects;
s64 base_addr; /* Base addr can be negative */
int i;
base_addr = sds->props[SP_SYNC_OBJ_BASE_ADDR] +
sds->props[SP_NEXT_SYNC_OBJ_ADDR] * index;
sync_objects = vmalloc(sds->props[SP_SYNC_OBJ_AMOUNT] * sizeof(u32));
if (!sync_objects)
return NULL;
for (i = 0; i < sds->props[SP_SYNC_OBJ_AMOUNT]; ++i)
sync_objects[i] = RREG32(base_addr + i * sizeof(u32));
return sync_objects;
}
/**
* hl_state_dump_free_sync_objects - free sync objects array allocated by
* hl_state_dump_read_sync_objects
* @sync_objects: sync objects array
*/
static void hl_state_dump_free_sync_objects(u32 *sync_objects)
{
vfree(sync_objects);
}
/**
* hl_state_dump_print_syncs_single_block - print active sync objects on a
* single block
* @hdev: pointer to the device
* @index: sync manager block index starting with E_N
* @buf: destination buffer double pointer to be used with hl_snprintf_resize
* @size: pointer to the size container
* @offset: pointer to the offset container
* @map: sync engines names map
*
* Returns 0 on success or error code on failure
*/
static int
hl_state_dump_print_syncs_single_block(struct hl_device *hdev, u32 index,
char **buf, size_t *size, size_t *offset,
struct hl_sync_to_engine_map *map)
{
struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
const char *sync_name;
u32 *sync_objects = NULL;
int rc = 0, i;
if (sds->sync_namager_names) {
rc = hl_snprintf_resize(
buf, size, offset, "%s\n",
sds->sync_namager_names[index]);
if (rc)
goto out;
}
sync_objects = hl_state_dump_read_sync_objects(hdev, index);
if (!sync_objects) {
rc = -ENOMEM;
goto out;
}
for (i = 0; i < sds->props[SP_SYNC_OBJ_AMOUNT]; ++i) {
struct hl_sync_to_engine_map_entry *entry;
u64 sync_object_addr;
if (!sync_objects[i])
continue;
sync_object_addr = sds->props[SP_SYNC_OBJ_BASE_ADDR] +
sds->props[SP_NEXT_SYNC_OBJ_ADDR] * index +
i * sizeof(u32);
rc = hl_snprintf_resize(buf, size, offset, "sync id: %u", i);
if (rc)
goto free_sync_objects;
sync_name = hl_state_dump_get_sync_name(hdev, i);
if (sync_name) {
rc = hl_snprintf_resize(buf, size, offset, " %s",
sync_name);
if (rc)
goto free_sync_objects;
}
rc = hl_snprintf_resize(buf, size, offset, ", value: %u",
sync_objects[i]);
if (rc)
goto free_sync_objects;
/* Append engine string */
entry = hl_state_dump_get_sync_to_engine(map,
(u32)sync_object_addr);
if (entry) {
rc = hl_snprintf_resize(buf, size, offset,
", Engine: ");
if (rc)
goto free_sync_objects;
rc = hl_print_resize_sync_engine(buf, size, offset,
entry->engine_type,
entry->engine_id);
if (rc)
goto free_sync_objects;
}
rc = hl_snprintf_resize(buf, size, offset, "\n");
if (rc)
goto free_sync_objects;
}
free_sync_objects:
hl_state_dump_free_sync_objects(sync_objects);
out:
return rc;
}
/**
* hl_state_dump_print_syncs - print active sync objects
* @hdev: pointer to the device
* @buf: destination buffer double pointer to be used with hl_snprintf_resize
* @size: pointer to the size container
* @offset: pointer to the offset container
*
* Returns 0 on success or error code on failure
*/
static int hl_state_dump_print_syncs(struct hl_device *hdev,
char **buf, size_t *size,
size_t *offset)
{
struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
struct hl_sync_to_engine_map *map;
u32 index;
int rc = 0;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return -ENOMEM;
rc = sds->funcs.gen_sync_to_engine_map(hdev, map);
if (rc)
goto free_map_mem;
rc = hl_snprintf_resize(buf, size, offset, "Non zero sync objects:\n");
if (rc)
goto out;
if (sds->sync_namager_names) {
for (index = 0; sds->sync_namager_names[index]; ++index) {
rc = hl_state_dump_print_syncs_single_block(
hdev, index, buf, size, offset, map);
if (rc)
goto out;
}
} else {
for (index = 0; index < sds->props[SP_NUM_CORES]; ++index) {
rc = hl_state_dump_print_syncs_single_block(
hdev, index, buf, size, offset, map);
if (rc)
goto out;
}
}
out:
hl_state_dump_free_sync_to_engine_map(map);
free_map_mem:
kfree(map);
return rc;
}
/**
* hl_state_dump_alloc_read_sm_block_monitors - read monitors for a specific
* block
* @hdev: pointer to the device
* @index: sync manager block index starting with E_N
*
* Returns an array of monitor data of size SP_MONITORS_AMOUNT or NULL
* on error
*/
static struct hl_mon_state_dump *
hl_state_dump_alloc_read_sm_block_monitors(struct hl_device *hdev, u32 index)
{
struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
struct hl_mon_state_dump *monitors;
s64 base_addr; /* Base addr can be negative */
int i;
monitors = vmalloc(sds->props[SP_MONITORS_AMOUNT] *
sizeof(struct hl_mon_state_dump));
if (!monitors)
return NULL;
base_addr = sds->props[SP_NEXT_SYNC_OBJ_ADDR] * index;
for (i = 0; i < sds->props[SP_MONITORS_AMOUNT]; ++i) {
monitors[i].id = i;
monitors[i].wr_addr_low =
RREG32(base_addr + sds->props[SP_MON_OBJ_WR_ADDR_LOW] +
i * sizeof(u32));
monitors[i].wr_addr_high =
RREG32(base_addr + sds->props[SP_MON_OBJ_WR_ADDR_HIGH] +
i * sizeof(u32));
monitors[i].wr_data =
RREG32(base_addr + sds->props[SP_MON_OBJ_WR_DATA] +
i * sizeof(u32));
monitors[i].arm_data =
RREG32(base_addr + sds->props[SP_MON_OBJ_ARM_DATA] +
i * sizeof(u32));
monitors[i].status =
RREG32(base_addr + sds->props[SP_MON_OBJ_STATUS] +
i * sizeof(u32));
}
return monitors;
}
/**
* hl_state_dump_free_monitors - free the monitors structure
* @monitors: monitors array created with
* hl_state_dump_alloc_read_sm_block_monitors
*/
static void hl_state_dump_free_monitors(struct hl_mon_state_dump *monitors)
{
vfree(monitors);
}
/**
* hl_state_dump_print_monitors_single_block - print active monitors on a
* single block
* @hdev: pointer to the device
* @index: sync manager block index starting with E_N
* @buf: destination buffer double pointer to be used with hl_snprintf_resize
* @size: pointer to the size container
* @offset: pointer to the offset container
*
* Returns 0 on success or error code on failure
*/
static int hl_state_dump_print_monitors_single_block(struct hl_device *hdev,
u32 index,
char **buf, size_t *size,
size_t *offset)
{
struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
struct hl_mon_state_dump *monitors = NULL;
int rc = 0, i;
if (sds->sync_namager_names) {
rc = hl_snprintf_resize(
buf, size, offset, "%s\n",
sds->sync_namager_names[index]);
if (rc)
goto out;
}
monitors = hl_state_dump_alloc_read_sm_block_monitors(hdev, index);
if (!monitors) {
rc = -ENOMEM;
goto out;
}
for (i = 0; i < sds->props[SP_MONITORS_AMOUNT]; ++i) {
if (!(sds->funcs.monitor_valid(&monitors[i])))
continue;
/* Monitor is valid, dump it */
rc = sds->funcs.print_single_monitor(buf, size, offset, hdev,
&monitors[i]);
if (rc)
goto free_monitors;
hl_snprintf_resize(buf, size, offset, "\n");
}
free_monitors:
hl_state_dump_free_monitors(monitors);
out:
return rc;
}
/**
* hl_state_dump_print_monitors - print active monitors
* @hdev: pointer to the device
* @buf: destination buffer double pointer to be used with hl_snprintf_resize
* @size: pointer to the size container
* @offset: pointer to the offset container
*
* Returns 0 on success or error code on failure
*/
static int hl_state_dump_print_monitors(struct hl_device *hdev,
char **buf, size_t *size,
size_t *offset)
{
struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
u32 index;
int rc = 0;
rc = hl_snprintf_resize(buf, size, offset,
"Valid (armed) monitor objects:\n");
if (rc)
goto out;
if (sds->sync_namager_names) {
for (index = 0; sds->sync_namager_names[index]; ++index) {
rc = hl_state_dump_print_monitors_single_block(
hdev, index, buf, size, offset);
if (rc)
goto out;
}
} else {
for (index = 0; index < sds->props[SP_NUM_CORES]; ++index) {
rc = hl_state_dump_print_monitors_single_block(
hdev, index, buf, size, offset);
if (rc)
goto out;
}
}
out:
return rc;
}
/**
* hl_state_dump_print_engine_fences - print active fences for a specific
* engine
* @hdev: pointer to the device
* @engine_type: engine type to use
* @buf: destination buffer double pointer to be used with hl_snprintf_resize
* @size: pointer to the size container
* @offset: pointer to the offset container
*/
static int
hl_state_dump_print_engine_fences(struct hl_device *hdev,
enum hl_sync_engine_type engine_type,
char **buf, size_t *size, size_t *offset)
{
struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
int rc = 0, i, n_fences;
u64 base_addr, next_fence;
switch (engine_type) {
case ENGINE_TPC:
n_fences = sds->props[SP_NUM_OF_TPC_ENGINES];
base_addr = sds->props[SP_TPC0_CMDQ];
next_fence = sds->props[SP_NEXT_TPC];
break;
case ENGINE_MME:
n_fences = sds->props[SP_NUM_OF_MME_ENGINES];
base_addr = sds->props[SP_MME_CMDQ];
next_fence = sds->props[SP_NEXT_MME];
break;
case ENGINE_DMA:
n_fences = sds->props[SP_NUM_OF_DMA_ENGINES];
base_addr = sds->props[SP_DMA_CMDQ];
next_fence = sds->props[SP_DMA_QUEUES_OFFSET];
break;
default:
return -EINVAL;
}
for (i = 0; i < n_fences; ++i) {
rc = sds->funcs.print_fences_single_engine(
hdev,
base_addr + next_fence * i +
sds->props[SP_FENCE0_CNT_OFFSET],
base_addr + next_fence * i +
sds->props[SP_CP_STS_OFFSET],
engine_type, i, buf, size, offset);
if (rc)
goto out;
}
out:
return rc;
}
/**
* hl_state_dump_print_fences - print active fences
* @hdev: pointer to the device
* @buf: destination buffer double pointer to be used with hl_snprintf_resize
* @size: pointer to the size container
* @offset: pointer to the offset container
*/
static int hl_state_dump_print_fences(struct hl_device *hdev, char **buf,
size_t *size, size_t *offset)
{
int rc = 0;
rc = hl_snprintf_resize(buf, size, offset, "Valid (armed) fences:\n");
if (rc)
goto out;
rc = hl_state_dump_print_engine_fences(hdev, ENGINE_TPC, buf, size, offset);
if (rc)
goto out;
rc = hl_state_dump_print_engine_fences(hdev, ENGINE_MME, buf, size, offset);
if (rc)
goto out;
rc = hl_state_dump_print_engine_fences(hdev, ENGINE_DMA, buf, size, offset);
if (rc)
goto out;
out:
return rc;
}
/**
* hl_state_dump() - dump system state
* @hdev: pointer to device structure
*/
int hl_state_dump(struct hl_device *hdev)
{
char *buf = NULL;
size_t offset = 0, size = 0;
int rc;
rc = hl_snprintf_resize(&buf, &size, &offset,
"Timestamp taken on: %llu\n\n",
ktime_to_ns(ktime_get()));
if (rc)
goto err;
rc = hl_state_dump_print_syncs(hdev, &buf, &size, &offset);
if (rc)
goto err;
hl_snprintf_resize(&buf, &size, &offset, "\n");
rc = hl_state_dump_print_monitors(hdev, &buf, &size, &offset);
if (rc)
goto err;
hl_snprintf_resize(&buf, &size, &offset, "\n");
rc = hl_state_dump_print_fences(hdev, &buf, &size, &offset);
if (rc)
goto err;
hl_snprintf_resize(&buf, &size, &offset, "\n");
hl_debugfs_set_state_dump(hdev, buf, size);
return 0;
err:
vfree(buf);
return rc;
}
| linux-master | drivers/accel/habanalabs/common/state_dump.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2019 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
#include <linux/slab.h>
int hl_asid_init(struct hl_device *hdev)
{
hdev->asid_bitmap = bitmap_zalloc(hdev->asic_prop.max_asid, GFP_KERNEL);
if (!hdev->asid_bitmap)
return -ENOMEM;
mutex_init(&hdev->asid_mutex);
/* ASID 0 is reserved for the kernel driver and device CPU */
set_bit(0, hdev->asid_bitmap);
return 0;
}
void hl_asid_fini(struct hl_device *hdev)
{
mutex_destroy(&hdev->asid_mutex);
bitmap_free(hdev->asid_bitmap);
}
unsigned long hl_asid_alloc(struct hl_device *hdev)
{
unsigned long found;
mutex_lock(&hdev->asid_mutex);
found = find_first_zero_bit(hdev->asid_bitmap,
hdev->asic_prop.max_asid);
if (found == hdev->asic_prop.max_asid)
found = 0;
else
set_bit(found, hdev->asid_bitmap);
mutex_unlock(&hdev->asid_mutex);
return found;
}
void hl_asid_free(struct hl_device *hdev, unsigned long asid)
{
if (asid == HL_KERNEL_ASID_ID || asid >= hdev->asic_prop.max_asid) {
dev_crit(hdev->dev, "Invalid ASID %lu", asid);
return;
}
clear_bit(asid, hdev->asid_bitmap);
}
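/*
 * Illustrative usage sketch (not part of the driver): ASID 0 is reserved, so
 * a return value of 0 from hl_asid_alloc() means allocation failed.
 *
 *	unsigned long asid;
 *
 *	asid = hl_asid_alloc(hdev);
 *	if (!asid)
 *		return -ENOSPC;		no free ASID left
 *	... program the context / MMU mappings with "asid" ...
 *	hl_asid_free(hdev, asid);
 */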
| linux-master | drivers/accel/habanalabs/common/asid.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#define pr_fmt(fmt) "habanalabs: " fmt
#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/vmalloc.h>
#include <trace/events/habanalabs.h>
#define HL_RESET_DELAY_USEC 10000 /* 10ms */
#define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC 5
enum dma_alloc_type {
DMA_ALLOC_COHERENT,
DMA_ALLOC_POOL,
};
#define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788
/*
* hl_set_dram_bar - sets the bar to allow later access to an address
*
* @hdev: pointer to habanalabs device structure.
* @addr: the address the caller wants to access.
* @region: the PCI region.
* @new_bar_region_base: the new BAR region base address.
*
* @return: the old BAR base address on success, U64_MAX for failure.
* The caller should set it back to the old address after use.
*
* In case the bar space does not cover the whole address space,
* the bar base address should be set to allow access to a given address.
* This function can also be called if the bar doesn't need to be set;
* in that case it simply won't change the base.
*/
static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region,
u64 *new_bar_region_base)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 bar_base_addr, old_base;
if (is_power_of_2(prop->dram_pci_bar_size))
bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
else
bar_base_addr = DIV_ROUND_DOWN_ULL(addr, prop->dram_pci_bar_size) *
prop->dram_pci_bar_size;
old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
/* in case of success we need to update the new BAR base */
if ((old_base != U64_MAX) && new_bar_region_base)
*new_bar_region_base = bar_base_addr;
return old_base;
}
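/*
 * Illustrative example (not part of the driver): with a power-of-2 DRAM PCI
 * BAR of 256MB (0x10000000), an access to address 0x23456789 moves the BAR
 * base to 0x20000000 (addr & ~(bar_size - 1)); the caller is expected to
 * restore the returned old base when done, as done below in
 * hl_access_sram_dram_region().
 */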
int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar)
{
struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
u64 old_base = 0, rc, bar_region_base = region->region_base;
void __iomem *acc_addr;
if (set_dram_bar) {
old_base = hl_set_dram_bar(hdev, addr, region, &bar_region_base);
if (old_base == U64_MAX)
return -EIO;
}
acc_addr = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
(addr - bar_region_base);
switch (acc_type) {
case DEBUGFS_READ8:
*val = readb(acc_addr);
break;
case DEBUGFS_WRITE8:
writeb(*val, acc_addr);
break;
case DEBUGFS_READ32:
*val = readl(acc_addr);
break;
case DEBUGFS_WRITE32:
writel(*val, acc_addr);
break;
case DEBUGFS_READ64:
*val = readq(acc_addr);
break;
case DEBUGFS_WRITE64:
writeq(*val, acc_addr);
break;
}
if (set_dram_bar) {
rc = hl_set_dram_bar(hdev, old_base, region, NULL);
if (rc == U64_MAX)
return -EIO;
}
return 0;
}
static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
gfp_t flag, enum dma_alloc_type alloc_type,
const char *caller)
{
void *ptr = NULL;
switch (alloc_type) {
case DMA_ALLOC_COHERENT:
ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);
break;
case DMA_ALLOC_POOL:
ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);
break;
}
if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))
trace_habanalabs_dma_alloc(hdev->dev, (u64) (uintptr_t) ptr, *dma_handle, size,
caller);
return ptr;
}
static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, enum dma_alloc_type alloc_type,
const char *caller)
{
/* this is needed to avoid warning on using freed pointer */
u64 store_cpu_addr = (u64) (uintptr_t) cpu_addr;
switch (alloc_type) {
case DMA_ALLOC_COHERENT:
hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
break;
case DMA_ALLOC_POOL:
hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
break;
}
trace_habanalabs_dma_free(hdev->dev, store_cpu_addr, dma_handle, size, caller);
}
void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
gfp_t flag, const char *caller)
{
return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT, caller);
}
void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, const char *caller)
{
hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);
}
void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
dma_addr_t *dma_handle, const char *caller)
{
return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL, caller);
}
void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
const char *caller)
{
hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);
}
void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
{
return hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
}
void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
{
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);
}
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct scatterlist *sg;
int rc, i;
rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);
if (rc)
return rc;
/* Shift to the device's base physical address of host memory if necessary */
if (prop->device_dma_offset_for_host_access)
for_each_sgtable_dma_sg(sgt, sg, i)
sg->dma_address += prop->device_dma_offset_for_host_access;
return 0;
}
void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct scatterlist *sg;
int i;
/* Cancel the device's base physical address of host memory if necessary */
if (prop->device_dma_offset_for_host_access)
for_each_sgtable_dma_sg(sgt, sg, i)
sg->dma_address -= prop->device_dma_offset_for_host_access;
dma_unmap_sgtable(&hdev->pdev->dev, sgt, dir, 0);
}
/*
* hl_access_cfg_region - access the config region
*
* @hdev: pointer to habanalabs device structure
* @addr: the address to access
* @val: the value to write from or read to
* @acc_type: the type of access (read/write 64/32)
*/
int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type)
{
struct pci_mem_region *cfg_region = &hdev->pci_mem_region[PCI_REGION_CFG];
u32 val_h, val_l;
if (!IS_ALIGNED(addr, sizeof(u32))) {
dev_err(hdev->dev, "address %#llx not a multiple of %zu\n", addr, sizeof(u32));
return -EINVAL;
}
switch (acc_type) {
case DEBUGFS_READ32:
*val = RREG32(addr - cfg_region->region_base);
break;
case DEBUGFS_WRITE32:
WREG32(addr - cfg_region->region_base, *val);
break;
case DEBUGFS_READ64:
val_l = RREG32(addr - cfg_region->region_base);
val_h = RREG32(addr + sizeof(u32) - cfg_region->region_base);
*val = (((u64) val_h) << 32) | val_l;
break;
case DEBUGFS_WRITE64:
WREG32(addr - cfg_region->region_base, lower_32_bits(*val));
WREG32(addr + sizeof(u32) - cfg_region->region_base, upper_32_bits(*val));
break;
default:
dev_err(hdev->dev, "access type %d is not supported\n", acc_type);
return -EOPNOTSUPP;
}
return 0;
}
/*
* hl_access_dev_mem - access device memory
*
* @hdev: pointer to habanalabs device structure
* @region_type: the type of the region the address belongs to
* @addr: the address to access
* @val: the value to write from or read to
* @acc_type: the type of access (r/w, 32/64)
*/
int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
u64 addr, u64 *val, enum debugfs_access_type acc_type)
{
switch (region_type) {
case PCI_REGION_CFG:
return hl_access_cfg_region(hdev, addr, val, acc_type);
case PCI_REGION_SRAM:
case PCI_REGION_DRAM:
return hl_access_sram_dram_region(hdev, addr, val, acc_type,
region_type, (region_type == PCI_REGION_DRAM));
default:
return -EFAULT;
}
return 0;
}
void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...)
{
va_list args;
int str_size;
va_start(args, fmt);
/* Calculate the formatted string length. vsnprintf() returns the length
* excluding the terminating NUL, hence increment the result by 1.
*/
str_size = vsnprintf(NULL, 0, fmt, args) + 1;
va_end(args);
if ((e->actual_size + str_size) < e->allocated_buf_size) {
va_start(args, fmt);
vsnprintf(e->buf + e->actual_size, str_size, fmt, args);
va_end(args);
}
/* Need to update the size even when not updating destination buffer to get the exact size
* of all input strings
*/
e->actual_size += str_size;
}
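/*
* Illustrative sketch, not part of the driver: callers append formatted lines
* to a pre-allocated engines_data buffer and can detect truncation afterwards,
* since actual_size keeps growing even when the buffer is full. The function
* name and printed strings are hypothetical.
*
*	void example_engines_dump(struct hl_device *hdev, struct engines_data *e)
*	{
*		hl_engine_data_sprintf(e, "engine %d is busy\n", 3);
*		hl_engine_data_sprintf(e, "engine %d is idle\n", 7);
*
*		if (e->actual_size >= e->allocated_buf_size)
*			dev_warn(hdev->dev, "engines dump truncated\n");
*	}
*/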
enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
if (hdev->reset_info.in_reset) {
if (hdev->reset_info.in_compute_reset)
status = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE;
else
status = HL_DEVICE_STATUS_IN_RESET;
} else if (hdev->reset_info.needs_reset) {
status = HL_DEVICE_STATUS_NEEDS_RESET;
} else if (hdev->disabled) {
status = HL_DEVICE_STATUS_MALFUNCTION;
} else if (!hdev->init_done) {
status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
} else {
status = HL_DEVICE_STATUS_OPERATIONAL;
}
return status;
}
bool hl_device_operational(struct hl_device *hdev,
enum hl_device_status *status)
{
enum hl_device_status current_status;
current_status = hl_device_status(hdev);
if (status)
*status = current_status;
switch (current_status) {
case HL_DEVICE_STATUS_IN_RESET:
case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
case HL_DEVICE_STATUS_MALFUNCTION:
case HL_DEVICE_STATUS_NEEDS_RESET:
return false;
case HL_DEVICE_STATUS_OPERATIONAL:
case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
default:
return true;
}
}
bool hl_ctrl_device_operational(struct hl_device *hdev,
enum hl_device_status *status)
{
enum hl_device_status current_status;
current_status = hl_device_status(hdev);
if (status)
*status = current_status;
switch (current_status) {
case HL_DEVICE_STATUS_MALFUNCTION:
return false;
case HL_DEVICE_STATUS_IN_RESET:
case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
case HL_DEVICE_STATUS_NEEDS_RESET:
case HL_DEVICE_STATUS_OPERATIONAL:
case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
default:
return true;
}
}
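/*
* Illustrative sketch, not part of the driver: gating an operation on the
* device status, as user-facing entry points typically do before touching
* the device. The function name is hypothetical.
*
*	int example_check_device(struct hl_device *hdev)
*	{
*		enum hl_device_status status;
*
*		if (!hl_device_operational(hdev, &status)) {
*			dev_dbg(hdev->dev, "device not usable, status %d\n", status);
*			return -EBUSY;
*		}
*		return 0;
*	}
*/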
static void print_idle_status_mask(struct hl_device *hdev, const char *message,
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
{
if (idle_mask[3])
dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx_%016llx)\n",
message, idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
else if (idle_mask[2])
dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx)\n",
message, idle_mask[2], idle_mask[1], idle_mask[0]);
else if (idle_mask[1])
dev_err(hdev->dev, "%s (mask %#llx_%016llx)\n",
message, idle_mask[1], idle_mask[0]);
else
dev_err(hdev->dev, "%s (mask %#llx)\n", message, idle_mask[0]);
}
static void hpriv_release(struct kref *ref)
{
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
bool reset_device, device_is_idle = true;
struct hl_fpriv *hpriv;
struct hl_device *hdev;
hpriv = container_of(ref, struct hl_fpriv, refcount);
hdev = hpriv->hdev;
hdev->asic_funcs->send_device_activity(hdev, false);
put_pid(hpriv->taskpid);
hl_debugfs_remove_file(hpriv);
mutex_destroy(&hpriv->ctx_lock);
mutex_destroy(&hpriv->restore_phase_mutex);
/* There should be no memory buffers at this point, so the handles IDR can be destroyed */
hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
/* Device should be reset if reset-upon-device-release is enabled, or if there is a pending
* reset that waits for device release.
*/
reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active;
/* Check the device idle status and reset if not idle.
* Skip it if already in reset, or if device is going to be reset in any case.
*/
if (!hdev->reset_info.in_reset && !reset_device && hdev->pdev && !hdev->pldm)
device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask,
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
if (!device_is_idle) {
print_idle_status_mask(hdev, "device is not idle after user context is closed",
idle_mask);
reset_device = true;
}
/* We need to remove the user from the list to make sure the reset process won't
* try to kill the user process. Because, if we got here, it means there are no
* more driver/device resources that the user process is occupying so there is
* no need to kill it
*
* However, we can't set the compute_ctx to NULL at this stage. This is to prevent
* a race between the release and opening the device again. We don't want to let
* a user open the device while a reset is about to happen.
*/
mutex_lock(&hdev->fpriv_list_lock);
list_del(&hpriv->dev_node);
mutex_unlock(&hdev->fpriv_list_lock);
if (reset_device) {
hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
} else {
/* Scrubbing is handled within hl_device_reset(), so here we need to do it directly */
int rc = hdev->asic_funcs->scrub_device_mem(hdev);
if (rc)
dev_err(hdev->dev, "failed to scrub memory from hpriv release (%d)\n", rc);
}
/* Now we can mark the compute_ctx as not active. Even if a reset is running in a different
* thread, we don't care because in_reset is marked, so if a user tries to open
* the device it will fail on that, even if compute_ctx is false.
*/
mutex_lock(&hdev->fpriv_list_lock);
hdev->is_compute_ctx_active = false;
mutex_unlock(&hdev->fpriv_list_lock);
hdev->compute_ctx_in_release = 0;
/* release the eventfd */
if (hpriv->notifier_event.eventfd)
eventfd_ctx_put(hpriv->notifier_event.eventfd);
mutex_destroy(&hpriv->notifier_event.lock);
kfree(hpriv);
}
void hl_hpriv_get(struct hl_fpriv *hpriv)
{
kref_get(&hpriv->refcount);
}
int hl_hpriv_put(struct hl_fpriv *hpriv)
{
return kref_put(&hpriv->refcount, hpriv_release);
}
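/*
* Illustrative sketch, not part of the driver: the reference pattern around
* an hl_fpriv. A flow that needs the private data beyond the lifetime of its
* caller takes a reference and drops it when done; the last hl_hpriv_put()
* triggers hpriv_release(). The function name is hypothetical.
*
*	void example_use_hpriv(struct hl_fpriv *hpriv)
*	{
*		hl_hpriv_get(hpriv);
*		// ... use hpriv->hdev, hpriv->mem_mgr, etc. ...
*		hl_hpriv_put(hpriv);
*	}
*/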
static void print_device_in_use_info(struct hl_device *hdev, const char *message)
{
u32 active_cs_num, dmabuf_export_cnt;
bool unknown_reason = true;
char buf[128];
size_t size;
int offset;
size = sizeof(buf);
offset = 0;
active_cs_num = hl_get_active_cs_num(hdev);
if (active_cs_num) {
unknown_reason = false;
offset += scnprintf(buf + offset, size - offset, " [%u active CS]", active_cs_num);
}
dmabuf_export_cnt = atomic_read(&hdev->dmabuf_export_cnt);
if (dmabuf_export_cnt) {
unknown_reason = false;
offset += scnprintf(buf + offset, size - offset, " [%u exported dma-buf]",
dmabuf_export_cnt);
}
if (unknown_reason)
scnprintf(buf + offset, size - offset, " [unknown reason]");
dev_notice(hdev->dev, "%s%s\n", message, buf);
}
/*
* hl_device_release - release function for habanalabs device
*
* @inode: pointer to inode structure
* @filp: pointer to file structure
*
* Called when a process closes a habanalabs device
*/
static int hl_device_release(struct inode *inode, struct file *filp)
{
struct hl_fpriv *hpriv = filp->private_data;
struct hl_device *hdev = hpriv->hdev;
filp->private_data = NULL;
if (!hdev) {
pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
put_pid(hpriv->taskpid);
return 0;
}
hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
/* Memory buffers might be still in use at this point and thus the handles IDR destruction
* is postponed to hpriv_release().
*/
hl_mem_mgr_fini(&hpriv->mem_mgr);
hdev->compute_ctx_in_release = 1;
if (!hl_hpriv_put(hpriv)) {
print_device_in_use_info(hdev, "User process closed FD but device still in use");
hl_device_reset(hdev, HL_DRV_RESET_HARD);
}
hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif;
return 0;
}
static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
{
struct hl_fpriv *hpriv = filp->private_data;
struct hl_device *hdev = hpriv->hdev;
filp->private_data = NULL;
if (!hdev) {
pr_err("Closing FD after device was removed\n");
goto out;
}
mutex_lock(&hdev->fpriv_ctrl_list_lock);
list_del(&hpriv->dev_node);
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
out:
/* release the eventfd */
if (hpriv->notifier_event.eventfd)
eventfd_ctx_put(hpriv->notifier_event.eventfd);
mutex_destroy(&hpriv->notifier_event.lock);
put_pid(hpriv->taskpid);
kfree(hpriv);
return 0;
}
/*
* hl_mmap - mmap function for habanalabs device
*
* @filp: pointer to file structure
* @vma: pointer to vm_area_struct of the process
*
* Called when a process does an mmap on a habanalabs device. Call the relevant mmap
* function at the end of the common code.
*/
static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct hl_fpriv *hpriv = filp->private_data;
struct hl_device *hdev = hpriv->hdev;
unsigned long vm_pgoff;
if (!hdev) {
pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n");
return -ENODEV;
}
vm_pgoff = vma->vm_pgoff;
switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
case HL_MMAP_TYPE_BLOCK:
vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
return hl_hw_block_mmap(hpriv, vma);
case HL_MMAP_TYPE_CB:
case HL_MMAP_TYPE_TS_BUFF:
return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);
}
return -EINVAL;
}
static const struct file_operations hl_ops = {
.owner = THIS_MODULE,
.open = hl_device_open,
.release = hl_device_release,
.mmap = hl_mmap,
.unlocked_ioctl = hl_ioctl,
.compat_ioctl = hl_ioctl
};
static const struct file_operations hl_ctrl_ops = {
.owner = THIS_MODULE,
.open = hl_device_open_ctrl,
.release = hl_device_release_ctrl,
.unlocked_ioctl = hl_ioctl_control,
.compat_ioctl = hl_ioctl_control
};
static void device_release_func(struct device *dev)
{
kfree(dev);
}
/*
* device_init_cdev - Initialize cdev and device for habanalabs device
*
* @hdev: pointer to habanalabs device structure
* @class: pointer to the class object of the device
* @minor: minor number of the specific device
* @fops: file operations to install for this device
* @name: name of the device as it will appear in the filesystem
* @cdev: pointer to the char device object that will be initialized
* @dev: pointer to the device object that will be initialized
*
* Initialize a cdev and a Linux device for the habanalabs device.
*/
static int device_init_cdev(struct hl_device *hdev, struct class *class,
int minor, const struct file_operations *fops,
char *name, struct cdev *cdev,
struct device **dev)
{
cdev_init(cdev, fops);
cdev->owner = THIS_MODULE;
*dev = kzalloc(sizeof(**dev), GFP_KERNEL);
if (!*dev)
return -ENOMEM;
device_initialize(*dev);
(*dev)->devt = MKDEV(hdev->major, minor);
(*dev)->class = class;
(*dev)->release = device_release_func;
dev_set_drvdata(*dev, hdev);
dev_set_name(*dev, "%s", name);
return 0;
}
static int cdev_sysfs_debugfs_add(struct hl_device *hdev)
{
int rc;
rc = cdev_device_add(&hdev->cdev, hdev->dev);
if (rc) {
dev_err(hdev->dev,
"failed to add a char device to the system\n");
return rc;
}
rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
if (rc) {
dev_err(hdev->dev,
"failed to add a control char device to the system\n");
goto delete_cdev_device;
}
/* hl_sysfs_init() must be done after adding the device to the system */
rc = hl_sysfs_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize sysfs\n");
goto delete_ctrl_cdev_device;
}
hl_debugfs_add_device(hdev);
hdev->cdev_sysfs_debugfs_created = true;
return 0;
delete_ctrl_cdev_device:
cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
delete_cdev_device:
cdev_device_del(&hdev->cdev, hdev->dev);
return rc;
}
static void cdev_sysfs_debugfs_remove(struct hl_device *hdev)
{
if (!hdev->cdev_sysfs_debugfs_created)
goto put_devices;
hl_debugfs_remove_device(hdev);
hl_sysfs_fini(hdev);
cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
cdev_device_del(&hdev->cdev, hdev->dev);
put_devices:
put_device(hdev->dev);
put_device(hdev->dev_ctrl);
}
static void device_hard_reset_pending(struct work_struct *work)
{
struct hl_device_reset_work *device_reset_work =
container_of(work, struct hl_device_reset_work, reset_work.work);
struct hl_device *hdev = device_reset_work->hdev;
u32 flags;
int rc;
flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;
rc = hl_device_reset(hdev, flags);
if ((rc == -EBUSY) && !hdev->device_fini_pending) {
struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
if (ctx) {
/* The refcount value read should be decremented by one, because the read is
* protected with hl_get_compute_ctx(), which takes an extra reference.
*/
dev_info(hdev->dev,
"Could not reset device (compute_ctx refcount %u). Will try again in %u seconds",
kref_read(&ctx->refcount) - 1, HL_PENDING_RESET_PER_SEC);
hl_ctx_put(ctx);
} else {
dev_info(hdev->dev, "Could not reset device. will try again in %u seconds",
HL_PENDING_RESET_PER_SEC);
}
queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
}
}
static void device_release_watchdog_func(struct work_struct *work)
{
struct hl_device_reset_work *watchdog_work =
container_of(work, struct hl_device_reset_work, reset_work.work);
struct hl_device *hdev = watchdog_work->hdev;
u32 flags;
dev_dbg(hdev->dev, "Device wasn't released in time. Initiate hard-reset.\n");
flags = watchdog_work->flags | HL_DRV_RESET_HARD | HL_DRV_RESET_FROM_WD_THR;
hl_device_reset(hdev, flags);
}
/*
* device_early_init - do some early initialization for the habanalabs device
*
* @hdev: pointer to habanalabs device structure
*
* Install the relevant function pointers and call the early_init function,
* if such a function exists
*/
static int device_early_init(struct hl_device *hdev)
{
int i, rc;
char workq_name[32];
switch (hdev->asic_type) {
case ASIC_GOYA:
goya_set_asic_funcs(hdev);
strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
break;
case ASIC_GAUDI:
gaudi_set_asic_funcs(hdev);
strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
break;
case ASIC_GAUDI_SEC:
gaudi_set_asic_funcs(hdev);
strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
break;
case ASIC_GAUDI2:
gaudi2_set_asic_funcs(hdev);
strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name));
break;
case ASIC_GAUDI2B:
gaudi2_set_asic_funcs(hdev);
strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name));
break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
return -EINVAL;
}
rc = hdev->asic_funcs->early_init(hdev);
if (rc)
return rc;
rc = hl_asid_init(hdev);
if (rc)
goto early_fini;
if (hdev->asic_prop.completion_queues_count) {
hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
sizeof(struct workqueue_struct *),
GFP_KERNEL);
if (!hdev->cq_wq) {
rc = -ENOMEM;
goto asid_fini;
}
}
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
snprintf(workq_name, 32, "hl%u-free-jobs-%u", hdev->cdev_idx, (u32) i);
hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
if (hdev->cq_wq[i] == NULL) {
dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
rc = -ENOMEM;
goto free_cq_wq;
}
}
snprintf(workq_name, 32, "hl%u-events", hdev->cdev_idx);
hdev->eq_wq = create_singlethread_workqueue(workq_name);
if (hdev->eq_wq == NULL) {
dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
rc = -ENOMEM;
goto free_cq_wq;
}
snprintf(workq_name, 32, "hl%u-cs-completions", hdev->cdev_idx);
hdev->cs_cmplt_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
if (!hdev->cs_cmplt_wq) {
dev_err(hdev->dev,
"Failed to allocate CS completions workqueue\n");
rc = -ENOMEM;
goto free_eq_wq;
}
snprintf(workq_name, 32, "hl%u-ts-free-obj", hdev->cdev_idx);
hdev->ts_free_obj_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
if (!hdev->ts_free_obj_wq) {
dev_err(hdev->dev,
"Failed to allocate Timestamp registration free workqueue\n");
rc = -ENOMEM;
goto free_cs_cmplt_wq;
}
snprintf(workq_name, 32, "hl%u-prefetch", hdev->cdev_idx);
hdev->prefetch_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
if (!hdev->prefetch_wq) {
dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
rc = -ENOMEM;
goto free_ts_free_wq;
}
hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info), GFP_KERNEL);
if (!hdev->hl_chip_info) {
rc = -ENOMEM;
goto free_prefetch_wq;
}
rc = hl_mmu_if_set_funcs(hdev);
if (rc)
goto free_chip_info;
hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
snprintf(workq_name, 32, "hl%u_device_reset", hdev->cdev_idx);
hdev->reset_wq = create_singlethread_workqueue(workq_name);
if (!hdev->reset_wq) {
rc = -ENOMEM;
dev_err(hdev->dev, "Failed to create device reset WQ\n");
goto free_cb_mgr;
}
INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending);
hdev->device_reset_work.hdev = hdev;
hdev->device_fini_pending = 0;
INIT_DELAYED_WORK(&hdev->device_release_watchdog_work.reset_work,
device_release_watchdog_func);
hdev->device_release_watchdog_work.hdev = hdev;
mutex_init(&hdev->send_cpu_message_lock);
mutex_init(&hdev->debug_lock);
INIT_LIST_HEAD(&hdev->cs_mirror_list);
spin_lock_init(&hdev->cs_mirror_lock);
spin_lock_init(&hdev->reset_info.lock);
INIT_LIST_HEAD(&hdev->fpriv_list);
INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);
mutex_init(&hdev->fpriv_list_lock);
mutex_init(&hdev->fpriv_ctrl_list_lock);
mutex_init(&hdev->clk_throttling.lock);
return 0;
free_cb_mgr:
hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
free_chip_info:
kfree(hdev->hl_chip_info);
free_prefetch_wq:
destroy_workqueue(hdev->prefetch_wq);
free_ts_free_wq:
destroy_workqueue(hdev->ts_free_obj_wq);
free_cs_cmplt_wq:
destroy_workqueue(hdev->cs_cmplt_wq);
free_eq_wq:
destroy_workqueue(hdev->eq_wq);
free_cq_wq:
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
if (hdev->cq_wq[i])
destroy_workqueue(hdev->cq_wq[i]);
kfree(hdev->cq_wq);
asid_fini:
hl_asid_fini(hdev);
early_fini:
if (hdev->asic_funcs->early_fini)
hdev->asic_funcs->early_fini(hdev);
return rc;
}
/*
* device_early_fini - finalize all that was done in device_early_init
*
* @hdev: pointer to habanalabs device structure
*
*/
static void device_early_fini(struct hl_device *hdev)
{
int i;
mutex_destroy(&hdev->debug_lock);
mutex_destroy(&hdev->send_cpu_message_lock);
mutex_destroy(&hdev->fpriv_list_lock);
mutex_destroy(&hdev->fpriv_ctrl_list_lock);
mutex_destroy(&hdev->clk_throttling.lock);
hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
kfree(hdev->hl_chip_info);
destroy_workqueue(hdev->prefetch_wq);
destroy_workqueue(hdev->ts_free_obj_wq);
destroy_workqueue(hdev->cs_cmplt_wq);
destroy_workqueue(hdev->eq_wq);
destroy_workqueue(hdev->reset_wq);
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
destroy_workqueue(hdev->cq_wq[i]);
kfree(hdev->cq_wq);
hl_asid_fini(hdev);
if (hdev->asic_funcs->early_fini)
hdev->asic_funcs->early_fini(hdev);
}
static bool is_pci_link_healthy(struct hl_device *hdev)
{
u16 vendor_id;
if (!hdev->pdev)
return false;
pci_read_config_word(hdev->pdev, PCI_VENDOR_ID, &vendor_id);
return (vendor_id == PCI_VENDOR_ID_HABANALABS);
}
static void hl_device_heartbeat(struct work_struct *work)
{
struct hl_device *hdev = container_of(work, struct hl_device,
work_heartbeat.work);
struct hl_info_fw_err_info info = {0};
u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
if (!hl_device_operational(hdev, NULL))
goto reschedule;
if (!hdev->asic_funcs->send_heartbeat(hdev))
goto reschedule;
if (hl_device_operational(hdev, NULL))
dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n",
is_pci_link_healthy(hdev) ? "healthy" : "broken");
info.err_type = HL_INFO_FW_HEARTBEAT_ERR;
info.event_mask = &event_mask;
hl_handle_fw_err(hdev, &info);
hl_device_cond_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT, event_mask);
return;
reschedule:
/*
* prev_reset_trigger tracks consecutive fatal h/w errors until the first
* heartbeat immediately after a reset.
* If control reached here, then at least one heartbeat work has been
* scheduled since the last reset/init cycle.
* So if the device is not already in a reset cycle, clear the
* prev_reset_trigger flag, as no reset with HL_DRV_RESET_FW_FATAL_ERR status
* occurred for at least one heartbeat. From this point the driver restarts
* tracking future consecutive fatal errors.
*/
if (!hdev->reset_info.in_reset)
hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
schedule_delayed_work(&hdev->work_heartbeat,
usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
}
/*
* device_late_init - do late stuff initialization for the habanalabs device
*
* @hdev: pointer to habanalabs device structure
*
* Do stuff that either needs the device H/W queues to be active or needs
* to happen after all the rest of the initialization is finished
*/
static int device_late_init(struct hl_device *hdev)
{
int rc;
if (hdev->asic_funcs->late_init) {
rc = hdev->asic_funcs->late_init(hdev);
if (rc) {
dev_err(hdev->dev,
"failed late initialization for the H/W\n");
return rc;
}
}
hdev->high_pll = hdev->asic_prop.high_pll;
if (hdev->heartbeat) {
INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
schedule_delayed_work(&hdev->work_heartbeat,
usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
}
hdev->late_init_done = true;
return 0;
}
/*
* device_late_fini - finalize all that was done in device_late_init
*
* @hdev: pointer to habanalabs device structure
*
*/
static void device_late_fini(struct hl_device *hdev)
{
if (!hdev->late_init_done)
return;
if (hdev->heartbeat)
cancel_delayed_work_sync(&hdev->work_heartbeat);
if (hdev->asic_funcs->late_fini)
hdev->asic_funcs->late_fini(hdev);
hdev->late_init_done = false;
}
int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
{
u64 max_power, curr_power, dc_power, dividend, divisor;
int rc;
max_power = hdev->max_power;
dc_power = hdev->asic_prop.dc_power_default;
divisor = max_power - dc_power;
if (!divisor) {
dev_warn(hdev->dev, "device utilization is not supported\n");
return -EOPNOTSUPP;
}
rc = hl_fw_cpucp_power_get(hdev, &curr_power);
if (rc)
return rc;
curr_power = clamp(curr_power, dc_power, max_power);
dividend = (curr_power - dc_power) * 100;
*utilization = (u32) div_u64(dividend, divisor);
return 0;
}
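/*
* Worked example of the utilization formula above, with assumed numbers for
* illustration only: if dc_power_default is 100W, max_power is 300W and the
* current power reading is 200W, then divisor = 300 - 100 = 200,
* dividend = (200 - 100) * 100 = 10000, and *utilization = 10000 / 200 = 50.
*/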
int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable)
{
int rc = 0;
mutex_lock(&hdev->debug_lock);
if (!enable) {
if (!hdev->in_debug) {
dev_err(hdev->dev,
"Failed to disable debug mode because device was not in debug mode\n");
rc = -EFAULT;
goto out;
}
if (!hdev->reset_info.hard_reset_pending)
hdev->asic_funcs->halt_coresight(hdev, ctx);
hdev->in_debug = 0;
goto out;
}
if (hdev->in_debug) {
dev_err(hdev->dev,
"Failed to enable debug mode because device is already in debug mode\n");
rc = -EFAULT;
goto out;
}
hdev->in_debug = 1;
out:
mutex_unlock(&hdev->debug_lock);
return rc;
}
static void take_release_locks(struct hl_device *hdev)
{
/* Flush anyone that is inside the critical section of enqueue
* jobs to the H/W
*/
hdev->asic_funcs->hw_queues_lock(hdev);
hdev->asic_funcs->hw_queues_unlock(hdev);
/* Flush processes that are sending message to CPU */
mutex_lock(&hdev->send_cpu_message_lock);
mutex_unlock(&hdev->send_cpu_message_lock);
/* Flush anyone that is inside device open */
mutex_lock(&hdev->fpriv_list_lock);
mutex_unlock(&hdev->fpriv_list_lock);
mutex_lock(&hdev->fpriv_ctrl_list_lock);
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}
static void hl_abort_waiting_for_completions(struct hl_device *hdev)
{
hl_abort_waiting_for_cs_completions(hdev);
/* Release all pending user interrupts, each pending user interrupt
* holds a reference to a user context.
*/
hl_release_pending_user_interrupts(hdev);
}
static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,
bool skip_wq_flush)
{
if (hard_reset)
device_late_fini(hdev);
/*
* Halt the engines and disable interrupts so we won't get any more
* completions from H/W and we won't have any accesses from the
* H/W to the host machine
*/
hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev, skip_wq_flush);
/* flush the MMU prefetch workqueue */
flush_workqueue(hdev->prefetch_wq);
hl_abort_waiting_for_completions(hdev);
}
/*
* hl_device_suspend - initiate device suspend
*
* @hdev: pointer to habanalabs device structure
*
* Puts the hw in the suspend state (all asics).
* Returns 0 for success or an error on failure.
* Called at driver suspend.
*/
int hl_device_suspend(struct hl_device *hdev)
{
int rc;
pci_save_state(hdev->pdev);
/* Block future CS/VM/JOB completion operations */
spin_lock(&hdev->reset_info.lock);
if (hdev->reset_info.in_reset) {
spin_unlock(&hdev->reset_info.lock);
dev_err(hdev->dev, "Can't suspend while in reset\n");
return -EIO;
}
hdev->reset_info.in_reset = 1;
spin_unlock(&hdev->reset_info.lock);
/* This blocks all other stuff that is not blocked by in_reset */
hdev->disabled = true;
take_release_locks(hdev);
rc = hdev->asic_funcs->suspend(hdev);
if (rc)
dev_err(hdev->dev,
"Failed to disable PCI access of device CPU\n");
/* Shut down the device */
pci_disable_device(hdev->pdev);
pci_set_power_state(hdev->pdev, PCI_D3hot);
return 0;
}
/*
* hl_device_resume - initiate device resume
*
* @hdev: pointer to habanalabs device structure
*
* Bring the hw back to operating state (all asics).
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
int hl_device_resume(struct hl_device *hdev)
{
int rc;
pci_set_power_state(hdev->pdev, PCI_D0);
pci_restore_state(hdev->pdev);
rc = pci_enable_device_mem(hdev->pdev);
if (rc) {
dev_err(hdev->dev,
"Failed to enable PCI device in resume\n");
return rc;
}
pci_set_master(hdev->pdev);
rc = hdev->asic_funcs->resume(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to resume device after suspend\n");
goto disable_device;
}
/* 'in_reset' was set to true during suspend, now we must clear it in order
* for hard reset to be performed
*/
spin_lock(&hdev->reset_info.lock);
hdev->reset_info.in_reset = 0;
spin_unlock(&hdev->reset_info.lock);
rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
if (rc) {
dev_err(hdev->dev, "Failed to reset device during resume\n");
goto disable_device;
}
return 0;
disable_device:
pci_disable_device(hdev->pdev);
return rc;
}
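/*
* Illustrative sketch, not part of the driver: how PCI power-management
* callbacks might wire these entry points. The callback names are
* hypothetical, and it is assumed that the PCI drvdata holds the hl_device
* pointer; the real PM glue lives in the driver's probe code.
*
*	static int example_pm_suspend(struct device *dev)
*	{
*		struct hl_device *hdev = pci_get_drvdata(to_pci_dev(dev));
*
*		return hl_device_suspend(hdev);
*	}
*
*	static int example_pm_resume(struct device *dev)
*	{
*		struct hl_device *hdev = pci_get_drvdata(to_pci_dev(dev));
*
*		return hl_device_resume(hdev);
*	}
*/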
static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)
{
struct task_struct *task = NULL;
struct list_head *fd_list;
struct hl_fpriv *hpriv;
struct mutex *fd_lock;
u32 pending_cnt;
fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
/* Giving time for user to close FD, and for processes that are inside
* hl_device_open to finish
*/
if (!list_empty(fd_list))
ssleep(1);
if (timeout) {
pending_cnt = timeout;
} else {
if (hdev->process_kill_trial_cnt) {
/* Processes have been already killed */
pending_cnt = 1;
goto wait_for_processes;
} else {
/* Wait a small period after process kill */
pending_cnt = HL_PENDING_RESET_PER_SEC;
}
}
mutex_lock(fd_lock);
/* This section must be protected because we are dereferencing
* pointers that are freed if the process exits
*/
list_for_each_entry(hpriv, fd_list, dev_node) {
task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
if (task) {
dev_info(hdev->dev, "Killing user process pid=%d\n",
task_pid_nr(task));
send_sig(SIGKILL, task, 1);
usleep_range(1000, 10000);
put_task_struct(task);
} else {
/*
* If we got here, it means that process was killed from outside the driver
* right after it started looping on fd_list and before get_pid_task, thus
* we don't need to kill it.
*/
dev_dbg(hdev->dev,
"Can't get task struct for user process, assuming process was killed from outside the driver\n");
}
}
mutex_unlock(fd_lock);
/*
* We killed the open users, but that doesn't mean they are closed.
* It could be that they are running a long cleanup phase in the driver
* e.g. MMU unmappings, or running other long teardown flow even before
* our cleanup.
* Therefore we need to wait again to make sure they are closed before
* continuing with the reset.
*/
wait_for_processes:
while ((!list_empty(fd_list)) && (pending_cnt)) {
dev_dbg(hdev->dev,
"Waiting for all unmap operations to finish before hard reset\n");
pending_cnt--;
ssleep(1);
}
/* All processes exited successfully */
if (list_empty(fd_list))
return 0;
/* Give up waiting for processes to exit */
if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)
return -ETIME;
hdev->process_kill_trial_cnt++;
return -EBUSY;
}
static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)
{
struct list_head *fd_list;
struct hl_fpriv *hpriv;
struct mutex *fd_lock;
fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
mutex_lock(fd_lock);
list_for_each_entry(hpriv, fd_list, dev_node)
hpriv->hdev = NULL;
mutex_unlock(fd_lock);
}
static void send_disable_pci_access(struct hl_device *hdev, u32 flags)
{
/* If the reset is due to heartbeat, the device CPU is not responsive,
* in which case there is no point in sending it a PCI-disable message.
*/
if ((flags & HL_DRV_RESET_HARD) &&
!(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
/* Disable PCI access from the device F/W so it won't send
* us additional interrupts. We disable MSI/MSI-X in
* the halt_engines function and we can't have the F/W
* sending us interrupts after that. We need to disable
* the access here because if the device is marked
* disabled, the message won't be sent. Also, in case
* of heartbeat, the device CPU is marked as disabled,
* so this message won't be sent.
*/
if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0)) {
dev_warn(hdev->dev, "Failed to disable FW's PCI access\n");
return;
}
/* verify that last EQs are handled before disabled is set */
if (hdev->cpu_queues_enable)
synchronize_irq(pci_irq_vector(hdev->pdev,
hdev->asic_prop.eq_interrupt_id));
}
}
static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
{
u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
/* No consecutive mechanism when user context exists */
if (hdev->is_compute_ctx_active)
return;
/*
* 'reset cause' is being updated here, because getting here
* means that it's the 1st time and the last time we're here
* ('in_reset' makes sure of it). This makes sure that
* 'reset_cause' will continue holding its 1st recorded reason!
*/
if (flags & HL_DRV_RESET_HEARTBEAT) {
hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;
} else if (flags & HL_DRV_RESET_TDR) {
hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;
cur_reset_trigger = HL_DRV_RESET_TDR;
} else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {
hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;
} else {
hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
}
/*
* If the reset cause is the same twice in a row, then reset_trigger_repeated
* is set, and if this reset is due to a fatal FW error,
* the device is put into an unstable state.
*/
if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {
hdev->reset_info.prev_reset_trigger = cur_reset_trigger;
hdev->reset_info.reset_trigger_repeated = 0;
} else {
hdev->reset_info.reset_trigger_repeated = 1;
}
}
/*
* hl_device_reset - reset the device
*
* @hdev: pointer to habanalabs device structure
* @flags: reset flags.
*
* Block future CS and wait for pending CS to be enqueued
* Call ASIC H/W fini
* Flush all completions
* Re-initialize all internal data structures
* Call ASIC H/W init, late_init
* Test queues
* Enable device
*
* Returns 0 for success or an error on failure.
*/
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
bool hard_reset, from_hard_reset_thread, fw_reset, reset_upon_device_release,
schedule_hard_reset = false, delay_reset, from_dev_release, from_watchdog_thread;
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
struct hl_ctx *ctx;
int i, rc, hw_fini_rc;
if (!hdev->init_done) {
dev_err(hdev->dev, "Can't reset before initialization is done\n");
return 0;
}
hard_reset = !!(flags & HL_DRV_RESET_HARD);
from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE);
delay_reset = !!(flags & HL_DRV_RESET_DELAY);
from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;
if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
return 0;
}
if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
dev_dbg(hdev->dev, "asic doesn't support compute reset - do hard-reset instead\n");
hard_reset = true;
}
if (reset_upon_device_release) {
if (hard_reset) {
dev_crit(hdev->dev,
"Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
return -EINVAL;
}
goto do_reset;
}
if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
dev_dbg(hdev->dev,
"asic doesn't allow inference soft reset - do hard-reset instead\n");
hard_reset = true;
}
do_reset:
/* Re-entry of reset thread */
if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
goto kill_processes;
/*
* Prevent concurrency in this function - only one reset should be
* done at any given time. We need to perform this only if we didn't
* get here from a dedicated hard reset thread.
*/
if (!from_hard_reset_thread) {
/* Block future CS/VM/JOB completion operations */
spin_lock(&hdev->reset_info.lock);
if (hdev->reset_info.in_reset) {
/* We allow scheduling of a hard reset only during a compute reset */
if (hard_reset && hdev->reset_info.in_compute_reset)
hdev->reset_info.hard_reset_schedule_flags = flags;
spin_unlock(&hdev->reset_info.lock);
return 0;
}
/* This still allows the completion of some KDMA ops.
* Update this before in_reset, because in_compute_reset implies we are in reset.
*/
hdev->reset_info.in_compute_reset = !hard_reset;
hdev->reset_info.in_reset = 1;
spin_unlock(&hdev->reset_info.lock);
/* Cancel the device release watchdog work if required.
* In case of reset-upon-device-release while the release watchdog work is
* scheduled due to a hard-reset, do hard-reset instead of compute-reset.
*/
if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) {
struct hl_device_reset_work *watchdog_work =
&hdev->device_release_watchdog_work;
hdev->reset_info.watchdog_active = 0;
if (!from_watchdog_thread)
cancel_delayed_work_sync(&watchdog_work->reset_work);
if (from_dev_release && (watchdog_work->flags & HL_DRV_RESET_HARD)) {
hdev->reset_info.in_compute_reset = 0;
flags |= HL_DRV_RESET_HARD;
flags &= ~HL_DRV_RESET_DEV_RELEASE;
hard_reset = true;
}
}
if (delay_reset)
usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);
escalate_reset_flow:
handle_reset_trigger(hdev, flags);
send_disable_pci_access(hdev, flags);
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
take_release_locks(hdev);
if (hard_reset)
dev_info(hdev->dev, "Going to reset device\n");
else if (reset_upon_device_release)
dev_dbg(hdev->dev, "Going to reset device after release by user\n");
else
dev_dbg(hdev->dev, "Going to reset engines of inference device\n");
}
if ((hard_reset) && (!from_hard_reset_thread)) {
hdev->reset_info.hard_reset_pending = true;
hdev->process_kill_trial_cnt = 0;
hdev->device_reset_work.flags = flags;
/*
* Because the reset function can't run from heartbeat work,
* we need to call the reset function from a dedicated work.
*/
queue_delayed_work(hdev->reset_wq, &hdev->device_reset_work.reset_work, 0);
return 0;
}
cleanup_resources(hdev, hard_reset, fw_reset, from_dev_release);
kill_processes:
if (hard_reset) {
/* Kill processes here after CS rollback. This is because the
* process can't really exit until all its CSs are done, which
* is what we do in cs rollback
*/
rc = device_kill_open_processes(hdev, 0, false);
if (rc == -EBUSY) {
if (hdev->device_fini_pending) {
dev_crit(hdev->dev,
"%s Failed to kill all open processes, stopping hard reset\n",
dev_name(&(hdev)->pdev->dev));
goto out_err;
}
/* signal reset thread to reschedule */
return rc;
}
if (rc) {
dev_crit(hdev->dev,
"%s Failed to kill all open processes, stopping hard reset\n",
dev_name(&(hdev)->pdev->dev));
goto out_err;
}
/* Flush the Event queue workers to make sure no other thread is
* reading or writing to registers during the reset
*/
flush_workqueue(hdev->eq_wq);
}
/* Reset the H/W. It will be in idle state after this returns */
hw_fini_rc = hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
if (hard_reset) {
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
/* Release kernel context */
if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
hdev->kernel_ctx = NULL;
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
hl_eq_reset(hdev, &hdev->event_queue);
}
/* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
hl_hw_queue_reset(hdev, hard_reset);
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_reset(hdev, &hdev->completion_queue[i]);
/* Make sure the context switch phase will run again */
ctx = hl_get_compute_ctx(hdev);
if (ctx) {
atomic_set(&ctx->thread_ctx_switch_token, 1);
ctx->thread_ctx_switch_wait_token = 0;
hl_ctx_put(ctx);
}
if (hw_fini_rc) {
rc = hw_fini_rc;
goto out_err;
}
/* Finished tear-down, starting to re-initialize */
if (hard_reset) {
hdev->device_cpu_disabled = false;
hdev->reset_info.hard_reset_pending = false;
if (hdev->reset_info.reset_trigger_repeated &&
(hdev->reset_info.prev_reset_trigger ==
HL_DRV_RESET_FW_FATAL_ERR)) {
/* if there are 2 back-to-back resets from FW,
* ensure the driver puts the device in an unusable state
*/
dev_crit(hdev->dev,
"%s Consecutive FW fatal errors received, stopping hard reset\n",
dev_name(&(hdev)->pdev->dev));
rc = -EIO;
goto out_err;
}
if (hdev->kernel_ctx) {
dev_crit(hdev->dev,
"%s kernel ctx was alive during hard reset, something is terribly wrong\n",
dev_name(&(hdev)->pdev->dev));
rc = -EBUSY;
goto out_err;
}
rc = hl_mmu_init(hdev);
if (rc) {
dev_err(hdev->dev,
"Failed to initialize MMU S/W after hard reset\n");
goto out_err;
}
/* Allocate the kernel context */
hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
GFP_KERNEL);
if (!hdev->kernel_ctx) {
rc = -ENOMEM;
hl_mmu_fini(hdev);
goto out_err;
}
hdev->is_compute_ctx_active = false;
rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
if (rc) {
dev_err(hdev->dev,
"failed to init kernel ctx in hard reset\n");
kfree(hdev->kernel_ctx);
hdev->kernel_ctx = NULL;
hl_mmu_fini(hdev);
goto out_err;
}
}
/* Device is now enabled as part of the initialization requires
* communication with the device firmware to get information that
* is required for the initialization itself
*/
hdev->disabled = false;
/* F/W security enabled indication might be updated after hard-reset */
if (hard_reset) {
rc = hl_fw_read_preboot_status(hdev);
if (rc)
goto out_err;
}
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize the H/W after reset\n");
goto out_err;
}
/* If device is not idle fail the reset process */
if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
print_idle_status_mask(hdev, "device is not idle after reset", idle_mask);
rc = -EIO;
goto out_err;
}
/* Check that the communication with the device is working */
rc = hdev->asic_funcs->test_queues(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to detect if device is alive after reset\n");
goto out_err;
}
if (hard_reset) {
rc = device_late_init(hdev);
if (rc) {
dev_err(hdev->dev, "Failed late init after hard reset\n");
goto out_err;
}
rc = hl_vm_init(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to init memory module after hard reset\n");
goto out_err;
}
if (!hdev->asic_prop.fw_security_enabled)
hl_fw_set_max_power(hdev);
} else {
rc = hdev->asic_funcs->compute_reset_late_init(hdev);
if (rc) {
if (reset_upon_device_release)
dev_err(hdev->dev,
"Failed late init in reset after device release\n");
else
dev_err(hdev->dev, "Failed late init after compute reset\n");
goto out_err;
}
}
rc = hdev->asic_funcs->scrub_device_mem(hdev);
if (rc) {
dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc);
goto out_err;
}
spin_lock(&hdev->reset_info.lock);
hdev->reset_info.in_compute_reset = 0;
/* Schedule hard reset only if requested and if not already in hard reset.
* We keep 'in_reset' enabled, so no other reset can go in during the hard
* reset schedule
*/
if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags)
schedule_hard_reset = true;
else
hdev->reset_info.in_reset = 0;
spin_unlock(&hdev->reset_info.lock);
hdev->reset_info.needs_reset = false;
if (hard_reset)
dev_info(hdev->dev,
"Successfully finished resetting the %s device\n",
dev_name(&(hdev)->pdev->dev));
else
dev_dbg(hdev->dev,
"Successfully finished resetting the %s device\n",
dev_name(&(hdev)->pdev->dev));
if (hard_reset) {
hdev->reset_info.hard_reset_cnt++;
/* After reset is done, we are ready to receive events from
* the F/W. We can't do it before because we will ignore events
* and if those events are fatal, we won't know about it and
* the device will be operational although it shouldn't be
*/
hdev->asic_funcs->enable_events_from_fw(hdev);
} else {
if (!reset_upon_device_release)
hdev->reset_info.compute_reset_cnt++;
if (schedule_hard_reset) {
dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");
flags = hdev->reset_info.hard_reset_schedule_flags;
hdev->reset_info.hard_reset_schedule_flags = 0;
hard_reset = true;
goto escalate_reset_flow;
}
}
return 0;
out_err:
hdev->disabled = true;
spin_lock(&hdev->reset_info.lock);
hdev->reset_info.in_compute_reset = 0;
if (hard_reset) {
dev_err(hdev->dev,
"%s Failed to reset! Device is NOT usable\n",
dev_name(&(hdev)->pdev->dev));
hdev->reset_info.hard_reset_cnt++;
} else {
if (reset_upon_device_release) {
dev_err(hdev->dev, "Failed to reset device after user release\n");
flags &= ~HL_DRV_RESET_DEV_RELEASE;
} else {
dev_err(hdev->dev, "Failed to do compute reset\n");
hdev->reset_info.compute_reset_cnt++;
}
spin_unlock(&hdev->reset_info.lock);
flags |= HL_DRV_RESET_HARD;
hard_reset = true;
goto escalate_reset_flow;
}
hdev->reset_info.in_reset = 0;
spin_unlock(&hdev->reset_info.lock);
return rc;
}
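/*
* Illustrative sketch, not part of the driver: a typical hard-reset request
* from an error path, e.g. after a command-submission timeout (TDR). The
* flags are the ones handled by handle_reset_trigger() above; the function
* name is hypothetical.
*
*	void example_tdr_reset(struct hl_device *hdev)
*	{
*		hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_TDR);
*	}
*/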
/*
* hl_device_cond_reset() - conditionally reset the device.
* @hdev: pointer to habanalabs device structure.
* @reset_flags: reset flags.
* @event_mask: events to notify user about.
*
* Conditionally reset the device, or alternatively schedule a watchdog work to reset the device
* unless another reset precedes it.
*/
int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
{
struct hl_ctx *ctx = NULL;
/* F/W reset cannot be postponed */
if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW)
goto device_reset;
/* Device release watchdog is relevant only if user exists and gets a reset notification */
if (!(event_mask & HL_NOTIFIER_EVENT_DEVICE_RESET)) {
dev_err(hdev->dev, "Resetting device without a reset indication to user\n");
goto device_reset;
}
ctx = hl_get_compute_ctx(hdev);
if (!ctx || !ctx->hpriv->notifier_event.eventfd)
goto device_reset;
/* Schedule the device release watchdog work unless reset is already in progress or if the
* work is already scheduled.
*/
spin_lock(&hdev->reset_info.lock);
if (hdev->reset_info.in_reset) {
spin_unlock(&hdev->reset_info.lock);
goto device_reset;
}
if (hdev->reset_info.watchdog_active)
goto out;
hdev->device_release_watchdog_work.flags = flags;
dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",
hdev->device_release_watchdog_timeout_sec);
schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
hdev->reset_info.watchdog_active = 1;
out:
spin_unlock(&hdev->reset_info.lock);
hl_notifier_event_send_all(hdev, event_mask);
hl_ctx_put(ctx);
hl_abort_waiting_for_completions(hdev);
return 0;
device_reset:
if (event_mask)
hl_notifier_event_send_all(hdev, event_mask);
if (ctx)
hl_ctx_put(ctx);
return hl_device_reset(hdev, flags);
}
static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask)
{
mutex_lock(¬ifier_event->lock);
notifier_event->events_mask |= event_mask;
if (notifier_event->eventfd)
eventfd_signal(notifier_event->eventfd, 1);
mutex_unlock(¬ifier_event->lock);
}
/*
* hl_notifier_event_send_all - notify all user processes via eventfd
*
* @hdev: pointer to habanalabs device structure
* @event_mask: the occurred event/s
*/
void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
{
struct hl_fpriv *hpriv;
if (!event_mask) {
dev_warn(hdev->dev, "Skip sending zero event");
return;
}
mutex_lock(&hdev->fpriv_list_lock);
list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
hl_notifier_event_send(&hpriv->notifier_event, event_mask);
mutex_unlock(&hdev->fpriv_list_lock);
/* control device */
mutex_lock(&hdev->fpriv_ctrl_list_lock);
list_for_each_entry(hpriv, &hdev->fpriv_ctrl_list, dev_node)
hl_notifier_event_send(&hpriv->notifier_event, event_mask);
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}
static int create_cdev(struct hl_device *hdev)
{
char *name;
int rc;
hdev->cdev_idx = hdev->id / 2;
name = kasprintf(GFP_KERNEL, "hl%d", hdev->cdev_idx);
if (!name) {
rc = -ENOMEM;
goto out_err;
}
/* Initialize cdev and device structures */
rc = device_init_cdev(hdev, hdev->hclass, hdev->id, &hl_ops, name,
&hdev->cdev, &hdev->dev);
kfree(name);
if (rc)
goto out_err;
name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->cdev_idx);
if (!name) {
rc = -ENOMEM;
goto free_dev;
}
/* Initialize cdev and device structures for control device */
rc = device_init_cdev(hdev, hdev->hclass, hdev->id_control, &hl_ctrl_ops,
name, &hdev->cdev_ctrl, &hdev->dev_ctrl);
kfree(name);
if (rc)
goto free_dev;
return 0;
free_dev:
put_device(hdev->dev);
out_err:
return rc;
}
/*
* hl_device_init - main initialization function for habanalabs device
*
* @hdev: pointer to habanalabs device structure
*
* Allocate an id for the device, do early initialization and then call the
* ASIC specific initialization functions. Finally, create the cdev and the
* Linux device to expose it to the user
*/
int hl_device_init(struct hl_device *hdev)
{
int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
bool expose_interfaces_on_err = false;
rc = create_cdev(hdev);
if (rc)
goto out_disabled;
/* Initialize ASIC function pointers and perform early init */
rc = device_early_init(hdev);
if (rc)
goto free_dev;
user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
hdev->asic_prop.user_interrupt_count;
if (user_interrupt_cnt) {
hdev->user_interrupt = kcalloc(user_interrupt_cnt, sizeof(*hdev->user_interrupt),
GFP_KERNEL);
if (!hdev->user_interrupt) {
rc = -ENOMEM;
goto early_fini;
}
}
/*
* Start calling ASIC initialization. First S/W then H/W and finally
* late init
*/
rc = hdev->asic_funcs->sw_init(hdev);
if (rc)
goto free_usr_intr_mem;
/* initialize completion structure for multi CS wait */
hl_multi_cs_completion_init(hdev);
/*
* Initialize the H/W queues. Must be done before hw_init, because
* there the addresses of the kernel queue are being written to the
* registers of the device
*/
rc = hl_hw_queues_create(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize kernel queues\n");
goto sw_fini;
}
cq_cnt = hdev->asic_prop.completion_queues_count;
/*
* Initialize the completion queues. Must be done before hw_init,
* because there the addresses of the completion queues are being
* passed as arguments to request_irq
*/
if (cq_cnt) {
hdev->completion_queue = kcalloc(cq_cnt,
sizeof(*hdev->completion_queue),
GFP_KERNEL);
if (!hdev->completion_queue) {
dev_err(hdev->dev,
"failed to allocate completion queues\n");
rc = -ENOMEM;
goto hw_queues_destroy;
}
}
for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
rc = hl_cq_init(hdev, &hdev->completion_queue[i],
hdev->asic_funcs->get_queue_id_for_cq(hdev, i));
if (rc) {
dev_err(hdev->dev,
"failed to initialize completion queue\n");
goto cq_fini;
}
hdev->completion_queue[i].cq_idx = i;
}
hdev->shadow_cs_queue = kcalloc(hdev->asic_prop.max_pending_cs,
sizeof(struct hl_cs *), GFP_KERNEL);
if (!hdev->shadow_cs_queue) {
rc = -ENOMEM;
goto cq_fini;
}
/*
* Initialize the event queue. Must be done before hw_init,
* because there the address of the event queue is being
* passed as argument to request_irq
*/
rc = hl_eq_init(hdev, &hdev->event_queue);
if (rc) {
dev_err(hdev->dev, "failed to initialize event queue\n");
goto free_shadow_cs_queue;
}
/* MMU S/W must be initialized before kernel context is created */
rc = hl_mmu_init(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");
goto eq_fini;
}
/* Allocate the kernel context */
hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
if (!hdev->kernel_ctx) {
rc = -ENOMEM;
goto mmu_fini;
}
hdev->is_compute_ctx_active = false;
hdev->asic_funcs->state_dump_init(hdev);
hdev->device_release_watchdog_timeout_sec = HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC;
hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL;
rc = hl_debugfs_device_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize debugfs entry structure\n");
kfree(hdev->kernel_ctx);
goto mmu_fini;
}
/* The debugfs entry structure is accessed in hl_ctx_init(), so it must be called after
* hl_debugfs_device_init().
*/
rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
if (rc) {
dev_err(hdev->dev, "failed to initialize kernel context\n");
kfree(hdev->kernel_ctx);
goto debugfs_device_fini;
}
rc = hl_cb_pool_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize CB pool\n");
goto release_ctx;
}
rc = hl_dec_init(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to initialize the decoder module\n");
goto cb_pool_fini;
}
/*
* From this point, override rc (=0) in case of an error to allow debugging
* (by adding char devices and creating sysfs/debugfs files as part of the error flow).
*/
expose_interfaces_on_err = true;
/* Device is now enabled as part of the initialization requires
* communication with the device firmware to get information that
* is required for the initialization itself
*/
hdev->disabled = false;
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize the H/W\n");
rc = 0;
goto out_disabled;
}
/* Check that the communication with the device is working */
rc = hdev->asic_funcs->test_queues(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to detect if device is alive\n");
rc = 0;
goto out_disabled;
}
rc = device_late_init(hdev);
if (rc) {
dev_err(hdev->dev, "Failed late initialization\n");
rc = 0;
goto out_disabled;
}
dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
hdev->asic_name,
hdev->asic_prop.dram_size / SZ_1G);
rc = hl_vm_init(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to initialize memory module\n");
rc = 0;
goto out_disabled;
}
/*
* Expose devices and sysfs/debugfs files to user.
* From here there is no need to expose them in case of an error.
*/
expose_interfaces_on_err = false;
rc = cdev_sysfs_debugfs_add(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to add char devices and sysfs/debugfs files\n");
rc = 0;
goto out_disabled;
}
/* Need to call this again because the max power might change,
* depending on card type for certain ASICs
*/
if (hdev->asic_prop.set_max_power_on_device_init &&
!hdev->asic_prop.fw_security_enabled)
hl_fw_set_max_power(hdev);
/*
* hl_hwmon_init() must be called after device_late_init(), because only
* there we get the information from the device about which
* hwmon-related sensors the device supports.
* Furthermore, it must be done after adding the device to the system.
*/
rc = hl_hwmon_init(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to initialize hwmon\n");
rc = 0;
goto out_disabled;
}
dev_notice(hdev->dev,
"Successfully added device %s to habanalabs driver\n",
dev_name(&(hdev)->pdev->dev));
hdev->init_done = true;
/* After initialization is done, we are ready to receive events from
* the F/W. We can't do it before because we will ignore events and if
* those events are fatal, we won't know about it and the device will
* be operational although it shouldn't be
*/
hdev->asic_funcs->enable_events_from_fw(hdev);
return 0;
cb_pool_fini:
hl_cb_pool_fini(hdev);
release_ctx:
if (hl_ctx_put(hdev->kernel_ctx) != 1)
dev_err(hdev->dev,
"kernel ctx is still alive on initialization failure\n");
debugfs_device_fini:
hl_debugfs_device_fini(hdev);
mmu_fini:
hl_mmu_fini(hdev);
eq_fini:
hl_eq_fini(hdev, &hdev->event_queue);
free_shadow_cs_queue:
kfree(hdev->shadow_cs_queue);
cq_fini:
for (i = 0 ; i < cq_ready_cnt ; i++)
hl_cq_fini(hdev, &hdev->completion_queue[i]);
kfree(hdev->completion_queue);
hw_queues_destroy:
hl_hw_queues_destroy(hdev);
sw_fini:
hdev->asic_funcs->sw_fini(hdev);
free_usr_intr_mem:
kfree(hdev->user_interrupt);
early_fini:
device_early_fini(hdev);
free_dev:
put_device(hdev->dev_ctrl);
put_device(hdev->dev);
out_disabled:
hdev->disabled = true;
if (expose_interfaces_on_err)
cdev_sysfs_debugfs_add(hdev);
dev_err(&hdev->pdev->dev,
"Failed to initialize hl%d. Device %s is NOT usable !\n",
hdev->cdev_idx, dev_name(&hdev->pdev->dev));
return rc;
}
/*
* hl_device_fini - main tear-down function for habanalabs device
*
* @hdev: pointer to habanalabs device structure
*
* Destroy the device, call ASIC fini functions and release the id
*/
void hl_device_fini(struct hl_device *hdev)
{
bool device_in_reset;
ktime_t timeout;
u64 reset_sec;
int i, rc;
dev_info(hdev->dev, "Removing device\n");
hdev->device_fini_pending = 1;
flush_delayed_work(&hdev->device_reset_work.reset_work);
if (hdev->pldm)
reset_sec = HL_PLDM_HARD_RESET_MAX_TIMEOUT;
else
reset_sec = HL_HARD_RESET_MAX_TIMEOUT;
/*
* This function is competing with the reset function, so try to
* take the reset atomic and if we are already in middle of reset,
* wait until reset function is finished. Reset function is designed
* to always finish. However, in Gaudi, because of all the network
* ports, the hard reset could take between 10-30 seconds
*/
timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);
spin_lock(&hdev->reset_info.lock);
device_in_reset = !!hdev->reset_info.in_reset;
if (!device_in_reset)
hdev->reset_info.in_reset = 1;
spin_unlock(&hdev->reset_info.lock);
while (device_in_reset) {
usleep_range(50, 200);
spin_lock(&hdev->reset_info.lock);
device_in_reset = !!hdev->reset_info.in_reset;
if (!device_in_reset)
hdev->reset_info.in_reset = 1;
spin_unlock(&hdev->reset_info.lock);
if (ktime_compare(ktime_get(), timeout) > 0) {
dev_crit(hdev->dev,
"%s Failed to remove device because reset function did not finish\n",
dev_name(&(hdev)->pdev->dev));
return;
}
}
cancel_delayed_work_sync(&hdev->device_release_watchdog_work.reset_work);
/* Disable PCI access from the device F/W so it won't send us additional
* interrupts. We disable MSI/MSI-X in the halt_engines function and we
* can't have the F/W sending us interrupts after that. We need to
* disable the access here because if the device is marked disabled, the
* message won't be sent. Also, in case of heartbeat, the device CPU is
* marked as disabled, so this message won't be sent.
*/
hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
/* Mark device as disabled */
hdev->disabled = true;
take_release_locks(hdev);
hdev->reset_info.hard_reset_pending = true;
hl_hwmon_fini(hdev);
cleanup_resources(hdev, true, false, false);
/* Kill processes here after CS rollback. This is because the process
* can't really exit until all its CSs are done, which is what we
* do in cs rollback
*/
dev_info(hdev->dev,
"Waiting for all processes to exit (timeout of %u seconds)",
HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI);
hdev->process_kill_trial_cnt = 0;
rc = device_kill_open_processes(hdev, HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI, false);
if (rc) {
dev_crit(hdev->dev, "Failed to kill all open processes\n");
device_disable_open_processes(hdev, false);
}
hdev->process_kill_trial_cnt = 0;
rc = device_kill_open_processes(hdev, 0, true);
if (rc) {
dev_crit(hdev->dev, "Failed to kill all control device open processes\n");
device_disable_open_processes(hdev, true);
}
hl_cb_pool_fini(hdev);
/* Reset the H/W. It will be in idle state after this returns */
rc = hdev->asic_funcs->hw_fini(hdev, true, false);
if (rc)
dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
/* Release kernel context */
if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
dev_err(hdev->dev, "kernel ctx is still alive\n");
hl_dec_fini(hdev);
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
vfree(hdev->captured_err_info.page_fault_info.user_mappings);
hl_eq_fini(hdev, &hdev->event_queue);
kfree(hdev->shadow_cs_queue);
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_fini(hdev, &hdev->completion_queue[i]);
kfree(hdev->completion_queue);
kfree(hdev->user_interrupt);
hl_hw_queues_destroy(hdev);
/* Call ASIC S/W finalize function */
hdev->asic_funcs->sw_fini(hdev);
device_early_fini(hdev);
/* Hide devices and sysfs/debugfs files from user */
cdev_sysfs_debugfs_remove(hdev);
hl_debugfs_device_fini(hdev);
pr_info("removed device successfully\n");
}
/*
* MMIO register access helper functions.
*/
/*
* hl_rreg - Read an MMIO register
*
* @hdev: pointer to habanalabs device structure
* @reg: MMIO register offset (in bytes)
*
* Returns the value of the MMIO register we are asked to read
*
*/
inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
{
u32 val = readl(hdev->rmmio + reg);
if (unlikely(trace_habanalabs_rreg32_enabled()))
trace_habanalabs_rreg32(hdev->dev, reg, val);
return val;
}
/*
* hl_wreg - Write to an MMIO register
*
* @hdev: pointer to habanalabs device structure
* @reg: MMIO register offset (in bytes)
* @val: 32-bit value
*
* Writes the 32-bit value into the MMIO register
*
*/
inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
{
if (unlikely(trace_habanalabs_wreg32_enabled()))
trace_habanalabs_wreg32(hdev->dev, reg, val);
writel(val, hdev->rmmio + reg);
}
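/*
 * Illustrative sketch (not part of the original driver): a typical
 * read-modify-write sequence built on top of the hl_rreg()/hl_wreg()
 * helpers above. The helper name and the idea of setting bits via a
 * mask are hypothetical and only demonstrate how the two accessors are
 * meant to be combined.
 */
static inline void hl_example_rmw_set_bits(struct hl_device *hdev, u32 reg, u32 mask)
{
	u32 val = hl_rreg(hdev, reg);	/* read the current register value */

	hl_wreg(hdev, reg, val | mask);	/* write it back with the mask bits set */
}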
void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
u8 flags)
{
struct razwi_info *razwi_info = &hdev->captured_err_info.razwi_info;
if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) {
dev_err(hdev->dev,
"Number of possible razwi initiators (%u) exceeded limit (%u)\n",
num_of_engines, HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR);
return;
}
/* In case it's the first razwi since the device was opened, capture its parameters */
if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info.razwi_detected, 0, 1))
return;
razwi_info->razwi.timestamp = ktime_to_ns(ktime_get());
razwi_info->razwi.addr = addr;
razwi_info->razwi.num_of_possible_engines = num_of_engines;
memcpy(&razwi_info->razwi.engine_id[0], &engine_id[0],
num_of_engines * sizeof(u16));
razwi_info->razwi.flags = flags;
razwi_info->razwi_info_available = true;
}
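/*
 * Illustrative sketch (not part of the original driver): the
 * "capture only the first event" idiom used in hl_capture_razwi() and
 * the other capture helpers below. atomic_cmpxchg() returns the
 * previous value, so only the first caller (the one that flips 0 -> 1)
 * proceeds to record the event details; later callers bail out.
 * The helper name is hypothetical.
 */
static inline bool hl_example_first_capture(atomic_t *detected)
{
	/* true only for the very first caller */
	return atomic_cmpxchg(detected, 0, 1) == 0;
}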
void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
u8 flags, u64 *event_mask)
{
hl_capture_razwi(hdev, addr, engine_id, num_of_engines, flags);
if (event_mask)
*event_mask |= HL_NOTIFIER_EVENT_RAZWI;
}
static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu)
{
struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_vm_hash_node *hnode;
struct hl_userptr *userptr;
enum vm_type *vm_type;
struct hl_ctx *ctx;
u32 map_idx = 0;
int i;
/* Reset previous session count */
pgf_info->num_of_user_mappings = 0;
ctx = hl_get_compute_ctx(hdev);
if (!ctx) {
dev_err(hdev->dev, "Can't get user context for user mappings\n");
return;
}
mutex_lock(&ctx->mem_hash_lock);
hash_for_each(ctx->mem_hash, i, hnode, node) {
vm_type = hnode->ptr;
if (((*vm_type == VM_TYPE_USERPTR) && is_pmmu) ||
((*vm_type == VM_TYPE_PHYS_PACK) && !is_pmmu))
pgf_info->num_of_user_mappings++;
}
if (!pgf_info->num_of_user_mappings)
goto finish;
/* In case we already allocated in previous session, need to release it before
* allocating new buffer.
*/
vfree(pgf_info->user_mappings);
pgf_info->user_mappings =
vzalloc(pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping));
if (!pgf_info->user_mappings) {
pgf_info->num_of_user_mappings = 0;
goto finish;
}
hash_for_each(ctx->mem_hash, i, hnode, node) {
vm_type = hnode->ptr;
if ((*vm_type == VM_TYPE_USERPTR) && (is_pmmu)) {
userptr = hnode->ptr;
pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
pgf_info->user_mappings[map_idx].size = userptr->size;
map_idx++;
} else if ((*vm_type == VM_TYPE_PHYS_PACK) && (!is_pmmu)) {
phys_pg_pack = hnode->ptr;
pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
pgf_info->user_mappings[map_idx].size = phys_pg_pack->total_size;
map_idx++;
}
}
finish:
mutex_unlock(&ctx->mem_hash_lock);
hl_ctx_put(ctx);
}
void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu)
{
struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
/* Capture only the first page fault */
if (atomic_cmpxchg(&pgf_info->page_fault_detected, 0, 1))
return;
pgf_info->page_fault.timestamp = ktime_to_ns(ktime_get());
pgf_info->page_fault.addr = addr;
pgf_info->page_fault.engine_id = eng_id;
hl_capture_user_mappings(hdev, is_pmmu);
pgf_info->page_fault_info_available = true;
}
void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
u64 *event_mask)
{
hl_capture_page_fault(hdev, addr, eng_id, is_pmmu);
if (event_mask)
*event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT;
}
static void hl_capture_hw_err(struct hl_device *hdev, u16 event_id)
{
struct hw_err_info *info = &hdev->captured_err_info.hw_err;
/* Capture only the first HW err */
if (atomic_cmpxchg(&info->event_detected, 0, 1))
return;
info->event.timestamp = ktime_to_ns(ktime_get());
info->event.event_id = event_id;
info->event_info_available = true;
}
void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask)
{
hl_capture_hw_err(hdev, event_id);
if (event_mask)
*event_mask |= HL_NOTIFIER_EVENT_CRITICL_HW_ERR;
}
static void hl_capture_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *fw_info)
{
struct fw_err_info *info = &hdev->captured_err_info.fw_err;
/* Capture only the first FW error */
if (atomic_cmpxchg(&info->event_detected, 0, 1))
return;
info->event.timestamp = ktime_to_ns(ktime_get());
info->event.err_type = fw_info->err_type;
if (fw_info->err_type == HL_INFO_FW_REPORTED_ERR)
info->event.event_id = fw_info->event_id;
info->event_info_available = true;
}
void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info)
{
hl_capture_fw_err(hdev, info);
if (info->event_mask)
*info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;
}
void hl_enable_err_info_capture(struct hl_error_info *captured_err_info)
{
vfree(captured_err_info->page_fault_info.user_mappings);
memset(captured_err_info, 0, sizeof(struct hl_error_info));
atomic_set(&captured_err_info->cs_timeout.write_enable, 1);
captured_err_info->undef_opcode.write_enable = true;
}
| linux-master | drivers/accel/habanalabs/common/device.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
#include <linux/slab.h>
static void encaps_handle_do_release(struct hl_cs_encaps_sig_handle *handle, bool put_hw_sob,
bool put_ctx)
{
struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;
if (put_hw_sob)
hw_sob_put(handle->hw_sob);
spin_lock(&mgr->lock);
idr_remove(&mgr->handles, handle->id);
spin_unlock(&mgr->lock);
if (put_ctx)
hl_ctx_put(handle->ctx);
kfree(handle);
}
void hl_encaps_release_handle_and_put_ctx(struct kref *ref)
{
struct hl_cs_encaps_sig_handle *handle =
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
encaps_handle_do_release(handle, false, true);
}
static void hl_encaps_release_handle_and_put_sob(struct kref *ref)
{
struct hl_cs_encaps_sig_handle *handle =
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
encaps_handle_do_release(handle, true, false);
}
void hl_encaps_release_handle_and_put_sob_ctx(struct kref *ref)
{
struct hl_cs_encaps_sig_handle *handle =
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
encaps_handle_do_release(handle, true, true);
}
static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
{
spin_lock_init(&mgr->lock);
idr_init(&mgr->handles);
}
static void hl_encaps_sig_mgr_fini(struct hl_device *hdev, struct hl_encaps_signals_mgr *mgr)
{
struct hl_cs_encaps_sig_handle *handle;
struct idr *idp;
u32 id;
idp = &mgr->handles;
/* The IDR is expected to be empty at this stage, because any leftover signal should have
 * been released as part of CS roll-back.
 */
if (!idr_is_empty(idp)) {
dev_warn(hdev->dev,
"device released while some encaps signals handles are still allocated\n");
idr_for_each_entry(idp, handle, id)
kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob);
}
idr_destroy(&mgr->handles);
}
static void hl_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
int i;
/* Release all allocated HW block mapped list entries and destroy
* the mutex.
*/
hl_hw_block_mem_fini(ctx);
/*
* If we arrived here, there are no jobs waiting for this context
* on its queues so we can safely remove it.
* This is because for each CS, we increment the ref count and for
* every CS that finishes we decrement it, and we won't arrive at
 * this function unless the ref count is 0
*/
for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
hl_fence_put(ctx->cs_pending[i]);
kfree(ctx->cs_pending);
if (ctx->asid != HL_KERNEL_ASID_ID) {
dev_dbg(hdev->dev, "closing user context %d\n", ctx->asid);
/* The engines are stopped as there is no executing CS, but the
* Coresight might be still working by accessing addresses
* related to the stopped engines. Hence stop it explicitly.
*/
if (hdev->in_debug)
hl_device_set_debug_mode(hdev, ctx, false);
hdev->asic_funcs->ctx_fini(ctx);
hl_dec_ctx_fini(ctx);
hl_cb_va_pool_fini(ctx);
hl_vm_ctx_fini(ctx);
hl_asid_free(hdev, ctx->asid);
hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
} else {
dev_dbg(hdev->dev, "closing kernel context\n");
hdev->asic_funcs->ctx_fini(ctx);
hl_vm_ctx_fini(ctx);
hl_mmu_ctx_fini(ctx);
}
}
void hl_ctx_do_release(struct kref *ref)
{
struct hl_ctx *ctx;
ctx = container_of(ref, struct hl_ctx, refcount);
hl_ctx_fini(ctx);
if (ctx->hpriv) {
struct hl_fpriv *hpriv = ctx->hpriv;
mutex_lock(&hpriv->ctx_lock);
hpriv->ctx = NULL;
mutex_unlock(&hpriv->ctx_lock);
hl_hpriv_put(hpriv);
}
kfree(ctx);
}
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
struct hl_ctx_mgr *ctx_mgr = &hpriv->ctx_mgr;
struct hl_ctx *ctx;
int rc;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
rc = -ENOMEM;
goto out_err;
}
mutex_lock(&ctx_mgr->lock);
rc = idr_alloc(&ctx_mgr->handles, ctx, 1, 0, GFP_KERNEL);
mutex_unlock(&ctx_mgr->lock);
if (rc < 0) {
dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
goto free_ctx;
}
ctx->handle = rc;
rc = hl_ctx_init(hdev, ctx, false);
if (rc)
goto remove_from_idr;
hl_hpriv_get(hpriv);
ctx->hpriv = hpriv;
/* TODO: remove for multiple contexts per process */
hpriv->ctx = ctx;
/* TODO: remove the following line for multiple process support */
hdev->is_compute_ctx_active = true;
return 0;
remove_from_idr:
mutex_lock(&ctx_mgr->lock);
idr_remove(&ctx_mgr->handles, ctx->handle);
mutex_unlock(&ctx_mgr->lock);
free_ctx:
kfree(ctx);
out_err:
return rc;
}
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
int rc = 0, i;
ctx->hdev = hdev;
kref_init(&ctx->refcount);
ctx->cs_sequence = 1;
spin_lock_init(&ctx->cs_lock);
atomic_set(&ctx->thread_ctx_switch_token, 1);
ctx->thread_ctx_switch_wait_token = 0;
ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
sizeof(struct hl_fence *),
GFP_KERNEL);
if (!ctx->cs_pending)
return -ENOMEM;
INIT_LIST_HEAD(&ctx->outcome_store.used_list);
INIT_LIST_HEAD(&ctx->outcome_store.free_list);
hash_init(ctx->outcome_store.outcome_map);
for (i = 0; i < ARRAY_SIZE(ctx->outcome_store.nodes_pool); ++i)
list_add(&ctx->outcome_store.nodes_pool[i].list_link,
&ctx->outcome_store.free_list);
hl_hw_block_mem_init(ctx);
if (is_kernel_ctx) {
ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
rc = hl_vm_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "Failed to init mem ctx module\n");
rc = -ENOMEM;
goto err_hw_block_mem_fini;
}
rc = hdev->asic_funcs->ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "ctx_init failed\n");
goto err_vm_ctx_fini;
}
} else {
ctx->asid = hl_asid_alloc(hdev);
if (!ctx->asid) {
dev_err(hdev->dev, "No free ASID, failed to create context\n");
rc = -ENOMEM;
goto err_hw_block_mem_fini;
}
rc = hl_vm_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "Failed to init mem ctx module\n");
rc = -ENOMEM;
goto err_asid_free;
}
rc = hl_cb_va_pool_init(ctx);
if (rc) {
dev_err(hdev->dev,
"Failed to init VA pool for mapped CB\n");
goto err_vm_ctx_fini;
}
rc = hdev->asic_funcs->ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "ctx_init failed\n");
goto err_cb_va_pool_fini;
}
hl_encaps_sig_mgr_init(&ctx->sig_mgr);
dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
}
return 0;
err_cb_va_pool_fini:
hl_cb_va_pool_fini(ctx);
err_vm_ctx_fini:
hl_vm_ctx_fini(ctx);
err_asid_free:
if (ctx->asid != HL_KERNEL_ASID_ID)
hl_asid_free(hdev, ctx->asid);
err_hw_block_mem_fini:
hl_hw_block_mem_fini(ctx);
kfree(ctx->cs_pending);
return rc;
}
static int hl_ctx_get_unless_zero(struct hl_ctx *ctx)
{
return kref_get_unless_zero(&ctx->refcount);
}
void hl_ctx_get(struct hl_ctx *ctx)
{
kref_get(&ctx->refcount);
}
int hl_ctx_put(struct hl_ctx *ctx)
{
return kref_put(&ctx->refcount, hl_ctx_do_release);
}
struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
{
struct hl_ctx *ctx = NULL;
struct hl_fpriv *hpriv;
mutex_lock(&hdev->fpriv_list_lock);
list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
mutex_lock(&hpriv->ctx_lock);
ctx = hpriv->ctx;
if (ctx && !hl_ctx_get_unless_zero(ctx))
ctx = NULL;
mutex_unlock(&hpriv->ctx_lock);
/* There can only be a single user that has opened the compute device, so exit
* immediately once we find its context or if we see that it has been released
*/
break;
}
mutex_unlock(&hdev->fpriv_list_lock);
return ctx;
}
/*
* hl_ctx_get_fence_locked - get CS fence under CS lock
*
* @ctx: pointer to the context structure.
* @seq: CS sequence number
 *
 * @return a valid fence pointer on success, NULL if the fence is already
 * gone, otherwise an error pointer.
*
* NOTE: this function shall be called with cs_lock locked
*/
static struct hl_fence *hl_ctx_get_fence_locked(struct hl_ctx *ctx, u64 seq)
{
struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
struct hl_fence *fence;
if (seq >= ctx->cs_sequence)
return ERR_PTR(-EINVAL);
if (seq + asic_prop->max_pending_cs < ctx->cs_sequence)
return NULL;
fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
hl_fence_get(fence);
return fence;
}
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
struct hl_fence *fence;
spin_lock(&ctx->cs_lock);
fence = hl_ctx_get_fence_locked(ctx, seq);
spin_unlock(&ctx->cs_lock);
return fence;
}
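/*
 * Illustrative sketch (not part of the original driver): the pending-CS
 * ring indexing used by hl_ctx_get_fence_locked(). max_pending_cs is a
 * power of two, so "seq & (max_pending_cs - 1)" selects the ring slot.
 * For example, with max_pending_cs == 64 and ctx->cs_sequence == 100:
 *   - seq >= 100           -> -EINVAL (not submitted yet)
 *   - seq <  36 (100 - 64) -> NULL    (fence already recycled)
 *   - seq == 99            -> slot 99 & 63 == 35
 * The helper name below is hypothetical.
 */
static inline u32 hl_example_cs_ring_slot(u64 seq, u32 max_pending_cs)
{
	return (u32)(seq & (max_pending_cs - 1));
}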
/*
* hl_ctx_get_fences - get multiple CS fences under the same CS lock
*
* @ctx: pointer to the context structure.
* @seq_arr: array of CS sequences to wait for
* @fence: fence array to store the CS fences
* @arr_len: length of the seq_arr and fence arrays
 *
 * @return 0 on success, otherwise a non-zero error code
*/
int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
struct hl_fence **fence, u32 arr_len)
{
struct hl_fence **fence_arr_base = fence;
int i, rc = 0;
spin_lock(&ctx->cs_lock);
for (i = 0; i < arr_len; i++, fence++) {
u64 seq = seq_arr[i];
*fence = hl_ctx_get_fence_locked(ctx, seq);
if (IS_ERR(*fence)) {
dev_err(ctx->hdev->dev,
"Failed to get fence for CS with seq 0x%llx\n",
seq);
rc = PTR_ERR(*fence);
break;
}
}
spin_unlock(&ctx->cs_lock);
if (rc)
hl_fences_put(fence_arr_base, i);
return rc;
}
/*
* hl_ctx_mgr_init - initialize the context manager
*
* @ctx_mgr: pointer to context manager structure
*
* This manager is an object inside the hpriv object of the user process.
* The function is called when a user process opens the FD.
*/
void hl_ctx_mgr_init(struct hl_ctx_mgr *ctx_mgr)
{
mutex_init(&ctx_mgr->lock);
idr_init(&ctx_mgr->handles);
}
/*
* hl_ctx_mgr_fini - finalize the context manager
*
* @hdev: pointer to device structure
* @ctx_mgr: pointer to context manager structure
*
* This function goes over all the contexts in the manager and frees them.
* It is called when a process closes the FD.
*/
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *ctx_mgr)
{
struct hl_ctx *ctx;
struct idr *idp;
u32 id;
idp = &ctx_mgr->handles;
idr_for_each_entry(idp, ctx, id)
kref_put(&ctx->refcount, hl_ctx_do_release);
idr_destroy(&ctx_mgr->handles);
mutex_destroy(&ctx_mgr->lock);
}
| linux-master | drivers/accel/habanalabs/common/context.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
#define VCMD_CONTROL_OFFSET 0x40 /* SWREG16 */
#define VCMD_IRQ_STATUS_OFFSET 0x44 /* SWREG17 */
#define VCMD_IRQ_STATUS_ENDCMD_MASK 0x1
#define VCMD_IRQ_STATUS_BUSERR_MASK 0x2
#define VCMD_IRQ_STATUS_TIMEOUT_MASK 0x4
#define VCMD_IRQ_STATUS_CMDERR_MASK 0x8
#define VCMD_IRQ_STATUS_ABORT_MASK 0x10
#define VCMD_IRQ_STATUS_RESET_MASK 0x20
static void dec_print_abnrm_intr_source(struct hl_device *hdev, u32 irq_status)
{
const char *format = "abnormal interrupt source:%s%s%s%s%s%s\n";
char *intr_source[6] = {"Unknown", "", "", "", "", ""};
int i = 0;
if (!irq_status)
return;
if (irq_status & VCMD_IRQ_STATUS_ENDCMD_MASK)
intr_source[i++] = " ENDCMD";
if (irq_status & VCMD_IRQ_STATUS_BUSERR_MASK)
intr_source[i++] = " BUSERR";
if (irq_status & VCMD_IRQ_STATUS_TIMEOUT_MASK)
intr_source[i++] = " TIMEOUT";
if (irq_status & VCMD_IRQ_STATUS_CMDERR_MASK)
intr_source[i++] = " CMDERR";
if (irq_status & VCMD_IRQ_STATUS_ABORT_MASK)
intr_source[i++] = " ABORT";
if (irq_status & VCMD_IRQ_STATUS_RESET_MASK)
intr_source[i++] = " RESET";
dev_err(hdev->dev, format, intr_source[0], intr_source[1],
intr_source[2], intr_source[3], intr_source[4], intr_source[5]);
}
static void dec_abnrm_intr_work(struct work_struct *work)
{
struct hl_dec *dec = container_of(work, struct hl_dec, abnrm_intr_work);
struct hl_device *hdev = dec->hdev;
u32 irq_status, event_mask = 0;
bool reset_required = false;
irq_status = RREG32(dec->base_addr + VCMD_IRQ_STATUS_OFFSET);
dev_err(hdev->dev, "Decoder abnormal interrupt %#x, core %d\n", irq_status, dec->core_id);
dec_print_abnrm_intr_source(hdev, irq_status);
/* Clear the interrupt */
WREG32(dec->base_addr + VCMD_IRQ_STATUS_OFFSET, irq_status);
/* Flush the interrupt clear */
RREG32(dec->base_addr + VCMD_IRQ_STATUS_OFFSET);
if (irq_status & VCMD_IRQ_STATUS_TIMEOUT_MASK) {
reset_required = true;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
}
if (irq_status & VCMD_IRQ_STATUS_CMDERR_MASK)
event_mask |= HL_NOTIFIER_EVENT_UNDEFINED_OPCODE;
if (irq_status & (VCMD_IRQ_STATUS_ENDCMD_MASK |
VCMD_IRQ_STATUS_BUSERR_MASK |
VCMD_IRQ_STATUS_ABORT_MASK))
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
if (reset_required) {
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
hl_device_cond_reset(hdev, 0, event_mask);
} else if (event_mask) {
hl_notifier_event_send_all(hdev, event_mask);
}
}
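/*
 * Illustrative sketch (not part of the original driver): the
 * "write, then read back" pattern used above when clearing the decoder
 * interrupt status. PCIe writes are posted, so reading the same register
 * back forces the clear to reach the device before the handler moves on.
 * The helper name is hypothetical; the register offset is the one
 * defined in this file.
 */
static inline void hl_example_clear_and_flush(struct hl_dec *dec, u32 status)
{
	WREG32(dec->base_addr + VCMD_IRQ_STATUS_OFFSET, status);	/* clear */
	RREG32(dec->base_addr + VCMD_IRQ_STATUS_OFFSET);		/* flush */
}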
void hl_dec_fini(struct hl_device *hdev)
{
kfree(hdev->dec);
}
int hl_dec_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_dec *dec;
int rc, j;
/* if max core is 0, nothing to do */
if (!prop->max_dec)
return 0;
hdev->dec = kcalloc(prop->max_dec, sizeof(struct hl_dec), GFP_KERNEL);
if (!hdev->dec)
return -ENOMEM;
for (j = 0 ; j < prop->max_dec ; j++) {
dec = hdev->dec + j;
dec->hdev = hdev;
INIT_WORK(&dec->abnrm_intr_work, dec_abnrm_intr_work);
dec->core_id = j;
dec->base_addr = hdev->asic_funcs->get_dec_base_addr(hdev, j);
if (!dec->base_addr) {
dev_err(hdev->dev, "Invalid base address of decoder %d\n", j);
rc = -EINVAL;
goto err_dec_fini;
}
}
return 0;
err_dec_fini:
hl_dec_fini(hdev);
return rc;
}
void hl_dec_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_dec *dec;
int j;
for (j = 0 ; j < prop->max_dec ; j++) {
if (!!(prop->decoder_enabled_mask & BIT(j))) {
dec = hdev->dec + j;
/* Stop the decoder */
WREG32(dec->base_addr + VCMD_CONTROL_OFFSET, 0);
}
}
}
| linux-master | drivers/accel/habanalabs/common/decoder.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
#include "../include/common/hl_boot_if.h"
#include <linux/firmware.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/vmalloc.h>
#include <trace/events/habanalabs.h>
#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
static char *comms_cmd_str_arr[COMMS_INVLD_LAST] = {
[COMMS_NOOP] = __stringify(COMMS_NOOP),
[COMMS_CLR_STS] = __stringify(COMMS_CLR_STS),
[COMMS_RST_STATE] = __stringify(COMMS_RST_STATE),
[COMMS_PREP_DESC] = __stringify(COMMS_PREP_DESC),
[COMMS_DATA_RDY] = __stringify(COMMS_DATA_RDY),
[COMMS_EXEC] = __stringify(COMMS_EXEC),
[COMMS_RST_DEV] = __stringify(COMMS_RST_DEV),
[COMMS_GOTO_WFE] = __stringify(COMMS_GOTO_WFE),
[COMMS_SKIP_BMC] = __stringify(COMMS_SKIP_BMC),
[COMMS_PREP_DESC_ELBI] = __stringify(COMMS_PREP_DESC_ELBI),
};
static char *comms_sts_str_arr[COMMS_STS_INVLD_LAST] = {
[COMMS_STS_NOOP] = __stringify(COMMS_STS_NOOP),
[COMMS_STS_ACK] = __stringify(COMMS_STS_ACK),
[COMMS_STS_OK] = __stringify(COMMS_STS_OK),
[COMMS_STS_ERR] = __stringify(COMMS_STS_ERR),
[COMMS_STS_VALID_ERR] = __stringify(COMMS_STS_VALID_ERR),
[COMMS_STS_TIMEOUT_ERR] = __stringify(COMMS_STS_TIMEOUT_ERR),
};
static char *extract_fw_ver_from_str(const char *fw_str)
{
char *str, *fw_ver, *whitespace;
u32 ver_offset;
fw_ver = kmalloc(VERSION_MAX_LEN, GFP_KERNEL);
if (!fw_ver)
return NULL;
str = strnstr(fw_str, "fw-", VERSION_MAX_LEN);
if (!str)
goto free_fw_ver;
/* Skip the fw- part */
str += 3;
ver_offset = str - fw_str;
/* Copy until the next whitespace */
whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
if (!whitespace)
goto free_fw_ver;
strscpy(fw_ver, str, whitespace - str + 1);
return fw_ver;
free_fw_ver:
kfree(fw_ver);
return NULL;
}
/**
* extract_u32_until_given_char() - given a string of the format "<u32><char>*", extract the u32.
* @str: the given string
* @ver_num: the pointer to the extracted u32 to be returned to the caller.
* @given_char: the given char at the end of the u32 in the string
*
* Return: Upon success, return a pointer to the given_char in the string. Upon failure, return NULL
*/
static char *extract_u32_until_given_char(char *str, u32 *ver_num, char given_char)
{
char num_str[8] = {}, *ch;
ch = strchrnul(str, given_char);
if (*ch == '\0' || ch == str || ch - str >= sizeof(num_str))
return NULL;
memcpy(num_str, str, ch - str);
if (kstrtou32(num_str, 10, ver_num))
return NULL;
return ch;
}
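/*
 * Illustrative sketch (not part of the original driver): parsing a
 * hypothetical version string such as "42.0.1" with the helper above,
 * in the same way hl_get_preboot_major_minor() does. Each call consumes
 * one number and returns a pointer to the delimiter, which is skipped
 * before the next call. The helper name is hypothetical.
 */
static inline int hl_example_parse_major_minor(char *str, u32 *major, u32 *minor)
{
	char *ch;

	ch = extract_u32_until_given_char(str, major, '.');	/* "42" */
	if (!ch)
		return -EINVAL;

	ch = extract_u32_until_given_char(ch + 1, minor, '.');	/* "0" */
	if (!ch)
		return -EINVAL;

	return 0;
}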
/**
* hl_get_sw_major_minor_subminor() - extract the FW's SW version major, minor, sub-minor
* from the version string
* @hdev: pointer to the hl_device
* @fw_str: the FW's version string
*
* The extracted version is set in the hdev fields: fw_sw_{major/minor/sub_minor}_ver.
*
* fw_str is expected to have one of two possible formats, examples:
* 1) 'Preboot version hl-gaudi2-1.9.0-fw-42.0.1-sec-3'
* 2) 'Preboot version hl-gaudi2-1.9.0-rc-fw-42.0.1-sec-3'
* In those examples, the SW major,minor,subminor are correspondingly: 1,9,0.
*
* Return: 0 for success or a negative error code for failure.
*/
static int hl_get_sw_major_minor_subminor(struct hl_device *hdev, const char *fw_str)
{
char *end, *start;
end = strnstr(fw_str, "-rc-", VERSION_MAX_LEN);
if (end == fw_str)
return -EINVAL;
if (!end)
end = strnstr(fw_str, "-fw-", VERSION_MAX_LEN);
if (end == fw_str)
return -EINVAL;
if (!end)
return -EINVAL;
for (start = end - 1; start != fw_str; start--) {
if (*start == '-')
break;
}
if (start == fw_str)
return -EINVAL;
/* start/end point each to the starting and ending hyphen of the sw version e.g. -1.9.0- */
start++;
start = extract_u32_until_given_char(start, &hdev->fw_sw_major_ver, '.');
if (!start)
goto err_zero_ver;
start++;
start = extract_u32_until_given_char(start, &hdev->fw_sw_minor_ver, '.');
if (!start)
goto err_zero_ver;
start++;
start = extract_u32_until_given_char(start, &hdev->fw_sw_sub_minor_ver, '-');
if (!start)
goto err_zero_ver;
return 0;
err_zero_ver:
hdev->fw_sw_major_ver = 0;
hdev->fw_sw_minor_ver = 0;
hdev->fw_sw_sub_minor_ver = 0;
return -EINVAL;
}
/**
* hl_get_preboot_major_minor() - extract the FW's version major, minor from the version string.
* @hdev: pointer to the hl_device
* @preboot_ver: the FW's version string
*
* preboot_ver is expected to be the format of <major>.<minor>.<sub minor>*, e.g: 42.0.1-sec-3
* The extracted version is set in the hdev fields: fw_inner_{major/minor}_ver.
*
* Return: 0 on success, negative error code for failure.
*/
static int hl_get_preboot_major_minor(struct hl_device *hdev, char *preboot_ver)
{
preboot_ver = extract_u32_until_given_char(preboot_ver, &hdev->fw_inner_major_ver, '.');
if (!preboot_ver) {
dev_err(hdev->dev, "Error parsing preboot major version\n");
goto err_zero_ver;
}
preboot_ver++;
preboot_ver = extract_u32_until_given_char(preboot_ver, &hdev->fw_inner_minor_ver, '.');
if (!preboot_ver) {
dev_err(hdev->dev, "Error parsing preboot minor version\n");
goto err_zero_ver;
}
return 0;
err_zero_ver:
hdev->fw_inner_major_ver = 0;
hdev->fw_inner_minor_ver = 0;
return -EINVAL;
}
static int hl_request_fw(struct hl_device *hdev,
const struct firmware **firmware_p,
const char *fw_name)
{
size_t fw_size;
int rc;
rc = request_firmware(firmware_p, fw_name, hdev->dev);
if (rc) {
dev_err(hdev->dev, "Firmware file %s is not found! (error %d)\n",
fw_name, rc);
goto out;
}
fw_size = (*firmware_p)->size;
if ((fw_size % 4) != 0) {
dev_err(hdev->dev, "Illegal %s firmware size %zu\n",
fw_name, fw_size);
rc = -EINVAL;
goto release_fw;
}
dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
if (fw_size > FW_FILE_MAX_SIZE) {
dev_err(hdev->dev,
"FW file size %zu exceeds maximum of %u bytes\n",
fw_size, FW_FILE_MAX_SIZE);
rc = -EINVAL;
goto release_fw;
}
return 0;
release_fw:
release_firmware(*firmware_p);
out:
return rc;
}
/**
* hl_release_firmware() - release FW
*
* @fw: fw descriptor
*
* note: this inline function was added to serve as a mirror of the
 * hl_request_fw() function.
*/
static inline void hl_release_firmware(const struct firmware *fw)
{
release_firmware(fw);
}
/**
* hl_fw_copy_fw_to_device() - copy FW to device
*
* @hdev: pointer to hl_device structure.
* @fw: fw descriptor
* @dst: IO memory mapped address space to copy firmware to
* @src_offset: offset in src FW to copy from
* @size: amount of bytes to copy (0 to copy the whole binary)
*
* actual copy of FW binary data to device, shared by static and dynamic loaders
*/
static int hl_fw_copy_fw_to_device(struct hl_device *hdev,
const struct firmware *fw, void __iomem *dst,
u32 src_offset, u32 size)
{
const void *fw_data;
/* size 0 indicates to copy the whole file */
if (!size)
size = fw->size;
if (src_offset + size > fw->size) {
dev_err(hdev->dev,
"size to copy(%u) and offset(%u) are invalid\n",
size, src_offset);
return -EINVAL;
}
fw_data = (const void *) fw->data;
memcpy_toio(dst, fw_data + src_offset, size);
return 0;
}
/**
* hl_fw_copy_msg_to_device() - copy message to device
*
* @hdev: pointer to hl_device structure.
* @msg: message
* @dst: IO memory mapped address space to copy firmware to
* @src_offset: offset in src message to copy from
* @size: amount of bytes to copy (0 to copy the whole binary)
*
* actual copy of message data to device.
*/
static int hl_fw_copy_msg_to_device(struct hl_device *hdev,
struct lkd_msg_comms *msg, void __iomem *dst,
u32 src_offset, u32 size)
{
void *msg_data;
/* size 0 indicates to copy the whole file */
if (!size)
size = sizeof(struct lkd_msg_comms);
if (src_offset + size > sizeof(struct lkd_msg_comms)) {
dev_err(hdev->dev,
"size to copy(%u) and offset(%u) are invalid\n",
size, src_offset);
return -EINVAL;
}
msg_data = (void *) msg;
memcpy_toio(dst, msg_data + src_offset, size);
return 0;
}
/**
* hl_fw_load_fw_to_device() - Load F/W code to device's memory.
*
* @hdev: pointer to hl_device structure.
* @fw_name: the firmware image name
* @dst: IO memory mapped address space to copy firmware to
* @src_offset: offset in src FW to copy from
* @size: amount of bytes to copy (0 to copy the whole binary)
*
* Copy fw code from firmware file to device memory.
*
* Return: 0 on success, non-zero for failure.
*/
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size)
{
const struct firmware *fw;
int rc;
rc = hl_request_fw(hdev, &fw, fw_name);
if (rc)
return rc;
rc = hl_fw_copy_fw_to_device(hdev, fw, dst, src_offset, size);
hl_release_firmware(fw);
return rc;
}
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value)
{
struct cpucp_packet pkt = {};
pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(value);
return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
}
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
u16 len, u32 timeout, u64 *result)
{
struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id];
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct cpucp_packet *pkt;
dma_addr_t pkt_dma_addr;
struct hl_bd *sent_bd;
u32 tmp, expected_ack_val, pi, opcode;
int rc;
pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
if (!pkt) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for packet to CPU\n");
return -ENOMEM;
}
memcpy(pkt, msg, len);
mutex_lock(&hdev->send_cpu_message_lock);
/* CPU-CP messages can be sent during soft-reset */
if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
rc = 0;
goto out;
}
if (hdev->device_cpu_disabled) {
rc = -EIO;
goto out;
}
/* set fence to an invalid value */
pkt->fence = cpu_to_le32(UINT_MAX);
pi = queue->pi;
/*
* The CPU queue is a synchronous queue with an effective depth of
* a single entry (although it is allocated with room for multiple
* entries). We lock on it using 'send_cpu_message_lock', which
 * serializes accesses to the CPU queue. This means that we don't need
 * to lock access to the entire H/W queues module when submitting a JOB
 * to the CPU queue.
*/
hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr);
if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
expected_ack_val = queue->pi;
else
expected_ack_val = CPUCP_PACKET_FENCE_VAL;
rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
(tmp == expected_ack_val), 1000,
timeout, true);
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
if (rc == -ETIMEDOUT) {
/* If FW performed reset just before sending it a packet, we will get a timeout.
* This is expected behavior, hence no need for error message.
*/
if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset)
dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
tmp);
else
dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n", tmp);
hdev->device_cpu_disabled = true;
goto out;
}
tmp = le32_to_cpu(pkt->ctl);
rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
if (rc) {
opcode = (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT;
if (!prop->supports_advanced_cpucp_rc) {
dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode);
rc = -EIO;
goto scrub_descriptor;
}
switch (rc) {
case cpucp_packet_invalid:
dev_err(hdev->dev,
"CPU packet %d is not supported by F/W\n", opcode);
break;
case cpucp_packet_fault:
dev_err(hdev->dev,
"F/W failed processing CPU packet %d\n", opcode);
break;
case cpucp_packet_invalid_pkt:
dev_dbg(hdev->dev,
"CPU packet %d is not supported by F/W\n", opcode);
break;
case cpucp_packet_invalid_params:
dev_err(hdev->dev,
"F/W reports invalid parameters for CPU packet %d\n", opcode);
break;
default:
dev_err(hdev->dev,
"Unknown F/W ERROR %d for CPU packet %d\n", rc, opcode);
}
/* propagate the return code from the f/w to the callers who want to check it */
if (result)
*result = rc;
rc = -EIO;
} else if (result) {
*result = le64_to_cpu(pkt->result);
}
scrub_descriptor:
/* Scrub previous buffer descriptor 'ctl' field which contains the
* previous PI value written during packet submission.
* We must do this or else F/W can read an old value upon queue wraparound.
*/
sent_bd = queue->kernel_address;
sent_bd += hl_pi_2_offset(pi);
sent_bd->ctl = cpu_to_le32(UINT_MAX);
out:
mutex_unlock(&hdev->send_cpu_message_lock);
hl_cpu_accessible_dma_pool_free(hdev, len, pkt);
return rc;
}
int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
{
struct cpucp_packet pkt;
u64 result;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(event_type);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
return rc;
}
int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
size_t irq_arr_size)
{
struct cpucp_unmask_irq_arr_packet *pkt;
size_t total_pkt_size;
u64 result;
int rc;
total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
irq_arr_size;
/* data should be aligned to 8 bytes in order for CPU-CP to copy it */
total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
/* total_pkt_size is cast to u16 later on */
if (total_pkt_size > USHRT_MAX) {
dev_err(hdev->dev, "too many elements in IRQ array\n");
return -EINVAL;
}
pkt = kzalloc(total_pkt_size, GFP_KERNEL);
if (!pkt)
return -ENOMEM;
pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
memcpy(&pkt->irqs, irq_arr, irq_arr_size);
pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
total_pkt_size, 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask IRQ array\n");
kfree(pkt);
return rc;
}
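/*
 * Illustrative sketch (not part of the original driver): the rounding
 * expression used above, (x + 0x7) & ~0x7, pads a packet size up to the
 * next multiple of 8 bytes, e.g. 13 -> 16, 16 -> 16, 17 -> 24. A minimal
 * stand-alone version of the same arithmetic, with a hypothetical name:
 */
static inline size_t hl_example_round_up_to_8(size_t x)
{
	return (x + 0x7) & ~(size_t)0x7;
}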
int hl_fw_test_cpu_queue(struct hl_device *hdev)
{
struct cpucp_packet test_pkt = {};
u64 result;
int rc;
test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
test_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
sizeof(test_pkt), 0, &result);
if (!rc) {
if (result != CPUCP_PACKET_FENCE_VAL)
dev_err(hdev->dev,
"CPU queue test failed (%#08llx)\n", result);
} else {
dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
}
return rc;
}
void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle)
{
u64 kernel_addr;
kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
*dma_handle = hdev->cpu_accessible_dma_address +
(kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
return (void *) (uintptr_t) kernel_addr;
}
void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr)
{
gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
size);
}
int hl_fw_send_soft_reset(struct hl_device *hdev)
{
struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_SOFT_RESET << CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
if (rc)
dev_err(hdev->dev, "failed to send soft-reset msg (err = %d)\n", rc);
return rc;
}
int hl_fw_send_device_activity(struct hl_device *hdev, bool open)
{
struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_ACTIVE_STATUS_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(open);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
if (rc)
dev_err(hdev->dev, "failed to send device activity msg(%u)\n", open);
return rc;
}
int hl_fw_send_heartbeat(struct hl_device *hdev)
{
struct cpucp_packet hb_pkt;
u64 result;
int rc;
memset(&hb_pkt, 0, sizeof(hb_pkt));
hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
sizeof(hb_pkt), 0, &result);
if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
return -EIO;
if (le32_to_cpu(hb_pkt.status_mask) &
CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK) {
dev_warn(hdev->dev, "FW reported EQ fault during heartbeat\n");
rc = -EIO;
}
return rc;
}
static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
u32 sts_val)
{
bool err_exists = false;
if (!(err_val & CPU_BOOT_ERR0_ENABLED))
return false;
if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL) {
dev_err(hdev->dev,
"Device boot error - DRAM initialization failed\n");
err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED) {
dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL) {
dev_err(hdev->dev,
"Device boot error - Thermal Sensor initialization failed\n");
err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
if (hdev->bmc_enable) {
dev_err(hdev->dev,
"Device boot error - Skipped waiting for BMC\n");
err_exists = true;
} else {
dev_info(hdev->dev,
"Device boot message - Skipped waiting for BMC\n");
/* This is an info so we don't want it to disable the
* device
*/
err_val &= ~CPU_BOOT_ERR0_BMC_WAIT_SKIPPED;
}
}
if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY) {
dev_err(hdev->dev,
"Device boot error - Serdes data from BMC not available\n");
err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) {
dev_err(hdev->dev,
"Device boot error - NIC F/W initialization failed\n");
err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) {
dev_err(hdev->dev,
"Device boot warning - security not ready\n");
err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) {
dev_err(hdev->dev, "Device boot error - security failure\n");
err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL) {
dev_err(hdev->dev, "Device boot error - eFuse failure\n");
err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_SEC_IMG_VER_FAIL) {
dev_err(hdev->dev, "Device boot error - Failed to load preboot secondary image\n");
err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_PLL_FAIL) {
dev_err(hdev->dev, "Device boot error - PLL failure\n");
err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL) {
/* Ignore this bit, don't prevent driver loading */
dev_dbg(hdev->dev, "device unusable status is set\n");
err_val &= ~CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL;
}
if (err_val & CPU_BOOT_ERR0_BINNING_FAIL) {
dev_err(hdev->dev, "Device boot error - binning failure\n");
err_exists = true;
}
if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) {
dev_err(hdev->dev, "Device boot error - EEPROM failure detected\n");
err_exists = true;
}
/* All warnings should go here in order not to reach the unknown error validation */
if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
dev_warn(hdev->dev,
"Device boot warning - Skipped DRAM initialization\n");
/* This is a warning so we don't want it to disable the
* device
*/
err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED;
}
if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) {
dev_warn(hdev->dev,
"Device boot warning - Failed to load preboot primary image\n");
/* This is a warning so we don't want it to disable the
* device as we have a secondary preboot image
*/
err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL;
}
if (err_val & CPU_BOOT_ERR0_TPM_FAIL) {
dev_warn(hdev->dev,
"Device boot warning - TPM failure\n");
/* This is a warning so we don't want it to disable the
* device
*/
err_val &= ~CPU_BOOT_ERR0_TPM_FAIL;
}
if (!err_exists && (err_val & ~CPU_BOOT_ERR0_ENABLED)) {
dev_err(hdev->dev,
"Device boot error - unknown ERR0 error 0x%08x\n", err_val);
err_exists = true;
}
/* return error only if it's in the predefined mask */
if (err_exists && ((err_val & ~CPU_BOOT_ERR0_ENABLED) &
lower_32_bits(hdev->boot_error_status_mask)))
return true;
return false;
}
/* placeholder for ERR1 as no errors defined there yet */
static bool fw_report_boot_dev1(struct hl_device *hdev, u32 err_val,
u32 sts_val)
{
/*
* Keep this variable to preserve the logic of the function. This way,
 * fewer modifications will be required when errors are added to
 * DEV_ERR1.
*/
bool err_exists = false;
if (!(err_val & CPU_BOOT_ERR1_ENABLED))
return false;
if (sts_val & CPU_BOOT_DEV_STS1_ENABLED)
dev_dbg(hdev->dev, "Device status1 %#x\n", sts_val);
if (!err_exists && (err_val & ~CPU_BOOT_ERR1_ENABLED)) {
dev_err(hdev->dev,
"Device boot error - unknown ERR1 error 0x%08x\n",
err_val);
err_exists = true;
}
/* return error only if it's in the predefined mask */
if (err_exists && ((err_val & ~CPU_BOOT_ERR1_ENABLED) &
upper_32_bits(hdev->boot_error_status_mask)))
return true;
return false;
}
static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
u32 boot_err1_reg, u32 cpu_boot_dev_status0_reg,
u32 cpu_boot_dev_status1_reg)
{
u32 err_val, status_val;
bool err_exists = false;
/* Some of the firmware status codes are deprecated in newer f/w
* versions. In those versions, the errors are reported
* in different registers. Therefore, we need to check those
* registers and print the exact errors. Moreover, there
* may be multiple errors, so we need to report on each error
* separately. Some of the error codes might indicate a state
* that is not an error per-se, but it is an error in production
* environment
*/
err_val = RREG32(boot_err0_reg);
status_val = RREG32(cpu_boot_dev_status0_reg);
err_exists = fw_report_boot_dev0(hdev, err_val, status_val);
err_val = RREG32(boot_err1_reg);
status_val = RREG32(cpu_boot_dev_status1_reg);
err_exists |= fw_report_boot_dev1(hdev, err_val, status_val);
if (err_exists)
return -EIO;
return 0;
}
int hl_fw_cpucp_info_get(struct hl_device *hdev,
u32 sts_boot_dev_sts0_reg,
u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
u32 boot_err1_reg)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct cpucp_packet pkt = {};
dma_addr_t cpucp_info_dma_addr;
void *cpucp_info_cpu_addr;
char *kernel_ver;
u64 result;
int rc;
cpucp_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, sizeof(struct cpucp_info),
&cpucp_info_dma_addr);
if (!cpucp_info_cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP info packet\n");
return -ENOMEM;
}
memset(cpucp_info_cpu_addr, 0, sizeof(struct cpucp_info));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_INFO_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.addr = cpu_to_le64(cpucp_info_dma_addr);
pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_info));
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP info pkt, error %d\n", rc);
goto out;
}
rc = fw_read_errors(hdev, boot_err0_reg, boot_err1_reg,
sts_boot_dev_sts0_reg, sts_boot_dev_sts1_reg);
if (rc) {
dev_err(hdev->dev, "Errors in device boot\n");
goto out;
}
memcpy(&prop->cpucp_info, cpucp_info_cpu_addr,
sizeof(prop->cpucp_info));
rc = hl_build_hwmon_channel_info(hdev, prop->cpucp_info.sensors);
if (rc) {
dev_err(hdev->dev,
"Failed to build hwmon channel info, error %d\n", rc);
rc = -EFAULT;
goto out;
}
kernel_ver = extract_fw_ver_from_str(prop->cpucp_info.kernel_version);
if (kernel_ver) {
dev_info(hdev->dev, "Linux version %s", kernel_ver);
kfree(kernel_ver);
}
/* assume EQ code doesn't need to check eqe index */
hdev->event_queue.check_eqe_index = false;
/* Read FW application security bits again */
if (prop->fw_cpu_boot_dev_sts0_valid) {
prop->fw_app_cpu_boot_dev_sts0 = RREG32(sts_boot_dev_sts0_reg);
if (prop->fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_EQ_INDEX_EN)
hdev->event_queue.check_eqe_index = true;
}
if (prop->fw_cpu_boot_dev_sts1_valid)
prop->fw_app_cpu_boot_dev_sts1 = RREG32(sts_boot_dev_sts1_reg);
out:
hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_info), cpucp_info_cpu_addr);
return rc;
}
static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
{
struct cpucp_array_data_packet *pkt;
size_t total_pkt_size, data_size;
u64 result;
int rc;
/* skip sending this info for unsupported ASICs */
if (!hdev->asic_funcs->get_msi_info)
return 0;
data_size = CPUCP_NUM_OF_MSI_TYPES * sizeof(u32);
total_pkt_size = sizeof(struct cpucp_array_data_packet) + data_size;
/* data should be aligned to 8 bytes in order for CPU-CP to copy it */
total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
/* total_pkt_size is cast to u16 later on */
if (total_pkt_size > USHRT_MAX) {
dev_err(hdev->dev, "CPUCP array data is too big\n");
return -EINVAL;
}
pkt = kzalloc(total_pkt_size, GFP_KERNEL);
if (!pkt)
return -ENOMEM;
pkt->length = cpu_to_le32(CPUCP_NUM_OF_MSI_TYPES);
memset((void *) &pkt->data, 0xFF, data_size);
hdev->asic_funcs->get_msi_info(pkt->data);
pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_MSI_INFO_SET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)pkt,
total_pkt_size, 0, &result);
/*
* In case the packet result is invalid, it means that the FW does not
 * support this feature and will use default/hard-coded MSI values.
 * There is no reason to stop the boot.
*/
if (rc && result == cpucp_packet_invalid)
rc = 0;
if (rc)
dev_err(hdev->dev, "failed to send CPUCP array data\n");
kfree(pkt);
return rc;
}
int hl_fw_cpucp_handshake(struct hl_device *hdev,
u32 sts_boot_dev_sts0_reg,
u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
u32 boot_err1_reg)
{
int rc;
rc = hl_fw_cpucp_info_get(hdev, sts_boot_dev_sts0_reg,
sts_boot_dev_sts1_reg, boot_err0_reg,
boot_err1_reg);
if (rc)
return rc;
return hl_fw_send_msi_info_msg(hdev);
}
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
{
struct cpucp_packet pkt = {};
void *eeprom_info_cpu_addr;
dma_addr_t eeprom_info_dma_addr;
u64 result;
int rc;
eeprom_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, max_size,
&eeprom_info_dma_addr);
if (!eeprom_info_cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP EEPROM packet\n");
return -ENOMEM;
}
memset(eeprom_info_cpu_addr, 0, max_size);
pkt.ctl = cpu_to_le32(CPUCP_PACKET_EEPROM_DATA_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.addr = cpu_to_le64(eeprom_info_dma_addr);
pkt.data_max_size = cpu_to_le32(max_size);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_EEPROM_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP EEPROM packet, error %d\n",
rc);
goto out;
}
/* result contains the actual size */
memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
out:
hl_cpu_accessible_dma_pool_free(hdev, max_size, eeprom_info_cpu_addr);
return rc;
}
int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
{
struct cpucp_monitor_dump *mon_dump_cpu_addr;
dma_addr_t mon_dump_dma_addr;
struct cpucp_packet pkt = {};
size_t data_size;
__le32 *src_ptr;
u32 *dst_ptr;
u64 result;
int i, rc;
data_size = sizeof(struct cpucp_monitor_dump);
mon_dump_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, data_size, &mon_dump_dma_addr);
if (!mon_dump_cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP monitor-dump packet\n");
return -ENOMEM;
}
memset(mon_dump_cpu_addr, 0, data_size);
pkt.ctl = cpu_to_le32(CPUCP_PACKET_MONITOR_DUMP_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.addr = cpu_to_le64(mon_dump_dma_addr);
pkt.data_max_size = cpu_to_le32(data_size);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
goto out;
}
/* result contains the actual size */
src_ptr = (__le32 *) mon_dump_cpu_addr;
dst_ptr = data;
for (i = 0; i < (data_size / sizeof(u32)); i++) {
*dst_ptr = le32_to_cpu(*src_ptr);
src_ptr++;
dst_ptr++;
}
out:
hl_cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
return rc;
}
int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
struct hl_info_pci_counters *counters)
{
struct cpucp_packet pkt = {};
u64 result;
int rc;
pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
/* Fetch PCI rx counter */
pkt.index = cpu_to_le32(cpucp_pcie_throughput_rx);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
return rc;
}
counters->rx_throughput = result;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
/* Fetch PCI tx counter */
pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
return rc;
}
counters->tx_throughput = result;
/* Fetch PCI replay counter */
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
return rc;
}
counters->replay_cnt = (u32) result;
return rc;
}
int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
{
struct cpucp_packet pkt = {};
u64 result;
int rc;
pkt.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CpuCP total energy pkt, error %d\n",
rc);
return rc;
}
*total_energy = result;
return rc;
}
int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
enum pll_index *pll_index)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u8 pll_byte, pll_bit_off;
bool dynamic_pll;
int fw_pll_idx;
dynamic_pll = !!(prop->fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_DYN_PLL_EN);
if (!dynamic_pll) {
/*
* In case we are working with legacy FW (each ASIC has its own unique
 * PLL numbering), use the driver-based index, as it is aligned with
 * the FW legacy numbering.
*/
*pll_index = input_pll_index;
return 0;
}
/* retrieve a FW-compatible PLL index based on the
 * ASIC-specific user request
*/
fw_pll_idx = hdev->asic_funcs->map_pll_idx_to_fw_idx(input_pll_index);
if (fw_pll_idx < 0) {
dev_err(hdev->dev, "Invalid PLL index (%u) error %d\n",
input_pll_index, fw_pll_idx);
return -EINVAL;
}
/* PLL map is a u8 array */
pll_byte = prop->cpucp_info.pll_map[fw_pll_idx >> 3];
pll_bit_off = fw_pll_idx & 0x7;
if (!(pll_byte & BIT(pll_bit_off))) {
dev_err(hdev->dev, "PLL index %d is not supported\n",
fw_pll_idx);
return -EINVAL;
}
*pll_index = fw_pll_idx;
return 0;
}
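/*
 * Illustrative sketch (not part of the original driver): the PLL bitmap
 * lookup used by get_used_pll_index(). The FW exposes one bit per PLL in
 * a u8 array, so index 13, for instance, lands in byte 13 >> 3 == 1,
 * bit 13 & 0x7 == 5. The helper name below is hypothetical.
 */
static inline bool hl_example_pll_supported(const u8 *pll_map, int fw_pll_idx)
{
	return !!(pll_map[fw_pll_idx >> 3] & BIT(fw_pll_idx & 0x7));
}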
int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
u16 *pll_freq_arr)
{
struct cpucp_packet pkt;
enum pll_index used_pll_idx;
u64 result;
int rc;
rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
if (rc)
return rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_INFO_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.pll_type = __cpu_to_le16((u16)used_pll_idx);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
return rc;
}
pll_freq_arr[0] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT0_MASK, result);
pll_freq_arr[1] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT1_MASK, result);
pll_freq_arr[2] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT2_MASK, result);
pll_freq_arr[3] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT3_MASK, result);
return 0;
}
int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
{
struct cpucp_packet pkt;
u64 result;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.type = cpu_to_le16(CPUCP_POWER_INPUT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
return rc;
}
*power = result;
return rc;
}
int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
struct cpucp_hbm_row_info *info)
{
struct cpucp_hbm_row_info *cpucp_repl_rows_info_cpu_addr;
dma_addr_t cpucp_repl_rows_info_dma_addr;
struct cpucp_packet pkt = {};
u64 result;
int rc;
cpucp_repl_rows_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev,
sizeof(struct cpucp_hbm_row_info),
&cpucp_repl_rows_info_dma_addr);
if (!cpucp_repl_rows_info_cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP replaced rows info packet\n");
return -ENOMEM;
}
memset(cpucp_repl_rows_info_cpu_addr, 0, sizeof(struct cpucp_hbm_row_info));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.addr = cpu_to_le64(cpucp_repl_rows_info_dma_addr);
pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_hbm_row_info));
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
goto out;
}
memcpy(info, cpucp_repl_rows_info_cpu_addr, sizeof(*info));
out:
hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_hbm_row_info),
cpucp_repl_rows_info_cpu_addr);
return rc;
}
int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num)
{
struct cpucp_packet pkt;
u64 result;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_PENDING_ROWS_STATUS << CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP pending rows info pkt, error %d\n", rc);
goto out;
}
*pend_rows_num = (u32) result;
out:
return rc;
}
int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid)
{
struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_ENGINE_CORE_ASID_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(asid);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, NULL);
if (rc)
dev_err(hdev->dev,
"Failed on ASID configuration request for engine core, error %d\n",
rc);
return rc;
}
void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev)
{
struct static_fw_load_mgr *static_loader =
&hdev->fw_loader.static_loader;
int rc;
if (hdev->asic_prop.dynamic_fw_load) {
rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
COMMS_RST_DEV, 0, false,
hdev->fw_loader.cpu_timeout);
if (rc)
dev_err(hdev->dev, "Failed sending COMMS_RST_DEV\n");
} else {
WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_RST_DEV);
}
}
void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
{
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
u32 status, cpu_boot_status_reg, cpu_timeout;
struct static_fw_load_mgr *static_loader;
struct pre_fw_load_props *pre_fw_load;
int rc;
if (hdev->device_cpu_is_halted)
return;
/* Stop device CPU to make sure nothing bad happens */
if (hdev->asic_prop.dynamic_fw_load) {
pre_fw_load = &fw_loader->pre_fw_load;
cpu_timeout = fw_loader->cpu_timeout;
cpu_boot_status_reg = pre_fw_load->cpu_boot_status_reg;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
COMMS_GOTO_WFE, 0, false, cpu_timeout);
if (rc) {
dev_err(hdev->dev, "Failed sending COMMS_GOTO_WFE\n");
} else {
rc = hl_poll_timeout(
hdev,
cpu_boot_status_reg,
status,
status == CPU_BOOT_STATUS_IN_WFE,
hdev->fw_poll_interval_usec,
cpu_timeout);
if (rc)
dev_err(hdev->dev, "Current status=%u. Timed-out updating to WFE\n",
status);
}
} else {
static_loader = &hdev->fw_loader.static_loader;
WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
msleep(static_loader->cpu_reset_wait_msec);
/* Must clear this register in order to prevent preboot
* from reading WFE after reboot
*/
WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_NA);
}
hdev->device_cpu_is_halted = true;
}
static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
{
/* Some of the status codes below are deprecated in newer f/w
* versions but we keep them here for backward compatibility
*/
switch (status) {
case CPU_BOOT_STATUS_NA:
dev_err(hdev->dev,
"Device boot progress - BTL/ROM did NOT run\n");
break;
case CPU_BOOT_STATUS_IN_WFE:
dev_err(hdev->dev,
"Device boot progress - Stuck inside WFE loop\n");
break;
case CPU_BOOT_STATUS_IN_BTL:
dev_err(hdev->dev,
"Device boot progress - Stuck in BTL\n");
break;
case CPU_BOOT_STATUS_IN_PREBOOT:
dev_err(hdev->dev,
"Device boot progress - Stuck in Preboot\n");
break;
case CPU_BOOT_STATUS_IN_SPL:
dev_err(hdev->dev,
"Device boot progress - Stuck in SPL\n");
break;
case CPU_BOOT_STATUS_IN_UBOOT:
dev_err(hdev->dev,
"Device boot progress - Stuck in u-boot\n");
break;
case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
dev_err(hdev->dev,
"Device boot progress - DRAM initialization failed\n");
break;
case CPU_BOOT_STATUS_UBOOT_NOT_READY:
dev_err(hdev->dev,
"Device boot progress - Cannot boot\n");
break;
case CPU_BOOT_STATUS_TS_INIT_FAIL:
dev_err(hdev->dev,
"Device boot progress - Thermal Sensor initialization failed\n");
break;
case CPU_BOOT_STATUS_SECURITY_READY:
dev_err(hdev->dev,
"Device boot progress - Stuck in preboot after security initialization\n");
break;
default:
dev_err(hdev->dev,
"Device boot progress - Invalid or unexpected status code %d\n", status);
break;
}
}
int hl_fw_wait_preboot_ready(struct hl_device *hdev)
{
struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
u32 status;
int rc;
/* Need to check two possible scenarios:
*
* CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT - for newer firmwares where
* the preboot is waiting for the boot fit
*
* All other status values - for older firmwares where the uboot was
* loaded from the FLASH
*/
rc = hl_poll_timeout(
hdev,
pre_fw_load->cpu_boot_status_reg,
status,
(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
hdev->fw_poll_interval_usec,
pre_fw_load->wait_for_preboot_timeout);
if (rc) {
detect_cpu_boot_status(hdev, status);
dev_err(hdev->dev, "CPU boot ready timeout (status = %d)\n", status);
		/* If we read all FF, then something is totally wrong, no point
		 * in reading specific errors
		 */
if (status != -1)
fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
pre_fw_load->boot_err1_reg,
pre_fw_load->sts_boot_dev_sts0_reg,
pre_fw_load->sts_boot_dev_sts1_reg);
return -EIO;
}
hdev->fw_loader.fw_comp_loaded |= FW_TYPE_PREBOOT_CPU;
return 0;
}
static int hl_fw_read_preboot_caps(struct hl_device *hdev)
{
struct pre_fw_load_props *pre_fw_load;
struct asic_fixed_properties *prop;
u32 reg_val;
int rc;
prop = &hdev->asic_prop;
pre_fw_load = &hdev->fw_loader.pre_fw_load;
rc = hl_fw_wait_preboot_ready(hdev);
if (rc)
return rc;
	/*
	 * The DEV_STS* registers contain FW capabilities/features.
	 * We can rely on these registers only if the CPU_BOOT_DEV_STS*_ENABLED
	 * bit is set.
	 * On the first read of such a register we store its value ONLY if the
	 * register is enabled (which will be propagated to the next stages)
	 * and also mark the register as valid.
	 * If it is not enabled, the stored value is left 0, i.e. all
	 * caps/features are off.
	 */
reg_val = RREG32(pre_fw_load->sts_boot_dev_sts0_reg);
if (reg_val & CPU_BOOT_DEV_STS0_ENABLED) {
prop->fw_cpu_boot_dev_sts0_valid = true;
prop->fw_preboot_cpu_boot_dev_sts0 = reg_val;
}
reg_val = RREG32(pre_fw_load->sts_boot_dev_sts1_reg);
if (reg_val & CPU_BOOT_DEV_STS1_ENABLED) {
prop->fw_cpu_boot_dev_sts1_valid = true;
prop->fw_preboot_cpu_boot_dev_sts1 = reg_val;
}
prop->dynamic_fw_load = !!(prop->fw_preboot_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_FW_LD_COM_EN);
/* initialize FW loader once we know what load protocol is used */
hdev->asic_funcs->init_firmware_loader(hdev);
dev_dbg(hdev->dev, "Attempting %s FW load\n",
prop->dynamic_fw_load ? "dynamic" : "legacy");
return 0;
}
static int hl_fw_static_read_device_fw_version(struct hl_device *hdev,
enum hl_fw_component fwc)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
struct static_fw_load_mgr *static_loader;
char *dest, *boot_ver, *preboot_ver;
u32 ver_off, limit;
const char *name;
char btl_ver[32];
static_loader = &hdev->fw_loader.static_loader;
switch (fwc) {
case FW_COMP_BOOT_FIT:
ver_off = RREG32(static_loader->boot_fit_version_offset_reg);
dest = prop->uboot_ver;
name = "Boot-fit";
limit = static_loader->boot_fit_version_max_off;
break;
case FW_COMP_PREBOOT:
ver_off = RREG32(static_loader->preboot_version_offset_reg);
dest = prop->preboot_ver;
name = "Preboot";
limit = static_loader->preboot_version_max_off;
break;
default:
dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
return -EIO;
}
ver_off &= static_loader->sram_offset_mask;
if (ver_off < limit) {
memcpy_fromio(dest,
hdev->pcie_bar[fw_loader->sram_bar_id] + ver_off,
VERSION_MAX_LEN);
} else {
dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
name, ver_off);
strscpy(dest, "unavailable", VERSION_MAX_LEN);
return -EIO;
}
if (fwc == FW_COMP_BOOT_FIT) {
boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
if (boot_ver) {
dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
kfree(boot_ver);
}
} else if (fwc == FW_COMP_PREBOOT) {
preboot_ver = strnstr(prop->preboot_ver, "Preboot",
VERSION_MAX_LEN);
if (preboot_ver && preboot_ver != prop->preboot_ver) {
strscpy(btl_ver, prop->preboot_ver,
min((int) (preboot_ver - prop->preboot_ver),
31));
dev_info(hdev->dev, "%s\n", btl_ver);
}
preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
if (preboot_ver) {
dev_info(hdev->dev, "preboot version %s\n",
preboot_ver);
kfree(preboot_ver);
}
}
return 0;
}
/**
 * hl_fw_preboot_update_state - update internal data structures during
 *                              handshake with preboot
 *
 * @hdev: pointer to the habanalabs device structure
 */
static void hl_fw_preboot_update_state(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 cpu_boot_dev_sts0, cpu_boot_dev_sts1;
cpu_boot_dev_sts0 = prop->fw_preboot_cpu_boot_dev_sts0;
cpu_boot_dev_sts1 = prop->fw_preboot_cpu_boot_dev_sts1;
/* We read boot_dev_sts registers multiple times during boot:
* 1. preboot - a. Check whether the security status bits are valid
* b. Check whether fw security is enabled
* c. Check whether hard reset is done by preboot
* 2. boot cpu - a. Fetch boot cpu security status
* b. Check whether hard reset is done by boot cpu
* 3. FW application - a. Fetch fw application security status
* b. Check whether hard reset is done by fw app
*/
prop->hard_reset_done_by_fw = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
prop->fw_security_enabled = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_SECURITY_EN);
dev_dbg(hdev->dev, "Firmware preboot boot device status0 %#x\n",
cpu_boot_dev_sts0);
dev_dbg(hdev->dev, "Firmware preboot boot device status1 %#x\n",
cpu_boot_dev_sts1);
dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
prop->hard_reset_done_by_fw ? "enabled" : "disabled");
dev_dbg(hdev->dev, "firmware-level security is %s\n",
prop->fw_security_enabled ? "enabled" : "disabled");
dev_dbg(hdev->dev, "GIC controller is %s\n",
prop->gic_interrupts_enable ? "enabled" : "disabled");
}
static int hl_fw_static_read_preboot_status(struct hl_device *hdev)
{
int rc;
rc = hl_fw_static_read_device_fw_version(hdev, FW_COMP_PREBOOT);
if (rc)
return rc;
return 0;
}
int hl_fw_read_preboot_status(struct hl_device *hdev)
{
int rc;
if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
return 0;
/* get FW pre-load parameters */
hdev->asic_funcs->init_firmware_preload_params(hdev);
/*
* In order to determine boot method (static VS dynamic) we need to
* read the boot caps register
*/
rc = hl_fw_read_preboot_caps(hdev);
if (rc)
return rc;
hl_fw_preboot_update_state(hdev);
/* no need to read preboot status in dynamic load */
if (hdev->asic_prop.dynamic_fw_load)
return 0;
return hl_fw_static_read_preboot_status(hdev);
}
/* associate string with COMM status */
static char *hl_dynamic_fw_status_str[COMMS_STS_INVLD_LAST] = {
[COMMS_STS_NOOP] = "NOOP",
[COMMS_STS_ACK] = "ACK",
[COMMS_STS_OK] = "OK",
[COMMS_STS_ERR] = "ERR",
[COMMS_STS_VALID_ERR] = "VALID_ERR",
[COMMS_STS_TIMEOUT_ERR] = "TIMEOUT_ERR",
};
/**
* hl_fw_dynamic_report_error_status - report error status
*
* @hdev: pointer to the habanalabs device structure
* @status: value of FW status register
* @expected_status: the expected status
*/
static void hl_fw_dynamic_report_error_status(struct hl_device *hdev,
u32 status,
enum comms_sts expected_status)
{
enum comms_sts comm_status =
FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
if (comm_status < COMMS_STS_INVLD_LAST)
dev_err(hdev->dev, "Device status %s, expected status: %s\n",
hl_dynamic_fw_status_str[comm_status],
hl_dynamic_fw_status_str[expected_status]);
else
dev_err(hdev->dev, "Device status unknown %d, expected status: %s\n",
comm_status,
hl_dynamic_fw_status_str[expected_status]);
}
/**
* hl_fw_dynamic_send_cmd - send LKD to FW cmd
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
* @cmd: LKD to FW cmd code
* @size: size of next FW component to be loaded (0 if not necessary)
*
 * The exact layout of an LKD to FW command is defined in struct comms_command.
 * Note: the size argument is used only when the next FW component should be
 *       loaded; otherwise it shall be 0. At this stage the size only tells
 *       the FW how much memory to allocate for receiving the next boot
 *       component; the FW also uses it in later protocol stages.
*/
static void hl_fw_dynamic_send_cmd(struct hl_device *hdev,
struct fw_load_mgr *fw_loader,
enum comms_cmd cmd, unsigned int size)
{
struct cpu_dyn_regs *dyn_regs;
u32 val;
dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd);
val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size);
trace_habanalabs_comms_send_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val);
}
/**
 * hl_fw_dynamic_extract_fw_response - extract the FW response from the status register
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
* @response: FW response
* @status: the status read from CPU status register
*
* @return 0 on success, otherwise non-zero error code
*/
static int hl_fw_dynamic_extract_fw_response(struct hl_device *hdev,
struct fw_load_mgr *fw_loader,
struct fw_response *response,
u32 status)
{
response->status = FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
response->ram_offset = FIELD_GET(COMMS_STATUS_OFFSET_MASK, status) <<
COMMS_STATUS_OFFSET_ALIGN_SHIFT;
response->ram_type = FIELD_GET(COMMS_STATUS_RAM_TYPE_MASK, status);
if ((response->ram_type != COMMS_SRAM) &&
(response->ram_type != COMMS_DRAM)) {
dev_err(hdev->dev, "FW status: invalid RAM type %u\n",
response->ram_type);
return -EIO;
}
return 0;
}
/**
* hl_fw_dynamic_wait_for_status - wait for status in dynamic FW load
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
* @expected_status: expected status to wait for
* @timeout: timeout for status wait
*
* @return 0 on success, otherwise non-zero error code
*
 * Waiting for status from the FW includes polling the FW status register until
 * the expected status is received or a timeout occurs (whichever comes first).
*/
static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
struct fw_load_mgr *fw_loader,
enum comms_sts expected_status,
u32 timeout)
{
struct cpu_dyn_regs *dyn_regs;
u32 status;
int rc;
dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
trace_habanalabs_comms_wait_status(hdev->dev, comms_sts_str_arr[expected_status]);
/* Wait for expected status */
rc = hl_poll_timeout(
hdev,
le32_to_cpu(dyn_regs->cpu_cmd_status_to_host),
status,
FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status,
hdev->fw_comms_poll_interval_usec,
timeout);
if (rc) {
hl_fw_dynamic_report_error_status(hdev, status,
expected_status);
return -EIO;
}
trace_habanalabs_comms_wait_status_done(hdev->dev, comms_sts_str_arr[expected_status]);
/*
* skip storing FW response for NOOP to preserve the actual desired
* FW status
*/
if (expected_status == COMMS_STS_NOOP)
return 0;
rc = hl_fw_dynamic_extract_fw_response(hdev, fw_loader,
&fw_loader->dynamic_loader.response,
status);
return rc;
}
/**
* hl_fw_dynamic_send_clear_cmd - send clear command to FW
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
*
* @return 0 on success, otherwise non-zero error code
*
 * After a command cycle between the LKD and the FW CPU (i.e. the LKD got the
 * expected status from the FW) we need to clear the CPU status register in
 * order to avoid garbage between command cycles.
 * This is done by sending a clear command and polling the CPU-to-LKD status
 * register until it holds the NOOP status.
*/
static int hl_fw_dynamic_send_clear_cmd(struct hl_device *hdev,
struct fw_load_mgr *fw_loader)
{
hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_CLR_STS, 0);
return hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_NOOP,
fw_loader->cpu_timeout);
}
/**
* hl_fw_dynamic_send_protocol_cmd - send LKD to FW cmd and wait for ACK
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
* @cmd: LKD to FW cmd code
* @size: size of next FW component to be loaded (0 if not necessary)
* @wait_ok: if true also wait for OK response from FW
* @timeout: timeout for status wait
*
* @return 0 on success, otherwise non-zero error code
*
* brief:
* when sending protocol command we have the following steps:
* - send clear (clear command and verify clear status register)
* - send the actual protocol command
* - wait for ACK on the protocol command
* - send clear
* - send NOOP
* if, in addition, the specific protocol command should wait for OK then:
* - wait for OK
* - send clear
* - send NOOP
*
 * NOTES:
 * send clear: necessary in order to clear the status register and avoid
 *             leftovers between commands
 * NOOP command: necessary to prevent the FW from looping on the clear command
*/
int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
struct fw_load_mgr *fw_loader,
enum comms_cmd cmd, unsigned int size,
bool wait_ok, u32 timeout)
{
int rc;
trace_habanalabs_comms_protocol_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
/* first send clear command to clean former commands */
rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
if (rc)
return rc;
/* send the actual command */
hl_fw_dynamic_send_cmd(hdev, fw_loader, cmd, size);
/* wait for ACK for the command */
rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_ACK,
timeout);
if (rc)
return rc;
/* clear command to prepare for NOOP command */
rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
if (rc)
return rc;
/* send the actual NOOP command */
hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
if (!wait_ok)
return 0;
rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_OK,
timeout);
if (rc)
return rc;
/* clear command to prepare for NOOP command */
rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
if (rc)
return rc;
/* send the actual NOOP command */
hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
return 0;
}
/**
* hl_fw_compat_crc32 - CRC compatible with FW
*
* @data: pointer to the data
* @size: size of the data
*
* @return the CRC32 result
*
 * NOTE: the kernel's CRC32 differs from the standard CRC32 calculation.
 *       In order to be aligned we need to flip the bits of both the initial
 *       input CRC and the kernel's CRC32 result.
 *       In addition, both sides use an initial CRC of 0.
*/
static u32 hl_fw_compat_crc32(u8 *data, size_t size)
{
return ~crc32_le(~((u32)0), data, size);
}
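/*
 * Illustrative self-test sketch (not part of the driver, helper name is
 * hypothetical): with the bit flipping above, hl_fw_compat_crc32() is
 * expected to match the conventional CRC-32 (e.g. zlib's), whose well-known
 * check value for the ASCII string "123456789" is 0xCBF43926.
 */
static int __maybe_unused hl_fw_compat_crc32_selftest(void)
{
	static const u8 check_data[] = "123456789";

	/* 9 data bytes, excluding the terminating NUL */
	return hl_fw_compat_crc32((u8 *)check_data, 9) == 0xCBF43926 ?
			0 : -EINVAL;
}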
/**
* hl_fw_dynamic_validate_memory_bound - validate memory bounds for memory
* transfer (image or descriptor) between
* host and FW
*
* @hdev: pointer to the habanalabs device structure
* @addr: device address of memory transfer
* @size: memory transfer size
* @region: PCI memory region
*
* @return 0 on success, otherwise non-zero error code
*/
static int hl_fw_dynamic_validate_memory_bound(struct hl_device *hdev,
u64 addr, size_t size,
struct pci_mem_region *region)
{
u64 end_addr;
/* now make sure that the memory transfer is within region's bounds */
end_addr = addr + size;
if (end_addr >= region->region_base + region->region_size) {
dev_err(hdev->dev,
"dynamic FW load: memory transfer end address out of memory region bounds. addr: %llx\n",
end_addr);
return -EIO;
}
/*
* now make sure memory transfer is within predefined BAR bounds.
* this is to make sure we do not need to set the bar (e.g. for DRAM
* memory transfers)
*/
if (end_addr >= region->region_base - region->offset_in_bar +
region->bar_size) {
dev_err(hdev->dev,
"FW image beyond PCI BAR bounds\n");
return -EIO;
}
return 0;
}
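/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * second check above is equivalent to requiring that the transfer ends
 * before the end of the BAR-visible window, which starts at
 * region_base - offset_in_bar and spans bar_size bytes.
 */
static bool __maybe_unused hl_example_fits_in_bar(struct pci_mem_region *region,
						  u64 addr, size_t size)
{
	u64 bar_window_start = region->region_base - region->offset_in_bar;
	u64 bar_window_end = bar_window_start + region->bar_size;

	return (addr + size) < bar_window_end;
}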
/**
* hl_fw_dynamic_validate_descriptor - validate FW descriptor
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
* @fw_desc: the descriptor from FW
*
* @return 0 on success, otherwise non-zero error code
*/
static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
struct fw_load_mgr *fw_loader,
struct lkd_fw_comms_desc *fw_desc)
{
struct pci_mem_region *region;
enum pci_region region_id;
size_t data_size;
u32 data_crc32;
u8 *data_ptr;
u64 addr;
int rc;
if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC)
dev_dbg(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
fw_desc->header.magic);
if (fw_desc->header.version != HL_COMMS_DESC_VER)
dev_dbg(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
fw_desc->header.version);
	/*
	 * Calc CRC32 of the data without the header. Use the descriptor size
	 * reported by the firmware, without calculating it ourselves, to allow
	 * adding more fields to the lkd_fw_comms_desc structure.
	 * Note that there are no alignment/stride address issues here as all
	 * structures are 64 bit padded.
	 */
data_ptr = (u8 *)fw_desc + sizeof(struct comms_desc_header);
data_size = le16_to_cpu(fw_desc->header.size);
data_crc32 = hl_fw_compat_crc32(data_ptr, data_size);
if (data_crc32 != le32_to_cpu(fw_desc->header.crc32)) {
dev_err(hdev->dev, "CRC32 mismatch for dynamic FW descriptor (%x:%x)\n",
data_crc32, fw_desc->header.crc32);
return -EIO;
}
/* find memory region to which to copy the image */
addr = le64_to_cpu(fw_desc->img_addr);
region_id = hl_get_pci_memory_region(hdev, addr);
if ((region_id != PCI_REGION_SRAM) && ((region_id != PCI_REGION_DRAM))) {
dev_err(hdev->dev, "Invalid region to copy FW image address=%llx\n", addr);
return -EIO;
}
region = &hdev->pci_mem_region[region_id];
/* store the region for the copy stage */
fw_loader->dynamic_loader.image_region = region;
/*
* here we know that the start address is valid, now make sure that the
* image is within region's bounds
*/
rc = hl_fw_dynamic_validate_memory_bound(hdev, addr,
fw_loader->dynamic_loader.fw_image_size,
region);
if (rc) {
dev_err(hdev->dev, "invalid mem transfer request for FW image\n");
return rc;
}
/* here we can mark the descriptor as valid as the content has been validated */
fw_loader->dynamic_loader.fw_desc_valid = true;
return 0;
}
static int hl_fw_dynamic_validate_response(struct hl_device *hdev,
struct fw_response *response,
struct pci_mem_region *region)
{
u64 device_addr;
int rc;
device_addr = region->region_base + response->ram_offset;
	/*
	 * Validate that the descriptor is within the region's bounds.
	 * Note that as the start address was supplied according to the RAM
	 * type, testing only the end address is enough.
	 */
rc = hl_fw_dynamic_validate_memory_bound(hdev, device_addr,
sizeof(struct lkd_fw_comms_desc),
region);
return rc;
}
/*
 * hl_fw_dynamic_read_descriptor_msg - read and print the ASCII messages sent by the FW
*
* @hdev: pointer to the habanalabs device structure
* @fw_desc: the descriptor from FW
*/
static void hl_fw_dynamic_read_descriptor_msg(struct hl_device *hdev,
struct lkd_fw_comms_desc *fw_desc)
{
int i;
char *msg;
for (i = 0 ; i < LKD_FW_ASCII_MSG_MAX ; i++) {
if (!fw_desc->ascii_msg[i].valid)
return;
/* force NULL termination */
msg = fw_desc->ascii_msg[i].msg;
msg[LKD_FW_ASCII_MSG_MAX_LEN - 1] = '\0';
switch (fw_desc->ascii_msg[i].msg_lvl) {
case LKD_FW_ASCII_MSG_ERR:
dev_err(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
break;
case LKD_FW_ASCII_MSG_WRN:
dev_warn(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
break;
case LKD_FW_ASCII_MSG_INF:
dev_info(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
break;
default:
dev_dbg(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
break;
}
}
}
/**
* hl_fw_dynamic_read_and_validate_descriptor - read and validate FW descriptor
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
*
* @return 0 on success, otherwise non-zero error code
*/
static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
struct fw_load_mgr *fw_loader)
{
struct lkd_fw_comms_desc *fw_desc;
struct pci_mem_region *region;
struct fw_response *response;
void *temp_fw_desc;
void __iomem *src;
u16 fw_data_size;
enum pci_region region_id;
int rc;
fw_desc = &fw_loader->dynamic_loader.comm_desc;
response = &fw_loader->dynamic_loader.response;
region_id = (response->ram_type == COMMS_SRAM) ?
PCI_REGION_SRAM : PCI_REGION_DRAM;
region = &hdev->pci_mem_region[region_id];
rc = hl_fw_dynamic_validate_response(hdev, response, region);
if (rc) {
dev_err(hdev->dev,
"invalid mem transfer request for FW descriptor\n");
return rc;
}
	/*
	 * Extract the address to copy the descriptor from.
	 * In addition, as the descriptor value is going to be overridden by
	 * new data, we mark it as invalid; it will be marked valid again once
	 * it has been validated.
	 */
fw_loader->dynamic_loader.fw_desc_valid = false;
src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
response->ram_offset;
	/*
	 * We copy the fw descriptor in 2 phases:
	 * 1. copy the header + data info according to our lkd_fw_comms_desc definition.
	 *    Then we're able to read the actual data size provided by the fw.
	 *    This is needed for cases where data in the descriptor was changed (added/removed)
	 *    in the embedded specs header file before updating the lkd copy of the header file.
	 * 2. copy the descriptor to a temporary buffer of the correct size and send it to validation.
	 */
memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
fw_data_size = le16_to_cpu(fw_desc->header.size);
temp_fw_desc = vzalloc(sizeof(struct comms_desc_header) + fw_data_size);
if (!temp_fw_desc)
return -ENOMEM;
memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_desc_header) + fw_data_size);
rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader,
(struct lkd_fw_comms_desc *) temp_fw_desc);
if (!rc)
hl_fw_dynamic_read_descriptor_msg(hdev, temp_fw_desc);
vfree(temp_fw_desc);
return rc;
}
/**
* hl_fw_dynamic_request_descriptor - handshake with CPU to get FW descriptor
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
* @next_image_size: size to allocate for next FW component
*
* @return 0 on success, otherwise non-zero error code
*/
static int hl_fw_dynamic_request_descriptor(struct hl_device *hdev,
struct fw_load_mgr *fw_loader,
size_t next_image_size)
{
int rc;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_PREP_DESC,
next_image_size, true,
fw_loader->cpu_timeout);
if (rc)
return rc;
return hl_fw_dynamic_read_and_validate_descriptor(hdev, fw_loader);
}
/**
* hl_fw_dynamic_read_device_fw_version - read FW version to exposed properties
*
* @hdev: pointer to the habanalabs device structure
* @fwc: the firmware component
* @fw_version: fw component's version string
 *
 * @return 0 on success, otherwise non-zero error code
 */
static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
enum hl_fw_component fwc,
const char *fw_version)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
char *preboot_ver, *boot_ver;
char btl_ver[32];
int rc;
switch (fwc) {
case FW_COMP_BOOT_FIT:
strscpy(prop->uboot_ver, fw_version, VERSION_MAX_LEN);
boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
if (boot_ver) {
dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
kfree(boot_ver);
}
break;
case FW_COMP_PREBOOT:
strscpy(prop->preboot_ver, fw_version, VERSION_MAX_LEN);
preboot_ver = strnstr(prop->preboot_ver, "Preboot", VERSION_MAX_LEN);
dev_info(hdev->dev, "preboot full version: '%s'\n", preboot_ver);
if (preboot_ver && preboot_ver != prop->preboot_ver) {
strscpy(btl_ver, prop->preboot_ver,
min((int) (preboot_ver - prop->preboot_ver), 31));
dev_info(hdev->dev, "%s\n", btl_ver);
}
rc = hl_get_sw_major_minor_subminor(hdev, preboot_ver);
if (rc)
return rc;
preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
if (preboot_ver) {
rc = hl_get_preboot_major_minor(hdev, preboot_ver);
kfree(preboot_ver);
if (rc)
return rc;
}
break;
default:
dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
return -EINVAL;
}
return 0;
}
/**
* hl_fw_dynamic_copy_image - copy image to memory allocated by the FW
*
* @hdev: pointer to the habanalabs device structure
 * @fw: the firmware image to copy to the device
* @fw_loader: managing structure for loading device's FW
 *
 * @return 0 on success, otherwise non-zero error code
 */
static int hl_fw_dynamic_copy_image(struct hl_device *hdev,
const struct firmware *fw,
struct fw_load_mgr *fw_loader)
{
struct lkd_fw_comms_desc *fw_desc;
struct pci_mem_region *region;
void __iomem *dest;
u64 addr;
int rc;
fw_desc = &fw_loader->dynamic_loader.comm_desc;
addr = le64_to_cpu(fw_desc->img_addr);
/* find memory region to which to copy the image */
region = fw_loader->dynamic_loader.image_region;
dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
(addr - region->region_base);
rc = hl_fw_copy_fw_to_device(hdev, fw, dest,
fw_loader->boot_fit_img.src_off,
fw_loader->boot_fit_img.copy_size);
return rc;
}
/**
* hl_fw_dynamic_copy_msg - copy msg to memory allocated by the FW
*
* @hdev: pointer to the habanalabs device structure
* @msg: message
* @fw_loader: managing structure for loading device's FW
 *
 * @return 0 on success, otherwise non-zero error code
 */
static int hl_fw_dynamic_copy_msg(struct hl_device *hdev,
struct lkd_msg_comms *msg, struct fw_load_mgr *fw_loader)
{
struct lkd_fw_comms_desc *fw_desc;
struct pci_mem_region *region;
void __iomem *dest;
u64 addr;
int rc;
fw_desc = &fw_loader->dynamic_loader.comm_desc;
addr = le64_to_cpu(fw_desc->img_addr);
/* find memory region to which to copy the image */
region = fw_loader->dynamic_loader.image_region;
dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
(addr - region->region_base);
rc = hl_fw_copy_msg_to_device(hdev, msg, dest, 0, 0);
return rc;
}
/**
* hl_fw_boot_fit_update_state - update internal data structures after boot-fit
* is loaded
*
* @hdev: pointer to the habanalabs device structure
* @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
* @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
*/
static void hl_fw_boot_fit_update_state(struct hl_device *hdev,
u32 cpu_boot_dev_sts0_reg,
u32 cpu_boot_dev_sts1_reg)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
hdev->fw_loader.fw_comp_loaded |= FW_TYPE_BOOT_CPU;
/* Read boot_cpu status bits */
if (prop->fw_preboot_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_ENABLED) {
prop->fw_bootfit_cpu_boot_dev_sts0 =
RREG32(cpu_boot_dev_sts0_reg);
prop->hard_reset_done_by_fw = !!(prop->fw_bootfit_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
dev_dbg(hdev->dev, "Firmware boot CPU status0 %#x\n",
prop->fw_bootfit_cpu_boot_dev_sts0);
}
if (prop->fw_cpu_boot_dev_sts1_valid) {
prop->fw_bootfit_cpu_boot_dev_sts1 =
RREG32(cpu_boot_dev_sts1_reg);
dev_dbg(hdev->dev, "Firmware boot CPU status1 %#x\n",
prop->fw_bootfit_cpu_boot_dev_sts1);
}
dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
prop->hard_reset_done_by_fw ? "enabled" : "disabled");
}
static void hl_fw_dynamic_update_linux_interrupt_if(struct hl_device *hdev)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
/* Check whether all 3 interrupt interfaces are set, if not use a
* single interface
*/
if (!hdev->asic_prop.gic_interrupts_enable &&
!(hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN)) {
dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_pi_upd_irq;
dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_pi_upd_irq;
dev_warn(hdev->dev,
"Using a single interrupt interface towards cpucp");
}
}
/**
* hl_fw_dynamic_load_image - load FW image using dynamic protocol
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
* @load_fwc: the FW component to be loaded
* @img_ld_timeout: image load timeout
*
* @return 0 on success, otherwise non-zero error code
*/
static int hl_fw_dynamic_load_image(struct hl_device *hdev,
struct fw_load_mgr *fw_loader,
enum hl_fw_component load_fwc,
u32 img_ld_timeout)
{
enum hl_fw_component cur_fwc;
const struct firmware *fw;
char *fw_name;
int rc = 0;
/*
* when loading image we have one of 2 scenarios:
* 1. current FW component is preboot and we want to load boot-fit
* 2. current FW component is boot-fit and we want to load linux
*/
if (load_fwc == FW_COMP_BOOT_FIT) {
cur_fwc = FW_COMP_PREBOOT;
fw_name = fw_loader->boot_fit_img.image_name;
} else {
cur_fwc = FW_COMP_BOOT_FIT;
fw_name = fw_loader->linux_img.image_name;
}
/* request FW in order to communicate to FW the size to be allocated */
rc = hl_request_fw(hdev, &fw, fw_name);
if (rc)
return rc;
/* store the image size for future validation */
fw_loader->dynamic_loader.fw_image_size = fw->size;
rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, fw->size);
if (rc)
goto release_fw;
	/* read the version of the currently running FW component */
rc = hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
if (rc)
goto release_fw;
	/* copy the image to the space allocated by the FW */
rc = hl_fw_dynamic_copy_image(hdev, fw, fw_loader);
if (rc)
goto release_fw;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
0, true,
fw_loader->cpu_timeout);
if (rc)
goto release_fw;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
0, false,
img_ld_timeout);
release_fw:
hl_release_firmware(fw);
return rc;
}
static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev,
struct fw_load_mgr *fw_loader)
{
struct dynamic_fw_load_mgr *dyn_loader;
u32 status;
int rc;
dyn_loader = &fw_loader->dynamic_loader;
	/*
	 * Make sure the CPU boot-loader is running.
	 * Note that CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux,
	 * yet there is a debug scenario in which we load u-boot (without Linux),
	 * which at a later stage is relocated to DRAM. In this case we expect
	 * u-boot to set CPU_BOOT_STATUS_SRAM_AVAIL, so we add it to the
	 * poll flags.
	 */
rc = hl_poll_timeout(
hdev,
le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
status,
(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
(status == CPU_BOOT_STATUS_SRAM_AVAIL),
hdev->fw_poll_interval_usec,
dyn_loader->wait_for_bl_timeout);
if (rc) {
dev_err(hdev->dev, "failed to wait for boot (status = %d)\n", status);
return rc;
}
dev_dbg(hdev->dev, "uboot status = %d\n", status);
return 0;
}
static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev,
struct fw_load_mgr *fw_loader)
{
struct dynamic_fw_load_mgr *dyn_loader;
u32 status;
int rc;
dyn_loader = &fw_loader->dynamic_loader;
/* Make sure CPU linux is running */
rc = hl_poll_timeout(
hdev,
le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
status,
(status == CPU_BOOT_STATUS_SRAM_AVAIL),
hdev->fw_poll_interval_usec,
fw_loader->cpu_timeout);
if (rc) {
dev_err(hdev->dev, "failed to wait for Linux (status = %d)\n", status);
return rc;
}
dev_dbg(hdev->dev, "Boot status = %d\n", status);
return 0;
}
/**
 * hl_fw_linux_update_state -	update internal data structures after Linux
 *				is loaded.
 *				Note: Linux initialization mainly comprises
 *				two stages - loading the kernel (SRAM_AVAIL)
 *				and loading ARMCP.
 *				Therefore, reading the boot device status in
 *				any of these stages might yield different values.
 *
 * @hdev: pointer to the habanalabs device structure
 * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
 * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
 */
static void hl_fw_linux_update_state(struct hl_device *hdev,
u32 cpu_boot_dev_sts0_reg,
u32 cpu_boot_dev_sts1_reg)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
hdev->fw_loader.fw_comp_loaded |= FW_TYPE_LINUX;
/* Read FW application security bits */
if (prop->fw_cpu_boot_dev_sts0_valid) {
prop->fw_app_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg);
prop->hard_reset_done_by_fw = !!(prop->fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
if (prop->fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN)
prop->gic_interrupts_enable = false;
dev_dbg(hdev->dev,
"Firmware application CPU status0 %#x\n",
prop->fw_app_cpu_boot_dev_sts0);
dev_dbg(hdev->dev, "GIC controller is %s\n",
prop->gic_interrupts_enable ?
"enabled" : "disabled");
}
if (prop->fw_cpu_boot_dev_sts1_valid) {
prop->fw_app_cpu_boot_dev_sts1 = RREG32(cpu_boot_dev_sts1_reg);
dev_dbg(hdev->dev,
"Firmware application CPU status1 %#x\n",
prop->fw_app_cpu_boot_dev_sts1);
}
dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
prop->hard_reset_done_by_fw ? "enabled" : "disabled");
dev_info(hdev->dev, "Successfully loaded firmware to device\n");
}
/**
* hl_fw_dynamic_send_msg - send a COMMS message with attached data
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
* @msg_type: message type
* @data: data to be sent
*
* @return 0 on success, otherwise non-zero error code
*/
static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
struct fw_load_mgr *fw_loader, u8 msg_type, void *data)
{
struct lkd_msg_comms *msg;
int rc;
msg = kzalloc(sizeof(*msg), GFP_KERNEL);
if (!msg)
return -ENOMEM;
/* create message to be sent */
msg->header.type = msg_type;
msg->header.size = cpu_to_le16(sizeof(struct comms_msg_header));
msg->header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
switch (msg_type) {
case HL_COMMS_RESET_CAUSE_TYPE:
msg->reset_cause = *(__u8 *) data;
break;
default:
dev_err(hdev->dev,
"Send COMMS message - invalid message type %u\n",
msg_type);
rc = -EINVAL;
goto out;
}
rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
sizeof(struct lkd_msg_comms));
if (rc)
goto out;
/* copy message to space allocated by FW */
rc = hl_fw_dynamic_copy_msg(hdev, msg, fw_loader);
if (rc)
goto out;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
0, true,
fw_loader->cpu_timeout);
if (rc)
goto out;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
0, true,
fw_loader->cpu_timeout);
out:
kfree(msg);
return rc;
}
/**
* hl_fw_dynamic_init_cpu - initialize the device CPU using dynamic protocol
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
*
* @return 0 on success, otherwise non-zero error code
*
 * brief: the dynamic protocol is a master (LKD) / slave (FW CPU) protocol.
 * The communication is done using two registers:
 * - LKD command register
 * - FW status register
 * The protocol is race free. This is achieved by splitting the requests and
 * responses into known synchronization points between the LKD and the FW.
 * Each response to an LKD request is bound to a predefined timeout.
 * If the timeout expires without the desired status from the FW, the
 * protocol (and hence the boot) fails.
*/
static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
struct fw_load_mgr *fw_loader)
{
struct cpu_dyn_regs *dyn_regs;
int rc, fw_error_rc;
dev_info(hdev->dev,
"Loading %sfirmware to device, may take some time...\n",
hdev->asic_prop.fw_security_enabled ? "secured " : "");
/* initialize FW descriptor as invalid */
fw_loader->dynamic_loader.fw_desc_valid = false;
/*
* In this stage, "cpu_dyn_regs" contains only LKD's hard coded values!
* It will be updated from FW after hl_fw_dynamic_request_descriptor().
*/
dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
0, true,
fw_loader->cpu_timeout);
if (rc)
goto protocol_err;
if (hdev->reset_info.curr_reset_cause) {
rc = hl_fw_dynamic_send_msg(hdev, fw_loader,
HL_COMMS_RESET_CAUSE_TYPE, &hdev->reset_info.curr_reset_cause);
if (rc)
goto protocol_err;
/* Clear current reset cause */
hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
}
if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
struct lkd_fw_binning_info *binning_info;
rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, 0);
if (rc)
goto protocol_err;
/* read preboot version */
rc = hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
if (rc)
return rc;
/* read binning info from preboot */
if (hdev->support_preboot_binning) {
binning_info = &fw_loader->dynamic_loader.comm_desc.binning_info;
hdev->tpc_binning = le64_to_cpu(binning_info->tpc_mask_l);
hdev->dram_binning = le32_to_cpu(binning_info->dram_mask);
hdev->edma_binning = le32_to_cpu(binning_info->edma_mask);
hdev->decoder_binning = le32_to_cpu(binning_info->dec_mask);
hdev->rotator_binning = le32_to_cpu(binning_info->rot_mask);
rc = hdev->asic_funcs->set_dram_properties(hdev);
if (rc)
return rc;
rc = hdev->asic_funcs->set_binning_masks(hdev);
if (rc)
return rc;
dev_dbg(hdev->dev,
"Read binning masks: tpc: 0x%llx, dram: 0x%llx, edma: 0x%x, dec: 0x%x, rot:0x%x\n",
hdev->tpc_binning, hdev->dram_binning, hdev->edma_binning,
hdev->decoder_binning, hdev->rotator_binning);
}
return 0;
}
/* load boot fit to FW */
rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_BOOT_FIT,
fw_loader->boot_fit_timeout);
if (rc) {
dev_err(hdev->dev, "failed to load boot fit\n");
goto protocol_err;
}
rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader);
if (rc)
goto protocol_err;
hl_fw_boot_fit_update_state(hdev,
le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
	/*
	 * When testing FW load (without Linux) on PLDM we don't want to
	 * wait until the boot fit is active, as that may take several hours.
	 * Instead, we load the boot fit and let it do all initialization in
	 * the background.
	 */
if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX))
return 0;
/* Enable DRAM scrambling before Linux boot and after successful
* UBoot
*/
hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
if (!(hdev->fw_components & FW_TYPE_LINUX)) {
dev_info(hdev->dev, "Skip loading Linux F/W\n");
return 0;
}
if (fw_loader->skip_bmc) {
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader,
COMMS_SKIP_BMC, 0,
true,
fw_loader->cpu_timeout);
if (rc) {
			dev_err(hdev->dev, "failed to send COMMS_SKIP_BMC\n");
goto protocol_err;
}
}
/* load Linux image to FW */
rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_LINUX,
fw_loader->cpu_timeout);
if (rc) {
dev_err(hdev->dev, "failed to load Linux\n");
goto protocol_err;
}
rc = hl_fw_dynamic_wait_for_linux_active(hdev, fw_loader);
if (rc)
goto protocol_err;
hl_fw_linux_update_state(hdev,
le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
hl_fw_dynamic_update_linux_interrupt_if(hdev);
protocol_err:
if (fw_loader->dynamic_loader.fw_desc_valid) {
fw_error_rc = fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
le32_to_cpu(dyn_regs->cpu_boot_err1),
le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
if (fw_error_rc)
return fw_error_rc;
}
return rc;
}
/**
* hl_fw_static_init_cpu - initialize the device CPU using static protocol
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
*
* @return 0 on success, otherwise non-zero error code
*/
static int hl_fw_static_init_cpu(struct hl_device *hdev,
struct fw_load_mgr *fw_loader)
{
u32 cpu_msg_status_reg, cpu_timeout, msg_to_cpu_reg, status;
u32 cpu_boot_dev_status0_reg, cpu_boot_dev_status1_reg;
struct static_fw_load_mgr *static_loader;
u32 cpu_boot_status_reg;
int rc;
if (!(hdev->fw_components & FW_TYPE_BOOT_CPU))
return 0;
/* init common loader parameters */
cpu_timeout = fw_loader->cpu_timeout;
/* init static loader parameters */
static_loader = &fw_loader->static_loader;
cpu_msg_status_reg = static_loader->cpu_cmd_status_to_host_reg;
msg_to_cpu_reg = static_loader->kmd_msg_to_cpu_reg;
cpu_boot_dev_status0_reg = static_loader->cpu_boot_dev_status0_reg;
cpu_boot_dev_status1_reg = static_loader->cpu_boot_dev_status1_reg;
cpu_boot_status_reg = static_loader->cpu_boot_status_reg;
dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
cpu_timeout / USEC_PER_SEC);
/* Wait for boot FIT request */
rc = hl_poll_timeout(
hdev,
cpu_boot_status_reg,
status,
status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
hdev->fw_poll_interval_usec,
fw_loader->boot_fit_timeout);
if (rc) {
dev_dbg(hdev->dev,
"No boot fit request received (status = %d), resuming boot\n", status);
} else {
rc = hdev->asic_funcs->load_boot_fit_to_device(hdev);
if (rc)
goto out;
/* Clear device CPU message status */
WREG32(cpu_msg_status_reg, CPU_MSG_CLR);
/* Signal device CPU that boot loader is ready */
WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
/* Poll for CPU device ack */
rc = hl_poll_timeout(
hdev,
cpu_msg_status_reg,
status,
status == CPU_MSG_OK,
hdev->fw_poll_interval_usec,
fw_loader->boot_fit_timeout);
if (rc) {
dev_err(hdev->dev,
"Timeout waiting for boot fit load ack (status = %d)\n", status);
goto out;
}
/* Clear message */
WREG32(msg_to_cpu_reg, KMD_MSG_NA);
}
	/*
	 * Make sure the CPU boot-loader is running.
	 * Note that CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux,
	 * yet there is a debug scenario in which we load u-boot (without Linux),
	 * which at a later stage is relocated to DRAM. In this case we expect
	 * u-boot to set CPU_BOOT_STATUS_SRAM_AVAIL, so we add it to the
	 * poll flags.
	 */
rc = hl_poll_timeout(
hdev,
cpu_boot_status_reg,
status,
(status == CPU_BOOT_STATUS_DRAM_RDY) ||
(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
(status == CPU_BOOT_STATUS_SRAM_AVAIL),
hdev->fw_poll_interval_usec,
cpu_timeout);
dev_dbg(hdev->dev, "uboot status = %d\n", status);
/* Read U-Boot version now in case we will later fail */
hl_fw_static_read_device_fw_version(hdev, FW_COMP_BOOT_FIT);
/* update state according to boot stage */
hl_fw_boot_fit_update_state(hdev, cpu_boot_dev_status0_reg,
cpu_boot_dev_status1_reg);
if (rc) {
detect_cpu_boot_status(hdev, status);
rc = -EIO;
goto out;
}
/* Enable DRAM scrambling before Linux boot and after successful
* UBoot
*/
hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
if (!(hdev->fw_components & FW_TYPE_LINUX)) {
dev_info(hdev->dev, "Skip loading Linux F/W\n");
rc = 0;
goto out;
}
if (status == CPU_BOOT_STATUS_SRAM_AVAIL) {
rc = 0;
goto out;
}
dev_info(hdev->dev,
"Loading firmware to device, may take some time...\n");
rc = hdev->asic_funcs->load_firmware_to_device(hdev);
if (rc)
goto out;
if (fw_loader->skip_bmc) {
WREG32(msg_to_cpu_reg, KMD_MSG_SKIP_BMC);
rc = hl_poll_timeout(
hdev,
cpu_boot_status_reg,
status,
(status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
hdev->fw_poll_interval_usec,
cpu_timeout);
if (rc) {
dev_err(hdev->dev,
"Failed to get ACK on skipping BMC (status = %d)\n",
status);
WREG32(msg_to_cpu_reg, KMD_MSG_NA);
rc = -EIO;
goto out;
}
}
WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
rc = hl_poll_timeout(
hdev,
cpu_boot_status_reg,
status,
(status == CPU_BOOT_STATUS_SRAM_AVAIL),
hdev->fw_poll_interval_usec,
cpu_timeout);
/* Clear message */
WREG32(msg_to_cpu_reg, KMD_MSG_NA);
if (rc) {
if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
dev_err(hdev->dev,
"Device reports FIT image is corrupted\n");
else
dev_err(hdev->dev,
"Failed to load firmware to device (status = %d)\n",
status);
rc = -EIO;
goto out;
}
rc = fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
fw_loader->static_loader.boot_err1_reg,
cpu_boot_dev_status0_reg,
cpu_boot_dev_status1_reg);
if (rc)
return rc;
hl_fw_linux_update_state(hdev, cpu_boot_dev_status0_reg,
cpu_boot_dev_status1_reg);
return 0;
out:
fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
fw_loader->static_loader.boot_err1_reg,
cpu_boot_dev_status0_reg,
cpu_boot_dev_status1_reg);
return rc;
}
/**
* hl_fw_init_cpu - initialize the device CPU
*
* @hdev: pointer to the habanalabs device structure
*
* @return 0 on success, otherwise non-zero error code
*
 * Perform the necessary initializations for the device's CPU, taking into
 * account whether the init protocol is static or dynamic.
*/
int hl_fw_init_cpu(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
return prop->dynamic_fw_load ?
hl_fw_dynamic_init_cpu(hdev, fw_loader) :
hl_fw_static_init_cpu(hdev, fw_loader);
}
void hl_fw_set_pll_profile(struct hl_device *hdev)
{
hl_fw_set_frequency(hdev, hdev->asic_prop.clk_pll_index,
hdev->asic_prop.max_freq_value);
}
int hl_fw_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
{
long value;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
if (!hdev->pdev) {
*cur_clk = 0;
*max_clk = 0;
return 0;
}
value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, false);
if (value < 0) {
dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n", value);
return value;
}
*max_clk = (value / 1000 / 1000);
value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, true);
if (value < 0) {
dev_err(hdev->dev, "Failed to retrieve device current clock %ld\n", value);
return value;
}
*cur_clk = (value / 1000 / 1000);
return 0;
}
long hl_fw_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
{
struct cpucp_packet pkt;
u32 used_pll_idx;
u64 result;
int rc;
rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
if (rc)
return rc;
memset(&pkt, 0, sizeof(pkt));
if (curr)
pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_CURR_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
else
pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
if (rc) {
dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n",
used_pll_idx, rc);
return rc;
}
return (long) result;
}
void hl_fw_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
{
struct cpucp_packet pkt;
u32 used_pll_idx;
int rc;
rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
if (rc)
return;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
pkt.value = cpu_to_le64(freq);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to set frequency to PLL %d, error %d\n",
used_pll_idx, rc);
}
long hl_fw_get_max_power(struct hl_device *hdev)
{
struct cpucp_packet pkt;
u64 result;
int rc;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
if (rc) {
dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
return rc;
}
return result;
}
void hl_fw_set_max_power(struct hl_device *hdev)
{
struct cpucp_packet pkt;
int rc;
/* TODO: remove this after simulator supports this packet */
if (!hdev->pdev)
return;
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(hdev->max_power);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
}
static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void *data, u32 size,
u32 nonce, u32 timeout)
{
struct cpucp_packet pkt = {};
dma_addr_t req_dma_addr;
void *req_cpu_addr;
int rc;
req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr);
if (!req_cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id);
return -ENOMEM;
}
memset(data, 0, size);
pkt.ctl = cpu_to_le32(packet_id << CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.addr = cpu_to_le64(req_dma_addr);
pkt.data_max_size = cpu_to_le32(size);
pkt.nonce = cpu_to_le32(nonce);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
timeout, NULL);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
goto out;
}
memcpy(data, req_cpu_addr, size);
out:
hl_cpu_accessible_dma_pool_free(hdev, size, req_cpu_addr);
return rc;
}
int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_info *sec_attest_info,
u32 nonce)
{
return hl_fw_get_sec_attest_data(hdev, CPUCP_PACKET_SEC_ATTEST_GET, sec_attest_info,
sizeof(struct cpucp_sec_attest_info), nonce,
HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC);
}
int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode,
dma_addr_t buff, u32 *size)
{
struct cpucp_packet pkt = {};
u64 result;
int rc = 0;
pkt.ctl = cpu_to_le32(CPUCP_PACKET_GENERIC_PASSTHROUGH << CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.addr = cpu_to_le64(buff);
pkt.data_max_size = cpu_to_le32(*size);
pkt.pkt_subidx = cpu_to_le32(sub_opcode);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc)
dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n");
else
dev_dbg(hdev->dev, "generic pkt was successful, result: 0x%llx\n", result);
*size = (u32)result;
return rc;
}
| linux-master | drivers/accel/habanalabs/common/firmware_if.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
static const char * const hl_glbl_error_cause[HL_MAX_NUM_OF_GLBL_ERR_CAUSE] = {
"Error due to un-priv read",
"Error due to un-secure read",
"Error due to read from unmapped reg",
"Error due to un-priv write",
"Error due to un-secure write",
"Error due to write to unmapped reg",
"External I/F write sec violation",
"External I/F write to un-mapped reg",
"Read to write only",
"Write to read only"
};
/**
* hl_get_pb_block - return the relevant block within the block array
*
* @hdev: pointer to hl_device structure
* @mm_reg_addr: register address in the desired block
* @pb_blocks: blocks array
* @array_size: blocks array size
*
*/
static int hl_get_pb_block(struct hl_device *hdev, u32 mm_reg_addr,
const u32 pb_blocks[], int array_size)
{
int i;
u32 start_addr, end_addr;
for (i = 0 ; i < array_size ; i++) {
start_addr = pb_blocks[i];
end_addr = start_addr + HL_BLOCK_SIZE;
if ((mm_reg_addr >= start_addr) && (mm_reg_addr < end_addr))
return i;
}
dev_err(hdev->dev, "No protection domain was found for 0x%x\n",
mm_reg_addr);
return -EDOM;
}
/**
* hl_unset_pb_in_block - clear a specific protection bit in a block
*
* @hdev: pointer to hl_device structure
* @reg_offset: register offset will be converted to bit offset in pb block
* @sgs_entry: pb array
*
*/
static int hl_unset_pb_in_block(struct hl_device *hdev, u32 reg_offset,
struct hl_block_glbl_sec *sgs_entry)
{
if ((reg_offset >= HL_BLOCK_SIZE) || (reg_offset & 0x3)) {
dev_err(hdev->dev,
"Register offset(%d) is out of range(%d) or invalid\n",
reg_offset, HL_BLOCK_SIZE);
return -EINVAL;
}
UNSET_GLBL_SEC_BIT(sgs_entry->sec_array,
(reg_offset & (HL_BLOCK_SIZE - 1)) >> 2);
return 0;
}
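/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the bit
 * index computed above means every 4-byte register inside a block owns one
 * bit in the glbl_sec bitmap, e.g. a register at block offset 0x124 maps to
 * bit 0x124 >> 2 = 73.
 */
static inline u32 __maybe_unused hl_example_pb_bit_index(u32 reg_offset)
{
	return (reg_offset & (HL_BLOCK_SIZE - 1)) >> 2;
}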
/**
* hl_unsecure_register - locate the relevant block for this register and
* remove corresponding protection bit
*
* @hdev: pointer to hl_device structure
* @mm_reg_addr: register address to unsecure
* @offset: additional offset to the register address
* @pb_blocks: blocks array
* @sgs_array: pb array
* @array_size: blocks array size
*
*/
int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
const u32 pb_blocks[], struct hl_block_glbl_sec sgs_array[],
int array_size)
{
u32 reg_offset;
int block_num;
block_num = hl_get_pb_block(hdev, mm_reg_addr + offset, pb_blocks,
array_size);
if (block_num < 0)
return block_num;
reg_offset = (mm_reg_addr + offset) - pb_blocks[block_num];
return hl_unset_pb_in_block(hdev, reg_offset, &sgs_array[block_num]);
}
/**
* hl_unsecure_register_range - locate the relevant block for this register
* range and remove corresponding protection bit
*
* @hdev: pointer to hl_device structure
* @mm_reg_range: register address range to unsecure
* @offset: additional offset to the register address
* @pb_blocks: blocks array
* @sgs_array: pb array
* @array_size: blocks array size
*
*/
static int hl_unsecure_register_range(struct hl_device *hdev,
struct range mm_reg_range, int offset, const u32 pb_blocks[],
struct hl_block_glbl_sec sgs_array[],
int array_size)
{
u32 reg_offset;
int i, block_num, rc = 0;
block_num = hl_get_pb_block(hdev,
mm_reg_range.start + offset, pb_blocks,
array_size);
if (block_num < 0)
return block_num;
for (i = mm_reg_range.start ; i <= mm_reg_range.end ; i += 4) {
reg_offset = (i + offset) - pb_blocks[block_num];
rc |= hl_unset_pb_in_block(hdev, reg_offset,
&sgs_array[block_num]);
}
return rc;
}
/**
* hl_unsecure_registers - locate the relevant block for all registers and
* remove corresponding protection bit
*
* @hdev: pointer to hl_device structure
* @mm_reg_array: register address array to unsecure
* @mm_array_size: register array size
* @offset: additional offset to the register address
* @pb_blocks: blocks array
* @sgs_array: pb array
* @blocks_array_size: blocks array size
*
*/
int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
int mm_array_size, int offset, const u32 pb_blocks[],
struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
int i, rc = 0;
for (i = 0 ; i < mm_array_size ; i++) {
rc = hl_unsecure_register(hdev, mm_reg_array[i], offset,
pb_blocks, sgs_array, blocks_array_size);
if (rc)
return rc;
}
return rc;
}
/**
* hl_unsecure_registers_range - locate the relevant block for all register
* ranges and remove corresponding protection bit
*
* @hdev: pointer to hl_device structure
* @mm_reg_range_array: register address range array to unsecure
* @mm_array_size: register array size
* @offset: additional offset to the register address
* @pb_blocks: blocks array
* @sgs_array: pb array
* @blocks_array_size: blocks array size
*
*/
static int hl_unsecure_registers_range(struct hl_device *hdev,
const struct range mm_reg_range_array[], int mm_array_size,
int offset, const u32 pb_blocks[],
struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
int i, rc = 0;
for (i = 0 ; i < mm_array_size ; i++) {
rc = hl_unsecure_register_range(hdev, mm_reg_range_array[i],
offset, pb_blocks, sgs_array, blocks_array_size);
if (rc)
return rc;
}
return rc;
}
/**
* hl_ack_pb_security_violations - Ack security violation
*
* @hdev: pointer to hl_device structure
* @pb_blocks: blocks array
* @block_offset: additional offset to the block
* @array_size: blocks array size
*
*/
static void hl_ack_pb_security_violations(struct hl_device *hdev,
const u32 pb_blocks[], u32 block_offset, int array_size)
{
int i;
u32 cause, addr, block_base;
for (i = 0 ; i < array_size ; i++) {
block_base = pb_blocks[i] + block_offset;
cause = RREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE);
if (cause) {
addr = RREG32(block_base + HL_BLOCK_GLBL_ERR_ADDR);
hdev->asic_funcs->pb_print_security_errors(hdev,
block_base, cause, addr);
WREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE, cause);
}
}
}
/**
* hl_config_glbl_sec - set pb in HW according to given pb array
*
* @hdev: pointer to hl_device structure
* @pb_blocks: blocks array
* @sgs_array: pb array
* @block_offset: additional offset to the block
* @array_size: blocks array size
*
*/
void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
struct hl_block_glbl_sec sgs_array[], u32 block_offset,
int array_size)
{
int i, j;
u32 sgs_base;
if (hdev->pldm)
usleep_range(100, 1000);
for (i = 0 ; i < array_size ; i++) {
sgs_base = block_offset + pb_blocks[i] +
HL_BLOCK_GLBL_SEC_OFFS;
for (j = 0 ; j < HL_BLOCK_GLBL_SEC_LEN ; j++)
WREG32(sgs_base + j * sizeof(u32),
sgs_array[i].sec_array[j]);
}
}
/**
* hl_secure_block - locally memsets a block to 0
*
* @hdev: pointer to hl_device structure
* @sgs_array: pb array to clear
* @array_size: blocks array size
*
*/
void hl_secure_block(struct hl_device *hdev,
struct hl_block_glbl_sec sgs_array[], int array_size)
{
int i;
for (i = 0 ; i < array_size ; i++)
memset((char *)(sgs_array[i].sec_array), 0,
HL_BLOCK_GLBL_SEC_SIZE);
}
/**
* hl_init_pb_with_mask - set selected pb instances with mask in HW according
* to given configuration
*
* @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
* set to HL_PB_SHARED if need to apply only once
* @dcore_offset: offset between dcores
* @num_instances: number of instances to apply configuration to
* @instance_offset: offset between instances
* @pb_blocks: blocks array
* @blocks_array_size: blocks array size
* @user_regs_array: unsecured register array
* @user_regs_array_size: unsecured register array size
* @mask: enabled instances mask: 1- enabled, 0- disabled
*/
int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
u32 dcore_offset, u32 num_instances, u32 instance_offset,
const u32 pb_blocks[], u32 blocks_array_size,
const u32 *user_regs_array, u32 user_regs_array_size, u64 mask)
{
int i, j;
struct hl_block_glbl_sec *glbl_sec;
glbl_sec = kcalloc(blocks_array_size,
sizeof(struct hl_block_glbl_sec),
GFP_KERNEL);
if (!glbl_sec)
return -ENOMEM;
hl_secure_block(hdev, glbl_sec, blocks_array_size);
hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size, 0,
pb_blocks, glbl_sec, blocks_array_size);
/* Fill all blocks with the same configuration */
for (i = 0 ; i < num_dcores ; i++) {
for (j = 0 ; j < num_instances ; j++) {
int seq = i * num_instances + j;
if (!(mask & BIT_ULL(seq)))
continue;
hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
i * dcore_offset + j * instance_offset,
blocks_array_size);
}
}
kfree(glbl_sec);
return 0;
}
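/*
 * Illustrative usage sketch (all numeric values are hypothetical): protect
 * the blocks of 2 dcores with 4 instances each, but leave instance 3 of
 * dcore 1 untouched by clearing its sequence bit (1 * 4 + 3 = 7) in the mask.
 */
static int __maybe_unused hl_example_init_pb_skip_one(struct hl_device *hdev,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *user_regs_array, u32 user_regs_array_size)
{
	u64 mask = ULLONG_MAX & ~BIT_ULL(7);

	return hl_init_pb_with_mask(hdev, 2, 0x400000, 4, 0x20000,
				    pb_blocks, blocks_array_size,
				    user_regs_array, user_regs_array_size,
				    mask);
}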
/**
* hl_init_pb - set pb in HW according to given configuration
*
* @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
* set to HL_PB_SHARED if need to apply only once
* @dcore_offset: offset between dcores
* @num_instances: number of instances to apply configuration to
* @instance_offset: offset between instances
* @pb_blocks: blocks array
* @blocks_array_size: blocks array size
* @user_regs_array: unsecured register array
* @user_regs_array_size: unsecured register array size
*
*/
int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
u32 num_instances, u32 instance_offset,
const u32 pb_blocks[], u32 blocks_array_size,
const u32 *user_regs_array, u32 user_regs_array_size)
{
return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset,
num_instances, instance_offset, pb_blocks,
blocks_array_size, user_regs_array,
user_regs_array_size, ULLONG_MAX);
}
/**
* hl_init_pb_ranges_with_mask - set pb instances using mask in HW according to
 *                               given configuration, unsecuring register
 *                               ranges instead of specific registers
*
* @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied only once
* @dcore_offset: offset between dcores
* @num_instances: number of instances to apply configuration to
* @instance_offset: offset between instances
* @pb_blocks: blocks array
* @blocks_array_size: blocks array size
* @user_regs_range_array: unsecured register range array
* @user_regs_range_array_size: unsecured register range array size
* @mask: enabled instances mask: 1- enabled, 0- disabled
*/
int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
u32 dcore_offset, u32 num_instances, u32 instance_offset,
const u32 pb_blocks[], u32 blocks_array_size,
const struct range *user_regs_range_array,
u32 user_regs_range_array_size, u64 mask)
{
int i, j, rc = 0;
struct hl_block_glbl_sec *glbl_sec;
glbl_sec = kcalloc(blocks_array_size,
sizeof(struct hl_block_glbl_sec),
GFP_KERNEL);
if (!glbl_sec)
return -ENOMEM;
hl_secure_block(hdev, glbl_sec, blocks_array_size);
rc = hl_unsecure_registers_range(hdev, user_regs_range_array,
user_regs_range_array_size, 0, pb_blocks, glbl_sec,
blocks_array_size);
if (rc)
goto free_glbl_sec;
/* Fill all blocks with the same configuration */
for (i = 0 ; i < num_dcores ; i++) {
for (j = 0 ; j < num_instances ; j++) {
int seq = i * num_instances + j;
if (!(mask & BIT_ULL(seq)))
continue;
hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
i * dcore_offset + j * instance_offset,
blocks_array_size);
}
}
free_glbl_sec:
kfree(glbl_sec);
return rc;
}
/**
 * hl_init_pb_ranges - set pb in HW according to given configuration, unsecuring
 *                     register ranges instead of specific registers
*
* @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied only once
* @dcore_offset: offset between dcores
* @num_instances: number of instances to apply configuration to
* @instance_offset: offset between instances
* @pb_blocks: blocks array
* @blocks_array_size: blocks array size
* @user_regs_range_array: unsecured register range array
* @user_regs_range_array_size: unsecured register range array size
*
*/
int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
u32 dcore_offset, u32 num_instances, u32 instance_offset,
const u32 pb_blocks[], u32 blocks_array_size,
const struct range *user_regs_range_array,
u32 user_regs_range_array_size)
{
return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset,
num_instances, instance_offset, pb_blocks,
blocks_array_size, user_regs_range_array,
user_regs_range_array_size, ULLONG_MAX);
}
/**
 * hl_init_pb_single_dcore - set pb for a single dcore in HW
* according to given configuration
*
* @hdev: pointer to hl_device structure
* @dcore_offset: offset from the dcore0
* @num_instances: number of instances to apply configuration to
* @instance_offset: offset between instances
* @pb_blocks: blocks array
* @blocks_array_size: blocks array size
* @user_regs_array: unsecured register array
* @user_regs_array_size: unsecured register array size
*
*/
int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
u32 num_instances, u32 instance_offset,
const u32 pb_blocks[], u32 blocks_array_size,
const u32 *user_regs_array, u32 user_regs_array_size)
{
int i, rc = 0;
struct hl_block_glbl_sec *glbl_sec;
glbl_sec = kcalloc(blocks_array_size,
sizeof(struct hl_block_glbl_sec),
GFP_KERNEL);
if (!glbl_sec)
return -ENOMEM;
hl_secure_block(hdev, glbl_sec, blocks_array_size);
rc = hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size,
0, pb_blocks, glbl_sec, blocks_array_size);
if (rc)
goto free_glbl_sec;
/* Fill all blocks with the same configuration */
for (i = 0 ; i < num_instances ; i++)
hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
dcore_offset + i * instance_offset,
blocks_array_size);
free_glbl_sec:
kfree(glbl_sec);
return rc;
}
/**
 * hl_init_pb_ranges_single_dcore - set pb for a single dcore in HW according
 *                                  to given configuration, unsecuring
 *                                  register ranges instead of specific
 *                                  registers
*
* @hdev: pointer to hl_device structure
* @dcore_offset: offset from the dcore0
* @num_instances: number of instances to apply configuration to
* @instance_offset: offset between instances
* @pb_blocks: blocks array
* @blocks_array_size: blocks array size
* @user_regs_range_array: unsecured register range array
* @user_regs_range_array_size: unsecured register range array size
*
*/
int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
u32 num_instances, u32 instance_offset,
const u32 pb_blocks[], u32 blocks_array_size,
const struct range *user_regs_range_array, u32 user_regs_range_array_size)
{
int i;
struct hl_block_glbl_sec *glbl_sec;
glbl_sec = kcalloc(blocks_array_size,
sizeof(struct hl_block_glbl_sec),
GFP_KERNEL);
if (!glbl_sec)
return -ENOMEM;
hl_secure_block(hdev, glbl_sec, blocks_array_size);
hl_unsecure_registers_range(hdev, user_regs_range_array,
user_regs_range_array_size, 0, pb_blocks, glbl_sec,
blocks_array_size);
/* Fill all blocks with the same configuration */
for (i = 0 ; i < num_instances ; i++)
hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
dcore_offset + i * instance_offset,
blocks_array_size);
kfree(glbl_sec);
return 0;
}
/**
* hl_ack_pb_with_mask - ack pb with mask in HW according to given configuration
*
* @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied only once
* @dcore_offset: offset between dcores
* @num_instances: number of instances to apply configuration to
* @instance_offset: offset between instances
* @pb_blocks: blocks array
* @blocks_array_size: blocks array size
* @mask: enabled instances mask: 1- enabled, 0- disabled
*
*/
void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
u32 dcore_offset, u32 num_instances, u32 instance_offset,
const u32 pb_blocks[], u32 blocks_array_size, u64 mask)
{
int i, j;
/* ack all blocks */
for (i = 0 ; i < num_dcores ; i++) {
for (j = 0 ; j < num_instances ; j++) {
int seq = i * num_instances + j;
if (!(mask & BIT_ULL(seq)))
continue;
hl_ack_pb_security_violations(hdev, pb_blocks,
i * dcore_offset + j * instance_offset,
blocks_array_size);
}
}
}
/**
* hl_ack_pb - ack pb in HW according to given configuration
*
* @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied only once
* @dcore_offset: offset between dcores
* @num_instances: number of instances to apply configuration to
* @instance_offset: offset between instances
* @pb_blocks: blocks array
* @blocks_array_size: blocks array size
*
*/
void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
u32 num_instances, u32 instance_offset,
const u32 pb_blocks[], u32 blocks_array_size)
{
hl_ack_pb_with_mask(hdev, num_dcores, dcore_offset, num_instances,
instance_offset, pb_blocks, blocks_array_size,
ULLONG_MAX);
}
/**
 * hl_ack_pb_single_dcore - ack pb for a single dcore in HW
* according to given configuration
*
* @hdev: pointer to hl_device structure
* @dcore_offset: offset from dcore0
* @num_instances: number of instances to apply configuration to
* @instance_offset: offset between instances
* @pb_blocks: blocks array
* @blocks_array_size: blocks array size
*
*/
void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
u32 num_instances, u32 instance_offset,
const u32 pb_blocks[], u32 blocks_array_size)
{
int i;
/* ack all blocks */
for (i = 0 ; i < num_instances ; i++)
hl_ack_pb_security_violations(hdev, pb_blocks,
dcore_offset + i * instance_offset,
blocks_array_size);
}
static u32 hl_automated_get_block_base_addr(struct hl_device *hdev,
struct hl_special_block_info *block_info,
u32 major, u32 minor, u32 sub_minor)
{
u32 fw_block_base_address = block_info->base_addr +
major * block_info->major_offset +
minor * block_info->minor_offset +
sub_minor * block_info->sub_minor_offset;
struct asic_fixed_properties *prop = &hdev->asic_prop;
/* Calculation above returns an address for FW use, and therefore should
	 * be cast for driver use.
*/
return (fw_block_base_address - lower_32_bits(prop->cfg_base_address));
}
static bool hl_check_block_type_exclusion(struct hl_skip_blocks_cfg *skip_blocks_cfg,
int block_type)
{
int i;
/* Check if block type is listed in the exclusion list of block types */
for (i = 0 ; i < skip_blocks_cfg->block_types_len ; i++)
if (block_type == skip_blocks_cfg->block_types[i])
return true;
return false;
}
static bool hl_check_block_range_exclusion(struct hl_device *hdev,
struct hl_skip_blocks_cfg *skip_blocks_cfg,
struct hl_special_block_info *block_info,
u32 major, u32 minor, u32 sub_minor)
{
u32 blocks_in_range, block_base_addr_in_range, block_base_addr;
int i, j;
block_base_addr = hl_automated_get_block_base_addr(hdev, block_info,
major, minor, sub_minor);
for (i = 0 ; i < skip_blocks_cfg->block_ranges_len ; i++) {
blocks_in_range = (skip_blocks_cfg->block_ranges[i].end -
skip_blocks_cfg->block_ranges[i].start) /
HL_BLOCK_SIZE + 1;
for (j = 0 ; j < blocks_in_range ; j++) {
block_base_addr_in_range = skip_blocks_cfg->block_ranges[i].start +
j * HL_BLOCK_SIZE;
if (block_base_addr == block_base_addr_in_range)
return true;
}
}
return false;
}
static int hl_read_glbl_errors(struct hl_device *hdev,
u32 blk_idx, u32 major, u32 minor, u32 sub_minor, void *data)
{
struct hl_special_block_info *special_blocks = hdev->asic_prop.special_blocks;
struct hl_special_block_info *current_block = &special_blocks[blk_idx];
u32 glbl_err_addr, glbl_err_cause, addr_val, cause_val, block_base,
base = current_block->base_addr - lower_32_bits(hdev->asic_prop.cfg_base_address);
int i;
block_base = base + major * current_block->major_offset +
minor * current_block->minor_offset +
sub_minor * current_block->sub_minor_offset;
glbl_err_cause = block_base + HL_GLBL_ERR_CAUSE_OFFSET;
cause_val = RREG32(glbl_err_cause);
if (!cause_val)
return 0;
glbl_err_addr = block_base + HL_GLBL_ERR_ADDR_OFFSET;
addr_val = RREG32(glbl_err_addr);
for (i = 0 ; i < hdev->asic_prop.glbl_err_cause_num ; i++) {
if (cause_val & BIT(i))
dev_err_ratelimited(hdev->dev,
"%s, addr %#llx\n",
hl_glbl_error_cause[i],
hdev->asic_prop.cfg_base_address + block_base +
FIELD_GET(HL_GLBL_ERR_ADDRESS_MASK, addr_val));
}
WREG32(glbl_err_cause, cause_val);
return 0;
}
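/*
 * hl_check_for_glbl_errors - scan all special blocks for global errors by
 * iterating over them with hl_read_glbl_errors() as the per-block callback,
 * honoring the skip configuration in the ASIC properties. A failed iteration
 * is only reported (rate-limited) and is not propagated to the caller.
 */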
void hl_check_for_glbl_errors(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_special_blocks_cfg special_blocks_cfg;
struct iterate_special_ctx glbl_err_iter;
int rc;
memset(&special_blocks_cfg, 0, sizeof(special_blocks_cfg));
special_blocks_cfg.skip_blocks_cfg = &prop->skip_special_blocks_cfg;
glbl_err_iter.fn = &hl_read_glbl_errors;
glbl_err_iter.data = &special_blocks_cfg;
rc = hl_iterate_special_blocks(hdev, &glbl_err_iter);
if (rc)
dev_err_ratelimited(hdev->dev,
"Could not iterate special blocks, glbl error check failed\n");
}
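/*
 * hl_iterate_special_blocks - walk every (major, minor, sub_minor) instance of
 * every special block and invoke ctx->fn on it. Instances are skipped when
 * their block type is excluded, when their base address falls inside an
 * excluded range, or when the optional skip_block_hook asks to skip them.
 * Iteration stops on the first callback error; -EINVAL is returned when no
 * special-blocks array is defined.
 */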
int hl_iterate_special_blocks(struct hl_device *hdev, struct iterate_special_ctx *ctx)
{
struct hl_special_blocks_cfg *special_blocks_cfg =
(struct hl_special_blocks_cfg *)ctx->data;
struct hl_skip_blocks_cfg *skip_blocks_cfg =
special_blocks_cfg->skip_blocks_cfg;
u32 major, minor, sub_minor, blk_idx, num_blocks;
struct hl_special_block_info *block_info_arr;
int rc;
block_info_arr = hdev->asic_prop.special_blocks;
if (!block_info_arr)
return -EINVAL;
num_blocks = hdev->asic_prop.num_of_special_blocks;
for (blk_idx = 0 ; blk_idx < num_blocks ; blk_idx++, block_info_arr++) {
if (hl_check_block_type_exclusion(skip_blocks_cfg, block_info_arr->block_type))
continue;
for (major = 0 ; major < block_info_arr->major ; major++) {
minor = 0;
do {
sub_minor = 0;
do {
if ((hl_check_block_range_exclusion(hdev,
skip_blocks_cfg, block_info_arr,
major, minor, sub_minor)) ||
(skip_blocks_cfg->skip_block_hook &&
skip_blocks_cfg->skip_block_hook(hdev,
special_blocks_cfg,
blk_idx, major, minor, sub_minor))) {
sub_minor++;
continue;
}
rc = ctx->fn(hdev, blk_idx, major, minor,
sub_minor, ctx->data);
if (rc)
return rc;
sub_minor++;
} while (sub_minor < block_info_arr->sub_minor);
minor++;
} while (minor < block_info_arr->minor);
}
}
return 0;
}
| linux-master | drivers/accel/habanalabs/common/security.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2019 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#define CB_VA_POOL_SIZE (4UL * SZ_1G)
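/*
 * cb_map_mem - map a CB into the device MMU. A device virtual address is taken
 * from the context's CB VA pool, the CB is mapped contiguously to its bus
 * address and the MMU cache is invalidated. Returns -EINVAL when the device
 * does not support CB mapping; on failure the mapping and the VA allocation
 * are rolled back.
 */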
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 page_size = prop->pmmu.page_size;
int rc;
if (!hdev->supports_cb_mapping) {
dev_err_ratelimited(hdev->dev,
"Mapping a CB to the device's MMU is not supported\n");
return -EINVAL;
}
if (cb->is_mmu_mapped)
return 0;
cb->roundup_size = roundup(cb->size, page_size);
cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
if (!cb->virtual_addr) {
dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");
return -ENOMEM;
}
mutex_lock(&hdev->mmu_lock);
rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
if (rc) {
dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
goto err_va_pool_free;
}
rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
if (rc)
goto err_mmu_unmap;
mutex_unlock(&hdev->mmu_lock);
cb->is_mmu_mapped = true;
return 0;
err_mmu_unmap:
hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
err_va_pool_free:
mutex_unlock(&hdev->mmu_lock);
gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
return rc;
}
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
struct hl_device *hdev = ctx->hdev;
mutex_lock(&hdev->mmu_lock);
hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
mutex_unlock(&hdev->mmu_lock);
gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
}
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
if (cb->is_internal)
gen_pool_free(hdev->internal_cb_pool,
(uintptr_t)cb->kernel_address, cb->size);
else
hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address);
kfree(cb);
}
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
if (cb->is_pool) {
atomic_set(&cb->is_handle_destroyed, 0);
spin_lock(&hdev->cb_pool_lock);
list_add(&cb->pool_list, &hdev->cb_pool);
spin_unlock(&hdev->cb_pool_lock);
} else {
cb_fini(hdev, cb);
}
}
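/*
 * hl_cb_alloc - allocate a CB descriptor and its backing memory. Internal CBs
 * are carved out of the internal CB pool, kernel-context CBs use coherent DMA
 * memory (GFP_ATOMIC first, falling back to GFP_KERNEL) and user-context CBs
 * use zeroed GFP_USER coherent DMA memory.
 */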
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
int ctx_id, bool internal_cb)
{
struct hl_cb *cb = NULL;
u32 cb_offset;
void *p;
/*
	 * We use GFP_ATOMIC here because this function can be called from
* the latency-sensitive code path for command submission. Due to H/W
* limitations in some of the ASICs, the kernel must copy the user CB
* that is designated for an external queue and actually enqueue
* the kernel's copy. Hence, we must never sleep in this code section
* and must use GFP_ATOMIC for all memory allocations.
*/
if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
if (!cb)
cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return NULL;
if (internal_cb) {
p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
if (!p) {
kfree(cb);
return NULL;
}
cb_offset = p - hdev->internal_cb_pool_virt_addr;
cb->is_internal = true;
cb->bus_address = hdev->internal_cb_va_base + cb_offset;
} else if (ctx_id == HL_KERNEL_ASID_ID) {
p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
if (!p)
p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
} else {
p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
GFP_USER | __GFP_ZERO);
}
if (!p) {
dev_err(hdev->dev,
"failed to allocate %d of dma memory for CB\n",
cb_size);
kfree(cb);
return NULL;
}
cb->kernel_address = p;
cb->size = cb_size;
return cb;
}
struct hl_cb_mmap_mem_alloc_args {
struct hl_device *hdev;
struct hl_ctx *ctx;
u32 cb_size;
bool internal_cb;
bool map_cb;
};
static void hl_cb_mmap_mem_release(struct hl_mmap_mem_buf *buf)
{
struct hl_cb *cb = buf->private;
hl_debugfs_remove_cb(cb);
if (cb->is_mmu_mapped)
cb_unmap_mem(cb->ctx, cb);
hl_ctx_put(cb->ctx);
cb_do_release(cb->hdev, cb);
}
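/*
 * hl_cb_mmap_mem_alloc - memory-manager allocation callback for CBs. The size
 * is rounded up to at least PAGE_SIZE, a kernel-context request may reuse a CB
 * from the device CB pool, and otherwise a new CB is allocated. The CB is then
 * tied to its context and buffer, optionally mapped to the device MMU (user
 * contexts only) and registered with debugfs.
 */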
static int hl_cb_mmap_mem_alloc(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
struct hl_cb_mmap_mem_alloc_args *cb_args = args;
struct hl_cb *cb;
int rc, ctx_id = cb_args->ctx->asid;
bool alloc_new_cb = true;
if (!cb_args->internal_cb) {
/* Minimum allocation must be PAGE SIZE */
if (cb_args->cb_size < PAGE_SIZE)
cb_args->cb_size = PAGE_SIZE;
if (ctx_id == HL_KERNEL_ASID_ID &&
cb_args->cb_size <= cb_args->hdev->asic_prop.cb_pool_cb_size) {
spin_lock(&cb_args->hdev->cb_pool_lock);
if (!list_empty(&cb_args->hdev->cb_pool)) {
cb = list_first_entry(&cb_args->hdev->cb_pool,
typeof(*cb), pool_list);
list_del(&cb->pool_list);
spin_unlock(&cb_args->hdev->cb_pool_lock);
alloc_new_cb = false;
} else {
spin_unlock(&cb_args->hdev->cb_pool_lock);
dev_dbg(cb_args->hdev->dev, "CB pool is empty\n");
}
}
}
if (alloc_new_cb) {
cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb);
if (!cb)
return -ENOMEM;
}
cb->hdev = cb_args->hdev;
cb->ctx = cb_args->ctx;
cb->buf = buf;
cb->buf->mappable_size = cb->size;
cb->buf->private = cb;
hl_ctx_get(cb->ctx);
if (cb_args->map_cb) {
if (ctx_id == HL_KERNEL_ASID_ID) {
dev_err(cb_args->hdev->dev,
"CB mapping is not supported for kernel context\n");
rc = -EINVAL;
goto release_cb;
}
rc = cb_map_mem(cb_args->ctx, cb);
if (rc)
goto release_cb;
}
hl_debugfs_add_cb(cb);
return 0;
release_cb:
hl_ctx_put(cb->ctx);
cb_do_release(cb_args->hdev, cb);
return rc;
}
static int hl_cb_mmap(struct hl_mmap_mem_buf *buf,
struct vm_area_struct *vma, void *args)
{
struct hl_cb *cb = buf->private;
return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address,
cb->bus_address, cb->size);
}
static struct hl_mmap_mem_buf_behavior cb_behavior = {
.topic = "CB",
.mem_id = HL_MMAP_TYPE_CB,
.alloc = hl_cb_mmap_mem_alloc,
.release = hl_cb_mmap_mem_release,
.mmap = hl_cb_mmap,
};
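/*
 * hl_cb_create - allocate a CB through the memory manager and return its
 * handle. Creation is refused while the device is disabled (or in reset, for
 * user contexts) and for CB sizes above SZ_2M.
 */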
int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
bool map_cb, u64 *handle)
{
struct hl_cb_mmap_mem_alloc_args args = {
.hdev = hdev,
.ctx = ctx,
.cb_size = cb_size,
.internal_cb = internal_cb,
.map_cb = map_cb,
};
struct hl_mmap_mem_buf *buf;
int ctx_id = ctx->asid;
if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
dev_warn_ratelimited(hdev->dev,
"Device is disabled or in reset. Can't create new CBs\n");
return -EBUSY;
}
if (cb_size > SZ_2M) {
dev_err(hdev->dev, "CB size %d must be less than %d\n",
cb_size, SZ_2M);
return -EINVAL;
}
buf = hl_mmap_mem_buf_alloc(
mmg, &cb_behavior,
ctx_id == HL_KERNEL_ASID_ID ? GFP_ATOMIC : GFP_KERNEL, &args);
if (!buf)
return -ENOMEM;
*handle = buf->handle;
return 0;
}
int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
{
struct hl_cb *cb;
int rc;
cb = hl_cb_get(mmg, cb_handle);
if (!cb) {
dev_dbg(mmg->dev, "CB destroy failed, no CB was found for handle %#llx\n",
cb_handle);
return -EINVAL;
}
/* Make sure that CB handle isn't destroyed more than once */
rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
hl_cb_put(cb);
if (rc) {
dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n",
cb_handle);
return -EINVAL;
}
rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle);
if (rc < 0)
return rc; /* Invalid handle */
if (rc == 0)
dev_dbg(mmg->dev, "CB 0x%llx is destroyed while still in use\n", cb_handle);
return 0;
}
static int hl_cb_info(struct hl_mem_mgr *mmg,
u64 handle, u32 flags, u32 *usage_cnt, u64 *device_va)
{
struct hl_cb *cb;
int rc = 0;
cb = hl_cb_get(mmg, handle);
if (!cb) {
dev_err(mmg->dev,
"CB info failed, no match to handle 0x%llx\n", handle);
return -EINVAL;
}
if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
if (cb->is_mmu_mapped) {
*device_va = cb->virtual_addr;
} else {
dev_err(mmg->dev, "CB is not mapped to the device's MMU\n");
rc = -EINVAL;
goto out;
}
} else {
*usage_cnt = atomic_read(&cb->cs_cnt);
}
out:
hl_cb_put(cb);
return rc;
}
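/*
 * hl_cb_ioctl - CB IOCTL dispatcher. HL_CB_OP_CREATE allocates a CB (bounded
 * by HL_MAX_CB_SIZE) and returns its handle, HL_CB_OP_DESTROY drops the
 * handle, and HL_CB_OP_INFO returns either the device VA of an MMU-mapped CB
 * or its usage count, depending on the flags.
 */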
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
union hl_cb_args *args = data;
struct hl_device *hdev = hpriv->hdev;
u64 handle = 0, device_va = 0;
enum hl_device_status status;
u32 usage_cnt = 0;
int rc;
if (!hl_device_operational(hdev, &status)) {
dev_dbg_ratelimited(hdev->dev,
"Device is %s. Can't execute CB IOCTL\n",
hdev->status[status]);
return -EBUSY;
}
switch (args->in.op) {
case HL_CB_OP_CREATE:
if (args->in.cb_size > HL_MAX_CB_SIZE) {
dev_err(hdev->dev,
"User requested CB size %d must be less than %d\n",
args->in.cb_size, HL_MAX_CB_SIZE);
rc = -EINVAL;
} else {
rc = hl_cb_create(hdev, &hpriv->mem_mgr, hpriv->ctx,
args->in.cb_size, false,
!!(args->in.flags & HL_CB_FLAGS_MAP),
&handle);
}
memset(args, 0, sizeof(*args));
args->out.cb_handle = handle;
break;
case HL_CB_OP_DESTROY:
rc = hl_cb_destroy(&hpriv->mem_mgr,
args->in.cb_handle);
break;
case HL_CB_OP_INFO:
rc = hl_cb_info(&hpriv->mem_mgr, args->in.cb_handle,
args->in.flags,
&usage_cnt,
&device_va);
if (rc)
break;
memset(&args->out, 0, sizeof(args->out));
if (args->in.flags & HL_CB_FLAGS_GET_DEVICE_VA)
args->out.device_va = device_va;
else
args->out.usage_cnt = usage_cnt;
break;
default:
rc = -EINVAL;
break;
}
return rc;
}
struct hl_cb *hl_cb_get(struct hl_mem_mgr *mmg, u64 handle)
{
struct hl_mmap_mem_buf *buf;
buf = hl_mmap_mem_buf_get(mmg, handle);
if (!buf)
return NULL;
return buf->private;
}
void hl_cb_put(struct hl_cb *cb)
{
hl_mmap_mem_buf_put(cb->buf);
}
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
bool internal_cb)
{
u64 cb_handle;
struct hl_cb *cb;
int rc;
rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, cb_size,
internal_cb, false, &cb_handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate CB for the kernel driver %d\n", rc);
return NULL;
}
cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle);
/* hl_cb_get should never fail here */
if (!cb) {
dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
(u32) cb_handle);
goto destroy_cb;
}
return cb;
destroy_cb:
hl_cb_destroy(&hdev->kernel_mem_mgr, cb_handle);
return NULL;
}
int hl_cb_pool_init(struct hl_device *hdev)
{
struct hl_cb *cb;
int i;
INIT_LIST_HEAD(&hdev->cb_pool);
spin_lock_init(&hdev->cb_pool_lock);
for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
HL_KERNEL_ASID_ID, false);
if (cb) {
cb->is_pool = true;
list_add(&cb->pool_list, &hdev->cb_pool);
} else {
hl_cb_pool_fini(hdev);
return -ENOMEM;
}
}
return 0;
}
int hl_cb_pool_fini(struct hl_device *hdev)
{
struct hl_cb *cb, *tmp;
list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
list_del(&cb->pool_list);
cb_fini(hdev, cb);
}
return 0;
}
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
if (!hdev->supports_cb_mapping)
return 0;
ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
if (!ctx->cb_va_pool) {
dev_err(hdev->dev,
"Failed to create VA gen pool for CB mapping\n");
return -ENOMEM;
}
ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
CB_VA_POOL_SIZE, HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
if (!ctx->cb_va_pool_base) {
rc = -ENOMEM;
goto err_pool_destroy;
}
rc = gen_pool_add(ctx->cb_va_pool, ctx->cb_va_pool_base, CB_VA_POOL_SIZE, -1);
if (rc) {
dev_err(hdev->dev,
"Failed to add memory to VA gen pool for CB mapping\n");
goto err_unreserve_va_block;
}
return 0;
err_unreserve_va_block:
hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
err_pool_destroy:
gen_pool_destroy(ctx->cb_va_pool);
return rc;
}
void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
if (!hdev->supports_cb_mapping)
return;
gen_pool_destroy(ctx->cb_va_pool);
hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
}
| linux-master | drivers/accel/habanalabs/common/command_buffer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include <linux/uaccess.h>
#include <linux/slab.h>
#define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \
HL_CS_FLAGS_ENGINES_COMMAND | HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
#define MAX_TS_ITER_NUM 100
/**
* enum hl_cs_wait_status - cs wait status
* @CS_WAIT_STATUS_BUSY: cs was not completed yet
* @CS_WAIT_STATUS_COMPLETED: cs completed
* @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
*/
enum hl_cs_wait_status {
CS_WAIT_STATUS_BUSY,
CS_WAIT_STATUS_COMPLETED,
CS_WAIT_STATUS_GONE
};
static void job_wq_completion(struct work_struct *work);
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
enum hl_cs_wait_status *status, s64 *timestamp);
static void cs_do_release(struct kref *ref);
static void hl_push_cs_outcome(struct hl_device *hdev,
struct hl_cs_outcome_store *outcome_store,
u64 seq, ktime_t ts, int error)
{
struct hl_cs_outcome *node;
unsigned long flags;
/*
* CS outcome store supports the following operations:
* push outcome - store a recent CS outcome in the store
* pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store
* It uses 2 lists: used list and free list.
* It has a pre-allocated amount of nodes, each node stores
* a single CS outcome.
* Initially, all the nodes are in the free list.
* On push outcome, a node (any) is taken from the free list, its
* information is filled in, and the node is moved to the used list.
	 * It is possible that there are no nodes left in the free list.
* In this case, we will lose some information about old outcomes. We
* will pop the OLDEST node from the used list, and make it free.
* On pop, the node is searched for in the used list (using a search
* index).
* If found, the node is then removed from the used list, and moved
* back to the free list. The outcome data that the node contained is
* returned back to the user.
*/
spin_lock_irqsave(&outcome_store->db_lock, flags);
if (list_empty(&outcome_store->free_list)) {
node = list_last_entry(&outcome_store->used_list,
struct hl_cs_outcome, list_link);
hash_del(&node->map_link);
dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq);
} else {
node = list_last_entry(&outcome_store->free_list,
struct hl_cs_outcome, list_link);
}
list_del_init(&node->list_link);
node->seq = seq;
node->ts = ts;
node->error = error;
list_add(&node->list_link, &outcome_store->used_list);
hash_add(outcome_store->outcome_map, &node->map_link, node->seq);
spin_unlock_irqrestore(&outcome_store->db_lock, flags);
}
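/*
 * hl_pop_cs_outcome - look up the outcome of CS @seq in the store. If found,
 * its timestamp and error are copied out, the node is recycled back to the
 * free list and true is returned; otherwise false is returned.
 */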
static bool hl_pop_cs_outcome(struct hl_cs_outcome_store *outcome_store,
u64 seq, ktime_t *ts, int *error)
{
struct hl_cs_outcome *node;
unsigned long flags;
spin_lock_irqsave(&outcome_store->db_lock, flags);
hash_for_each_possible(outcome_store->outcome_map, node, map_link, seq)
if (node->seq == seq) {
*ts = node->ts;
*error = node->error;
hash_del(&node->map_link);
list_del_init(&node->list_link);
list_add(&node->list_link, &outcome_store->free_list);
spin_unlock_irqrestore(&outcome_store->db_lock, flags);
return true;
}
spin_unlock_irqrestore(&outcome_store->db_lock, flags);
return false;
}
static void hl_sob_reset(struct kref *ref)
{
struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
kref);
struct hl_device *hdev = hw_sob->hdev;
dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);
hdev->asic_funcs->reset_sob(hdev, hw_sob);
hw_sob->need_reset = false;
}
void hl_sob_reset_error(struct kref *ref)
{
struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
kref);
struct hl_device *hdev = hw_sob->hdev;
dev_crit(hdev->dev,
"SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
hw_sob->q_idx, hw_sob->sob_id);
}
void hw_sob_put(struct hl_hw_sob *hw_sob)
{
if (hw_sob)
kref_put(&hw_sob->kref, hl_sob_reset);
}
static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
{
if (hw_sob)
kref_put(&hw_sob->kref, hl_sob_reset_error);
}
void hw_sob_get(struct hl_hw_sob *hw_sob)
{
if (hw_sob)
kref_get(&hw_sob->kref);
}
/**
* hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
* @sob_base: sob base id
* @sob_mask: sob user mask, each bit represents a sob offset from sob base
* @mask: generated mask
*
* Return: 0 if given parameters are valid
*/
int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
{
int i;
if (sob_mask == 0)
return -EINVAL;
if (sob_mask == 0x1) {
*mask = ~(1 << (sob_base & 0x7));
} else {
/* find msb in order to verify sob range is valid */
for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
if (BIT(i) & sob_mask)
break;
if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
return -EINVAL;
*mask = ~sob_mask;
}
return 0;
}
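/*
 * Example for hl_gen_sob_mask() above, assuming HL_MAX_SOBS_PER_MONITOR is 8
 * (that value is an assumption made here for illustration only):
 * sob_base = 13 (13 & 0x7 == 5), sob_mask = 0x1 -> *mask = (u8)~(1 << 5) = 0xdf
 * sob_base = 13 (13 & 0x7 == 5), sob_mask = 0x7 -> msb is bit 2 and
 * 2 <= (8 - 5 - 1), so the range is valid and *mask = (u8)~0x7 = 0xf8
 */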
static void hl_fence_release(struct kref *kref)
{
struct hl_fence *fence =
container_of(kref, struct hl_fence, refcount);
struct hl_cs_compl *hl_cs_cmpl =
container_of(fence, struct hl_cs_compl, base_fence);
kfree(hl_cs_cmpl);
}
void hl_fence_put(struct hl_fence *fence)
{
if (IS_ERR_OR_NULL(fence))
return;
kref_put(&fence->refcount, hl_fence_release);
}
void hl_fences_put(struct hl_fence **fence, int len)
{
int i;
for (i = 0; i < len; i++, fence++)
hl_fence_put(*fence);
}
void hl_fence_get(struct hl_fence *fence)
{
if (fence)
kref_get(&fence->refcount);
}
static void hl_fence_init(struct hl_fence *fence, u64 sequence)
{
kref_init(&fence->refcount);
fence->cs_sequence = sequence;
fence->error = 0;
fence->timestamp = ktime_set(0, 0);
fence->mcs_handling_done = false;
init_completion(&fence->completion);
}
void cs_get(struct hl_cs *cs)
{
kref_get(&cs->refcount);
}
static int cs_get_unless_zero(struct hl_cs *cs)
{
return kref_get_unless_zero(&cs->refcount);
}
static void cs_put(struct hl_cs *cs)
{
kref_put(&cs->refcount, cs_do_release);
}
static void cs_job_do_release(struct kref *ref)
{
struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);
kfree(job);
}
static void hl_cs_job_put(struct hl_cs_job *job)
{
kref_put(&job->refcount, cs_job_do_release);
}
bool cs_needs_completion(struct hl_cs *cs)
{
/* In case this is a staged CS, only the last CS in sequence should
* get a completion, any non staged CS will always get a completion
*/
if (cs->staged_cs && !cs->staged_last)
return false;
return true;
}
bool cs_needs_timeout(struct hl_cs *cs)
{
/* In case this is a staged CS, only the first CS in sequence should
* get a timeout, any non staged CS will always get a timeout
*/
if (cs->staged_cs && !cs->staged_first)
return false;
return true;
}
static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
{
/* Patched CB is created for external queues jobs */
return (job->queue_type == QUEUE_TYPE_EXT);
}
/*
* cs_parser - parse the user command submission
*
* @hpriv : pointer to the private data of the fd
* @job : pointer to the job that holds the command submission info
*
* The function parses the command submission of the user. It calls the
* ASIC specific parser, which returns a list of memory blocks to send
* to the device as different command buffers
*
*/
static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_parser parser;
int rc;
parser.ctx_id = job->cs->ctx->asid;
parser.cs_sequence = job->cs->sequence;
parser.job_id = job->id;
parser.hw_queue_id = job->hw_queue_id;
parser.job_userptr_list = &job->userptr_list;
parser.patched_cb = NULL;
parser.user_cb = job->user_cb;
parser.user_cb_size = job->user_cb_size;
parser.queue_type = job->queue_type;
parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
job->patched_cb = NULL;
parser.completion = cs_needs_completion(job->cs);
rc = hdev->asic_funcs->cs_parser(hdev, &parser);
if (is_cb_patched(hdev, job)) {
if (!rc) {
job->patched_cb = parser.patched_cb;
job->job_cb_size = parser.patched_cb_size;
job->contains_dma_pkt = parser.contains_dma_pkt;
atomic_inc(&job->patched_cb->cs_cnt);
}
/*
* Whether the parsing worked or not, we don't need the
* original CB anymore because it was already parsed and
* won't be accessed again for this CS
*/
atomic_dec(&job->user_cb->cs_cnt);
hl_cb_put(job->user_cb);
job->user_cb = NULL;
} else if (!rc) {
job->job_cb_size = job->user_cb_size;
}
return rc;
}
static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job)
{
struct hl_cs *cs = job->cs;
if (is_cb_patched(hdev, job)) {
hl_userptr_delete_list(hdev, &job->userptr_list);
/*
* We might arrive here from rollback and patched CB wasn't
* created, so we need to check it's not NULL
*/
if (job->patched_cb) {
atomic_dec(&job->patched_cb->cs_cnt);
hl_cb_put(job->patched_cb);
}
}
	/* For H/W queue jobs, if a user CB was allocated by the driver,
	 * the user CB isn't released in cs_parser() and thus should be
	 * released here. This is also true for INT queue jobs which were
	 * allocated by the driver.
*/
if (job->is_kernel_allocated_cb &&
(job->queue_type == QUEUE_TYPE_HW || job->queue_type == QUEUE_TYPE_INT)) {
atomic_dec(&job->user_cb->cs_cnt);
hl_cb_put(job->user_cb);
}
/*
* This is the only place where there can be multiple threads
* modifying the list at the same time
*/
spin_lock(&cs->job_lock);
list_del(&job->cs_node);
spin_unlock(&cs->job_lock);
hl_debugfs_remove_job(hdev, job);
/* We decrement reference only for a CS that gets completion
* because the reference was incremented only for this kind of CS
* right before it was scheduled.
*
* In staged submission, only the last CS marked as 'staged_last'
* gets completion, hence its release function will be called from here.
* As for all the rest CS's in the staged submission which do not get
* completion, their CS reference will be decremented by the
* 'staged_last' CS during the CS release flow.
* All relevant PQ CI counters will be incremented during the CS release
* flow by calling 'hl_hw_queue_update_ci'.
*/
if (cs_needs_completion(cs) &&
(job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) {
/* In CS based completions, the timestamp is already available,
* so no need to extract it from job
*/
if (hdev->asic_prop.completion_mode == HL_COMPLETION_MODE_JOB)
cs->completion_timestamp = job->timestamp;
cs_put(cs);
}
hl_cs_job_put(job);
}
/*
* hl_staged_cs_find_first - locate the first CS in this staged submission
*
* @hdev: pointer to device structure
* @cs_seq: staged submission sequence number
*
* @note: This function must be called under 'hdev->cs_mirror_lock'
*
* Find and return a CS pointer with the given sequence
*/
struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
{
struct hl_cs *cs;
list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
if (cs->staged_cs && cs->staged_first &&
cs->sequence == cs_seq)
return cs;
return NULL;
}
/*
* is_staged_cs_last_exists - returns true if the last CS in sequence exists
*
* @hdev: pointer to device structure
* @cs: staged submission member
*
*/
bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
{
struct hl_cs *last_entry;
last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
staged_cs_node);
if (last_entry->staged_last)
return true;
return false;
}
/*
* staged_cs_get - get CS reference if this CS is a part of a staged CS
*
* @hdev: pointer to device structure
* @cs: current CS
 *
 * Increment CS reference for every CS in this staged submission except for
 * the CS which gets completion.
*/
static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
{
/* Only the last CS in this staged submission will get a completion.
* We must increment the reference for all other CS's in this
* staged submission.
* Once we get a completion we will release the whole staged submission.
*/
if (!cs->staged_last)
cs_get(cs);
}
/*
* staged_cs_put - put a CS in case it is part of staged submission
*
* @hdev: pointer to device structure
* @cs: CS to put
*
* This function decrements a CS reference (for a non completion CS)
*/
static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
{
/* We release all CS's in a staged submission except the last
* CS which we have never incremented its reference.
*/
if (!cs_needs_completion(cs))
cs_put(cs);
}
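/*
 * cs_handle_tdr - timeout (TDR) bookkeeping for a CS that is being released.
 * For a staged submission only the 'staged_last' CS gets here, and it is
 * replaced by the first CS of the submission, which owns the TDR. The pending
 * TDR work is cancelled (unless this CS itself timed out) and the TDR is then
 * re-armed for the next CS in the mirror list that needs a timeout.
 */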
static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
{
struct hl_cs *next = NULL, *iter, *first_cs;
if (!cs_needs_timeout(cs))
return;
spin_lock(&hdev->cs_mirror_lock);
/* We need to handle tdr only once for the complete staged submission.
* Hence, we choose the CS that reaches this function first which is
* the CS marked as 'staged_last'.
	 * In case a single staged CS was submitted which has both first and last
	 * indications, then hl_staged_cs_find_first() below will return NULL, since we
	 * removed the CS node from the list before getting here;
	 * in such cases, just continue with the CS to cancel its TDR work.
*/
if (cs->staged_cs && cs->staged_last) {
first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
if (first_cs)
cs = first_cs;
}
spin_unlock(&hdev->cs_mirror_lock);
/* Don't cancel TDR in case this CS was timedout because we might be
* running from the TDR context
*/
if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)
return;
if (cs->tdr_active)
cancel_delayed_work_sync(&cs->work_tdr);
spin_lock(&hdev->cs_mirror_lock);
/* queue TDR for next CS */
list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node)
if (cs_needs_timeout(iter)) {
next = iter;
break;
}
if (next && !next->tdr_active) {
next->tdr_active = true;
schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
}
spin_unlock(&hdev->cs_mirror_lock);
}
/*
* force_complete_multi_cs - complete all contexts that wait on multi-CS
*
* @hdev: pointer to habanalabs device structure
*/
static void force_complete_multi_cs(struct hl_device *hdev)
{
int i;
for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
struct multi_cs_completion *mcs_compl;
mcs_compl = &hdev->multi_cs_completion[i];
spin_lock(&mcs_compl->lock);
if (!mcs_compl->used) {
spin_unlock(&mcs_compl->lock);
continue;
}
/* when calling force complete no context should be waiting on
		 * multi-CS.
		 * We are calling the function as a protection for such a case,
		 * to free any pending context and print an error message
*/
dev_err(hdev->dev,
"multi-CS completion context %d still waiting when calling force completion\n",
i);
complete_all(&mcs_compl->completion);
spin_unlock(&mcs_compl->lock);
}
}
/*
* complete_multi_cs - complete all waiting entities on multi-CS
*
* @hdev: pointer to habanalabs device structure
* @cs: CS structure
 * The function signals a waiting entity that has overlapping stream masters
* with the completed CS.
* For example:
* - a completed CS worked on stream master QID 4, multi CS completion
* is actively waiting on stream master QIDs 3, 5. don't send signal as no
* common stream master QID
* - a completed CS worked on stream master QID 4, multi CS completion
* is actively waiting on stream master QIDs 3, 4. send signal as stream
* master QID 4 is common
*/
static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
{
struct hl_fence *fence = cs->fence;
int i;
/* in case of multi CS check for completion only for the first CS */
if (cs->staged_cs && !cs->staged_first)
return;
for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
struct multi_cs_completion *mcs_compl;
mcs_compl = &hdev->multi_cs_completion[i];
if (!mcs_compl->used)
continue;
spin_lock(&mcs_compl->lock);
/*
* complete if:
* 1. still waiting for completion
* 2. the completed CS has at least one overlapping stream
* master with the stream masters in the completion
*/
if (mcs_compl->used &&
(fence->stream_master_qid_map &
mcs_compl->stream_master_qid_map)) {
/* extract the timestamp only of first completed CS */
if (!mcs_compl->timestamp)
mcs_compl->timestamp = ktime_to_ns(fence->timestamp);
complete_all(&mcs_compl->completion);
/*
* Setting mcs_handling_done inside the lock ensures
* at least one fence have mcs_handling_done set to
* true before wait for mcs finish. This ensures at
* least one CS will be set as completed when polling
* mcs fences.
*/
fence->mcs_handling_done = true;
}
spin_unlock(&mcs_compl->lock);
}
/* In case CS completed without mcs completion initialized */
fence->mcs_handling_done = true;
}
static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
struct hl_cs *cs,
struct hl_cs_compl *hl_cs_cmpl)
{
/* Skip this handler if the cs wasn't submitted, to avoid putting
	 * the hw_sob twice, since this case is already handled at this point,
* also skip if the hw_sob pointer wasn't set.
*/
if (!hl_cs_cmpl->hw_sob || !cs->submitted)
return;
spin_lock(&hl_cs_cmpl->lock);
/*
* we get refcount upon reservation of signals or signal/wait cs for the
* hw_sob object, and need to put it when the first staged cs
* (which contains the encaps signals) or cs signal/wait is completed.
*/
if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
(hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
(!!hl_cs_cmpl->encaps_signals)) {
dev_dbg(hdev->dev,
"CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
hl_cs_cmpl->cs_seq,
hl_cs_cmpl->type,
hl_cs_cmpl->hw_sob->sob_id,
hl_cs_cmpl->sob_val);
hw_sob_put(hl_cs_cmpl->hw_sob);
if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
hdev->asic_funcs->reset_sob_group(hdev,
hl_cs_cmpl->sob_group);
}
spin_unlock(&hl_cs_cmpl->lock);
}
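/*
 * cs_do_release - final CS release (kref callback). Completes any remaining
 * jobs, updates queue CIs, removes the CS from the mirror list, hands the TDR
 * over to the next CS, drops staged-submission and encapsulated-signals
 * references, marks the fence error for timed-out/aborted/unsubmitted CSs,
 * pushes the outcome to the store when a timestamp was requested and finally
 * signals the fence and any multi-CS waiters before freeing the CS.
 */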
static void cs_do_release(struct kref *ref)
{
struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
struct hl_device *hdev = cs->ctx->hdev;
struct hl_cs_job *job, *tmp;
struct hl_cs_compl *hl_cs_cmpl =
container_of(cs->fence, struct hl_cs_compl, base_fence);
cs->completed = true;
/*
* Although if we reached here it means that all external jobs have
* finished, because each one of them took refcnt to CS, we still
* need to go over the internal jobs and complete them. Otherwise, we
* will have leaked memory and what's worse, the CS object (and
* potentially the CTX object) could be released, while the JOB
* still holds a pointer to them (but no reference).
*/
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
hl_complete_job(hdev, job);
if (!cs->submitted) {
/*
* In case the wait for signal CS was submitted, the fence put
* occurs in init_signal_wait_cs() or collective_wait_init_cs()
* right before hanging on the PQ.
*/
if (cs->type == CS_TYPE_WAIT ||
cs->type == CS_TYPE_COLLECTIVE_WAIT)
hl_fence_put(cs->signal_fence);
goto out;
}
	/* Need to update CI for all queue jobs that do not get completion */
hl_hw_queue_update_ci(cs);
/* remove CS from CS mirror list */
spin_lock(&hdev->cs_mirror_lock);
list_del_init(&cs->mirror_node);
spin_unlock(&hdev->cs_mirror_lock);
cs_handle_tdr(hdev, cs);
if (cs->staged_cs) {
/* the completion CS decrements reference for the entire
* staged submission
*/
if (cs->staged_last) {
struct hl_cs *staged_cs, *tmp_cs;
list_for_each_entry_safe(staged_cs, tmp_cs,
&cs->staged_cs_node, staged_cs_node)
staged_cs_put(hdev, staged_cs);
}
/* A staged CS will be a member in the list only after it
* was submitted. We used 'cs_mirror_lock' when inserting
		 * it to the list so we will use it again when removing it
*/
if (cs->submitted) {
spin_lock(&hdev->cs_mirror_lock);
list_del(&cs->staged_cs_node);
spin_unlock(&hdev->cs_mirror_lock);
}
/* decrement refcount to handle when first staged cs
* with encaps signals is completed.
*/
if (hl_cs_cmpl->encaps_signals)
kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
hl_encaps_release_handle_and_put_ctx);
}
if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) && cs->encaps_signals)
kref_put(&cs->encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
out:
/* Must be called before hl_ctx_put because inside we use ctx to get
* the device
*/
hl_debugfs_remove_cs(cs);
hdev->shadow_cs_queue[cs->sequence & (hdev->asic_prop.max_pending_cs - 1)] = NULL;
/* We need to mark an error for not submitted because in that case
* the hl fence release flow is different. Mainly, we don't need
* to handle hw_sob for signal/wait
*/
if (cs->timedout)
cs->fence->error = -ETIMEDOUT;
else if (cs->aborted)
cs->fence->error = -EIO;
else if (!cs->submitted)
cs->fence->error = -EBUSY;
if (unlikely(cs->skip_reset_on_timeout)) {
dev_err(hdev->dev,
"Command submission %llu completed after %llu (s)\n",
cs->sequence,
div_u64(jiffies - cs->submission_time_jiffies, HZ));
}
if (cs->timestamp) {
cs->fence->timestamp = cs->completion_timestamp;
hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
cs->fence->timestamp, cs->fence->error);
}
hl_ctx_put(cs->ctx);
complete_all(&cs->fence->completion);
complete_multi_cs(hdev, cs);
cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);
hl_fence_put(cs->fence);
kfree(cs->jobs_in_queue_cnt);
kfree(cs);
}
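/*
 * cs_timedout - delayed-work handler for a CS that did not complete in time.
 * Unsubmitted or already-completed CSs are ignored. Otherwise the CS is marked
 * as timed out, the first timeout occurrence is captured for error reporting,
 * a per-CS-type error is printed and a state dump is attempted. Depending on
 * the reset policy, either a conditional device reset is requested or a
 * notifier event is sent.
 */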
static void cs_timedout(struct work_struct *work)
{
struct hl_cs *cs = container_of(work, struct hl_cs, work_tdr.work);
bool skip_reset_on_timeout, device_reset = false;
struct hl_device *hdev;
u64 event_mask = 0x0;
uint timeout_sec;
int rc;
skip_reset_on_timeout = cs->skip_reset_on_timeout;
rc = cs_get_unless_zero(cs);
if (!rc)
return;
if ((!cs->submitted) || (cs->completed)) {
cs_put(cs);
return;
}
hdev = cs->ctx->hdev;
if (likely(!skip_reset_on_timeout)) {
if (hdev->reset_on_lockup)
device_reset = true;
else
hdev->reset_info.needs_reset = true;
/* Mark the CS is timed out so we won't try to cancel its TDR */
cs->timedout = true;
}
/* Save only the first CS timeout parameters */
rc = atomic_cmpxchg(&hdev->captured_err_info.cs_timeout.write_enable, 1, 0);
if (rc) {
hdev->captured_err_info.cs_timeout.timestamp = ktime_get();
hdev->captured_err_info.cs_timeout.seq = cs->sequence;
event_mask |= HL_NOTIFIER_EVENT_CS_TIMEOUT;
}
timeout_sec = jiffies_to_msecs(hdev->timeout_jiffies) / 1000;
switch (cs->type) {
case CS_TYPE_SIGNAL:
dev_err(hdev->dev,
"Signal command submission %llu has not finished in %u seconds!\n",
cs->sequence, timeout_sec);
break;
case CS_TYPE_WAIT:
dev_err(hdev->dev,
"Wait command submission %llu has not finished in %u seconds!\n",
cs->sequence, timeout_sec);
break;
case CS_TYPE_COLLECTIVE_WAIT:
dev_err(hdev->dev,
"Collective Wait command submission %llu has not finished in %u seconds!\n",
cs->sequence, timeout_sec);
break;
default:
dev_err(hdev->dev,
"Command submission %llu has not finished in %u seconds!\n",
cs->sequence, timeout_sec);
break;
}
rc = hl_state_dump(hdev);
if (rc)
dev_err(hdev->dev, "Error during system state dump %d\n", rc);
cs_put(cs);
if (device_reset) {
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
hl_device_cond_reset(hdev, HL_DRV_RESET_TDR, event_mask);
} else if (event_mask) {
hl_notifier_event_send_all(hdev, event_mask);
}
}
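/*
 * allocate_cs - allocate and initialize a new CS object together with its
 * completion fence and per-queue job counters (GFP_ATOMIC first, falling back
 * to GFP_KERNEL). A slot in the context's cs_pending array is claimed for the
 * fence; if that slot is still occupied by an uncompleted fence, the
 * submission is rejected with -EAGAIN (too many CSs in flight).
 */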
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
enum hl_cs_type cs_type, u64 user_sequence,
struct hl_cs **cs_new, u32 flags, u32 timeout)
{
struct hl_cs_counters_atomic *cntr;
struct hl_fence *other = NULL;
struct hl_cs_compl *cs_cmpl;
struct hl_cs *cs;
int rc;
cntr = &hdev->aggregated_cs_counters;
cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
if (!cs)
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
return -ENOMEM;
}
/* increment refcnt for context */
hl_ctx_get(ctx);
cs->ctx = ctx;
cs->submitted = false;
cs->completed = false;
cs->type = cs_type;
cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
cs->timeout_jiffies = timeout;
cs->skip_reset_on_timeout =
hdev->reset_info.skip_reset_on_timeout ||
!!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
cs->submission_time_jiffies = jiffies;
INIT_LIST_HEAD(&cs->job_list);
INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
kref_init(&cs->refcount);
spin_lock_init(&cs->job_lock);
cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
if (!cs_cmpl)
cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);
if (!cs_cmpl) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_cs;
}
cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
if (!cs->jobs_in_queue_cnt)
cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);
if (!cs->jobs_in_queue_cnt) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_cs_cmpl;
}
cs_cmpl->hdev = hdev;
cs_cmpl->type = cs->type;
spin_lock_init(&cs_cmpl->lock);
cs->fence = &cs_cmpl->base_fence;
spin_lock(&ctx->cs_lock);
cs_cmpl->cs_seq = ctx->cs_sequence;
other = ctx->cs_pending[cs_cmpl->cs_seq &
(hdev->asic_prop.max_pending_cs - 1)];
if (other && !completion_done(&other->completion)) {
/* If the following statement is true, it means we have reached
* a point in which only part of the staged submission was
* submitted and we don't have enough room in the 'cs_pending'
* array for the rest of the submission.
* This causes a deadlock because this CS will never be
* completed as it depends on future CS's for completion.
*/
if (other->cs_sequence == user_sequence)
dev_crit_ratelimited(hdev->dev,
"Staged CS %llu deadlock due to lack of resources",
user_sequence);
dev_dbg_ratelimited(hdev->dev,
"Rejecting CS because of too many in-flights CS\n");
atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
rc = -EAGAIN;
goto free_fence;
}
/* init hl_fence */
hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
cs->sequence = cs_cmpl->cs_seq;
ctx->cs_pending[cs_cmpl->cs_seq &
(hdev->asic_prop.max_pending_cs - 1)] =
&cs_cmpl->base_fence;
ctx->cs_sequence++;
hl_fence_get(&cs_cmpl->base_fence);
hl_fence_put(other);
spin_unlock(&ctx->cs_lock);
*cs_new = cs;
return 0;
free_fence:
spin_unlock(&ctx->cs_lock);
kfree(cs->jobs_in_queue_cnt);
free_cs_cmpl:
kfree(cs_cmpl);
free_cs:
kfree(cs);
hl_ctx_put(ctx);
return rc;
}
static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
{
struct hl_cs_job *job, *tmp;
staged_cs_put(hdev, cs);
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
hl_complete_job(hdev, job);
}
/*
* release_reserved_encaps_signals() - release reserved encapsulated signals.
* @hdev: pointer to habanalabs device structure
*
* Release reserved encapsulated signals which weren't un-reserved, or for which a CS with
* encapsulated signals wasn't submitted and thus weren't released as part of CS roll-back.
 * For these signals we also need to put the refcount of the H/W SOB which was taken at the
* reservation.
*/
static void release_reserved_encaps_signals(struct hl_device *hdev)
{
struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
struct hl_cs_encaps_sig_handle *handle;
struct hl_encaps_signals_mgr *mgr;
u32 id;
if (!ctx)
return;
mgr = &ctx->sig_mgr;
idr_for_each_entry(&mgr->handles, handle, id)
if (handle->cs_seq == ULLONG_MAX)
kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob_ctx);
hl_ctx_put(ctx);
}
void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
{
int i;
struct hl_cs *cs, *tmp;
if (!skip_wq_flush) {
flush_workqueue(hdev->ts_free_obj_wq);
/* flush all completions before iterating over the CS mirror list in
* order to avoid a race with the release functions
*/
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
flush_workqueue(hdev->cq_wq[i]);
flush_workqueue(hdev->cs_cmplt_wq);
}
/* Make sure we don't have leftovers in the CS mirror list */
list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
cs_get(cs);
cs->aborted = true;
dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
cs->ctx->asid, cs->sequence);
cs_rollback(hdev, cs);
cs_put(cs);
}
force_complete_multi_cs(hdev);
release_reserved_encaps_signals(hdev);
}
static void
wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
{
struct hl_user_pending_interrupt *pend, *temp;
spin_lock(&interrupt->wait_list_lock);
list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
if (pend->ts_reg_info.buf) {
list_del(&pend->wait_list_node);
hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
hl_cb_put(pend->ts_reg_info.cq_cb);
} else {
pend->fence.error = -EIO;
complete_all(&pend->fence.completion);
}
}
spin_unlock(&interrupt->wait_list_lock);
}
void hl_release_pending_user_interrupts(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_user_interrupt *interrupt;
int i;
if (!prop->user_interrupt_count)
return;
	/* We iterate through the user interrupt requests and wake up all
	 * user threads waiting for interrupt completion. We iterate the
* list under a lock, this is why all user threads, once awake,
* will wait on the same lock and will release the waiting object upon
* unlock.
*/
for (i = 0 ; i < prop->user_interrupt_count ; i++) {
interrupt = &hdev->user_interrupt[i];
wake_pending_user_interrupt_threads(interrupt);
}
interrupt = &hdev->common_user_cq_interrupt;
wake_pending_user_interrupt_threads(interrupt);
interrupt = &hdev->common_decoder_interrupt;
wake_pending_user_interrupt_threads(interrupt);
}
static void force_complete_cs(struct hl_device *hdev)
{
struct hl_cs *cs;
spin_lock(&hdev->cs_mirror_lock);
list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) {
cs->fence->error = -EIO;
complete_all(&cs->fence->completion);
}
spin_unlock(&hdev->cs_mirror_lock);
}
void hl_abort_waiting_for_cs_completions(struct hl_device *hdev)
{
force_complete_cs(hdev);
force_complete_multi_cs(hdev);
}
static void job_wq_completion(struct work_struct *work)
{
struct hl_cs_job *job = container_of(work, struct hl_cs_job,
finish_work);
struct hl_cs *cs = job->cs;
struct hl_device *hdev = cs->ctx->hdev;
/* job is no longer needed */
hl_complete_job(hdev, job);
}
static void cs_completion(struct work_struct *work)
{
struct hl_cs *cs = container_of(work, struct hl_cs, finish_work);
struct hl_device *hdev = cs->ctx->hdev;
struct hl_cs_job *job, *tmp;
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
hl_complete_job(hdev, job);
}
u32 hl_get_active_cs_num(struct hl_device *hdev)
{
u32 active_cs_num = 0;
struct hl_cs *cs;
spin_lock(&hdev->cs_mirror_lock);
list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node)
if (!cs->completed)
active_cs_num++;
spin_unlock(&hdev->cs_mirror_lock);
return active_cs_num;
}
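/*
 * validate_queue_index - validate the queue index of a CS chunk and derive its
 * queue type. Out-of-range, unsupported (NA), binned and driver-only queues
 * are rejected. For H/W queues the USER_ALLOC_CB chunk flag selects between a
 * user-allocated and a kernel-allocated CB, subject to the queue's
 * cb_alloc_flags; for other queue types the flag is ignored.
 */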
static int validate_queue_index(struct hl_device *hdev,
struct hl_cs_chunk *chunk,
enum hl_queue_type *queue_type,
bool *is_kernel_allocated_cb)
{
struct asic_fixed_properties *asic = &hdev->asic_prop;
struct hw_queue_properties *hw_queue_prop;
/* This must be checked here to prevent out-of-bounds access to
* hw_queues_props array
*/
if (chunk->queue_index >= asic->max_queues) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
return -EINVAL;
}
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
if (hw_queue_prop->type == QUEUE_TYPE_NA) {
dev_err(hdev->dev, "Queue index %d is not applicable\n",
chunk->queue_index);
return -EINVAL;
}
if (hw_queue_prop->binned) {
dev_err(hdev->dev, "Queue index %d is binned out\n",
chunk->queue_index);
return -EINVAL;
}
if (hw_queue_prop->driver_only) {
dev_err(hdev->dev,
"Queue index %d is restricted for the kernel driver\n",
chunk->queue_index);
return -EINVAL;
}
/* When hw queue type isn't QUEUE_TYPE_HW,
	 * USER_ALLOC_CB flag shall be treated as "don't care".
*/
if (hw_queue_prop->type == QUEUE_TYPE_HW) {
if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
dev_err(hdev->dev,
"Queue index %d doesn't support user CB\n",
chunk->queue_index);
return -EINVAL;
}
*is_kernel_allocated_cb = false;
} else {
if (!(hw_queue_prop->cb_alloc_flags &
CB_ALLOC_KERNEL)) {
dev_err(hdev->dev,
"Queue index %d doesn't support kernel CB\n",
chunk->queue_index);
return -EINVAL;
}
*is_kernel_allocated_cb = true;
}
} else {
*is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
& CB_ALLOC_KERNEL);
}
*queue_type = hw_queue_prop->type;
return 0;
}
static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
struct hl_mem_mgr *mmg,
struct hl_cs_chunk *chunk)
{
struct hl_cb *cb;
cb = hl_cb_get(mmg, chunk->cb_handle);
if (!cb) {
dev_err(hdev->dev, "CB handle 0x%llx invalid\n", chunk->cb_handle);
return NULL;
}
if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
goto release_cb;
}
atomic_inc(&cb->cs_cnt);
return cb;
release_cb:
hl_cb_put(cb);
return NULL;
}
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
{
struct hl_cs_job *job;
job = kzalloc(sizeof(*job), GFP_ATOMIC);
if (!job)
job = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job)
return NULL;
kref_init(&job->refcount);
job->queue_type = queue_type;
job->is_kernel_allocated_cb = is_kernel_allocated_cb;
if (is_cb_patched(hdev, job))
INIT_LIST_HEAD(&job->userptr_list);
if (job->queue_type == QUEUE_TYPE_EXT)
INIT_WORK(&job->finish_work, job_wq_completion);
return job;
}
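/* Translate the user-supplied CS type flags into the internal CS type */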
static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
{
if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
return CS_TYPE_SIGNAL;
else if (cs_type_flags & HL_CS_FLAGS_WAIT)
return CS_TYPE_WAIT;
else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
return CS_TYPE_COLLECTIVE_WAIT;
else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
return CS_RESERVE_SIGNALS;
else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
return CS_UNRESERVE_SIGNALS;
else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
return CS_TYPE_ENGINE_CORE;
else if (cs_type_flags & HL_CS_FLAGS_ENGINES_COMMAND)
return CS_TYPE_ENGINES;
else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
return CS_TYPE_FLUSH_PCI_HBW_WRITES;
else
return CS_TYPE_DEFAULT;
}
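/*
 * hl_cs_sanity_checks - basic validation of the CS ioctl arguments: padding
 * bytes, device state, CS type flags and number of chunks.
 */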
static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_ctx *ctx = hpriv->ctx;
u32 cs_type_flags, num_chunks;
enum hl_device_status status;
enum hl_cs_type cs_type;
bool is_sync_stream;
int i;
for (i = 0 ; i < sizeof(args->in.pad) ; i++)
if (args->in.pad[i]) {
dev_dbg(hdev->dev, "Padding bytes must be 0\n");
return -EINVAL;
}
	if (!hl_device_operational(hdev, &status))
		return -EBUSY;
if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
!hdev->supports_staged_submission) {
dev_err(hdev->dev, "staged submission not supported");
return -EPERM;
}
cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
dev_err(hdev->dev,
"CS type flags are mutually exclusive, context %d\n",
ctx->asid);
return -EINVAL;
}
cs_type = hl_cs_get_cs_type(cs_type_flags);
num_chunks = args->in.num_chunks_execute;
is_sync_stream = (cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
cs_type == CS_TYPE_COLLECTIVE_WAIT);
if (unlikely(is_sync_stream && !hdev->supports_sync_stream)) {
dev_err(hdev->dev, "Sync stream CS is not supported\n");
return -EINVAL;
}
if (cs_type == CS_TYPE_DEFAULT) {
if (!num_chunks) {
dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid);
return -EINVAL;
}
} else if (is_sync_stream && num_chunks != 1) {
dev_err(hdev->dev,
"Sync stream CS mandates one chunk only, context %d\n",
ctx->asid);
return -EINVAL;
}
return 0;
}
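/*
 * hl_cs_copy_chunk_array - allocate the chunk array and copy it from user
 * space, updating the relevant drop counters on failure.
 */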
static int hl_cs_copy_chunk_array(struct hl_device *hdev,
struct hl_cs_chunk **cs_chunk_array,
void __user *chunks, u32 num_chunks,
struct hl_ctx *ctx)
{
u32 size_to_copy;
if (num_chunks > HL_MAX_JOBS_PER_CS) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Number of chunks can NOT be larger than %d\n",
HL_MAX_JOBS_PER_CS);
return -EINVAL;
}
*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
GFP_ATOMIC);
if (!*cs_chunk_array)
*cs_chunk_array = kmalloc_array(num_chunks,
sizeof(**cs_chunk_array), GFP_KERNEL);
if (!*cs_chunk_array) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
}
size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
kfree(*cs_chunk_array);
return -EFAULT;
}
return 0;
}
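/*
 * cs_staged_submission - handle the staged-submission flags: mark the
 * first/last CS of the staged submission, set the staged sequence and take
 * the staged CS reference.
 */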
static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
u64 sequence, u32 flags,
u32 encaps_signal_handle)
{
if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
return 0;
cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);
if (cs->staged_first) {
/* Staged CS sequence is the first CS sequence */
INIT_LIST_HEAD(&cs->staged_cs_node);
cs->staged_sequence = cs->sequence;
if (cs->encaps_signals)
cs->encaps_sig_hdl_id = encaps_signal_handle;
} else {
/* User sequence will be validated in 'hl_hw_queue_schedule_cs'
* under the cs_mirror_lock
*/
cs->staged_sequence = sequence;
}
/* Increment CS reference if needed */
staged_cs_get(hdev, cs);
cs->staged_cs = true;
return 0;
}
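/*
 * get_stream_master_qid_mask - return a one-hot mask with the bit of the
 * stream master QID that matches the given queue ID, or 0 if the queue is
 * not a stream master.
 */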
static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
{
int i;
for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
if (qid == hdev->stream_master_qid_arr[i])
return BIT(i);
return 0;
}
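/*
 * cs_ioctl_default - handle a default (execute) CS: copy and validate the
 * chunks, create a job per chunk, parse the jobs and schedule the CS on the
 * H/W queues.
 */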
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
u32 num_chunks, u64 *cs_seq, u32 flags,
u32 encaps_signals_handle, u32 timeout,
u16 *signal_initial_sob_count)
{
bool staged_mid, int_queues_only = true, using_hw_queues = false;
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_chunk *cs_chunk_array;
struct hl_cs_counters_atomic *cntr;
struct hl_ctx *ctx = hpriv->ctx;
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
u64 user_sequence;
u8 stream_master_qid_map = 0;
int rc, i;
cntr = &hdev->aggregated_cs_counters;
user_sequence = *cs_seq;
*cs_seq = ULLONG_MAX;
rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
hpriv->ctx);
if (rc)
goto out;
if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
staged_mid = true;
else
staged_mid = false;
rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
timeout);
if (rc)
goto free_cs_chunk_array;
*cs_seq = cs->sequence;
hl_debugfs_add_cs(cs);
rc = cs_staged_submission(hdev, cs, user_sequence, flags,
encaps_signals_handle);
if (rc)
goto free_cs_object;
/* If this is a staged submission we must return the staged sequence
* rather than the internal CS sequence
*/
if (cs->staged_cs)
*cs_seq = cs->staged_sequence;
/* Validate ALL the CS chunks before submitting the CS */
for (i = 0 ; i < num_chunks ; i++) {
struct hl_cs_chunk *chunk = &cs_chunk_array[i];
enum hl_queue_type queue_type;
bool is_kernel_allocated_cb;
rc = validate_queue_index(hdev, chunk, &queue_type,
&is_kernel_allocated_cb);
if (rc) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
goto free_cs_object;
}
if (is_kernel_allocated_cb) {
cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk);
if (!cb) {
atomic64_inc(
&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
goto free_cs_object;
}
} else {
cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
}
if (queue_type == QUEUE_TYPE_EXT ||
queue_type == QUEUE_TYPE_HW) {
int_queues_only = false;
			/*
			 * store which streams are being used for the
			 * external/HW queues of this CS
			 */
if (hdev->supports_wait_for_multi_cs)
stream_master_qid_map |=
get_stream_master_qid_mask(hdev,
chunk->queue_index);
}
if (queue_type == QUEUE_TYPE_HW)
using_hw_queues = true;
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
if (is_kernel_allocated_cb)
goto release_cb;
goto free_cs_object;
}
job->id = i + 1;
job->cs = cs;
job->user_cb = cb;
job->user_cb_size = chunk->cb_size;
job->hw_queue_id = chunk->queue_index;
cs->jobs_in_queue_cnt[job->hw_queue_id]++;
cs->jobs_cnt++;
list_add_tail(&job->cs_node, &cs->job_list);
		/*
		 * Increment the CS reference. When the CS reference reaches 0,
		 * the CS is done and can be signaled to the user and all its
		 * resources can be freed.
		 * Only increment for jobs on external or H/W queues, because
		 * only those jobs get a completion.
		 */
if (cs_needs_completion(cs) &&
(job->queue_type == QUEUE_TYPE_EXT ||
job->queue_type == QUEUE_TYPE_HW))
cs_get(cs);
hl_debugfs_add_job(hdev, job);
rc = cs_parser(hpriv, job);
if (rc) {
atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
atomic64_inc(&cntr->parsing_drop_cnt);
dev_err(hdev->dev,
"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
cs->ctx->asid, cs->sequence, job->id, rc);
goto free_cs_object;
}
}
/* We allow a CS with any queue type combination as long as it does
* not get a completion
*/
if (int_queues_only && cs_needs_completion(cs)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
cs->ctx->asid, cs->sequence);
rc = -EINVAL;
goto free_cs_object;
}
if (using_hw_queues)
INIT_WORK(&cs->finish_work, cs_completion);
/*
* store the (external/HW queues) streams used by the CS in the
* fence object for multi-CS completion
*/
if (hdev->supports_wait_for_multi_cs)
cs->fence->stream_master_qid_map = stream_master_qid_map;
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to submit CS %d.%llu to H/W queues, error %d\n",
cs->ctx->asid, cs->sequence, rc);
goto free_cs_object;
}
*signal_initial_sob_count = cs->initial_sob_count;
rc = HL_CS_STATUS_SUCCESS;
goto put_cs;
release_cb:
atomic_dec(&cb->cs_cnt);
hl_cb_put(cb);
free_cs_object:
cs_rollback(hdev, cs);
*cs_seq = ULLONG_MAX;
/* The path below is both for good and erroneous exits */
put_cs:
/* We finished with the CS in this function, so put the ref */
cs_put(cs);
free_cs_chunk_array:
kfree(cs_chunk_array);
out:
return rc;
}
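/*
 * hl_cs_ctx_switch - perform the context-switch and restore phase, if needed,
 * before the execute phase, and wait for the restore CS to complete.
 */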
static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
u64 *cs_seq)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_ctx *ctx = hpriv->ctx;
bool need_soft_reset = false;
int rc = 0, do_ctx_switch = 0;
void __user *chunks;
u32 num_chunks, tmp;
u16 sob_count;
int ret;
if (hdev->supports_ctx_switch)
do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
mutex_lock(&hpriv->restore_phase_mutex);
if (do_ctx_switch) {
rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
if (rc) {
dev_err_ratelimited(hdev->dev,
"Failed to switch to context %d, rejecting CS! %d\n",
ctx->asid, rc);
				/*
				 * If we timed out, or if the device is not
				 * IDLE while we want to do a context switch
				 * (-EBUSY), we need to soft-reset because the
				 * QMAN is probably stuck. However, we can't
				 * call reset here directly because of a
				 * deadlock, so we need to do it at the very
				 * end of this function.
				 */
if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
need_soft_reset = true;
mutex_unlock(&hpriv->restore_phase_mutex);
goto out;
}
}
hdev->asic_funcs->restore_phase_topology(hdev);
chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
num_chunks = args->in.num_chunks_restore;
if (!num_chunks) {
dev_dbg(hdev->dev,
"Need to run restore phase but restore CS is empty\n");
rc = 0;
} else {
rc = cs_ioctl_default(hpriv, chunks, num_chunks,
cs_seq, 0, 0, hdev->timeout_jiffies, &sob_count);
}
mutex_unlock(&hpriv->restore_phase_mutex);
if (rc) {
dev_err(hdev->dev,
"Failed to submit restore CS for context %d (%d)\n",
ctx->asid, rc);
goto out;
}
/* Need to wait for restore completion before execution phase */
if (num_chunks) {
enum hl_cs_wait_status status;
wait_again:
ret = _hl_cs_wait_ioctl(hdev, ctx,
jiffies_to_usecs(hdev->timeout_jiffies),
*cs_seq, &status, NULL);
if (ret) {
if (ret == -ERESTARTSYS) {
usleep_range(100, 200);
goto wait_again;
}
dev_err(hdev->dev,
"Restore CS for context %d failed to complete %d\n",
ctx->asid, ret);
rc = -ENOEXEC;
goto out;
}
}
if (hdev->supports_ctx_switch)
ctx->thread_ctx_switch_wait_token = 1;
} else if (hdev->supports_ctx_switch && !ctx->thread_ctx_switch_wait_token) {
rc = hl_poll_timeout_memory(hdev,
&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
100, jiffies_to_usecs(hdev->timeout_jiffies), false);
if (rc == -ETIMEDOUT) {
dev_err(hdev->dev,
"context switch phase timeout (%d)\n", tmp);
goto out;
}
}
out:
if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
hl_device_reset(hdev, 0);
return rc;
}
/*
 * hl_cs_signal_sob_wraparound_handler: handle the SOB value wraparound case.
 * If the SOB value reaches the max value, move to the other SOB reserved
 * for the queue.
 * @hdev: pointer to device structure
 * @q_idx: stream queue index
 * @hw_sob: the H/W SOB used in this signal CS.
 * @count: signals count
 * @encaps_sig: tells whether it's a reservation for encaps signals or not.
 *
 * Note that this function must be called while hw_queues_lock is taken.
 */
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
{
struct hl_sync_stream_properties *prop;
struct hl_hw_sob *sob = *hw_sob, *other_sob;
u8 other_sob_offset;
prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
hw_sob_get(sob);
/* check for wraparound */
if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
/*
* Decrement as we reached the max value.
* The release function won't be called here as we've
* just incremented the refcount right before calling this
* function.
*/
hw_sob_put_err(sob);
		/*
		 * Check the other SOB value: if it is still in use then fail,
		 * otherwise make the switch.
		 */
other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
other_sob = &prop->hw_sob[other_sob_offset];
if (kref_read(&other_sob->kref) != 1) {
dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
q_idx);
return -EINVAL;
}
		/*
		 * next_sob_val always points to the next available signal
		 * in the SOB, so for encaps signals it will be the next one
		 * after reserving the required amount.
		 */
if (encaps_sig)
prop->next_sob_val = count + 1;
else
prop->next_sob_val = count;
/* only two SOBs are currently in use */
prop->curr_sob_offset = other_sob_offset;
*hw_sob = other_sob;
		/*
		 * Check if other_sob needs a reset, and if so do it before
		 * using it for the reservation or for the next signal CS.
		 * We do it here, for both the encaps and the regular signal CS
		 * cases, in order to avoid a possible race of two kref_put
		 * calls on the SOB, which could occur at the same time if we
		 * moved the SOB reset (kref_put) to the cs_do_release
		 * function.
		 * In addition, with a combination of signal CS and encaps, if
		 * at the point we need to reset the SOB there are no more
		 * reservations and only signal CSs keep coming, the signal CS
		 * must put the refcount and reset the SOB.
		 */
if (other_sob->need_reset)
hw_sob_put(other_sob);
if (encaps_sig) {
/* set reset indication for the sob */
sob->need_reset = true;
hw_sob_get(other_sob);
}
dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
prop->curr_sob_offset, q_idx);
} else {
prop->next_sob_val += count;
}
return 0;
}
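/*
 * cs_ioctl_extract_signal_seq - extract the signal CS sequence to wait on.
 * For encaps signals this is the encapsulated signal sequence from the chunk,
 * otherwise it is copied from the user signal-seq array (currently limited to
 * a single entry).
 */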
static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
bool encaps_signals)
{
u64 *signal_seq_arr = NULL;
u32 size_to_copy, signal_seq_arr_len;
int rc = 0;
if (encaps_signals) {
*signal_seq = chunk->encaps_signal_seq;
return 0;
}
signal_seq_arr_len = chunk->num_signal_seq_arr;
/* currently only one signal seq is supported */
if (signal_seq_arr_len != 1) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Wait for signal CS supports only one signal CS seq\n");
return -EINVAL;
}
signal_seq_arr = kmalloc_array(signal_seq_arr_len,
sizeof(*signal_seq_arr),
GFP_ATOMIC);
if (!signal_seq_arr)
signal_seq_arr = kmalloc_array(signal_seq_arr_len,
sizeof(*signal_seq_arr),
GFP_KERNEL);
if (!signal_seq_arr) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
}
size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
if (copy_from_user(signal_seq_arr,
u64_to_user_ptr(chunk->signal_seq_arr),
size_to_copy)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Failed to copy signal seq array from user\n");
rc = -EFAULT;
goto out;
}
/* currently it is guaranteed to have only one signal seq */
*signal_seq = signal_seq_arr[0];
out:
kfree(signal_seq_arr);
return rc;
}
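/*
 * cs_ioctl_signal_wait_create_jobs - create the single kernel-CB job used by
 * a signal/wait CS and add it to the CS job list.
 */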
static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs,
enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
{
struct hl_cs_counters_atomic *cntr;
struct hl_cs_job *job;
struct hl_cb *cb;
u32 cb_size;
cntr = &hdev->aggregated_cs_counters;
job = hl_cs_allocate_job(hdev, q_type, true);
if (!job) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
dev_err(hdev->dev, "Failed to allocate a new job\n");
return -ENOMEM;
}
if (cs->type == CS_TYPE_WAIT)
cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
else
cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
cb = hl_cb_kernel_create(hdev, cb_size, q_type == QUEUE_TYPE_HW);
if (!cb) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
kfree(job);
return -EFAULT;
}
job->id = 0;
job->cs = cs;
job->user_cb = cb;
atomic_inc(&job->user_cb->cs_cnt);
job->user_cb_size = cb_size;
job->hw_queue_id = q_idx;
if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
&& cs->encaps_signals)
job->encaps_sig_wait_offset = encaps_signal_offset;
	/*
	 * No need for parsing, the user CB is the patched CB.
	 * We call hl_cb_destroy() for two reasons: we don't need the CB in
	 * the CB idr anymore, and we must decrement its refcount as it was
	 * incremented inside hl_cb_kernel_create().
	 */
job->patched_cb = job->user_cb;
job->job_cb_size = job->user_cb_size;
hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
/* increment refcount as for external queues we get completion */
cs_get(cs);
cs->jobs_in_queue_cnt[job->hw_queue_id]++;
cs->jobs_cnt++;
list_add_tail(&job->cs_node, &cs->job_list);
hl_debugfs_add_job(hdev, job);
return 0;
}
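/*
 * cs_ioctl_reserve_signals - reserve 'count' signals on the sync stream of
 * the given queue by advancing the SOB value, and return a handle id, the
 * SOB address and the current signals count to the user.
 */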
static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
u32 q_idx, u32 count,
u32 *handle_id, u32 *sob_addr,
u32 *signals_count)
{
struct hw_queue_properties *hw_queue_prop;
struct hl_sync_stream_properties *prop;
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_encaps_sig_handle *handle;
struct hl_encaps_signals_mgr *mgr;
struct hl_hw_sob *hw_sob;
int hdl_id;
int rc = 0;
if (count >= HL_MAX_SOB_VAL) {
dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
count);
rc = -EINVAL;
goto out;
}
if (q_idx >= hdev->asic_prop.max_queues) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
q_idx);
rc = -EINVAL;
goto out;
}
hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
if (!hw_queue_prop->supports_sync_stream) {
dev_err(hdev->dev,
"Queue index %d does not support sync stream operations\n",
q_idx);
rc = -EINVAL;
goto out;
}
prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle) {
rc = -ENOMEM;
goto out;
}
handle->count = count;
hl_ctx_get(hpriv->ctx);
handle->ctx = hpriv->ctx;
mgr = &hpriv->ctx->sig_mgr;
spin_lock(&mgr->lock);
hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
spin_unlock(&mgr->lock);
if (hdl_id < 0) {
dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
rc = -EINVAL;
goto put_ctx;
}
handle->id = hdl_id;
handle->q_idx = q_idx;
handle->hdev = hdev;
kref_init(&handle->refcount);
hdev->asic_funcs->hw_queues_lock(hdev);
hw_sob = &prop->hw_sob[prop->curr_sob_offset];
	/*
	 * Increment the SOB value by count, per the user request, in order
	 * to reserve those signals.
	 * Check that the amount of signals to reserve does not exceed the max
	 * SOB value; if it does, switch to the other SOB.
	 */
rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
true);
if (rc) {
dev_err(hdev->dev, "Failed to switch SOB\n");
hdev->asic_funcs->hw_queues_unlock(hdev);
rc = -EINVAL;
goto remove_idr;
}
/* set the hw_sob to the handle after calling the sob wraparound handler
* since sob could have changed.
*/
handle->hw_sob = hw_sob;
/* store the current sob value for unreserve validity check, and
* signal offset support
*/
handle->pre_sob_val = prop->next_sob_val - handle->count;
handle->cs_seq = ULLONG_MAX;
*signals_count = prop->next_sob_val;
hdev->asic_funcs->hw_queues_unlock(hdev);
*sob_addr = handle->hw_sob->sob_addr;
*handle_id = hdl_id;
dev_dbg(hdev->dev,
"Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
hw_sob->sob_id, handle->hw_sob->sob_addr,
prop->next_sob_val - 1, q_idx, hdl_id);
goto out;
remove_idr:
spin_lock(&mgr->lock);
idr_remove(&mgr->handles, hdl_id);
spin_unlock(&mgr->lock);
put_ctx:
hl_ctx_put(handle->ctx);
kfree(handle);
out:
return rc;
}
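/*
 * cs_ioctl_unreserve_signals - undo a previous signal reservation: validate
 * that the SOB value did not move since the reservation, decrement it and
 * release the handle.
 */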
static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
{
struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
struct hl_sync_stream_properties *prop;
struct hl_device *hdev = hpriv->hdev;
struct hl_encaps_signals_mgr *mgr;
struct hl_hw_sob *hw_sob;
u32 q_idx, sob_addr;
int rc = 0;
mgr = &hpriv->ctx->sig_mgr;
spin_lock(&mgr->lock);
encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
if (encaps_sig_hdl) {
dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
handle_id, encaps_sig_hdl->hw_sob->sob_addr,
encaps_sig_hdl->count);
hdev->asic_funcs->hw_queues_lock(hdev);
q_idx = encaps_sig_hdl->q_idx;
prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
hw_sob = &prop->hw_sob[prop->curr_sob_offset];
sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
		/* Check if sob_val got out of sync due to other
		 * signal submission requests which were handled
		 * between the reserve-unreserve calls, or due to a SOB switch
		 * upon reaching the SOB max value.
		 */
if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
!= prop->next_sob_val ||
sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
encaps_sig_hdl->pre_sob_val,
(prop->next_sob_val - encaps_sig_hdl->count));
hdev->asic_funcs->hw_queues_unlock(hdev);
rc = -EINVAL;
goto out_unlock;
}
/*
* Decrement the SOB value by count by user request
* to unreserve those signals
*/
prop->next_sob_val -= encaps_sig_hdl->count;
hdev->asic_funcs->hw_queues_unlock(hdev);
hw_sob_put(hw_sob);
/* Release the id and free allocated memory of the handle */
idr_remove(&mgr->handles, handle_id);
/* unlock before calling ctx_put, where we might sleep */
spin_unlock(&mgr->lock);
hl_ctx_put(encaps_sig_hdl->ctx);
kfree(encaps_sig_hdl);
goto out;
} else {
rc = -EINVAL;
dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n");
}
out_unlock:
spin_unlock(&mgr->lock);
out:
return rc;
}
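/*
 * cs_ioctl_signal_wait - handle signal / wait / collective-wait CSs: validate
 * the single chunk, resolve the signal fence (or encaps signals handle),
 * create the kernel jobs and schedule the CS.
 */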
static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
void __user *chunks, u32 num_chunks,
u64 *cs_seq, u32 flags, u32 timeout,
u32 *signal_sob_addr_offset, u16 *signal_initial_sob_count)
{
struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
bool handle_found = false, is_wait_cs = false,
wait_cs_submitted = false,
cs_encaps_signals = false;
struct hl_cs_chunk *cs_chunk_array, *chunk;
bool staged_cs_with_encaps_signals = false;
struct hw_queue_properties *hw_queue_prop;
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_compl *sig_waitcs_cmpl;
u32 q_idx, collective_engine_id = 0;
struct hl_cs_counters_atomic *cntr;
struct hl_fence *sig_fence = NULL;
struct hl_ctx *ctx = hpriv->ctx;
enum hl_queue_type q_type;
struct hl_cs *cs;
u64 signal_seq;
int rc;
cntr = &hdev->aggregated_cs_counters;
*cs_seq = ULLONG_MAX;
rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
ctx);
if (rc)
goto out;
/* currently it is guaranteed to have only one chunk */
chunk = &cs_chunk_array[0];
if (chunk->queue_index >= hdev->asic_prop.max_queues) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
rc = -EINVAL;
goto free_cs_chunk_array;
}
q_idx = chunk->queue_index;
hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
q_type = hw_queue_prop->type;
if (!hw_queue_prop->supports_sync_stream) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d does not support sync stream operations\n",
q_idx);
rc = -EINVAL;
goto free_cs_chunk_array;
}
if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d is invalid\n", q_idx);
rc = -EINVAL;
goto free_cs_chunk_array;
}
if (!hdev->nic_ports_mask) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Collective operations not supported when NIC ports are disabled");
rc = -EINVAL;
goto free_cs_chunk_array;
}
collective_engine_id = chunk->collective_engine_id;
}
is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
cs_type == CS_TYPE_COLLECTIVE_WAIT);
cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
if (is_wait_cs) {
rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
ctx, cs_encaps_signals);
if (rc)
goto free_cs_chunk_array;
if (cs_encaps_signals) {
/* check if cs sequence has encapsulated
* signals handle
*/
struct idr *idp;
u32 id;
spin_lock(&ctx->sig_mgr.lock);
idp = &ctx->sig_mgr.handles;
idr_for_each_entry(idp, encaps_sig_hdl, id) {
if (encaps_sig_hdl->cs_seq == signal_seq) {
					/* Get a refcount to protect removing this handle from
					 * the idr, needed when multiple wait CSs are used with
					 * an offset to wait on reserved encaps signals.
					 * Since kref_put of this handle is executed outside the
					 * current lock, it is possible that the handle refcount
					 * is 0 but it has yet to be removed from the list. In
					 * this case the handle must be considered as not valid.
					 */
if (kref_get_unless_zero(&encaps_sig_hdl->refcount))
handle_found = true;
break;
}
}
spin_unlock(&ctx->sig_mgr.lock);
if (!handle_found) {
/* treat as signal CS already finished */
dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
signal_seq);
rc = 0;
goto free_cs_chunk_array;
}
/* validate also the signal offset value */
if (chunk->encaps_signal_offset >
encaps_sig_hdl->count) {
dev_err(hdev->dev, "offset(%u) value exceed max reserved signals count(%u)!\n",
chunk->encaps_signal_offset,
encaps_sig_hdl->count);
rc = -EINVAL;
goto free_cs_chunk_array;
}
}
sig_fence = hl_ctx_get_fence(ctx, signal_seq);
if (IS_ERR(sig_fence)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Failed to get signal CS with seq 0x%llx\n",
signal_seq);
rc = PTR_ERR(sig_fence);
goto free_cs_chunk_array;
}
if (!sig_fence) {
/* signal CS already finished */
rc = 0;
goto free_cs_chunk_array;
}
sig_waitcs_cmpl =
container_of(sig_fence, struct hl_cs_compl, base_fence);
staged_cs_with_encaps_signals = !!
(sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
(flags & HL_CS_FLAGS_ENCAP_SIGNALS));
if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
!staged_cs_with_encaps_signals) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"CS seq 0x%llx is not of a signal/encaps-signal CS\n",
signal_seq);
hl_fence_put(sig_fence);
rc = -EINVAL;
goto free_cs_chunk_array;
}
if (completion_done(&sig_fence->completion)) {
/* signal CS already finished */
hl_fence_put(sig_fence);
rc = 0;
goto free_cs_chunk_array;
}
}
rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
if (rc) {
if (is_wait_cs)
hl_fence_put(sig_fence);
goto free_cs_chunk_array;
}
	/*
	 * Save the signal CS fence for later initialization right before
	 * hanging the wait CS on the queue.
	 * For the encaps signals case, we save the CS sequence and handle
	 * pointer for later initialization.
	 */
if (is_wait_cs) {
cs->signal_fence = sig_fence;
		/* store the handle pointer, so we don't have to
		 * look for it again later on in the flow
		 * when we need to set SOB info in hw_queue.
		 */
if (cs->encaps_signals)
cs->encaps_sig_hdl = encaps_sig_hdl;
}
hl_debugfs_add_cs(cs);
*cs_seq = cs->sequence;
if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
q_idx, chunk->encaps_signal_offset);
else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
cs, q_idx, collective_engine_id,
chunk->encaps_signal_offset);
else {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
}
if (rc)
goto free_cs_object;
if (q_type == QUEUE_TYPE_HW)
INIT_WORK(&cs->finish_work, cs_completion);
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
		/* If the wait CS failed here, it means the signal CS has
		 * already completed. We want to free all of its related
		 * objects, but we don't want to fail the ioctl.
		 */
if (is_wait_cs)
rc = 0;
else if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to submit CS %d.%llu to H/W queues, error %d\n",
ctx->asid, cs->sequence, rc);
goto free_cs_object;
}
*signal_sob_addr_offset = cs->sob_addr_offset;
*signal_initial_sob_count = cs->initial_sob_count;
rc = HL_CS_STATUS_SUCCESS;
if (is_wait_cs)
wait_cs_submitted = true;
goto put_cs;
free_cs_object:
cs_rollback(hdev, cs);
*cs_seq = ULLONG_MAX;
/* The path below is both for good and erroneous exits */
put_cs:
/* We finished with the CS in this function, so put the ref */
cs_put(cs);
free_cs_chunk_array:
if (!wait_cs_submitted && cs_encaps_signals && handle_found && is_wait_cs)
kref_put(&encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
kfree(cs_chunk_array);
out:
return rc;
}
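/*
 * cs_ioctl_engine_cores - copy the engine-core id array from user space and
 * forward the run/halt command to the ASIC-specific handler.
 */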
static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
u32 num_engine_cores, u32 core_command)
{
struct hl_device *hdev = hpriv->hdev;
void __user *engine_cores_arr;
u32 *cores;
int rc;
if (!hdev->asic_prop.supports_engine_modes)
return -EPERM;
if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) {
dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores);
return -EINVAL;
}
if (core_command != HL_ENGINE_CORE_RUN && core_command != HL_ENGINE_CORE_HALT) {
dev_err(hdev->dev, "Engine core command is invalid\n");
return -EINVAL;
}
engine_cores_arr = (void __user *) (uintptr_t) engine_cores;
cores = kmalloc_array(num_engine_cores, sizeof(u32), GFP_KERNEL);
if (!cores)
return -ENOMEM;
if (copy_from_user(cores, engine_cores_arr, num_engine_cores * sizeof(u32))) {
dev_err(hdev->dev, "Failed to copy core-ids array from user\n");
kfree(cores);
return -EFAULT;
}
rc = hdev->asic_funcs->set_engine_cores(hdev, cores, num_engine_cores, core_command);
kfree(cores);
return rc;
}
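/*
 * cs_ioctl_engines - copy the engine id array from user space and forward the
 * engine command to the ASIC-specific handler.
 */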
static int cs_ioctl_engines(struct hl_fpriv *hpriv, u64 engines_arr_user_addr,
u32 num_engines, enum hl_engine_command command)
{
struct hl_device *hdev = hpriv->hdev;
u32 *engines, max_num_of_engines;
void __user *engines_arr;
int rc;
if (!hdev->asic_prop.supports_engine_modes)
return -EPERM;
if (command >= HL_ENGINE_COMMAND_MAX) {
dev_err(hdev->dev, "Engine command is invalid\n");
return -EINVAL;
}
max_num_of_engines = hdev->asic_prop.max_num_of_engines;
if (command == HL_ENGINE_CORE_RUN || command == HL_ENGINE_CORE_HALT)
max_num_of_engines = hdev->asic_prop.num_engine_cores;
if (!num_engines || num_engines > max_num_of_engines) {
dev_err(hdev->dev, "Number of engines %d is invalid\n", num_engines);
return -EINVAL;
}
engines_arr = (void __user *) (uintptr_t) engines_arr_user_addr;
engines = kmalloc_array(num_engines, sizeof(u32), GFP_KERNEL);
if (!engines)
return -ENOMEM;
if (copy_from_user(engines, engines_arr, num_engines * sizeof(u32))) {
dev_err(hdev->dev, "Failed to copy engine-ids array from user\n");
kfree(engines);
return -EFAULT;
}
rc = hdev->asic_funcs->set_engines(hdev, engines, num_engines, command);
kfree(engines);
return rc;
}
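/*
 * cs_ioctl_flush_pci_hbw_writes - flush outstanding PCI high-bandwidth writes
 * by reading back the dedicated flush register.
 */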
static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv)
{
struct hl_device *hdev = hpriv->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
if (!prop->hbw_flush_reg) {
dev_dbg(hdev->dev, "HBW flush is not supported\n");
return -EOPNOTSUPP;
}
RREG32(prop->hbw_flush_reg);
return 0;
}
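/*
 * hl_cs_ioctl - main CS ioctl entry point: run sanity checks, handle the
 * context-switch phase, dispatch to the relevant CS-type handler and fill the
 * output arguments.
 */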
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
union hl_cs_args *args = data;
enum hl_cs_type cs_type = 0;
u64 cs_seq = ULONG_MAX;
void __user *chunks;
u32 num_chunks, flags, timeout,
signals_count = 0, sob_addr = 0, handle_id = 0;
u16 sob_initial_count = 0;
int rc;
rc = hl_cs_sanity_checks(hpriv, args);
if (rc)
goto out;
rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
if (rc)
goto out;
cs_type = hl_cs_get_cs_type(args->in.cs_flags &
~HL_CS_FLAGS_FORCE_RESTORE);
chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
num_chunks = args->in.num_chunks_execute;
flags = args->in.cs_flags;
/* In case this is a staged CS, user should supply the CS sequence */
if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
cs_seq = args->in.seq;
timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
? msecs_to_jiffies(args->in.timeout * 1000)
: hpriv->hdev->timeout_jiffies;
switch (cs_type) {
case CS_TYPE_SIGNAL:
case CS_TYPE_WAIT:
case CS_TYPE_COLLECTIVE_WAIT:
rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
&cs_seq, args->in.cs_flags, timeout,
&sob_addr, &sob_initial_count);
break;
case CS_RESERVE_SIGNALS:
rc = cs_ioctl_reserve_signals(hpriv,
args->in.encaps_signals_q_idx,
args->in.encaps_signals_count,
&handle_id, &sob_addr, &signals_count);
break;
case CS_UNRESERVE_SIGNALS:
rc = cs_ioctl_unreserve_signals(hpriv,
args->in.encaps_sig_handle_id);
break;
case CS_TYPE_ENGINE_CORE:
rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
args->in.num_engine_cores, args->in.core_command);
break;
case CS_TYPE_ENGINES:
rc = cs_ioctl_engines(hpriv, args->in.engines,
args->in.num_engines, args->in.engine_command);
break;
case CS_TYPE_FLUSH_PCI_HBW_WRITES:
rc = cs_ioctl_flush_pci_hbw_writes(hpriv);
break;
default:
rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
args->in.cs_flags,
args->in.encaps_sig_handle_id,
timeout, &sob_initial_count);
break;
}
out:
if (rc != -EAGAIN) {
memset(args, 0, sizeof(*args));
switch (cs_type) {
case CS_RESERVE_SIGNALS:
args->out.handle_id = handle_id;
args->out.sob_base_addr_offset = sob_addr;
args->out.count = signals_count;
break;
case CS_TYPE_SIGNAL:
args->out.sob_base_addr_offset = sob_addr;
args->out.sob_count_before_submission = sob_initial_count;
args->out.seq = cs_seq;
break;
case CS_TYPE_DEFAULT:
args->out.sob_count_before_submission = sob_initial_count;
args->out.seq = cs_seq;
break;
default:
args->out.seq = cs_seq;
break;
}
args->out.status = rc;
}
return rc;
}
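/*
 * hl_wait_for_fence - wait (or poll, when timeout_us is 0) on a single CS
 * fence and translate the result into a CS wait status, an optional timestamp
 * and an error code.
 */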
static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
enum hl_cs_wait_status *status, u64 timeout_us, s64 *timestamp)
{
struct hl_device *hdev = ctx->hdev;
ktime_t timestamp_kt;
long completion_rc;
int rc = 0, error;
if (IS_ERR(fence)) {
rc = PTR_ERR(fence);
if (rc == -EINVAL)
dev_notice_ratelimited(hdev->dev,
"Can't wait on CS %llu because current CS is at seq %llu\n",
seq, ctx->cs_sequence);
return rc;
}
if (!fence) {
if (!hl_pop_cs_outcome(&ctx->outcome_store, seq, ×tamp_kt, &error)) {
dev_dbg(hdev->dev,
"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
seq, ctx->cs_sequence);
*status = CS_WAIT_STATUS_GONE;
return 0;
}
completion_rc = 1;
goto report_results;
}
if (!timeout_us) {
completion_rc = completion_done(&fence->completion);
} else {
unsigned long timeout;
timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
timeout_us : usecs_to_jiffies(timeout_us);
completion_rc =
wait_for_completion_interruptible_timeout(
&fence->completion, timeout);
}
error = fence->error;
timestamp_kt = fence->timestamp;
report_results:
if (completion_rc > 0) {
*status = CS_WAIT_STATUS_COMPLETED;
if (timestamp)
*timestamp = ktime_to_ns(timestamp_kt);
} else {
*status = CS_WAIT_STATUS_BUSY;
}
if (completion_rc == -ERESTARTSYS)
rc = completion_rc;
else if (error == -ETIMEDOUT || error == -EIO)
rc = error;
return rc;
}
/*
 * hl_cs_poll_fences - iterate CS fences to check for CS completion
 *
 * @mcs_data: multi-CS internal data
 * @mcs_compl: multi-CS completion structure
 *
 * @return 0 on success, otherwise non 0 error code
 *
 * The function iterates over all CS sequences in the list and sets a bit in
 * completion_bitmap for each completed CS.
 * While iterating, the function ORs the stream map of each fence in the fence
 * array into the completion QID stream map, to be used by CSs to complete the
 * multi-CS context.
 * This function shall be called after taking the context ref.
 */
static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_completion *mcs_compl)
{
struct hl_fence **fence_ptr = mcs_data->fence_arr;
struct hl_device *hdev = mcs_data->ctx->hdev;
int i, rc, arr_len = mcs_data->arr_len;
u64 *seq_arr = mcs_data->seq_arr;
ktime_t max_ktime, first_cs_time;
enum hl_cs_wait_status status;
memset(fence_ptr, 0, arr_len * sizeof(struct hl_fence *));
/* get all fences under the same lock */
rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
if (rc)
return rc;
	/*
	 * Re-initialize the completion here to handle 2 possible cases:
	 * 1. A CS will complete the multi-CS prior to clearing the completion,
	 *    in which case the fence iteration is guaranteed to catch the CS
	 *    completion.
	 * 2. The completion will occur after the re-init of the completion,
	 *    in which case we will wake up immediately in wait_for_completion.
	 */
reinit_completion(&mcs_compl->completion);
	/*
	 * Set to the maximum time to verify the timestamp is valid: if at the
	 * end this value is unchanged, no timestamp was updated.
	 */
max_ktime = ktime_set(KTIME_SEC_MAX, 0);
first_cs_time = max_ktime;
for (i = 0; i < arr_len; i++, fence_ptr++) {
struct hl_fence *fence = *fence_ptr;
		/*
		 * In order to prevent a case where we wait until timeout even though a CS
		 * associated with the multi-CS actually completed, we do things in the below order:
		 * 1. for each fence, set its QID map in the multi-CS completion QID map. This way
		 *    any CS can, potentially, complete the multi-CS for the specific QID (note
		 *    that once the completion is initialized, calling complete* and then waiting
		 *    on the completion will cause it to return at once)
		 * 2. only after allowing multi-CS completion for the specific QID do we check
		 *    whether the specific CS already completed (and thus the wait for completion
		 *    part will be skipped). If the CS has not completed, it is guaranteed that the
		 *    completing CS will wake up the completion.
		 */
if (fence)
mcs_compl->stream_master_qid_map |= fence->stream_master_qid_map;
/*
* function won't sleep as it is called with timeout 0 (i.e.
* poll the fence)
*/
rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence, &status, 0, NULL);
if (rc) {
dev_err(hdev->dev,
"wait_for_fence error :%d for CS seq %llu\n",
rc, seq_arr[i]);
break;
}
switch (status) {
case CS_WAIT_STATUS_BUSY:
			/* CS has not finished, the QID to wait on is already stored */
break;
case CS_WAIT_STATUS_COMPLETED:
			/*
			 * Use mcs_handling_done to avoid the possibility of
			 * mcs_data being returned to the user, indicating the
			 * CS completed, before the CS finished all of its mcs
			 * handling. This avoids a race the next time the user
			 * waits for mcs.
			 * note: when reaching this case the fence is definitely
			 * not NULL, but the NULL check was added to satisfy
			 * static analysis.
			 */
if (fence && !fence->mcs_handling_done) {
				/*
				 * In case the multi-CS is completed but MCS
				 * handling is not done, we "complete" the
				 * multi-CS to prevent it from waiting until
				 * time-out, and the "multi-CS handling done"
				 * will get another chance at the next
				 * iteration.
				 */
complete_all(&mcs_compl->completion);
break;
}
mcs_data->completion_bitmap |= BIT(i);
/*
* For all completed CSs we take the earliest timestamp.
* For this we have to validate that the timestamp is
* earliest of all timestamps so far.
*/
if (fence && mcs_data->update_ts &&
(ktime_compare(fence->timestamp, first_cs_time) < 0))
first_cs_time = fence->timestamp;
break;
case CS_WAIT_STATUS_GONE:
mcs_data->update_ts = false;
mcs_data->gone_cs = true;
			/*
			 * It is possible to get old sequence numbers from the
			 * user which relate to already completed CSs whose
			 * fences are already gone. In this case, the CS is set
			 * as completed but there is no need to consider its
			 * QID for mcs completion.
			 */
mcs_data->completion_bitmap |= BIT(i);
break;
default:
dev_err(hdev->dev, "Invalid fence status\n");
rc = -EINVAL;
break;
}
}
hl_fences_put(mcs_data->fence_arr, arr_len);
if (mcs_data->update_ts &&
(ktime_compare(first_cs_time, max_ktime) != 0))
mcs_data->timestamp = ktime_to_ns(first_cs_time);
return rc;
}
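/*
 * _hl_cs_wait_ioctl - look up the fence of the given CS sequence and wait on
 * it. Used by both the single-CS wait ioctl and the restore-phase wait.
 */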
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
enum hl_cs_wait_status *status, s64 *timestamp)
{
struct hl_fence *fence;
int rc = 0;
if (timestamp)
*timestamp = 0;
hl_ctx_get(ctx);
fence = hl_ctx_get_fence(ctx, seq);
rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
hl_fence_put(fence);
hl_ctx_put(ctx);
return rc;
}
static inline unsigned long hl_usecs64_to_jiffies(const u64 usecs)
{
if (usecs <= U32_MAX)
return usecs_to_jiffies(usecs);
	/*
	 * If the value in nanoseconds would not fit in 64 bits, use the
	 * largest 64 bit value.
	 */
if (usecs >= ((u64)(U64_MAX / NSEC_PER_USEC)))
return nsecs_to_jiffies(U64_MAX);
return nsecs_to_jiffies(usecs * NSEC_PER_USEC);
}
/*
 * hl_wait_multi_cs_completion_init - init completion structure
 *
 * @hdev: pointer to habanalabs device structure
 *
 * @return valid completion struct pointer on success, otherwise error pointer
 *
 * Up to MULTI_CS_MAX_USER_CTX calls can be done concurrently to the driver.
 * The function gets the first available completion (by marking it "used")
 * and initializes its values.
 */
static struct multi_cs_completion *hl_wait_multi_cs_completion_init(struct hl_device *hdev)
{
struct multi_cs_completion *mcs_compl;
int i;
/* find free multi_cs completion structure */
for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
mcs_compl = &hdev->multi_cs_completion[i];
spin_lock(&mcs_compl->lock);
if (!mcs_compl->used) {
mcs_compl->used = 1;
mcs_compl->timestamp = 0;
/*
* init QID map to 0 to avoid completion by CSs. the actual QID map
* to multi-CS CSs will be set incrementally at a later stage
*/
mcs_compl->stream_master_qid_map = 0;
spin_unlock(&mcs_compl->lock);
break;
}
spin_unlock(&mcs_compl->lock);
}
if (i == MULTI_CS_MAX_USER_CTX) {
dev_err(hdev->dev, "no available multi-CS completion structure\n");
return ERR_PTR(-ENOMEM);
}
return mcs_compl;
}
/*
* hl_wait_multi_cs_completion_fini - return completion structure and set as
* unused
*
* @mcs_compl: pointer to the completion structure
*/
static void hl_wait_multi_cs_completion_fini(
struct multi_cs_completion *mcs_compl)
{
/*
* free completion structure, do it under lock to be in-sync with the
* thread that signals completion
*/
spin_lock(&mcs_compl->lock);
mcs_compl->used = 0;
spin_unlock(&mcs_compl->lock);
}
/*
* hl_wait_multi_cs_completion - wait for first CS to complete
*
* @mcs_data: multi-CS internal data
*
* @return 0 on success, otherwise non 0 error code
*/
static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data,
struct multi_cs_completion *mcs_compl)
{
long completion_rc;
completion_rc = wait_for_completion_interruptible_timeout(&mcs_compl->completion,
mcs_data->timeout_jiffies);
/* update timestamp */
if (completion_rc > 0)
mcs_data->timestamp = mcs_compl->timestamp;
if (completion_rc == -ERESTARTSYS)
return completion_rc;
mcs_data->wait_status = completion_rc;
return 0;
}
/*
* hl_multi_cs_completion_init - init array of multi-CS completion structures
*
* @hdev: pointer to habanalabs device structure
*/
void hl_multi_cs_completion_init(struct hl_device *hdev)
{
struct multi_cs_completion *mcs_cmpl;
int i;
for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
mcs_cmpl = &hdev->multi_cs_completion[i];
mcs_cmpl->used = 0;
spin_lock_init(&mcs_cmpl->lock);
init_completion(&mcs_cmpl->completion);
}
}
/*
* hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
*
* @hpriv: pointer to the private data of the fd
* @data: pointer to multi-CS wait ioctl in/out args
*
*/
static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
struct multi_cs_completion *mcs_compl;
struct hl_device *hdev = hpriv->hdev;
struct multi_cs_data mcs_data = {};
union hl_wait_cs_args *args = data;
struct hl_ctx *ctx = hpriv->ctx;
struct hl_fence **fence_arr;
void __user *seq_arr;
u32 size_to_copy;
u64 *cs_seq_arr;
u8 seq_arr_len;
int rc, i;
for (i = 0 ; i < sizeof(args->in.pad) ; i++)
if (args->in.pad[i]) {
dev_dbg(hdev->dev, "Padding bytes must be 0\n");
return -EINVAL;
}
if (!hdev->supports_wait_for_multi_cs) {
dev_err(hdev->dev, "Wait for multi CS is not supported\n");
return -EPERM;
}
seq_arr_len = args->in.seq_arr_len;
if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
return -EINVAL;
}
/* allocate memory for sequence array */
cs_seq_arr =
kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
if (!cs_seq_arr)
return -ENOMEM;
/* copy CS sequence array from user */
seq_arr = (void __user *) (uintptr_t) args->in.seq;
size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
rc = -EFAULT;
goto free_seq_arr;
}
/* allocate array for the fences */
fence_arr = kmalloc_array(seq_arr_len, sizeof(struct hl_fence *), GFP_KERNEL);
if (!fence_arr) {
rc = -ENOMEM;
goto free_seq_arr;
}
/* initialize the multi-CS internal data */
mcs_data.ctx = ctx;
mcs_data.seq_arr = cs_seq_arr;
mcs_data.fence_arr = fence_arr;
mcs_data.arr_len = seq_arr_len;
hl_ctx_get(ctx);
/* wait (with timeout) for the first CS to be completed */
mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us);
mcs_compl = hl_wait_multi_cs_completion_init(hdev);
if (IS_ERR(mcs_compl)) {
rc = PTR_ERR(mcs_compl);
goto put_ctx;
}
/* poll all CS fences, extract timestamp */
mcs_data.update_ts = true;
rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
/*
* skip wait for CS completion when one of the below is true:
* - an error on the poll function
* - one or more CS in the list completed
* - the user called ioctl with timeout 0
*/
if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
goto completion_fini;
while (true) {
rc = hl_wait_multi_cs_completion(&mcs_data, mcs_compl);
if (rc || (mcs_data.wait_status == 0))
break;
/*
* poll fences once again to update the CS map.
* no timestamp should be updated this time.
*/
mcs_data.update_ts = false;
rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
if (rc || mcs_data.completion_bitmap)
break;
		/*
		 * If hl_wait_multi_cs_completion returned before the timeout (i.e.
		 * it got a completion), then either it was completed by a CS in the
		 * multi-CS list (in which case the indication is a non-empty
		 * completion_bitmap), or it was completed by a CS submitted to one of
		 * the shared stream masters but not in the multi-CS list (in which
		 * case we should wait again, but modify the timeout and set the
		 * timestamp to zero to let a CS related to the current multi-CS set a
		 * new, relevant, timestamp).
		 */
mcs_data.timeout_jiffies = mcs_data.wait_status;
mcs_compl->timestamp = 0;
}
completion_fini:
hl_wait_multi_cs_completion_fini(mcs_compl);
put_ctx:
hl_ctx_put(ctx);
kfree(fence_arr);
free_seq_arr:
kfree(cs_seq_arr);
if (rc == -ERESTARTSYS) {
dev_err_ratelimited(hdev->dev,
"user process got signal while waiting for Multi-CS\n");
rc = -EINTR;
}
if (rc)
return rc;
/* update output args */
memset(args, 0, sizeof(*args));
if (mcs_data.completion_bitmap) {
args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
args->out.cs_completion_map = mcs_data.completion_bitmap;
		/* if the timestamp is not 0, it's valid */
if (mcs_data.timestamp) {
args->out.timestamp_nsec = mcs_data.timestamp;
args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
}
/* update if some CS was gone */
if (!mcs_data.timestamp)
args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
} else {
args->out.status = HL_WAIT_CS_STATUS_BUSY;
}
return 0;
}
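/*
 * hl_cs_wait_ioctl - single-CS wait ioctl: wait on a CS sequence and report
 * the completion status and timestamp to the user.
 */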
static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
struct hl_device *hdev = hpriv->hdev;
union hl_wait_cs_args *args = data;
enum hl_cs_wait_status status;
u64 seq = args->in.seq;
s64 timestamp;
int rc;
rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, &status, ×tamp);
if (rc == -ERESTARTSYS) {
dev_err_ratelimited(hdev->dev,
"user process got signal while waiting for CS handle %llu\n",
seq);
return -EINTR;
}
memset(args, 0, sizeof(*args));
if (rc) {
if (rc == -ETIMEDOUT) {
dev_err_ratelimited(hdev->dev,
"CS %llu has timed-out while user process is waiting for it\n",
seq);
args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
} else if (rc == -EIO) {
dev_err_ratelimited(hdev->dev,
"CS %llu has been aborted while user process is waiting for it\n",
seq);
args->out.status = HL_WAIT_CS_STATUS_ABORTED;
}
return rc;
}
if (timestamp) {
args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
args->out.timestamp_nsec = timestamp;
}
switch (status) {
case CS_WAIT_STATUS_GONE:
args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
fallthrough;
case CS_WAIT_STATUS_COMPLETED:
args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
break;
case CS_WAIT_STATUS_BUSY:
default:
args->out.status = HL_WAIT_CS_STATUS_BUSY;
break;
}
return 0;
}
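/*
 * ts_buff_get_kernel_ts_record - get the timestamp registration record at
 * ts_offset inside the timestamp buffer, recycling it from the interrupt wait
 * list if it is already in use, and fill it with the CQ counter address and
 * target value.
 */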
static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
struct hl_cb *cq_cb,
u64 ts_offset, u64 cq_offset, u64 target_value,
spinlock_t *wait_list_lock,
struct hl_user_pending_interrupt **pend)
{
struct hl_ts_buff *ts_buff = buf->private;
struct hl_user_pending_interrupt *requested_offset_record =
(struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
ts_offset;
struct hl_user_pending_interrupt *cb_last =
(struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address +
(ts_buff->kernel_buff_size / sizeof(struct hl_user_pending_interrupt));
unsigned long iter_counter = 0;
u64 current_cq_counter;
ktime_t timestamp;
	/* Validate that ts_offset does not exceed the last valid record */
if (requested_offset_record >= cb_last) {
dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
(u64)(uintptr_t)cb_last);
return -EINVAL;
}
timestamp = ktime_get();
start_over:
spin_lock(wait_list_lock);
	/* Unregister only if we didn't reach the target value,
	 * since in this case there will be no handling in irq context
	 * and it's safe to delete the node from the interrupt list and
	 * then re-use it for another interrupt.
	 */
if (requested_offset_record->ts_reg_info.in_use) {
current_cq_counter = *requested_offset_record->cq_kernel_addr;
if (current_cq_counter < requested_offset_record->cq_target_value) {
list_del(&requested_offset_record->wait_list_node);
spin_unlock(wait_list_lock);
hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf);
hl_cb_put(requested_offset_record->ts_reg_info.cq_cb);
dev_dbg(buf->mmg->dev,
"ts node removed from interrupt list now can re-use\n");
} else {
dev_dbg(buf->mmg->dev,
"ts node in middle of irq handling\n");
			/* irq thread is in the middle of handling; give it time to finish */
spin_unlock(wait_list_lock);
usleep_range(100, 1000);
if (++iter_counter == MAX_TS_ITER_NUM) {
dev_err(buf->mmg->dev,
"Timestamp offset processing reached timeout of %lld ms\n",
ktime_ms_delta(ktime_get(), timestamp));
return -EAGAIN;
}
goto start_over;
}
} else {
/* Fill up the new registration node info */
requested_offset_record->ts_reg_info.buf = buf;
requested_offset_record->ts_reg_info.cq_cb = cq_cb;
requested_offset_record->ts_reg_info.timestamp_kernel_addr =
(u64 *) ts_buff->user_buff_address + ts_offset;
requested_offset_record->cq_kernel_addr =
(u64 *) cq_cb->kernel_address + cq_offset;
requested_offset_record->cq_target_value = target_value;
spin_unlock(wait_list_lock);
}
*pend = requested_offset_record;
dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n",
requested_offset_record);
return 0;
}
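/*
 * _hl_interrupt_wait_ioctl - wait for a user interrupt on a CQ counter
 * reaching a target value, either by blocking on a fence until the interrupt
 * handler signals it, or by registering a timestamp record that the interrupt
 * handler fills asynchronously.
 */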
static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
struct hl_mem_mgr *cb_mmg, struct hl_mem_mgr *mmg,
u64 timeout_us, u64 cq_counters_handle, u64 cq_counters_offset,
u64 target_value, struct hl_user_interrupt *interrupt,
bool register_ts_record, u64 ts_handle, u64 ts_offset,
u32 *status, u64 *timestamp)
{
struct hl_user_pending_interrupt *pend;
struct hl_mmap_mem_buf *buf;
struct hl_cb *cq_cb;
unsigned long timeout;
long completion_rc;
int rc = 0;
timeout = hl_usecs64_to_jiffies(timeout_us);
hl_ctx_get(ctx);
cq_cb = hl_cb_get(cb_mmg, cq_counters_handle);
if (!cq_cb) {
rc = -EINVAL;
goto put_ctx;
}
/* Validate the cq offset */
if (((u64 *) cq_cb->kernel_address + cq_counters_offset) >=
((u64 *) cq_cb->kernel_address + (cq_cb->size / sizeof(u64)))) {
rc = -EINVAL;
goto put_cq_cb;
}
if (register_ts_record) {
dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n",
interrupt->interrupt_id, ts_offset, cq_counters_offset);
buf = hl_mmap_mem_buf_get(mmg, ts_handle);
if (!buf) {
rc = -EINVAL;
goto put_cq_cb;
}
/* get ts buffer record */
rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
cq_counters_offset, target_value,
&interrupt->wait_list_lock, &pend);
if (rc)
goto put_ts_buff;
} else {
pend = kzalloc(sizeof(*pend), GFP_KERNEL);
if (!pend) {
rc = -ENOMEM;
goto put_cq_cb;
}
hl_fence_init(&pend->fence, ULONG_MAX);
pend->cq_kernel_addr = (u64 *) cq_cb->kernel_address + cq_counters_offset;
pend->cq_target_value = target_value;
}
spin_lock(&interrupt->wait_list_lock);
/* We check for completion value as interrupt could have been received
* before we added the node to the wait list
*/
if (*pend->cq_kernel_addr >= target_value) {
if (register_ts_record)
pend->ts_reg_info.in_use = 0;
spin_unlock(&interrupt->wait_list_lock);
*status = HL_WAIT_CS_STATUS_COMPLETED;
if (register_ts_record) {
*pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns();
goto put_ts_buff;
} else {
pend->fence.timestamp = ktime_get();
goto set_timestamp;
}
} else if (!timeout_us) {
spin_unlock(&interrupt->wait_list_lock);
*status = HL_WAIT_CS_STATUS_BUSY;
pend->fence.timestamp = ktime_get();
goto set_timestamp;
}
	/* Add the pending user interrupt to the relevant list for the
	 * interrupt handler to monitor.
	 * Note that we cannot keep the list sorted by target value in order to
	 * shorten the list traversal loop, since the same list can contain
	 * nodes for different cq counter handles.
	 * Note:
	 * Mark the ts buff offset as in use here, inside the spinlock
	 * protected area, to avoid reaching the re-use section in
	 * ts_buff_get_kernel_ts_record before adding the node to the list.
	 * This scenario might happen when multiple threads race on the same
	 * offset: one thread could set the ts buff in
	 * ts_buff_get_kernel_ts_record, then the other thread takes over and
	 * gets to ts_buff_get_kernel_ts_record, and then we would try to
	 * re-use the same ts buff offset and delete a non-existing node from
	 * the list.
	 */
if (register_ts_record)
pend->ts_reg_info.in_use = 1;
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
spin_unlock(&interrupt->wait_list_lock);
if (register_ts_record) {
rc = *status = HL_WAIT_CS_STATUS_COMPLETED;
goto ts_registration_exit;
}
/* Wait for interrupt handler to signal completion */
completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
timeout);
if (completion_rc > 0) {
*status = HL_WAIT_CS_STATUS_COMPLETED;
} else {
if (completion_rc == -ERESTARTSYS) {
dev_err_ratelimited(hdev->dev,
"user process got signal while waiting for interrupt ID %d\n",
interrupt->interrupt_id);
rc = -EINTR;
*status = HL_WAIT_CS_STATUS_ABORTED;
} else {
if (pend->fence.error == -EIO) {
dev_err_ratelimited(hdev->dev,
"interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
pend->fence.error);
rc = -EIO;
*status = HL_WAIT_CS_STATUS_ABORTED;
} else {
/* The wait has timed-out. We don't know anything beyond that
* because the workload wasn't submitted through the driver.
* Therefore, from driver's perspective, the workload is still
* executing.
*/
rc = 0;
*status = HL_WAIT_CS_STATUS_BUSY;
}
}
}
	/*
	 * We keep removing the node from the list here, and not in the irq
	 * handler, for the completion-timeout case. If it's a registration
	 * for a ts record, the node will be deleted in the irq handler after
	 * we reach the target value.
	 */
spin_lock(&interrupt->wait_list_lock);
list_del(&pend->wait_list_node);
spin_unlock(&interrupt->wait_list_lock);
set_timestamp:
*timestamp = ktime_to_ns(pend->fence.timestamp);
kfree(pend);
hl_cb_put(cq_cb);
ts_registration_exit:
hl_ctx_put(ctx);
return rc;
put_ts_buff:
hl_mmap_mem_buf_put(buf);
put_cq_cb:
hl_cb_put(cq_cb);
put_ctx:
hl_ctx_put(ctx);
return rc;
}
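/*
 * _hl_interrupt_wait_ioctl_user_addr() - wait for a user interrupt, polling a
 *                                        user-space address.
 *
 * The caller supplies a user virtual address and a target value. The pending
 * node is added to the interrupt wait list, the current value is read with
 * copy_from_user() to catch an interrupt that fired before the node was
 * queued, and the function then sleeps on the fence completion, re-reading
 * the user address after every wakeup until the value reaches the target,
 * the timeout expires, a reset aborts the wait or a signal is received.
 */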
static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx,
u64 timeout_us, u64 user_address,
u64 target_value, struct hl_user_interrupt *interrupt,
u32 *status,
u64 *timestamp)
{
struct hl_user_pending_interrupt *pend;
unsigned long timeout;
u64 completion_value;
long completion_rc;
int rc = 0;
timeout = hl_usecs64_to_jiffies(timeout_us);
hl_ctx_get(ctx);
pend = kzalloc(sizeof(*pend), GFP_KERNEL);
if (!pend) {
hl_ctx_put(ctx);
return -ENOMEM;
}
hl_fence_init(&pend->fence, ULONG_MAX);
/* Add pending user interrupt to relevant list for the interrupt
* handler to monitor
*/
spin_lock(&interrupt->wait_list_lock);
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
spin_unlock(&interrupt->wait_list_lock);
	/* We check the completion value here, as the interrupt could have been
	 * received before we added the node to the wait list
	 */
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
goto remove_pending_user_interrupt;
}
if (completion_value >= target_value) {
*status = HL_WAIT_CS_STATUS_COMPLETED;
/* There was no interrupt, we assume the completion is now. */
pend->fence.timestamp = ktime_get();
} else {
*status = HL_WAIT_CS_STATUS_BUSY;
}
if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED))
goto remove_pending_user_interrupt;
wait_again:
/* Wait for interrupt handler to signal completion */
completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
timeout);
	/* If the timeout did not expire, we need to perform the comparison.
	 * If the comparison fails, keep waiting until the timeout expires.
	 */
if (completion_rc > 0) {
spin_lock(&interrupt->wait_list_lock);
/* reinit_completion must be called before we check for user
* completion value, otherwise, if interrupt is received after
* the comparison and before the next wait_for_completion,
* we will reach timeout and fail
*/
reinit_completion(&pend->fence.completion);
spin_unlock(&interrupt->wait_list_lock);
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
goto remove_pending_user_interrupt;
}
if (completion_value >= target_value) {
*status = HL_WAIT_CS_STATUS_COMPLETED;
} else if (pend->fence.error) {
dev_err_ratelimited(hdev->dev,
"interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
pend->fence.error);
/* set the command completion status as ABORTED */
*status = HL_WAIT_CS_STATUS_ABORTED;
} else {
timeout = completion_rc;
goto wait_again;
}
} else if (completion_rc == -ERESTARTSYS) {
dev_err_ratelimited(hdev->dev,
"user process got signal while waiting for interrupt ID %d\n",
interrupt->interrupt_id);
rc = -EINTR;
} else {
/* The wait has timed-out. We don't know anything beyond that
* because the workload wasn't submitted through the driver.
* Therefore, from driver's perspective, the workload is still
* executing.
*/
rc = 0;
*status = HL_WAIT_CS_STATUS_BUSY;
}
remove_pending_user_interrupt:
spin_lock(&interrupt->wait_list_lock);
list_del(&pend->wait_list_node);
spin_unlock(&interrupt->wait_list_lock);
*timestamp = ktime_to_ns(pend->fence.timestamp);
kfree(pend);
hl_ctx_put(ctx);
return rc;
}
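/*
 * hl_interrupt_wait_ioctl() - resolve the interrupt object and dispatch the wait.
 *
 * The interrupt ID taken from the flags field selects between a decoder
 * interrupt (IDs below user_dec_intr_count), a regular user interrupt (IDs
 * within the first/last available range), or one of the common CQ/decoder
 * interrupts. Depending on HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ, the wait is
 * then performed either against a kernel CQ counters buffer or against a
 * user address.
 */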
static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
u16 interrupt_id, first_interrupt, last_interrupt;
struct hl_device *hdev = hpriv->hdev;
struct asic_fixed_properties *prop;
struct hl_user_interrupt *interrupt;
union hl_wait_cs_args *args = data;
u32 status = HL_WAIT_CS_STATUS_BUSY;
u64 timestamp = 0;
int rc, int_idx;
prop = &hdev->asic_prop;
if (!(prop->user_interrupt_count + prop->user_dec_intr_count)) {
dev_err(hdev->dev, "no user interrupts allowed");
return -EPERM;
}
interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
first_interrupt = prop->first_available_user_interrupt;
last_interrupt = prop->first_available_user_interrupt + prop->user_interrupt_count - 1;
if (interrupt_id < prop->user_dec_intr_count) {
/* Check if the requested core is enabled */
if (!(prop->decoder_enabled_mask & BIT(interrupt_id))) {
dev_err(hdev->dev, "interrupt on a disabled core(%u) not allowed",
interrupt_id);
return -EINVAL;
}
interrupt = &hdev->user_interrupt[interrupt_id];
} else if (interrupt_id >= first_interrupt && interrupt_id <= last_interrupt) {
int_idx = interrupt_id - first_interrupt + prop->user_dec_intr_count;
interrupt = &hdev->user_interrupt[int_idx];
} else if (interrupt_id == HL_COMMON_USER_CQ_INTERRUPT_ID) {
interrupt = &hdev->common_user_cq_interrupt;
} else if (interrupt_id == HL_COMMON_DEC_INTERRUPT_ID) {
interrupt = &hdev->common_decoder_interrupt;
} else {
dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id);
return -EINVAL;
}
if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr,
args->in.interrupt_timeout_us, args->in.cq_counters_handle,
args->in.cq_counters_offset,
args->in.target, interrupt,
!!(args->in.flags & HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT),
args->in.timestamp_handle, args->in.timestamp_offset,
&status, ×tamp);
else
rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx,
args->in.interrupt_timeout_us, args->in.addr,
args->in.target, interrupt, &status,
×tamp);
if (rc)
return rc;
memset(args, 0, sizeof(*args));
args->out.status = status;
if (timestamp) {
args->out.timestamp_nsec = timestamp;
args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
}
return 0;
}
int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
struct hl_device *hdev = hpriv->hdev;
union hl_wait_cs_args *args = data;
u32 flags = args->in.flags;
int rc;
/* If the device is not operational, or if an error has happened and user should release the
* device, there is no point in waiting for any command submission or user interrupt.
*/
if (!hl_device_operational(hpriv->hdev, NULL) || hdev->reset_info.watchdog_active)
return -EBUSY;
if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
rc = hl_interrupt_wait_ioctl(hpriv, data);
else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
rc = hl_multi_cs_wait_ioctl(hpriv, data);
else
rc = hl_cs_wait_ioctl(hpriv, data);
return rc;
}
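/*
 * Illustrative user-space sketch (not taken from the driver or any user-space
 * library): a wait on a user interrupt through the user-address path would
 * look roughly as follows. fd, intr_id, fence_word and expected_value are
 * placeholders, and FIELD_PREP_USER stands for whatever the caller uses to
 * place the interrupt ID inside HL_WAIT_CS_FLAGS_INTERRUPT_MASK.
 *
 *	union hl_wait_cs_args args = {0};
 *
 *	args.in.flags = HL_WAIT_CS_FLAGS_INTERRUPT |
 *			FIELD_PREP_USER(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, intr_id);
 *	args.in.interrupt_timeout_us = 1000000;
 *	args.in.addr = (__u64) (uintptr_t) &fence_word;
 *	args.in.target = expected_value;
 *
 *	if (!ioctl(fd, HL_IOCTL_WAIT_CS, &args) &&
 *	    args.out.status == HL_WAIT_CS_STATUS_COMPLETED &&
 *	    (args.out.flags & HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD))
 *		use(args.out.timestamp_nsec);
 *
 * use() above is likewise a placeholder for consuming the returned timestamp.
 */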
| linux-master | drivers/accel/habanalabs/common/command_submission.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#define pr_fmt(fmt) "habanalabs: " fmt
#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
[HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
[HL_DEBUG_OP_FUNNEL] = 0,
[HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
[HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
[HL_DEBUG_OP_TIMESTAMP] = 0
};
static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
{
struct hl_info_device_status dev_stat = {0};
u32 size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
if ((!size) || (!out))
return -EINVAL;
dev_stat.status = hl_device_status(hdev);
return copy_to_user(out, &dev_stat,
min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
}
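/*
 * hw_ip_info() - report static HW properties to user-space.
 *
 * SRAM and DRAM sizes are reported after subtracting the regions reserved for
 * the kernel driver (the delta between the user base address and the base
 * address), and the usable DRAM size is rounded down to a whole number of
 * DRAM pages. The copy to user-space is truncated to the caller's buffer size.
 */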
static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
struct hl_info_hw_ip_info hw_ip = {0};
u32 size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 sram_kmd_size, dram_kmd_size, dram_available_size;
if ((!size) || (!out))
return -EINVAL;
sram_kmd_size = (prop->sram_user_base_address -
prop->sram_base_address);
dram_kmd_size = (prop->dram_user_base_address -
prop->dram_base_address);
hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
hw_ip.sram_base_address = prop->sram_user_base_address;
hw_ip.dram_base_address =
prop->dram_supports_virtual_memory ?
prop->dmmu.start_addr : prop->dram_user_base_address;
hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask & 0xFF;
hw_ip.tpc_enabled_mask_ext = prop->tpc_enabled_mask;
hw_ip.sram_size = prop->sram_size - sram_kmd_size;
dram_available_size = prop->dram_size - dram_kmd_size;
hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size, prop->dram_page_size) *
prop->dram_page_size;
if (hw_ip.dram_size > PAGE_SIZE)
hw_ip.dram_enabled = 1;
hw_ip.dram_page_size = prop->dram_page_size;
hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
hw_ip.num_of_events = prop->num_of_events;
memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));
memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));
hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);
hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;
hw_ip.decoder_enabled_mask = prop->decoder_enabled_mask;
hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode;
hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt;
hw_ip.number_of_user_interrupts = prop->user_interrupt_count;
hw_ip.tpc_interrupt_id = prop->tpc_interrupt_id;
hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
hw_ip.server_type = prop->server_type;
hw_ip.security_enabled = prop->fw_security_enabled;
hw_ip.revision_id = hdev->pdev->revision;
hw_ip.rotator_enabled_mask = prop->rotator_enabled_mask;
hw_ip.engine_core_interrupt_reg_addr = prop->engine_core_interrupt_reg_addr;
hw_ip.reserved_dram_size = dram_kmd_size;
return copy_to_user(out, &hw_ip,
min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}
static int hw_events_info(struct hl_device *hdev, bool aggregate,
struct hl_info_args *args)
{
u32 size, max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
void *arr;
if ((!max_size) || (!out))
return -EINVAL;
arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
if (!arr) {
dev_err(hdev->dev, "Events info not supported\n");
return -EOPNOTSUPP;
}
return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}
static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
u32 max_size = args->return_size;
u64 events_mask;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
if ((max_size < sizeof(u64)) || (!out))
return -EINVAL;
mutex_lock(&hpriv->notifier_event.lock);
events_mask = hpriv->notifier_event.events_mask;
hpriv->notifier_event.events_mask = 0;
mutex_unlock(&hpriv->notifier_event.lock);
return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0;
}
static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_info_dram_usage dram_usage = {0};
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 dram_kmd_size;
if ((!max_size) || (!out))
return -EINVAL;
dram_kmd_size = (prop->dram_user_base_address -
prop->dram_base_address);
dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
atomic64_read(&hdev->dram_used_mem);
if (hpriv->ctx)
dram_usage.ctx_dram_mem =
atomic64_read(&hpriv->ctx->dram_phys_mem);
return copy_to_user(out, &dram_usage,
min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
}
static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
{
struct hl_info_hw_idle hw_idle = {0};
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
if ((!max_size) || (!out))
return -EINVAL;
hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
hw_idle.busy_engines_mask_ext,
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
hw_idle.busy_engines_mask =
lower_32_bits(hw_idle.busy_engines_mask_ext[0]);
return copy_to_user(out, &hw_idle,
min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
}
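/*
 * debug_coresight() - run a coresight debug operation on behalf of user-space.
 *
 * The user input buffer (sized per-opcode by hl_debug_struct_size) is copied
 * into kernel memory, an optional output buffer is allocated, and the ASIC
 * specific debug_coresight callback performs the actual configuration before
 * the output is copied back to the caller.
 */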
static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args)
{
struct hl_debug_params *params;
void *input = NULL, *output = NULL;
int rc;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
params->reg_idx = args->reg_idx;
params->enable = args->enable;
params->op = args->op;
if (args->input_ptr && args->input_size) {
input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
if (!input) {
rc = -ENOMEM;
goto out;
}
if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
args->input_size)) {
rc = -EFAULT;
dev_err(hdev->dev, "failed to copy input debug data\n");
goto out;
}
params->input = input;
}
if (args->output_ptr && args->output_size) {
output = kzalloc(args->output_size, GFP_KERNEL);
if (!output) {
rc = -ENOMEM;
goto out;
}
params->output = output;
params->output_size = args->output_size;
}
rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params);
if (rc) {
dev_err(hdev->dev,
"debug coresight operation failed %d\n", rc);
goto out;
}
if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
output, args->output_size)) {
dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
rc = -EFAULT;
goto out;
}
out:
kfree(params);
kfree(output);
kfree(input);
return rc;
}
static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
{
struct hl_info_device_utilization device_util = {0};
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
if ((!max_size) || (!out))
return -EINVAL;
rc = hl_device_utilization(hdev, &device_util.utilization);
if (rc)
return -EINVAL;
return copy_to_user(out, &device_util,
min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
}
static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
{
struct hl_info_clk_rate clk_rate = {0};
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
if ((!max_size) || (!out))
return -EINVAL;
rc = hl_fw_get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz, &clk_rate.max_clk_rate_mhz);
if (rc)
return rc;
return copy_to_user(out, &clk_rate, min_t(size_t, max_size, sizeof(clk_rate)))
? -EFAULT : 0;
}
static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
{
struct hl_info_reset_count reset_count = {0};
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
if ((!max_size) || (!out))
return -EINVAL;
reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
reset_count.soft_reset_cnt = hdev->reset_info.compute_reset_cnt;
return copy_to_user(out, &reset_count,
min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
}
static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
{
struct hl_info_time_sync time_sync = {0};
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
if ((!max_size) || (!out))
return -EINVAL;
time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
time_sync.host_time = ktime_get_raw_ns();
return copy_to_user(out, &time_sync,
min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
}
static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_info_pci_counters pci_counters = {0};
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
if ((!max_size) || (!out))
return -EINVAL;
rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
if (rc)
return rc;
return copy_to_user(out, &pci_counters,
min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
}
static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct hl_device *hdev = hpriv->hdev;
struct hl_info_clk_throttle clk_throttle = {0};
ktime_t end_time, zero_time = ktime_set(0, 0);
u32 max_size = args->return_size;
int i;
if ((!max_size) || (!out))
return -EINVAL;
mutex_lock(&hdev->clk_throttling.lock);
clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason;
for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) {
if (!(hdev->clk_throttling.aggregated_reason & BIT(i)))
continue;
clk_throttle.clk_throttling_timestamp_us[i] =
ktime_to_us(hdev->clk_throttling.timestamp[i].start);
if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time))
end_time = hdev->clk_throttling.timestamp[i].end;
else
end_time = ktime_get();
clk_throttle.clk_throttling_duration_ns[i] =
ktime_to_ns(ktime_sub(end_time,
hdev->clk_throttling.timestamp[i].start));
}
mutex_unlock(&hdev->clk_throttling.lock);
return copy_to_user(out, &clk_throttle,
min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
}
static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct hl_info_cs_counters cs_counters = {0};
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_counters_atomic *cntr;
u32 max_size = args->return_size;
cntr = &hdev->aggregated_cs_counters;
if ((!max_size) || (!out))
return -EINVAL;
cs_counters.total_out_of_mem_drop_cnt =
atomic64_read(&cntr->out_of_mem_drop_cnt);
cs_counters.total_parsing_drop_cnt =
atomic64_read(&cntr->parsing_drop_cnt);
cs_counters.total_queue_full_drop_cnt =
atomic64_read(&cntr->queue_full_drop_cnt);
cs_counters.total_device_in_reset_drop_cnt =
atomic64_read(&cntr->device_in_reset_drop_cnt);
cs_counters.total_max_cs_in_flight_drop_cnt =
atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
cs_counters.total_validation_drop_cnt =
atomic64_read(&cntr->validation_drop_cnt);
if (hpriv->ctx) {
cs_counters.ctx_out_of_mem_drop_cnt =
atomic64_read(
&hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
cs_counters.ctx_parsing_drop_cnt =
atomic64_read(
&hpriv->ctx->cs_counters.parsing_drop_cnt);
cs_counters.ctx_queue_full_drop_cnt =
atomic64_read(
&hpriv->ctx->cs_counters.queue_full_drop_cnt);
cs_counters.ctx_device_in_reset_drop_cnt =
atomic64_read(
&hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
cs_counters.ctx_max_cs_in_flight_drop_cnt =
atomic64_read(
&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
cs_counters.ctx_validation_drop_cnt =
atomic64_read(
&hpriv->ctx->cs_counters.validation_drop_cnt);
}
return copy_to_user(out, &cs_counters,
min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
}
static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_info_sync_manager sm_info = {0};
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
if ((!max_size) || (!out))
return -EINVAL;
if (args->dcore_id >= HL_MAX_DCORES)
return -EINVAL;
sm_info.first_available_sync_object =
prop->first_available_user_sob[args->dcore_id];
sm_info.first_available_monitor =
prop->first_available_user_mon[args->dcore_id];
sm_info.first_available_cq =
prop->first_available_cq[args->dcore_id];
return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
sizeof(sm_info))) ? -EFAULT : 0;
}
static int total_energy_consumption_info(struct hl_fpriv *hpriv,
struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_info_energy total_energy = {0};
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
if ((!max_size) || (!out))
return -EINVAL;
rc = hl_fw_cpucp_total_energy_get(hdev,
&total_energy.total_energy_consumption);
if (rc)
return rc;
return copy_to_user(out, &total_energy,
min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
}
static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_pll_frequency_info freq_info = { {0} };
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
if ((!max_size) || (!out))
return -EINVAL;
rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
if (rc)
return rc;
return copy_to_user(out, &freq_info,
min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
}
static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
struct hl_power_info power_info = {0};
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
if ((!max_size) || (!out))
return -EINVAL;
rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
if (rc)
return rc;
return copy_to_user(out, &power_info,
min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
}
static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
struct hl_open_stats_info open_stats_info = {0};
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
if ((!max_size) || (!out))
return -EINVAL;
open_stats_info.last_open_period_ms = jiffies64_to_msecs(
hdev->last_open_session_duration_jif);
open_stats_info.open_counter = hdev->open_counter;
open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;
return copy_to_user(out, &open_stats_info,
min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
}
static int dram_pending_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
u32 pend_rows_num = 0;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
if ((!max_size) || (!out))
return -EINVAL;
rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num);
if (rc)
return rc;
return copy_to_user(out, &pend_rows_num,
min_t(size_t, max_size, sizeof(pend_rows_num))) ? -EFAULT : 0;
}
static int dram_replaced_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
struct cpucp_hbm_row_info info = {0};
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
if ((!max_size) || (!out))
return -EINVAL;
rc = hl_fw_dram_replaced_row_get(hdev, &info);
if (rc)
return rc;
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_info_last_err_open_dev_time info = {0};
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
if ((!max_size) || (!out))
return -EINVAL;
info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime);
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_info_cs_timeout_event info = {0};
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
if ((!max_size) || (!out))
return -EINVAL;
info.seq = hdev->captured_err_info.cs_timeout.seq;
info.timestamp = ktime_to_ns(hdev->captured_err_info.cs_timeout.timestamp);
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
struct razwi_info *razwi_info;
if ((!max_size) || (!out))
return -EINVAL;
razwi_info = &hdev->captured_err_info.razwi_info;
if (!razwi_info->razwi_info_available)
return 0;
return copy_to_user(out, &razwi_info->razwi,
min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) ? -EFAULT : 0;
}
static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
struct hl_info_undefined_opcode_event info = {0};
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
if ((!max_size) || (!out))
return -EINVAL;
info.timestamp = ktime_to_ns(hdev->captured_err_info.undef_opcode.timestamp);
info.engine_id = hdev->captured_err_info.undef_opcode.engine_id;
info.cq_addr = hdev->captured_err_info.undef_opcode.cq_addr;
info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;
info.stream_id = hdev->captured_err_info.undef_opcode.stream_id;
info.cb_addr_streams_len = hdev->captured_err_info.undef_opcode.cb_addr_streams_len;
memcpy(info.cb_addr_streams, hdev->captured_err_info.undef_opcode.cb_addr_streams,
sizeof(info.cb_addr_streams));
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct hl_info_dev_memalloc_page_sizes info = {0};
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
if ((!max_size) || (!out))
return -EINVAL;
	/*
	 * Future ASICs that support multiple DRAM page sizes will support only
	 * "power of 2" pages (unlike some earlier ASICs that predate multiple
	 * page size support).
	 * For this reason, for all ASICs that do not support multiple page
	 * sizes, the function returns an empty bitmask, indicating that
	 * multiple page sizes are not supported.
	 */
info.page_order_bitmask = hdev->asic_prop.dmmu.supported_pages_mask;
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct cpucp_sec_attest_info *sec_attest_info;
struct hl_info_sec_attest *info;
u32 max_size = args->return_size;
int rc;
if ((!max_size) || (!out))
return -EINVAL;
sec_attest_info = kmalloc(sizeof(*sec_attest_info), GFP_KERNEL);
if (!sec_attest_info)
return -ENOMEM;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
rc = -ENOMEM;
goto free_sec_attest_info;
}
rc = hl_fw_get_sec_attest_info(hpriv->hdev, sec_attest_info, args->sec_attest_nonce);
if (rc)
goto free_info;
info->nonce = le32_to_cpu(sec_attest_info->nonce);
info->pcr_quote_len = le16_to_cpu(sec_attest_info->pcr_quote_len);
info->pub_data_len = le16_to_cpu(sec_attest_info->pub_data_len);
info->certificate_len = le16_to_cpu(sec_attest_info->certificate_len);
info->pcr_num_reg = sec_attest_info->pcr_num_reg;
info->pcr_reg_len = sec_attest_info->pcr_reg_len;
info->quote_sig_len = sec_attest_info->quote_sig_len;
memcpy(&info->pcr_data, &sec_attest_info->pcr_data, sizeof(info->pcr_data));
memcpy(&info->pcr_quote, &sec_attest_info->pcr_quote, sizeof(info->pcr_quote));
memcpy(&info->public_data, &sec_attest_info->public_data, sizeof(info->public_data));
memcpy(&info->certificate, &sec_attest_info->certificate, sizeof(info->certificate));
memcpy(&info->quote_sig, &sec_attest_info->quote_sig, sizeof(info->quote_sig));
rc = copy_to_user(out, info,
min_t(size_t, max_size, sizeof(*info))) ? -EFAULT : 0;
free_info:
kfree(info);
free_sec_attest_info:
kfree(sec_attest_info);
return rc;
}
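/*
 * eventfd_register()/eventfd_unregister() - attach or detach the eventfd used
 * for asynchronous event notification. Only one eventfd may be registered per
 * open device file (hl_fpriv); both paths take notifier_event.lock so that
 * registration and unregistration never race on the context pointer.
 */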
static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
int rc;
	/* check if an eventfd is already registered for this process */
mutex_lock(&hpriv->notifier_event.lock);
if (hpriv->notifier_event.eventfd) {
mutex_unlock(&hpriv->notifier_event.lock);
return -EINVAL;
}
hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd);
if (IS_ERR(hpriv->notifier_event.eventfd)) {
rc = PTR_ERR(hpriv->notifier_event.eventfd);
hpriv->notifier_event.eventfd = NULL;
mutex_unlock(&hpriv->notifier_event.lock);
return rc;
}
mutex_unlock(&hpriv->notifier_event.lock);
return 0;
}
static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
mutex_lock(&hpriv->notifier_event.lock);
if (!hpriv->notifier_event.eventfd) {
mutex_unlock(&hpriv->notifier_event.lock);
return -EINVAL;
}
eventfd_ctx_put(hpriv->notifier_event.eventfd);
hpriv->notifier_event.eventfd = NULL;
mutex_unlock(&hpriv->notifier_event.lock);
return 0;
}
static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
u32 status_buf_size = args->return_size;
struct hl_device *hdev = hpriv->hdev;
struct engines_data eng_data;
int rc;
if ((status_buf_size < SZ_1K) || (status_buf_size > HL_ENGINES_DATA_MAX_SIZE) || (!out))
return -EINVAL;
eng_data.actual_size = 0;
eng_data.allocated_buf_size = status_buf_size;
eng_data.buf = vmalloc(status_buf_size);
if (!eng_data.buf)
return -ENOMEM;
hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);
if (eng_data.actual_size > eng_data.allocated_buf_size) {
dev_err(hdev->dev,
"Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
eng_data.actual_size, status_buf_size);
vfree(eng_data.buf);
return -ENOMEM;
}
args->user_buffer_actual_size = eng_data.actual_size;
rc = copy_to_user(out, eng_data.buf, min_t(size_t, status_buf_size, eng_data.actual_size)) ?
-EFAULT : 0;
vfree(eng_data.buf);
return rc;
}
static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
struct page_fault_info *pgf_info;
if ((!max_size) || (!out))
return -EINVAL;
pgf_info = &hdev->captured_err_info.page_fault_info;
if (!pgf_info->page_fault_info_available)
return 0;
return copy_to_user(out, &pgf_info->page_fault,
min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) ? -EFAULT : 0;
}
static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
u32 user_buf_size = args->return_size;
struct hl_device *hdev = hpriv->hdev;
struct page_fault_info *pgf_info;
u64 actual_size;
if (!out)
return -EINVAL;
pgf_info = &hdev->captured_err_info.page_fault_info;
if (!pgf_info->page_fault_info_available)
return 0;
args->array_size = pgf_info->num_of_user_mappings;
actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping);
if (user_buf_size < actual_size)
return -ENOMEM;
return copy_to_user(out, pgf_info->user_mappings, actual_size) ? -EFAULT : 0;
}
static int hw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
struct hl_device *hdev = hpriv->hdev;
u32 user_buf_size = args->return_size;
struct hw_err_info *info;
int rc;
if (!user_buf)
return -EINVAL;
info = &hdev->captured_err_info.hw_err;
if (!info->event_info_available)
return 0;
if (user_buf_size < sizeof(struct hl_info_hw_err_event))
return -ENOMEM;
rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_hw_err_event));
return rc ? -EFAULT : 0;
}
static int fw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
struct hl_device *hdev = hpriv->hdev;
u32 user_buf_size = args->return_size;
struct fw_err_info *info;
int rc;
if (!user_buf)
return -EINVAL;
info = &hdev->captured_err_info.fw_err;
if (!info->event_info_available)
return 0;
if (user_buf_size < sizeof(struct hl_info_fw_err_event))
return -ENOMEM;
rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_fw_err_event));
return rc ? -EFAULT : 0;
}
static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
{
void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
u32 size = info_args->return_size;
dma_addr_t dma_handle;
bool need_input_buff;
void *fw_buff;
int rc = 0;
switch (info_args->fw_sub_opcode) {
case HL_PASSTHROUGH_VERSIONS:
need_input_buff = false;
break;
default:
return -EINVAL;
}
if (size > SZ_1M) {
dev_err(hdev->dev, "buffer size cannot exceed 1MB\n");
return -EINVAL;
}
fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle);
if (!fw_buff)
return -ENOMEM;
if (need_input_buff && copy_from_user(fw_buff, buff, size)) {
dev_dbg(hdev->dev, "Failed to copy from user FW buff\n");
rc = -EFAULT;
goto free_buff;
}
rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode, dma_handle, &size);
if (rc)
goto free_buff;
if (copy_to_user(buff, fw_buff, min(size, info_args->return_size))) {
dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n");
rc = -EFAULT;
}
free_buff:
hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff);
return rc;
}
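/*
 * _hl_info_ioctl() - INFO ioctl dispatcher.
 *
 * Opcodes in the first switch are served even when the device is disabled or
 * in reset; the remaining opcodes are rejected with -EBUSY unless the device
 * is operational.
 */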
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
enum hl_device_status status;
struct hl_info_args *args = data;
struct hl_device *hdev = hpriv->hdev;
int rc;
if (args->pad) {
dev_dbg(hdev->dev, "Padding bytes must be 0\n");
return -EINVAL;
}
/*
* Information is returned for the following opcodes even if the device
* is disabled or in reset.
*/
switch (args->op) {
case HL_INFO_HW_IP_INFO:
return hw_ip_info(hdev, args);
case HL_INFO_DEVICE_STATUS:
return device_status_info(hdev, args);
case HL_INFO_RESET_COUNT:
return get_reset_count(hdev, args);
case HL_INFO_HW_EVENTS:
return hw_events_info(hdev, false, args);
case HL_INFO_HW_EVENTS_AGGREGATE:
return hw_events_info(hdev, true, args);
case HL_INFO_CS_COUNTERS:
return cs_counters_info(hpriv, args);
case HL_INFO_CLK_THROTTLE_REASON:
return clk_throttle_info(hpriv, args);
case HL_INFO_SYNC_MANAGER:
return sync_manager_info(hpriv, args);
case HL_INFO_OPEN_STATS:
return open_stats_info(hpriv, args);
case HL_INFO_LAST_ERR_OPEN_DEV_TIME:
return last_err_open_dev_info(hpriv, args);
case HL_INFO_CS_TIMEOUT_EVENT:
return cs_timeout_info(hpriv, args);
case HL_INFO_RAZWI_EVENT:
return razwi_info(hpriv, args);
case HL_INFO_UNDEFINED_OPCODE_EVENT:
return undefined_opcode_info(hpriv, args);
case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
return dev_mem_alloc_page_sizes_info(hpriv, args);
case HL_INFO_GET_EVENTS:
return events_info(hpriv, args);
case HL_INFO_PAGE_FAULT_EVENT:
return page_fault_info(hpriv, args);
case HL_INFO_USER_MAPPINGS:
return user_mappings_info(hpriv, args);
case HL_INFO_UNREGISTER_EVENTFD:
return eventfd_unregister(hpriv, args);
case HL_INFO_HW_ERR_EVENT:
return hw_err_info(hpriv, args);
case HL_INFO_FW_ERR_EVENT:
return fw_err_info(hpriv, args);
case HL_INFO_DRAM_USAGE:
return dram_usage_info(hpriv, args);
default:
break;
}
if (!hl_device_operational(hdev, &status)) {
dev_dbg_ratelimited(dev,
"Device is %s. Can't execute INFO IOCTL\n",
hdev->status[status]);
return -EBUSY;
}
switch (args->op) {
case HL_INFO_HW_IDLE:
rc = hw_idle(hdev, args);
break;
case HL_INFO_DEVICE_UTILIZATION:
rc = device_utilization(hdev, args);
break;
case HL_INFO_CLK_RATE:
rc = get_clk_rate(hdev, args);
break;
case HL_INFO_TIME_SYNC:
return time_sync_info(hdev, args);
case HL_INFO_PCI_COUNTERS:
return pci_counters_info(hpriv, args);
case HL_INFO_TOTAL_ENERGY:
return total_energy_consumption_info(hpriv, args);
case HL_INFO_PLL_FREQUENCY:
return pll_frequency_info(hpriv, args);
case HL_INFO_POWER:
return power_info(hpriv, args);
case HL_INFO_DRAM_REPLACED_ROWS:
return dram_replaced_rows_info(hpriv, args);
case HL_INFO_DRAM_PENDING_ROWS:
return dram_pending_rows_info(hpriv, args);
case HL_INFO_SECURED_ATTESTATION:
return sec_attest_info(hpriv, args);
case HL_INFO_REGISTER_EVENTFD:
return eventfd_register(hpriv, args);
case HL_INFO_ENGINE_STATUS:
return engine_status_info(hpriv, args);
case HL_INFO_FW_GENERIC_REQ:
return send_fw_generic_request(hdev, args);
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -EINVAL;
break;
}
return rc;
}
static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
{
return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
}
static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
{
return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
}
static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
{
struct hl_debug_args *args = data;
struct hl_device *hdev = hpriv->hdev;
enum hl_device_status status;
int rc = 0;
if (!hl_device_operational(hdev, &status)) {
dev_dbg_ratelimited(hdev->dev,
"Device is %s. Can't execute DEBUG IOCTL\n",
hdev->status[status]);
return -EBUSY;
}
switch (args->op) {
case HL_DEBUG_OP_ETR:
case HL_DEBUG_OP_ETF:
case HL_DEBUG_OP_STM:
case HL_DEBUG_OP_FUNNEL:
case HL_DEBUG_OP_BMON:
case HL_DEBUG_OP_SPMU:
case HL_DEBUG_OP_TIMESTAMP:
if (!hdev->in_debug) {
dev_err_ratelimited(hdev->dev,
"Rejecting debug configuration request because device not in debug mode\n");
return -EFAULT;
}
args->input_size = min(args->input_size, hl_debug_struct_size[args->op]);
rc = debug_coresight(hdev, hpriv->ctx, args);
break;
case HL_DEBUG_OP_SET_MODE:
rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable);
break;
default:
dev_err(hdev->dev, "Invalid request %d\n", args->op);
rc = -EINVAL;
break;
}
return rc;
}
#define HL_IOCTL_DEF(ioctl, _func) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
static const struct hl_ioctl_desc hl_ioctls[] = {
HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_wait_ioctl),
HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};
static const struct hl_ioctl_desc hl_ioctls_control[] = {
HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
};
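/*
 * _hl_ioctl() - common ioctl argument marshalling.
 *
 * The argument size is taken as the maximum of the size encoded in the
 * caller's command and the size in the driver's own descriptor; buffers up to
 * 128 bytes are kept on the stack, larger ones are allocated with kzalloc().
 * Data is copied in for IOC_IN commands and copied back out for IOC_OUT
 * commands after the handler runs.
 */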
static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
const struct hl_ioctl_desc *ioctl, struct device *dev)
{
struct hl_fpriv *hpriv = filep->private_data;
unsigned int nr = _IOC_NR(cmd);
char stack_kdata[128] = {0};
char *kdata = NULL;
unsigned int usize, asize;
hl_ioctl_t *func;
u32 hl_size;
int retcode;
/* Do not trust userspace, use our own definition */
func = ioctl->func;
if (unlikely(!func)) {
dev_dbg(dev, "no function\n");
retcode = -ENOTTY;
goto out_err;
}
hl_size = _IOC_SIZE(ioctl->cmd);
usize = asize = _IOC_SIZE(cmd);
if (hl_size > asize)
asize = hl_size;
cmd = ioctl->cmd;
if (cmd & (IOC_IN | IOC_OUT)) {
if (asize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
kdata = kzalloc(asize, GFP_KERNEL);
if (!kdata) {
retcode = -ENOMEM;
goto out_err;
}
}
}
if (cmd & IOC_IN) {
if (copy_from_user(kdata, (void __user *)arg, usize)) {
retcode = -EFAULT;
goto out_err;
}
}
retcode = func(hpriv, kdata);
if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
retcode = -EFAULT;
out_err:
if (retcode)
dev_dbg_ratelimited(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
task_pid_nr(current), cmd, nr);
if (kdata != stack_kdata)
kfree(kdata);
return retcode;
}
long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct hl_fpriv *hpriv = filep->private_data;
struct hl_device *hdev = hpriv->hdev;
const struct hl_ioctl_desc *ioctl = NULL;
unsigned int nr = _IOC_NR(cmd);
if (!hdev) {
pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
return -ENODEV;
}
if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
ioctl = &hl_ioctls[nr];
} else {
dev_dbg_ratelimited(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
task_pid_nr(current), nr);
return -ENOTTY;
}
return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
}
long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct hl_fpriv *hpriv = filep->private_data;
struct hl_device *hdev = hpriv->hdev;
const struct hl_ioctl_desc *ioctl = NULL;
unsigned int nr = _IOC_NR(cmd);
if (!hdev) {
pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
return -ENODEV;
}
if (nr == _IOC_NR(HL_IOCTL_INFO)) {
ioctl = &hl_ioctls_control[nr];
} else {
dev_dbg_ratelimited(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
task_pid_nr(current), nr);
return -ENOTTY;
}
return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
}
| linux-master | drivers/accel/habanalabs/common/habanalabs_ioctl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2019 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "../habanalabs.h"
#include "../../include/hw_ip/pci/pci_general.h"
#include <linux/pci.h>
#include <trace/events/habanalabs.h>
#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 100)
#define IATU_REGION_CTRL_REGION_EN_MASK BIT(31)
#define IATU_REGION_CTRL_MATCH_MODE_MASK BIT(30)
#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK BIT(19)
#define IATU_REGION_CTRL_BAR_NUM_MASK GENMASK(10, 8)
/**
* hl_pci_bars_map() - Map PCI BARs.
* @hdev: Pointer to hl_device structure.
* @name: Array of BAR names.
* @is_wc: Array with flag per BAR whether a write-combined mapping is needed.
*
* Request PCI regions and map them to kernel virtual addresses.
*
* Return: 0 on success, non-zero for failure.
*/
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
bool is_wc[3])
{
struct pci_dev *pdev = hdev->pdev;
int rc, i, bar;
rc = pci_request_regions(pdev, HL_NAME);
if (rc) {
dev_err(hdev->dev, "Cannot obtain PCI resources\n");
return rc;
}
for (i = 0 ; i < 3 ; i++) {
bar = i * 2; /* 64-bit BARs */
hdev->pcie_bar[bar] = is_wc[i] ?
pci_ioremap_wc_bar(pdev, bar) :
pci_ioremap_bar(pdev, bar);
if (!hdev->pcie_bar[bar]) {
dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n",
is_wc[i] ? "_wc" : "", name[i]);
rc = -ENODEV;
goto err;
}
}
return 0;
err:
for (i = 2 ; i >= 0 ; i--) {
bar = i * 2; /* 64-bit BARs */
if (hdev->pcie_bar[bar])
iounmap(hdev->pcie_bar[bar]);
}
pci_release_regions(pdev);
return rc;
}
/**
* hl_pci_bars_unmap() - Unmap PCI BARS.
* @hdev: Pointer to hl_device structure.
*
* Release all PCI BARs and unmap their virtual addresses.
*/
static void hl_pci_bars_unmap(struct hl_device *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int i, bar;
for (i = 2 ; i >= 0 ; i--) {
bar = i * 2; /* 64-bit BARs */
iounmap(hdev->pcie_bar[bar]);
}
pci_release_regions(pdev);
}
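/**
 * hl_pci_elbi_read() - Read through the ELBI interface.
 * @hdev: Pointer to hl_device structure.
 * @addr: Address to read from.
 * @data: Pointer to store the read value.
 *
 * Return: 0 on success, negative value for failure.
 */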
int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data)
{
struct pci_dev *pdev = hdev->pdev;
ktime_t timeout;
u64 msec;
u32 val;
if (hdev->pldm)
msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC;
else
msec = HL_PCI_ELBI_TIMEOUT_MSEC;
/* Clear previous status */
pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL, 0);
timeout = ktime_add_ms(ktime_get(), msec);
for (;;) {
pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
if (val & PCI_CONFIG_ELBI_STS_MASK)
break;
if (ktime_compare(ktime_get(), timeout) > 0) {
pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
&val);
break;
}
usleep_range(300, 500);
}
if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
if (unlikely(trace_habanalabs_elbi_read_enabled()))
trace_habanalabs_elbi_read(hdev->dev, (u32) addr, val);
return 0;
}
if (val & PCI_CONFIG_ELBI_STS_ERR) {
dev_err(hdev->dev, "Error reading from ELBI\n");
return -EIO;
}
if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
dev_err(hdev->dev, "ELBI read didn't finish in time\n");
return -EIO;
}
dev_err(hdev->dev, "ELBI read has undefined bits in status\n");
return -EIO;
}
/**
* hl_pci_elbi_write() - Write through the ELBI interface.
* @hdev: Pointer to hl_device structure.
* @addr: Address to write to
* @data: Data to write
*
* Return: 0 on success, negative value for failure.
*/
static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
{
struct pci_dev *pdev = hdev->pdev;
ktime_t timeout;
u64 msec;
u32 val;
if (hdev->pldm)
msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC;
else
msec = HL_PCI_ELBI_TIMEOUT_MSEC;
/* Clear previous status */
pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
PCI_CONFIG_ELBI_CTRL_WRITE);
timeout = ktime_add_ms(ktime_get(), msec);
for (;;) {
pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
if (val & PCI_CONFIG_ELBI_STS_MASK)
break;
if (ktime_compare(ktime_get(), timeout) > 0) {
pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
&val);
break;
}
usleep_range(300, 500);
}
if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
if (unlikely(trace_habanalabs_elbi_write_enabled()))
trace_habanalabs_elbi_write(hdev->dev, (u32) addr, val);
return 0;
}
if (val & PCI_CONFIG_ELBI_STS_ERR)
return -EIO;
if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
dev_err(hdev->dev, "ELBI write didn't finish in time\n");
return -EIO;
}
dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
return -EIO;
}
/**
* hl_pci_iatu_write() - iatu write routine.
* @hdev: Pointer to hl_device structure.
* @addr: Address to write to
* @data: Data to write
*
* Return: 0 on success, negative value for failure.
*/
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 dbi_offset;
int rc;
dbi_offset = addr & 0xFFF;
/* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
* in case the firmware security is enabled
*/
hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
data);
if (rc)
return -EIO;
return 0;
}
/**
* hl_pci_set_inbound_region() - Configure inbound region
* @hdev: Pointer to hl_device structure.
* @region: Inbound region number.
* @pci_region: Inbound region parameters.
*
* Configure the iATU inbound region.
*
* Return: 0 on success, negative value for failure.
*/
int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
struct hl_inbound_pci_region *pci_region)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 bar_phys_base, region_base, region_end_address;
u32 offset, ctrl_reg_val;
int rc = 0;
/* region offset */
offset = (0x200 * region) + 0x100;
if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) {
bar_phys_base = hdev->pcie_bar_phys[pci_region->bar];
region_base = bar_phys_base + pci_region->offset_in_bar;
region_end_address = region_base + pci_region->size - 1;
rc |= hl_pci_iatu_write(hdev, offset + 0x8,
lower_32_bits(region_base));
rc |= hl_pci_iatu_write(hdev, offset + 0xC,
upper_32_bits(region_base));
rc |= hl_pci_iatu_write(hdev, offset + 0x10,
lower_32_bits(region_end_address));
}
/* Point to the specified address */
rc |= hl_pci_iatu_write(hdev, offset + 0x14, lower_32_bits(pci_region->addr));
rc |= hl_pci_iatu_write(hdev, offset + 0x18, upper_32_bits(pci_region->addr));
/* Set bar type as memory */
rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);
/* Enable + bar/address match + match enable + bar number */
ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1);
ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK, pci_region->mode);
ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1);
if (pci_region->mode == PCI_BAR_MATCH_MODE)
ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK, pci_region->bar);
rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
/* Return the DBI window to the default location
* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
* in case the firmware security is enabled
*/
hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
if (rc)
dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
pci_region->bar, pci_region->addr);
return rc;
}
/**
* hl_pci_set_outbound_region() - Configure outbound region 0
* @hdev: Pointer to hl_device structure.
* @pci_region: Outbound region parameters.
*
* Configure the iATU outbound region 0.
*
* Return: 0 on success, negative value for failure.
*/
int hl_pci_set_outbound_region(struct hl_device *hdev,
struct hl_outbound_pci_region *pci_region)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 outbound_region_end_address;
int rc = 0;
/* Outbound Region 0 */
outbound_region_end_address =
pci_region->addr + pci_region->size - 1;
rc |= hl_pci_iatu_write(hdev, 0x008,
lower_32_bits(pci_region->addr));
rc |= hl_pci_iatu_write(hdev, 0x00C,
upper_32_bits(pci_region->addr));
rc |= hl_pci_iatu_write(hdev, 0x010,
lower_32_bits(outbound_region_end_address));
rc |= hl_pci_iatu_write(hdev, 0x014, 0);
rc |= hl_pci_iatu_write(hdev, 0x018, 0);
rc |= hl_pci_iatu_write(hdev, 0x020,
upper_32_bits(outbound_region_end_address));
/* Increase region size */
rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
/* Enable */
rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
/* Return the DBI window to the default location
* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
* in case the firmware security is enabled
*/
hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
return rc;
}
/**
* hl_get_pci_memory_region() - get PCI region for given address
* @hdev: Pointer to hl_device structure.
* @addr: device address
*
* @return region index on success, otherwise PCI_REGION_NUMBER (invalid
* region index)
*/
enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr)
{
int i;
for (i = 0 ; i < PCI_REGION_NUMBER ; i++) {
struct pci_mem_region *region = &hdev->pci_mem_region[i];
if (!region->used)
continue;
if ((addr >= region->region_base) &&
(addr < region->region_base + region->region_size))
return i;
}
return PCI_REGION_NUMBER;
}
/**
* hl_pci_init() - PCI initialization code.
* @hdev: Pointer to hl_device structure.
*
* Set DMA masks, initialize the PCI controller and map the PCI BARs.
*
* Return: 0 on success, non-zero for failure.
*/
int hl_pci_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_dev *pdev = hdev->pdev;
int rc;
rc = pci_enable_device_mem(pdev);
if (rc) {
dev_err(hdev->dev, "can't enable PCI device\n");
return rc;
}
pci_set_master(pdev);
rc = hdev->asic_funcs->pci_bars_map(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to map PCI BAR addresses\n");
goto disable_device;
}
rc = hdev->asic_funcs->init_iatu(hdev);
if (rc) {
dev_err(hdev->dev, "PCI controller was not initialized successfully\n");
goto unmap_pci_bars;
}
/* Driver must sleep in order for FW to finish the iATU configuration */
if (hdev->asic_prop.iatu_done_by_fw)
usleep_range(2000, 3000);
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(prop->dma_mask));
if (rc) {
dev_err(hdev->dev,
"Failed to set dma mask to %d bits, error %d\n",
prop->dma_mask, rc);
goto unmap_pci_bars;
}
dma_set_max_seg_size(&pdev->dev, U32_MAX);
return 0;
unmap_pci_bars:
hl_pci_bars_unmap(hdev);
disable_device:
pci_disable_device(pdev);
return rc;
}
/**
* hl_pci_fini() - PCI finalization code.
* @hdev: Pointer to hl_device structure
*
* Unmap PCI bars and disable PCI device.
*/
void hl_pci_fini(struct hl_device *hdev)
{
hl_pci_bars_unmap(hdev);
pci_disable_device(hdev->pdev);
}
| linux-master | drivers/accel/habanalabs/common/pci/pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2019 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "../habanalabs.h"
#include "../../include/hw_ip/mmu/mmu_general.h"
#include <linux/slab.h>
#define MMU_V1_MAX_HOPS (MMU_HOP4 + 1)
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{
struct pgt_info *pgt_info = NULL;
hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
(unsigned long) hop_addr)
if (hop_addr == pgt_info->shadow_addr)
break;
return pgt_info;
}
static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
struct hl_device *hdev = ctx->hdev;
gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
hdev->asic_prop.mmu_hop_table_size);
hash_del(&pgt_info->node);
kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
kfree(pgt_info);
}
static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
_free_hop(ctx, pgt_info);
}
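/*
 * alloc_hop() - allocate a new hop table.
 *
 * Each hop has two copies: a physical page taken from the device MMU page
 * table pool (written to H/W through the ASIC write_pte callback) and a
 * kernel-memory shadow page used for fast lookups. The pgt_info that ties
 * them together is inserted into the context's mmu_shadow_hash, keyed by the
 * shadow address. Returns the shadow address, or ULLONG_MAX on failure.
 */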
static u64 alloc_hop(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pgt_info *pgt_info;
u64 phys_addr, shadow_addr;
pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
if (!pgt_info)
return ULLONG_MAX;
phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
prop->mmu_hop_table_size);
if (!phys_addr) {
dev_err(hdev->dev, "failed to allocate page\n");
goto pool_add_err;
}
shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
GFP_KERNEL);
if (!shadow_addr)
goto shadow_err;
pgt_info->phys_addr = phys_addr;
pgt_info->shadow_addr = shadow_addr;
pgt_info->ctx = ctx;
pgt_info->num_of_ptes = 0;
hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
return shadow_addr;
shadow_err:
gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr,
prop->mmu_hop_table_size);
pool_add_err:
kfree(pgt_info);
return ULLONG_MAX;
}
static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
{
return ctx->hdev->asic_prop.mmu_pgt_addr +
(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
static inline u64 get_hop0_addr(struct hl_ctx *ctx)
{
return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
static void flush(struct hl_ctx *ctx)
{
/* flush all writes from all cores to reach PCI */
mb();
ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
}
/* transform the value to physical address when writing to H/W */
static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
/*
* The value to write is actually the address of the next shadow hop +
* flags at the 12 LSBs.
* Hence in order to get the value to write to the physical PTE, we
* clear the 12 LSBs and translate the shadow hop to its associated
* physical hop, and add back the original 12 LSBs.
*/
u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
(val & FLAGS_MASK);
ctx->hdev->asic_funcs->write_pte(ctx->hdev,
get_phys_addr(ctx, shadow_pte_addr),
phys_val);
*(u64 *) (uintptr_t) shadow_pte_addr = val;
}
/* do not transform the value to physical address when writing to H/W */
static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
u64 val)
{
ctx->hdev->asic_funcs->write_pte(ctx->hdev,
get_phys_addr(ctx, shadow_pte_addr),
val);
*(u64 *) (uintptr_t) shadow_pte_addr = val;
}
/* clear the last and present bits */
static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{
/* no need to transform the value to physical address */
write_final_pte(ctx, pte_addr, 0);
}
static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
{
get_pgt_info(ctx, hop_addr)->num_of_ptes++;
}
/*
* put_pte - decrement the num of ptes and free the hop if possible
*
* @ctx: pointer to the context structure
* @hop_addr: addr of the hop
*
 * This function returns the number of ptes left on this hop. If the number is
 * 0, it means the hop itself was freed.
*/
static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
{
struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
int num_of_ptes_left;
pgt_info->num_of_ptes--;
/*
* Need to save the number of ptes left because free_hop might free
* the pgt_info
*/
num_of_ptes_left = pgt_info->num_of_ptes;
if (!num_of_ptes_left)
_free_hop(ctx, pgt_info);
return num_of_ptes_left;
}
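/*
 * get_hop_pte_addr() - compute the address of a PTE inside a hop.
 *
 * The PTE index is the virtual-address bit field selected by the per-hop
 * mask/shift pair, i.e.
 * pte_addr = hop_addr + pte_size * ((virt_addr & mask) >> shift).
 */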
static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
u64 *hop_addr_arr, u64 virt_addr, enum mmu_hop_num hop_idx)
{
u64 mask, shift;
mask = mmu_prop->hop_masks[hop_idx];
shift = mmu_prop->hop_shifts[hop_idx];
return hop_addr_arr[hop_idx] +
ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}
static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
bool *is_new_hop)
{
u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
if (hop_addr == ULLONG_MAX) {
hop_addr = alloc_hop(ctx);
*is_new_hop = (hop_addr != ULLONG_MAX);
}
return hop_addr;
}
/* translates shadow address inside hop to a physical address */
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{
u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
u64 shadow_hop_addr = shadow_addr & ~page_mask;
u64 pte_offset = shadow_addr & page_mask;
u64 phys_hop_addr;
if (shadow_hop_addr != get_hop0_addr(ctx))
phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
else
phys_hop_addr = get_phys_hop0_addr(ctx);
return phys_hop_addr + pte_offset;
}
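/*
 * dram_default_mapping_init() - map the whole DRAM range to the default page.
 *
 * For ASICs with DRAM default page mapping, every user context gets a hop1,
 * a hop2 and enough hop3 tables to cover dram_size_for_default_page_mapping,
 * with all hop3 PTEs pointing at mmu_dram_default_page_addr. The allocated
 * hop addresses are kept in ctx->dram_default_hops so that
 * dram_default_mapping_fini() can tear the tables down on context release.
 */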
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
hop2_pte_addr, hop3_pte_addr, pte_val;
int rc, i, j, hop3_allocated = 0;
if ((!prop->dram_supports_virtual_memory) ||
(!hdev->dram_default_page_mapping) ||
(ctx->asid == HL_KERNEL_ASID_ID))
return 0;
num_of_hop3 = prop->dram_size_for_default_page_mapping;
do_div(num_of_hop3, prop->dram_page_size);
do_div(num_of_hop3, HOP_PTE_ENTRIES_512);
/* add hop1 and hop2 */
total_hops = num_of_hop3 + 2;
ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
if (!ctx->dram_default_hops)
return -ENOMEM;
hop0_addr = get_hop0_addr(ctx);
hop1_addr = alloc_hop(ctx);
if (hop1_addr == ULLONG_MAX) {
dev_err(hdev->dev, "failed to alloc hop 1\n");
rc = -ENOMEM;
goto hop1_err;
}
ctx->dram_default_hops[total_hops - 1] = hop1_addr;
hop2_addr = alloc_hop(ctx);
if (hop2_addr == ULLONG_MAX) {
dev_err(hdev->dev, "failed to alloc hop 2\n");
rc = -ENOMEM;
goto hop2_err;
}
ctx->dram_default_hops[total_hops - 2] = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
ctx->dram_default_hops[i] = alloc_hop(ctx);
if (ctx->dram_default_hops[i] == ULLONG_MAX) {
dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
rc = -ENOMEM;
goto hop3_err;
}
hop3_allocated++;
}
/* need only pte 0 in hops 0 and 1 */
pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop0_addr, pte_val);
pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop1_addr, pte_val);
get_pte(ctx, hop1_addr);
hop2_pte_addr = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
write_pte(ctx, hop2_pte_addr, pte_val);
get_pte(ctx, hop2_addr);
hop2_pte_addr += HL_PTE_SIZE;
}
pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
LAST_MASK | PAGE_PRESENT_MASK;
for (i = 0 ; i < num_of_hop3 ; i++) {
hop3_pte_addr = ctx->dram_default_hops[i];
for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
write_final_pte(ctx, hop3_pte_addr, pte_val);
get_pte(ctx, ctx->dram_default_hops[i]);
hop3_pte_addr += HL_PTE_SIZE;
}
}
flush(ctx);
return 0;
hop3_err:
for (i = 0 ; i < hop3_allocated ; i++)
free_hop(ctx, ctx->dram_default_hops[i]);
free_hop(ctx, hop2_addr);
hop2_err:
free_hop(ctx, hop1_addr);
hop1_err:
kfree(ctx->dram_default_hops);
return rc;
}
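/*
 * Sizing example (numbers are illustrative): with
 * dram_size_for_default_page_mapping = 64GB and dram_page_size = 2MB there
 * are 32768 default pages, i.e. num_of_hop3 = 32768 / 512 = 64 hop3 tables,
 * and total_hops = 66 entries in ctx->dram_default_hops (64 hop3 + hop2 +
 * hop1). Every hop3 PTE points at the single default DRAM page.
 */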
static void dram_default_mapping_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
hop2_pte_addr, hop3_pte_addr;
int i, j;
if ((!prop->dram_supports_virtual_memory) ||
(!hdev->dram_default_page_mapping) ||
(ctx->asid == HL_KERNEL_ASID_ID))
return;
num_of_hop3 = prop->dram_size_for_default_page_mapping;
do_div(num_of_hop3, prop->dram_page_size);
do_div(num_of_hop3, HOP_PTE_ENTRIES_512);
hop0_addr = get_hop0_addr(ctx);
/* add hop1 and hop2 */
total_hops = num_of_hop3 + 2;
hop1_addr = ctx->dram_default_hops[total_hops - 1];
hop2_addr = ctx->dram_default_hops[total_hops - 2];
for (i = 0 ; i < num_of_hop3 ; i++) {
hop3_pte_addr = ctx->dram_default_hops[i];
for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
clear_pte(ctx, hop3_pte_addr);
put_pte(ctx, ctx->dram_default_hops[i]);
hop3_pte_addr += HL_PTE_SIZE;
}
}
hop2_pte_addr = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
clear_pte(ctx, hop2_pte_addr);
put_pte(ctx, hop2_addr);
hop2_pte_addr += HL_PTE_SIZE;
}
clear_pte(ctx, hop1_addr);
put_pte(ctx, hop1_addr);
clear_pte(ctx, hop0_addr);
kfree(ctx->dram_default_hops);
flush(ctx);
}
/**
* hl_mmu_v1_init() - initialize the MMU module.
* @hdev: habanalabs device structure.
*
* This function does the following:
* - Create a pool of pages for pgt_infos.
* - Create a shadow table for pgt
*
* Return: 0 for success, non-zero for failure.
*/
static int hl_mmu_v1_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
hdev->mmu_priv.dr.mmu_pgt_pool =
gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
dev_err(hdev->dev, "Failed to create page gen pool\n");
return -ENOMEM;
}
rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
prop->mmu_hop0_tables_total_size,
prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
-1);
if (rc) {
dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
goto err_pool_add;
}
hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid, prop->mmu_hop_table_size,
GFP_KERNEL);
if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
rc = -ENOMEM;
goto err_pool_add;
}
/* MMU H/W init will be done in device hw_init() */
return 0;
err_pool_add:
gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
return rc;
}
/**
* hl_mmu_v1_fini() - release the MMU module.
* @hdev: habanalabs device structure.
*
* This function does the following:
* - Disable MMU in H/W.
* - Free the pgt_infos pool.
*
* All contexts should be freed before calling this function.
*/
static void hl_mmu_v1_fini(struct hl_device *hdev)
{
/* MMU H/W fini was already done in device hw_fini() */
if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
		/* Make sure that if we arrive here again without init having
		 * been called, we won't cause a kernel panic. This can happen,
		 * for example, if we fail at certain points of the hard reset
		 * code.
		 */
hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
}
}
/**
* hl_mmu_v1_ctx_init() - initialize a context for using the MMU module.
* @ctx: pointer to the context structure to initialize.
*
* Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
* page tables hops related to this context.
* Return: 0 on success, non-zero otherwise.
*/
static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx)
{
hash_init(ctx->mmu_shadow_hash);
return dram_default_mapping_init(ctx);
}
/*
* hl_mmu_ctx_fini - disable a ctx from using the mmu module
*
* @ctx: pointer to the context structure
*
* This function does the following:
* - Free any pgts which were not freed yet
* - Free the mutex
* - Free DRAM default page mapping hops
*/
static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
struct pgt_info *pgt_info;
struct hlist_node *tmp;
int i;
dram_default_mapping_fini(ctx);
if (!hash_empty(ctx->mmu_shadow_hash))
dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
ctx->asid);
hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
dev_err_ratelimited(hdev->dev,
"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
_free_hop(ctx, pgt_info);
}
}
static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
u64 virt_addr, bool is_dram_addr)
{
u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
bool is_huge, clear_hop3 = true;
int hop_idx;
/* shifts and masks are the same in PMMU and HPMMU, use one of them */
mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
for (hop_idx = MMU_HOP0; hop_idx < MMU_HOP4; hop_idx++) {
if (hop_idx == MMU_HOP0) {
hop_addr[hop_idx] = get_hop0_addr(ctx);
} else {
hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
if (hop_addr[hop_idx] == ULLONG_MAX)
goto not_mapped;
}
hop_pte_addr[hop_idx] =
get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
}
is_huge = curr_pte & mmu_prop->last_mask;
if (is_dram_addr && !is_huge) {
dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
return -EFAULT;
}
if (!is_huge) {
hop_idx = MMU_HOP4;
hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
if (hop_addr[hop_idx] == ULLONG_MAX)
goto not_mapped;
hop_pte_addr[hop_idx] =
get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
clear_hop3 = false;
}
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
PAGE_PRESENT_MASK;
if (curr_pte == default_pte) {
dev_err(hdev->dev,
"DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
virt_addr);
goto not_mapped;
}
if (!(curr_pte & PAGE_PRESENT_MASK)) {
dev_err(hdev->dev,
"DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
virt_addr);
goto not_mapped;
}
hop_idx = MMU_HOP3;
write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
put_pte(ctx, hop_addr[hop_idx]);
} else {
if (!(curr_pte & PAGE_PRESENT_MASK))
goto not_mapped;
if (hop_addr[MMU_HOP4])
clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
else
clear_pte(ctx, hop_pte_addr[MMU_HOP3]);
if (hop_addr[MMU_HOP4] && !put_pte(ctx, hop_addr[MMU_HOP4]))
clear_hop3 = true;
if (!clear_hop3)
goto mapped;
for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) {
clear_pte(ctx, hop_pte_addr[hop_idx]);
if (hop_idx == MMU_HOP0)
break;
if (put_pte(ctx, hop_addr[hop_idx]))
goto mapped;
}
}
mapped:
return 0;
not_mapped:
dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
virt_addr);
return -EINVAL;
}
static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
u32 page_size, bool is_dram_addr)
{
u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
bool is_huge, hop_new[MMU_V1_MAX_HOPS] = {false};
int num_hops, hop_idx, prev_hop, rc = -ENOMEM;
/*
	 * This mapping function can map a page or a huge page. For a huge page
	 * there are only 3 hops rather than 4. Currently the DRAM allocation
	 * uses huge pages only, but user memory could have been allocated with
	 * either of the two page sizes. Since this is common code for all
	 * three cases, we need this huge page check.
*/
if (is_dram_addr) {
mmu_prop = &prop->dmmu;
is_huge = true;
} else if (page_size == prop->pmmu_huge.page_size) {
mmu_prop = &prop->pmmu_huge;
is_huge = true;
} else {
mmu_prop = &prop->pmmu;
is_huge = false;
}
num_hops = is_huge ? (MMU_V1_MAX_HOPS - 1) : MMU_V1_MAX_HOPS;
for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++) {
if (hop_idx == MMU_HOP0) {
hop_addr[hop_idx] = get_hop0_addr(ctx);
} else {
hop_addr[hop_idx] =
get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
if (hop_addr[hop_idx] == ULLONG_MAX)
goto err;
}
hop_pte_addr[hop_idx] =
get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
}
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
PAGE_PRESENT_MASK;
if (curr_pte != default_pte) {
dev_err(hdev->dev,
"DRAM: mapping already exists for virt_addr 0x%llx\n",
virt_addr);
rc = -EINVAL;
goto err;
}
for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
if (hop_new[hop_idx]) {
dev_err(hdev->dev, "DRAM mapping should not allocate more hops\n");
rc = -EFAULT;
goto err;
}
}
} else if (curr_pte & PAGE_PRESENT_MASK) {
dev_err(hdev->dev,
"mapping already exists for virt_addr 0x%llx\n",
virt_addr);
for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++)
dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n", hop_idx,
*(u64 *) (uintptr_t) hop_pte_addr[hop_idx],
hop_pte_addr[hop_idx]);
rc = -EINVAL;
goto err;
}
curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
| PAGE_PRESENT_MASK;
write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);
for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
prev_hop = hop_idx - 1;
if (hop_new[hop_idx]) {
curr_pte = (hop_addr[hop_idx] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
if (hop_idx != MMU_HOP1)
get_pte(ctx, hop_addr[prev_hop]);
}
}
get_pte(ctx, hop_addr[num_hops - 1]);
return 0;
err:
	for (hop_idx = num_hops - 1; hop_idx > MMU_HOP0; hop_idx--) {
if (hop_new[hop_idx])
free_hop(ctx, hop_addr[hop_idx]);
}
return rc;
}
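/*
 * Reference counting note for the map/unmap pair above: num_of_ptes tracks
 * how many entries of a hop are in use. Mapping increments it on the hop
 * holding the new leaf PTE and on every hop into which a newly allocated
 * child hop was linked (hop0 excluded, as it is never freed); unmapping does
 * the matching put_pte() calls and frees a hop once its count drops to zero.
 */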
/*
 * hl_mmu_v1_swap_out - marks all mappings of the given ctx as swapped out
*
* @ctx: pointer to the context structure
*
*/
static void hl_mmu_v1_swap_out(struct hl_ctx *ctx)
{
}
/*
 * hl_mmu_v1_swap_in - marks all mappings of the given ctx as swapped in
*
* @ctx: pointer to the context structure
*
*/
static void hl_mmu_v1_swap_in(struct hl_ctx *ctx)
{
}
static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr, is_huge;
int i, used_hops;
is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
prop->dmmu.start_addr,
prop->dmmu.end_addr);
is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size,
prop->pmmu.start_addr,
prop->pmmu.end_addr);
is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr,
prop->pmmu_huge.page_size,
prop->pmmu_huge.start_addr,
prop->pmmu_huge.end_addr);
if (is_dram_addr) {
mmu_prop = &prop->dmmu;
is_huge = true;
} else if (is_pmmu_addr) {
mmu_prop = &prop->pmmu;
is_huge = false;
} else if (is_pmmu_h_addr) {
mmu_prop = &prop->pmmu_huge;
is_huge = true;
} else {
return -EINVAL;
}
used_hops = mmu_prop->num_hops;
	/* huge pages use one less hop */
if (is_huge)
used_hops--;
hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx);
hops->hop_info[0].hop_pte_addr =
hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
hops->hop_info[0].hop_addr, virt_addr);
hops->hop_info[0].hop_pte_val =
hdev->asic_funcs->read_pte(hdev,
hops->hop_info[0].hop_pte_addr);
for (i = 1 ; i < used_hops ; i++) {
hops->hop_info[i].hop_addr =
hl_mmu_get_next_hop_addr(ctx,
hops->hop_info[i - 1].hop_pte_val);
if (hops->hop_info[i].hop_addr == ULLONG_MAX)
return -EFAULT;
hops->hop_info[i].hop_pte_addr =
hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
hops->hop_info[i].hop_addr,
virt_addr);
hops->hop_info[i].hop_pte_val =
hdev->asic_funcs->read_pte(hdev,
hops->hop_info[i].hop_pte_addr);
if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
return -EFAULT;
if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
break;
}
	/* if we passed over all hops then no last hop was found */
if (i == mmu_prop->num_hops)
return -EFAULT;
if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
return -EFAULT;
hops->used_hops = i + 1;
return 0;
}
/*
 * hl_mmu_v1_set_funcs - set the MMU functions for MMU v1
 *
 * @hdev: pointer to the device structure
 * @mmu: pointer to the MMU functions structure to fill
*/
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
{
mmu->init = hl_mmu_v1_init;
mmu->fini = hl_mmu_v1_fini;
mmu->ctx_init = hl_mmu_v1_ctx_init;
mmu->ctx_fini = hl_mmu_v1_ctx_fini;
mmu->map = hl_mmu_v1_map;
mmu->unmap = hl_mmu_v1_unmap;
mmu->flush = flush;
mmu->swap_out = hl_mmu_v1_swap_out;
mmu->swap_in = hl_mmu_v1_swap_in;
mmu->get_tlb_info = hl_mmu_v1_get_tlb_info;
}
| linux-master | drivers/accel/habanalabs/common/mmu/mmu_v1.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "../habanalabs.h"
#include "../../include/hw_ip/mmu/mmu_general.h"
#include <linux/slab.h>
static struct pgt_info *hl_mmu_v2_hr_get_pgt_info(struct hl_ctx *ctx, u64 phys_hop_addr)
{
struct pgt_info *pgt_info = NULL;
hash_for_each_possible(ctx->hr_mmu_phys_hash, pgt_info, node,
(unsigned long) phys_hop_addr)
if (phys_hop_addr == pgt_info->phys_addr)
break;
return pgt_info;
}
static void hl_mmu_v2_hr_add_pgt_info(struct hl_ctx *ctx, struct pgt_info *pgt_info,
dma_addr_t phys_addr)
{
hash_add(ctx->hr_mmu_phys_hash, &pgt_info->node, phys_addr);
}
static struct pgt_info *hl_mmu_v2_hr_get_hop0_pgt_info(struct hl_ctx *ctx)
{
return &ctx->hdev->mmu_priv.hr.mmu_asid_hop0[ctx->asid];
}
/**
* hl_mmu_v2_hr_init() - initialize the MMU module.
* @hdev: habanalabs device structure.
*
* This function does the following:
* - Create a pool of pages for pgt_infos.
* - Create a shadow table for pgt
*
* Return: 0 for success, non-zero for failure.
*/
static inline int hl_mmu_v2_hr_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
return hl_mmu_hr_init(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size,
prop->mmu_pgt_size);
}
/**
* hl_mmu_v2_hr_fini() - release the MMU module.
* @hdev: habanalabs device structure.
*
* This function does the following:
* - Disable MMU in H/W.
* - Free the pgt_infos pool.
*
* All contexts should be freed before calling this function.
*/
static inline void hl_mmu_v2_hr_fini(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
hl_mmu_hr_fini(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size);
}
/**
* hl_mmu_v2_hr_ctx_init() - initialize a context for using the MMU module.
* @ctx: pointer to the context structure to initialize.
*
* Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
* page tables hops related to this context.
* Return: 0 on success, non-zero otherwise.
*/
static int hl_mmu_v2_hr_ctx_init(struct hl_ctx *ctx)
{
hash_init(ctx->hr_mmu_phys_hash);
return 0;
}
/*
* hl_mmu_v2_hr_ctx_fini - disable a ctx from using the mmu module
*
* @ctx: pointer to the context structure
*
* This function does the following:
* - Free any pgts which were not freed yet
* - Free the mutex
* - Free DRAM default page mapping hops
*/
static void hl_mmu_v2_hr_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
struct pgt_info *pgt_info;
struct hlist_node *tmp;
int i;
if (!hash_empty(ctx->hr_mmu_phys_hash))
dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
ctx->asid);
hash_for_each_safe(ctx->hr_mmu_phys_hash, i, tmp, pgt_info, node) {
dev_err_ratelimited(hdev->dev,
"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
hl_mmu_hr_free_hop_remove_pgt(pgt_info, &ctx->hdev->mmu_priv.hr,
ctx->hdev->asic_prop.mmu_hop_table_size);
}
}
static int _hl_mmu_v2_hr_unmap(struct hl_ctx *ctx,
u64 virt_addr, bool is_dram_addr)
{
u64 curr_pte, scrambled_virt_addr, hop_pte_phys_addr[MMU_ARCH_6_HOPS] = { 0 };
struct pgt_info *hops_pgt_info[MMU_ARCH_6_HOPS] = { NULL };
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop;
struct hl_mmu_properties *mmu_prop;
bool is_huge = false;
int i, hop_last;
prop = &hdev->asic_prop;
/* shifts and masks are the same in PMMU and HMMU, use one of them */
mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
hop_last = mmu_prop->num_hops - 1;
scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
curr_pte = 0;
for (i = 0 ; i < mmu_prop->num_hops ; i++) {
/* we get HOP0 differently, it doesn't need curr_pte */
if (i == 0)
hops_pgt_info[i] = hl_mmu_v2_hr_get_hop0_pgt_info(ctx);
else
hops_pgt_info[i] = hl_mmu_hr_get_next_hop_pgt_info(ctx,
&ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs, curr_pte);
if (!hops_pgt_info[i])
goto not_mapped;
hop_pte_phys_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
hops_pgt_info[i]->phys_addr,
scrambled_virt_addr);
if (hop_pte_phys_addr[i] == U64_MAX)
return -EFAULT;
curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
hop_pte_phys_addr[i],
ctx->hdev->asic_prop.mmu_hop_table_size);
if ((i < hop_last) && (curr_pte & mmu_prop->last_mask)) {
hop_last = i;
is_huge = true;
break;
}
}
if (is_dram_addr && !is_huge) {
dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
return -EFAULT;
}
if (!(curr_pte & PAGE_PRESENT_MASK))
goto not_mapped;
for (i = hop_last ; i > 0 ; i--) {
hl_mmu_hr_clear_pte(ctx, hops_pgt_info[i], hop_pte_phys_addr[i],
ctx->hdev->asic_prop.mmu_hop_table_size);
if (hl_mmu_hr_put_pte(ctx, hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
ctx->hdev->asic_prop.mmu_hop_table_size))
goto mapped;
}
hl_mmu_hr_clear_pte(ctx, hops_pgt_info[0], hop_pte_phys_addr[0],
ctx->hdev->asic_prop.mmu_hop_table_size);
mapped:
return 0;
not_mapped:
dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", virt_addr);
return -EINVAL;
}
static int hl_mmu_v2_get_last_hop(struct hl_mmu_properties *mmu_prop, u32 page_size)
{
int hop;
for (hop = (mmu_prop->num_hops - 1); hop; hop--) {
if (mmu_prop->hop_shifts[hop] == 0)
continue;
if (page_size <= (1 << mmu_prop->hop_shifts[hop]))
break;
}
return hop;
}
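/*
 * Example (the hop shifts are assumptions for illustration): with
 * hop_shifts = {48, 39, 30, 21, 12, 0}, a 2MB page (1 << 21) selects the hop
 * whose shift is 21, i.e. the walk stops one level above the 4KB leaf, while
 * a 4KB page walks all the way down to the hop whose shift is 12.
 */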
static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
u64 virt_addr, u64 phys_addr,
u32 page_size, bool is_dram_addr)
{
u64 hop_pte_phys_addr[MMU_ARCH_6_HOPS] = { 0 },
curr_pte = 0, scrambled_virt_addr, scrambled_phys_addr;
struct pgt_info *hops_pgt_info[MMU_ARCH_6_HOPS] = { NULL };
bool hop_new[MMU_ARCH_6_HOPS] = { false };
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
int i, hop_last, rc = -ENOMEM;
/*
	 * This mapping function can map a page or a huge page. For a huge page
	 * there are only 4 hops rather than 5. Currently the DRAM allocation
	 * uses huge pages only, but user memory could have been allocated with
	 * either of the two page sizes. Since this is common code for all
	 * three cases, we need this huge page check.
*/
if (is_dram_addr)
mmu_prop = &prop->dmmu;
else if (page_size == prop->pmmu_huge.page_size)
mmu_prop = &prop->pmmu_huge;
else
mmu_prop = &prop->pmmu;
hop_last = hl_mmu_v2_get_last_hop(mmu_prop, page_size);
if (hop_last <= 0) {
dev_err(ctx->hdev->dev, "Invalid last HOP %d\n", hop_last);
return -EFAULT;
}
scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
scrambled_phys_addr = hdev->asic_funcs->scramble_addr(hdev, phys_addr);
for (i = 0 ; i <= hop_last ; i++) {
if (i == 0)
hops_pgt_info[i] = hl_mmu_v2_hr_get_hop0_pgt_info(ctx);
else
hops_pgt_info[i] = hl_mmu_hr_get_alloc_next_hop(ctx,
&ctx->hdev->mmu_priv.hr,
&ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
mmu_prop, curr_pte, &hop_new[i]);
if (!hops_pgt_info[i])
goto err;
hop_pte_phys_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
hops_pgt_info[i]->phys_addr,
scrambled_virt_addr);
curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
hop_pte_phys_addr[i],
ctx->hdev->asic_prop.mmu_hop_table_size);
}
if (curr_pte & PAGE_PRESENT_MASK) {
dev_err(hdev->dev, "mapping already exists for virt_addr 0x%llx\n",
scrambled_virt_addr);
for (i = 0 ; i <= hop_last ; i++)
dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n",
i,
*(u64 *) (uintptr_t)
hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
hop_pte_phys_addr[i],
ctx->hdev->asic_prop.mmu_hop_table_size),
hop_pte_phys_addr[i]);
rc = -EINVAL;
goto err;
}
curr_pte = (scrambled_phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
| PAGE_PRESENT_MASK;
/* Write the PTEs */
hl_mmu_hr_write_pte(ctx, hops_pgt_info[hop_last], hop_pte_phys_addr[hop_last], curr_pte,
ctx->hdev->asic_prop.mmu_hop_table_size);
/* for each new hop, add its address to the table of previous-hop */
for (i = 1 ; i <= hop_last ; i++) {
if (hop_new[i]) {
curr_pte = (hops_pgt_info[i]->phys_addr & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
hl_mmu_hr_write_pte(ctx, hops_pgt_info[i - 1], hop_pte_phys_addr[i - 1],
curr_pte, ctx->hdev->asic_prop.mmu_hop_table_size);
if (i - 1)
hl_mmu_hr_get_pte(ctx, &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
hops_pgt_info[i - 1]->phys_addr);
}
}
hl_mmu_hr_get_pte(ctx, &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
hops_pgt_info[hop_last]->phys_addr);
return 0;
err:
for (i = 1 ; i <= hop_last ; i++)
if (hop_new[i] && hops_pgt_info[i])
hl_mmu_hr_free_hop_remove_pgt(hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
ctx->hdev->asic_prop.mmu_hop_table_size);
return rc;
}
/*
 * hl_mmu_v2_hr_swap_out - marks all mappings of the given ctx as swapped out
*
* @ctx: pointer to the context structure
*
*/
static void hl_mmu_v2_hr_swap_out(struct hl_ctx *ctx)
{
}
/*
 * hl_mmu_v2_hr_swap_in - marks all mappings of the given ctx as swapped in
*
* @ctx: pointer to the context structure
*
*/
static void hl_mmu_v2_hr_swap_in(struct hl_ctx *ctx)
{
}
static int hl_mmu_v2_hr_get_tlb_mapping_params(struct hl_device *hdev,
struct hl_mmu_properties **mmu_prop,
struct hl_mmu_hop_info *hops,
u64 virt_addr, bool *is_huge)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr;
is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
prop->dmmu.start_addr,
prop->dmmu.end_addr);
is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size,
prop->pmmu.start_addr,
prop->pmmu.end_addr);
is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr,
prop->pmmu_huge.page_size,
prop->pmmu_huge.start_addr,
prop->pmmu_huge.end_addr);
if (is_dram_addr) {
*mmu_prop = &prop->dmmu;
*is_huge = true;
hops->range_type = HL_VA_RANGE_TYPE_DRAM;
} else if (is_pmmu_addr) {
*mmu_prop = &prop->pmmu;
*is_huge = false;
hops->range_type = HL_VA_RANGE_TYPE_HOST;
} else if (is_pmmu_h_addr) {
*mmu_prop = &prop->pmmu_huge;
*is_huge = true;
hops->range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
} else {
return -EINVAL;
}
return 0;
}
static int hl_mmu_v2_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops)
{
return hl_mmu_hr_get_tlb_info(ctx, virt_addr, hops,
&ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs);
}
/*
 * hl_mmu_v2_hr_set_funcs - set the MMU functions for host-resident MMU v2
 *
 * @hdev: pointer to the device structure
 * @mmu: pointer to the MMU functions structure to fill
*/
void hl_mmu_v2_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
{
mmu->init = hl_mmu_v2_hr_init;
mmu->fini = hl_mmu_v2_hr_fini;
mmu->ctx_init = hl_mmu_v2_hr_ctx_init;
mmu->ctx_fini = hl_mmu_v2_hr_ctx_fini;
mmu->map = _hl_mmu_v2_hr_map;
mmu->unmap = _hl_mmu_v2_hr_unmap;
mmu->flush = hl_mmu_hr_flush;
mmu->swap_out = hl_mmu_v2_hr_swap_out;
mmu->swap_in = hl_mmu_v2_hr_swap_in;
mmu->get_tlb_info = hl_mmu_v2_hr_get_tlb_info;
mmu->hr_funcs.get_hop0_pgt_info = hl_mmu_v2_hr_get_hop0_pgt_info;
mmu->hr_funcs.get_pgt_info = hl_mmu_v2_hr_get_pgt_info;
mmu->hr_funcs.add_pgt_info = hl_mmu_v2_hr_add_pgt_info;
mmu->hr_funcs.get_tlb_mapping_params = hl_mmu_v2_hr_get_tlb_mapping_params;
}
| linux-master | drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include <linux/slab.h>
#include "../habanalabs.h"
#include <trace/events/habanalabs.h>
/**
* hl_mmu_get_funcs() - get MMU functions structure
* @hdev: habanalabs device structure.
* @pgt_residency: page table residency.
* @is_dram_addr: true if we need HMMU functions
*
* @return appropriate MMU functions structure
*/
static struct hl_mmu_funcs *hl_mmu_get_funcs(struct hl_device *hdev, int pgt_residency,
bool is_dram_addr)
{
return &hdev->mmu_func[pgt_residency];
}
bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
prop->dmmu.start_addr,
prop->dmmu.end_addr);
}
/**
* hl_mmu_init() - initialize the MMU module.
* @hdev: habanalabs device structure.
*
* Return: 0 for success, non-zero for failure.
*/
int hl_mmu_init(struct hl_device *hdev)
{
int rc = -EOPNOTSUPP;
if (hdev->mmu_disable)
return 0;
mutex_init(&hdev->mmu_lock);
if (hdev->mmu_func[MMU_DR_PGT].init != NULL) {
rc = hdev->mmu_func[MMU_DR_PGT].init(hdev);
if (rc)
return rc;
}
if (hdev->mmu_func[MMU_HR_PGT].init != NULL) {
rc = hdev->mmu_func[MMU_HR_PGT].init(hdev);
if (rc)
goto fini_dr_mmu;
}
return 0;
fini_dr_mmu:
if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
hdev->mmu_func[MMU_DR_PGT].fini(hdev);
return rc;
}
/**
* hl_mmu_fini() - release the MMU module.
* @hdev: habanalabs device structure.
*
* This function does the following:
* - Disable MMU in H/W.
* - Free the pgt_infos pool.
*
* All contexts should be freed before calling this function.
*/
void hl_mmu_fini(struct hl_device *hdev)
{
if (hdev->mmu_disable)
return;
if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
hdev->mmu_func[MMU_DR_PGT].fini(hdev);
if (hdev->mmu_func[MMU_HR_PGT].fini != NULL)
hdev->mmu_func[MMU_HR_PGT].fini(hdev);
mutex_destroy(&hdev->mmu_lock);
}
/**
* hl_mmu_ctx_init() - initialize a context for using the MMU module.
* @ctx: pointer to the context structure to initialize.
*
* Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
* page tables hops related to this context.
* Return: 0 on success, non-zero otherwise.
*/
int hl_mmu_ctx_init(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
int rc = -EOPNOTSUPP;
if (hdev->mmu_disable)
return 0;
if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) {
rc = hdev->mmu_func[MMU_DR_PGT].ctx_init(ctx);
if (rc)
return rc;
}
if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL) {
rc = hdev->mmu_func[MMU_HR_PGT].ctx_init(ctx);
if (rc)
goto fini_dr_ctx;
}
return 0;
fini_dr_ctx:
if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
hdev->mmu_func[MMU_DR_PGT].fini(hdev);
return rc;
}
/*
* hl_mmu_ctx_fini - disable a ctx from using the mmu module
*
* @ctx: pointer to the context structure
*
* This function does the following:
* - Free any pgts which were not freed yet
* - Free the mutex
* - Free DRAM default page mapping hops
*/
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
if (hdev->mmu_disable)
return;
if (hdev->mmu_func[MMU_DR_PGT].ctx_fini != NULL)
hdev->mmu_func[MMU_DR_PGT].ctx_fini(ctx);
if (hdev->mmu_func[MMU_HR_PGT].ctx_fini != NULL)
hdev->mmu_func[MMU_HR_PGT].ctx_fini(ctx);
}
/*
* hl_mmu_get_real_page_size - get real page size to use in map/unmap operation
*
* @hdev: pointer to device data.
* @mmu_prop: MMU properties.
* @page_size: page size
* @real_page_size: set here the actual page size to use for the operation
* @is_dram_addr: true if DRAM address, otherwise false.
*
* @return 0 on success, otherwise non 0 error code
*
 * Note that this is a general implementation that fits most MMU architectures, but since it is
 * used as an MMU function:
 * 1. it shall not be called directly - only via the mmu_func structure instance
 * 2. each MMU may modify the implementation internally
*/
int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
u32 page_size, u32 *real_page_size, bool is_dram_addr)
{
/*
* The H/W handles mapping of specific page sizes. Hence if the page
* size is bigger, we break it to sub-pages and map them separately.
*/
if ((page_size % mmu_prop->page_size) == 0) {
*real_page_size = mmu_prop->page_size;
return 0;
}
dev_err(hdev->dev, "page size of %u is not %uKB aligned, can't map\n",
page_size, mmu_prop->page_size >> 10);
return -EFAULT;
}
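/*
 * Example: a 64KB host allocation mapped through a PMMU with a 4KB native
 * page size yields *real_page_size = 4KB, and the caller
 * (hl_mmu_map_page/hl_mmu_unmap_page) then issues 16 per-page operations.
 */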
static struct hl_mmu_properties *hl_mmu_get_prop(struct hl_device *hdev, u32 page_size,
bool is_dram_addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
if (is_dram_addr)
return &prop->dmmu;
else if ((page_size % prop->pmmu_huge.page_size) == 0)
return &prop->pmmu_huge;
return &prop->pmmu;
}
/*
* hl_mmu_unmap_page - unmaps a virtual addr
*
* @ctx: pointer to the context structure
* @virt_addr: virt addr to map from
* @page_size: size of the page to unmap
* @flush_pte: whether to do a PCI flush
*
* This function does the following:
* - Check that the virt addr is mapped
* - Unmap the virt addr and frees pgts if possible
* - Returns 0 on success, -EINVAL if the given addr is not mapped
*
* Because this function changes the page tables in the device and because it
* changes the MMU hash, it must be protected by a lock.
* However, because it maps only a single page, the lock should be implemented
* in a higher level in order to protect the entire mapping of the memory area
*
* For optimization reasons PCI flush may be requested once after unmapping of
* large area.
*/
int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte)
{
struct hl_device *hdev = ctx->hdev;
struct hl_mmu_properties *mmu_prop;
struct hl_mmu_funcs *mmu_funcs;
int i, pgt_residency, rc = 0;
u32 real_page_size, npages;
u64 real_virt_addr;
bool is_dram_addr;
if (hdev->mmu_disable)
return 0;
is_dram_addr = hl_is_dram_va(hdev, virt_addr);
mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);
pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
is_dram_addr);
if (rc)
return rc;
npages = page_size / real_page_size;
real_virt_addr = virt_addr;
for (i = 0 ; i < npages ; i++) {
rc = mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr);
if (rc)
break;
real_virt_addr += real_page_size;
}
if (flush_pte)
mmu_funcs->flush(ctx);
if (trace_habanalabs_mmu_unmap_enabled() && !rc)
trace_habanalabs_mmu_unmap(hdev->dev, virt_addr, 0, page_size, flush_pte);
return rc;
}
/*
* hl_mmu_map_page - maps a virtual addr to physical addr
*
* @ctx: pointer to the context structure
* @virt_addr: virt addr to map from
* @phys_addr: phys addr to map to
* @page_size: physical page size
* @flush_pte: whether to do a PCI flush
*
* This function does the following:
* - Check that the virt addr is not mapped
* - Allocate pgts as necessary in order to map the virt addr to the phys
* - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
*
* Because this function changes the page tables in the device and because it
* changes the MMU hash, it must be protected by a lock.
* However, because it maps only a single page, the lock should be implemented
* in a higher level in order to protect the entire mapping of the memory area
*
* For optimization reasons PCI flush may be requested once after mapping of
* large area.
*/
int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
bool flush_pte)
{
int i, rc, pgt_residency, mapped_cnt = 0;
struct hl_device *hdev = ctx->hdev;
struct hl_mmu_properties *mmu_prop;
u64 real_virt_addr, real_phys_addr;
struct hl_mmu_funcs *mmu_funcs;
u32 real_page_size, npages;
bool is_dram_addr;
if (hdev->mmu_disable)
return 0;
is_dram_addr = hl_is_dram_va(hdev, virt_addr);
mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);
pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
is_dram_addr);
if (rc)
return rc;
/*
	 * Verify that the phys and virt addresses are aligned with the
	 * MMU page size (for DRAM this means checking the addresses
	 * after scrambling)
*/
if ((is_dram_addr &&
((hdev->asic_funcs->scramble_addr(hdev, phys_addr) &
(mmu_prop->page_size - 1)) ||
(hdev->asic_funcs->scramble_addr(hdev, virt_addr) &
(mmu_prop->page_size - 1)))) ||
(!is_dram_addr && ((phys_addr & (real_page_size - 1)) ||
(virt_addr & (real_page_size - 1)))))
dev_crit(hdev->dev,
"Mapping address 0x%llx with virtual address 0x%llx and page size of 0x%x is erroneous! Addresses must be divisible by page size",
phys_addr, virt_addr, real_page_size);
npages = page_size / real_page_size;
real_virt_addr = virt_addr;
real_phys_addr = phys_addr;
for (i = 0 ; i < npages ; i++) {
rc = mmu_funcs->map(ctx, real_virt_addr, real_phys_addr, real_page_size,
is_dram_addr);
if (rc)
goto err;
real_virt_addr += real_page_size;
real_phys_addr += real_page_size;
mapped_cnt++;
}
if (flush_pte)
mmu_funcs->flush(ctx);
trace_habanalabs_mmu_map(hdev->dev, virt_addr, phys_addr, page_size, flush_pte);
return 0;
err:
real_virt_addr = virt_addr;
for (i = 0 ; i < mapped_cnt ; i++) {
if (mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr))
dev_warn_ratelimited(hdev->dev,
"failed to unmap va: 0x%llx\n", real_virt_addr);
real_virt_addr += real_page_size;
}
mmu_funcs->flush(ctx);
return rc;
}
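/*
 * Failure handling above: if the i-th sub-page fails to map, only the
 * mapped_cnt sub-pages already mapped by this call are rolled back, and a
 * single flush is issued at the end of the rollback regardless of the
 * flush_pte argument.
 */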
/*
* hl_mmu_map_contiguous - implements a wrapper for hl_mmu_map_page
* for mapping contiguous physical memory
*
* @ctx: pointer to the context structure
* @virt_addr: virt addr to map from
* @phys_addr: phys addr to map to
* @size: size to map
*
*/
int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
u64 phys_addr, u32 size)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 curr_va, curr_pa;
u32 page_size;
bool flush_pte;
int rc = 0, off;
if (hl_mem_area_inside_range(virt_addr, size,
prop->dmmu.start_addr, prop->dmmu.end_addr))
page_size = prop->dmmu.page_size;
else if (hl_mem_area_inside_range(virt_addr, size,
prop->pmmu.start_addr, prop->pmmu.end_addr))
page_size = prop->pmmu.page_size;
else if (hl_mem_area_inside_range(virt_addr, size,
prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr))
page_size = prop->pmmu_huge.page_size;
else
return -EINVAL;
for (off = 0 ; off < size ; off += page_size) {
curr_va = virt_addr + off;
curr_pa = phys_addr + off;
flush_pte = (off + page_size) >= size;
rc = hl_mmu_map_page(ctx, curr_va, curr_pa, page_size,
flush_pte);
if (rc) {
dev_err(hdev->dev,
"Map failed for va 0x%llx to pa 0x%llx\n",
curr_va, curr_pa);
/* last mapping failed so don't try to unmap it - reduce off by page_size */
off -= page_size;
goto unmap;
}
}
return rc;
unmap:
for (; off >= 0 ; off -= page_size) {
curr_va = virt_addr + off;
flush_pte = (off - (s32) page_size) < 0;
if (hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte))
dev_warn_ratelimited(hdev->dev,
"failed to unmap va 0x%llx\n", curr_va);
}
return rc;
}
/*
* hl_mmu_unmap_contiguous - implements a wrapper for hl_mmu_unmap_page
* for unmapping contiguous physical memory
*
* @ctx: pointer to the context structure
* @virt_addr: virt addr to unmap
* @size: size to unmap
*
*/
int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 curr_va;
u32 page_size;
bool flush_pte;
int rc = 0, off;
if (hl_mem_area_inside_range(virt_addr, size,
prop->dmmu.start_addr, prop->dmmu.end_addr))
page_size = prop->dmmu.page_size;
else if (hl_mem_area_inside_range(virt_addr, size,
prop->pmmu.start_addr, prop->pmmu.end_addr))
page_size = prop->pmmu.page_size;
else if (hl_mem_area_inside_range(virt_addr, size,
prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr))
page_size = prop->pmmu_huge.page_size;
else
return -EINVAL;
for (off = 0 ; off < size ; off += page_size) {
curr_va = virt_addr + off;
flush_pte = (off + page_size) >= size;
rc = hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte);
if (rc)
dev_warn_ratelimited(hdev->dev,
"Unmap failed for va 0x%llx\n", curr_va);
}
return rc;
}
static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops,
u64 *phys_addr)
{
struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
u64 offset_mask, addr_mask, hop_shift, tmp_phys_addr;
struct hl_mmu_properties *mmu_prop;
/* last hop holds the phys address and flags */
if (hops->unscrambled_paddr)
tmp_phys_addr = hops->unscrambled_paddr;
else
tmp_phys_addr = hops->hop_info[hops->used_hops - 1].hop_pte_val;
if (hops->range_type == HL_VA_RANGE_TYPE_HOST_HUGE)
mmu_prop = &prop->pmmu_huge;
else if (hops->range_type == HL_VA_RANGE_TYPE_HOST)
mmu_prop = &prop->pmmu;
else /* HL_VA_RANGE_TYPE_DRAM */
mmu_prop = &prop->dmmu;
if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) &&
!is_power_of_2(prop->dram_page_size)) {
u64 dram_page_size, dram_base, abs_phys_addr, abs_virt_addr,
page_id, page_start;
u32 page_off;
/*
		 * Bit arithmetic cannot be used for non-power-of-two page
		 * sizes. In addition, since bit arithmetic is not used, the
		 * DRAM base cannot be ignored; both must be taken into account.
*/
dram_page_size = prop->dram_page_size;
dram_base = prop->dram_base_address;
abs_phys_addr = tmp_phys_addr - dram_base;
abs_virt_addr = virt_addr - dram_base;
page_id = DIV_ROUND_DOWN_ULL(abs_phys_addr, dram_page_size);
page_start = page_id * dram_page_size;
div_u64_rem(abs_virt_addr, dram_page_size, &page_off);
*phys_addr = page_start + page_off + dram_base;
} else {
/*
* find the correct hop shift field in hl_mmu_properties
* structure in order to determine the right masks
* for the page offset.
*/
hop_shift = mmu_prop->hop_shifts[hops->used_hops - 1];
offset_mask = (1ull << hop_shift) - 1;
addr_mask = ~(offset_mask);
*phys_addr = (tmp_phys_addr & addr_mask) |
(virt_addr & offset_mask);
}
}
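/*
 * Worked example for the non-power-of-2 branch (illustrative numbers): with
 * dram_page_size = 48MB and dram_base = 0x1000000000, a PTE value of
 * 0x1009000000 gives abs_phys_addr = 144MB, page_id = 3 and page_start =
 * 144MB, so the returned address is 144MB + (the virt_addr offset within its
 * 48MB page) + dram_base. The power-of-2 branch simply splices the virtual
 * page offset into the PTE value using the last hop's shift.
 */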
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr)
{
struct hl_mmu_hop_info hops;
int rc;
memset(&hops, 0, sizeof(hops));
rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops);
if (rc)
return rc;
hl_mmu_pa_page_with_offset(ctx, virt_addr, &hops, phys_addr);
return 0;
}
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop;
struct hl_mmu_properties *mmu_prop;
struct hl_mmu_funcs *mmu_funcs;
int pgt_residency, rc;
bool is_dram_addr;
if (hdev->mmu_disable)
return -EOPNOTSUPP;
prop = &hdev->asic_prop;
hops->scrambled_vaddr = virt_addr; /* assume no scrambling */
is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
prop->dmmu.start_addr,
prop->dmmu.end_addr);
/* host-residency is the same in PMMU and PMMU huge, no need to distinguish here */
mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
mutex_lock(&hdev->mmu_lock);
rc = mmu_funcs->get_tlb_info(ctx, virt_addr, hops);
mutex_unlock(&hdev->mmu_lock);
if (rc)
return rc;
/* add page offset to physical address */
if (hops->unscrambled_paddr)
hl_mmu_pa_page_with_offset(ctx, virt_addr, hops, &hops->unscrambled_paddr);
return 0;
}
int hl_mmu_if_set_funcs(struct hl_device *hdev)
{
if (hdev->mmu_disable)
return 0;
switch (hdev->asic_type) {
case ASIC_GOYA:
case ASIC_GAUDI:
case ASIC_GAUDI_SEC:
hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
break;
case ASIC_GAUDI2:
case ASIC_GAUDI2B:
/* MMUs in Gaudi2 are always host resident */
hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
return -EOPNOTSUPP;
}
return 0;
}
/**
* hl_mmu_scramble_addr() - The generic mmu address scrambling routine.
* @hdev: pointer to device data.
* @addr: The address to scramble.
*
* Return: The scrambled address.
*/
u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr)
{
return addr;
}
/**
* hl_mmu_descramble_addr() - The generic mmu address descrambling
* routine.
* @hdev: pointer to device data.
* @addr: The address to descramble.
*
* Return: The un-scrambled address.
*/
u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr)
{
return addr;
}
int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
{
int rc;
rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
if (rc)
dev_err_ratelimited(hdev->dev,
"%s cache invalidation failed, rc=%d\n",
flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", rc);
return rc;
}
int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
u32 flags, u32 asid, u64 va, u64 size)
{
int rc;
rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, is_hard, flags,
asid, va, size);
if (rc)
dev_err_ratelimited(hdev->dev,
"%s cache range invalidation failed: va=%#llx, size=%llu, rc=%d",
flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", va, size, rc);
return rc;
}
static void hl_mmu_prefetch_work_function(struct work_struct *work)
{
struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, prefetch_work);
struct hl_ctx *ctx = pfw->ctx;
struct hl_device *hdev = ctx->hdev;
if (!hl_device_operational(hdev, NULL))
goto put_ctx;
mutex_lock(&hdev->mmu_lock);
hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid, pfw->va, pfw->size);
mutex_unlock(&hdev->mmu_lock);
put_ctx:
/*
	 * The context was taken in the common MMU prefetch function - see the comment there about
	 * context handling.
*/
hl_ctx_put(ctx);
kfree(pfw);
}
int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size)
{
struct hl_prefetch_work *handle_prefetch_work;
handle_prefetch_work = kmalloc(sizeof(*handle_prefetch_work), GFP_KERNEL);
if (!handle_prefetch_work)
return -ENOMEM;
INIT_WORK(&handle_prefetch_work->prefetch_work, hl_mmu_prefetch_work_function);
handle_prefetch_work->ctx = ctx;
handle_prefetch_work->va = va;
handle_prefetch_work->size = size;
handle_prefetch_work->flags = flags;
handle_prefetch_work->asid = asid;
/*
* as actual prefetch is done in a WQ we must get the context (and put it
* at the end of the work function)
*/
hl_ctx_get(ctx);
queue_work(ctx->hdev->prefetch_wq, &handle_prefetch_work->prefetch_work);
return 0;
}
u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
return (curr_pte & PAGE_PRESENT_MASK) ? (curr_pte & HOP_PHYS_ADDR_MASK) : ULLONG_MAX;
}
/**
* hl_mmu_get_hop_pte_phys_addr() - extract PTE address from HOP
* @ctx: pointer to the context structure to initialize.
* @mmu_prop: MMU properties.
* @hop_idx: HOP index.
* @hop_addr: HOP address.
* @virt_addr: virtual address for the translation.
*
* @return the matching PTE value on success, otherwise U64_MAX.
*/
u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
u8 hop_idx, u64 hop_addr, u64 virt_addr)
{
u64 mask, shift;
if (hop_idx >= mmu_prop->num_hops) {
dev_err_ratelimited(ctx->hdev->dev, "Invalid hop index %d\n", hop_idx);
return U64_MAX;
}
shift = mmu_prop->hop_shifts[hop_idx];
mask = mmu_prop->hop_masks[hop_idx];
return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}
static void mmu_dma_mem_free_from_chunk(struct gen_pool *pool,
struct gen_pool_chunk *chunk,
void *data)
{
struct hl_device *hdev = data;
hl_asic_dma_free_coherent(hdev, (chunk->end_addr - chunk->start_addr) + 1,
(void *)chunk->start_addr, chunk->phys_addr);
}
void hl_mmu_hr_flush(struct hl_ctx *ctx)
{
/* a flush operation requires memory barrier */
mb();
}
/**
* hl_mmu_hr_pool_destroy() - destroy genpool
* @hdev: habanalabs device structure.
* @hr_priv: MMU HR private data.
* @hop_table_size: HOP table size.
*
* This function does the following:
* - free entries allocated for shadow HOP0
* - free pool chunks
* - free pool
*/
static void hl_mmu_hr_pool_destroy(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv,
u32 hop_table_size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gen_pool **pool = &hr_priv->mmu_pgt_pool;
struct pgt_info *hop0_pgt;
int asid;
if (ZERO_OR_NULL_PTR(*pool))
return;
/* Free the Fixed allocation of HOPs0 */
if (hr_priv->mmu_asid_hop0) {
for (asid = 0 ; asid < prop->max_asid ; asid++) {
hop0_pgt = &hr_priv->mmu_asid_hop0[asid];
if (ZERO_OR_NULL_PTR(hop0_pgt->virt_addr))
continue;
gen_pool_free(*pool, (uintptr_t) hop0_pgt->virt_addr, hop_table_size);
}
}
gen_pool_for_each_chunk(*pool, mmu_dma_mem_free_from_chunk, hdev);
gen_pool_destroy(*pool);
	/* Make sure that if we arrive here again without init having been
	 * called, we won't cause a kernel panic. This can happen, for example,
	 * if we fail at certain points of the hard reset code.
	 */
*pool = NULL;
}
/**
* hl_mmu_hr_init() - initialize the MMU module.
* @hdev: habanalabs device structure.
* @hr_priv: MMU HR private data.
* @hop_table_size: HOP table size.
* @pgt_size: memory size allocated for the page table
*
 * @return 0 on success, otherwise a non-zero error code
*
* This function does the following:
* - Create a pool of pages for pgt_infos.
* - Create a shadow table for pgt
*/
int hl_mmu_hr_init(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size,
u64 pgt_size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
size_t pool_chunk_size = SZ_4M;
struct pgt_info *hop0_pgt;
dma_addr_t dma_addr;
u64 virt_addr;
int i, rc;
/*
	 * We set the allocation granularity to PAGE_SIZE (since the dma_alloc_coherent allocation
	 * order/size is PAGE_SHIFT/PAGE_SIZE) in order to be able to control the alignment of the
	 * allocations. This way we can use the "DMA alloc align" helpers according to the dma_alloc
	 * granularity and satisfy allocations with higher-order alignment restrictions.
*/
hr_priv->mmu_pgt_pool = gen_pool_create(PAGE_SHIFT, -1);
if (ZERO_OR_NULL_PTR(hr_priv->mmu_pgt_pool)) {
dev_err(hdev->dev, "Failed to create hr page pool\n");
return -ENOMEM;
}
hr_priv->mmu_asid_hop0 = kvcalloc(prop->max_asid, sizeof(struct pgt_info), GFP_KERNEL);
if (ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0)) {
dev_err(hdev->dev, "Failed to allocate hr-mmu hop0 table\n");
rc = -ENOMEM;
goto destroy_mmu_pgt_pool;
}
for (i = 0 ; i < pgt_size ; i += pool_chunk_size) {
virt_addr = (uintptr_t) hl_asic_dma_alloc_coherent(hdev, pool_chunk_size,
&dma_addr,
GFP_KERNEL | __GFP_ZERO);
if (ZERO_OR_NULL_PTR(virt_addr)) {
dev_err(hdev->dev,
"Failed to allocate memory for host-resident page pool\n");
rc = -ENOMEM;
goto destroy_mmu_pgt_pool;
}
rc = gen_pool_add_virt(hr_priv->mmu_pgt_pool, virt_addr, (phys_addr_t) dma_addr,
pool_chunk_size, -1);
if (rc) {
dev_err(hdev->dev, "Failed to fill host-resident page pool\n");
goto destroy_mmu_pgt_pool;
}
}
for (i = 0 ; i < prop->max_asid ; i++) {
hop0_pgt = &hr_priv->mmu_asid_hop0[i];
hop0_pgt->virt_addr = (uintptr_t)
gen_pool_dma_zalloc_align(hr_priv->mmu_pgt_pool,
hop_table_size,
(dma_addr_t *) &hop0_pgt->phys_addr,
hop_table_size);
if (!hop0_pgt->virt_addr) {
dev_err(hdev->dev, "Failed to allocate HOP from pgt pool\n");
rc = -ENOMEM;
goto destroy_mmu_pgt_pool;
}
}
/* MMU H/W init will be done in device hw_init() */
return 0;
destroy_mmu_pgt_pool:
hl_mmu_hr_pool_destroy(hdev, hr_priv, hop_table_size);
if (!ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0))
kvfree(hr_priv->mmu_asid_hop0);
return rc;
}
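/*
 * Pool layout example (illustrative): with pgt_size = 32MB the loop above
 * adds eight 4MB DMA-coherent chunks to the genpool, and every ASID then
 * gets a hop0 table carved out of that pool with hop_table_size alignment.
 */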
/**
* hl_mmu_hr_fini() - release the MMU module.
* @hdev: habanalabs device structure.
* @hr_priv: MMU host resident private info.
* @hop_table_size: HOP table size
*
* This function does the following:
* - Disable MMU in H/W.
* - Free the pgt_infos pool.
*
* All contexts should be freed before calling this function.
*/
void hl_mmu_hr_fini(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size)
{
/* MMU H/W fini was already done in device hw_fini() */
hl_mmu_hr_pool_destroy(hdev, hr_priv, hop_table_size);
if (!ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0)) {
kvfree(hr_priv->mmu_asid_hop0);
	/* Make sure that if we arrive here again without init having been
	 * called, we won't cause a kernel panic. This can happen, for example,
	 * if we fail at certain points of the hard reset code.
	 */
hr_priv->mmu_asid_hop0 = NULL;
}
}
/**
* hl_mmu_hr_free_hop_remove_pgt() - free HOP and remove PGT from hash
* @pgt_info: page table info structure.
* @hr_priv: MMU HR private data.
* @hop_table_size: HOP table size.
*/
void hl_mmu_hr_free_hop_remove_pgt(struct pgt_info *pgt_info, struct hl_mmu_hr_priv *hr_priv,
u32 hop_table_size)
{
gen_pool_free(hr_priv->mmu_pgt_pool, pgt_info->virt_addr, hop_table_size);
hash_del(&pgt_info->node);
kfree(pgt_info);
}
/**
* hl_mmu_hr_pte_phys_to_virt() - translate PTE phys addr to virt addr
* @ctx: pointer to the context structure
* @pgt: pgt_info for the HOP hosting the PTE
* @phys_pte_addr: phys address of the PTE
* @hop_table_size: HOP table size
*
* @return PTE virtual address
*
 * The function uses the pgt_info to get the HOP base virt addr and obtains the PTE's virt addr
 * by adding the PTE offset.
*/
u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt,
u64 phys_pte_addr, u32 hop_table_size)
{
u64 page_mask = (hop_table_size - 1);
u64 pte_offset = phys_pte_addr & page_mask;
return pgt->virt_addr + pte_offset;
}
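/*
 * Example (assuming hop_table_size = 0x1000): phys_pte_addr 0x20000ab8 yields
 * pte_offset 0xab8, so the CPU accesses the PTE at pgt->virt_addr + 0xab8
 * while the H/W uses the physical address 0x20000ab8.
 */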
/**
* hl_mmu_hr_write_pte() - write HR PTE
* @ctx: pointer to the context structure
* @pgt_info: HOP's page table info structure
* @phys_pte_addr: phys PTE address
* @val: raw PTE data
* @hop_table_size: HOP table size
*/
void hl_mmu_hr_write_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
u64 val, u32 hop_table_size)
{
/*
* The value to write is the phys address of the next hop +
* flags at the 12 LSBs.
*/
u64 virt_addr = hl_mmu_hr_pte_phys_to_virt(ctx, pgt_info, phys_pte_addr, hop_table_size);
*((u64 *) (uintptr_t) virt_addr) = val;
}
/**
* hl_mmu_hr_clear_pte() - clear HR PTE
* @ctx: pointer to the context structure
* @pgt_info: HOP's page table info structure
* @phys_pte_addr: phys PTE address
* @hop_table_size: HOP table size
*/
void hl_mmu_hr_clear_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
u32 hop_table_size)
{
/* no need to transform the value to physical address */
hl_mmu_hr_write_pte(ctx, pgt_info, phys_pte_addr, 0, hop_table_size);
}
/**
* hl_mmu_hr_put_pte() - put HR PTE and remove it if necessary (no more PTEs)
* @ctx: pointer to the context structure
* @pgt_info: HOP's page table info structure
* @hr_priv: HR MMU private info
* @hop_table_size: HOP table size
*
* @return number of PTEs still in the HOP
*/
int hl_mmu_hr_put_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info,
struct hl_mmu_hr_priv *hr_priv,
u32 hop_table_size)
{
int num_of_ptes_left;
pgt_info->num_of_ptes--;
/*
* Need to save the number of ptes left because free_hop might free
* the pgt_info
*/
num_of_ptes_left = pgt_info->num_of_ptes;
if (!num_of_ptes_left)
hl_mmu_hr_free_hop_remove_pgt(pgt_info, hr_priv, hop_table_size);
return num_of_ptes_left;
}
/**
* hl_mmu_hr_get_pte() - increase PGT PTE count
* @ctx: pointer to the context structure
* @hr_func: host resident functions
* @phys_hop_addr: HOP phys address
*/
void hl_mmu_hr_get_pte(struct hl_ctx *ctx, struct hl_hr_mmu_funcs *hr_func, u64 phys_hop_addr)
{
hr_func->get_pgt_info(ctx, phys_hop_addr)->num_of_ptes++;
}
/**
* hl_mmu_hr_get_next_hop_pgt_info() - get pgt_info structure for the next HOP
* @ctx: pointer to the context structure.
* @hr_func: host resident functions.
* @curr_pte: current PTE value.
*
* @return pgt_info structure on success, otherwise NULL.
*/
struct pgt_info *hl_mmu_hr_get_next_hop_pgt_info(struct hl_ctx *ctx,
struct hl_hr_mmu_funcs *hr_func,
u64 curr_pte)
{
u64 next_hop_phys_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
if (next_hop_phys_addr == ULLONG_MAX)
return NULL;
return hr_func->get_pgt_info(ctx, next_hop_phys_addr);
}
/**
* hl_mmu_hr_alloc_hop() - allocate HOP
* @ctx: pointer to the context structure.
* @hr_priv: host resident private info structure.
* @hr_func: host resident functions.
* @mmu_prop: MMU properties.
*
* @return pgt_info structure associated with the allocated HOP on success, otherwise NULL.
*/
struct pgt_info *hl_mmu_hr_alloc_hop(struct hl_ctx *ctx, struct hl_mmu_hr_priv *hr_priv,
struct hl_hr_mmu_funcs *hr_func,
struct hl_mmu_properties *mmu_prop)
{
struct hl_device *hdev = ctx->hdev;
struct pgt_info *pgt_info;
dma_addr_t phys_addr;
void *virt_addr;
int i, retry = 1;
pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
if (!pgt_info)
return NULL;
for (i = 0; i <= retry; i++) {
virt_addr = gen_pool_dma_zalloc_align(hr_priv->mmu_pgt_pool,
mmu_prop->hop_table_size,
&phys_addr,
mmu_prop->hop_table_size);
if (virt_addr)
break;
/* No memory in pool - get some and try again */
virt_addr = hl_asic_dma_alloc_coherent(hdev, SZ_2M, &phys_addr,
GFP_KERNEL | __GFP_ZERO);
if (ZERO_OR_NULL_PTR(virt_addr))
break;
if (gen_pool_add_virt(hr_priv->mmu_pgt_pool, (unsigned long)virt_addr,
phys_addr, SZ_2M, -1)) {
hl_asic_dma_free_coherent(hdev, SZ_2M, virt_addr, phys_addr);
virt_addr = NULL;
break;
}
}
if (ZERO_OR_NULL_PTR(virt_addr)) {
dev_err(hdev->dev, "failed to allocate page\n");
goto pool_alloc_err;
}
pgt_info->phys_addr = phys_addr;
pgt_info->shadow_addr = (unsigned long) NULL;
pgt_info->virt_addr = (unsigned long)virt_addr;
pgt_info->ctx = ctx;
pgt_info->num_of_ptes = 0;
hr_func->add_pgt_info(ctx, pgt_info, phys_addr);
return pgt_info;
pool_alloc_err:
kfree(pgt_info);
return NULL;
}
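/*
 * Allocation strategy above: first try to carve a hop out of the existing
 * genpool; if the pool is exhausted, add one more 2MB DMA-coherent chunk and
 * retry exactly once (retry = 1) before giving up.
 */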
/**
* hl_mmu_hr_get_alloc_next_hop() - get the next HOP, allocate it if it does not exist
* @ctx: pointer to the context structure.
* @hr_priv: host resident private info structure.
* @hr_func: host resident functions.
* @mmu_prop: MMU properties.
* @curr_pte: current PTE value.
* @is_new_hop: set to true if HOP is new (caller responsibility to set it to false).
*
* @return pgt_info structure associated with the allocated HOP on success, otherwise NULL.
*/
struct pgt_info *hl_mmu_hr_get_alloc_next_hop(struct hl_ctx *ctx,
struct hl_mmu_hr_priv *hr_priv,
struct hl_hr_mmu_funcs *hr_func,
struct hl_mmu_properties *mmu_prop,
u64 curr_pte, bool *is_new_hop)
{
u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
if (hop_addr != ULLONG_MAX)
return hr_func->get_pgt_info(ctx, hop_addr);
*is_new_hop = true;
return hl_mmu_hr_alloc_hop(ctx, hr_priv, hr_func, mmu_prop);
}
/**
* hl_mmu_hr_get_tlb_info() - get the TLB info (info for a specific mapping)
* @ctx: pointer to the context structure.
* @virt_addr: the virt address for which to get info.
* @hops: HOPs info structure.
* @hr_func: host resident functions.
*
 * @return 0 on success, otherwise a non-zero error code.
*/
int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops,
struct hl_hr_mmu_funcs *hr_func)
{
/* using 6 HOPs as this is the maximum number of HOPs */
struct pgt_info *hops_pgt_info[MMU_ARCH_6_HOPS] = { NULL };
struct hl_device *hdev = ctx->hdev;
struct hl_mmu_properties *mmu_prop;
int rc, i, used_hops;
bool is_huge;
rc = hr_func->get_tlb_mapping_params(hdev, &mmu_prop, hops, virt_addr, &is_huge);
if (rc)
return rc;
used_hops = mmu_prop->num_hops;
/* huge pages use one less hop */
if (is_huge)
used_hops--;
hops->scrambled_vaddr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
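/*
 * Walk the hop tables: hop0 is taken from the context, and every further
 * hop is resolved from the previous hop's PTE. The walk ends at a PTE
 * marked as last (leaf); a non-present PTE fails the lookup.
 */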
for (i = 0 ; i < used_hops ; i++) {
if (i == 0)
hops_pgt_info[i] = hr_func->get_hop0_pgt_info(ctx);
else
hops_pgt_info[i] = hl_mmu_hr_get_next_hop_pgt_info(ctx, hr_func,
hops->hop_info[i - 1].hop_pte_val);
if (!hops_pgt_info[i])
return -EFAULT;
hops->hop_info[i].hop_addr = hops_pgt_info[i]->phys_addr;
hops->hop_info[i].hop_pte_addr =
hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
hops->hop_info[i].hop_addr,
hops->scrambled_vaddr);
hops->hop_info[i].hop_pte_val = *(u64 *) (uintptr_t)
hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
hops->hop_info[i].hop_pte_addr,
mmu_prop->hop_table_size);
if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
return -EFAULT;
if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
break;
}
/* if we passed over all hops then no last hop was found */
if (i == mmu_prop->num_hops)
return -EFAULT;
if (hops->scrambled_vaddr != virt_addr)
hops->unscrambled_paddr = hdev->asic_funcs->descramble_addr
(hdev, hops->hop_info[i].hop_pte_val);
else
hops->unscrambled_paddr = hops->hop_info[i].hop_pte_val;
hops->used_hops = i + 1;
return 0;
}
| linux-master | drivers/accel/habanalabs/common/mmu/mmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "gaudi2P.h"
#include "gaudi2_masks.h"
#include "../include/gaudi2/gaudi2_special_blocks.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include "../include/hw_ip/mmu/mmu_v2_0.h"
#include "../include/gaudi2/gaudi2_packets.h"
#include "../include/gaudi2/gaudi2_reg_map.h"
#include "../include/gaudi2/gaudi2_async_ids_map_extended.h"
#include "../include/gaudi2/arc/gaudi2_arc_common_packets.h"
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/iommu.h>
#define GAUDI2_DMA_POOL_BLK_SIZE SZ_256 /* 256 bytes */
#define GAUDI2_RESET_TIMEOUT_MSEC 2000 /* 2000ms */
#define GAUDI2_RESET_POLL_TIMEOUT_USEC 500000 /* 500ms */
#define GAUDI2_PLDM_HRESET_TIMEOUT_MSEC 25000 /* 25s */
#define GAUDI2_PLDM_SRESET_TIMEOUT_MSEC 25000 /* 25s */
#define GAUDI2_PLDM_RESET_POLL_TIMEOUT_USEC 3000000 /* 3s */
#define GAUDI2_RESET_POLL_CNT 3
#define GAUDI2_RESET_WAIT_MSEC 1 /* 1ms */
#define GAUDI2_CPU_RESET_WAIT_MSEC 100 /* 100ms */
#define GAUDI2_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
#define GAUDI2_CB_POOL_CB_CNT 512
#define GAUDI2_CB_POOL_CB_SIZE SZ_128K /* 128KB */
#define GAUDI2_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI2_WAIT_FOR_BL_TIMEOUT_USEC 25000000 /* 25s */
#define GAUDI2_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
#define GAUDI2_PLDM_TEST_QUEUE_WAIT_USEC 1000000 /* 1s */
#define GAUDI2_ALLOC_CPU_MEM_RETRY_CNT 3
/*
 * The code already has built-in support for binning of up to MAX_FAULTY_TPCS TPCs
 * and relies on that value (for array sizes etc.), so we define another value for
 * the maximum number of faulty TPCs which reflects the cluster binning requirements.
 */
#define MAX_CLUSTER_BINNING_FAULTY_TPCS 1
#define MAX_FAULTY_XBARS 1
#define MAX_FAULTY_EDMAS 1
#define MAX_FAULTY_DECODERS 1
#define GAUDI2_TPC_FULL_MASK 0x1FFFFFF
#define GAUDI2_HIF_HMMU_FULL_MASK 0xFFFF
#define GAUDI2_DECODER_FULL_MASK 0x3FF
#define GAUDI2_NA_EVENT_CAUSE 0xFF
#define GAUDI2_NUM_OF_QM_ERR_CAUSE 18
#define GAUDI2_NUM_OF_LOWER_QM_ERR_CAUSE 25
#define GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE 3
#define GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE 14
#define GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE 3
#define GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE 2
#define GAUDI2_NUM_OF_ROT_ERR_CAUSE 22
#define GAUDI2_NUM_OF_TPC_INTR_CAUSE 31
#define GAUDI2_NUM_OF_DEC_ERR_CAUSE 25
#define GAUDI2_NUM_OF_MME_ERR_CAUSE 16
#define GAUDI2_NUM_OF_MME_SBTE_ERR_CAUSE 5
#define GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE 7
#define GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE 8
#define GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE 19
#define GAUDI2_NUM_OF_HBM_SEI_CAUSE 9
#define GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE 3
#define GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE 3
#define GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE 2
#define GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE 2
#define GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE 2
#define GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE 5
#define GAUDI2_MMU_CACHE_INV_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 10)
#define GAUDI2_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 200)
#define GAUDI2_ARB_WDT_TIMEOUT (0x1000000)
#define GAUDI2_VDEC_TIMEOUT_USEC 10000 /* 10ms */
#define GAUDI2_PLDM_VDEC_TIMEOUT_USEC (GAUDI2_VDEC_TIMEOUT_USEC * 100)
#define KDMA_TIMEOUT_USEC USEC_PER_SEC
#define IS_DMA_IDLE(dma_core_sts0) \
(!((dma_core_sts0) & (DCORE0_EDMA0_CORE_STS0_BUSY_MASK)))
#define IS_DMA_HALTED(dma_core_sts1) \
((dma_core_sts1) & (DCORE0_EDMA0_CORE_STS1_IS_HALT_MASK))
#define IS_MME_IDLE(mme_arch_sts) (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
#define IS_TPC_IDLE(tpc_cfg_sts) (((tpc_cfg_sts) & (TPC_IDLE_MASK)) == (TPC_IDLE_MASK))
#define IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) \
((((qm_glbl_sts0) & (QM_IDLE_MASK)) == (QM_IDLE_MASK)) && \
(((qm_glbl_sts1) & (QM_ARC_IDLE_MASK)) == (QM_ARC_IDLE_MASK)) && \
(((qm_cgm_sts) & (CGM_IDLE_MASK)) == (CGM_IDLE_MASK)))
#define PCIE_DEC_EN_MASK 0x300
#define DEC_WORK_STATE_IDLE 0
#define DEC_WORK_STATE_PEND 3
#define IS_DEC_IDLE(dec_swreg15) \
(((dec_swreg15) & DCORE0_DEC0_CMD_SWREG15_SW_WORK_STATE_MASK) == DEC_WORK_STATE_IDLE || \
((dec_swreg15) & DCORE0_DEC0_CMD_SWREG15_SW_WORK_STATE_MASK) == DEC_WORK_STATE_PEND)
/* HBM MMU address scrambling parameters */
#define GAUDI2_HBM_MMU_SCRM_MEM_SIZE SZ_8M
#define GAUDI2_HBM_MMU_SCRM_DIV_SHIFT 26
#define GAUDI2_HBM_MMU_SCRM_MOD_SHIFT 0
#define GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK DRAM_VA_HINT_MASK
#define GAUDI2_COMPENSATE_TLB_PAGE_SIZE_FACTOR 16
#define MMU_RANGE_INV_VA_LSB_SHIFT 12
#define MMU_RANGE_INV_VA_MSB_SHIFT 44
#define MMU_RANGE_INV_EN_SHIFT 0
#define MMU_RANGE_INV_ASID_EN_SHIFT 1
#define MMU_RANGE_INV_ASID_SHIFT 2
/* The last SPI_SEI cause bit, "burst_fifo_full", is expected to be triggered in the PMMU because
 * it has a 2-entry FIFO, and hence it is not enabled for it.
 */
#define GAUDI2_PMMU_SPI_SEI_ENABLE_MASK GENMASK(GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE - 2, 0)
#define GAUDI2_HMMU_SPI_SEI_ENABLE_MASK GENMASK(GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE - 1, 0)
#define GAUDI2_MAX_STRING_LEN 64
#define GAUDI2_VDEC_MSIX_ENTRIES (GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM - \
GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM + 1)
#define ENGINE_ID_DCORE_OFFSET (GAUDI2_DCORE1_ENGINE_ID_EDMA_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0)
/* RAZWI initiator coordinates */
#define RAZWI_GET_AXUSER_XY(x) \
((x & 0xF8001FF0) >> 4)
#define RAZWI_GET_AXUSER_LOW_XY(x) \
((x & 0x00001FF0) >> 4)
#define RAZWI_INITIATOR_AXUER_L_X_SHIFT 0
#define RAZWI_INITIATOR_AXUER_L_X_MASK 0x1F
#define RAZWI_INITIATOR_AXUER_L_Y_SHIFT 5
#define RAZWI_INITIATOR_AXUER_L_Y_MASK 0xF
#define RAZWI_INITIATOR_AXUER_H_X_SHIFT 23
#define RAZWI_INITIATOR_AXUER_H_X_MASK 0x1F
#define RAZWI_INITIATOR_ID_X_Y_LOW(x, y) \
((((y) & RAZWI_INITIATOR_AXUER_L_Y_MASK) << RAZWI_INITIATOR_AXUER_L_Y_SHIFT) | \
(((x) & RAZWI_INITIATOR_AXUER_L_X_MASK) << RAZWI_INITIATOR_AXUER_L_X_SHIFT))
#define RAZWI_INITIATOR_ID_X_HIGH(x) \
(((x) & RAZWI_INITIATOR_AXUER_H_X_MASK) << RAZWI_INITIATOR_AXUER_H_X_SHIFT)
#define RAZWI_INITIATOR_ID_X_Y(xl, yl, xh) \
(RAZWI_INITIATOR_ID_X_Y_LOW(xl, yl) | RAZWI_INITIATOR_ID_X_HIGH(xh))
#define PSOC_RAZWI_ENG_STR_SIZE 128
#define PSOC_RAZWI_MAX_ENG_PER_RTR 5
/* HW scrambles only bits 0-25 */
#define HW_UNSCRAMBLED_BITS_MASK GENMASK_ULL(63, 26)
struct gaudi2_razwi_info {
u32 axuser_xy;
u32 rtr_ctrl;
u16 eng_id;
char *eng_name;
};
static struct gaudi2_razwi_info common_razwi_info[] = {
{RAZWI_INITIATOR_ID_X_Y(2, 4, 0), mmDCORE0_RTR0_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_DEC_0, "DEC0"},
{RAZWI_INITIATOR_ID_X_Y(2, 4, 4), mmDCORE0_RTR0_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_DEC_1, "DEC1"},
{RAZWI_INITIATOR_ID_X_Y(17, 4, 18), mmDCORE1_RTR7_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_DEC_0, "DEC2"},
{RAZWI_INITIATOR_ID_X_Y(17, 4, 14), mmDCORE1_RTR7_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_DEC_1, "DEC3"},
{RAZWI_INITIATOR_ID_X_Y(2, 11, 0), mmDCORE2_RTR0_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_DEC_0, "DEC4"},
{RAZWI_INITIATOR_ID_X_Y(2, 11, 4), mmDCORE2_RTR0_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_DEC_1, "DEC5"},
{RAZWI_INITIATOR_ID_X_Y(17, 11, 18), mmDCORE3_RTR7_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_DEC_0, "DEC6"},
{RAZWI_INITIATOR_ID_X_Y(17, 11, 14), mmDCORE3_RTR7_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_DEC_1, "DEC7"},
{RAZWI_INITIATOR_ID_X_Y(2, 4, 6), mmDCORE0_RTR0_CTRL_BASE,
GAUDI2_PCIE_ENGINE_ID_DEC_0, "DEC8"},
{RAZWI_INITIATOR_ID_X_Y(2, 4, 7), mmDCORE0_RTR0_CTRL_BASE,
GAUDI2_PCIE_ENGINE_ID_DEC_0, "DEC9"},
{RAZWI_INITIATOR_ID_X_Y(3, 4, 2), mmDCORE0_RTR1_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_TPC_0, "TPC0"},
{RAZWI_INITIATOR_ID_X_Y(3, 4, 4), mmDCORE0_RTR1_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_TPC_1, "TPC1"},
{RAZWI_INITIATOR_ID_X_Y(4, 4, 2), mmDCORE0_RTR2_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_TPC_2, "TPC2"},
{RAZWI_INITIATOR_ID_X_Y(4, 4, 4), mmDCORE0_RTR2_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_TPC_3, "TPC3"},
{RAZWI_INITIATOR_ID_X_Y(5, 4, 2), mmDCORE0_RTR3_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_TPC_4, "TPC4"},
{RAZWI_INITIATOR_ID_X_Y(5, 4, 4), mmDCORE0_RTR3_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_TPC_5, "TPC5"},
{RAZWI_INITIATOR_ID_X_Y(16, 4, 14), mmDCORE1_RTR6_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_TPC_0, "TPC6"},
{RAZWI_INITIATOR_ID_X_Y(16, 4, 16), mmDCORE1_RTR6_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_TPC_1, "TPC7"},
{RAZWI_INITIATOR_ID_X_Y(15, 4, 14), mmDCORE1_RTR5_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_TPC_2, "TPC8"},
{RAZWI_INITIATOR_ID_X_Y(15, 4, 16), mmDCORE1_RTR5_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_TPC_3, "TPC9"},
{RAZWI_INITIATOR_ID_X_Y(14, 4, 14), mmDCORE1_RTR4_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_TPC_4, "TPC10"},
{RAZWI_INITIATOR_ID_X_Y(14, 4, 16), mmDCORE1_RTR4_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_TPC_5, "TPC11"},
{RAZWI_INITIATOR_ID_X_Y(5, 11, 2), mmDCORE2_RTR3_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_TPC_0, "TPC12"},
{RAZWI_INITIATOR_ID_X_Y(5, 11, 4), mmDCORE2_RTR3_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_TPC_1, "TPC13"},
{RAZWI_INITIATOR_ID_X_Y(4, 11, 2), mmDCORE2_RTR2_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_TPC_2, "TPC14"},
{RAZWI_INITIATOR_ID_X_Y(4, 11, 4), mmDCORE2_RTR2_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_TPC_3, "TPC15"},
{RAZWI_INITIATOR_ID_X_Y(3, 11, 2), mmDCORE2_RTR1_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_TPC_4, "TPC16"},
{RAZWI_INITIATOR_ID_X_Y(3, 11, 4), mmDCORE2_RTR1_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_TPC_5, "TPC17"},
{RAZWI_INITIATOR_ID_X_Y(14, 11, 14), mmDCORE3_RTR4_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_TPC_0, "TPC18"},
{RAZWI_INITIATOR_ID_X_Y(14, 11, 16), mmDCORE3_RTR4_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_TPC_1, "TPC19"},
{RAZWI_INITIATOR_ID_X_Y(15, 11, 14), mmDCORE3_RTR5_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_TPC_2, "TPC20"},
{RAZWI_INITIATOR_ID_X_Y(15, 11, 16), mmDCORE3_RTR5_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_TPC_3, "TPC21"},
{RAZWI_INITIATOR_ID_X_Y(16, 11, 14), mmDCORE3_RTR6_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_TPC_4, "TPC22"},
{RAZWI_INITIATOR_ID_X_Y(16, 11, 16), mmDCORE3_RTR6_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_TPC_5, "TPC23"},
{RAZWI_INITIATOR_ID_X_Y(2, 4, 2), mmDCORE0_RTR0_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_TPC_5, "TPC24"},
{RAZWI_INITIATOR_ID_X_Y(17, 4, 8), mmDCORE1_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_NIC0_0, "NIC0"},
{RAZWI_INITIATOR_ID_X_Y(17, 4, 10), mmDCORE1_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_NIC0_1, "NIC1"},
{RAZWI_INITIATOR_ID_X_Y(17, 4, 12), mmDCORE1_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_NIC1_0, "NIC2"},
{RAZWI_INITIATOR_ID_X_Y(17, 4, 14), mmDCORE1_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_NIC1_1, "NIC3"},
{RAZWI_INITIATOR_ID_X_Y(17, 4, 15), mmDCORE1_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_NIC2_0, "NIC4"},
{RAZWI_INITIATOR_ID_X_Y(2, 11, 2), mmDCORE2_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_NIC2_1, "NIC5"},
{RAZWI_INITIATOR_ID_X_Y(2, 11, 4), mmDCORE2_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_NIC3_0, "NIC6"},
{RAZWI_INITIATOR_ID_X_Y(2, 11, 6), mmDCORE2_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_NIC3_1, "NIC7"},
{RAZWI_INITIATOR_ID_X_Y(2, 11, 8), mmDCORE2_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_NIC4_0, "NIC8"},
{RAZWI_INITIATOR_ID_X_Y(17, 11, 12), mmDCORE3_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_NIC4_1, "NIC9"},
{RAZWI_INITIATOR_ID_X_Y(17, 11, 14), mmDCORE3_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_NIC5_0, "NIC10"},
{RAZWI_INITIATOR_ID_X_Y(17, 11, 16), mmDCORE3_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_NIC5_1, "NIC11"},
{RAZWI_INITIATOR_ID_X_Y(2, 4, 2), mmDCORE0_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_PDMA_0, "PDMA0"},
{RAZWI_INITIATOR_ID_X_Y(2, 4, 3), mmDCORE0_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_PDMA_1, "PDMA1"},
{RAZWI_INITIATOR_ID_X_Y(2, 4, 4), mmDCORE0_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "PMMU"},
{RAZWI_INITIATOR_ID_X_Y(2, 4, 5), mmDCORE0_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "PCIE"},
{RAZWI_INITIATOR_ID_X_Y(17, 4, 16), mmDCORE1_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_ARC_FARM, "ARC_FARM"},
{RAZWI_INITIATOR_ID_X_Y(17, 4, 17), mmDCORE1_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_KDMA, "KDMA"},
{RAZWI_INITIATOR_ID_X_Y(1, 5, 1), mmSFT0_HBW_RTR_IF1_RTR_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_EDMA_0, "EDMA0"},
{RAZWI_INITIATOR_ID_X_Y(1, 5, 1), mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_EDMA_1, "EDMA1"},
{RAZWI_INITIATOR_ID_X_Y(18, 5, 18), mmSFT1_HBW_RTR_IF1_RTR_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_EDMA_0, "EDMA2"},
{RAZWI_INITIATOR_ID_X_Y(18, 5, 18), mmSFT1_HBW_RTR_IF0_RTR_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_EDMA_1, "EDMA3"},
{RAZWI_INITIATOR_ID_X_Y(1, 10, 1), mmSFT2_HBW_RTR_IF0_RTR_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_EDMA_0, "EDMA4"},
{RAZWI_INITIATOR_ID_X_Y(1, 10, 1), mmSFT2_HBW_RTR_IF1_RTR_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_EDMA_1, "EDMA5"},
{RAZWI_INITIATOR_ID_X_Y(18, 10, 18), mmSFT2_HBW_RTR_IF0_RTR_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_EDMA_0, "EDMA6"},
{RAZWI_INITIATOR_ID_X_Y(18, 10, 18), mmSFT2_HBW_RTR_IF1_RTR_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_EDMA_1, "EDMA7"},
{RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU0"},
{RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU1"},
{RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU2"},
{RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU3"},
{RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU4"},
{RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU5"},
{RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU6"},
{RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU7"},
{RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU8"},
{RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU9"},
{RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU10"},
{RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU11"},
{RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU12"},
{RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU13"},
{RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU14"},
{RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_SIZE, "HMMU15"},
{RAZWI_INITIATOR_ID_X_Y(2, 11, 2), mmDCORE2_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_ROT_0, "ROT0"},
{RAZWI_INITIATOR_ID_X_Y(17, 11, 16), mmDCORE3_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_ROT_1, "ROT1"},
{RAZWI_INITIATOR_ID_X_Y(2, 11, 2), mmDCORE2_RTR0_CTRL_BASE,
GAUDI2_ENGINE_ID_PSOC, "CPU"},
{RAZWI_INITIATOR_ID_X_Y(17, 11, 11), mmDCORE3_RTR7_CTRL_BASE,
GAUDI2_ENGINE_ID_PSOC, "PSOC"}
};
static struct gaudi2_razwi_info mme_razwi_info[] = {
/* MME X high coordinate is N/A, hence using only low coordinates */
{RAZWI_INITIATOR_ID_X_Y_LOW(7, 4), mmDCORE0_RTR5_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_WAP0"},
{RAZWI_INITIATOR_ID_X_Y_LOW(9, 4), mmDCORE0_RTR7_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_WAP1"},
{RAZWI_INITIATOR_ID_X_Y_LOW(8, 4), mmDCORE0_RTR6_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_CTRL_WR"},
{RAZWI_INITIATOR_ID_X_Y_LOW(9, 4), mmDCORE0_RTR7_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_CTRL_RD"},
{RAZWI_INITIATOR_ID_X_Y_LOW(6, 4), mmDCORE0_RTR4_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE0"},
{RAZWI_INITIATOR_ID_X_Y_LOW(6, 4), mmDCORE0_RTR4_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE1"},
{RAZWI_INITIATOR_ID_X_Y_LOW(7, 4), mmDCORE0_RTR5_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE2"},
{RAZWI_INITIATOR_ID_X_Y_LOW(8, 4), mmDCORE0_RTR6_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE3"},
{RAZWI_INITIATOR_ID_X_Y_LOW(9, 4), mmDCORE0_RTR7_CTRL_BASE,
GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE4"},
{RAZWI_INITIATOR_ID_X_Y_LOW(12, 4), mmDCORE1_RTR2_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_WAP0"},
{RAZWI_INITIATOR_ID_X_Y_LOW(10, 4), mmDCORE1_RTR0_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_WAP1"},
{RAZWI_INITIATOR_ID_X_Y_LOW(11, 4), mmDCORE1_RTR1_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_CTRL_WR"},
{RAZWI_INITIATOR_ID_X_Y_LOW(10, 4), mmDCORE1_RTR0_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_CTRL_RD"},
{RAZWI_INITIATOR_ID_X_Y_LOW(13, 4), mmDCORE1_RTR3_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE0"},
{RAZWI_INITIATOR_ID_X_Y_LOW(13, 4), mmDCORE1_RTR3_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE1"},
{RAZWI_INITIATOR_ID_X_Y_LOW(12, 4), mmDCORE1_RTR2_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE2"},
{RAZWI_INITIATOR_ID_X_Y_LOW(11, 4), mmDCORE1_RTR1_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE3"},
{RAZWI_INITIATOR_ID_X_Y_LOW(10, 4), mmDCORE1_RTR0_CTRL_BASE,
GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE4"},
{RAZWI_INITIATOR_ID_X_Y_LOW(7, 11), mmDCORE2_RTR5_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_WAP0"},
{RAZWI_INITIATOR_ID_X_Y_LOW(9, 11), mmDCORE2_RTR7_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_WAP1"},
{RAZWI_INITIATOR_ID_X_Y_LOW(8, 11), mmDCORE2_RTR6_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_CTRL_WR"},
{RAZWI_INITIATOR_ID_X_Y_LOW(9, 11), mmDCORE2_RTR7_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_CTRL_RD"},
{RAZWI_INITIATOR_ID_X_Y_LOW(6, 11), mmDCORE2_RTR4_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE0"},
{RAZWI_INITIATOR_ID_X_Y_LOW(6, 11), mmDCORE2_RTR4_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE1"},
{RAZWI_INITIATOR_ID_X_Y_LOW(7, 11), mmDCORE2_RTR5_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE2"},
{RAZWI_INITIATOR_ID_X_Y_LOW(8, 11), mmDCORE2_RTR6_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE3"},
{RAZWI_INITIATOR_ID_X_Y_LOW(9, 11), mmDCORE2_RTR7_CTRL_BASE,
GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE4"},
{RAZWI_INITIATOR_ID_X_Y_LOW(12, 11), mmDCORE3_RTR2_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_WAP0"},
{RAZWI_INITIATOR_ID_X_Y_LOW(10, 11), mmDCORE3_RTR0_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_WAP1"},
{RAZWI_INITIATOR_ID_X_Y_LOW(11, 11), mmDCORE3_RTR1_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_CTRL_WR"},
{RAZWI_INITIATOR_ID_X_Y_LOW(10, 11), mmDCORE3_RTR0_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_CTRL_RD"},
{RAZWI_INITIATOR_ID_X_Y_LOW(13, 11), mmDCORE3_RTR3_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE0"},
{RAZWI_INITIATOR_ID_X_Y_LOW(13, 11), mmDCORE3_RTR3_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE1"},
{RAZWI_INITIATOR_ID_X_Y_LOW(12, 11), mmDCORE3_RTR2_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE2"},
{RAZWI_INITIATOR_ID_X_Y_LOW(11, 11), mmDCORE3_RTR1_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE3"},
{RAZWI_INITIATOR_ID_X_Y_LOW(10, 11), mmDCORE3_RTR0_CTRL_BASE,
GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE4"}
};
enum hl_pmmu_fatal_cause {
LATENCY_RD_OUT_FIFO_OVERRUN,
LATENCY_WR_OUT_FIFO_OVERRUN,
};
enum hl_pcie_drain_ind_cause {
LBW_AXI_DRAIN_IND,
HBW_AXI_DRAIN_IND
};
static const u32 cluster_hmmu_hif_enabled_mask[GAUDI2_HBM_NUM] = {
[HBM_ID0] = 0xFFFC,
[HBM_ID1] = 0xFFCF,
[HBM_ID2] = 0xF7F7,
[HBM_ID3] = 0x7F7F,
[HBM_ID4] = 0xFCFF,
[HBM_ID5] = 0xCFFF,
};
static const u8 xbar_edge_to_hbm_cluster[EDMA_ID_SIZE] = {
[0] = HBM_ID0,
[1] = HBM_ID1,
[2] = HBM_ID4,
[3] = HBM_ID5,
};
static const u8 edma_to_hbm_cluster[EDMA_ID_SIZE] = {
[EDMA_ID_DCORE0_INSTANCE0] = HBM_ID0,
[EDMA_ID_DCORE0_INSTANCE1] = HBM_ID2,
[EDMA_ID_DCORE1_INSTANCE0] = HBM_ID1,
[EDMA_ID_DCORE1_INSTANCE1] = HBM_ID3,
[EDMA_ID_DCORE2_INSTANCE0] = HBM_ID2,
[EDMA_ID_DCORE2_INSTANCE1] = HBM_ID4,
[EDMA_ID_DCORE3_INSTANCE0] = HBM_ID3,
[EDMA_ID_DCORE3_INSTANCE1] = HBM_ID5,
};
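/* Async event ID reported by the QMAN that serves each hardware queue */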
static const int gaudi2_qman_async_event_id[] = {
[GAUDI2_QUEUE_ID_PDMA_0_0] = GAUDI2_EVENT_PDMA0_QM,
[GAUDI2_QUEUE_ID_PDMA_0_1] = GAUDI2_EVENT_PDMA0_QM,
[GAUDI2_QUEUE_ID_PDMA_0_2] = GAUDI2_EVENT_PDMA0_QM,
[GAUDI2_QUEUE_ID_PDMA_0_3] = GAUDI2_EVENT_PDMA0_QM,
[GAUDI2_QUEUE_ID_PDMA_1_0] = GAUDI2_EVENT_PDMA1_QM,
[GAUDI2_QUEUE_ID_PDMA_1_1] = GAUDI2_EVENT_PDMA1_QM,
[GAUDI2_QUEUE_ID_PDMA_1_2] = GAUDI2_EVENT_PDMA1_QM,
[GAUDI2_QUEUE_ID_PDMA_1_3] = GAUDI2_EVENT_PDMA1_QM,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0] = GAUDI2_EVENT_HDMA0_QM,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1] = GAUDI2_EVENT_HDMA0_QM,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2] = GAUDI2_EVENT_HDMA0_QM,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3] = GAUDI2_EVENT_HDMA0_QM,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0] = GAUDI2_EVENT_HDMA1_QM,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1] = GAUDI2_EVENT_HDMA1_QM,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2] = GAUDI2_EVENT_HDMA1_QM,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3] = GAUDI2_EVENT_HDMA1_QM,
[GAUDI2_QUEUE_ID_DCORE0_MME_0_0] = GAUDI2_EVENT_MME0_QM,
[GAUDI2_QUEUE_ID_DCORE0_MME_0_1] = GAUDI2_EVENT_MME0_QM,
[GAUDI2_QUEUE_ID_DCORE0_MME_0_2] = GAUDI2_EVENT_MME0_QM,
[GAUDI2_QUEUE_ID_DCORE0_MME_0_3] = GAUDI2_EVENT_MME0_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_0_0] = GAUDI2_EVENT_TPC0_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_0_1] = GAUDI2_EVENT_TPC0_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_0_2] = GAUDI2_EVENT_TPC0_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_0_3] = GAUDI2_EVENT_TPC0_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_1_0] = GAUDI2_EVENT_TPC1_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_1_1] = GAUDI2_EVENT_TPC1_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_1_2] = GAUDI2_EVENT_TPC1_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_1_3] = GAUDI2_EVENT_TPC1_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_2_0] = GAUDI2_EVENT_TPC2_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_2_1] = GAUDI2_EVENT_TPC2_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_2_2] = GAUDI2_EVENT_TPC2_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_2_3] = GAUDI2_EVENT_TPC2_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_3_0] = GAUDI2_EVENT_TPC3_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_3_1] = GAUDI2_EVENT_TPC3_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_3_2] = GAUDI2_EVENT_TPC3_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_3_3] = GAUDI2_EVENT_TPC3_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_4_0] = GAUDI2_EVENT_TPC4_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_4_1] = GAUDI2_EVENT_TPC4_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_4_2] = GAUDI2_EVENT_TPC4_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_4_3] = GAUDI2_EVENT_TPC4_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_5_0] = GAUDI2_EVENT_TPC5_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_5_1] = GAUDI2_EVENT_TPC5_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_5_2] = GAUDI2_EVENT_TPC5_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_5_3] = GAUDI2_EVENT_TPC5_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_6_0] = GAUDI2_EVENT_TPC24_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_6_1] = GAUDI2_EVENT_TPC24_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_6_2] = GAUDI2_EVENT_TPC24_QM,
[GAUDI2_QUEUE_ID_DCORE0_TPC_6_3] = GAUDI2_EVENT_TPC24_QM,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0] = GAUDI2_EVENT_HDMA2_QM,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1] = GAUDI2_EVENT_HDMA2_QM,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2] = GAUDI2_EVENT_HDMA2_QM,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3] = GAUDI2_EVENT_HDMA2_QM,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0] = GAUDI2_EVENT_HDMA3_QM,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1] = GAUDI2_EVENT_HDMA3_QM,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2] = GAUDI2_EVENT_HDMA3_QM,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3] = GAUDI2_EVENT_HDMA3_QM,
[GAUDI2_QUEUE_ID_DCORE1_MME_0_0] = GAUDI2_EVENT_MME1_QM,
[GAUDI2_QUEUE_ID_DCORE1_MME_0_1] = GAUDI2_EVENT_MME1_QM,
[GAUDI2_QUEUE_ID_DCORE1_MME_0_2] = GAUDI2_EVENT_MME1_QM,
[GAUDI2_QUEUE_ID_DCORE1_MME_0_3] = GAUDI2_EVENT_MME1_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_0_0] = GAUDI2_EVENT_TPC6_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_0_1] = GAUDI2_EVENT_TPC6_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_0_2] = GAUDI2_EVENT_TPC6_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_0_3] = GAUDI2_EVENT_TPC6_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_1_0] = GAUDI2_EVENT_TPC7_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_1_1] = GAUDI2_EVENT_TPC7_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_1_2] = GAUDI2_EVENT_TPC7_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_1_3] = GAUDI2_EVENT_TPC7_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_2_0] = GAUDI2_EVENT_TPC8_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_2_1] = GAUDI2_EVENT_TPC8_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_2_2] = GAUDI2_EVENT_TPC8_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_2_3] = GAUDI2_EVENT_TPC8_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_3_0] = GAUDI2_EVENT_TPC9_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_3_1] = GAUDI2_EVENT_TPC9_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_3_2] = GAUDI2_EVENT_TPC9_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_3_3] = GAUDI2_EVENT_TPC9_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_4_0] = GAUDI2_EVENT_TPC10_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_4_1] = GAUDI2_EVENT_TPC10_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_4_2] = GAUDI2_EVENT_TPC10_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_4_3] = GAUDI2_EVENT_TPC10_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_5_0] = GAUDI2_EVENT_TPC11_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_5_1] = GAUDI2_EVENT_TPC11_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_5_2] = GAUDI2_EVENT_TPC11_QM,
[GAUDI2_QUEUE_ID_DCORE1_TPC_5_3] = GAUDI2_EVENT_TPC11_QM,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0] = GAUDI2_EVENT_HDMA4_QM,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1] = GAUDI2_EVENT_HDMA4_QM,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2] = GAUDI2_EVENT_HDMA4_QM,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3] = GAUDI2_EVENT_HDMA4_QM,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0] = GAUDI2_EVENT_HDMA5_QM,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1] = GAUDI2_EVENT_HDMA5_QM,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2] = GAUDI2_EVENT_HDMA5_QM,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3] = GAUDI2_EVENT_HDMA5_QM,
[GAUDI2_QUEUE_ID_DCORE2_MME_0_0] = GAUDI2_EVENT_MME2_QM,
[GAUDI2_QUEUE_ID_DCORE2_MME_0_1] = GAUDI2_EVENT_MME2_QM,
[GAUDI2_QUEUE_ID_DCORE2_MME_0_2] = GAUDI2_EVENT_MME2_QM,
[GAUDI2_QUEUE_ID_DCORE2_MME_0_3] = GAUDI2_EVENT_MME2_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_0_0] = GAUDI2_EVENT_TPC12_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_0_1] = GAUDI2_EVENT_TPC12_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_0_2] = GAUDI2_EVENT_TPC12_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_0_3] = GAUDI2_EVENT_TPC12_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_1_0] = GAUDI2_EVENT_TPC13_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_1_1] = GAUDI2_EVENT_TPC13_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_1_2] = GAUDI2_EVENT_TPC13_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_1_3] = GAUDI2_EVENT_TPC13_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_2_0] = GAUDI2_EVENT_TPC14_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_2_1] = GAUDI2_EVENT_TPC14_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_2_2] = GAUDI2_EVENT_TPC14_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_2_3] = GAUDI2_EVENT_TPC14_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_3_0] = GAUDI2_EVENT_TPC15_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_3_1] = GAUDI2_EVENT_TPC15_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_3_2] = GAUDI2_EVENT_TPC15_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_3_3] = GAUDI2_EVENT_TPC15_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_4_0] = GAUDI2_EVENT_TPC16_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_4_1] = GAUDI2_EVENT_TPC16_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_4_2] = GAUDI2_EVENT_TPC16_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_4_3] = GAUDI2_EVENT_TPC16_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_5_0] = GAUDI2_EVENT_TPC17_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_5_1] = GAUDI2_EVENT_TPC17_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_5_2] = GAUDI2_EVENT_TPC17_QM,
[GAUDI2_QUEUE_ID_DCORE2_TPC_5_3] = GAUDI2_EVENT_TPC17_QM,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0] = GAUDI2_EVENT_HDMA6_QM,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1] = GAUDI2_EVENT_HDMA6_QM,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2] = GAUDI2_EVENT_HDMA6_QM,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3] = GAUDI2_EVENT_HDMA6_QM,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0] = GAUDI2_EVENT_HDMA7_QM,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1] = GAUDI2_EVENT_HDMA7_QM,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2] = GAUDI2_EVENT_HDMA7_QM,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3] = GAUDI2_EVENT_HDMA7_QM,
[GAUDI2_QUEUE_ID_DCORE3_MME_0_0] = GAUDI2_EVENT_MME3_QM,
[GAUDI2_QUEUE_ID_DCORE3_MME_0_1] = GAUDI2_EVENT_MME3_QM,
[GAUDI2_QUEUE_ID_DCORE3_MME_0_2] = GAUDI2_EVENT_MME3_QM,
[GAUDI2_QUEUE_ID_DCORE3_MME_0_3] = GAUDI2_EVENT_MME3_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_0_0] = GAUDI2_EVENT_TPC18_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_0_1] = GAUDI2_EVENT_TPC18_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_0_2] = GAUDI2_EVENT_TPC18_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_0_3] = GAUDI2_EVENT_TPC18_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_1_0] = GAUDI2_EVENT_TPC19_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_1_1] = GAUDI2_EVENT_TPC19_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_1_2] = GAUDI2_EVENT_TPC19_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_1_3] = GAUDI2_EVENT_TPC19_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_2_0] = GAUDI2_EVENT_TPC20_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_2_1] = GAUDI2_EVENT_TPC20_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_2_2] = GAUDI2_EVENT_TPC20_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_2_3] = GAUDI2_EVENT_TPC20_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_3_0] = GAUDI2_EVENT_TPC21_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_3_1] = GAUDI2_EVENT_TPC21_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_3_2] = GAUDI2_EVENT_TPC21_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_3_3] = GAUDI2_EVENT_TPC21_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_4_0] = GAUDI2_EVENT_TPC22_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_4_1] = GAUDI2_EVENT_TPC22_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_4_2] = GAUDI2_EVENT_TPC22_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_4_3] = GAUDI2_EVENT_TPC22_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_5_0] = GAUDI2_EVENT_TPC23_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_5_1] = GAUDI2_EVENT_TPC23_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_5_2] = GAUDI2_EVENT_TPC23_QM,
[GAUDI2_QUEUE_ID_DCORE3_TPC_5_3] = GAUDI2_EVENT_TPC23_QM,
[GAUDI2_QUEUE_ID_NIC_0_0] = GAUDI2_EVENT_NIC0_QM0,
[GAUDI2_QUEUE_ID_NIC_0_1] = GAUDI2_EVENT_NIC0_QM0,
[GAUDI2_QUEUE_ID_NIC_0_2] = GAUDI2_EVENT_NIC0_QM0,
[GAUDI2_QUEUE_ID_NIC_0_3] = GAUDI2_EVENT_NIC0_QM0,
[GAUDI2_QUEUE_ID_NIC_1_0] = GAUDI2_EVENT_NIC0_QM1,
[GAUDI2_QUEUE_ID_NIC_1_1] = GAUDI2_EVENT_NIC0_QM1,
[GAUDI2_QUEUE_ID_NIC_1_2] = GAUDI2_EVENT_NIC0_QM1,
[GAUDI2_QUEUE_ID_NIC_1_3] = GAUDI2_EVENT_NIC0_QM1,
[GAUDI2_QUEUE_ID_NIC_2_0] = GAUDI2_EVENT_NIC1_QM0,
[GAUDI2_QUEUE_ID_NIC_2_1] = GAUDI2_EVENT_NIC1_QM0,
[GAUDI2_QUEUE_ID_NIC_2_2] = GAUDI2_EVENT_NIC1_QM0,
[GAUDI2_QUEUE_ID_NIC_2_3] = GAUDI2_EVENT_NIC1_QM0,
[GAUDI2_QUEUE_ID_NIC_3_0] = GAUDI2_EVENT_NIC1_QM1,
[GAUDI2_QUEUE_ID_NIC_3_1] = GAUDI2_EVENT_NIC1_QM1,
[GAUDI2_QUEUE_ID_NIC_3_2] = GAUDI2_EVENT_NIC1_QM1,
[GAUDI2_QUEUE_ID_NIC_3_3] = GAUDI2_EVENT_NIC1_QM1,
[GAUDI2_QUEUE_ID_NIC_4_0] = GAUDI2_EVENT_NIC2_QM0,
[GAUDI2_QUEUE_ID_NIC_4_1] = GAUDI2_EVENT_NIC2_QM0,
[GAUDI2_QUEUE_ID_NIC_4_2] = GAUDI2_EVENT_NIC2_QM0,
[GAUDI2_QUEUE_ID_NIC_4_3] = GAUDI2_EVENT_NIC2_QM0,
[GAUDI2_QUEUE_ID_NIC_5_0] = GAUDI2_EVENT_NIC2_QM1,
[GAUDI2_QUEUE_ID_NIC_5_1] = GAUDI2_EVENT_NIC2_QM1,
[GAUDI2_QUEUE_ID_NIC_5_2] = GAUDI2_EVENT_NIC2_QM1,
[GAUDI2_QUEUE_ID_NIC_5_3] = GAUDI2_EVENT_NIC2_QM1,
[GAUDI2_QUEUE_ID_NIC_6_0] = GAUDI2_EVENT_NIC3_QM0,
[GAUDI2_QUEUE_ID_NIC_6_1] = GAUDI2_EVENT_NIC3_QM0,
[GAUDI2_QUEUE_ID_NIC_6_2] = GAUDI2_EVENT_NIC3_QM0,
[GAUDI2_QUEUE_ID_NIC_6_3] = GAUDI2_EVENT_NIC3_QM0,
[GAUDI2_QUEUE_ID_NIC_7_0] = GAUDI2_EVENT_NIC3_QM1,
[GAUDI2_QUEUE_ID_NIC_7_1] = GAUDI2_EVENT_NIC3_QM1,
[GAUDI2_QUEUE_ID_NIC_7_2] = GAUDI2_EVENT_NIC3_QM1,
[GAUDI2_QUEUE_ID_NIC_7_3] = GAUDI2_EVENT_NIC3_QM1,
[GAUDI2_QUEUE_ID_NIC_8_0] = GAUDI2_EVENT_NIC4_QM0,
[GAUDI2_QUEUE_ID_NIC_8_1] = GAUDI2_EVENT_NIC4_QM0,
[GAUDI2_QUEUE_ID_NIC_8_2] = GAUDI2_EVENT_NIC4_QM0,
[GAUDI2_QUEUE_ID_NIC_8_3] = GAUDI2_EVENT_NIC4_QM0,
[GAUDI2_QUEUE_ID_NIC_9_0] = GAUDI2_EVENT_NIC4_QM1,
[GAUDI2_QUEUE_ID_NIC_9_1] = GAUDI2_EVENT_NIC4_QM1,
[GAUDI2_QUEUE_ID_NIC_9_2] = GAUDI2_EVENT_NIC4_QM1,
[GAUDI2_QUEUE_ID_NIC_9_3] = GAUDI2_EVENT_NIC4_QM1,
[GAUDI2_QUEUE_ID_NIC_10_0] = GAUDI2_EVENT_NIC5_QM0,
[GAUDI2_QUEUE_ID_NIC_10_1] = GAUDI2_EVENT_NIC5_QM0,
[GAUDI2_QUEUE_ID_NIC_10_2] = GAUDI2_EVENT_NIC5_QM0,
[GAUDI2_QUEUE_ID_NIC_10_3] = GAUDI2_EVENT_NIC5_QM0,
[GAUDI2_QUEUE_ID_NIC_11_0] = GAUDI2_EVENT_NIC5_QM1,
[GAUDI2_QUEUE_ID_NIC_11_1] = GAUDI2_EVENT_NIC5_QM1,
[GAUDI2_QUEUE_ID_NIC_11_2] = GAUDI2_EVENT_NIC5_QM1,
[GAUDI2_QUEUE_ID_NIC_11_3] = GAUDI2_EVENT_NIC5_QM1,
[GAUDI2_QUEUE_ID_NIC_12_0] = GAUDI2_EVENT_NIC6_QM0,
[GAUDI2_QUEUE_ID_NIC_12_1] = GAUDI2_EVENT_NIC6_QM0,
[GAUDI2_QUEUE_ID_NIC_12_2] = GAUDI2_EVENT_NIC6_QM0,
[GAUDI2_QUEUE_ID_NIC_12_3] = GAUDI2_EVENT_NIC6_QM0,
[GAUDI2_QUEUE_ID_NIC_13_0] = GAUDI2_EVENT_NIC6_QM1,
[GAUDI2_QUEUE_ID_NIC_13_1] = GAUDI2_EVENT_NIC6_QM1,
[GAUDI2_QUEUE_ID_NIC_13_2] = GAUDI2_EVENT_NIC6_QM1,
[GAUDI2_QUEUE_ID_NIC_13_3] = GAUDI2_EVENT_NIC6_QM1,
[GAUDI2_QUEUE_ID_NIC_14_0] = GAUDI2_EVENT_NIC7_QM0,
[GAUDI2_QUEUE_ID_NIC_14_1] = GAUDI2_EVENT_NIC7_QM0,
[GAUDI2_QUEUE_ID_NIC_14_2] = GAUDI2_EVENT_NIC7_QM0,
[GAUDI2_QUEUE_ID_NIC_14_3] = GAUDI2_EVENT_NIC7_QM0,
[GAUDI2_QUEUE_ID_NIC_15_0] = GAUDI2_EVENT_NIC7_QM1,
[GAUDI2_QUEUE_ID_NIC_15_1] = GAUDI2_EVENT_NIC7_QM1,
[GAUDI2_QUEUE_ID_NIC_15_2] = GAUDI2_EVENT_NIC7_QM1,
[GAUDI2_QUEUE_ID_NIC_15_3] = GAUDI2_EVENT_NIC7_QM1,
[GAUDI2_QUEUE_ID_NIC_16_0] = GAUDI2_EVENT_NIC8_QM0,
[GAUDI2_QUEUE_ID_NIC_16_1] = GAUDI2_EVENT_NIC8_QM0,
[GAUDI2_QUEUE_ID_NIC_16_2] = GAUDI2_EVENT_NIC8_QM0,
[GAUDI2_QUEUE_ID_NIC_16_3] = GAUDI2_EVENT_NIC8_QM0,
[GAUDI2_QUEUE_ID_NIC_17_0] = GAUDI2_EVENT_NIC8_QM1,
[GAUDI2_QUEUE_ID_NIC_17_1] = GAUDI2_EVENT_NIC8_QM1,
[GAUDI2_QUEUE_ID_NIC_17_2] = GAUDI2_EVENT_NIC8_QM1,
[GAUDI2_QUEUE_ID_NIC_17_3] = GAUDI2_EVENT_NIC8_QM1,
[GAUDI2_QUEUE_ID_NIC_18_0] = GAUDI2_EVENT_NIC9_QM0,
[GAUDI2_QUEUE_ID_NIC_18_1] = GAUDI2_EVENT_NIC9_QM0,
[GAUDI2_QUEUE_ID_NIC_18_2] = GAUDI2_EVENT_NIC9_QM0,
[GAUDI2_QUEUE_ID_NIC_18_3] = GAUDI2_EVENT_NIC9_QM0,
[GAUDI2_QUEUE_ID_NIC_19_0] = GAUDI2_EVENT_NIC9_QM1,
[GAUDI2_QUEUE_ID_NIC_19_1] = GAUDI2_EVENT_NIC9_QM1,
[GAUDI2_QUEUE_ID_NIC_19_2] = GAUDI2_EVENT_NIC9_QM1,
[GAUDI2_QUEUE_ID_NIC_19_3] = GAUDI2_EVENT_NIC9_QM1,
[GAUDI2_QUEUE_ID_NIC_20_0] = GAUDI2_EVENT_NIC10_QM0,
[GAUDI2_QUEUE_ID_NIC_20_1] = GAUDI2_EVENT_NIC10_QM0,
[GAUDI2_QUEUE_ID_NIC_20_2] = GAUDI2_EVENT_NIC10_QM0,
[GAUDI2_QUEUE_ID_NIC_20_3] = GAUDI2_EVENT_NIC10_QM0,
[GAUDI2_QUEUE_ID_NIC_21_0] = GAUDI2_EVENT_NIC10_QM1,
[GAUDI2_QUEUE_ID_NIC_21_1] = GAUDI2_EVENT_NIC10_QM1,
[GAUDI2_QUEUE_ID_NIC_21_2] = GAUDI2_EVENT_NIC10_QM1,
[GAUDI2_QUEUE_ID_NIC_21_3] = GAUDI2_EVENT_NIC10_QM1,
[GAUDI2_QUEUE_ID_NIC_22_0] = GAUDI2_EVENT_NIC11_QM0,
[GAUDI2_QUEUE_ID_NIC_22_1] = GAUDI2_EVENT_NIC11_QM0,
[GAUDI2_QUEUE_ID_NIC_22_2] = GAUDI2_EVENT_NIC11_QM0,
[GAUDI2_QUEUE_ID_NIC_22_3] = GAUDI2_EVENT_NIC11_QM0,
[GAUDI2_QUEUE_ID_NIC_23_0] = GAUDI2_EVENT_NIC11_QM1,
[GAUDI2_QUEUE_ID_NIC_23_1] = GAUDI2_EVENT_NIC11_QM1,
[GAUDI2_QUEUE_ID_NIC_23_2] = GAUDI2_EVENT_NIC11_QM1,
[GAUDI2_QUEUE_ID_NIC_23_3] = GAUDI2_EVENT_NIC11_QM1,
[GAUDI2_QUEUE_ID_ROT_0_0] = GAUDI2_EVENT_ROTATOR0_ROT0_QM,
[GAUDI2_QUEUE_ID_ROT_0_1] = GAUDI2_EVENT_ROTATOR0_ROT0_QM,
[GAUDI2_QUEUE_ID_ROT_0_2] = GAUDI2_EVENT_ROTATOR0_ROT0_QM,
[GAUDI2_QUEUE_ID_ROT_0_3] = GAUDI2_EVENT_ROTATOR0_ROT0_QM,
[GAUDI2_QUEUE_ID_ROT_1_0] = GAUDI2_EVENT_ROTATOR1_ROT1_QM,
[GAUDI2_QUEUE_ID_ROT_1_1] = GAUDI2_EVENT_ROTATOR1_ROT1_QM,
[GAUDI2_QUEUE_ID_ROT_1_2] = GAUDI2_EVENT_ROTATOR1_ROT1_QM,
[GAUDI2_QUEUE_ID_ROT_1_3] = GAUDI2_EVENT_ROTATOR1_ROT1_QM
};
static const int gaudi2_dma_core_async_event_id[] = {
[DMA_CORE_ID_EDMA0] = GAUDI2_EVENT_HDMA0_CORE,
[DMA_CORE_ID_EDMA1] = GAUDI2_EVENT_HDMA1_CORE,
[DMA_CORE_ID_EDMA2] = GAUDI2_EVENT_HDMA2_CORE,
[DMA_CORE_ID_EDMA3] = GAUDI2_EVENT_HDMA3_CORE,
[DMA_CORE_ID_EDMA4] = GAUDI2_EVENT_HDMA4_CORE,
[DMA_CORE_ID_EDMA5] = GAUDI2_EVENT_HDMA5_CORE,
[DMA_CORE_ID_EDMA6] = GAUDI2_EVENT_HDMA6_CORE,
[DMA_CORE_ID_EDMA7] = GAUDI2_EVENT_HDMA7_CORE,
[DMA_CORE_ID_PDMA0] = GAUDI2_EVENT_PDMA0_CORE,
[DMA_CORE_ID_PDMA1] = GAUDI2_EVENT_PDMA1_CORE,
[DMA_CORE_ID_KDMA] = GAUDI2_EVENT_KDMA0_CORE,
};
static const char * const gaudi2_qm_sei_error_cause[GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE] = {
"qman sei intr",
"arc sei intr"
};
static const char * const gaudi2_cpu_sei_error_cause[GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE] = {
"AXI_TERMINATOR WR",
"AXI_TERMINATOR RD",
"AXI SPLIT SEI Status"
};
static const char * const gaudi2_arc_sei_error_cause[GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE] = {
"cbu_bresp_sei_intr_cause",
"cbu_rresp_sei_intr_cause",
"lbu_bresp_sei_intr_cause",
"lbu_rresp_sei_intr_cause",
"cbu_axi_split_intr_cause",
"lbu_axi_split_intr_cause",
"arc_ip_excptn_sei_intr_cause",
"dmi_bresp_sei_intr_cause",
"aux2apb_err_sei_intr_cause",
"cfg_lbw_wr_terminated_intr_cause",
"cfg_lbw_rd_terminated_intr_cause",
"cfg_dccm_wr_terminated_intr_cause",
"cfg_dccm_rd_terminated_intr_cause",
"cfg_hbw_rd_terminated_intr_cause"
};
static const char * const gaudi2_dec_error_cause[GAUDI2_NUM_OF_DEC_ERR_CAUSE] = {
"msix_vcd_hbw_sei",
"msix_l2c_hbw_sei",
"msix_nrm_hbw_sei",
"msix_abnrm_hbw_sei",
"msix_vcd_lbw_sei",
"msix_l2c_lbw_sei",
"msix_nrm_lbw_sei",
"msix_abnrm_lbw_sei",
"apb_vcd_lbw_sei",
"apb_l2c_lbw_sei",
"apb_nrm_lbw_sei",
"apb_abnrm_lbw_sei",
"dec_sei",
"dec_apb_sei",
"trc_apb_sei",
"lbw_mstr_if_sei",
"axi_split_bresp_err_sei",
"hbw_axi_wr_viol_sei",
"hbw_axi_rd_viol_sei",
"lbw_axi_wr_viol_sei",
"lbw_axi_rd_viol_sei",
"vcd_spi",
"l2c_spi",
"nrm_spi",
"abnrm_spi",
};
static const char * const gaudi2_qman_error_cause[GAUDI2_NUM_OF_QM_ERR_CAUSE] = {
"PQ AXI HBW error",
"CQ AXI HBW error",
"CP AXI HBW error",
"CP error due to undefined OPCODE",
"CP encountered STOP OPCODE",
"CP AXI LBW error",
"CP WRREG32 or WRBULK returned error",
"N/A",
"FENCE 0 inc over max value and clipped",
"FENCE 1 inc over max value and clipped",
"FENCE 2 inc over max value and clipped",
"FENCE 3 inc over max value and clipped",
"FENCE 0 dec under min value and clipped",
"FENCE 1 dec under min value and clipped",
"FENCE 2 dec under min value and clipped",
"FENCE 3 dec under min value and clipped",
"CPDMA Up overflow",
"PQC L2H error"
};
static const char * const gaudi2_lower_qman_error_cause[GAUDI2_NUM_OF_LOWER_QM_ERR_CAUSE] = {
"RSVD0",
"CQ AXI HBW error",
"CP AXI HBW error",
"CP error due to undefined OPCODE",
"CP encountered STOP OPCODE",
"CP AXI LBW error",
"CP WRREG32 or WRBULK returned error",
"N/A",
"FENCE 0 inc over max value and clipped",
"FENCE 1 inc over max value and clipped",
"FENCE 2 inc over max value and clipped",
"FENCE 3 inc over max value and clipped",
"FENCE 0 dec under min value and clipped",
"FENCE 1 dec under min value and clipped",
"FENCE 2 dec under min value and clipped",
"FENCE 3 dec under min value and clipped",
"CPDMA Up overflow",
"RSVD17",
"CQ_WR_IFIFO_CI_ERR",
"CQ_WR_CTL_CI_ERR",
"ARC_CQF_RD_ERR",
"ARC_CQ_WR_IFIFO_CI_ERR",
"ARC_CQ_WR_CTL_CI_ERR",
"ARC_AXI_ERR",
"CP_SWITCH_WDT_ERR"
};
static const char * const gaudi2_qman_arb_error_cause[GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE] = {
"Choice push while full error",
"Choice Q watchdog error",
"MSG AXI LBW returned with error"
};
static const char * const guadi2_rot_error_cause[GAUDI2_NUM_OF_ROT_ERR_CAUSE] = {
"qm_axi_err",
"qm_trace_fence_events",
"qm_sw_err",
"qm_cp_sw_stop",
"lbw_mstr_rresp_err",
"lbw_mstr_bresp_err",
"lbw_msg_slverr",
"hbw_msg_slverr",
"wbc_slverr",
"hbw_mstr_rresp_err",
"hbw_mstr_bresp_err",
"sb_resp_intr",
"mrsb_resp_intr",
"core_dw_status_0",
"core_dw_status_1",
"core_dw_status_2",
"core_dw_status_3",
"core_dw_status_4",
"core_dw_status_5",
"core_dw_status_6",
"core_dw_status_7",
"async_arc2cpu_sei_intr",
};
static const char * const gaudi2_tpc_interrupts_cause[GAUDI2_NUM_OF_TPC_INTR_CAUSE] = {
"tpc_address_exceed_slm",
"tpc_div_by_0",
"tpc_spu_mac_overflow",
"tpc_spu_addsub_overflow",
"tpc_spu_abs_overflow",
"tpc_spu_fma_fp_dst_nan",
"tpc_spu_fma_fp_dst_inf",
"tpc_spu_convert_fp_dst_nan",
"tpc_spu_convert_fp_dst_inf",
"tpc_spu_fp_dst_denorm",
"tpc_vpu_mac_overflow",
"tpc_vpu_addsub_overflow",
"tpc_vpu_abs_overflow",
"tpc_vpu_convert_fp_dst_nan",
"tpc_vpu_convert_fp_dst_inf",
"tpc_vpu_fma_fp_dst_nan",
"tpc_vpu_fma_fp_dst_inf",
"tpc_vpu_fp_dst_denorm",
"tpc_assertions",
"tpc_illegal_instruction",
"tpc_pc_wrap_around",
"tpc_qm_sw_err",
"tpc_hbw_rresp_err",
"tpc_hbw_bresp_err",
"tpc_lbw_rresp_err",
"tpc_lbw_bresp_err",
"st_unlock_already_locked",
"invalid_lock_access",
"LD_L protection violation",
"ST_L protection violation",
"D$ L0CS mismatch",
};
static const char * const guadi2_mme_error_cause[GAUDI2_NUM_OF_MME_ERR_CAUSE] = {
"agu_resp_intr",
"qman_axi_err",
"wap sei (wbc axi err)",
"arc sei",
"cfg access error",
"qm_sw_err",
"sbte_dbg_intr_0",
"sbte_dbg_intr_1",
"sbte_dbg_intr_2",
"sbte_dbg_intr_3",
"sbte_dbg_intr_4",
"sbte_prtn_intr_0",
"sbte_prtn_intr_1",
"sbte_prtn_intr_2",
"sbte_prtn_intr_3",
"sbte_prtn_intr_4",
};
static const char * const guadi2_mme_sbte_error_cause[GAUDI2_NUM_OF_MME_SBTE_ERR_CAUSE] = {
"i0",
"i1",
"i2",
"i3",
"i4",
};
static const char * const guadi2_mme_wap_error_cause[GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE] = {
"WBC ERR RESP_0",
"WBC ERR RESP_1",
"AP SOURCE POS INF",
"AP SOURCE NEG INF",
"AP SOURCE NAN",
"AP RESULT POS INF",
"AP RESULT NEG INF",
};
static const char * const gaudi2_dma_core_interrupts_cause[GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE] = {
"HBW Read returned with error RRESP",
"HBW write returned with error BRESP",
"LBW write returned with error BRESP",
"descriptor_fifo_overflow",
"KDMA SB LBW Read returned with error",
"KDMA WBC LBW Write returned with error",
"TRANSPOSE ENGINE DESC FIFO OVERFLOW",
"WRONG CFG FOR COMMIT IN LIN DMA"
};
static const char * const gaudi2_kdma_core_interrupts_cause[GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE] = {
"HBW/LBW Read returned with error RRESP",
"HBW/LBW write returned with error BRESP",
"LBW write returned with error BRESP",
"descriptor_fifo_overflow",
"KDMA SB LBW Read returned with error",
"KDMA WBC LBW Write returned with error",
"TRANSPOSE ENGINE DESC FIFO OVERFLOW",
"WRONG CFG FOR COMMIT IN LIN DMA"
};
struct gaudi2_sm_sei_cause_data {
const char *cause_name;
const char *log_name;
};
static const struct gaudi2_sm_sei_cause_data
gaudi2_sm_sei_cause[GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE] = {
{"calculated SO value overflow/underflow", "SOB ID"},
{"payload address of monitor is not aligned to 4B", "monitor addr"},
{"armed monitor write got BRESP (SLVERR or DECERR)", "AXI id"},
};
static const char * const
gaudi2_pmmu_fatal_interrupts_cause[GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE] = {
"LATENCY_RD_OUT_FIFO_OVERRUN",
"LATENCY_WR_OUT_FIFO_OVERRUN",
};
static const char * const
gaudi2_hif_fatal_interrupts_cause[GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE] = {
"LATENCY_RD_OUT_FIFO_OVERRUN",
"LATENCY_WR_OUT_FIFO_OVERRUN",
};
static const char * const
gaudi2_psoc_axi_drain_interrupts_cause[GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE] = {
"AXI drain HBW",
"AXI drain LBW",
};
static const char * const
gaudi2_pcie_addr_dec_error_cause[GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE] = {
"HBW error response",
"LBW error response",
"TLP is blocked by RR"
};
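/* Base address of the QMAN block that serves each hardware queue */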
const u32 gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_SIZE] = {
[GAUDI2_QUEUE_ID_PDMA_0_0] = mmPDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_PDMA_0_1] = mmPDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_PDMA_0_2] = mmPDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_PDMA_0_3] = mmPDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_PDMA_1_0] = mmPDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_PDMA_1_1] = mmPDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_PDMA_1_2] = mmPDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_PDMA_1_3] = mmPDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0] = mmDCORE0_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1] = mmDCORE0_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2] = mmDCORE0_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3] = mmDCORE0_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0] = mmDCORE0_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1] = mmDCORE0_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2] = mmDCORE0_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3] = mmDCORE0_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_MME_0_0] = mmDCORE0_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_MME_0_1] = mmDCORE0_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_MME_0_2] = mmDCORE0_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_MME_0_3] = mmDCORE0_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_0_0] = mmDCORE0_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_0_1] = mmDCORE0_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_0_2] = mmDCORE0_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_0_3] = mmDCORE0_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_1_0] = mmDCORE0_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_1_1] = mmDCORE0_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_1_2] = mmDCORE0_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_1_3] = mmDCORE0_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_2_0] = mmDCORE0_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_2_1] = mmDCORE0_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_2_2] = mmDCORE0_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_2_3] = mmDCORE0_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_3_0] = mmDCORE0_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_3_1] = mmDCORE0_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_3_2] = mmDCORE0_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_3_3] = mmDCORE0_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_4_0] = mmDCORE0_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_4_1] = mmDCORE0_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_4_2] = mmDCORE0_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_4_3] = mmDCORE0_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_5_0] = mmDCORE0_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_5_1] = mmDCORE0_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_5_2] = mmDCORE0_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_5_3] = mmDCORE0_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_6_0] = mmDCORE0_TPC6_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_6_1] = mmDCORE0_TPC6_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_6_2] = mmDCORE0_TPC6_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE0_TPC_6_3] = mmDCORE0_TPC6_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0] = mmDCORE1_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1] = mmDCORE1_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2] = mmDCORE1_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3] = mmDCORE1_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0] = mmDCORE1_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1] = mmDCORE1_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2] = mmDCORE1_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3] = mmDCORE1_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_MME_0_0] = mmDCORE1_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_MME_0_1] = mmDCORE1_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_MME_0_2] = mmDCORE1_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_MME_0_3] = mmDCORE1_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_0_0] = mmDCORE1_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_0_1] = mmDCORE1_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_0_2] = mmDCORE1_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_0_3] = mmDCORE1_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_1_0] = mmDCORE1_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_1_1] = mmDCORE1_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_1_2] = mmDCORE1_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_1_3] = mmDCORE1_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_2_0] = mmDCORE1_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_2_1] = mmDCORE1_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_2_2] = mmDCORE1_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_2_3] = mmDCORE1_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_3_0] = mmDCORE1_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_3_1] = mmDCORE1_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_3_2] = mmDCORE1_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_3_3] = mmDCORE1_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_4_0] = mmDCORE1_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_4_1] = mmDCORE1_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_4_2] = mmDCORE1_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_4_3] = mmDCORE1_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_5_0] = mmDCORE1_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_5_1] = mmDCORE1_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_5_2] = mmDCORE1_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE1_TPC_5_3] = mmDCORE1_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0] = mmDCORE2_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1] = mmDCORE2_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2] = mmDCORE2_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3] = mmDCORE2_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0] = mmDCORE2_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1] = mmDCORE2_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2] = mmDCORE2_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3] = mmDCORE2_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_MME_0_0] = mmDCORE2_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_MME_0_1] = mmDCORE2_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_MME_0_2] = mmDCORE2_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_MME_0_3] = mmDCORE2_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_0_0] = mmDCORE2_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_0_1] = mmDCORE2_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_0_2] = mmDCORE2_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_0_3] = mmDCORE2_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_1_0] = mmDCORE2_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_1_1] = mmDCORE2_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_1_2] = mmDCORE2_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_1_3] = mmDCORE2_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_2_0] = mmDCORE2_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_2_1] = mmDCORE2_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_2_2] = mmDCORE2_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_2_3] = mmDCORE2_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_3_0] = mmDCORE2_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_3_1] = mmDCORE2_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_3_2] = mmDCORE2_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_3_3] = mmDCORE2_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_4_0] = mmDCORE2_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_4_1] = mmDCORE2_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_4_2] = mmDCORE2_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_4_3] = mmDCORE2_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_5_0] = mmDCORE2_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_5_1] = mmDCORE2_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_5_2] = mmDCORE2_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE2_TPC_5_3] = mmDCORE2_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0] = mmDCORE3_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1] = mmDCORE3_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2] = mmDCORE3_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3] = mmDCORE3_EDMA0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0] = mmDCORE3_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1] = mmDCORE3_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2] = mmDCORE3_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3] = mmDCORE3_EDMA1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_MME_0_0] = mmDCORE3_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_MME_0_1] = mmDCORE3_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_MME_0_2] = mmDCORE3_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_MME_0_3] = mmDCORE3_MME_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_0_0] = mmDCORE3_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_0_1] = mmDCORE3_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_0_2] = mmDCORE3_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_0_3] = mmDCORE3_TPC0_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_1_0] = mmDCORE3_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_1_1] = mmDCORE3_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_1_2] = mmDCORE3_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_1_3] = mmDCORE3_TPC1_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_2_0] = mmDCORE3_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_2_1] = mmDCORE3_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_2_2] = mmDCORE3_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_2_3] = mmDCORE3_TPC2_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_3_0] = mmDCORE3_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_3_1] = mmDCORE3_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_3_2] = mmDCORE3_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_3_3] = mmDCORE3_TPC3_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_4_0] = mmDCORE3_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_4_1] = mmDCORE3_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_4_2] = mmDCORE3_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_4_3] = mmDCORE3_TPC4_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_5_0] = mmDCORE3_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_5_1] = mmDCORE3_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_5_2] = mmDCORE3_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_DCORE3_TPC_5_3] = mmDCORE3_TPC5_QM_BASE,
[GAUDI2_QUEUE_ID_NIC_0_0] = mmNIC0_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_0_1] = mmNIC0_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_0_2] = mmNIC0_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_0_3] = mmNIC0_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_1_0] = mmNIC0_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_1_1] = mmNIC0_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_1_2] = mmNIC0_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_1_3] = mmNIC0_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_2_0] = mmNIC1_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_2_1] = mmNIC1_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_2_2] = mmNIC1_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_2_3] = mmNIC1_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_3_0] = mmNIC1_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_3_1] = mmNIC1_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_3_2] = mmNIC1_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_3_3] = mmNIC1_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_4_0] = mmNIC2_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_4_1] = mmNIC2_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_4_2] = mmNIC2_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_4_3] = mmNIC2_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_5_0] = mmNIC2_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_5_1] = mmNIC2_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_5_2] = mmNIC2_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_5_3] = mmNIC2_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_6_0] = mmNIC3_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_6_1] = mmNIC3_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_6_2] = mmNIC3_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_6_3] = mmNIC3_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_7_0] = mmNIC3_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_7_1] = mmNIC3_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_7_2] = mmNIC3_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_7_3] = mmNIC3_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_8_0] = mmNIC4_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_8_1] = mmNIC4_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_8_2] = mmNIC4_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_8_3] = mmNIC4_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_9_0] = mmNIC4_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_9_1] = mmNIC4_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_9_2] = mmNIC4_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_9_3] = mmNIC4_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_10_0] = mmNIC5_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_10_1] = mmNIC5_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_10_2] = mmNIC5_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_10_3] = mmNIC5_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_11_0] = mmNIC5_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_11_1] = mmNIC5_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_11_2] = mmNIC5_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_11_3] = mmNIC5_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_12_0] = mmNIC6_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_12_1] = mmNIC6_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_12_2] = mmNIC6_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_12_3] = mmNIC6_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_13_0] = mmNIC6_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_13_1] = mmNIC6_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_13_2] = mmNIC6_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_13_3] = mmNIC6_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_14_0] = mmNIC7_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_14_1] = mmNIC7_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_14_2] = mmNIC7_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_14_3] = mmNIC7_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_15_0] = mmNIC7_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_15_1] = mmNIC7_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_15_2] = mmNIC7_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_15_3] = mmNIC7_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_16_0] = mmNIC8_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_16_1] = mmNIC8_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_16_2] = mmNIC8_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_16_3] = mmNIC8_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_17_0] = mmNIC8_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_17_1] = mmNIC8_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_17_2] = mmNIC8_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_17_3] = mmNIC8_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_18_0] = mmNIC9_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_18_1] = mmNIC9_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_18_2] = mmNIC9_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_18_3] = mmNIC9_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_19_0] = mmNIC9_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_19_1] = mmNIC9_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_19_2] = mmNIC9_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_19_3] = mmNIC9_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_20_0] = mmNIC10_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_20_1] = mmNIC10_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_20_2] = mmNIC10_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_20_3] = mmNIC10_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_21_0] = mmNIC10_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_21_1] = mmNIC10_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_21_2] = mmNIC10_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_21_3] = mmNIC10_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_22_0] = mmNIC11_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_22_1] = mmNIC11_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_22_2] = mmNIC11_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_22_3] = mmNIC11_QM0_BASE,
[GAUDI2_QUEUE_ID_NIC_23_0] = mmNIC11_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_23_1] = mmNIC11_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_23_2] = mmNIC11_QM1_BASE,
[GAUDI2_QUEUE_ID_NIC_23_3] = mmNIC11_QM1_BASE,
[GAUDI2_QUEUE_ID_ROT_0_0] = mmROT0_QM_BASE,
[GAUDI2_QUEUE_ID_ROT_0_1] = mmROT0_QM_BASE,
[GAUDI2_QUEUE_ID_ROT_0_2] = mmROT0_QM_BASE,
[GAUDI2_QUEUE_ID_ROT_0_3] = mmROT0_QM_BASE,
[GAUDI2_QUEUE_ID_ROT_1_0] = mmROT1_QM_BASE,
[GAUDI2_QUEUE_ID_ROT_1_1] = mmROT1_QM_BASE,
[GAUDI2_QUEUE_ID_ROT_1_2] = mmROT1_QM_BASE,
[GAUDI2_QUEUE_ID_ROT_1_3] = mmROT1_QM_BASE
};
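/* AUX register block base address of each ARC CPU */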
static const u32 gaudi2_arc_blocks_bases[NUM_ARC_CPUS] = {
[CPU_ID_SCHED_ARC0] = mmARC_FARM_ARC0_AUX_BASE,
[CPU_ID_SCHED_ARC1] = mmARC_FARM_ARC1_AUX_BASE,
[CPU_ID_SCHED_ARC2] = mmARC_FARM_ARC2_AUX_BASE,
[CPU_ID_SCHED_ARC3] = mmARC_FARM_ARC3_AUX_BASE,
[CPU_ID_SCHED_ARC4] = mmDCORE1_MME_QM_ARC_AUX_BASE,
[CPU_ID_SCHED_ARC5] = mmDCORE3_MME_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC0] = mmDCORE0_TPC0_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC1] = mmDCORE0_TPC1_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC2] = mmDCORE0_TPC2_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC3] = mmDCORE0_TPC3_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC4] = mmDCORE0_TPC4_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC5] = mmDCORE0_TPC5_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC6] = mmDCORE1_TPC0_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC7] = mmDCORE1_TPC1_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC8] = mmDCORE1_TPC2_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC9] = mmDCORE1_TPC3_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC10] = mmDCORE1_TPC4_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC11] = mmDCORE1_TPC5_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC12] = mmDCORE2_TPC0_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC13] = mmDCORE2_TPC1_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC14] = mmDCORE2_TPC2_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC15] = mmDCORE2_TPC3_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC16] = mmDCORE2_TPC4_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC17] = mmDCORE2_TPC5_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC18] = mmDCORE3_TPC0_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC19] = mmDCORE3_TPC1_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC20] = mmDCORE3_TPC2_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC21] = mmDCORE3_TPC3_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC22] = mmDCORE3_TPC4_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC23] = mmDCORE3_TPC5_QM_ARC_AUX_BASE,
[CPU_ID_TPC_QMAN_ARC24] = mmDCORE0_TPC6_QM_ARC_AUX_BASE,
[CPU_ID_MME_QMAN_ARC0] = mmDCORE0_MME_QM_ARC_AUX_BASE,
[CPU_ID_MME_QMAN_ARC1] = mmDCORE2_MME_QM_ARC_AUX_BASE,
[CPU_ID_EDMA_QMAN_ARC0] = mmDCORE0_EDMA0_QM_ARC_AUX_BASE,
[CPU_ID_EDMA_QMAN_ARC1] = mmDCORE0_EDMA1_QM_ARC_AUX_BASE,
[CPU_ID_EDMA_QMAN_ARC2] = mmDCORE1_EDMA0_QM_ARC_AUX_BASE,
[CPU_ID_EDMA_QMAN_ARC3] = mmDCORE1_EDMA1_QM_ARC_AUX_BASE,
[CPU_ID_EDMA_QMAN_ARC4] = mmDCORE2_EDMA0_QM_ARC_AUX_BASE,
[CPU_ID_EDMA_QMAN_ARC5] = mmDCORE2_EDMA1_QM_ARC_AUX_BASE,
[CPU_ID_EDMA_QMAN_ARC6] = mmDCORE3_EDMA0_QM_ARC_AUX_BASE,
[CPU_ID_EDMA_QMAN_ARC7] = mmDCORE3_EDMA1_QM_ARC_AUX_BASE,
[CPU_ID_PDMA_QMAN_ARC0] = mmPDMA0_QM_ARC_AUX_BASE,
[CPU_ID_PDMA_QMAN_ARC1] = mmPDMA1_QM_ARC_AUX_BASE,
[CPU_ID_ROT_QMAN_ARC0] = mmROT0_QM_ARC_AUX_BASE,
[CPU_ID_ROT_QMAN_ARC1] = mmROT1_QM_ARC_AUX_BASE,
[CPU_ID_NIC_QMAN_ARC0] = mmNIC0_QM_ARC_AUX0_BASE,
[CPU_ID_NIC_QMAN_ARC1] = mmNIC0_QM_ARC_AUX1_BASE,
[CPU_ID_NIC_QMAN_ARC2] = mmNIC1_QM_ARC_AUX0_BASE,
[CPU_ID_NIC_QMAN_ARC3] = mmNIC1_QM_ARC_AUX1_BASE,
[CPU_ID_NIC_QMAN_ARC4] = mmNIC2_QM_ARC_AUX0_BASE,
[CPU_ID_NIC_QMAN_ARC5] = mmNIC2_QM_ARC_AUX1_BASE,
[CPU_ID_NIC_QMAN_ARC6] = mmNIC3_QM_ARC_AUX0_BASE,
[CPU_ID_NIC_QMAN_ARC7] = mmNIC3_QM_ARC_AUX1_BASE,
[CPU_ID_NIC_QMAN_ARC8] = mmNIC4_QM_ARC_AUX0_BASE,
[CPU_ID_NIC_QMAN_ARC9] = mmNIC4_QM_ARC_AUX1_BASE,
[CPU_ID_NIC_QMAN_ARC10] = mmNIC5_QM_ARC_AUX0_BASE,
[CPU_ID_NIC_QMAN_ARC11] = mmNIC5_QM_ARC_AUX1_BASE,
[CPU_ID_NIC_QMAN_ARC12] = mmNIC6_QM_ARC_AUX0_BASE,
[CPU_ID_NIC_QMAN_ARC13] = mmNIC6_QM_ARC_AUX1_BASE,
[CPU_ID_NIC_QMAN_ARC14] = mmNIC7_QM_ARC_AUX0_BASE,
[CPU_ID_NIC_QMAN_ARC15] = mmNIC7_QM_ARC_AUX1_BASE,
[CPU_ID_NIC_QMAN_ARC16] = mmNIC8_QM_ARC_AUX0_BASE,
[CPU_ID_NIC_QMAN_ARC17] = mmNIC8_QM_ARC_AUX1_BASE,
[CPU_ID_NIC_QMAN_ARC18] = mmNIC9_QM_ARC_AUX0_BASE,
[CPU_ID_NIC_QMAN_ARC19] = mmNIC9_QM_ARC_AUX1_BASE,
[CPU_ID_NIC_QMAN_ARC20] = mmNIC10_QM_ARC_AUX0_BASE,
[CPU_ID_NIC_QMAN_ARC21] = mmNIC10_QM_ARC_AUX1_BASE,
[CPU_ID_NIC_QMAN_ARC22] = mmNIC11_QM_ARC_AUX0_BASE,
[CPU_ID_NIC_QMAN_ARC23] = mmNIC11_QM_ARC_AUX1_BASE,
};
static const u32 gaudi2_arc_dccm_bases[NUM_ARC_CPUS] = {
[CPU_ID_SCHED_ARC0] = mmARC_FARM_ARC0_DCCM0_BASE,
[CPU_ID_SCHED_ARC1] = mmARC_FARM_ARC1_DCCM0_BASE,
[CPU_ID_SCHED_ARC2] = mmARC_FARM_ARC2_DCCM0_BASE,
[CPU_ID_SCHED_ARC3] = mmARC_FARM_ARC3_DCCM0_BASE,
[CPU_ID_SCHED_ARC4] = mmDCORE1_MME_QM_ARC_DCCM_BASE,
[CPU_ID_SCHED_ARC5] = mmDCORE3_MME_QM_ARC_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC0] = mmDCORE0_TPC0_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC1] = mmDCORE0_TPC1_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC2] = mmDCORE0_TPC2_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC3] = mmDCORE0_TPC3_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC4] = mmDCORE0_TPC4_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC5] = mmDCORE0_TPC5_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC6] = mmDCORE1_TPC0_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC7] = mmDCORE1_TPC1_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC8] = mmDCORE1_TPC2_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC9] = mmDCORE1_TPC3_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC10] = mmDCORE1_TPC4_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC11] = mmDCORE1_TPC5_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC12] = mmDCORE2_TPC0_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC13] = mmDCORE2_TPC1_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC14] = mmDCORE2_TPC2_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC15] = mmDCORE2_TPC3_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC16] = mmDCORE2_TPC4_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC17] = mmDCORE2_TPC5_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC18] = mmDCORE3_TPC0_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC19] = mmDCORE3_TPC1_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC20] = mmDCORE3_TPC2_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC21] = mmDCORE3_TPC3_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC22] = mmDCORE3_TPC4_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC23] = mmDCORE3_TPC5_QM_DCCM_BASE,
[CPU_ID_TPC_QMAN_ARC24] = mmDCORE0_TPC6_QM_DCCM_BASE,
[CPU_ID_MME_QMAN_ARC0] = mmDCORE0_MME_QM_ARC_DCCM_BASE,
[CPU_ID_MME_QMAN_ARC1] = mmDCORE2_MME_QM_ARC_DCCM_BASE,
[CPU_ID_EDMA_QMAN_ARC0] = mmDCORE0_EDMA0_QM_DCCM_BASE,
[CPU_ID_EDMA_QMAN_ARC1] = mmDCORE0_EDMA1_QM_DCCM_BASE,
[CPU_ID_EDMA_QMAN_ARC2] = mmDCORE1_EDMA0_QM_DCCM_BASE,
[CPU_ID_EDMA_QMAN_ARC3] = mmDCORE1_EDMA1_QM_DCCM_BASE,
[CPU_ID_EDMA_QMAN_ARC4] = mmDCORE2_EDMA0_QM_DCCM_BASE,
[CPU_ID_EDMA_QMAN_ARC5] = mmDCORE2_EDMA1_QM_DCCM_BASE,
[CPU_ID_EDMA_QMAN_ARC6] = mmDCORE3_EDMA0_QM_DCCM_BASE,
[CPU_ID_EDMA_QMAN_ARC7] = mmDCORE3_EDMA1_QM_DCCM_BASE,
[CPU_ID_PDMA_QMAN_ARC0] = mmPDMA0_QM_ARC_DCCM_BASE,
[CPU_ID_PDMA_QMAN_ARC1] = mmPDMA1_QM_ARC_DCCM_BASE,
[CPU_ID_ROT_QMAN_ARC0] = mmROT0_QM_ARC_DCCM_BASE,
[CPU_ID_ROT_QMAN_ARC1] = mmROT1_QM_ARC_DCCM_BASE,
[CPU_ID_NIC_QMAN_ARC0] = mmNIC0_QM_DCCM0_BASE,
[CPU_ID_NIC_QMAN_ARC1] = mmNIC0_QM_DCCM1_BASE,
[CPU_ID_NIC_QMAN_ARC2] = mmNIC1_QM_DCCM0_BASE,
[CPU_ID_NIC_QMAN_ARC3] = mmNIC1_QM_DCCM1_BASE,
[CPU_ID_NIC_QMAN_ARC4] = mmNIC2_QM_DCCM0_BASE,
[CPU_ID_NIC_QMAN_ARC5] = mmNIC2_QM_DCCM1_BASE,
[CPU_ID_NIC_QMAN_ARC6] = mmNIC3_QM_DCCM0_BASE,
[CPU_ID_NIC_QMAN_ARC7] = mmNIC3_QM_DCCM1_BASE,
[CPU_ID_NIC_QMAN_ARC8] = mmNIC4_QM_DCCM0_BASE,
[CPU_ID_NIC_QMAN_ARC9] = mmNIC4_QM_DCCM1_BASE,
[CPU_ID_NIC_QMAN_ARC10] = mmNIC5_QM_DCCM0_BASE,
[CPU_ID_NIC_QMAN_ARC11] = mmNIC5_QM_DCCM1_BASE,
[CPU_ID_NIC_QMAN_ARC12] = mmNIC6_QM_DCCM0_BASE,
[CPU_ID_NIC_QMAN_ARC13] = mmNIC6_QM_DCCM1_BASE,
[CPU_ID_NIC_QMAN_ARC14] = mmNIC7_QM_DCCM0_BASE,
[CPU_ID_NIC_QMAN_ARC15] = mmNIC7_QM_DCCM1_BASE,
[CPU_ID_NIC_QMAN_ARC16] = mmNIC8_QM_DCCM0_BASE,
[CPU_ID_NIC_QMAN_ARC17] = mmNIC8_QM_DCCM1_BASE,
[CPU_ID_NIC_QMAN_ARC18] = mmNIC9_QM_DCCM0_BASE,
[CPU_ID_NIC_QMAN_ARC19] = mmNIC9_QM_DCCM1_BASE,
[CPU_ID_NIC_QMAN_ARC20] = mmNIC10_QM_DCCM0_BASE,
[CPU_ID_NIC_QMAN_ARC21] = mmNIC10_QM_DCCM1_BASE,
[CPU_ID_NIC_QMAN_ARC22] = mmNIC11_QM_DCCM0_BASE,
[CPU_ID_NIC_QMAN_ARC23] = mmNIC11_QM_DCCM1_BASE,
};
const u32 gaudi2_mme_ctrl_lo_blocks_bases[MME_ID_SIZE] = {
[MME_ID_DCORE0] = mmDCORE0_MME_CTRL_LO_BASE,
[MME_ID_DCORE1] = mmDCORE1_MME_CTRL_LO_BASE,
[MME_ID_DCORE2] = mmDCORE2_MME_CTRL_LO_BASE,
[MME_ID_DCORE3] = mmDCORE3_MME_CTRL_LO_BASE,
};
static const u32 gaudi2_queue_id_to_arc_id[GAUDI2_QUEUE_ID_SIZE] = {
[GAUDI2_QUEUE_ID_PDMA_0_0] = CPU_ID_PDMA_QMAN_ARC0,
[GAUDI2_QUEUE_ID_PDMA_0_1] = CPU_ID_PDMA_QMAN_ARC0,
[GAUDI2_QUEUE_ID_PDMA_0_2] = CPU_ID_PDMA_QMAN_ARC0,
[GAUDI2_QUEUE_ID_PDMA_0_3] = CPU_ID_PDMA_QMAN_ARC0,
[GAUDI2_QUEUE_ID_PDMA_1_0] = CPU_ID_PDMA_QMAN_ARC1,
[GAUDI2_QUEUE_ID_PDMA_1_1] = CPU_ID_PDMA_QMAN_ARC1,
[GAUDI2_QUEUE_ID_PDMA_1_2] = CPU_ID_PDMA_QMAN_ARC1,
[GAUDI2_QUEUE_ID_PDMA_1_3] = CPU_ID_PDMA_QMAN_ARC1,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0] = CPU_ID_EDMA_QMAN_ARC0,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1] = CPU_ID_EDMA_QMAN_ARC0,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2] = CPU_ID_EDMA_QMAN_ARC0,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3] = CPU_ID_EDMA_QMAN_ARC0,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0] = CPU_ID_EDMA_QMAN_ARC1,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1] = CPU_ID_EDMA_QMAN_ARC1,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2] = CPU_ID_EDMA_QMAN_ARC1,
[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3] = CPU_ID_EDMA_QMAN_ARC1,
[GAUDI2_QUEUE_ID_DCORE0_MME_0_0] = CPU_ID_MME_QMAN_ARC0,
[GAUDI2_QUEUE_ID_DCORE0_MME_0_1] = CPU_ID_MME_QMAN_ARC0,
[GAUDI2_QUEUE_ID_DCORE0_MME_0_2] = CPU_ID_MME_QMAN_ARC0,
[GAUDI2_QUEUE_ID_DCORE0_MME_0_3] = CPU_ID_MME_QMAN_ARC0,
[GAUDI2_QUEUE_ID_DCORE0_TPC_0_0] = CPU_ID_TPC_QMAN_ARC0,
[GAUDI2_QUEUE_ID_DCORE0_TPC_0_1] = CPU_ID_TPC_QMAN_ARC0,
[GAUDI2_QUEUE_ID_DCORE0_TPC_0_2] = CPU_ID_TPC_QMAN_ARC0,
[GAUDI2_QUEUE_ID_DCORE0_TPC_0_3] = CPU_ID_TPC_QMAN_ARC0,
[GAUDI2_QUEUE_ID_DCORE0_TPC_1_0] = CPU_ID_TPC_QMAN_ARC1,
[GAUDI2_QUEUE_ID_DCORE0_TPC_1_1] = CPU_ID_TPC_QMAN_ARC1,
[GAUDI2_QUEUE_ID_DCORE0_TPC_1_2] = CPU_ID_TPC_QMAN_ARC1,
[GAUDI2_QUEUE_ID_DCORE0_TPC_1_3] = CPU_ID_TPC_QMAN_ARC1,
[GAUDI2_QUEUE_ID_DCORE0_TPC_2_0] = CPU_ID_TPC_QMAN_ARC2,
[GAUDI2_QUEUE_ID_DCORE0_TPC_2_1] = CPU_ID_TPC_QMAN_ARC2,
[GAUDI2_QUEUE_ID_DCORE0_TPC_2_2] = CPU_ID_TPC_QMAN_ARC2,
[GAUDI2_QUEUE_ID_DCORE0_TPC_2_3] = CPU_ID_TPC_QMAN_ARC2,
[GAUDI2_QUEUE_ID_DCORE0_TPC_3_0] = CPU_ID_TPC_QMAN_ARC3,
[GAUDI2_QUEUE_ID_DCORE0_TPC_3_1] = CPU_ID_TPC_QMAN_ARC3,
[GAUDI2_QUEUE_ID_DCORE0_TPC_3_2] = CPU_ID_TPC_QMAN_ARC3,
[GAUDI2_QUEUE_ID_DCORE0_TPC_3_3] = CPU_ID_TPC_QMAN_ARC3,
[GAUDI2_QUEUE_ID_DCORE0_TPC_4_0] = CPU_ID_TPC_QMAN_ARC4,
[GAUDI2_QUEUE_ID_DCORE0_TPC_4_1] = CPU_ID_TPC_QMAN_ARC4,
[GAUDI2_QUEUE_ID_DCORE0_TPC_4_2] = CPU_ID_TPC_QMAN_ARC4,
[GAUDI2_QUEUE_ID_DCORE0_TPC_4_3] = CPU_ID_TPC_QMAN_ARC4,
[GAUDI2_QUEUE_ID_DCORE0_TPC_5_0] = CPU_ID_TPC_QMAN_ARC5,
[GAUDI2_QUEUE_ID_DCORE0_TPC_5_1] = CPU_ID_TPC_QMAN_ARC5,
[GAUDI2_QUEUE_ID_DCORE0_TPC_5_2] = CPU_ID_TPC_QMAN_ARC5,
[GAUDI2_QUEUE_ID_DCORE0_TPC_5_3] = CPU_ID_TPC_QMAN_ARC5,
[GAUDI2_QUEUE_ID_DCORE0_TPC_6_0] = CPU_ID_TPC_QMAN_ARC24,
[GAUDI2_QUEUE_ID_DCORE0_TPC_6_1] = CPU_ID_TPC_QMAN_ARC24,
[GAUDI2_QUEUE_ID_DCORE0_TPC_6_2] = CPU_ID_TPC_QMAN_ARC24,
[GAUDI2_QUEUE_ID_DCORE0_TPC_6_3] = CPU_ID_TPC_QMAN_ARC24,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0] = CPU_ID_EDMA_QMAN_ARC2,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1] = CPU_ID_EDMA_QMAN_ARC2,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2] = CPU_ID_EDMA_QMAN_ARC2,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3] = CPU_ID_EDMA_QMAN_ARC2,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0] = CPU_ID_EDMA_QMAN_ARC3,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1] = CPU_ID_EDMA_QMAN_ARC3,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2] = CPU_ID_EDMA_QMAN_ARC3,
[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3] = CPU_ID_EDMA_QMAN_ARC3,
[GAUDI2_QUEUE_ID_DCORE1_MME_0_0] = CPU_ID_SCHED_ARC4,
[GAUDI2_QUEUE_ID_DCORE1_MME_0_1] = CPU_ID_SCHED_ARC4,
[GAUDI2_QUEUE_ID_DCORE1_MME_0_2] = CPU_ID_SCHED_ARC4,
[GAUDI2_QUEUE_ID_DCORE1_MME_0_3] = CPU_ID_SCHED_ARC4,
[GAUDI2_QUEUE_ID_DCORE1_TPC_0_0] = CPU_ID_TPC_QMAN_ARC6,
[GAUDI2_QUEUE_ID_DCORE1_TPC_0_1] = CPU_ID_TPC_QMAN_ARC6,
[GAUDI2_QUEUE_ID_DCORE1_TPC_0_2] = CPU_ID_TPC_QMAN_ARC6,
[GAUDI2_QUEUE_ID_DCORE1_TPC_0_3] = CPU_ID_TPC_QMAN_ARC6,
[GAUDI2_QUEUE_ID_DCORE1_TPC_1_0] = CPU_ID_TPC_QMAN_ARC7,
[GAUDI2_QUEUE_ID_DCORE1_TPC_1_1] = CPU_ID_TPC_QMAN_ARC7,
[GAUDI2_QUEUE_ID_DCORE1_TPC_1_2] = CPU_ID_TPC_QMAN_ARC7,
[GAUDI2_QUEUE_ID_DCORE1_TPC_1_3] = CPU_ID_TPC_QMAN_ARC7,
[GAUDI2_QUEUE_ID_DCORE1_TPC_2_0] = CPU_ID_TPC_QMAN_ARC8,
[GAUDI2_QUEUE_ID_DCORE1_TPC_2_1] = CPU_ID_TPC_QMAN_ARC8,
[GAUDI2_QUEUE_ID_DCORE1_TPC_2_2] = CPU_ID_TPC_QMAN_ARC8,
[GAUDI2_QUEUE_ID_DCORE1_TPC_2_3] = CPU_ID_TPC_QMAN_ARC8,
[GAUDI2_QUEUE_ID_DCORE1_TPC_3_0] = CPU_ID_TPC_QMAN_ARC9,
[GAUDI2_QUEUE_ID_DCORE1_TPC_3_1] = CPU_ID_TPC_QMAN_ARC9,
[GAUDI2_QUEUE_ID_DCORE1_TPC_3_2] = CPU_ID_TPC_QMAN_ARC9,
[GAUDI2_QUEUE_ID_DCORE1_TPC_3_3] = CPU_ID_TPC_QMAN_ARC9,
[GAUDI2_QUEUE_ID_DCORE1_TPC_4_0] = CPU_ID_TPC_QMAN_ARC10,
[GAUDI2_QUEUE_ID_DCORE1_TPC_4_1] = CPU_ID_TPC_QMAN_ARC10,
[GAUDI2_QUEUE_ID_DCORE1_TPC_4_2] = CPU_ID_TPC_QMAN_ARC10,
[GAUDI2_QUEUE_ID_DCORE1_TPC_4_3] = CPU_ID_TPC_QMAN_ARC10,
[GAUDI2_QUEUE_ID_DCORE1_TPC_5_0] = CPU_ID_TPC_QMAN_ARC11,
[GAUDI2_QUEUE_ID_DCORE1_TPC_5_1] = CPU_ID_TPC_QMAN_ARC11,
[GAUDI2_QUEUE_ID_DCORE1_TPC_5_2] = CPU_ID_TPC_QMAN_ARC11,
[GAUDI2_QUEUE_ID_DCORE1_TPC_5_3] = CPU_ID_TPC_QMAN_ARC11,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0] = CPU_ID_EDMA_QMAN_ARC4,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1] = CPU_ID_EDMA_QMAN_ARC4,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2] = CPU_ID_EDMA_QMAN_ARC4,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3] = CPU_ID_EDMA_QMAN_ARC4,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0] = CPU_ID_EDMA_QMAN_ARC5,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1] = CPU_ID_EDMA_QMAN_ARC5,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2] = CPU_ID_EDMA_QMAN_ARC5,
[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3] = CPU_ID_EDMA_QMAN_ARC5,
[GAUDI2_QUEUE_ID_DCORE2_MME_0_0] = CPU_ID_MME_QMAN_ARC1,
[GAUDI2_QUEUE_ID_DCORE2_MME_0_1] = CPU_ID_MME_QMAN_ARC1,
[GAUDI2_QUEUE_ID_DCORE2_MME_0_2] = CPU_ID_MME_QMAN_ARC1,
[GAUDI2_QUEUE_ID_DCORE2_MME_0_3] = CPU_ID_MME_QMAN_ARC1,
[GAUDI2_QUEUE_ID_DCORE2_TPC_0_0] = CPU_ID_TPC_QMAN_ARC12,
[GAUDI2_QUEUE_ID_DCORE2_TPC_0_1] = CPU_ID_TPC_QMAN_ARC12,
[GAUDI2_QUEUE_ID_DCORE2_TPC_0_2] = CPU_ID_TPC_QMAN_ARC12,
[GAUDI2_QUEUE_ID_DCORE2_TPC_0_3] = CPU_ID_TPC_QMAN_ARC12,
[GAUDI2_QUEUE_ID_DCORE2_TPC_1_0] = CPU_ID_TPC_QMAN_ARC13,
[GAUDI2_QUEUE_ID_DCORE2_TPC_1_1] = CPU_ID_TPC_QMAN_ARC13,
[GAUDI2_QUEUE_ID_DCORE2_TPC_1_2] = CPU_ID_TPC_QMAN_ARC13,
[GAUDI2_QUEUE_ID_DCORE2_TPC_1_3] = CPU_ID_TPC_QMAN_ARC13,
[GAUDI2_QUEUE_ID_DCORE2_TPC_2_0] = CPU_ID_TPC_QMAN_ARC14,
[GAUDI2_QUEUE_ID_DCORE2_TPC_2_1] = CPU_ID_TPC_QMAN_ARC14,
[GAUDI2_QUEUE_ID_DCORE2_TPC_2_2] = CPU_ID_TPC_QMAN_ARC14,
[GAUDI2_QUEUE_ID_DCORE2_TPC_2_3] = CPU_ID_TPC_QMAN_ARC14,
[GAUDI2_QUEUE_ID_DCORE2_TPC_3_0] = CPU_ID_TPC_QMAN_ARC15,
[GAUDI2_QUEUE_ID_DCORE2_TPC_3_1] = CPU_ID_TPC_QMAN_ARC15,
[GAUDI2_QUEUE_ID_DCORE2_TPC_3_2] = CPU_ID_TPC_QMAN_ARC15,
[GAUDI2_QUEUE_ID_DCORE2_TPC_3_3] = CPU_ID_TPC_QMAN_ARC15,
[GAUDI2_QUEUE_ID_DCORE2_TPC_4_0] = CPU_ID_TPC_QMAN_ARC16,
[GAUDI2_QUEUE_ID_DCORE2_TPC_4_1] = CPU_ID_TPC_QMAN_ARC16,
[GAUDI2_QUEUE_ID_DCORE2_TPC_4_2] = CPU_ID_TPC_QMAN_ARC16,
[GAUDI2_QUEUE_ID_DCORE2_TPC_4_3] = CPU_ID_TPC_QMAN_ARC16,
[GAUDI2_QUEUE_ID_DCORE2_TPC_5_0] = CPU_ID_TPC_QMAN_ARC17,
[GAUDI2_QUEUE_ID_DCORE2_TPC_5_1] = CPU_ID_TPC_QMAN_ARC17,
[GAUDI2_QUEUE_ID_DCORE2_TPC_5_2] = CPU_ID_TPC_QMAN_ARC17,
[GAUDI2_QUEUE_ID_DCORE2_TPC_5_3] = CPU_ID_TPC_QMAN_ARC17,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0] = CPU_ID_EDMA_QMAN_ARC6,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1] = CPU_ID_EDMA_QMAN_ARC6,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2] = CPU_ID_EDMA_QMAN_ARC6,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3] = CPU_ID_EDMA_QMAN_ARC6,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0] = CPU_ID_EDMA_QMAN_ARC7,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1] = CPU_ID_EDMA_QMAN_ARC7,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2] = CPU_ID_EDMA_QMAN_ARC7,
[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3] = CPU_ID_EDMA_QMAN_ARC7,
[GAUDI2_QUEUE_ID_DCORE3_MME_0_0] = CPU_ID_SCHED_ARC5,
[GAUDI2_QUEUE_ID_DCORE3_MME_0_1] = CPU_ID_SCHED_ARC5,
[GAUDI2_QUEUE_ID_DCORE3_MME_0_2] = CPU_ID_SCHED_ARC5,
[GAUDI2_QUEUE_ID_DCORE3_MME_0_3] = CPU_ID_SCHED_ARC5,
[GAUDI2_QUEUE_ID_DCORE3_TPC_0_0] = CPU_ID_TPC_QMAN_ARC18,
[GAUDI2_QUEUE_ID_DCORE3_TPC_0_1] = CPU_ID_TPC_QMAN_ARC18,
[GAUDI2_QUEUE_ID_DCORE3_TPC_0_2] = CPU_ID_TPC_QMAN_ARC18,
[GAUDI2_QUEUE_ID_DCORE3_TPC_0_3] = CPU_ID_TPC_QMAN_ARC18,
[GAUDI2_QUEUE_ID_DCORE3_TPC_1_0] = CPU_ID_TPC_QMAN_ARC19,
[GAUDI2_QUEUE_ID_DCORE3_TPC_1_1] = CPU_ID_TPC_QMAN_ARC19,
[GAUDI2_QUEUE_ID_DCORE3_TPC_1_2] = CPU_ID_TPC_QMAN_ARC19,
[GAUDI2_QUEUE_ID_DCORE3_TPC_1_3] = CPU_ID_TPC_QMAN_ARC19,
[GAUDI2_QUEUE_ID_DCORE3_TPC_2_0] = CPU_ID_TPC_QMAN_ARC20,
[GAUDI2_QUEUE_ID_DCORE3_TPC_2_1] = CPU_ID_TPC_QMAN_ARC20,
[GAUDI2_QUEUE_ID_DCORE3_TPC_2_2] = CPU_ID_TPC_QMAN_ARC20,
[GAUDI2_QUEUE_ID_DCORE3_TPC_2_3] = CPU_ID_TPC_QMAN_ARC20,
[GAUDI2_QUEUE_ID_DCORE3_TPC_3_0] = CPU_ID_TPC_QMAN_ARC21,
[GAUDI2_QUEUE_ID_DCORE3_TPC_3_1] = CPU_ID_TPC_QMAN_ARC21,
[GAUDI2_QUEUE_ID_DCORE3_TPC_3_2] = CPU_ID_TPC_QMAN_ARC21,
[GAUDI2_QUEUE_ID_DCORE3_TPC_3_3] = CPU_ID_TPC_QMAN_ARC21,
[GAUDI2_QUEUE_ID_DCORE3_TPC_4_0] = CPU_ID_TPC_QMAN_ARC22,
[GAUDI2_QUEUE_ID_DCORE3_TPC_4_1] = CPU_ID_TPC_QMAN_ARC22,
[GAUDI2_QUEUE_ID_DCORE3_TPC_4_2] = CPU_ID_TPC_QMAN_ARC22,
[GAUDI2_QUEUE_ID_DCORE3_TPC_4_3] = CPU_ID_TPC_QMAN_ARC22,
[GAUDI2_QUEUE_ID_DCORE3_TPC_5_0] = CPU_ID_TPC_QMAN_ARC23,
[GAUDI2_QUEUE_ID_DCORE3_TPC_5_1] = CPU_ID_TPC_QMAN_ARC23,
[GAUDI2_QUEUE_ID_DCORE3_TPC_5_2] = CPU_ID_TPC_QMAN_ARC23,
[GAUDI2_QUEUE_ID_DCORE3_TPC_5_3] = CPU_ID_TPC_QMAN_ARC23,
[GAUDI2_QUEUE_ID_NIC_0_0] = CPU_ID_NIC_QMAN_ARC0,
[GAUDI2_QUEUE_ID_NIC_0_1] = CPU_ID_NIC_QMAN_ARC0,
[GAUDI2_QUEUE_ID_NIC_0_2] = CPU_ID_NIC_QMAN_ARC0,
[GAUDI2_QUEUE_ID_NIC_0_3] = CPU_ID_NIC_QMAN_ARC0,
[GAUDI2_QUEUE_ID_NIC_1_0] = CPU_ID_NIC_QMAN_ARC1,
[GAUDI2_QUEUE_ID_NIC_1_1] = CPU_ID_NIC_QMAN_ARC1,
[GAUDI2_QUEUE_ID_NIC_1_2] = CPU_ID_NIC_QMAN_ARC1,
[GAUDI2_QUEUE_ID_NIC_1_3] = CPU_ID_NIC_QMAN_ARC1,
[GAUDI2_QUEUE_ID_NIC_2_0] = CPU_ID_NIC_QMAN_ARC2,
[GAUDI2_QUEUE_ID_NIC_2_1] = CPU_ID_NIC_QMAN_ARC2,
[GAUDI2_QUEUE_ID_NIC_2_2] = CPU_ID_NIC_QMAN_ARC2,
[GAUDI2_QUEUE_ID_NIC_2_3] = CPU_ID_NIC_QMAN_ARC2,
[GAUDI2_QUEUE_ID_NIC_3_0] = CPU_ID_NIC_QMAN_ARC3,
[GAUDI2_QUEUE_ID_NIC_3_1] = CPU_ID_NIC_QMAN_ARC3,
[GAUDI2_QUEUE_ID_NIC_3_2] = CPU_ID_NIC_QMAN_ARC3,
[GAUDI2_QUEUE_ID_NIC_3_3] = CPU_ID_NIC_QMAN_ARC3,
[GAUDI2_QUEUE_ID_NIC_4_0] = CPU_ID_NIC_QMAN_ARC4,
[GAUDI2_QUEUE_ID_NIC_4_1] = CPU_ID_NIC_QMAN_ARC4,
[GAUDI2_QUEUE_ID_NIC_4_2] = CPU_ID_NIC_QMAN_ARC4,
[GAUDI2_QUEUE_ID_NIC_4_3] = CPU_ID_NIC_QMAN_ARC4,
[GAUDI2_QUEUE_ID_NIC_5_0] = CPU_ID_NIC_QMAN_ARC5,
[GAUDI2_QUEUE_ID_NIC_5_1] = CPU_ID_NIC_QMAN_ARC5,
[GAUDI2_QUEUE_ID_NIC_5_2] = CPU_ID_NIC_QMAN_ARC5,
[GAUDI2_QUEUE_ID_NIC_5_3] = CPU_ID_NIC_QMAN_ARC5,
[GAUDI2_QUEUE_ID_NIC_6_0] = CPU_ID_NIC_QMAN_ARC6,
[GAUDI2_QUEUE_ID_NIC_6_1] = CPU_ID_NIC_QMAN_ARC6,
[GAUDI2_QUEUE_ID_NIC_6_2] = CPU_ID_NIC_QMAN_ARC6,
[GAUDI2_QUEUE_ID_NIC_6_3] = CPU_ID_NIC_QMAN_ARC6,
[GAUDI2_QUEUE_ID_NIC_7_0] = CPU_ID_NIC_QMAN_ARC7,
[GAUDI2_QUEUE_ID_NIC_7_1] = CPU_ID_NIC_QMAN_ARC7,
[GAUDI2_QUEUE_ID_NIC_7_2] = CPU_ID_NIC_QMAN_ARC7,
[GAUDI2_QUEUE_ID_NIC_7_3] = CPU_ID_NIC_QMAN_ARC7,
[GAUDI2_QUEUE_ID_NIC_8_0] = CPU_ID_NIC_QMAN_ARC8,
[GAUDI2_QUEUE_ID_NIC_8_1] = CPU_ID_NIC_QMAN_ARC8,
[GAUDI2_QUEUE_ID_NIC_8_2] = CPU_ID_NIC_QMAN_ARC8,
[GAUDI2_QUEUE_ID_NIC_8_3] = CPU_ID_NIC_QMAN_ARC8,
[GAUDI2_QUEUE_ID_NIC_9_0] = CPU_ID_NIC_QMAN_ARC9,
[GAUDI2_QUEUE_ID_NIC_9_1] = CPU_ID_NIC_QMAN_ARC9,
[GAUDI2_QUEUE_ID_NIC_9_2] = CPU_ID_NIC_QMAN_ARC9,
[GAUDI2_QUEUE_ID_NIC_9_3] = CPU_ID_NIC_QMAN_ARC9,
[GAUDI2_QUEUE_ID_NIC_10_0] = CPU_ID_NIC_QMAN_ARC10,
[GAUDI2_QUEUE_ID_NIC_10_1] = CPU_ID_NIC_QMAN_ARC10,
[GAUDI2_QUEUE_ID_NIC_10_2] = CPU_ID_NIC_QMAN_ARC10,
[GAUDI2_QUEUE_ID_NIC_10_3] = CPU_ID_NIC_QMAN_ARC10,
[GAUDI2_QUEUE_ID_NIC_11_0] = CPU_ID_NIC_QMAN_ARC11,
[GAUDI2_QUEUE_ID_NIC_11_1] = CPU_ID_NIC_QMAN_ARC11,
[GAUDI2_QUEUE_ID_NIC_11_2] = CPU_ID_NIC_QMAN_ARC11,
[GAUDI2_QUEUE_ID_NIC_11_3] = CPU_ID_NIC_QMAN_ARC11,
[GAUDI2_QUEUE_ID_NIC_12_0] = CPU_ID_NIC_QMAN_ARC12,
[GAUDI2_QUEUE_ID_NIC_12_1] = CPU_ID_NIC_QMAN_ARC12,
[GAUDI2_QUEUE_ID_NIC_12_2] = CPU_ID_NIC_QMAN_ARC12,
[GAUDI2_QUEUE_ID_NIC_12_3] = CPU_ID_NIC_QMAN_ARC12,
[GAUDI2_QUEUE_ID_NIC_13_0] = CPU_ID_NIC_QMAN_ARC13,
[GAUDI2_QUEUE_ID_NIC_13_1] = CPU_ID_NIC_QMAN_ARC13,
[GAUDI2_QUEUE_ID_NIC_13_2] = CPU_ID_NIC_QMAN_ARC13,
[GAUDI2_QUEUE_ID_NIC_13_3] = CPU_ID_NIC_QMAN_ARC13,
[GAUDI2_QUEUE_ID_NIC_14_0] = CPU_ID_NIC_QMAN_ARC14,
[GAUDI2_QUEUE_ID_NIC_14_1] = CPU_ID_NIC_QMAN_ARC14,
[GAUDI2_QUEUE_ID_NIC_14_2] = CPU_ID_NIC_QMAN_ARC14,
[GAUDI2_QUEUE_ID_NIC_14_3] = CPU_ID_NIC_QMAN_ARC14,
[GAUDI2_QUEUE_ID_NIC_15_0] = CPU_ID_NIC_QMAN_ARC15,
[GAUDI2_QUEUE_ID_NIC_15_1] = CPU_ID_NIC_QMAN_ARC15,
[GAUDI2_QUEUE_ID_NIC_15_2] = CPU_ID_NIC_QMAN_ARC15,
[GAUDI2_QUEUE_ID_NIC_15_3] = CPU_ID_NIC_QMAN_ARC15,
[GAUDI2_QUEUE_ID_NIC_16_0] = CPU_ID_NIC_QMAN_ARC16,
[GAUDI2_QUEUE_ID_NIC_16_1] = CPU_ID_NIC_QMAN_ARC16,
[GAUDI2_QUEUE_ID_NIC_16_2] = CPU_ID_NIC_QMAN_ARC16,
[GAUDI2_QUEUE_ID_NIC_16_3] = CPU_ID_NIC_QMAN_ARC16,
[GAUDI2_QUEUE_ID_NIC_17_0] = CPU_ID_NIC_QMAN_ARC17,
[GAUDI2_QUEUE_ID_NIC_17_1] = CPU_ID_NIC_QMAN_ARC17,
[GAUDI2_QUEUE_ID_NIC_17_2] = CPU_ID_NIC_QMAN_ARC17,
[GAUDI2_QUEUE_ID_NIC_17_3] = CPU_ID_NIC_QMAN_ARC17,
[GAUDI2_QUEUE_ID_NIC_18_0] = CPU_ID_NIC_QMAN_ARC18,
[GAUDI2_QUEUE_ID_NIC_18_1] = CPU_ID_NIC_QMAN_ARC18,
[GAUDI2_QUEUE_ID_NIC_18_2] = CPU_ID_NIC_QMAN_ARC18,
[GAUDI2_QUEUE_ID_NIC_18_3] = CPU_ID_NIC_QMAN_ARC18,
[GAUDI2_QUEUE_ID_NIC_19_0] = CPU_ID_NIC_QMAN_ARC19,
[GAUDI2_QUEUE_ID_NIC_19_1] = CPU_ID_NIC_QMAN_ARC19,
[GAUDI2_QUEUE_ID_NIC_19_2] = CPU_ID_NIC_QMAN_ARC19,
[GAUDI2_QUEUE_ID_NIC_19_3] = CPU_ID_NIC_QMAN_ARC19,
[GAUDI2_QUEUE_ID_NIC_20_0] = CPU_ID_NIC_QMAN_ARC20,
[GAUDI2_QUEUE_ID_NIC_20_1] = CPU_ID_NIC_QMAN_ARC20,
[GAUDI2_QUEUE_ID_NIC_20_2] = CPU_ID_NIC_QMAN_ARC20,
[GAUDI2_QUEUE_ID_NIC_20_3] = CPU_ID_NIC_QMAN_ARC20,
[GAUDI2_QUEUE_ID_NIC_21_0] = CPU_ID_NIC_QMAN_ARC21,
[GAUDI2_QUEUE_ID_NIC_21_1] = CPU_ID_NIC_QMAN_ARC21,
[GAUDI2_QUEUE_ID_NIC_21_2] = CPU_ID_NIC_QMAN_ARC21,
[GAUDI2_QUEUE_ID_NIC_21_3] = CPU_ID_NIC_QMAN_ARC21,
[GAUDI2_QUEUE_ID_NIC_22_0] = CPU_ID_NIC_QMAN_ARC22,
[GAUDI2_QUEUE_ID_NIC_22_1] = CPU_ID_NIC_QMAN_ARC22,
[GAUDI2_QUEUE_ID_NIC_22_2] = CPU_ID_NIC_QMAN_ARC22,
[GAUDI2_QUEUE_ID_NIC_22_3] = CPU_ID_NIC_QMAN_ARC22,
[GAUDI2_QUEUE_ID_NIC_23_0] = CPU_ID_NIC_QMAN_ARC23,
[GAUDI2_QUEUE_ID_NIC_23_1] = CPU_ID_NIC_QMAN_ARC23,
[GAUDI2_QUEUE_ID_NIC_23_2] = CPU_ID_NIC_QMAN_ARC23,
[GAUDI2_QUEUE_ID_NIC_23_3] = CPU_ID_NIC_QMAN_ARC23,
[GAUDI2_QUEUE_ID_ROT_0_0] = CPU_ID_ROT_QMAN_ARC0,
[GAUDI2_QUEUE_ID_ROT_0_1] = CPU_ID_ROT_QMAN_ARC0,
[GAUDI2_QUEUE_ID_ROT_0_2] = CPU_ID_ROT_QMAN_ARC0,
[GAUDI2_QUEUE_ID_ROT_0_3] = CPU_ID_ROT_QMAN_ARC0,
[GAUDI2_QUEUE_ID_ROT_1_0] = CPU_ID_ROT_QMAN_ARC1,
[GAUDI2_QUEUE_ID_ROT_1_1] = CPU_ID_ROT_QMAN_ARC1,
[GAUDI2_QUEUE_ID_ROT_1_2] = CPU_ID_ROT_QMAN_ARC1,
[GAUDI2_QUEUE_ID_ROT_1_3] = CPU_ID_ROT_QMAN_ARC1
};
const u32 gaudi2_dma_core_blocks_bases[DMA_CORE_ID_SIZE] = {
[DMA_CORE_ID_PDMA0] = mmPDMA0_CORE_BASE,
[DMA_CORE_ID_PDMA1] = mmPDMA1_CORE_BASE,
[DMA_CORE_ID_EDMA0] = mmDCORE0_EDMA0_CORE_BASE,
[DMA_CORE_ID_EDMA1] = mmDCORE0_EDMA1_CORE_BASE,
[DMA_CORE_ID_EDMA2] = mmDCORE1_EDMA0_CORE_BASE,
[DMA_CORE_ID_EDMA3] = mmDCORE1_EDMA1_CORE_BASE,
[DMA_CORE_ID_EDMA4] = mmDCORE2_EDMA0_CORE_BASE,
[DMA_CORE_ID_EDMA5] = mmDCORE2_EDMA1_CORE_BASE,
[DMA_CORE_ID_EDMA6] = mmDCORE3_EDMA0_CORE_BASE,
[DMA_CORE_ID_EDMA7] = mmDCORE3_EDMA1_CORE_BASE,
[DMA_CORE_ID_KDMA] = mmARC_FARM_KDMA_BASE
};
const u32 gaudi2_mme_acc_blocks_bases[MME_ID_SIZE] = {
[MME_ID_DCORE0] = mmDCORE0_MME_ACC_BASE,
[MME_ID_DCORE1] = mmDCORE1_MME_ACC_BASE,
[MME_ID_DCORE2] = mmDCORE2_MME_ACC_BASE,
[MME_ID_DCORE3] = mmDCORE3_MME_ACC_BASE
};
static const u32 gaudi2_tpc_cfg_blocks_bases[TPC_ID_SIZE] = {
[TPC_ID_DCORE0_TPC0] = mmDCORE0_TPC0_CFG_BASE,
[TPC_ID_DCORE0_TPC1] = mmDCORE0_TPC1_CFG_BASE,
[TPC_ID_DCORE0_TPC2] = mmDCORE0_TPC2_CFG_BASE,
[TPC_ID_DCORE0_TPC3] = mmDCORE0_TPC3_CFG_BASE,
[TPC_ID_DCORE0_TPC4] = mmDCORE0_TPC4_CFG_BASE,
[TPC_ID_DCORE0_TPC5] = mmDCORE0_TPC5_CFG_BASE,
[TPC_ID_DCORE1_TPC0] = mmDCORE1_TPC0_CFG_BASE,
[TPC_ID_DCORE1_TPC1] = mmDCORE1_TPC1_CFG_BASE,
[TPC_ID_DCORE1_TPC2] = mmDCORE1_TPC2_CFG_BASE,
[TPC_ID_DCORE1_TPC3] = mmDCORE1_TPC3_CFG_BASE,
[TPC_ID_DCORE1_TPC4] = mmDCORE1_TPC4_CFG_BASE,
[TPC_ID_DCORE1_TPC5] = mmDCORE1_TPC5_CFG_BASE,
[TPC_ID_DCORE2_TPC0] = mmDCORE2_TPC0_CFG_BASE,
[TPC_ID_DCORE2_TPC1] = mmDCORE2_TPC1_CFG_BASE,
[TPC_ID_DCORE2_TPC2] = mmDCORE2_TPC2_CFG_BASE,
[TPC_ID_DCORE2_TPC3] = mmDCORE2_TPC3_CFG_BASE,
[TPC_ID_DCORE2_TPC4] = mmDCORE2_TPC4_CFG_BASE,
[TPC_ID_DCORE2_TPC5] = mmDCORE2_TPC5_CFG_BASE,
[TPC_ID_DCORE3_TPC0] = mmDCORE3_TPC0_CFG_BASE,
[TPC_ID_DCORE3_TPC1] = mmDCORE3_TPC1_CFG_BASE,
[TPC_ID_DCORE3_TPC2] = mmDCORE3_TPC2_CFG_BASE,
[TPC_ID_DCORE3_TPC3] = mmDCORE3_TPC3_CFG_BASE,
[TPC_ID_DCORE3_TPC4] = mmDCORE3_TPC4_CFG_BASE,
[TPC_ID_DCORE3_TPC5] = mmDCORE3_TPC5_CFG_BASE,
[TPC_ID_DCORE0_TPC6] = mmDCORE0_TPC6_CFG_BASE,
};
static const u32 gaudi2_tpc_eml_cfg_blocks_bases[TPC_ID_SIZE] = {
[TPC_ID_DCORE0_TPC0] = mmDCORE0_TPC0_EML_CFG_BASE,
[TPC_ID_DCORE0_TPC1] = mmDCORE0_TPC1_EML_CFG_BASE,
[TPC_ID_DCORE0_TPC2] = mmDCORE0_TPC2_EML_CFG_BASE,
[TPC_ID_DCORE0_TPC3] = mmDCORE0_TPC3_EML_CFG_BASE,
[TPC_ID_DCORE0_TPC4] = mmDCORE0_TPC4_EML_CFG_BASE,
[TPC_ID_DCORE0_TPC5] = mmDCORE0_TPC5_EML_CFG_BASE,
[TPC_ID_DCORE1_TPC0] = mmDCORE1_TPC0_EML_CFG_BASE,
[TPC_ID_DCORE1_TPC1] = mmDCORE1_TPC1_EML_CFG_BASE,
[TPC_ID_DCORE1_TPC2] = mmDCORE1_TPC2_EML_CFG_BASE,
[TPC_ID_DCORE1_TPC3] = mmDCORE1_TPC3_EML_CFG_BASE,
[TPC_ID_DCORE1_TPC4] = mmDCORE1_TPC4_EML_CFG_BASE,
[TPC_ID_DCORE1_TPC5] = mmDCORE1_TPC5_EML_CFG_BASE,
[TPC_ID_DCORE2_TPC0] = mmDCORE2_TPC0_EML_CFG_BASE,
[TPC_ID_DCORE2_TPC1] = mmDCORE2_TPC1_EML_CFG_BASE,
[TPC_ID_DCORE2_TPC2] = mmDCORE2_TPC2_EML_CFG_BASE,
[TPC_ID_DCORE2_TPC3] = mmDCORE2_TPC3_EML_CFG_BASE,
[TPC_ID_DCORE2_TPC4] = mmDCORE2_TPC4_EML_CFG_BASE,
[TPC_ID_DCORE2_TPC5] = mmDCORE2_TPC5_EML_CFG_BASE,
[TPC_ID_DCORE3_TPC0] = mmDCORE3_TPC0_EML_CFG_BASE,
[TPC_ID_DCORE3_TPC1] = mmDCORE3_TPC1_EML_CFG_BASE,
[TPC_ID_DCORE3_TPC2] = mmDCORE3_TPC2_EML_CFG_BASE,
[TPC_ID_DCORE3_TPC3] = mmDCORE3_TPC3_EML_CFG_BASE,
[TPC_ID_DCORE3_TPC4] = mmDCORE3_TPC4_EML_CFG_BASE,
[TPC_ID_DCORE3_TPC5] = mmDCORE3_TPC5_EML_CFG_BASE,
[TPC_ID_DCORE0_TPC6] = mmDCORE0_TPC6_EML_CFG_BASE,
};
const u32 gaudi2_rot_blocks_bases[ROTATOR_ID_SIZE] = {
[ROTATOR_ID_0] = mmROT0_BASE,
[ROTATOR_ID_1] = mmROT1_BASE
};
static const u32 gaudi2_tpc_id_to_queue_id[TPC_ID_SIZE] = {
[TPC_ID_DCORE0_TPC0] = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0,
[TPC_ID_DCORE0_TPC1] = GAUDI2_QUEUE_ID_DCORE0_TPC_1_0,
[TPC_ID_DCORE0_TPC2] = GAUDI2_QUEUE_ID_DCORE0_TPC_2_0,
[TPC_ID_DCORE0_TPC3] = GAUDI2_QUEUE_ID_DCORE0_TPC_3_0,
[TPC_ID_DCORE0_TPC4] = GAUDI2_QUEUE_ID_DCORE0_TPC_4_0,
[TPC_ID_DCORE0_TPC5] = GAUDI2_QUEUE_ID_DCORE0_TPC_5_0,
[TPC_ID_DCORE1_TPC0] = GAUDI2_QUEUE_ID_DCORE1_TPC_0_0,
[TPC_ID_DCORE1_TPC1] = GAUDI2_QUEUE_ID_DCORE1_TPC_1_0,
[TPC_ID_DCORE1_TPC2] = GAUDI2_QUEUE_ID_DCORE1_TPC_2_0,
[TPC_ID_DCORE1_TPC3] = GAUDI2_QUEUE_ID_DCORE1_TPC_3_0,
[TPC_ID_DCORE1_TPC4] = GAUDI2_QUEUE_ID_DCORE1_TPC_4_0,
[TPC_ID_DCORE1_TPC5] = GAUDI2_QUEUE_ID_DCORE1_TPC_5_0,
[TPC_ID_DCORE2_TPC0] = GAUDI2_QUEUE_ID_DCORE2_TPC_0_0,
[TPC_ID_DCORE2_TPC1] = GAUDI2_QUEUE_ID_DCORE2_TPC_1_0,
[TPC_ID_DCORE2_TPC2] = GAUDI2_QUEUE_ID_DCORE2_TPC_2_0,
[TPC_ID_DCORE2_TPC3] = GAUDI2_QUEUE_ID_DCORE2_TPC_3_0,
[TPC_ID_DCORE2_TPC4] = GAUDI2_QUEUE_ID_DCORE2_TPC_4_0,
[TPC_ID_DCORE2_TPC5] = GAUDI2_QUEUE_ID_DCORE2_TPC_5_0,
[TPC_ID_DCORE3_TPC0] = GAUDI2_QUEUE_ID_DCORE3_TPC_0_0,
[TPC_ID_DCORE3_TPC1] = GAUDI2_QUEUE_ID_DCORE3_TPC_1_0,
[TPC_ID_DCORE3_TPC2] = GAUDI2_QUEUE_ID_DCORE3_TPC_2_0,
[TPC_ID_DCORE3_TPC3] = GAUDI2_QUEUE_ID_DCORE3_TPC_3_0,
[TPC_ID_DCORE3_TPC4] = GAUDI2_QUEUE_ID_DCORE3_TPC_4_0,
[TPC_ID_DCORE3_TPC5] = GAUDI2_QUEUE_ID_DCORE3_TPC_5_0,
[TPC_ID_DCORE0_TPC6] = GAUDI2_QUEUE_ID_DCORE0_TPC_6_0,
};
static const u32 gaudi2_rot_id_to_queue_id[ROTATOR_ID_SIZE] = {
[ROTATOR_ID_0] = GAUDI2_QUEUE_ID_ROT_0_0,
[ROTATOR_ID_1] = GAUDI2_QUEUE_ID_ROT_1_0,
};
static const u32 gaudi2_tpc_engine_id_to_tpc_id[] = {
[GAUDI2_DCORE0_ENGINE_ID_TPC_0] = TPC_ID_DCORE0_TPC0,
[GAUDI2_DCORE0_ENGINE_ID_TPC_1] = TPC_ID_DCORE0_TPC1,
[GAUDI2_DCORE0_ENGINE_ID_TPC_2] = TPC_ID_DCORE0_TPC2,
[GAUDI2_DCORE0_ENGINE_ID_TPC_3] = TPC_ID_DCORE0_TPC3,
[GAUDI2_DCORE0_ENGINE_ID_TPC_4] = TPC_ID_DCORE0_TPC4,
[GAUDI2_DCORE0_ENGINE_ID_TPC_5] = TPC_ID_DCORE0_TPC5,
[GAUDI2_DCORE1_ENGINE_ID_TPC_0] = TPC_ID_DCORE1_TPC0,
[GAUDI2_DCORE1_ENGINE_ID_TPC_1] = TPC_ID_DCORE1_TPC1,
[GAUDI2_DCORE1_ENGINE_ID_TPC_2] = TPC_ID_DCORE1_TPC2,
[GAUDI2_DCORE1_ENGINE_ID_TPC_3] = TPC_ID_DCORE1_TPC3,
[GAUDI2_DCORE1_ENGINE_ID_TPC_4] = TPC_ID_DCORE1_TPC4,
[GAUDI2_DCORE1_ENGINE_ID_TPC_5] = TPC_ID_DCORE1_TPC5,
[GAUDI2_DCORE2_ENGINE_ID_TPC_0] = TPC_ID_DCORE2_TPC0,
[GAUDI2_DCORE2_ENGINE_ID_TPC_1] = TPC_ID_DCORE2_TPC1,
[GAUDI2_DCORE2_ENGINE_ID_TPC_2] = TPC_ID_DCORE2_TPC2,
[GAUDI2_DCORE2_ENGINE_ID_TPC_3] = TPC_ID_DCORE2_TPC3,
[GAUDI2_DCORE2_ENGINE_ID_TPC_4] = TPC_ID_DCORE2_TPC4,
[GAUDI2_DCORE2_ENGINE_ID_TPC_5] = TPC_ID_DCORE2_TPC5,
[GAUDI2_DCORE3_ENGINE_ID_TPC_0] = TPC_ID_DCORE3_TPC0,
[GAUDI2_DCORE3_ENGINE_ID_TPC_1] = TPC_ID_DCORE3_TPC1,
[GAUDI2_DCORE3_ENGINE_ID_TPC_2] = TPC_ID_DCORE3_TPC2,
[GAUDI2_DCORE3_ENGINE_ID_TPC_3] = TPC_ID_DCORE3_TPC3,
[GAUDI2_DCORE3_ENGINE_ID_TPC_4] = TPC_ID_DCORE3_TPC4,
[GAUDI2_DCORE3_ENGINE_ID_TPC_5] = TPC_ID_DCORE3_TPC5,
	/* the PCI TPC is placed last (mapped like HW) */
[GAUDI2_DCORE0_ENGINE_ID_TPC_6] = TPC_ID_DCORE0_TPC6,
};
static const u32 gaudi2_mme_engine_id_to_mme_id[] = {
[GAUDI2_DCORE0_ENGINE_ID_MME] = MME_ID_DCORE0,
[GAUDI2_DCORE1_ENGINE_ID_MME] = MME_ID_DCORE1,
[GAUDI2_DCORE2_ENGINE_ID_MME] = MME_ID_DCORE2,
[GAUDI2_DCORE3_ENGINE_ID_MME] = MME_ID_DCORE3,
};
static const u32 gaudi2_edma_engine_id_to_edma_id[] = {
[GAUDI2_ENGINE_ID_PDMA_0] = DMA_CORE_ID_PDMA0,
[GAUDI2_ENGINE_ID_PDMA_1] = DMA_CORE_ID_PDMA1,
[GAUDI2_DCORE0_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA0,
[GAUDI2_DCORE0_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA1,
[GAUDI2_DCORE1_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA2,
[GAUDI2_DCORE1_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA3,
[GAUDI2_DCORE2_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA4,
[GAUDI2_DCORE2_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA5,
[GAUDI2_DCORE3_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA6,
[GAUDI2_DCORE3_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA7,
[GAUDI2_ENGINE_ID_KDMA] = DMA_CORE_ID_KDMA,
};
const u32 edma_stream_base[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = {
GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0,
GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0,
GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0,
GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0,
GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0,
GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0,
GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0,
GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0,
};
static const char gaudi2_vdec_irq_name[GAUDI2_VDEC_MSIX_ENTRIES][GAUDI2_MAX_STRING_LEN] = {
"gaudi2 vdec 0_0", "gaudi2 vdec 0_0 abnormal",
"gaudi2 vdec 0_1", "gaudi2 vdec 0_1 abnormal",
"gaudi2 vdec 1_0", "gaudi2 vdec 1_0 abnormal",
"gaudi2 vdec 1_1", "gaudi2 vdec 1_1 abnormal",
"gaudi2 vdec 2_0", "gaudi2 vdec 2_0 abnormal",
"gaudi2 vdec 2_1", "gaudi2 vdec 2_1 abnormal",
"gaudi2 vdec 3_0", "gaudi2 vdec 3_0 abnormal",
"gaudi2 vdec 3_1", "gaudi2 vdec 3_1 abnormal",
"gaudi2 vdec s_0", "gaudi2 vdec s_0 abnormal",
"gaudi2 vdec s_1", "gaudi2 vdec s_1 abnormal"
};
enum rtr_id {
DCORE0_RTR0,
DCORE0_RTR1,
DCORE0_RTR2,
DCORE0_RTR3,
DCORE0_RTR4,
DCORE0_RTR5,
DCORE0_RTR6,
DCORE0_RTR7,
DCORE1_RTR0,
DCORE1_RTR1,
DCORE1_RTR2,
DCORE1_RTR3,
DCORE1_RTR4,
DCORE1_RTR5,
DCORE1_RTR6,
DCORE1_RTR7,
DCORE2_RTR0,
DCORE2_RTR1,
DCORE2_RTR2,
DCORE2_RTR3,
DCORE2_RTR4,
DCORE2_RTR5,
DCORE2_RTR6,
DCORE2_RTR7,
DCORE3_RTR0,
DCORE3_RTR1,
DCORE3_RTR2,
DCORE3_RTR3,
DCORE3_RTR4,
DCORE3_RTR5,
DCORE3_RTR6,
DCORE3_RTR7,
};
static const u32 gaudi2_tpc_initiator_hbw_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = {
DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR2, DCORE0_RTR2, DCORE0_RTR3, DCORE0_RTR3,
DCORE1_RTR6, DCORE1_RTR6, DCORE1_RTR5, DCORE1_RTR5, DCORE1_RTR4, DCORE1_RTR4,
DCORE2_RTR3, DCORE2_RTR3, DCORE2_RTR2, DCORE2_RTR2, DCORE2_RTR1, DCORE2_RTR1,
DCORE3_RTR4, DCORE3_RTR4, DCORE3_RTR5, DCORE3_RTR5, DCORE3_RTR6, DCORE3_RTR6,
DCORE0_RTR0
};
static const u32 gaudi2_tpc_initiator_lbw_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = {
DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR2, DCORE0_RTR2,
DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR6, DCORE1_RTR6, DCORE1_RTR5, DCORE1_RTR5,
DCORE2_RTR2, DCORE2_RTR2, DCORE2_RTR1, DCORE2_RTR1, DCORE2_RTR0, DCORE2_RTR0,
DCORE3_RTR5, DCORE3_RTR5, DCORE3_RTR6, DCORE3_RTR6, DCORE3_RTR7, DCORE3_RTR7,
DCORE0_RTR0
};
static const u32 gaudi2_dec_initiator_hbw_rtr_id[NUMBER_OF_DEC] = {
DCORE0_RTR0, DCORE0_RTR0, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0, DCORE2_RTR0,
DCORE3_RTR7, DCORE3_RTR7, DCORE0_RTR0, DCORE0_RTR0
};
static const u32 gaudi2_dec_initiator_lbw_rtr_id[NUMBER_OF_DEC] = {
DCORE0_RTR1, DCORE0_RTR1, DCORE1_RTR6, DCORE1_RTR6, DCORE2_RTR1, DCORE2_RTR1,
DCORE3_RTR6, DCORE3_RTR6, DCORE0_RTR0, DCORE0_RTR0
};
static const u32 gaudi2_nic_initiator_hbw_rtr_id[NIC_NUMBER_OF_MACROS] = {
DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0,
DCORE2_RTR0, DCORE2_RTR0, DCORE2_RTR0, DCORE3_RTR7, DCORE3_RTR7, DCORE3_RTR7
};
static const u32 gaudi2_nic_initiator_lbw_rtr_id[NIC_NUMBER_OF_MACROS] = {
DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0,
DCORE2_RTR0, DCORE2_RTR0, DCORE2_RTR0, DCORE3_RTR7, DCORE3_RTR7, DCORE3_RTR7
};
static const u32 gaudi2_edma_initiator_hbw_sft[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = {
mmSFT0_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
mmSFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
mmSFT1_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
mmSFT1_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
mmSFT2_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
mmSFT2_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
mmSFT3_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
mmSFT3_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE
};
static const u32 gaudi2_pdma_initiator_hbw_rtr_id[NUM_OF_PDMA] = {
DCORE0_RTR0, DCORE0_RTR0
};
static const u32 gaudi2_pdma_initiator_lbw_rtr_id[NUM_OF_PDMA] = {
DCORE0_RTR2, DCORE0_RTR2
};
static const u32 gaudi2_rot_initiator_hbw_rtr_id[NUM_OF_ROT] = {
DCORE2_RTR0, DCORE3_RTR7
};
static const u32 gaudi2_rot_initiator_lbw_rtr_id[NUM_OF_ROT] = {
DCORE2_RTR2, DCORE3_RTR5
};
struct mme_initiators_rtr_id {
u32 wap0;
u32 wap1;
u32 write;
u32 read;
u32 sbte0;
u32 sbte1;
u32 sbte2;
u32 sbte3;
u32 sbte4;
};
enum mme_initiators {
MME_WAP0 = 0,
MME_WAP1,
MME_WRITE,
MME_READ,
MME_SBTE0,
MME_SBTE1,
MME_SBTE2,
MME_SBTE3,
MME_SBTE4,
MME_INITIATORS_MAX
};
static const struct mme_initiators_rtr_id
gaudi2_mme_initiator_rtr_id[NUM_OF_MME_PER_DCORE * NUM_OF_DCORES] = {
{ .wap0 = 5, .wap1 = 7, .write = 6, .read = 7,
.sbte0 = 7, .sbte1 = 4, .sbte2 = 4, .sbte3 = 5, .sbte4 = 6},
{ .wap0 = 10, .wap1 = 8, .write = 9, .read = 8,
.sbte0 = 11, .sbte1 = 11, .sbte2 = 10, .sbte3 = 9, .sbte4 = 8},
{ .wap0 = 21, .wap1 = 23, .write = 22, .read = 23,
.sbte0 = 20, .sbte1 = 20, .sbte2 = 21, .sbte3 = 22, .sbte4 = 23},
{ .wap0 = 30, .wap1 = 28, .write = 29, .read = 30,
.sbte0 = 31, .sbte1 = 31, .sbte2 = 30, .sbte3 = 29, .sbte4 = 28},
};
enum razwi_event_sources {
RAZWI_TPC,
RAZWI_MME,
RAZWI_EDMA,
RAZWI_PDMA,
RAZWI_NIC,
RAZWI_DEC,
RAZWI_ROT
};
struct hbm_mc_error_causes {
u32 mask;
char cause[50];
};
static struct hl_special_block_info gaudi2_special_blocks[] = GAUDI2_SPECIAL_BLOCKS;
/* The special blocks iterator is currently used to configure security protection bits
 * and to read global errors. Most HW blocks are addressable; those that aren't (N/A)
 * must be skipped. The following configurations are commonly used for both PB config
 * and global error reading, since currently they both share the same settings.
 * Once that changes, we must remember to use separate configurations for each.
 */
static int gaudi2_iterator_skip_block_types[] = {
GAUDI2_BLOCK_TYPE_PLL,
GAUDI2_BLOCK_TYPE_EU_BIST,
GAUDI2_BLOCK_TYPE_HBM,
GAUDI2_BLOCK_TYPE_XFT
};
static struct range gaudi2_iterator_skip_block_ranges[] = {
/* Skip all PSOC blocks except for PSOC_GLOBAL_CONF */
{mmPSOC_I2C_M0_BASE, mmPSOC_EFUSE_BASE},
{mmPSOC_BTL_BASE, mmPSOC_MSTR_IF_RR_SHRD_HBW_BASE},
/* Skip all CPU blocks except for CPU_IF */
{mmCPU_CA53_CFG_BASE, mmCPU_CA53_CFG_BASE},
{mmCPU_TIMESTAMP_BASE, mmCPU_MSTR_IF_RR_SHRD_HBW_BASE}
};
static struct hbm_mc_error_causes hbm_mc_spi[GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE] = {
{HBM_MC_SPI_TEMP_PIN_CHG_MASK, "temperature pins changed"},
{HBM_MC_SPI_THR_ENG_MASK, "temperature-based throttling engaged"},
{HBM_MC_SPI_THR_DIS_ENG_MASK, "temperature-based throttling disengaged"},
{HBM_MC_SPI_IEEE1500_COMP_MASK, "IEEE1500 op comp"},
{HBM_MC_SPI_IEEE1500_PAUSED_MASK, "IEEE1500 op paused"},
};
static const char * const hbm_mc_sei_cause[GAUDI2_NUM_OF_HBM_SEI_CAUSE] = {
[HBM_SEI_CMD_PARITY_EVEN] = "SEI C/A parity even",
[HBM_SEI_CMD_PARITY_ODD] = "SEI C/A parity odd",
[HBM_SEI_READ_ERR] = "SEI read data error",
[HBM_SEI_WRITE_DATA_PARITY_ERR] = "SEI write data parity error",
[HBM_SEI_CATTRIP] = "SEI CATTRIP asserted",
[HBM_SEI_MEM_BIST_FAIL] = "SEI memory BIST fail",
[HBM_SEI_DFI] = "SEI DFI error",
[HBM_SEI_INV_TEMP_READ_OUT] = "SEI invalid temp read",
[HBM_SEI_BIST_FAIL] = "SEI BIST fail"
};
struct mmu_spi_sei_cause {
char cause[50];
int clear_bit;
};
static const struct mmu_spi_sei_cause gaudi2_mmu_spi_sei[GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE] = {
{"page fault", 1}, /* INTERRUPT_CLR[1] */
{"page access", 1}, /* INTERRUPT_CLR[1] */
{"bypass ddr", 2}, /* INTERRUPT_CLR[2] */
{"multi hit", 2}, /* INTERRUPT_CLR[2] */
{"mmu rei0", -1}, /* no clear register bit */
{"mmu rei1", -1}, /* no clear register bit */
{"stlb rei0", -1}, /* no clear register bit */
{"stlb rei1", -1}, /* no clear register bit */
{"rr privileged write hit", 2}, /* INTERRUPT_CLR[2] */
{"rr privileged read hit", 2}, /* INTERRUPT_CLR[2] */
{"rr secure write hit", 2}, /* INTERRUPT_CLR[2] */
{"rr secure read hit", 2}, /* INTERRUPT_CLR[2] */
{"bist_fail no use", 2}, /* INTERRUPT_CLR[2] */
{"bist_fail no use", 2}, /* INTERRUPT_CLR[2] */
{"bist_fail no use", 2}, /* INTERRUPT_CLR[2] */
{"bist_fail no use", 2}, /* INTERRUPT_CLR[2] */
{"slave error", 16}, /* INTERRUPT_CLR[16] */
{"dec error", 17}, /* INTERRUPT_CLR[17] */
{"burst fifo full", 2} /* INTERRUPT_CLR[2] */
};
struct gaudi2_cache_invld_params {
u64 start_va;
u64 end_va;
u32 inv_start_val;
u32 flags;
bool range_invalidation;
};
struct gaudi2_tpc_idle_data {
struct engines_data *e;
unsigned long *mask;
bool *is_idle;
const char *tpc_fmt;
};
struct gaudi2_tpc_mmu_data {
u32 rw_asid;
};
static s64 gaudi2_state_dump_specs_props[SP_MAX] = {0};
static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, u64 val);
static bool gaudi2_is_queue_enabled(struct hl_device *hdev, u32 hw_queue_id);
static bool gaudi2_is_arc_enabled(struct hl_device *hdev, u64 arc_id);
static void gaudi2_clr_arc_id_cap(struct hl_device *hdev, u64 arc_id);
static void gaudi2_set_arc_id_cap(struct hl_device *hdev, u64 arc_id);
static void gaudi2_memset_device_lbw(struct hl_device *hdev, u32 addr, u32 size, u32 val);
static int gaudi2_send_job_to_kdma(struct hl_device *hdev, u64 src_addr, u64 dst_addr, u32 size,
bool is_memset);
static bool gaudi2_get_tpc_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
struct engines_data *e);
static bool gaudi2_get_mme_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
struct engines_data *e);
static bool gaudi2_get_edma_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
struct engines_data *e);
static u64 gaudi2_mmu_scramble_addr(struct hl_device *hdev, u64 raw_addr);
static u64 gaudi2_mmu_descramble_addr(struct hl_device *hdev, u64 scrambled_addr);
static void gaudi2_init_scrambler_hbm(struct hl_device *hdev)
{
}
static u32 gaudi2_get_signal_cb_size(struct hl_device *hdev)
{
return sizeof(struct packet_msg_short);
}
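/* a wait CB consists of four MSG_SHORT packets plus a single FENCE packet */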
static u32 gaudi2_get_wait_cb_size(struct hl_device *hdev)
{
return sizeof(struct packet_msg_short) * 4 + sizeof(struct packet_fence);
}
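/*
 * gaudi2_iterate_tpcs() - invoke ctx->fn on every enabled TPC, including the
 * PCI TPC (DCORE0_TPC6). The callback receives the DCORE index, the instance
 * index within that DCORE and the register offset of the TPC, and reports
 * failures through ctx->rc, which also stops the iteration.
 *
 * Minimal usage sketch (the callback name here is illustrative only):
 *
 *	static void my_tpc_cb(struct hl_device *hdev, int dcore, int inst,
 *				u32 offset, struct iterate_module_ctx *ctx)
 *	{
 *		... per-TPC work using the supplied register offset ...
 *		... on failure, set ctx->rc to a negative error code ...
 *	}
 *
 *	struct iterate_module_ctx ctx = { .fn = my_tpc_cb };
 *
 *	gaudi2_iterate_tpcs(hdev, &ctx);
 */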
void gaudi2_iterate_tpcs(struct hl_device *hdev, struct iterate_module_ctx *ctx)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int dcore, inst, tpc_seq;
u32 offset;
/* init the return code */
ctx->rc = 0;
for (dcore = 0; dcore < NUM_OF_DCORES; dcore++) {
for (inst = 0; inst < NUM_OF_TPC_PER_DCORE; inst++) {
tpc_seq = dcore * NUM_OF_TPC_PER_DCORE + inst;
if (!(prop->tpc_enabled_mask & BIT(tpc_seq)))
continue;
offset = (DCORE_OFFSET * dcore) + (DCORE_TPC_OFFSET * inst);
ctx->fn(hdev, dcore, inst, offset, ctx);
if (ctx->rc) {
dev_err(hdev->dev, "TPC iterator failed for DCORE%d TPC%d\n",
dcore, inst);
return;
}
}
}
if (!(prop->tpc_enabled_mask & BIT(TPC_ID_DCORE0_TPC6)))
return;
/* special check for PCI TPC (DCORE0_TPC6) */
offset = DCORE_TPC_OFFSET * (NUM_DCORE0_TPC - 1);
ctx->fn(hdev, 0, NUM_DCORE0_TPC - 1, offset, ctx);
if (ctx->rc)
dev_err(hdev->dev, "TPC iterator failed for DCORE0 TPC6\n");
}
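/*
 * A host physical address is considered valid if it lies below the end of the
 * first host window (HOST_PHYS_BASE_0 + HOST_PHYS_SIZE_0) or at/above the
 * start of the second window (HOST_PHYS_BASE_1); only addresses in the hole
 * between the two windows are rejected.
 */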
static bool gaudi2_host_phys_addr_valid(u64 addr)
{
if ((addr < HOST_PHYS_BASE_0 + HOST_PHYS_SIZE_0) || (addr >= HOST_PHYS_BASE_1))
return true;
return false;
}
static int set_number_of_functional_hbms(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u8 faulty_hbms = hweight64(hdev->dram_binning);
/* check if all HBMs should be used */
if (!faulty_hbms) {
dev_dbg(hdev->dev, "All HBM are in use (no binning)\n");
prop->num_functional_hbms = GAUDI2_HBM_NUM;
return 0;
}
	/*
	 * check for the error condition in which the number of binning
	 * candidates is higher than the maximum supported by the driver
	 * (in that case the binning mask is ignored and the driver sets
	 * the default)
	 */
if (faulty_hbms > MAX_FAULTY_HBMS) {
dev_err(hdev->dev,
"HBM binning supports max of %d faulty HBMs, supplied mask 0x%llx.\n",
MAX_FAULTY_HBMS, hdev->dram_binning);
return -EINVAL;
}
	/*
	 * when binning is in effect, the number of functional HBMs is reduced
	 * by the number of faulty HBMs (at most MAX_FAULTY_HBMS)
	 */
prop->num_functional_hbms = GAUDI2_HBM_NUM - faulty_hbms;
return 0;
}
static int gaudi2_set_dram_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 basic_hbm_page_size;
int rc;
rc = set_number_of_functional_hbms(hdev);
if (rc)
return -EINVAL;
	/*
	 * Due to a HW bug in which the TLB is x16 smaller than expected, we use
	 * a workaround of an x16 bigger page size so that the entire HBM mapping
	 * can be populated in the TLB
	 */
basic_hbm_page_size = prop->num_functional_hbms * SZ_8M;
prop->dram_page_size = GAUDI2_COMPENSATE_TLB_PAGE_SIZE_FACTOR * basic_hbm_page_size;
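	/*
	 * e.g. with all GAUDI2_HBM_NUM (6) HBMs functional and assuming the
	 * compensation factor is the x16 noted above, the basic page is
	 * 6 * 8MB = 48MB and the resulting DRAM page size is 768MB
	 */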
prop->device_mem_alloc_default_page_size = prop->dram_page_size;
prop->dram_size = prop->num_functional_hbms * SZ_16G;
prop->dram_base_address = DRAM_PHYS_BASE;
prop->dram_end_address = prop->dram_base_address + prop->dram_size;
prop->dram_supports_virtual_memory = true;
prop->dram_user_base_address = DRAM_PHYS_BASE + prop->dram_page_size;
prop->dram_hints_align_mask = ~GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK;
prop->hints_dram_reserved_va_range.start_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HBM_START;
prop->hints_dram_reserved_va_range.end_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HBM_END;
/* since DRAM page size differs from DMMU page size we need to allocate
	 * DRAM memory in units of dram_page size and map this memory in
* units of DMMU page size. we overcome this size mismatch using a
* scrambling routine which takes a DRAM page and converts it to a DMMU
* page.
* We therefore:
* 1. partition the virtual address space to DRAM-page (whole) pages.
* (suppose we get n such pages)
* 2. limit the amount of virtual address space we got from 1 above to
* a multiple of 64M as we don't want the scrambled address to cross
* the DRAM virtual address space.
* ( m = (n * DRAM_page_size) / DMMU_page_size).
	 * 3. determine the end address accordingly
* end_addr = start_addr + m * 48M
*
* the DRAM address MSBs (63:48) are not part of the roundup calculation
*/
prop->dmmu.start_addr = prop->dram_base_address +
(prop->dram_page_size *
DIV_ROUND_UP_SECTOR_T(prop->dram_size, prop->dram_page_size));
prop->dmmu.end_addr = prop->dmmu.start_addr + prop->dram_page_size *
div_u64((VA_HBM_SPACE_END - prop->dmmu.start_addr), prop->dmmu.page_size);
return 0;
}
static int gaudi2_set_fixed_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hw_queue_properties *q_props;
u32 num_sync_stream_queues = 0;
int i;
prop->max_queues = GAUDI2_QUEUE_ID_SIZE;
prop->hw_queues_props = kcalloc(prop->max_queues, sizeof(struct hw_queue_properties),
GFP_KERNEL);
if (!prop->hw_queues_props)
return -ENOMEM;
q_props = prop->hw_queues_props;
for (i = 0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i++) {
q_props[i].type = QUEUE_TYPE_HW;
q_props[i].driver_only = 0;
if (i >= GAUDI2_QUEUE_ID_NIC_0_0 && i <= GAUDI2_QUEUE_ID_NIC_23_3) {
q_props[i].supports_sync_stream = 0;
} else {
q_props[i].supports_sync_stream = 1;
num_sync_stream_queues++;
}
q_props[i].cb_alloc_flags = CB_ALLOC_USER;
}
q_props[GAUDI2_QUEUE_ID_CPU_PQ].type = QUEUE_TYPE_CPU;
q_props[GAUDI2_QUEUE_ID_CPU_PQ].driver_only = 1;
q_props[GAUDI2_QUEUE_ID_CPU_PQ].cb_alloc_flags = CB_ALLOC_KERNEL;
prop->cache_line_size = DEVICE_CACHE_LINE_SIZE;
prop->cfg_base_address = CFG_BASE;
prop->device_dma_offset_for_host_access = HOST_PHYS_BASE_0;
prop->host_base_address = HOST_PHYS_BASE_0;
prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE_0;
prop->max_pending_cs = GAUDI2_MAX_PENDING_CS;
prop->completion_queues_count = GAUDI2_RESERVED_CQ_NUMBER;
prop->user_dec_intr_count = NUMBER_OF_DEC;
prop->user_interrupt_count = GAUDI2_IRQ_NUM_USER_LAST - GAUDI2_IRQ_NUM_USER_FIRST + 1;
prop->completion_mode = HL_COMPLETION_MODE_CS;
prop->sync_stream_first_sob = GAUDI2_RESERVED_SOB_NUMBER;
prop->sync_stream_first_mon = GAUDI2_RESERVED_MON_NUMBER;
prop->sram_base_address = SRAM_BASE_ADDR;
prop->sram_size = SRAM_SIZE;
prop->sram_end_address = prop->sram_base_address + prop->sram_size;
prop->sram_user_base_address = prop->sram_base_address + SRAM_USER_BASE_OFFSET;
prop->hints_range_reservation = true;
prop->rotator_enabled_mask = BIT(NUM_OF_ROT) - 1;
if (hdev->pldm)
prop->mmu_pgt_size = 0x800000; /* 8MB */
else
prop->mmu_pgt_size = MMU_PAGE_TABLES_INITIAL_SIZE;
prop->mmu_pte_size = HL_PTE_SIZE;
prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dmmu.hop_shifts[MMU_HOP0] = DHOP0_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP1] = DHOP1_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP2] = DHOP2_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP3] = DHOP3_SHIFT;
prop->dmmu.hop_shifts[MMU_HOP4] = DHOP4_SHIFT;
prop->dmmu.hop_masks[MMU_HOP0] = DHOP0_MASK;
prop->dmmu.hop_masks[MMU_HOP1] = DHOP1_MASK;
prop->dmmu.hop_masks[MMU_HOP2] = DHOP2_MASK;
prop->dmmu.hop_masks[MMU_HOP3] = DHOP3_MASK;
prop->dmmu.hop_masks[MMU_HOP4] = DHOP4_MASK;
prop->dmmu.page_size = PAGE_SIZE_1GB;
prop->dmmu.num_hops = MMU_ARCH_6_HOPS;
prop->dmmu.last_mask = LAST_MASK;
prop->dmmu.host_resident = 1;
prop->dmmu.hop_table_size = prop->mmu_hop_table_size;
prop->dmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
	/*
	 * this is done in order to be able to validate the FW descriptor (i.e. validating
	 * that the addresses and allocated space for the FW image do not cross memory
	 * bounds). for this reason we set the DRAM size to the minimum possible, and later
	 * it will be modified according to what is reported in the cpucp info packet
	 */
prop->dram_size = (GAUDI2_HBM_NUM - 1) * SZ_16G;
hdev->pmmu_huge_range = true;
prop->pmmu.host_resident = 1;
prop->pmmu.num_hops = MMU_ARCH_6_HOPS;
prop->pmmu.last_mask = LAST_MASK;
prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
prop->hints_host_reserved_va_range.start_addr = RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START;
prop->hints_host_reserved_va_range.end_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HOST_END;
prop->hints_host_hpage_reserved_va_range.start_addr =
RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_START;
prop->hints_host_hpage_reserved_va_range.end_addr =
RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_END;
if (PAGE_SIZE == SZ_64K) {
prop->pmmu.hop_shifts[MMU_HOP0] = HOP0_SHIFT_64K;
prop->pmmu.hop_shifts[MMU_HOP1] = HOP1_SHIFT_64K;
prop->pmmu.hop_shifts[MMU_HOP2] = HOP2_SHIFT_64K;
prop->pmmu.hop_shifts[MMU_HOP3] = HOP3_SHIFT_64K;
prop->pmmu.hop_shifts[MMU_HOP4] = HOP4_SHIFT_64K;
prop->pmmu.hop_shifts[MMU_HOP5] = HOP5_SHIFT_64K;
prop->pmmu.hop_masks[MMU_HOP0] = HOP0_MASK_64K;
prop->pmmu.hop_masks[MMU_HOP1] = HOP1_MASK_64K;
prop->pmmu.hop_masks[MMU_HOP2] = HOP2_MASK_64K;
prop->pmmu.hop_masks[MMU_HOP3] = HOP3_MASK_64K;
prop->pmmu.hop_masks[MMU_HOP4] = HOP4_MASK_64K;
prop->pmmu.hop_masks[MMU_HOP5] = HOP5_MASK_64K;
prop->pmmu.start_addr = VA_HOST_SPACE_PAGE_START;
prop->pmmu.end_addr = VA_HOST_SPACE_PAGE_END;
prop->pmmu.page_size = PAGE_SIZE_64KB;
/* shifts and masks are the same in PMMU and HPMMU */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
prop->pmmu_huge.page_size = PAGE_SIZE_16MB;
prop->pmmu_huge.start_addr = VA_HOST_SPACE_HPAGE_START;
prop->pmmu_huge.end_addr = VA_HOST_SPACE_HPAGE_END;
} else {
prop->pmmu.hop_shifts[MMU_HOP0] = HOP0_SHIFT_4K;
prop->pmmu.hop_shifts[MMU_HOP1] = HOP1_SHIFT_4K;
prop->pmmu.hop_shifts[MMU_HOP2] = HOP2_SHIFT_4K;
prop->pmmu.hop_shifts[MMU_HOP3] = HOP3_SHIFT_4K;
prop->pmmu.hop_shifts[MMU_HOP4] = HOP4_SHIFT_4K;
prop->pmmu.hop_shifts[MMU_HOP5] = HOP5_SHIFT_4K;
prop->pmmu.hop_masks[MMU_HOP0] = HOP0_MASK_4K;
prop->pmmu.hop_masks[MMU_HOP1] = HOP1_MASK_4K;
prop->pmmu.hop_masks[MMU_HOP2] = HOP2_MASK_4K;
prop->pmmu.hop_masks[MMU_HOP3] = HOP3_MASK_4K;
prop->pmmu.hop_masks[MMU_HOP4] = HOP4_MASK_4K;
prop->pmmu.hop_masks[MMU_HOP5] = HOP5_MASK_4K;
prop->pmmu.start_addr = VA_HOST_SPACE_PAGE_START;
prop->pmmu.end_addr = VA_HOST_SPACE_PAGE_END;
prop->pmmu.page_size = PAGE_SIZE_4KB;
/* shifts and masks are the same in PMMU and HPMMU */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
prop->pmmu_huge.start_addr = VA_HOST_SPACE_HPAGE_START;
prop->pmmu_huge.end_addr = VA_HOST_SPACE_HPAGE_END;
}
prop->max_num_of_engines = GAUDI2_ENGINE_ID_SIZE;
prop->num_engine_cores = CPU_ID_MAX;
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
prop->num_of_events = GAUDI2_EVENT_SIZE;
prop->supports_engine_modes = true;
prop->dc_power_default = DC_POWER_DEFAULT;
prop->cb_pool_cb_cnt = GAUDI2_CB_POOL_CB_CNT;
prop->cb_pool_cb_size = GAUDI2_CB_POOL_CB_SIZE;
prop->pcie_dbi_base_address = CFG_BASE + mmPCIE_DBI_BASE;
prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
strncpy(prop->cpucp_info.card_name, GAUDI2_DEFAULT_CARD_NAME, CARD_NAME_MAX_LEN);
prop->mme_master_slave_mode = 1;
prop->first_available_user_sob[0] = GAUDI2_RESERVED_SOB_NUMBER +
(num_sync_stream_queues * HL_RSVD_SOBS);
prop->first_available_user_mon[0] = GAUDI2_RESERVED_MON_NUMBER +
(num_sync_stream_queues * HL_RSVD_MONS);
prop->first_available_user_interrupt = GAUDI2_IRQ_NUM_USER_FIRST;
prop->tpc_interrupt_id = GAUDI2_IRQ_NUM_TPC_ASSERT;
prop->eq_interrupt_id = GAUDI2_IRQ_NUM_EVENT_QUEUE;
prop->first_available_cq[0] = GAUDI2_RESERVED_CQ_NUMBER;
prop->fw_cpu_boot_dev_sts0_valid = false;
prop->fw_cpu_boot_dev_sts1_valid = false;
prop->hard_reset_done_by_fw = false;
prop->gic_interrupts_enable = true;
prop->server_type = HL_SERVER_TYPE_UNKNOWN;
prop->max_dec = NUMBER_OF_DEC;
prop->clk_pll_index = HL_GAUDI2_MME_PLL;
prop->dma_mask = 64;
prop->hbw_flush_reg = mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0;
return 0;
}
static int gaudi2_pci_bars_map(struct hl_device *hdev)
{
static const char * const name[] = {"CFG_SRAM", "MSIX", "DRAM"};
bool is_wc[3] = {false, false, true};
int rc;
rc = hl_pci_bars_map(hdev, name, is_wc);
if (rc)
return rc;
hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] + (CFG_BASE - STM_FLASH_BASE_ADDR);
return 0;
}
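/*
 * Re-points the DRAM BAR (inbound region 2) at @addr. Returns the previous BAR
 * base so the caller can restore it, or U64_MAX if the iATU is owned by the FW
 * or reprogramming the region failed.
 */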
static u64 gaudi2_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
struct hl_inbound_pci_region pci_region;
u64 old_addr = addr;
int rc;
if ((gaudi2) && (gaudi2->dram_bar_cur_addr == addr))
return old_addr;
if (hdev->asic_prop.iatu_done_by_fw)
return U64_MAX;
/* Inbound Region 2 - Bar 4 - Point to DRAM */
pci_region.mode = PCI_BAR_MATCH_MODE;
pci_region.bar = DRAM_BAR_ID;
pci_region.addr = addr;
rc = hl_pci_set_inbound_region(hdev, 2, &pci_region);
if (rc)
return U64_MAX;
if (gaudi2) {
old_addr = gaudi2->dram_bar_cur_addr;
gaudi2->dram_bar_cur_addr = addr;
}
return old_addr;
}
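/*
 * iATU layout programmed below (when not already done by the FW):
 * inbound region 0 - BAR 0 - CFG space, inbound region 1 - BAR 0 -
 * BAR0-reserved area + SRAM, inbound region 2 - BAR 4 - DRAM, and a single
 * outbound region pointing at host memory.
 */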
static int gaudi2_init_iatu(struct hl_device *hdev)
{
struct hl_inbound_pci_region inbound_region;
struct hl_outbound_pci_region outbound_region;
u32 bar_addr_low, bar_addr_high;
int rc;
if (hdev->asic_prop.iatu_done_by_fw)
return 0;
/* Temporary inbound Region 0 - Bar 0 - Point to CFG
* We must map this region in BAR match mode in order to
* fetch BAR physical base address
*/
inbound_region.mode = PCI_BAR_MATCH_MODE;
inbound_region.bar = SRAM_CFG_BAR_ID;
/* Base address must be aligned to Bar size which is 256 MB */
inbound_region.addr = STM_FLASH_BASE_ADDR - STM_FLASH_ALIGNED_OFF;
rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
if (rc)
return rc;
/* Fetch physical BAR address */
bar_addr_high = RREG32(mmPCIE_DBI_BAR1_REG + STM_FLASH_ALIGNED_OFF);
bar_addr_low = RREG32(mmPCIE_DBI_BAR0_REG + STM_FLASH_ALIGNED_OFF) & ~0xF;
hdev->pcie_bar_phys[SRAM_CFG_BAR_ID] = (u64)bar_addr_high << 32 | bar_addr_low;
/* Inbound Region 0 - Bar 0 - Point to CFG */
inbound_region.mode = PCI_ADDRESS_MATCH_MODE;
inbound_region.bar = SRAM_CFG_BAR_ID;
inbound_region.offset_in_bar = 0;
inbound_region.addr = STM_FLASH_BASE_ADDR;
inbound_region.size = CFG_REGION_SIZE;
rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
if (rc)
return rc;
/* Inbound Region 1 - Bar 0 - Point to BAR0_RESERVED + SRAM */
inbound_region.mode = PCI_ADDRESS_MATCH_MODE;
inbound_region.bar = SRAM_CFG_BAR_ID;
inbound_region.offset_in_bar = CFG_REGION_SIZE;
inbound_region.addr = BAR0_RSRVD_BASE_ADDR;
inbound_region.size = BAR0_RSRVD_SIZE + SRAM_SIZE;
rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
if (rc)
return rc;
/* Inbound Region 2 - Bar 4 - Point to DRAM */
inbound_region.mode = PCI_BAR_MATCH_MODE;
inbound_region.bar = DRAM_BAR_ID;
inbound_region.addr = DRAM_PHYS_BASE;
rc = hl_pci_set_inbound_region(hdev, 2, &inbound_region);
if (rc)
return rc;
/* Outbound Region 0 - Point to Host */
outbound_region.addr = HOST_PHYS_BASE_0;
outbound_region.size = HOST_PHYS_SIZE_0;
rc = hl_pci_set_outbound_region(hdev, &outbound_region);
return rc;
}
static enum hl_device_hw_state gaudi2_get_hw_state(struct hl_device *hdev)
{
return RREG32(mmHW_STATE);
}
static int gaudi2_tpc_binning_init_prop(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
/*
* check for error condition in which number of binning candidates
* is higher than the maximum supported by the driver
*/
if (hweight64(hdev->tpc_binning) > MAX_CLUSTER_BINNING_FAULTY_TPCS) {
dev_err(hdev->dev, "TPC binning is supported for max of %d faulty TPCs, provided mask 0x%llx\n",
MAX_CLUSTER_BINNING_FAULTY_TPCS,
hdev->tpc_binning);
return -EINVAL;
}
prop->tpc_binning_mask = hdev->tpc_binning;
prop->tpc_enabled_mask = GAUDI2_TPC_FULL_MASK;
return 0;
}
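/*
 * For each faulty TPC in the binning mask, a substitute engine is removed from
 * the enabled mask and its four queues are marked as binned: DCORE0_TPC6 (the
 * PCI TPC) for the first faulty TPC and DCORE3_TPC5 for the second.
 */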
static int gaudi2_set_tpc_binning_masks(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hw_queue_properties *q_props = prop->hw_queues_props;
u64 tpc_binning_mask;
u8 subst_idx = 0;
int i, rc;
rc = gaudi2_tpc_binning_init_prop(hdev);
if (rc)
return rc;
tpc_binning_mask = prop->tpc_binning_mask;
for (i = 0 ; i < MAX_FAULTY_TPCS ; i++) {
u8 subst_seq, binned, qid_base;
if (tpc_binning_mask == 0)
break;
if (subst_idx == 0) {
subst_seq = TPC_ID_DCORE0_TPC6;
qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_6_0;
} else {
subst_seq = TPC_ID_DCORE3_TPC5;
qid_base = GAUDI2_QUEUE_ID_DCORE3_TPC_5_0;
}
/* clear bit from mask */
binned = __ffs(tpc_binning_mask);
/*
* Coverity complains about possible out-of-bound access in
* clear_bit
*/
if (binned >= TPC_ID_SIZE) {
dev_err(hdev->dev,
"Invalid binned TPC (binning mask: %llx)\n",
tpc_binning_mask);
return -EINVAL;
}
clear_bit(binned, (unsigned long *)&tpc_binning_mask);
/* also clear replacing TPC bit from enabled mask */
clear_bit(subst_seq, (unsigned long *)&prop->tpc_enabled_mask);
		/* bin the substitute TPC's queues */
q_props[qid_base].binned = 1;
q_props[qid_base + 1].binned = 1;
q_props[qid_base + 2].binned = 1;
q_props[qid_base + 3].binned = 1;
subst_idx++;
}
return 0;
}
static int gaudi2_set_dec_binning_masks(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u8 num_faulty;
num_faulty = hweight32(hdev->decoder_binning);
/*
* check for error condition in which number of binning candidates
* is higher than the maximum supported by the driver
*/
if (num_faulty > MAX_FAULTY_DECODERS) {
dev_err(hdev->dev, "decoder binning is supported for max of single faulty decoder, provided mask 0x%x\n",
hdev->decoder_binning);
return -EINVAL;
}
prop->decoder_binning_mask = (hdev->decoder_binning & GAUDI2_DECODER_FULL_MASK);
if (prop->decoder_binning_mask)
prop->decoder_enabled_mask = (GAUDI2_DECODER_FULL_MASK & ~BIT(DEC_ID_PCIE_VDEC1));
else
prop->decoder_enabled_mask = GAUDI2_DECODER_FULL_MASK;
return 0;
}
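/*
 * When a DRAM binning mask is supplied, the faulty HBM is recorded in
 * faulty_dram_cluster_map (consumed later by the cluster configuration) and
 * HBM_ID5 is dropped from the DRAM enabled mask.
 */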
static void gaudi2_set_dram_binning_masks(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
/* check if we should override default binning */
if (!hdev->dram_binning) {
prop->dram_binning_mask = 0;
prop->dram_enabled_mask = GAUDI2_DRAM_FULL_MASK;
return;
}
/* set DRAM binning constraints */
prop->faulty_dram_cluster_map |= hdev->dram_binning;
prop->dram_binning_mask = hdev->dram_binning;
prop->dram_enabled_mask = GAUDI2_DRAM_FULL_MASK & ~BIT(HBM_ID5);
}
static int gaudi2_set_edma_binning_masks(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hw_queue_properties *q_props;
u8 seq, num_faulty;
num_faulty = hweight32(hdev->edma_binning);
/*
* check for error condition in which number of binning candidates
* is higher than the maximum supported by the driver
*/
if (num_faulty > MAX_FAULTY_EDMAS) {
dev_err(hdev->dev,
"EDMA binning is supported for max of single faulty EDMA, provided mask 0x%x\n",
hdev->edma_binning);
return -EINVAL;
}
if (!hdev->edma_binning) {
prop->edma_binning_mask = 0;
prop->edma_enabled_mask = GAUDI2_EDMA_FULL_MASK;
return 0;
}
seq = __ffs((unsigned long)hdev->edma_binning);
/* set binning constraints */
prop->faulty_dram_cluster_map |= BIT(edma_to_hbm_cluster[seq]);
prop->edma_binning_mask = hdev->edma_binning;
prop->edma_enabled_mask = GAUDI2_EDMA_FULL_MASK & ~BIT(EDMA_ID_DCORE3_INSTANCE1);
/* bin substitute EDMA's queue */
q_props = prop->hw_queues_props;
q_props[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0].binned = 1;
q_props[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1].binned = 1;
q_props[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2].binned = 1;
q_props[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3].binned = 1;
return 0;
}
static int gaudi2_set_xbar_edge_enable_mask(struct hl_device *hdev, u32 xbar_edge_iso_mask)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u8 num_faulty, seq;
/* check if we should override default binning */
if (!xbar_edge_iso_mask) {
prop->xbar_edge_enabled_mask = GAUDI2_XBAR_EDGE_FULL_MASK;
return 0;
}
/*
 * note that it can be set to a value other than 0 only after the cpucp packet
 * (i.e. only the FW can set a redundancy value). For the user it will always
 * be 0.
 */
num_faulty = hweight32(xbar_edge_iso_mask);
/*
* check for error condition in which number of binning candidates
* is higher than the maximum supported by the driver
*/
if (num_faulty > MAX_FAULTY_XBARS) {
dev_err(hdev->dev, "we cannot have more than %d faulty XBAR EDGE\n",
MAX_FAULTY_XBARS);
return -EINVAL;
}
seq = __ffs((unsigned long)xbar_edge_iso_mask);
/* set binning constraints */
prop->faulty_dram_cluster_map |= BIT(xbar_edge_to_hbm_cluster[seq]);
prop->xbar_edge_enabled_mask = (~xbar_edge_iso_mask) & GAUDI2_XBAR_EDGE_FULL_MASK;
return 0;
}
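/*
 * Aggregate the per-component binning decisions (DRAM, EDMA, XBAR edge) into
 * the faulty DRAM cluster map. The HMMU/HIF mask starts out as the full mask
 * and is adjusted later if a faulty cluster is found.
 */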
static int gaudi2_set_cluster_binning_masks_common(struct hl_device *hdev, u8 xbar_edge_iso_mask)
{
int rc;
/*
 * mark all clusters as good; each component will "fail" a cluster
 * based on eFuse/user values.
 * If more than a single cluster is faulty, the chip is unusable.
 */
hdev->asic_prop.faulty_dram_cluster_map = 0;
gaudi2_set_dram_binning_masks(hdev);
rc = gaudi2_set_edma_binning_masks(hdev);
if (rc)
return rc;
rc = gaudi2_set_xbar_edge_enable_mask(hdev, xbar_edge_iso_mask);
if (rc)
return rc;
/* always initially set to full mask */
hdev->asic_prop.hmmu_hif_enabled_mask = GAUDI2_HIF_HMMU_FULL_MASK;
return 0;
}
static int gaudi2_set_cluster_binning_masks(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
rc = gaudi2_set_cluster_binning_masks_common(hdev, prop->cpucp_info.xbar_binning_mask);
if (rc)
return rc;
/* if we have DRAM binning reported by FW we should perform cluster config */
if (prop->faulty_dram_cluster_map) {
u8 cluster_seq = __ffs((unsigned long)prop->faulty_dram_cluster_map);
prop->hmmu_hif_enabled_mask = cluster_hmmu_hif_enabled_mask[cluster_seq];
}
return 0;
}
static int gaudi2_set_binning_masks(struct hl_device *hdev)
{
int rc;
rc = gaudi2_set_cluster_binning_masks(hdev);
if (rc)
return rc;
rc = gaudi2_set_tpc_binning_masks(hdev);
if (rc)
return rc;
rc = gaudi2_set_dec_binning_masks(hdev);
if (rc)
return rc;
return 0;
}
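/*
 * Read the cpucp info from the FW (only after the CPU queues are up and only
 * on hard reset), validate the reported DRAM size, take the binning masks
 * reported by the FW and then refresh the DRAM properties and binning masks
 * accordingly.
 */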
static int gaudi2_cpucp_info_get(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
struct asic_fixed_properties *prop = &hdev->asic_prop;
long max_power;
u64 dram_size;
int rc;
if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
/* No point in asking for this information again when not doing a hard reset, as the
 * device CPU hasn't been reset
 */
if (hdev->reset_info.in_compute_reset)
return 0;
rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
mmCPU_BOOT_ERR1);
if (rc)
return rc;
dram_size = le64_to_cpu(prop->cpucp_info.dram_size);
if (dram_size) {
/* we can have either 5 or 6 HBMs. other values are invalid */
if ((dram_size != ((GAUDI2_HBM_NUM - 1) * SZ_16G)) &&
(dram_size != (GAUDI2_HBM_NUM * SZ_16G))) {
dev_err(hdev->dev,
"F/W reported invalid DRAM size %llu. Trying to use default size %llu\n",
dram_size, prop->dram_size);
dram_size = prop->dram_size;
}
prop->dram_size = dram_size;
prop->dram_end_address = prop->dram_base_address + dram_size;
}
if (!strlen(prop->cpucp_info.card_name))
strncpy(prop->cpucp_info.card_name, GAUDI2_DEFAULT_CARD_NAME, CARD_NAME_MAX_LEN);
/* Overwrite binning masks with the actual binning values from F/W */
hdev->dram_binning = prop->cpucp_info.dram_binning_mask;
hdev->edma_binning = prop->cpucp_info.edma_binning_mask;
hdev->tpc_binning = le64_to_cpu(prop->cpucp_info.tpc_binning_mask);
hdev->decoder_binning = lower_32_bits(le64_to_cpu(prop->cpucp_info.decoder_binning_mask));
dev_dbg(hdev->dev, "Read binning masks: tpc: 0x%llx, dram: 0x%llx, edma: 0x%x, dec: 0x%x\n",
hdev->tpc_binning, hdev->dram_binning, hdev->edma_binning,
hdev->decoder_binning);
/*
* at this point the DRAM parameters need to be updated according to data obtained
* from the FW
*/
rc = hdev->asic_funcs->set_dram_properties(hdev);
if (rc)
return rc;
rc = hdev->asic_funcs->set_binning_masks(hdev);
if (rc)
return rc;
max_power = hl_fw_get_max_power(hdev);
if (max_power < 0)
return max_power;
prop->max_power_default = (u64) max_power;
return 0;
}
static int gaudi2_fetch_psoc_frequency(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS];
int rc;
if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI2_CPU_PLL, pll_freq_arr);
if (rc)
return rc;
hdev->asic_prop.psoc_timestamp_frequency = pll_freq_arr[3];
return 0;
}
static int gaudi2_early_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_dev *pdev = hdev->pdev;
resource_size_t pci_bar_size;
int rc;
rc = gaudi2_set_fixed_properties(hdev);
if (rc)
return rc;
/* Check BAR sizes */
pci_bar_size = pci_resource_len(pdev, SRAM_CFG_BAR_ID);
if (pci_bar_size != CFG_BAR_SIZE) {
dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
SRAM_CFG_BAR_ID, &pci_bar_size, CFG_BAR_SIZE);
rc = -ENODEV;
goto free_queue_props;
}
pci_bar_size = pci_resource_len(pdev, MSIX_BAR_ID);
if (pci_bar_size != MSIX_BAR_SIZE) {
dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
MSIX_BAR_ID, &pci_bar_size, MSIX_BAR_SIZE);
rc = -ENODEV;
goto free_queue_props;
}
prop->dram_pci_bar_size = pci_resource_len(pdev, DRAM_BAR_ID);
hdev->dram_pci_bar_start = pci_resource_start(pdev, DRAM_BAR_ID);
/*
 * Only in pldm is the iATU configured by the driver; otherwise it is done by the FW
 */
if (hdev->pldm)
hdev->asic_prop.iatu_done_by_fw = false;
else
hdev->asic_prop.iatu_done_by_fw = true;
rc = hl_pci_init(hdev);
if (rc)
goto free_queue_props;
/* Before continuing with the initialization, we need to read the preboot
 * version to determine whether we are running with security-enabled firmware
 */
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
/* we are already on failure flow, so don't check if hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (gaudi2_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
rc = hdev->asic_funcs->hw_fini(hdev, true, false);
if (rc) {
dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
goto pci_fini;
}
}
return 0;
pci_fini:
hl_pci_fini(hdev);
free_queue_props:
kfree(hdev->asic_prop.hw_queues_props);
return rc;
}
static int gaudi2_early_fini(struct hl_device *hdev)
{
kfree(hdev->asic_prop.hw_queues_props);
hl_pci_fini(hdev);
return 0;
}
static bool gaudi2_is_arc_nic_owned(u64 arc_id)
{
switch (arc_id) {
case CPU_ID_NIC_QMAN_ARC0...CPU_ID_NIC_QMAN_ARC23:
return true;
default:
return false;
}
}
static bool gaudi2_is_arc_tpc_owned(u64 arc_id)
{
switch (arc_id) {
case CPU_ID_TPC_QMAN_ARC0...CPU_ID_TPC_QMAN_ARC24:
return true;
default:
return false;
}
}
static void gaudi2_init_arcs(struct hl_device *hdev)
{
struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u64 arc_id;
u32 i;
for (i = CPU_ID_SCHED_ARC0 ; i <= CPU_ID_SCHED_ARC3 ; i++) {
if (gaudi2_is_arc_enabled(hdev, i))
continue;
gaudi2_set_arc_id_cap(hdev, i);
}
for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i += 4) {
if (!gaudi2_is_queue_enabled(hdev, i))
continue;
arc_id = gaudi2_queue_id_to_arc_id[i];
if (gaudi2_is_arc_enabled(hdev, arc_id))
continue;
if (gaudi2_is_arc_nic_owned(arc_id) &&
!(hdev->nic_ports_mask & BIT_ULL(arc_id - CPU_ID_NIC_QMAN_ARC0)))
continue;
if (gaudi2_is_arc_tpc_owned(arc_id) && !(gaudi2->tpc_hw_cap_initialized &
BIT_ULL(arc_id - CPU_ID_TPC_QMAN_ARC0)))
continue;
gaudi2_set_arc_id_cap(hdev, arc_id);
}
/* Fetch the engine ARC interrupt control register address */
hdev->asic_prop.engine_core_interrupt_reg_addr =
CFG_BASE + le32_to_cpu(dyn_regs->eng_arc_irq_ctrl);
}
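/*
 * Scrub the DCCM of a single ARC via KDMA: scheduler ARCs 0-3 own two
 * consecutive DCCM blocks, scheduler ARCs 4-5 and the MME QMAN ARCs have an
 * upper block that must be switched in before it can be scrubbed, and all
 * other ARCs have a single block.
 */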
static int gaudi2_scrub_arc_dccm(struct hl_device *hdev, u32 cpu_id)
{
u32 reg_base, reg_val;
int rc;
switch (cpu_id) {
case CPU_ID_SCHED_ARC0 ... CPU_ID_SCHED_ARC3:
/* Each ARC scheduler has 2 consecutive DCCM blocks */
rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id],
ARC_DCCM_BLOCK_SIZE * 2, true);
if (rc)
return rc;
break;
case CPU_ID_SCHED_ARC4:
case CPU_ID_SCHED_ARC5:
case CPU_ID_MME_QMAN_ARC0:
case CPU_ID_MME_QMAN_ARC1:
reg_base = gaudi2_arc_blocks_bases[cpu_id];
/* Scrub lower DCCM block */
rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id],
ARC_DCCM_BLOCK_SIZE, true);
if (rc)
return rc;
/* Switch to upper DCCM block */
reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN_VAL_MASK, 1);
WREG32(reg_base + ARC_DCCM_UPPER_EN_OFFSET, reg_val);
/* Scrub upper DCCM block */
rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id],
ARC_DCCM_BLOCK_SIZE, true);
if (rc)
return rc;
/* Switch to lower DCCM block */
reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN_VAL_MASK, 0);
WREG32(reg_base + ARC_DCCM_UPPER_EN_OFFSET, reg_val);
break;
default:
rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id],
ARC_DCCM_BLOCK_SIZE, true);
if (rc)
return rc;
}
return 0;
}
static int gaudi2_scrub_arcs_dccm(struct hl_device *hdev)
{
u16 arc_id;
int rc;
for (arc_id = CPU_ID_SCHED_ARC0 ; arc_id < CPU_ID_MAX ; arc_id++) {
if (!gaudi2_is_arc_enabled(hdev, arc_id))
continue;
rc = gaudi2_scrub_arc_dccm(hdev, arc_id);
if (rc)
return rc;
}
return 0;
}
static int gaudi2_late_init(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int rc;
hdev->asic_prop.supports_advanced_cpucp_rc = true;
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS,
gaudi2->virt_msix_db_dma_addr);
if (rc) {
dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
return rc;
}
rc = gaudi2_fetch_psoc_frequency(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to fetch psoc frequency\n");
goto disable_pci_access;
}
gaudi2_init_arcs(hdev);
rc = gaudi2_scrub_arcs_dccm(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to scrub arcs DCCM\n");
goto disable_pci_access;
}
gaudi2_init_security(hdev);
return 0;
disable_pci_access:
hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
return rc;
}
static void gaudi2_late_fini(struct hl_device *hdev)
{
hl_hwmon_release_resources(hdev);
}
static void gaudi2_user_mapped_dec_init(struct gaudi2_device *gaudi2, u32 start_idx)
{
struct user_mapped_block *blocks = gaudi2->mapped_blocks;
HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE0_DEC0_CMD_BASE, HL_BLOCK_SIZE);
HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE0_DEC1_CMD_BASE, HL_BLOCK_SIZE);
HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE1_DEC0_CMD_BASE, HL_BLOCK_SIZE);
HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE1_DEC1_CMD_BASE, HL_BLOCK_SIZE);
HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE2_DEC0_CMD_BASE, HL_BLOCK_SIZE);
HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE2_DEC1_CMD_BASE, HL_BLOCK_SIZE);
HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE3_DEC0_CMD_BASE, HL_BLOCK_SIZE);
HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE3_DEC1_CMD_BASE, HL_BLOCK_SIZE);
HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmPCIE_DEC0_CMD_BASE, HL_BLOCK_SIZE);
HL_USR_MAPPED_BLK_INIT(&blocks[start_idx], mmPCIE_DEC1_CMD_BASE, HL_BLOCK_SIZE);
}
static void gaudi2_user_mapped_blocks_init(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
struct user_mapped_block *blocks = gaudi2->mapped_blocks;
u32 block_size, umr_start_idx, num_umr_blocks;
int i;
for (i = 0 ; i < NUM_ARC_CPUS ; i++) {
if (i >= CPU_ID_SCHED_ARC0 && i <= CPU_ID_SCHED_ARC3)
block_size = ARC_DCCM_BLOCK_SIZE * 2;
else
block_size = ARC_DCCM_BLOCK_SIZE;
blocks[i].address = gaudi2_arc_dccm_bases[i];
blocks[i].size = block_size;
}
blocks[NUM_ARC_CPUS].address = mmARC_FARM_ARC0_ACP_ENG_BASE;
blocks[NUM_ARC_CPUS].size = HL_BLOCK_SIZE;
blocks[NUM_ARC_CPUS + 1].address = mmARC_FARM_ARC1_ACP_ENG_BASE;
blocks[NUM_ARC_CPUS + 1].size = HL_BLOCK_SIZE;
blocks[NUM_ARC_CPUS + 2].address = mmARC_FARM_ARC2_ACP_ENG_BASE;
blocks[NUM_ARC_CPUS + 2].size = HL_BLOCK_SIZE;
blocks[NUM_ARC_CPUS + 3].address = mmARC_FARM_ARC3_ACP_ENG_BASE;
blocks[NUM_ARC_CPUS + 3].size = HL_BLOCK_SIZE;
blocks[NUM_ARC_CPUS + 4].address = mmDCORE0_MME_QM_ARC_ACP_ENG_BASE;
blocks[NUM_ARC_CPUS + 4].size = HL_BLOCK_SIZE;
blocks[NUM_ARC_CPUS + 5].address = mmDCORE1_MME_QM_ARC_ACP_ENG_BASE;
blocks[NUM_ARC_CPUS + 5].size = HL_BLOCK_SIZE;
blocks[NUM_ARC_CPUS + 6].address = mmDCORE2_MME_QM_ARC_ACP_ENG_BASE;
blocks[NUM_ARC_CPUS + 6].size = HL_BLOCK_SIZE;
blocks[NUM_ARC_CPUS + 7].address = mmDCORE3_MME_QM_ARC_ACP_ENG_BASE;
blocks[NUM_ARC_CPUS + 7].size = HL_BLOCK_SIZE;
umr_start_idx = NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS;
num_umr_blocks = NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS;
for (i = 0 ; i < num_umr_blocks ; i++) {
u8 nic_id, umr_block_id;
nic_id = i / NUM_OF_USER_NIC_UMR_BLOCKS;
umr_block_id = i % NUM_OF_USER_NIC_UMR_BLOCKS;
blocks[umr_start_idx + i].address =
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE +
(nic_id / NIC_NUMBER_OF_QM_PER_MACRO) * NIC_OFFSET +
(nic_id % NIC_NUMBER_OF_QM_PER_MACRO) * NIC_QM_OFFSET +
umr_block_id * NIC_UMR_OFFSET;
blocks[umr_start_idx + i].size = HL_BLOCK_SIZE;
}
/* Expose decoder HW configuration block to user */
gaudi2_user_mapped_dec_init(gaudi2, USR_MAPPED_BLK_DEC_START_IDX);
for (i = 1; i < NUM_OF_DCORES; ++i) {
blocks[USR_MAPPED_BLK_SM_START_IDX + 2 * (i - 1)].size = SM_OBJS_BLOCK_SIZE;
blocks[USR_MAPPED_BLK_SM_START_IDX + 2 * (i - 1) + 1].size = HL_BLOCK_SIZE;
blocks[USR_MAPPED_BLK_SM_START_IDX + 2 * (i - 1)].address =
mmDCORE0_SYNC_MNGR_OBJS_BASE + i * DCORE_OFFSET;
blocks[USR_MAPPED_BLK_SM_START_IDX + 2 * (i - 1) + 1].address =
mmDCORE0_SYNC_MNGR_GLBL_BASE + i * DCORE_OFFSET;
}
}
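/*
 * Allocate the CPU-accessible DMA memory, retrying until an allocation whose
 * MSB extension bits (49..28) are constant across the whole range is found,
 * and free the rejected attempts.
 */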
static int gaudi2_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
{
dma_addr_t dma_addr_arr[GAUDI2_ALLOC_CPU_MEM_RETRY_CNT] = {}, end_addr;
void *virt_addr_arr[GAUDI2_ALLOC_CPU_MEM_RETRY_CNT] = {};
int i, j, rc = 0;
/* The device ARC works with 32-bit addresses, and because there is a single HW register
 * that holds the extension bits (49..28), these bits must be identical across the entire
 * allocated range.
 */
for (i = 0 ; i < GAUDI2_ALLOC_CPU_MEM_RETRY_CNT ; i++) {
virt_addr_arr[i] = hl_asic_dma_alloc_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
&dma_addr_arr[i], GFP_KERNEL | __GFP_ZERO);
if (!virt_addr_arr[i]) {
rc = -ENOMEM;
goto free_dma_mem_arr;
}
end_addr = dma_addr_arr[i] + HL_CPU_ACCESSIBLE_MEM_SIZE - 1;
if (GAUDI2_ARC_PCI_MSB_ADDR(dma_addr_arr[i]) == GAUDI2_ARC_PCI_MSB_ADDR(end_addr))
break;
}
if (i == GAUDI2_ALLOC_CPU_MEM_RETRY_CNT) {
dev_err(hdev->dev,
"MSBs of the ARC accessible DMA memory are not identical across the entire range\n");
rc = -EFAULT;
goto free_dma_mem_arr;
}
hdev->cpu_accessible_dma_mem = virt_addr_arr[i];
hdev->cpu_accessible_dma_address = dma_addr_arr[i];
free_dma_mem_arr:
for (j = 0 ; j < i ; j++)
hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, virt_addr_arr[j],
dma_addr_arr[j]);
return rc;
}
static void gaudi2_set_pci_memory_regions(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_mem_region *region;
/* CFG */
region = &hdev->pci_mem_region[PCI_REGION_CFG];
region->region_base = CFG_BASE;
region->region_size = CFG_SIZE;
region->offset_in_bar = CFG_BASE - STM_FLASH_BASE_ADDR;
region->bar_size = CFG_BAR_SIZE;
region->bar_id = SRAM_CFG_BAR_ID;
region->used = 1;
/* SRAM */
region = &hdev->pci_mem_region[PCI_REGION_SRAM];
region->region_base = SRAM_BASE_ADDR;
region->region_size = SRAM_SIZE;
region->offset_in_bar = CFG_REGION_SIZE + BAR0_RSRVD_SIZE;
region->bar_size = CFG_BAR_SIZE;
region->bar_id = SRAM_CFG_BAR_ID;
region->used = 1;
/* DRAM */
region = &hdev->pci_mem_region[PCI_REGION_DRAM];
region->region_base = DRAM_PHYS_BASE;
region->region_size = hdev->asic_prop.dram_size;
region->offset_in_bar = 0;
region->bar_size = prop->dram_pci_bar_size;
region->bar_id = DRAM_BAR_ID;
region->used = 1;
}
static void gaudi2_user_interrupt_setup(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int i, j, k;
/* Initialize TPC interrupt */
HL_USR_INTR_STRUCT_INIT(hdev->tpc_interrupt, hdev, 0, HL_USR_INTERRUPT_TPC);
/* Initialize unexpected error interrupt */
HL_USR_INTR_STRUCT_INIT(hdev->unexpected_error_interrupt, hdev, 0,
HL_USR_INTERRUPT_UNEXPECTED);
/* Initialize common user CQ interrupt */
HL_USR_INTR_STRUCT_INIT(hdev->common_user_cq_interrupt, hdev,
HL_COMMON_USER_CQ_INTERRUPT_ID, HL_USR_INTERRUPT_CQ);
/* Initialize common decoder interrupt */
HL_USR_INTR_STRUCT_INIT(hdev->common_decoder_interrupt, hdev,
HL_COMMON_DEC_INTERRUPT_ID, HL_USR_INTERRUPT_DECODER);
/* User interrupts structure holds both decoder and user interrupts from various engines.
 * We first initialize the decoder interrupts and then we add the user interrupts.
 * The only limitation is that the last decoder interrupt id must be smaller
 * than GAUDI2_IRQ_NUM_USER_FIRST. This is checked at compilation time.
 */
/* Initialize decoder interrupts; expose only the normal interrupts,
 * the error interrupts are handled by the driver
 */
for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, j = 0 ; i <= GAUDI2_IRQ_NUM_SHARED_DEC1_NRM;
i += 2, j++)
HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i,
HL_USR_INTERRUPT_DECODER);
for (i = GAUDI2_IRQ_NUM_USER_FIRST, k = 0 ; k < prop->user_interrupt_count; i++, j++, k++)
HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, HL_USR_INTERRUPT_CQ);
}
static inline int gaudi2_get_non_zero_random_int(void)
{
int rand = get_random_u32();
return rand ? rand : 1;
}
static void gaudi2_special_blocks_free(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_skip_blocks_cfg *skip_special_blocks_cfg =
&prop->skip_special_blocks_cfg;
kfree(prop->special_blocks);
kfree(skip_special_blocks_cfg->block_types);
kfree(skip_special_blocks_cfg->block_ranges);
}
static void gaudi2_special_blocks_iterator_free(struct hl_device *hdev)
{
gaudi2_special_blocks_free(hdev);
}
static bool gaudi2_special_block_skip(struct hl_device *hdev,
struct hl_special_blocks_cfg *special_blocks_cfg,
u32 blk_idx, u32 major, u32 minor, u32 sub_minor)
{
return false;
}
static int gaudi2_special_blocks_config(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int i, rc;
/* Configure Special blocks */
prop->glbl_err_cause_num = GAUDI2_NUM_OF_GLBL_ERR_CAUSE;
prop->num_of_special_blocks = ARRAY_SIZE(gaudi2_special_blocks);
prop->special_blocks = kmalloc_array(prop->num_of_special_blocks,
sizeof(*prop->special_blocks), GFP_KERNEL);
if (!prop->special_blocks)
return -ENOMEM;
for (i = 0 ; i < prop->num_of_special_blocks ; i++)
memcpy(&prop->special_blocks[i], &gaudi2_special_blocks[i],
sizeof(*prop->special_blocks));
/* Configure when to skip Special blocks */
memset(&prop->skip_special_blocks_cfg, 0, sizeof(prop->skip_special_blocks_cfg));
prop->skip_special_blocks_cfg.skip_block_hook = gaudi2_special_block_skip;
if (ARRAY_SIZE(gaudi2_iterator_skip_block_types)) {
prop->skip_special_blocks_cfg.block_types =
kmalloc_array(ARRAY_SIZE(gaudi2_iterator_skip_block_types),
sizeof(gaudi2_iterator_skip_block_types[0]), GFP_KERNEL);
if (!prop->skip_special_blocks_cfg.block_types) {
rc = -ENOMEM;
goto free_special_blocks;
}
memcpy(prop->skip_special_blocks_cfg.block_types, gaudi2_iterator_skip_block_types,
sizeof(gaudi2_iterator_skip_block_types));
prop->skip_special_blocks_cfg.block_types_len =
ARRAY_SIZE(gaudi2_iterator_skip_block_types);
}
if (ARRAY_SIZE(gaudi2_iterator_skip_block_ranges)) {
prop->skip_special_blocks_cfg.block_ranges =
kmalloc_array(ARRAY_SIZE(gaudi2_iterator_skip_block_ranges),
sizeof(gaudi2_iterator_skip_block_ranges[0]), GFP_KERNEL);
if (!prop->skip_special_blocks_cfg.block_ranges) {
rc = -ENOMEM;
goto free_skip_special_blocks_types;
}
for (i = 0 ; i < ARRAY_SIZE(gaudi2_iterator_skip_block_ranges) ; i++)
memcpy(&prop->skip_special_blocks_cfg.block_ranges[i],
&gaudi2_iterator_skip_block_ranges[i],
sizeof(struct range));
prop->skip_special_blocks_cfg.block_ranges_len =
ARRAY_SIZE(gaudi2_iterator_skip_block_ranges);
}
return 0;
free_skip_special_blocks_types:
kfree(prop->skip_special_blocks_cfg.block_types);
free_special_blocks:
kfree(prop->special_blocks);
return rc;
}
static int gaudi2_special_blocks_iterator_config(struct hl_device *hdev)
{
return gaudi2_special_blocks_config(hdev);
}
static void gaudi2_test_queues_msgs_free(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
struct gaudi2_queues_test_info *msg_info = gaudi2->queues_test_info;
int i;
for (i = 0 ; i < GAUDI2_NUM_TESTED_QS ; i++) {
/* bail-out if this is an allocation failure point */
if (!msg_info[i].kern_addr)
break;
hl_asic_dma_pool_free(hdev, msg_info[i].kern_addr, msg_info[i].dma_addr);
msg_info[i].kern_addr = NULL;
}
}
static int gaudi2_test_queues_msgs_alloc(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
struct gaudi2_queues_test_info *msg_info = gaudi2->queues_test_info;
int i, rc;
/* allocate a message-short buf for each Q we intend to test */
for (i = 0 ; i < GAUDI2_NUM_TESTED_QS ; i++) {
msg_info[i].kern_addr =
(void *)hl_asic_dma_pool_zalloc(hdev, sizeof(struct packet_msg_short),
GFP_KERNEL, &msg_info[i].dma_addr);
if (!msg_info[i].kern_addr) {
dev_err(hdev->dev,
"Failed to allocate dma memory for H/W queue %d testing\n", i);
rc = -ENOMEM;
goto err_exit;
}
}
return 0;
err_exit:
gaudi2_test_queues_msgs_free(hdev);
return rc;
}
static int gaudi2_sw_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2;
int i, rc;
/* Allocate device structure */
gaudi2 = kzalloc(sizeof(*gaudi2), GFP_KERNEL);
if (!gaudi2)
return -ENOMEM;
for (i = 0 ; i < ARRAY_SIZE(gaudi2_irq_map_table) ; i++) {
if (gaudi2_irq_map_table[i].msg || !gaudi2_irq_map_table[i].valid)
continue;
if (gaudi2->num_of_valid_hw_events == GAUDI2_EVENT_SIZE) {
dev_err(hdev->dev, "H/W events array exceeds the limit of %u events\n",
GAUDI2_EVENT_SIZE);
rc = -EINVAL;
goto free_gaudi2_device;
}
gaudi2->hw_events[gaudi2->num_of_valid_hw_events++] = gaudi2_irq_map_table[i].fc_id;
}
for (i = 0 ; i < MME_NUM_OF_LFSR_SEEDS ; i++)
gaudi2->lfsr_rand_seeds[i] = gaudi2_get_non_zero_random_int();
gaudi2->cpucp_info_get = gaudi2_cpucp_info_get;
hdev->asic_specific = gaudi2;
/* Create DMA pool for small allocations.
* Use DEVICE_CACHE_LINE_SIZE for alignment since the NIC memory-mapped
* PI/CI registers allocated from this pool have this restriction
*/
hdev->dma_pool = dma_pool_create(dev_name(hdev->dev), &hdev->pdev->dev,
GAUDI2_DMA_POOL_BLK_SIZE, DEVICE_CACHE_LINE_SIZE, 0);
if (!hdev->dma_pool) {
dev_err(hdev->dev, "failed to create DMA pool\n");
rc = -ENOMEM;
goto free_gaudi2_device;
}
rc = gaudi2_alloc_cpu_accessible_dma_mem(hdev);
if (rc)
goto free_dma_pool;
hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
if (!hdev->cpu_accessible_dma_pool) {
dev_err(hdev->dev, "Failed to create CPU accessible DMA pool\n");
rc = -ENOMEM;
goto free_cpu_dma_mem;
}
rc = gen_pool_add(hdev->cpu_accessible_dma_pool, (uintptr_t) hdev->cpu_accessible_dma_mem,
HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
if (rc) {
dev_err(hdev->dev, "Failed to add memory to CPU accessible DMA pool\n");
rc = -EFAULT;
goto free_cpu_accessible_dma_pool;
}
gaudi2->virt_msix_db_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, prop->pmmu.page_size,
&gaudi2->virt_msix_db_dma_addr);
if (!gaudi2->virt_msix_db_cpu_addr) {
dev_err(hdev->dev, "Failed to allocate DMA memory for virtual MSI-X doorbell\n");
rc = -ENOMEM;
goto free_cpu_accessible_dma_pool;
}
spin_lock_init(&gaudi2->hw_queues_lock);
gaudi2->scratchpad_kernel_address = hl_asic_dma_alloc_coherent(hdev, PAGE_SIZE,
&gaudi2->scratchpad_bus_address,
GFP_KERNEL | __GFP_ZERO);
if (!gaudi2->scratchpad_kernel_address) {
rc = -ENOMEM;
goto free_virt_msix_db_mem;
}
gaudi2_user_mapped_blocks_init(hdev);
/* Initialize user interrupts */
gaudi2_user_interrupt_setup(hdev);
hdev->supports_coresight = true;
hdev->supports_sync_stream = true;
hdev->supports_cb_mapping = true;
hdev->supports_wait_for_multi_cs = false;
prop->supports_compute_reset = true;
/* Event queue sanity check added in FW version 1.11 */
if (hl_is_fw_sw_ver_below(hdev, 1, 11))
hdev->event_queue.check_eqe_index = false;
else
hdev->event_queue.check_eqe_index = true;
hdev->asic_funcs->set_pci_memory_regions(hdev);
rc = gaudi2_special_blocks_iterator_config(hdev);
if (rc)
goto free_scratchpad_mem;
rc = gaudi2_test_queues_msgs_alloc(hdev);
if (rc)
goto special_blocks_free;
return 0;
special_blocks_free:
gaudi2_special_blocks_iterator_free(hdev);
free_scratchpad_mem:
hl_asic_dma_free_coherent(hdev, PAGE_SIZE, gaudi2->scratchpad_kernel_address,
gaudi2->scratchpad_bus_address);
free_virt_msix_db_mem:
hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
free_cpu_accessible_dma_pool:
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_dma_mem:
hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
hdev->cpu_accessible_dma_address);
free_dma_pool:
dma_pool_destroy(hdev->dma_pool);
free_gaudi2_device:
kfree(gaudi2);
return rc;
}
static int gaudi2_sw_fini(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
gaudi2_test_queues_msgs_free(hdev);
gaudi2_special_blocks_iterator_free(hdev);
hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
hdev->cpu_accessible_dma_address);
hl_asic_dma_free_coherent(hdev, PAGE_SIZE, gaudi2->scratchpad_kernel_address,
gaudi2->scratchpad_bus_address);
dma_pool_destroy(hdev->dma_pool);
kfree(gaudi2);
return 0;
}
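/* Stop the PQF/CQF/CP engines of a QMAN, including its ARC CQF */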
static void gaudi2_stop_qman_common(struct hl_device *hdev, u32 reg_base)
{
WREG32(reg_base + QM_GLBL_CFG1_OFFSET, QM_GLBL_CFG1_PQF_STOP |
QM_GLBL_CFG1_CQF_STOP |
QM_GLBL_CFG1_CP_STOP);
/* stop also the ARC */
WREG32(reg_base + QM_GLBL_CFG2_OFFSET, QM_GLBL_CFG2_ARC_CQF_STOP);
}
static void gaudi2_flush_qman_common(struct hl_device *hdev, u32 reg_base)
{
WREG32(reg_base + QM_GLBL_CFG1_OFFSET, QM_GLBL_CFG1_PQF_FLUSH |
QM_GLBL_CFG1_CQF_FLUSH |
QM_GLBL_CFG1_CP_FLUSH);
}
static void gaudi2_flush_qman_arc_common(struct hl_device *hdev, u32 reg_base)
{
WREG32(reg_base + QM_GLBL_CFG2_OFFSET, QM_GLBL_CFG2_ARC_CQF_FLUSH);
}
/**
 * gaudi2_clear_qm_fence_counters_common - clear the QM's fence counters
 *
 * @hdev: pointer to the habanalabs device structure
 * @queue_id: queue whose fence counters are cleared
 * @skip_fence: if true, set the maximum fence value in all fence counters to
 *              avoid getting stuck on any fence value; otherwise set all fence
 *              counters to 0 (standard clear of fence counters)
 */
static void gaudi2_clear_qm_fence_counters_common(struct hl_device *hdev, u32 queue_id,
bool skip_fence)
{
u32 size, reg_base;
u32 addr, val;
reg_base = gaudi2_qm_blocks_bases[queue_id];
addr = reg_base + QM_CP_FENCE0_CNT_0_OFFSET;
size = mmPDMA0_QM_CP_BARRIER_CFG - mmPDMA0_QM_CP_FENCE0_CNT_0;
/*
 * In case we want to make sure that a QM that is stuck on a fence will
 * be released, we should set the fence counter to a value higher than
 * the one the QM is waiting for. To satisfy a fence of any value we set
 * the maximum fence value in all counters.
 */
val = skip_fence ? U32_MAX : 0;
gaudi2_memset_device_lbw(hdev, addr, size, val);
}
static void gaudi2_qman_manual_flush_common(struct hl_device *hdev, u32 queue_id)
{
u32 reg_base = gaudi2_qm_blocks_bases[queue_id];
gaudi2_clear_qm_fence_counters_common(hdev, queue_id, true);
gaudi2_flush_qman_common(hdev, reg_base);
gaudi2_flush_qman_arc_common(hdev, reg_base);
}
static void gaudi2_stop_dma_qmans(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int dcore, inst;
if (!(gaudi2->hw_cap_initialized & HW_CAP_PDMA_MASK))
goto stop_edma_qmans;
/* Stop CPs of PDMA QMANs */
gaudi2_stop_qman_common(hdev, mmPDMA0_QM_BASE);
gaudi2_stop_qman_common(hdev, mmPDMA1_QM_BASE);
stop_edma_qmans:
if (!(gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK))
return;
for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) {
u8 seq = dcore * NUM_OF_EDMA_PER_DCORE + inst;
u32 qm_base;
if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_EDMA_SHIFT + seq)))
continue;
qm_base = mmDCORE0_EDMA0_QM_BASE + dcore * DCORE_OFFSET +
inst * DCORE_EDMA_OFFSET;
/* Stop CPs of EDMA QMANs */
gaudi2_stop_qman_common(hdev, qm_base);
}
}
}
static void gaudi2_stop_mme_qmans(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 offset, i;
offset = mmDCORE1_MME_QM_BASE - mmDCORE0_MME_QM_BASE;
for (i = 0 ; i < NUM_OF_DCORES ; i++) {
if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_MME_SHIFT + i)))
continue;
gaudi2_stop_qman_common(hdev, mmDCORE0_MME_QM_BASE + (i * offset));
}
}
static void gaudi2_stop_tpc_qmans(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_base;
int i;
if (!(gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK))
return;
for (i = 0 ; i < TPC_ID_SIZE ; i++) {
if (!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(HW_CAP_TPC_SHIFT + i)))
continue;
reg_base = gaudi2_qm_blocks_bases[gaudi2_tpc_id_to_queue_id[i]];
gaudi2_stop_qman_common(hdev, reg_base);
}
}
static void gaudi2_stop_rot_qmans(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_base;
int i;
if (!(gaudi2->hw_cap_initialized & HW_CAP_ROT_MASK))
return;
for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) {
if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_ROT_SHIFT + i)))
continue;
reg_base = gaudi2_qm_blocks_bases[gaudi2_rot_id_to_queue_id[i]];
gaudi2_stop_qman_common(hdev, reg_base);
}
}
static void gaudi2_stop_nic_qmans(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_base, queue_id;
int i;
if (!(gaudi2->nic_hw_cap_initialized & HW_CAP_NIC_MASK))
return;
queue_id = GAUDI2_QUEUE_ID_NIC_0_0;
for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) {
if (!(hdev->nic_ports_mask & BIT(i)))
continue;
reg_base = gaudi2_qm_blocks_bases[queue_id];
gaudi2_stop_qman_common(hdev, reg_base);
}
}
static void gaudi2_stall_dma_common(struct hl_device *hdev, u32 reg_base)
{
u32 reg_val;
reg_val = FIELD_PREP(PDMA0_CORE_CFG_1_HALT_MASK, 0x1);
WREG32(reg_base + DMA_CORE_CFG_1_OFFSET, reg_val);
}
static void gaudi2_dma_stall(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int dcore, inst;
if (!(gaudi2->hw_cap_initialized & HW_CAP_PDMA_MASK))
goto stall_edma;
gaudi2_stall_dma_common(hdev, mmPDMA0_CORE_BASE);
gaudi2_stall_dma_common(hdev, mmPDMA1_CORE_BASE);
stall_edma:
if (!(gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK))
return;
for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) {
u8 seq = dcore * NUM_OF_EDMA_PER_DCORE + inst;
u32 core_base;
if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_EDMA_SHIFT + seq)))
continue;
core_base = mmDCORE0_EDMA0_CORE_BASE + dcore * DCORE_OFFSET +
inst * DCORE_EDMA_OFFSET;
/* Stall CPs of EDMA QMANs */
gaudi2_stall_dma_common(hdev, core_base);
}
}
}
static void gaudi2_mme_stall(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 offset, i;
offset = mmDCORE1_MME_CTRL_LO_QM_STALL - mmDCORE0_MME_CTRL_LO_QM_STALL;
for (i = 0 ; i < NUM_OF_DCORES ; i++)
if (gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_MME_SHIFT + i))
WREG32(mmDCORE0_MME_CTRL_LO_QM_STALL + (i * offset), 1);
}
static void gaudi2_tpc_stall(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_base;
int i;
if (!(gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK))
return;
for (i = 0 ; i < TPC_ID_SIZE ; i++) {
if (!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(HW_CAP_TPC_SHIFT + i)))
continue;
reg_base = gaudi2_tpc_cfg_blocks_bases[i];
WREG32(reg_base + TPC_CFG_STALL_OFFSET, 1);
}
}
static void gaudi2_rotator_stall(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_val;
int i;
if (!(gaudi2->hw_cap_initialized & HW_CAP_ROT_MASK))
return;
reg_val = FIELD_PREP(ROT_MSS_HALT_WBC_MASK, 0x1) |
FIELD_PREP(ROT_MSS_HALT_RSB_MASK, 0x1) |
FIELD_PREP(ROT_MSS_HALT_MRSB_MASK, 0x1);
for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) {
if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_ROT_SHIFT + i)))
continue;
WREG32(mmROT0_MSS_HALT + i * ROT_OFFSET, reg_val);
}
}
static void gaudi2_disable_qman_common(struct hl_device *hdev, u32 reg_base)
{
WREG32(reg_base + QM_GLBL_CFG0_OFFSET, 0);
}
static void gaudi2_disable_dma_qmans(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int dcore, inst;
if (!(gaudi2->hw_cap_initialized & HW_CAP_PDMA_MASK))
goto stop_edma_qmans;
gaudi2_disable_qman_common(hdev, mmPDMA0_QM_BASE);
gaudi2_disable_qman_common(hdev, mmPDMA1_QM_BASE);
stop_edma_qmans:
if (!(gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK))
return;
for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) {
u8 seq = dcore * NUM_OF_EDMA_PER_DCORE + inst;
u32 qm_base;
if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_EDMA_SHIFT + seq)))
continue;
qm_base = mmDCORE0_EDMA0_QM_BASE + dcore * DCORE_OFFSET +
inst * DCORE_EDMA_OFFSET;
/* Disable CPs of EDMA QMANs */
gaudi2_disable_qman_common(hdev, qm_base);
}
}
}
static void gaudi2_disable_mme_qmans(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 offset, i;
offset = mmDCORE1_MME_QM_BASE - mmDCORE0_MME_QM_BASE;
for (i = 0 ; i < NUM_OF_DCORES ; i++)
if (gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_MME_SHIFT + i))
gaudi2_disable_qman_common(hdev, mmDCORE0_MME_QM_BASE + (i * offset));
}
static void gaudi2_disable_tpc_qmans(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_base;
int i;
if (!(gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK))
return;
for (i = 0 ; i < TPC_ID_SIZE ; i++) {
if (!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(HW_CAP_TPC_SHIFT + i)))
continue;
reg_base = gaudi2_qm_blocks_bases[gaudi2_tpc_id_to_queue_id[i]];
gaudi2_disable_qman_common(hdev, reg_base);
}
}
static void gaudi2_disable_rot_qmans(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_base;
int i;
if (!(gaudi2->hw_cap_initialized & HW_CAP_ROT_MASK))
return;
for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) {
if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_ROT_SHIFT + i)))
continue;
reg_base = gaudi2_qm_blocks_bases[gaudi2_rot_id_to_queue_id[i]];
gaudi2_disable_qman_common(hdev, reg_base);
}
}
static void gaudi2_disable_nic_qmans(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_base, queue_id;
int i;
if (!(gaudi2->nic_hw_cap_initialized & HW_CAP_NIC_MASK))
return;
queue_id = GAUDI2_QUEUE_ID_NIC_0_0;
for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) {
if (!(hdev->nic_ports_mask & BIT(i)))
continue;
reg_base = gaudi2_qm_blocks_bases[queue_id];
gaudi2_disable_qman_common(hdev, reg_base);
}
}
static void gaudi2_enable_timestamp(struct hl_device *hdev)
{
/* Disable the timestamp counter */
WREG32(mmPSOC_TIMESTAMP_BASE, 0);
/* Zero the lower/upper parts of the 64-bit counter */
WREG32(mmPSOC_TIMESTAMP_BASE + 0xC, 0);
WREG32(mmPSOC_TIMESTAMP_BASE + 0x8, 0);
/* Enable the counter */
WREG32(mmPSOC_TIMESTAMP_BASE, 1);
}
static void gaudi2_disable_timestamp(struct hl_device *hdev)
{
/* Disable the timestamp counter */
WREG32(mmPSOC_TIMESTAMP_BASE, 0);
}
static const char *gaudi2_irq_name(u16 irq_number)
{
switch (irq_number) {
case GAUDI2_IRQ_NUM_EVENT_QUEUE:
return "gaudi2 cpu eq";
case GAUDI2_IRQ_NUM_COMPLETION:
return "gaudi2 completion";
case GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM ... GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM:
return gaudi2_vdec_irq_name[irq_number - GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM];
case GAUDI2_IRQ_NUM_TPC_ASSERT:
return "gaudi2 tpc assert";
case GAUDI2_IRQ_NUM_UNEXPECTED_ERROR:
return "gaudi2 unexpected error";
case GAUDI2_IRQ_NUM_USER_FIRST ... GAUDI2_IRQ_NUM_USER_LAST:
return "gaudi2 user completion";
default:
return "invalid";
}
}
static void gaudi2_dec_disable_msix(struct hl_device *hdev, u32 max_irq_num)
{
int i, irq, relative_idx;
struct hl_dec *dec;
for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM ; i < max_irq_num ; i++) {
irq = pci_irq_vector(hdev->pdev, i);
relative_idx = i - GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM;
dec = hdev->dec + relative_idx / 2;
/* We pass different structures depending on the irq handler. For the abnormal
* interrupt we pass hl_dec and for the regular interrupt we pass the relevant
* user_interrupt entry
*/
free_irq(irq, ((relative_idx % 2) ?
(void *) dec :
(void *) &hdev->user_interrupt[dec->core_id]));
}
}
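/*
 * Request the decoder MSI-X interrupts: abnormal interrupts get a regular
 * handler with the hl_dec as cookie, normal interrupts get a threaded handler
 * with the matching user_interrupt entry as cookie.
 */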
static int gaudi2_dec_enable_msix(struct hl_device *hdev)
{
int rc, i, irq_init_cnt, irq, relative_idx;
struct hl_dec *dec;
for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, irq_init_cnt = 0;
i <= GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM;
i++, irq_init_cnt++) {
irq = pci_irq_vector(hdev->pdev, i);
relative_idx = i - GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM;
/* We pass different structures depending on the irq handler. For the abnormal
* interrupt we pass hl_dec and for the regular interrupt we pass the relevant
* user_interrupt entry
*
* TODO: change the dec abnrm to threaded irq
*/
dec = hdev->dec + relative_idx / 2;
if (relative_idx % 2) {
rc = request_irq(irq, hl_irq_handler_dec_abnrm, 0,
gaudi2_irq_name(i), (void *) dec);
} else {
rc = request_threaded_irq(irq, hl_irq_handler_user_interrupt,
hl_irq_user_interrupt_thread_handler, IRQF_ONESHOT,
gaudi2_irq_name(i),
(void *) &hdev->user_interrupt[dec->core_id]);
}
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_dec_irqs;
}
}
return 0;
free_dec_irqs:
gaudi2_dec_disable_msix(hdev, (GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM + irq_init_cnt));
return rc;
}
static int gaudi2_enable_msix(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int rc, irq, i, j, user_irq_init_cnt;
struct hl_cq *cq;
if (gaudi2->hw_cap_initialized & HW_CAP_MSIX)
return 0;
rc = pci_alloc_irq_vectors(hdev->pdev, GAUDI2_MSIX_ENTRIES, GAUDI2_MSIX_ENTRIES,
PCI_IRQ_MSIX);
if (rc < 0) {
dev_err(hdev->dev, "MSI-X: Failed to enable support -- %d/%d\n",
GAUDI2_MSIX_ENTRIES, rc);
return rc;
}
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION);
cq = &hdev->completion_queue[GAUDI2_RESERVED_CQ_CS_COMPLETION];
rc = request_irq(irq, hl_irq_handler_cq, 0, gaudi2_irq_name(GAUDI2_IRQ_NUM_COMPLETION), cq);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_irq_vectors;
}
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE);
rc = request_irq(irq, hl_irq_handler_eq, 0, gaudi2_irq_name(GAUDI2_IRQ_NUM_EVENT_QUEUE),
&hdev->event_queue);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_completion_irq;
}
rc = gaudi2_dec_enable_msix(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to enable decoder IRQ");
goto free_event_irq;
}
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT);
rc = request_threaded_irq(irq, hl_irq_handler_user_interrupt,
hl_irq_user_interrupt_thread_handler, IRQF_ONESHOT,
gaudi2_irq_name(GAUDI2_IRQ_NUM_TPC_ASSERT), &hdev->tpc_interrupt);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_dec_irq;
}
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
rc = request_irq(irq, hl_irq_handler_user_interrupt, 0,
gaudi2_irq_name(GAUDI2_IRQ_NUM_UNEXPECTED_ERROR),
&hdev->unexpected_error_interrupt);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_tpc_irq;
}
for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, user_irq_init_cnt = 0;
user_irq_init_cnt < prop->user_interrupt_count;
i++, j++, user_irq_init_cnt++) {
irq = pci_irq_vector(hdev->pdev, i);
rc = request_threaded_irq(irq, hl_irq_handler_user_interrupt,
hl_irq_user_interrupt_thread_handler, IRQF_ONESHOT,
gaudi2_irq_name(i), &hdev->user_interrupt[j]);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_user_irq;
}
}
gaudi2->hw_cap_initialized |= HW_CAP_MSIX;
return 0;
free_user_irq:
for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count;
i < GAUDI2_IRQ_NUM_USER_FIRST + user_irq_init_cnt ; i++, j++) {
irq = pci_irq_vector(hdev->pdev, i);
free_irq(irq, &hdev->user_interrupt[j]);
}
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
free_irq(irq, &hdev->unexpected_error_interrupt);
free_tpc_irq:
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT);
free_irq(irq, &hdev->tpc_interrupt);
free_dec_irq:
gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_DEC_LAST + 1);
free_event_irq:
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE);
free_irq(irq, cq);
free_completion_irq:
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION);
free_irq(irq, cq);
free_irq_vectors:
pci_free_irq_vectors(hdev->pdev);
return rc;
}
static void gaudi2_sync_irqs(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int i, j;
int irq;
if (!(gaudi2->hw_cap_initialized & HW_CAP_MSIX))
return;
/* Wait for all pending IRQs to be finished */
synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION));
for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM ; i <= GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM ; i++) {
irq = pci_irq_vector(hdev->pdev, i);
synchronize_irq(irq);
}
synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT));
synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR));
for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = 0 ; j < hdev->asic_prop.user_interrupt_count;
i++, j++) {
irq = pci_irq_vector(hdev->pdev, i);
synchronize_irq(irq);
}
synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE));
}
static void gaudi2_disable_msix(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
struct hl_cq *cq;
int irq, i, j, k;
if (!(gaudi2->hw_cap_initialized & HW_CAP_MSIX))
return;
gaudi2_sync_irqs(hdev);
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE);
free_irq(irq, &hdev->event_queue);
gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM + 1);
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT);
free_irq(irq, &hdev->tpc_interrupt);
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
free_irq(irq, &hdev->unexpected_error_interrupt);
for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, k = 0;
k < hdev->asic_prop.user_interrupt_count ; i++, j++, k++) {
irq = pci_irq_vector(hdev->pdev, i);
free_irq(irq, &hdev->user_interrupt[j]);
}
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION);
cq = &hdev->completion_queue[GAUDI2_RESERVED_CQ_CS_COMPLETION];
free_irq(irq, cq);
pci_free_irq_vectors(hdev->pdev);
gaudi2->hw_cap_initialized &= ~HW_CAP_MSIX;
}
static void gaudi2_stop_dcore_dec(struct hl_device *hdev, int dcore_id)
{
u32 reg_val = FIELD_PREP(DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_STOP_MASK, 0x1);
u32 graceful_pend_mask = DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_PEND_MASK;
u32 timeout_usec, dec_id, dec_bit, offset, graceful;
int rc;
if (hdev->pldm)
timeout_usec = GAUDI2_PLDM_VDEC_TIMEOUT_USEC;
else
timeout_usec = GAUDI2_VDEC_TIMEOUT_USEC;
for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) {
dec_bit = dcore_id * NUM_OF_DEC_PER_DCORE + dec_id;
if (!(hdev->asic_prop.decoder_enabled_mask & BIT(dec_bit)))
continue;
offset = dcore_id * DCORE_OFFSET + dec_id * DCORE_VDEC_OFFSET;
WREG32(mmDCORE0_DEC0_CMD_SWREG16 + offset, 0);
WREG32(mmDCORE0_VDEC0_BRDG_CTRL_GRACEFUL + offset, reg_val);
/* Wait until all traffic from the decoder stops
 * before applying core reset.
 */
rc = hl_poll_timeout(
hdev,
mmDCORE0_VDEC0_BRDG_CTRL_GRACEFUL + offset,
graceful,
(graceful & graceful_pend_mask),
100,
timeout_usec);
if (rc)
dev_err(hdev->dev,
"Failed to stop traffic from DCORE%d Decoder %d\n",
dcore_id, dec_id);
}
}
static void gaudi2_stop_pcie_dec(struct hl_device *hdev)
{
u32 reg_val = FIELD_PREP(DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_STOP_MASK, 0x1);
u32 graceful_pend_mask = PCIE_VDEC0_BRDG_CTRL_GRACEFUL_PEND_MASK;
u32 timeout_usec, dec_id, dec_bit, offset, graceful;
int rc;
if (hdev->pldm)
timeout_usec = GAUDI2_PLDM_VDEC_TIMEOUT_USEC;
else
timeout_usec = GAUDI2_VDEC_TIMEOUT_USEC;
for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) {
dec_bit = PCIE_DEC_SHIFT + dec_id;
if (!(hdev->asic_prop.decoder_enabled_mask & BIT(dec_bit)))
continue;
offset = dec_id * PCIE_VDEC_OFFSET;
WREG32(mmPCIE_DEC0_CMD_SWREG16 + offset, 0);
WREG32(mmPCIE_VDEC0_BRDG_CTRL_GRACEFUL + offset, reg_val);
/* Wait until all traffic from the decoder stops
 * before applying core reset.
 */
rc = hl_poll_timeout(
hdev,
mmPCIE_VDEC0_BRDG_CTRL_GRACEFUL + offset,
graceful,
(graceful & graceful_pend_mask),
100,
timeout_usec);
if (rc)
dev_err(hdev->dev,
"Failed to stop traffic from PCIe Decoder %d\n",
dec_id);
}
}
static void gaudi2_stop_dec(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int dcore_id;
if ((gaudi2->dec_hw_cap_initialized & HW_CAP_DEC_MASK) == 0)
return;
for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++)
gaudi2_stop_dcore_dec(hdev, dcore_id);
gaudi2_stop_pcie_dec(hdev);
}
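/* Request RUN or HALT for a single ARC through its AUX RUN/HALT request register */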
static void gaudi2_set_arc_running_mode(struct hl_device *hdev, u32 cpu_id, u32 run_mode)
{
u32 reg_base, reg_val;
reg_base = gaudi2_arc_blocks_bases[cpu_id];
if (run_mode == HL_ENGINE_CORE_RUN)
reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_MASK, 1);
else
reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_HALT_REQ_MASK, 1);
WREG32(reg_base + ARC_HALT_REQ_OFFSET, reg_val);
}
static void gaudi2_halt_arcs(struct hl_device *hdev)
{
u16 arc_id;
for (arc_id = CPU_ID_SCHED_ARC0; arc_id < CPU_ID_MAX; arc_id++) {
if (gaudi2_is_arc_enabled(hdev, arc_id))
gaudi2_set_arc_running_mode(hdev, arc_id, HL_ENGINE_CORE_HALT);
}
}
static int gaudi2_verify_arc_running_mode(struct hl_device *hdev, u32 cpu_id, u32 run_mode)
{
int rc;
u32 reg_base, val, ack_mask, timeout_usec = 100000;
if (hdev->pldm)
timeout_usec *= 100;
reg_base = gaudi2_arc_blocks_bases[cpu_id];
if (run_mode == HL_ENGINE_CORE_RUN)
ack_mask = ARC_FARM_ARC0_AUX_RUN_HALT_ACK_RUN_ACK_MASK;
else
ack_mask = ARC_FARM_ARC0_AUX_RUN_HALT_ACK_HALT_ACK_MASK;
rc = hl_poll_timeout(hdev, reg_base + ARC_HALT_ACK_OFFSET,
val, ((val & ack_mask) == ack_mask),
1000, timeout_usec);
if (!rc) {
/* Clear */
val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_MASK, 0);
WREG32(reg_base + ARC_HALT_REQ_OFFSET, val);
}
return rc;
}
static void gaudi2_reset_arcs(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u16 arc_id;
if (!gaudi2)
return;
for (arc_id = CPU_ID_SCHED_ARC0; arc_id < CPU_ID_MAX; arc_id++)
if (gaudi2_is_arc_enabled(hdev, arc_id))
gaudi2_clr_arc_id_cap(hdev, arc_id);
}
static void gaudi2_nic_qmans_manual_flush(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 queue_id;
int i;
if (!(gaudi2->nic_hw_cap_initialized & HW_CAP_NIC_MASK))
return;
queue_id = GAUDI2_QUEUE_ID_NIC_0_0;
for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) {
if (!(hdev->nic_ports_mask & BIT(i)))
continue;
gaudi2_qman_manual_flush_common(hdev, queue_id);
}
}
static int gaudi2_set_engine_cores(struct hl_device *hdev, u32 *core_ids,
u32 num_cores, u32 core_command)
{
int i, rc;
for (i = 0 ; i < num_cores ; i++) {
if (gaudi2_is_arc_enabled(hdev, core_ids[i]))
gaudi2_set_arc_running_mode(hdev, core_ids[i], core_command);
}
for (i = 0 ; i < num_cores ; i++) {
if (gaudi2_is_arc_enabled(hdev, core_ids[i])) {
rc = gaudi2_verify_arc_running_mode(hdev, core_ids[i], core_command);
if (rc) {
dev_err(hdev->dev, "failed to %s arc: %d\n",
(core_command == HL_ENGINE_CORE_HALT) ?
"HALT" : "RUN", core_ids[i]);
return -1;
}
}
}
return 0;
}
static int gaudi2_set_tpc_engine_mode(struct hl_device *hdev, u32 engine_id, u32 engine_command)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_base, reg_addr, reg_val, tpc_id;
if (!(gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK))
return 0;
tpc_id = gaudi2_tpc_engine_id_to_tpc_id[engine_id];
if (!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(HW_CAP_TPC_SHIFT + tpc_id)))
return 0;
reg_base = gaudi2_tpc_cfg_blocks_bases[tpc_id];
reg_addr = reg_base + TPC_CFG_STALL_OFFSET;
reg_val = FIELD_PREP(DCORE0_TPC0_CFG_TPC_STALL_V_MASK,
(engine_command == HL_ENGINE_STALL) ? 1 : 0);
WREG32(reg_addr, reg_val);
if (engine_command == HL_ENGINE_RESUME) {
reg_base = gaudi2_tpc_eml_cfg_blocks_bases[tpc_id];
reg_addr = reg_base + TPC_EML_CFG_DBG_CNT_OFFSET;
RMWREG32(reg_addr, 0x1, DCORE0_TPC0_EML_CFG_DBG_CNT_DBG_EXIT_MASK);
}
return 0;
}
static int gaudi2_set_mme_engine_mode(struct hl_device *hdev, u32 engine_id, u32 engine_command)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_base, reg_addr, reg_val, mme_id;
mme_id = gaudi2_mme_engine_id_to_mme_id[engine_id];
if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_MME_SHIFT + mme_id)))
return 0;
reg_base = gaudi2_mme_ctrl_lo_blocks_bases[mme_id];
reg_addr = reg_base + MME_CTRL_LO_QM_STALL_OFFSET;
reg_val = FIELD_PREP(DCORE0_MME_CTRL_LO_QM_STALL_V_MASK,
(engine_command == HL_ENGINE_STALL) ? 1 : 0);
WREG32(reg_addr, reg_val);
return 0;
}
static int gaudi2_set_edma_engine_mode(struct hl_device *hdev, u32 engine_id, u32 engine_command)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_base, reg_addr, reg_val, edma_id;
if (!(gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK))
return 0;
edma_id = gaudi2_edma_engine_id_to_edma_id[engine_id];
if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_EDMA_SHIFT + edma_id)))
return 0;
reg_base = gaudi2_dma_core_blocks_bases[edma_id];
reg_addr = reg_base + EDMA_CORE_CFG_STALL_OFFSET;
reg_val = FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_HALT_MASK,
(engine_command == HL_ENGINE_STALL) ? 1 : 0);
WREG32(reg_addr, reg_val);
if (engine_command == HL_ENGINE_STALL) {
reg_val = FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_HALT_MASK, 0x1) |
FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_FLUSH_MASK, 0x1);
WREG32(reg_addr, reg_val);
}
return 0;
}
static int gaudi2_set_engine_modes(struct hl_device *hdev,
u32 *engine_ids, u32 num_engines, u32 engine_command)
{
int i, rc;
for (i = 0 ; i < num_engines ; ++i) {
switch (engine_ids[i]) {
case GAUDI2_DCORE0_ENGINE_ID_TPC_0 ... GAUDI2_DCORE0_ENGINE_ID_TPC_5:
case GAUDI2_DCORE1_ENGINE_ID_TPC_0 ... GAUDI2_DCORE1_ENGINE_ID_TPC_5:
case GAUDI2_DCORE2_ENGINE_ID_TPC_0 ... GAUDI2_DCORE2_ENGINE_ID_TPC_5:
case GAUDI2_DCORE3_ENGINE_ID_TPC_0 ... GAUDI2_DCORE3_ENGINE_ID_TPC_5:
rc = gaudi2_set_tpc_engine_mode(hdev, engine_ids[i], engine_command);
if (rc)
return rc;
break;
case GAUDI2_DCORE0_ENGINE_ID_MME:
case GAUDI2_DCORE1_ENGINE_ID_MME:
case GAUDI2_DCORE2_ENGINE_ID_MME:
case GAUDI2_DCORE3_ENGINE_ID_MME:
rc = gaudi2_set_mme_engine_mode(hdev, engine_ids[i], engine_command);
if (rc)
return rc;
break;
case GAUDI2_DCORE0_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE0_ENGINE_ID_EDMA_1:
case GAUDI2_DCORE1_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE1_ENGINE_ID_EDMA_1:
case GAUDI2_DCORE2_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE2_ENGINE_ID_EDMA_1:
case GAUDI2_DCORE3_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE3_ENGINE_ID_EDMA_1:
rc = gaudi2_set_edma_engine_mode(hdev, engine_ids[i], engine_command);
if (rc)
return rc;
break;
default:
dev_err(hdev->dev, "Invalid engine ID %u\n", engine_ids[i]);
return -EINVAL;
}
}
return 0;
}
static int gaudi2_set_engines(struct hl_device *hdev, u32 *engine_ids,
u32 num_engines, u32 engine_command)
{
switch (engine_command) {
case HL_ENGINE_CORE_HALT:
case HL_ENGINE_CORE_RUN:
return gaudi2_set_engine_cores(hdev, engine_ids, num_engines, engine_command);
case HL_ENGINE_STALL:
case HL_ENGINE_RESUME:
return gaudi2_set_engine_modes(hdev, engine_ids, num_engines, engine_command);
default:
dev_err(hdev->dev, "failed to execute command id %u\n", engine_command);
return -EINVAL;
}
}
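/*
 * Halt the compute and NIC engines in stages: stop the QMANs, halt the ARCs,
 * stall the engine cores, stop the decoders, and finally disable the QMANs
 * and the timestamp counter. On FW reset the engine handling is skipped; on
 * hard reset MSI-X is torn down, otherwise pending IRQs are only synced.
 */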
static void gaudi2_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
u32 wait_timeout_ms;
if (hdev->pldm)
wait_timeout_ms = GAUDI2_PLDM_RESET_WAIT_MSEC;
else
wait_timeout_ms = GAUDI2_RESET_WAIT_MSEC;
if (fw_reset)
goto skip_engines;
gaudi2_stop_dma_qmans(hdev);
gaudi2_stop_mme_qmans(hdev);
gaudi2_stop_tpc_qmans(hdev);
gaudi2_stop_rot_qmans(hdev);
gaudi2_stop_nic_qmans(hdev);
msleep(wait_timeout_ms);
gaudi2_halt_arcs(hdev);
gaudi2_dma_stall(hdev);
gaudi2_mme_stall(hdev);
gaudi2_tpc_stall(hdev);
gaudi2_rotator_stall(hdev);
msleep(wait_timeout_ms);
gaudi2_stop_dec(hdev);
/*
 * in case of soft reset do a manual flush of the QMANs (currently done
 * only for the NIC QMANs)
 */
if (!hard_reset)
gaudi2_nic_qmans_manual_flush(hdev);
gaudi2_disable_dma_qmans(hdev);
gaudi2_disable_mme_qmans(hdev);
gaudi2_disable_tpc_qmans(hdev);
gaudi2_disable_rot_qmans(hdev);
gaudi2_disable_nic_qmans(hdev);
gaudi2_disable_timestamp(hdev);
skip_engines:
if (hard_reset) {
gaudi2_disable_msix(hdev);
return;
}
gaudi2_sync_irqs(hdev);
}
static void gaudi2_init_firmware_preload_params(struct hl_device *hdev)
{
struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
pre_fw_load->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
pre_fw_load->sts_boot_dev_sts0_reg = mmCPU_BOOT_DEV_STS0;
pre_fw_load->sts_boot_dev_sts1_reg = mmCPU_BOOT_DEV_STS1;
pre_fw_load->boot_err0_reg = mmCPU_BOOT_ERR0;
pre_fw_load->boot_err1_reg = mmCPU_BOOT_ERR1;
pre_fw_load->wait_for_preboot_timeout = GAUDI2_PREBOOT_REQ_TIMEOUT_USEC;
}
static void gaudi2_init_firmware_loader(struct hl_device *hdev)
{
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
struct dynamic_fw_load_mgr *dynamic_loader;
struct cpu_dyn_regs *dyn_regs;
/* fill common fields */
fw_loader->fw_comp_loaded = FW_TYPE_NONE;
fw_loader->boot_fit_img.image_name = GAUDI2_BOOT_FIT_FILE;
fw_loader->linux_img.image_name = GAUDI2_LINUX_FW_FILE;
fw_loader->boot_fit_timeout = GAUDI2_BOOT_FIT_REQ_TIMEOUT_USEC;
fw_loader->skip_bmc = false;
fw_loader->sram_bar_id = SRAM_CFG_BAR_ID;
fw_loader->dram_bar_id = DRAM_BAR_ID;
fw_loader->cpu_timeout = GAUDI2_CPU_TIMEOUT_USEC;
/* here we update the initial values for a few specific dynamic regs (before
 * reading the first descriptor from the FW those values have to be
 * hard-coded). In later stages of the protocol those values will be
 * updated automatically by reading the FW descriptor, so the data there
 * will always be up-to-date
 */
dynamic_loader = &hdev->fw_loader.dynamic_loader;
dyn_regs = &dynamic_loader->comm_desc.cpu_dyn_regs;
dyn_regs->kmd_msg_to_cpu = cpu_to_le32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU);
dyn_regs->cpu_cmd_status_to_host = cpu_to_le32(mmCPU_CMD_STATUS_TO_HOST);
dynamic_loader->wait_for_bl_timeout = GAUDI2_WAIT_FOR_BL_TIMEOUT_USEC;
}
static int gaudi2_init_cpu(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int rc;
if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
return 0;
if (gaudi2->hw_cap_initialized & HW_CAP_CPU)
return 0;
rc = hl_fw_init_cpu(hdev);
if (rc)
return rc;
gaudi2->hw_cap_initialized |= HW_CAP_CPU;
return 0;
}
static int gaudi2_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
{
struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
struct cpu_dyn_regs *dyn_regs;
struct hl_eq *eq;
u32 status;
int err;
if (!hdev->cpu_queues_enable)
return 0;
if (gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)
return 0;
eq = &hdev->event_queue;
dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
WREG32(mmCPU_IF_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
WREG32(mmCPU_IF_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
WREG32(mmCPU_IF_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
WREG32(mmCPU_IF_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
WREG32(mmCPU_IF_CQ_BASE_ADDR_LOW, lower_32_bits(hdev->cpu_accessible_dma_address));
WREG32(mmCPU_IF_CQ_BASE_ADDR_HIGH, upper_32_bits(hdev->cpu_accessible_dma_address));
WREG32(mmCPU_IF_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
WREG32(mmCPU_IF_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
WREG32(mmCPU_IF_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
/* Used for EQ CI */
WREG32(mmCPU_IF_EQ_RD_OFFS, 0);
WREG32(mmCPU_IF_PF_PQ_PI, 0);
WREG32(mmCPU_IF_QUEUE_INIT, PQ_INIT_STATUS_READY_FOR_CP);
/* Let the ARC know we are ready as it is now handling those queues */
WREG32(le32_to_cpu(dyn_regs->gic_host_pi_upd_irq),
gaudi2_irq_map_table[GAUDI2_EVENT_CPU_PI_UPDATE].cpu_id);
err = hl_poll_timeout(
hdev,
mmCPU_IF_QUEUE_INIT,
status,
(status == PQ_INIT_STATUS_READY_FOR_HOST),
1000,
cpu_timeout);
if (err) {
dev_err(hdev->dev, "Failed to communicate with device CPU (timeout)\n");
return -EIO;
}
/* update FW application security bits */
if (prop->fw_cpu_boot_dev_sts0_valid)
prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0);
if (prop->fw_cpu_boot_dev_sts1_valid)
prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1);
gaudi2->hw_cap_initialized |= HW_CAP_CPU_Q;
return 0;
}
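/* Program the base address and size (log2 of the queue length) of each of the
* QMAN's PQs and reset their PI/CI.
*/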
static void gaudi2_init_qman_pq(struct hl_device *hdev, u32 reg_base,
u32 queue_id_base)
{
struct hl_hw_queue *q;
u32 pq_id, pq_offset;
for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++) {
q = &hdev->kernel_queues[queue_id_base + pq_id];
pq_offset = pq_id * 4;
WREG32(reg_base + QM_PQ_BASE_LO_0_OFFSET + pq_offset,
lower_32_bits(q->bus_address));
WREG32(reg_base + QM_PQ_BASE_HI_0_OFFSET + pq_offset,
upper_32_bits(q->bus_address));
WREG32(reg_base + QM_PQ_SIZE_0_OFFSET + pq_offset, ilog2(HL_QUEUE_LENGTH));
WREG32(reg_base + QM_PQ_PI_0_OFFSET + pq_offset, 0);
WREG32(reg_base + QM_PQ_CI_0_OFFSET + pq_offset, 0);
}
}
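/* Point each CP's MSG_BASE0/1 address registers at the sync manager monitor
* payload and SOB blocks, and allow the QMAN to accept work from the ARC CQF.
*/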
static void gaudi2_init_qman_cp(struct hl_device *hdev, u32 reg_base)
{
u32 cp_id, cp_offset, mtr_base_lo, mtr_base_hi, so_base_lo, so_base_hi;
mtr_base_lo = lower_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
mtr_base_hi = upper_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
so_base_lo = lower_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0);
so_base_hi = upper_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0);
for (cp_id = 0 ; cp_id < NUM_OF_CP_PER_QMAN; cp_id++) {
cp_offset = cp_id * 4;
WREG32(reg_base + QM_CP_MSG_BASE0_ADDR_LO_0_OFFSET + cp_offset, mtr_base_lo);
WREG32(reg_base + QM_CP_MSG_BASE0_ADDR_HI_0_OFFSET + cp_offset, mtr_base_hi);
WREG32(reg_base + QM_CP_MSG_BASE1_ADDR_LO_0_OFFSET + cp_offset, so_base_lo);
WREG32(reg_base + QM_CP_MSG_BASE1_ADDR_HI_0_OFFSET + cp_offset, so_base_hi);
}
/* allow QMANs to accept work from ARC CQF */
WREG32(reg_base + QM_CP_CFG_OFFSET, FIELD_PREP(PDMA0_QM_CP_CFG_SWITCH_EN_MASK, 0x1));
}
static void gaudi2_init_qman_pqc(struct hl_device *hdev, u32 reg_base,
u32 queue_id_base)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 pq_id, pq_offset, so_base_lo, so_base_hi;
so_base_lo = lower_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0);
so_base_hi = upper_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0);
for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++) {
pq_offset = pq_id * 4;
/* Configure QMAN HBW to scratchpad as it is not needed */
WREG32(reg_base + QM_PQC_HBW_BASE_LO_0_OFFSET + pq_offset,
lower_32_bits(gaudi2->scratchpad_bus_address));
WREG32(reg_base + QM_PQC_HBW_BASE_HI_0_OFFSET + pq_offset,
upper_32_bits(gaudi2->scratchpad_bus_address));
WREG32(reg_base + QM_PQC_SIZE_0_OFFSET + pq_offset,
ilog2(PAGE_SIZE / sizeof(struct hl_cq_entry)));
WREG32(reg_base + QM_PQC_PI_0_OFFSET + pq_offset, 0);
WREG32(reg_base + QM_PQC_LBW_WDATA_0_OFFSET + pq_offset, QM_PQC_LBW_WDATA);
WREG32(reg_base + QM_PQC_LBW_BASE_LO_0_OFFSET + pq_offset, so_base_lo);
WREG32(reg_base + QM_PQC_LBW_BASE_HI_0_OFFSET + pq_offset, so_base_hi);
}
/* Enable QMAN H/W completion */
WREG32(reg_base + QM_PQC_CFG_OFFSET, 1 << PDMA0_QM_PQC_CFG_EN_SHIFT);
}
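/* Return the GIC IRQ control register offset (taken from the FW dynamic regs)
* that matches the engine type of the given queue ID base.
*/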
static u32 gaudi2_get_dyn_sp_reg(struct hl_device *hdev, u32 queue_id_base)
{
struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 sp_reg_addr;
switch (queue_id_base) {
case GAUDI2_QUEUE_ID_PDMA_0_0...GAUDI2_QUEUE_ID_PDMA_1_3:
fallthrough;
case GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3:
fallthrough;
case GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3:
fallthrough;
case GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3:
fallthrough;
case GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3:
sp_reg_addr = le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl);
break;
case GAUDI2_QUEUE_ID_DCORE0_MME_0_0...GAUDI2_QUEUE_ID_DCORE0_MME_0_3:
fallthrough;
case GAUDI2_QUEUE_ID_DCORE1_MME_0_0...GAUDI2_QUEUE_ID_DCORE1_MME_0_3:
fallthrough;
case GAUDI2_QUEUE_ID_DCORE2_MME_0_0...GAUDI2_QUEUE_ID_DCORE2_MME_0_3:
fallthrough;
case GAUDI2_QUEUE_ID_DCORE3_MME_0_0...GAUDI2_QUEUE_ID_DCORE3_MME_0_3:
sp_reg_addr = le32_to_cpu(dyn_regs->gic_mme_qm_irq_ctrl);
break;
case GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE0_TPC_6_3:
fallthrough;
case GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE1_TPC_5_3:
fallthrough;
case GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE2_TPC_5_3:
fallthrough;
case GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE3_TPC_5_3:
sp_reg_addr = le32_to_cpu(dyn_regs->gic_tpc_qm_irq_ctrl);
break;
case GAUDI2_QUEUE_ID_ROT_0_0...GAUDI2_QUEUE_ID_ROT_1_3:
sp_reg_addr = le32_to_cpu(dyn_regs->gic_rot_qm_irq_ctrl);
break;
case GAUDI2_QUEUE_ID_NIC_0_0...GAUDI2_QUEUE_ID_NIC_23_3:
sp_reg_addr = le32_to_cpu(dyn_regs->gic_nic_qm_irq_ctrl);
break;
default:
dev_err(hdev->dev, "Unexpected h/w queue %d\n", queue_id_base);
return 0;
}
return sp_reg_addr;
}
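/* Common QMAN configuration: protection bits, error message address/data
* towards the FW GIC, arbiter watchdog and the global enable (the PDMA QMANs
* use dedicated enable values since some of their CPs are reserved for the ARC).
*/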
static void gaudi2_init_qman_common(struct hl_device *hdev, u32 reg_base,
u32 queue_id_base)
{
u32 glbl_prot = QMAN_MAKE_TRUSTED, irq_handler_offset;
int map_table_entry;
WREG32(reg_base + QM_GLBL_PROT_OFFSET, glbl_prot);
irq_handler_offset = gaudi2_get_dyn_sp_reg(hdev, queue_id_base);
WREG32(reg_base + QM_GLBL_ERR_ADDR_LO_OFFSET, lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(reg_base + QM_GLBL_ERR_ADDR_HI_OFFSET, upper_32_bits(CFG_BASE + irq_handler_offset));
map_table_entry = gaudi2_qman_async_event_id[queue_id_base];
WREG32(reg_base + QM_GLBL_ERR_WDATA_OFFSET,
gaudi2_irq_map_table[map_table_entry].cpu_id);
WREG32(reg_base + QM_ARB_ERR_MSG_EN_OFFSET, QM_ARB_ERR_MSG_EN_MASK);
WREG32(reg_base + QM_ARB_SLV_CHOISE_WDT_OFFSET, GAUDI2_ARB_WDT_TIMEOUT);
WREG32(reg_base + QM_GLBL_CFG1_OFFSET, 0);
WREG32(reg_base + QM_GLBL_CFG2_OFFSET, 0);
/* Enable the QMAN channel.
* PDMA QMAN configuration is different, as we do not allow user to
* access some of the CPs.
* PDMA0: CP2/3 are reserved for the ARC usage.
* PDMA1: CP1/2/3 are reserved for the ARC usage.
*/
if (reg_base == gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_1_0])
WREG32(reg_base + QM_GLBL_CFG0_OFFSET, PDMA1_QMAN_ENABLE);
else if (reg_base == gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_0_0])
WREG32(reg_base + QM_GLBL_CFG0_OFFSET, PDMA0_QMAN_ENABLE);
else
WREG32(reg_base + QM_GLBL_CFG0_OFFSET, QMAN_ENABLE);
}
static void gaudi2_init_qman(struct hl_device *hdev, u32 reg_base,
u32 queue_id_base)
{
u32 pq_id;
for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++)
hdev->kernel_queues[queue_id_base + pq_id].cq_id = GAUDI2_RESERVED_CQ_CS_COMPLETION;
gaudi2_init_qman_pq(hdev, reg_base, queue_id_base);
gaudi2_init_qman_cp(hdev, reg_base);
gaudi2_init_qman_pqc(hdev, reg_base, queue_id_base);
gaudi2_init_qman_common(hdev, reg_base, queue_id_base);
}
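/* Configure a DMA core: protection bits, error message address/data towards
* the FW GIC and the channel enable bit.
*/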
static void gaudi2_init_dma_core(struct hl_device *hdev, u32 reg_base,
u32 dma_core_id, bool is_secure)
{
u32 prot, irq_handler_offset;
struct cpu_dyn_regs *dyn_regs;
int map_table_entry;
prot = 1 << ARC_FARM_KDMA_PROT_ERR_VAL_SHIFT;
if (is_secure)
prot |= 1 << ARC_FARM_KDMA_PROT_VAL_SHIFT;
WREG32(reg_base + DMA_CORE_PROT_OFFSET, prot);
dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
irq_handler_offset = le32_to_cpu(dyn_regs->gic_dma_core_irq_ctrl);
WREG32(reg_base + DMA_CORE_ERRMSG_ADDR_LO_OFFSET,
lower_32_bits(CFG_BASE + irq_handler_offset));
WREG32(reg_base + DMA_CORE_ERRMSG_ADDR_HI_OFFSET,
upper_32_bits(CFG_BASE + irq_handler_offset));
map_table_entry = gaudi2_dma_core_async_event_id[dma_core_id];
WREG32(reg_base + DMA_CORE_ERRMSG_WDATA_OFFSET,
gaudi2_irq_map_table[map_table_entry].cpu_id);
/* Enable the DMA channel */
WREG32(reg_base + DMA_CORE_CFG_0_OFFSET, 1 << ARC_FARM_KDMA_CFG_0_EN_SHIFT);
}
static void gaudi2_init_kdma(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_base;
if ((gaudi2->hw_cap_initialized & HW_CAP_KDMA) == HW_CAP_KDMA)
return;
reg_base = gaudi2_dma_core_blocks_bases[DMA_CORE_ID_KDMA];
gaudi2_init_dma_core(hdev, reg_base, DMA_CORE_ID_KDMA, true);
gaudi2->hw_cap_initialized |= HW_CAP_KDMA;
}
static void gaudi2_init_pdma(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_base;
if ((gaudi2->hw_cap_initialized & HW_CAP_PDMA_MASK) == HW_CAP_PDMA_MASK)
return;
reg_base = gaudi2_dma_core_blocks_bases[DMA_CORE_ID_PDMA0];
gaudi2_init_dma_core(hdev, reg_base, DMA_CORE_ID_PDMA0, false);
reg_base = gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_0_0];
gaudi2_init_qman(hdev, reg_base, GAUDI2_QUEUE_ID_PDMA_0_0);
reg_base = gaudi2_dma_core_blocks_bases[DMA_CORE_ID_PDMA1];
gaudi2_init_dma_core(hdev, reg_base, DMA_CORE_ID_PDMA1, false);
reg_base = gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_1_0];
gaudi2_init_qman(hdev, reg_base, GAUDI2_QUEUE_ID_PDMA_1_0);
gaudi2->hw_cap_initialized |= HW_CAP_PDMA_MASK;
}
static void gaudi2_init_edma_instance(struct hl_device *hdev, u8 seq)
{
u32 reg_base, base_edma_core_id, base_edma_qman_id;
base_edma_core_id = DMA_CORE_ID_EDMA0 + seq;
base_edma_qman_id = edma_stream_base[seq];
reg_base = gaudi2_dma_core_blocks_bases[base_edma_core_id];
gaudi2_init_dma_core(hdev, reg_base, base_edma_core_id, false);
reg_base = gaudi2_qm_blocks_bases[base_edma_qman_id];
gaudi2_init_qman(hdev, reg_base, base_edma_qman_id);
}
static void gaudi2_init_edma(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int dcore, inst;
if ((gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK) == HW_CAP_EDMA_MASK)
return;
for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) {
u8 seq = dcore * NUM_OF_EDMA_PER_DCORE + inst;
if (!(prop->edma_enabled_mask & BIT(seq)))
continue;
gaudi2_init_edma_instance(hdev, seq);
gaudi2->hw_cap_initialized |= BIT_ULL(HW_CAP_EDMA_SHIFT + seq);
}
}
}
/*
* gaudi2_arm_monitors_for_virt_msix_db() - Arm monitors for writing to the virtual MSI-X doorbell.
* @hdev: pointer to habanalabs device structure.
* @sob_id: sync object ID.
* @first_mon_id: ID of first monitor out of 3 consecutive monitors.
* @interrupt_id: interrupt ID.
*
* Some initiators cannot have HBW address in their completion address registers, and thus cannot
* write directly to the HBW host memory of the virtual MSI-X doorbell.
* Instead, they are configured to LBW write to a sync object, and a monitor will do the HBW write.
*
* The mechanism in the sync manager block is composed of a master monitor with 3 messages.
* In addition to the HBW write, the other 2 messages are for preparing the monitor to next
* completion, by decrementing the sync object value and re-arming the monitor.
*/
static void gaudi2_arm_monitors_for_virt_msix_db(struct hl_device *hdev, u32 sob_id,
u32 first_mon_id, u32 interrupt_id)
{
u32 sob_offset, first_mon_offset, mon_offset, payload, sob_group, mode, arm, config;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u64 addr;
u8 mask;
/* Reset the SOB value */
sob_offset = sob_id * sizeof(u32);
WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset, 0);
/* Configure 3 monitors:
* 1. Write interrupt ID to the virtual MSI-X doorbell (master monitor)
* 2. Decrement SOB value by 1.
* 3. Re-arm the master monitor.
*/
first_mon_offset = first_mon_id * sizeof(u32);
/* 2nd monitor: Decrement SOB value by 1 */
mon_offset = first_mon_offset + sizeof(u32);
addr = CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset;
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_offset, lower_32_bits(addr));
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_offset, upper_32_bits(addr));
payload = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 0x7FFF) | /* "-1" */
FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_SIGN_MASK, 1) |
FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1);
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_offset, payload);
/* 3rd monitor: Re-arm the master monitor */
mon_offset = first_mon_offset + 2 * sizeof(u32);
addr = CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 + first_mon_offset;
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_offset, lower_32_bits(addr));
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_offset, upper_32_bits(addr));
sob_group = sob_id / 8;
mask = ~BIT(sob_id & 0x7);
mode = 0; /* comparison mode is "greater than or equal to" */
arm = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SID_MASK, sob_group) |
FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_MASK_MASK, mask) |
FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOP_MASK, mode) |
FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOD_MASK, 1);
payload = arm;
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_offset, payload);
/* 1st monitor (master): Write interrupt ID to the virtual MSI-X doorbell */
mon_offset = first_mon_offset;
config = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_WR_NUM_MASK, 2); /* "2": 3 writes */
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + mon_offset, config);
addr = gaudi2->virt_msix_db_dma_addr;
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_offset, lower_32_bits(addr));
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_offset, upper_32_bits(addr));
payload = interrupt_id;
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_offset, payload);
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 + mon_offset, arm);
}
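/* Arm a normal and an abnormal SOB/monitor set for every enabled decoder so
* that its interrupts are routed to the virtual MSI-X doorbell.
*/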
static void gaudi2_prepare_sm_for_virt_msix_db(struct hl_device *hdev)
{
u32 decoder_id, sob_id, first_mon_id, interrupt_id;
struct asic_fixed_properties *prop = &hdev->asic_prop;
/* Decoder normal/abnormal interrupts */
for (decoder_id = 0 ; decoder_id < NUMBER_OF_DEC ; ++decoder_id) {
if (!(prop->decoder_enabled_mask & BIT(decoder_id)))
continue;
sob_id = GAUDI2_RESERVED_SOB_DEC_NRM_FIRST + decoder_id;
first_mon_id = GAUDI2_RESERVED_MON_DEC_NRM_FIRST + 3 * decoder_id;
interrupt_id = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM + 2 * decoder_id;
gaudi2_arm_monitors_for_virt_msix_db(hdev, sob_id, first_mon_id, interrupt_id);
sob_id = GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST + decoder_id;
first_mon_id = GAUDI2_RESERVED_MON_DEC_ABNRM_FIRST + 3 * decoder_id;
interrupt_id += 1;
gaudi2_arm_monitors_for_virt_msix_db(hdev, sob_id, first_mon_id, interrupt_id);
}
}
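/* Initialize the sync manager: completion monitors, CQ base addresses and
* sizes, kernel ASID and the virtual MSI-X doorbell objects.
*/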
static void gaudi2_init_sm(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u64 cq_address;
u32 reg_val;
int i;
/* Enable HBW/LBW CQ for completion monitors */
reg_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_CQ_EN_MASK, 1);
reg_val |= FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_LBW_EN_MASK, 1);
for (i = 0 ; i < GAUDI2_MAX_PENDING_CS ; i++)
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + (4 * i), reg_val);
/* Enable only HBW CQ for KDMA completion monitor */
reg_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_CQ_EN_MASK, 1);
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + (4 * i), reg_val);
/* Init CQ0 DB - configure the monitor to trigger MSI-X interrupt */
WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0, lower_32_bits(gaudi2->virt_msix_db_dma_addr));
WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0, upper_32_bits(gaudi2->virt_msix_db_dma_addr));
WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0, GAUDI2_IRQ_NUM_COMPLETION);
for (i = 0 ; i < GAUDI2_RESERVED_CQ_NUMBER ; i++) {
cq_address =
hdev->completion_queue[i].bus_address;
WREG32(mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 + (4 * i),
lower_32_bits(cq_address));
WREG32(mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0 + (4 * i),
upper_32_bits(cq_address));
WREG32(mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0 + (4 * i),
ilog2(HL_CQ_SIZE_IN_BYTES));
}
/* Configure kernel ASID and MMU BP */
WREG32(mmDCORE0_SYNC_MNGR_GLBL_ASID_SEC, 0x10000);
WREG32(mmDCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV, 0);
/* Initialize sync objects and monitors which are used for the virtual MSI-X doorbell */
gaudi2_prepare_sm_for_virt_msix_db(hdev);
}
static void gaudi2_init_mme_acc(struct hl_device *hdev, u32 reg_base)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 reg_val;
int i;
reg_val = FIELD_PREP(MME_ACC_INTR_MASK_WBC_ERR_RESP_MASK, 0);
reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_SRC_POS_INF_MASK, 1);
reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_SRC_NEG_INF_MASK, 1);
reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_SRC_NAN_MASK, 1);
reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_RESULT_POS_INF_MASK, 1);
reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_RESULT_NEG_INF_MASK, 1);
WREG32(reg_base + MME_ACC_INTR_MASK_OFFSET, reg_val);
WREG32(reg_base + MME_ACC_AP_LFSR_POLY_OFFSET, 0x80DEADAF);
for (i = 0 ; i < MME_NUM_OF_LFSR_SEEDS ; i++) {
WREG32(reg_base + MME_ACC_AP_LFSR_SEED_SEL_OFFSET, i);
WREG32(reg_base + MME_ACC_AP_LFSR_SEED_WDATA_OFFSET, gaudi2->lfsr_rand_seeds[i]);
}
}
static void gaudi2_init_dcore_mme(struct hl_device *hdev, int dcore_id,
bool config_qman_only)
{
u32 queue_id_base, reg_base;
switch (dcore_id) {
case 0:
queue_id_base = GAUDI2_QUEUE_ID_DCORE0_MME_0_0;
break;
case 1:
queue_id_base = GAUDI2_QUEUE_ID_DCORE1_MME_0_0;
break;
case 2:
queue_id_base = GAUDI2_QUEUE_ID_DCORE2_MME_0_0;
break;
case 3:
queue_id_base = GAUDI2_QUEUE_ID_DCORE3_MME_0_0;
break;
default:
dev_err(hdev->dev, "Invalid dcore id %u\n", dcore_id);
return;
}
if (!config_qman_only) {
reg_base = gaudi2_mme_acc_blocks_bases[dcore_id];
gaudi2_init_mme_acc(hdev, reg_base);
}
reg_base = gaudi2_qm_blocks_bases[queue_id_base];
gaudi2_init_qman(hdev, reg_base, queue_id_base);
}
static void gaudi2_init_mme(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int i;
if ((gaudi2->hw_cap_initialized & HW_CAP_MME_MASK) == HW_CAP_MME_MASK)
return;
for (i = 0 ; i < NUM_OF_DCORES ; i++) {
gaudi2_init_dcore_mme(hdev, i, false);
gaudi2->hw_cap_initialized |= BIT_ULL(HW_CAP_MME_SHIFT + i);
}
}
static void gaudi2_init_tpc_cfg(struct hl_device *hdev, u32 reg_base)
{
/* Mask arithmetic and QM interrupts in TPC */
WREG32(reg_base + TPC_CFG_TPC_INTR_MASK_OFFSET, 0x23FFFE);
/* Set 16 cache lines */
WREG32(reg_base + TPC_CFG_MSS_CONFIG_OFFSET,
2 << DCORE0_TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_SHIFT);
}
struct gaudi2_tpc_init_cfg_data {
enum gaudi2_queue_id dcore_tpc_qid_base[NUM_OF_DCORES];
};
static void gaudi2_init_tpc_config(struct hl_device *hdev, int dcore, int inst,
u32 offset, struct iterate_module_ctx *ctx)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
struct gaudi2_tpc_init_cfg_data *cfg_data = ctx->data;
u32 queue_id_base;
u8 seq;
queue_id_base = cfg_data->dcore_tpc_qid_base[dcore] + (inst * NUM_OF_PQ_PER_QMAN);
if (dcore == 0 && inst == (NUM_DCORE0_TPC - 1))
/* the additional DCORE0 TPC (TPC6) gets the last sequence number */
seq = NUM_OF_DCORES * NUM_OF_TPC_PER_DCORE;
else
seq = dcore * NUM_OF_TPC_PER_DCORE + inst;
gaudi2_init_tpc_cfg(hdev, mmDCORE0_TPC0_CFG_BASE + offset);
gaudi2_init_qman(hdev, mmDCORE0_TPC0_QM_BASE + offset, queue_id_base);
gaudi2->tpc_hw_cap_initialized |= BIT_ULL(HW_CAP_TPC_SHIFT + seq);
}
static void gaudi2_init_tpc(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
struct gaudi2_tpc_init_cfg_data init_cfg_data;
struct iterate_module_ctx tpc_iter;
if (!hdev->asic_prop.tpc_enabled_mask)
return;
if ((gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK) == HW_CAP_TPC_MASK)
return;
init_cfg_data.dcore_tpc_qid_base[0] = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0;
init_cfg_data.dcore_tpc_qid_base[1] = GAUDI2_QUEUE_ID_DCORE1_TPC_0_0;
init_cfg_data.dcore_tpc_qid_base[2] = GAUDI2_QUEUE_ID_DCORE2_TPC_0_0;
init_cfg_data.dcore_tpc_qid_base[3] = GAUDI2_QUEUE_ID_DCORE3_TPC_0_0;
tpc_iter.fn = &gaudi2_init_tpc_config;
tpc_iter.data = &init_cfg_data;
gaudi2_iterate_tpcs(hdev, &tpc_iter);
}
static void gaudi2_init_rotator(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 i, reg_base, queue_id;
queue_id = GAUDI2_QUEUE_ID_ROT_0_0;
for (i = 0 ; i < NUM_OF_ROT ; i++, queue_id += NUM_OF_PQ_PER_QMAN) {
reg_base = gaudi2_qm_blocks_bases[queue_id];
gaudi2_init_qman(hdev, reg_base, queue_id);
gaudi2->hw_cap_initialized |= BIT_ULL(HW_CAP_ROT_SHIFT + i);
}
}
static void gaudi2_init_vdec_brdg_ctrl(struct hl_device *hdev, u64 base_addr, u32 decoder_id)
{
u32 sob_id;
/* VCMD normal interrupt */
sob_id = GAUDI2_RESERVED_SOB_DEC_NRM_FIRST + decoder_id;
WREG32(base_addr + BRDG_CTRL_NRM_MSIX_LBW_AWADDR,
mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_id * sizeof(u32));
WREG32(base_addr + BRDG_CTRL_NRM_MSIX_LBW_WDATA, GAUDI2_SOB_INCREMENT_BY_ONE);
/* VCMD abnormal interrupt */
sob_id = GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST + decoder_id;
WREG32(base_addr + BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR,
mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_id * sizeof(u32));
WREG32(base_addr + BRDG_CTRL_ABNRM_MSIX_LBW_WDATA, GAUDI2_SOB_INCREMENT_BY_ONE);
}
static void gaudi2_init_dec(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 dcore_id, dec_id, dec_bit;
u64 base_addr;
if (!hdev->asic_prop.decoder_enabled_mask)
return;
if ((gaudi2->dec_hw_cap_initialized & HW_CAP_DEC_MASK) == HW_CAP_DEC_MASK)
return;
for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++)
for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) {
dec_bit = dcore_id * NUM_OF_DEC_PER_DCORE + dec_id;
if (!(hdev->asic_prop.decoder_enabled_mask & BIT(dec_bit)))
continue;
base_addr = mmDCORE0_DEC0_CMD_BASE +
BRDG_CTRL_BLOCK_OFFSET +
dcore_id * DCORE_OFFSET +
dec_id * DCORE_VDEC_OFFSET;
gaudi2_init_vdec_brdg_ctrl(hdev, base_addr, dec_bit);
gaudi2->dec_hw_cap_initialized |= BIT_ULL(HW_CAP_DEC_SHIFT + dec_bit);
}
for (dec_id = 0 ; dec_id < NUM_OF_PCIE_VDEC ; dec_id++) {
dec_bit = PCIE_DEC_SHIFT + dec_id;
if (!(hdev->asic_prop.decoder_enabled_mask & BIT(dec_bit)))
continue;
base_addr = mmPCIE_DEC0_CMD_BASE + BRDG_CTRL_BLOCK_OFFSET +
dec_id * DCORE_VDEC_OFFSET;
gaudi2_init_vdec_brdg_ctrl(hdev, base_addr, dec_bit);
gaudi2->dec_hw_cap_initialized |= BIT_ULL(HW_CAP_DEC_SHIFT + dec_bit);
}
}
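/* Program the hop0 physical address of a single ASID in the given STLB and
* poll until the unit consumes it.
*/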
static int gaudi2_mmu_update_asid_hop0_addr(struct hl_device *hdev,
u32 stlb_base, u32 asid, u64 phys_addr)
{
u32 status, timeout_usec;
int rc;
if (hdev->pldm || !hdev->pdev)
timeout_usec = GAUDI2_PLDM_MMU_TIMEOUT_USEC;
else
timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
WREG32(stlb_base + STLB_ASID_OFFSET, asid);
WREG32(stlb_base + STLB_HOP0_PA43_12_OFFSET, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
WREG32(stlb_base + STLB_HOP0_PA63_44_OFFSET, phys_addr >> MMU_HOP0_PA63_44_SHIFT);
WREG32(stlb_base + STLB_BUSY_OFFSET, 0x80000000);
rc = hl_poll_timeout(
hdev,
stlb_base + STLB_BUSY_OFFSET,
status,
!(status & 0x80000000),
1000,
timeout_usec);
if (rc) {
dev_err(hdev->dev, "Timeout during MMU hop0 config of asid %d\n", asid);
return rc;
}
return 0;
}
static void gaudi2_mmu_send_invalidate_cache_cmd(struct hl_device *hdev, u32 stlb_base,
u32 start_offset, u32 inv_start_val,
u32 flags)
{
/* clear PMMU mem line cache (only needed in mmu range invalidation) */
if (flags & MMU_OP_CLEAR_MEMCACHE)
WREG32(mmPMMU_HBW_STLB_MEM_CACHE_INVALIDATION, 0x1);
if (flags & MMU_OP_SKIP_LOW_CACHE_INV)
return;
WREG32(stlb_base + start_offset, inv_start_val);
}
static int gaudi2_mmu_invalidate_cache_status_poll(struct hl_device *hdev, u32 stlb_base,
struct gaudi2_cache_invld_params *inv_params)
{
u32 status, timeout_usec, start_offset;
int rc;
timeout_usec = (hdev->pldm) ? GAUDI2_PLDM_MMU_TIMEOUT_USEC :
GAUDI2_MMU_CACHE_INV_TIMEOUT_USEC;
/* poll PMMU mem line cache (only needed in mmu range invalidation) */
if (inv_params->flags & MMU_OP_CLEAR_MEMCACHE) {
rc = hl_poll_timeout(
hdev,
mmPMMU_HBW_STLB_MEM_CACHE_INV_STATUS,
status,
status & 0x1,
1000,
timeout_usec);
if (rc)
return rc;
/* Need to manually reset the status to 0 */
WREG32(mmPMMU_HBW_STLB_MEM_CACHE_INV_STATUS, 0x0);
}
/* Lower cache does not work with cache lines, hence we can skip its
* invalidation upon map and invalidate only upon unmap
*/
if (inv_params->flags & MMU_OP_SKIP_LOW_CACHE_INV)
return 0;
start_offset = inv_params->range_invalidation ?
STLB_RANGE_CACHE_INVALIDATION_OFFSET : STLB_INV_ALL_START_OFFSET;
rc = hl_poll_timeout(
hdev,
stlb_base + start_offset,
status,
!(status & 0x1),
1000,
timeout_usec);
return rc;
}
bool gaudi2_is_hmmu_enabled(struct hl_device *hdev, int dcore_id, int hmmu_id)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 hw_cap;
hw_cap = HW_CAP_DCORE0_DMMU0 << (NUM_OF_HMMU_PER_DCORE * dcore_id + hmmu_id);
if (gaudi2->hw_cap_initialized & hw_cap)
return true;
return false;
}
/* this function shall be called only for HMMUs for which capability bit is set */
static inline u32 get_hmmu_stlb_base(int dcore_id, int hmmu_id)
{
u32 offset;
offset = (u32) (dcore_id * DCORE_OFFSET + hmmu_id * DCORE_HMMU_OFFSET);
return (u32)(mmDCORE0_HMMU0_STLB_BASE + offset);
}
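/* Trigger either a range invalidation or an invalidate-all on the given STLB,
* according to the invalidation parameters.
*/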
static void gaudi2_mmu_invalidate_cache_trigger(struct hl_device *hdev, u32 stlb_base,
struct gaudi2_cache_invld_params *inv_params)
{
u32 start_offset;
if (inv_params->range_invalidation) {
/* Set the address range.
* Note: by design, the start address that we set in the register is not
* included in the invalidation range, so we program an address one lower
* than the first address we actually want to be invalidated.
*/
u64 start = inv_params->start_va - 1;
start_offset = STLB_RANGE_CACHE_INVALIDATION_OFFSET;
WREG32(stlb_base + STLB_RANGE_INV_START_LSB_OFFSET,
start >> MMU_RANGE_INV_VA_LSB_SHIFT);
WREG32(stlb_base + STLB_RANGE_INV_START_MSB_OFFSET,
start >> MMU_RANGE_INV_VA_MSB_SHIFT);
WREG32(stlb_base + STLB_RANGE_INV_END_LSB_OFFSET,
inv_params->end_va >> MMU_RANGE_INV_VA_LSB_SHIFT);
WREG32(stlb_base + STLB_RANGE_INV_END_MSB_OFFSET,
inv_params->end_va >> MMU_RANGE_INV_VA_MSB_SHIFT);
} else {
start_offset = STLB_INV_ALL_START_OFFSET;
}
gaudi2_mmu_send_invalidate_cache_cmd(hdev, stlb_base, start_offset,
inv_params->inv_start_val, inv_params->flags);
}
static inline void gaudi2_hmmu_invalidate_cache_trigger(struct hl_device *hdev,
int dcore_id, int hmmu_id,
struct gaudi2_cache_invld_params *inv_params)
{
u32 stlb_base = get_hmmu_stlb_base(dcore_id, hmmu_id);
gaudi2_mmu_invalidate_cache_trigger(hdev, stlb_base, inv_params);
}
static inline int gaudi2_hmmu_invalidate_cache_status_poll(struct hl_device *hdev,
int dcore_id, int hmmu_id,
struct gaudi2_cache_invld_params *inv_params)
{
u32 stlb_base = get_hmmu_stlb_base(dcore_id, hmmu_id);
return gaudi2_mmu_invalidate_cache_status_poll(hdev, stlb_base, inv_params);
}
static int gaudi2_hmmus_invalidate_cache(struct hl_device *hdev,
struct gaudi2_cache_invld_params *inv_params)
{
int dcore_id, hmmu_id;
/* first send all invalidation commands */
for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) {
for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE ; hmmu_id++) {
if (!gaudi2_is_hmmu_enabled(hdev, dcore_id, hmmu_id))
continue;
gaudi2_hmmu_invalidate_cache_trigger(hdev, dcore_id, hmmu_id, inv_params);
}
}
/* next, poll all invalidations status */
for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) {
for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE ; hmmu_id++) {
int rc;
if (!gaudi2_is_hmmu_enabled(hdev, dcore_id, hmmu_id))
continue;
rc = gaudi2_hmmu_invalidate_cache_status_poll(hdev, dcore_id, hmmu_id,
inv_params);
if (rc)
return rc;
}
}
return 0;
}
static int gaudi2_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
struct gaudi2_cache_invld_params invld_params;
int rc = 0;
if (hdev->reset_info.hard_reset_pending)
return rc;
invld_params.range_invalidation = false;
invld_params.inv_start_val = 1;
if ((flags & MMU_OP_USERPTR) && (gaudi2->hw_cap_initialized & HW_CAP_PMMU)) {
invld_params.flags = flags;
gaudi2_mmu_invalidate_cache_trigger(hdev, mmPMMU_HBW_STLB_BASE, &invld_params);
rc = gaudi2_mmu_invalidate_cache_status_poll(hdev, mmPMMU_HBW_STLB_BASE,
&invld_params);
} else if (flags & MMU_OP_PHYS_PACK) {
invld_params.flags = 0;
rc = gaudi2_hmmus_invalidate_cache(hdev, &invld_params);
}
return rc;
}
static int gaudi2_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
u32 flags, u32 asid, u64 va, u64 size)
{
struct gaudi2_cache_invld_params invld_params = {0};
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u64 start_va, end_va;
u32 inv_start_val;
int rc = 0;
if (hdev->reset_info.hard_reset_pending)
return 0;
inv_start_val = (1 << MMU_RANGE_INV_EN_SHIFT |
1 << MMU_RANGE_INV_ASID_EN_SHIFT |
asid << MMU_RANGE_INV_ASID_SHIFT);
start_va = va;
end_va = start_va + size;
if ((flags & MMU_OP_USERPTR) && (gaudi2->hw_cap_initialized & HW_CAP_PMMU)) {
/* As range invalidation does not support a zero address, we do a full
* invalidation in this case
*/
if (start_va) {
invld_params.range_invalidation = true;
invld_params.start_va = start_va;
invld_params.end_va = end_va;
invld_params.inv_start_val = inv_start_val;
invld_params.flags = flags | MMU_OP_CLEAR_MEMCACHE;
} else {
invld_params.range_invalidation = false;
invld_params.inv_start_val = 1;
invld_params.flags = flags;
}
gaudi2_mmu_invalidate_cache_trigger(hdev, mmPMMU_HBW_STLB_BASE, &invld_params);
rc = gaudi2_mmu_invalidate_cache_status_poll(hdev, mmPMMU_HBW_STLB_BASE,
&invld_params);
if (rc)
return rc;
} else if (flags & MMU_OP_PHYS_PACK) {
invld_params.start_va = gaudi2_mmu_scramble_addr(hdev, start_va);
invld_params.end_va = gaudi2_mmu_scramble_addr(hdev, end_va);
invld_params.inv_start_val = inv_start_val;
invld_params.flags = flags;
rc = gaudi2_hmmus_invalidate_cache(hdev, &invld_params);
}
return rc;
}
static int gaudi2_mmu_update_hop0_addr(struct hl_device *hdev, u32 stlb_base)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 hop0_addr;
u32 asid, max_asid = prop->max_asid;
int rc;
/* it takes too much time to init all of the ASIDs on palladium */
if (hdev->pldm)
max_asid = min((u32) 8, max_asid);
for (asid = 0 ; asid < max_asid ; asid++) {
hop0_addr = hdev->mmu_priv.hr.mmu_asid_hop0[asid].phys_addr;
rc = gaudi2_mmu_update_asid_hop0_addr(hdev, stlb_base, asid, hop0_addr);
if (rc) {
dev_err(hdev->dev, "failed to set hop0 addr for asid %d\n", asid);
return rc;
}
}
return 0;
}
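/* Common MMU bring-up: trigger invalidate-all, wait for the STLB SRAM init,
* program hop0 for all ASIDs, disable MMU bypass and enable the MMU once the
* invalidation is done.
*/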
static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base, u32 stlb_base)
{
u32 status, timeout_usec;
int rc;
if (hdev->pldm || !hdev->pdev)
timeout_usec = GAUDI2_PLDM_MMU_TIMEOUT_USEC;
else
timeout_usec = GAUDI2_MMU_CACHE_INV_TIMEOUT_USEC;
WREG32(stlb_base + STLB_INV_ALL_START_OFFSET, 1);
rc = hl_poll_timeout(
hdev,
stlb_base + STLB_SRAM_INIT_OFFSET,
status,
!status,
1000,
timeout_usec);
if (rc)
dev_notice_ratelimited(hdev->dev, "Timeout when waiting for MMU SRAM init\n");
rc = gaudi2_mmu_update_hop0_addr(hdev, stlb_base);
if (rc)
return rc;
WREG32(mmu_base + MMU_BYPASS_OFFSET, 0);
rc = hl_poll_timeout(
hdev,
stlb_base + STLB_INV_ALL_START_OFFSET,
status,
!status,
1000,
timeout_usec);
if (rc)
dev_notice_ratelimited(hdev->dev, "Timeout when waiting for MMU invalidate all\n");
WREG32(mmu_base + MMU_ENABLE_OFFSET, 1);
return rc;
}
static int gaudi2_pci_mmu_init(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 mmu_base, stlb_base;
int rc;
if (gaudi2->hw_cap_initialized & HW_CAP_PMMU)
return 0;
mmu_base = mmPMMU_HBW_MMU_BASE;
stlb_base = mmPMMU_HBW_STLB_BASE;
RMWREG32_SHIFTED(stlb_base + STLB_HOP_CONFIGURATION_OFFSET,
(0 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT) |
(5 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_SHIFT) |
(4 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_SHIFT) |
(5 << PMMU_HBW_STLB_HOP_CONFIGURATION_LAST_HOP_SHIFT) |
(5 << PMMU_HBW_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_SHIFT),
PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK |
PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_MASK |
PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_MASK |
PMMU_HBW_STLB_HOP_CONFIGURATION_LAST_HOP_MASK |
PMMU_HBW_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_MASK);
WREG32(stlb_base + STLB_LL_LOOKUP_MASK_63_32_OFFSET, 0);
if (PAGE_SIZE == SZ_64K) {
/* Set page sizes to 64K on hop5 and 16M on hop4 + enable 8 bit hops */
RMWREG32_SHIFTED(mmu_base + MMU_STATIC_MULTI_PAGE_SIZE_OFFSET,
FIELD_PREP(DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP5_PAGE_SIZE_MASK, 4) |
FIELD_PREP(DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK, 3) |
FIELD_PREP(
DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK,
1),
DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP5_PAGE_SIZE_MASK |
DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK |
DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK);
}
WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, GAUDI2_PMMU_SPI_SEI_ENABLE_MASK);
rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base);
if (rc)
return rc;
gaudi2->hw_cap_initialized |= HW_CAP_PMMU;
return 0;
}
static int gaudi2_dcore_hmmu_init(struct hl_device *hdev, int dcore_id,
int hmmu_id)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 offset, mmu_base, stlb_base, hw_cap;
u8 dmmu_seq;
int rc;
dmmu_seq = NUM_OF_HMMU_PER_DCORE * dcore_id + hmmu_id;
hw_cap = HW_CAP_DCORE0_DMMU0 << dmmu_seq;
/*
* return if DMMU is already initialized or if it's not out of
* isolation (due to cluster binning)
*/
if ((gaudi2->hw_cap_initialized & hw_cap) || !(prop->hmmu_hif_enabled_mask & BIT(dmmu_seq)))
return 0;
offset = (u32) (dcore_id * DCORE_OFFSET + hmmu_id * DCORE_HMMU_OFFSET);
mmu_base = mmDCORE0_HMMU0_MMU_BASE + offset;
stlb_base = mmDCORE0_HMMU0_STLB_BASE + offset;
RMWREG32(mmu_base + MMU_STATIC_MULTI_PAGE_SIZE_OFFSET, 5 /* 64MB */,
MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK);
RMWREG32_SHIFTED(stlb_base + STLB_HOP_CONFIGURATION_OFFSET,
FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK, 0) |
FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_MASK, 3) |
FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_MASK, 3) |
FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LAST_HOP_MASK, 3) |
FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_MASK, 3),
DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK |
DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_MASK |
DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_MASK |
DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LAST_HOP_MASK |
DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_MASK);
RMWREG32(stlb_base + STLB_HOP_CONFIGURATION_OFFSET, 1,
STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_MASK);
WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, GAUDI2_HMMU_SPI_SEI_ENABLE_MASK);
rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base);
if (rc)
return rc;
gaudi2->hw_cap_initialized |= hw_cap;
return 0;
}
static int gaudi2_hbm_mmu_init(struct hl_device *hdev)
{
int rc, dcore_id, hmmu_id;
for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++)
for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE; hmmu_id++) {
rc = gaudi2_dcore_hmmu_init(hdev, dcore_id, hmmu_id);
if (rc)
return rc;
}
return 0;
}
static int gaudi2_mmu_init(struct hl_device *hdev)
{
int rc;
rc = gaudi2_pci_mmu_init(hdev);
if (rc)
return rc;
rc = gaudi2_hbm_mmu_init(hdev);
if (rc)
return rc;
return 0;
}
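/* Top-level H/W initialization: CPU, HBM scrambler, KDMA, CPU queues, MMUs,
* engine QMANs, sync manager, decoders, coresight and MSI-X.
*/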
static int gaudi2_hw_init(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int rc;
/* Let's mark in the H/W that we have reached this point. We check
* this value in the reset_before_init function to understand whether
* we need to reset the chip before doing H/W init. This register is
* cleared by the H/W upon H/W reset
*/
WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
/* Perform read from the device to make sure device is up */
RREG32(mmHW_STATE);
/* If the iATU is done by FW, the HBM BAR ALWAYS points to DRAM_PHYS_BASE.
* So we set it here, and if anyone later tries to move it to a different
* address, there will be an error
*/
if (hdev->asic_prop.iatu_done_by_fw)
gaudi2->dram_bar_cur_addr = DRAM_PHYS_BASE;
/*
* Before pushing u-boot/Linux to the device, we need to set the HBM BAR to
* the base address of the DRAM
*/
if (gaudi2_set_hbm_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
dev_err(hdev->dev, "failed to map HBM bar to DRAM base address\n");
return -EIO;
}
rc = gaudi2_init_cpu(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize CPU\n");
return rc;
}
gaudi2_init_scrambler_hbm(hdev);
gaudi2_init_kdma(hdev);
rc = gaudi2_init_cpu_queues(hdev, GAUDI2_CPU_TIMEOUT_USEC);
if (rc) {
dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n", rc);
return rc;
}
rc = gaudi2->cpucp_info_get(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to get cpucp info\n");
return rc;
}
rc = gaudi2_mmu_init(hdev);
if (rc)
return rc;
gaudi2_init_pdma(hdev);
gaudi2_init_edma(hdev);
gaudi2_init_sm(hdev);
gaudi2_init_tpc(hdev);
gaudi2_init_mme(hdev);
gaudi2_init_rotator(hdev);
gaudi2_init_dec(hdev);
gaudi2_enable_timestamp(hdev);
rc = gaudi2_coresight_init(hdev);
if (rc)
goto disable_queues;
rc = gaudi2_enable_msix(hdev);
if (rc)
goto disable_queues;
/* Perform read from the device to flush all configuration */
RREG32(mmHW_STATE);
return 0;
disable_queues:
gaudi2_disable_dma_qmans(hdev);
gaudi2_disable_mme_qmans(hdev);
gaudi2_disable_tpc_qmans(hdev);
gaudi2_disable_rot_qmans(hdev);
gaudi2_disable_nic_qmans(hdev);
gaudi2_disable_timestamp(hdev);
return rc;
}
/**
* gaudi2_send_hard_reset_cmd - common function to handle reset
*
* @hdev: pointer to the habanalabs device structure
*
* This function handles the various possible scenarios for reset.
* It considers whether the reset is handled by the driver or by FW, and which FW components are loaded
*/
static void gaudi2_send_hard_reset_cmd(struct hl_device *hdev)
{
struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
bool heartbeat_reset, preboot_only, cpu_initialized = false;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 cpu_boot_status;
preboot_only = (hdev->fw_loader.fw_comp_loaded == FW_TYPE_PREBOOT_CPU);
heartbeat_reset = (hdev->reset_info.curr_reset_cause == HL_RESET_CAUSE_HEARTBEAT);
/*
* Handle the corner case where the failure occurred while loading the CPU
* management app, and the driver did not detect any failure during FW load.
* In that scenario the driver sends only HALT_MACHINE, and nobody responds
* to this request because the FW is already back in preboot and cannot
* handle such a command.
* The next time the management app loads, it checks the events register,
* still sees the halt indication, and reboots the device.
* The solution is to let preboot clear all relevant registers before the
* next boot, once the driver sends COMMS_RST_DEV.
*/
cpu_boot_status = RREG32(mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS);
if (gaudi2 && (gaudi2->hw_cap_initialized & HW_CAP_CPU) &&
(cpu_boot_status == CPU_BOOT_STATUS_SRAM_AVAIL))
cpu_initialized = true;
/*
* When Linux/boot fit exists, this write to the SP can be interpreted in 2 ways:
* 1. FW reset: FW initiates the reset sequence
* 2. driver reset: FW starts the HALT sequence (the preparations for the
*                  reset but not the reset itself, as it is not implemented
*                  on the FW side) and LKD waits to let FW complete the
*                  sequence before issuing the reset
*/
if (!preboot_only && cpu_initialized) {
WREG32(le32_to_cpu(dyn_regs->gic_host_halt_irq),
gaudi2_irq_map_table[GAUDI2_EVENT_CPU_HALT_MACHINE].cpu_id);
msleep(GAUDI2_CPU_RESET_WAIT_MSEC);
}
/*
* When working with preboot (without Linux/Boot fit) we can
* communicate only using the COMMS commands to issue halt/reset.
*
* For the case in which we are working with Linux/Bootfit this is a hail-mary
* attempt to revive the card in the small chance that the f/w has
* experienced a watchdog event, which caused it to return back to preboot.
* In that case, triggering reset through GIC won't help. We need to
* trigger the reset as if Linux wasn't loaded.
*
* We do it only if the reset cause was HB, because that would be the
* indication of such an event.
*
* In case watchdog hasn't expired but we still got HB, then this won't
* do any damage.
*/
if (heartbeat_reset || preboot_only || !cpu_initialized) {
if (hdev->asic_prop.hard_reset_done_by_fw)
hl_fw_ask_hard_reset_without_linux(hdev);
else
hl_fw_ask_halt_machine_without_linux(hdev);
}
}
/**
* gaudi2_execute_hard_reset - execute hard reset by driver/FW
*
* @hdev: pointer to the habanalabs device structure
*
* This function executes hard reset based on if driver/FW should do the reset
*/
static void gaudi2_execute_hard_reset(struct hl_device *hdev)
{
if (hdev->asic_prop.hard_reset_done_by_fw) {
gaudi2_send_hard_reset_cmd(hdev);
return;
}
/* Set device to handle FLR by H/W as we will put the device
* CPU to halt mode
*/
WREG32(mmPCIE_AUX_FLR_CTRL,
(PCIE_AUX_FLR_CTRL_HW_CTRL_MASK | PCIE_AUX_FLR_CTRL_INT_MASK_MASK));
gaudi2_send_hard_reset_cmd(hdev);
WREG32(mmPSOC_RESET_CONF_SW_ALL_RST, 1);
}
static int gaudi2_get_soft_rst_done_indication(struct hl_device *hdev, u32 poll_timeout_us)
{
int i, rc = 0;
u32 reg_val;
for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++)
rc = hl_poll_timeout(
hdev,
mmCPU_RST_STATUS_TO_HOST,
reg_val,
reg_val == CPU_RST_STATUS_SOFT_RST_DONE,
1000,
poll_timeout_us);
if (rc)
dev_err(hdev->dev, "Timeout while waiting for FW to complete soft reset (0x%x)\n",
reg_val);
return rc;
}
/**
* gaudi2_execute_soft_reset - execute soft reset by driver/FW
*
* @hdev: pointer to the habanalabs device structure
* @driver_performs_reset: true if driver should perform reset instead of f/w.
* @poll_timeout_us: time to wait for response from f/w.
*
* This function executes soft reset based on if driver/FW should do the reset
*/
static int gaudi2_execute_soft_reset(struct hl_device *hdev, bool driver_performs_reset,
u32 poll_timeout_us)
{
struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
int rc = 0;
if (!driver_performs_reset) {
if (hl_is_fw_sw_ver_below(hdev, 1, 10)) {
/* set SP to indicate reset request sent to FW */
if (dyn_regs->cpu_rst_status)
WREG32(le32_to_cpu(dyn_regs->cpu_rst_status), CPU_RST_STATUS_NA);
else
WREG32(mmCPU_RST_STATUS_TO_HOST, CPU_RST_STATUS_NA);
WREG32(le32_to_cpu(dyn_regs->gic_host_soft_rst_irq),
gaudi2_irq_map_table[GAUDI2_EVENT_CPU_SOFT_RESET].cpu_id);
/* wait for f/w response */
rc = gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us);
} else {
rc = hl_fw_send_soft_reset(hdev);
}
return rc;
}
/* Block access to engines, QMANs and SM during reset; these
* RRs will be reconfigured after the soft reset.
* PCIE_MSIX is left unsecured to allow NIC packet processing during the reset.
*/
gaudi2_write_rr_to_all_lbw_rtrs(hdev, RR_TYPE_LONG, NUM_LONG_LBW_RR - 1,
mmDCORE0_TPC0_QM_DCCM_BASE, mmPCIE_MSIX_BASE);
gaudi2_write_rr_to_all_lbw_rtrs(hdev, RR_TYPE_LONG, NUM_LONG_LBW_RR - 2,
mmPCIE_MSIX_BASE + HL_BLOCK_SIZE,
mmPCIE_VDEC1_MSTR_IF_RR_SHRD_HBW_BASE + HL_BLOCK_SIZE);
WREG32(mmPSOC_RESET_CONF_SOFT_RST, 1);
return 0;
}
static void gaudi2_poll_btm_indication(struct hl_device *hdev, u32 poll_timeout_us)
{
int i, rc = 0;
u32 reg_val;
/* We poll the BTM done indication multiple times after reset due to
* HW erratum 'GAUDI2_0300'
*/
for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++)
rc = hl_poll_timeout(
hdev,
mmPSOC_GLOBAL_CONF_BTM_FSM,
reg_val,
reg_val == 0,
1000,
poll_timeout_us);
if (rc)
dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", reg_val);
}
static int gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 poll_timeout_us, reset_sleep_ms;
bool driver_performs_reset = false;
int rc;
if (hdev->pldm) {
reset_sleep_ms = hard_reset ? GAUDI2_PLDM_HRESET_TIMEOUT_MSEC :
GAUDI2_PLDM_SRESET_TIMEOUT_MSEC;
poll_timeout_us = GAUDI2_PLDM_RESET_POLL_TIMEOUT_USEC;
} else {
reset_sleep_ms = GAUDI2_RESET_TIMEOUT_MSEC;
poll_timeout_us = GAUDI2_RESET_POLL_TIMEOUT_USEC;
}
if (fw_reset)
goto skip_reset;
gaudi2_reset_arcs(hdev);
if (hard_reset) {
driver_performs_reset = !hdev->asic_prop.hard_reset_done_by_fw;
gaudi2_execute_hard_reset(hdev);
} else {
/*
* As we also have to support working with preboot only (which does not
* support soft reset), we must make sure that security is disabled before
* letting the driver do the reset. The user shall control the BFE flags to
* avoid requesting a soft reset on a secured device running preboot only.
*/
driver_performs_reset = (hdev->fw_components == FW_TYPE_PREBOOT_CPU &&
!hdev->asic_prop.fw_security_enabled);
rc = gaudi2_execute_soft_reset(hdev, driver_performs_reset, poll_timeout_us);
if (rc)
return rc;
}
skip_reset:
if (driver_performs_reset || hard_reset) {
/*
* Instead of waiting for the BTM indication we should wait for preboot ready.
* Consider the following scenario:
* 1. FW update is triggered
*        - the dirty bit is set
* 2. hard reset is triggered due to the dirty bit
* 3. FW initiates the reset:
*        - dirty bit cleared
*        - BTM indication cleared
*        - preboot ready indication cleared
* 4. during hard reset:
*        - BTM indication is set
*        - BIST test performed and another reset triggered
* 5. only after this reset does preboot set the preboot ready indication
*
* When polling on the BTM indication alone we can lose sync with FW by trying
* to communicate with FW that is in the middle of reset.
* To overcome this we always wait for the preboot ready indication.
*/
/* without this sleep reset will not work */
msleep(reset_sleep_ms);
if (hdev->fw_components & FW_TYPE_PREBOOT_CPU)
hl_fw_wait_preboot_ready(hdev);
else
gaudi2_poll_btm_indication(hdev, poll_timeout_us);
}
if (!gaudi2)
return 0;
gaudi2->dec_hw_cap_initialized &= ~(HW_CAP_DEC_MASK);
gaudi2->tpc_hw_cap_initialized &= ~(HW_CAP_TPC_MASK);
/*
* Clear NIC capability mask in order for driver to re-configure
* NIC QMANs. NIC ports will not be re-configured during soft
* reset as we call gaudi2_nic_init only during hard reset
*/
gaudi2->nic_hw_cap_initialized &= ~(HW_CAP_NIC_MASK);
if (hard_reset) {
gaudi2->hw_cap_initialized &=
~(HW_CAP_DRAM | HW_CAP_CLK_GATE | HW_CAP_HBM_SCRAMBLER_MASK |
HW_CAP_PMMU | HW_CAP_CPU | HW_CAP_CPU_Q |
HW_CAP_SRAM_SCRAMBLER | HW_CAP_DMMU_MASK |
HW_CAP_PDMA_MASK | HW_CAP_EDMA_MASK | HW_CAP_KDMA |
HW_CAP_MME_MASK | HW_CAP_ROT_MASK);
memset(gaudi2->events_stat, 0, sizeof(gaudi2->events_stat));
} else {
gaudi2->hw_cap_initialized &=
~(HW_CAP_CLK_GATE | HW_CAP_HBM_SCRAMBLER_SW_RESET |
HW_CAP_PDMA_MASK | HW_CAP_EDMA_MASK | HW_CAP_MME_MASK |
HW_CAP_ROT_MASK);
}
return 0;
}
static int gaudi2_suspend(struct hl_device *hdev)
{
int rc;
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
return rc;
}
static int gaudi2_resume(struct hl_device *hdev)
{
return gaudi2_init_iatu(hdev);
}
static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int rc;
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE);
#ifdef _HAS_DMA_MMAP_COHERENT
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
#else
rc = remap_pfn_range(vma, vma->vm_start,
virt_to_phys(cpu_addr) >> PAGE_SHIFT,
size, vma->vm_page_prot);
if (rc)
dev_err(hdev->dev, "remap_pfn_range error %d", rc);
#endif
return rc;
}
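/* Translate a H/W queue ID to its capability bit and test whether the
* corresponding engine was initialized.
*/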
static bool gaudi2_is_queue_enabled(struct hl_device *hdev, u32 hw_queue_id)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u64 hw_cap_mask = 0;
u64 hw_tpc_cap_bit = 0;
u64 hw_nic_cap_bit = 0;
u64 hw_test_cap_bit = 0;
switch (hw_queue_id) {
case GAUDI2_QUEUE_ID_PDMA_0_0:
case GAUDI2_QUEUE_ID_PDMA_0_1:
case GAUDI2_QUEUE_ID_PDMA_1_0:
hw_cap_mask = HW_CAP_PDMA_MASK;
break;
case GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3:
hw_test_cap_bit = HW_CAP_EDMA_SHIFT +
((hw_queue_id - GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0) >> 2);
break;
case GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3:
hw_test_cap_bit = HW_CAP_EDMA_SHIFT + NUM_OF_EDMA_PER_DCORE +
((hw_queue_id - GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0) >> 2);
break;
case GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3:
hw_test_cap_bit = HW_CAP_EDMA_SHIFT + 2 * NUM_OF_EDMA_PER_DCORE +
((hw_queue_id - GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0) >> 2);
break;
case GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3:
hw_test_cap_bit = HW_CAP_EDMA_SHIFT + 3 * NUM_OF_EDMA_PER_DCORE +
((hw_queue_id - GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0) >> 2);
break;
case GAUDI2_QUEUE_ID_DCORE0_MME_0_0 ... GAUDI2_QUEUE_ID_DCORE0_MME_0_3:
hw_test_cap_bit = HW_CAP_MME_SHIFT;
break;
case GAUDI2_QUEUE_ID_DCORE1_MME_0_0 ... GAUDI2_QUEUE_ID_DCORE1_MME_0_3:
hw_test_cap_bit = HW_CAP_MME_SHIFT + 1;
break;
case GAUDI2_QUEUE_ID_DCORE2_MME_0_0 ... GAUDI2_QUEUE_ID_DCORE2_MME_0_3:
hw_test_cap_bit = HW_CAP_MME_SHIFT + 2;
break;
case GAUDI2_QUEUE_ID_DCORE3_MME_0_0 ... GAUDI2_QUEUE_ID_DCORE3_MME_0_3:
hw_test_cap_bit = HW_CAP_MME_SHIFT + 3;
break;
case GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE0_TPC_5_3:
hw_tpc_cap_bit = HW_CAP_TPC_SHIFT +
((hw_queue_id - GAUDI2_QUEUE_ID_DCORE0_TPC_0_0) >> 2);
/* special case where cap bit refers to the first queue id */
if (!hw_tpc_cap_bit)
return !!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(0));
break;
case GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE1_TPC_5_3:
hw_tpc_cap_bit = HW_CAP_TPC_SHIFT + NUM_OF_TPC_PER_DCORE +
((hw_queue_id - GAUDI2_QUEUE_ID_DCORE1_TPC_0_0) >> 2);
break;
case GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE2_TPC_5_3:
hw_tpc_cap_bit = HW_CAP_TPC_SHIFT + (2 * NUM_OF_TPC_PER_DCORE) +
((hw_queue_id - GAUDI2_QUEUE_ID_DCORE2_TPC_0_0) >> 2);
break;
case GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE3_TPC_5_3:
hw_tpc_cap_bit = HW_CAP_TPC_SHIFT + (3 * NUM_OF_TPC_PER_DCORE) +
((hw_queue_id - GAUDI2_QUEUE_ID_DCORE3_TPC_0_0) >> 2);
break;
case GAUDI2_QUEUE_ID_DCORE0_TPC_6_0 ... GAUDI2_QUEUE_ID_DCORE0_TPC_6_3:
hw_tpc_cap_bit = HW_CAP_TPC_SHIFT + (4 * NUM_OF_TPC_PER_DCORE);
break;
case GAUDI2_QUEUE_ID_ROT_0_0 ... GAUDI2_QUEUE_ID_ROT_1_3:
hw_test_cap_bit = HW_CAP_ROT_SHIFT + ((hw_queue_id - GAUDI2_QUEUE_ID_ROT_0_0) >> 2);
break;
case GAUDI2_QUEUE_ID_NIC_0_0 ... GAUDI2_QUEUE_ID_NIC_23_3:
hw_nic_cap_bit = HW_CAP_NIC_SHIFT + ((hw_queue_id - GAUDI2_QUEUE_ID_NIC_0_0) >> 2);
/* special case where cap bit refers to the first queue id */
if (!hw_nic_cap_bit)
return !!(gaudi2->nic_hw_cap_initialized & BIT_ULL(0));
break;
case GAUDI2_QUEUE_ID_CPU_PQ:
return !!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q);
default:
return false;
}
if (hw_tpc_cap_bit)
return !!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(hw_tpc_cap_bit));
if (hw_nic_cap_bit)
return !!(gaudi2->nic_hw_cap_initialized & BIT_ULL(hw_nic_cap_bit));
if (hw_test_cap_bit)
hw_cap_mask = BIT_ULL(hw_test_cap_bit);
return !!(gaudi2->hw_cap_initialized & hw_cap_mask);
}
static bool gaudi2_is_arc_enabled(struct hl_device *hdev, u64 arc_id)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
switch (arc_id) {
case CPU_ID_SCHED_ARC0 ... CPU_ID_SCHED_ARC5:
case CPU_ID_MME_QMAN_ARC0...CPU_ID_ROT_QMAN_ARC1:
return !!(gaudi2->active_hw_arc & BIT_ULL(arc_id));
case CPU_ID_TPC_QMAN_ARC0...CPU_ID_TPC_QMAN_ARC24:
return !!(gaudi2->active_tpc_arc & BIT_ULL(arc_id - CPU_ID_TPC_QMAN_ARC0));
case CPU_ID_NIC_QMAN_ARC0...CPU_ID_NIC_QMAN_ARC23:
return !!(gaudi2->active_nic_arc & BIT_ULL(arc_id - CPU_ID_NIC_QMAN_ARC0));
default:
return false;
}
}
static void gaudi2_clr_arc_id_cap(struct hl_device *hdev, u64 arc_id)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
switch (arc_id) {
case CPU_ID_SCHED_ARC0 ... CPU_ID_SCHED_ARC5:
case CPU_ID_MME_QMAN_ARC0...CPU_ID_ROT_QMAN_ARC1:
gaudi2->active_hw_arc &= ~(BIT_ULL(arc_id));
break;
case CPU_ID_TPC_QMAN_ARC0...CPU_ID_TPC_QMAN_ARC24:
gaudi2->active_tpc_arc &= ~(BIT_ULL(arc_id - CPU_ID_TPC_QMAN_ARC0));
break;
case CPU_ID_NIC_QMAN_ARC0...CPU_ID_NIC_QMAN_ARC23:
gaudi2->active_nic_arc &= ~(BIT_ULL(arc_id - CPU_ID_NIC_QMAN_ARC0));
break;
default:
return;
}
}
static void gaudi2_set_arc_id_cap(struct hl_device *hdev, u64 arc_id)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
switch (arc_id) {
case CPU_ID_SCHED_ARC0 ... CPU_ID_SCHED_ARC5:
case CPU_ID_MME_QMAN_ARC0...CPU_ID_ROT_QMAN_ARC1:
gaudi2->active_hw_arc |= BIT_ULL(arc_id);
break;
case CPU_ID_TPC_QMAN_ARC0...CPU_ID_TPC_QMAN_ARC24:
gaudi2->active_tpc_arc |= BIT_ULL(arc_id - CPU_ID_TPC_QMAN_ARC0);
break;
case CPU_ID_NIC_QMAN_ARC0...CPU_ID_NIC_QMAN_ARC23:
gaudi2->active_nic_arc |= BIT_ULL(arc_id - CPU_ID_NIC_QMAN_ARC0);
break;
default:
return;
}
}
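/* Write the new PI to the queue's doorbell register; for the CPU PQ, also
* notify the device CPU through the GIC.
*/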
static void gaudi2_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
{
struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 pq_offset, reg_base, db_reg_offset, db_value;
if (hw_queue_id != GAUDI2_QUEUE_ID_CPU_PQ) {
/*
* QMAN has 4 successive PQ_PI registers, 1 for each of the QMAN PQs.
* Masking the H/W queue ID with 0x3 extracts the QMAN internal PQ
* number.
*/
pq_offset = (hw_queue_id & 0x3) * 4;
reg_base = gaudi2_qm_blocks_bases[hw_queue_id];
db_reg_offset = reg_base + QM_PQ_PI_0_OFFSET + pq_offset;
} else {
db_reg_offset = mmCPU_IF_PF_PQ_PI;
}
db_value = pi;
/* ring the doorbell */
WREG32(db_reg_offset, db_value);
if (hw_queue_id == GAUDI2_QUEUE_ID_CPU_PQ) {
/* make sure device CPU will read latest data from host */
mb();
WREG32(le32_to_cpu(dyn_regs->gic_host_pi_upd_irq),
gaudi2_irq_map_table[GAUDI2_EVENT_CPU_PI_UPDATE].cpu_id);
}
}
static void gaudi2_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
{
__le64 *pbd = (__le64 *) bd;
/* The QMANs are on the host memory so a simple copy suffices */
pqe[0] = pbd[0];
pqe[1] = pbd[1];
}
static void *gaudi2_dma_alloc_coherent(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags);
}
static void gaudi2_dma_free_coherent(struct hl_device *hdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle);
}
static int gaudi2_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
u32 timeout, u64 *result)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)) {
if (result)
*result = 0;
return 0;
}
if (!timeout)
timeout = GAUDI2_MSG_TO_CPU_TIMEOUT_USEC;
return hl_fw_send_cpu_message(hdev, GAUDI2_QUEUE_ID_CPU_PQ, msg, len, timeout, result);
}
static void *gaudi2_dma_pool_zalloc(struct hl_device *hdev, size_t size,
gfp_t mem_flags, dma_addr_t *dma_handle)
{
if (size > GAUDI2_DMA_POOL_BLK_SIZE)
return NULL;
return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
}
static void gaudi2_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr)
{
dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
}
static void *gaudi2_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle)
{
return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
}
static void gaudi2_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
{
hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
}
static dma_addr_t gaudi2_dma_map_single(struct hl_device *hdev, void *addr, int len,
enum dma_data_direction dir)
{
dma_addr_t dma_addr;
dma_addr = dma_map_single(&hdev->pdev->dev, addr, len, dir);
if (unlikely(dma_mapping_error(&hdev->pdev->dev, dma_addr)))
return 0;
return dma_addr;
}
static void gaudi2_dma_unmap_single(struct hl_device *hdev, dma_addr_t addr, int len,
enum dma_data_direction dir)
{
dma_unmap_single(&hdev->pdev->dev, addr, len, dir);
}
static int gaudi2_validate_cb_address(struct hl_device *hdev, struct hl_cs_parser *parser)
{
struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (!gaudi2_is_queue_enabled(hdev, parser->hw_queue_id)) {
dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id);
return -EINVAL;
}
/* Just check if CB address is valid */
if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
parser->user_cb_size,
asic_prop->sram_user_base_address,
asic_prop->sram_end_address))
return 0;
if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
parser->user_cb_size,
asic_prop->dram_user_base_address,
asic_prop->dram_end_address))
return 0;
if ((gaudi2->hw_cap_initialized & HW_CAP_DMMU_MASK) &&
hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
parser->user_cb_size,
asic_prop->dmmu.start_addr,
asic_prop->dmmu.end_addr))
return 0;
if (gaudi2->hw_cap_initialized & HW_CAP_PMMU) {
if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
parser->user_cb_size,
asic_prop->pmmu.start_addr,
asic_prop->pmmu.end_addr) ||
hl_mem_area_inside_range(
(u64) (uintptr_t) parser->user_cb,
parser->user_cb_size,
asic_prop->pmmu_huge.start_addr,
asic_prop->pmmu_huge.end_addr))
return 0;
} else if (gaudi2_host_phys_addr_valid((u64) (uintptr_t) parser->user_cb)) {
if (!hdev->pdev)
return 0;
if (!device_iommu_mapped(&hdev->pdev->dev))
return 0;
}
dev_err(hdev->dev, "CB address %p + 0x%x for internal QMAN is not valid\n",
parser->user_cb, parser->user_cb_size);
return -EFAULT;
}
static int gaudi2_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (!parser->is_kernel_allocated_cb)
return gaudi2_validate_cb_address(hdev, parser);
if (!(gaudi2->hw_cap_initialized & HW_CAP_PMMU)) {
dev_err(hdev->dev, "PMMU not initialized - Unsupported mode in Gaudi2\n");
return -EINVAL;
}
return 0;
}
static int gaudi2_send_heartbeat(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
return hl_fw_send_heartbeat(hdev);
}
/* This is an internal helper function, used to update the KDMA MMU properties.
 * Should be called with the proper KDMA lock held.
*/
static void gaudi2_kdma_set_mmbp_asid(struct hl_device *hdev,
bool mmu_bypass, u32 asid)
{
u32 rw_asid, rw_mmu_bp;
rw_asid = (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_SHIFT) |
(asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_SHIFT);
rw_mmu_bp = (!!mmu_bypass << ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_RD_SHIFT) |
(!!mmu_bypass << ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_WR_SHIFT);
WREG32(mmARC_FARM_KDMA_CTX_AXUSER_HB_ASID, rw_asid);
WREG32(mmARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP, rw_mmu_bp);
}
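/* Reset the given SOB and arm a sync-manager monitor on it, so that when the
 * SOB reaches sync_value a completion entry (mon_payload) is written to cq_id.
 */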
static void gaudi2_arm_cq_monitor(struct hl_device *hdev, u32 sob_id, u32 mon_id, u32 cq_id,
u32 mon_payload, u32 sync_value)
{
u32 sob_offset, mon_offset, sync_group_id, mode, mon_arm;
u8 mask;
sob_offset = sob_id * 4;
mon_offset = mon_id * 4;
/* Reset the SOB value */
WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset, 0);
/* Configure this address with CQ_ID 0 because CQ_EN is set */
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_offset, cq_id);
/* Configure this address with CS index because CQ_EN is set */
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_offset, mon_payload);
sync_group_id = sob_id / 8;
mask = ~(1 << (sob_id & 0x7));
mode = 1; /* comparison mode is "equal to" */
mon_arm = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOD_MASK, sync_value);
mon_arm |= FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOP_MASK, mode);
mon_arm |= FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_MASK_MASK, mask);
mon_arm |= FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SID_MASK, sync_group_id);
WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 + mon_offset, mon_arm);
}
/* Submit a copy/memset job directly to the KDMA engine and poll the reserved
 * KDMA completion CQ until it is done (gaudi2_arm_cq_monitor() above is the
 * internal helper used to arm that completion path).
 */
static int gaudi2_send_job_to_kdma(struct hl_device *hdev,
u64 src_addr, u64 dst_addr,
u32 size, bool is_memset)
{
u32 comp_val, commit_mask, *polling_addr, timeout, status = 0;
struct hl_cq_entry *cq_base;
struct hl_cq *cq;
u64 comp_addr;
int rc;
gaudi2_arm_cq_monitor(hdev, GAUDI2_RESERVED_SOB_KDMA_COMPLETION,
GAUDI2_RESERVED_MON_KDMA_COMPLETION,
GAUDI2_RESERVED_CQ_KDMA_COMPLETION, 1, 1);
comp_addr = CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 +
(GAUDI2_RESERVED_SOB_KDMA_COMPLETION * sizeof(u32));
comp_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1) |
FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1);
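/* Program the KDMA descriptor: source, destination, write-completion address/data and transfer size */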
WREG32(mmARC_FARM_KDMA_CTX_SRC_BASE_LO, lower_32_bits(src_addr));
WREG32(mmARC_FARM_KDMA_CTX_SRC_BASE_HI, upper_32_bits(src_addr));
WREG32(mmARC_FARM_KDMA_CTX_DST_BASE_LO, lower_32_bits(dst_addr));
WREG32(mmARC_FARM_KDMA_CTX_DST_BASE_HI, upper_32_bits(dst_addr));
WREG32(mmARC_FARM_KDMA_CTX_WR_COMP_ADDR_LO, lower_32_bits(comp_addr));
WREG32(mmARC_FARM_KDMA_CTX_WR_COMP_ADDR_HI, upper_32_bits(comp_addr));
WREG32(mmARC_FARM_KDMA_CTX_WR_COMP_WDATA, comp_val);
WREG32(mmARC_FARM_KDMA_CTX_DST_TSIZE_0, size);
commit_mask = FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_LIN_MASK, 1) |
FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_WR_COMP_EN_MASK, 1);
if (is_memset)
commit_mask |= FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_MEM_SET_MASK, 1);
WREG32(mmARC_FARM_KDMA_CTX_COMMIT, commit_mask);
/* Wait for completion */
cq = &hdev->completion_queue[GAUDI2_RESERVED_CQ_KDMA_COMPLETION];
cq_base = cq->kernel_address;
polling_addr = (u32 *)&cq_base[cq->ci];
if (hdev->pldm)
/* 20 seconds of timeout for each 1MB */
timeout = ((size / SZ_1M) + 1) * USEC_PER_SEC * 20;
else
timeout = KDMA_TIMEOUT_USEC;
/* Polling */
rc = hl_poll_timeout_memory(
hdev,
polling_addr,
status,
(status == 1),
1000,
timeout,
true);
*polling_addr = 0;
if (rc) {
dev_err(hdev->dev, "Timeout while waiting for KDMA to be idle\n");
WREG32(mmARC_FARM_KDMA_CFG_1, 1 << ARC_FARM_KDMA_CFG_1_HALT_SHIFT);
return rc;
}
cq->ci = hl_cq_inc_ptr(cq->ci);
return 0;
}
static void gaudi2_memset_device_lbw(struct hl_device *hdev, u32 addr, u32 size, u32 val)
{
u32 i;
for (i = 0 ; i < size ; i += sizeof(u32))
WREG32(addr + i, val);
}
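/* Toggle the QMAN between normal operation and raw test mode: in test mode the
 * QMAN is made test-trusted and its PQC is disabled so the test packet can be
 * submitted directly; the normal trusted/PQC configuration is restored when the
 * test is done.
 */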
static void gaudi2_qman_set_test_mode(struct hl_device *hdev, u32 hw_queue_id, bool enable)
{
u32 reg_base = gaudi2_qm_blocks_bases[hw_queue_id];
if (enable) {
WREG32(reg_base + QM_GLBL_PROT_OFFSET, QMAN_MAKE_TRUSTED_TEST_MODE);
WREG32(reg_base + QM_PQC_CFG_OFFSET, 0);
} else {
WREG32(reg_base + QM_GLBL_PROT_OFFSET, QMAN_MAKE_TRUSTED);
WREG32(reg_base + QM_PQC_CFG_OFFSET, 1 << PDMA0_QM_PQC_CFG_EN_SHIFT);
}
}
static inline u32 gaudi2_test_queue_hw_queue_id_to_sob_id(struct hl_device *hdev, u32 hw_queue_id)
{
return hdev->asic_prop.first_available_user_sob[0] +
hw_queue_id - GAUDI2_QUEUE_ID_PDMA_0_0;
}
static void gaudi2_test_queue_clear(struct hl_device *hdev, u32 hw_queue_id)
{
u32 sob_offset = gaudi2_test_queue_hw_queue_id_to_sob_id(hdev, hw_queue_id) * 4;
u32 sob_addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset;
/* Reset the SOB value */
WREG32(sob_addr, 0);
}
static int gaudi2_test_queue_send_msg_short(struct hl_device *hdev, u32 hw_queue_id, u32 sob_val,
struct gaudi2_queues_test_info *msg_info)
{
u32 sob_offset = gaudi2_test_queue_hw_queue_id_to_sob_id(hdev, hw_queue_id) * 4;
u32 tmp, sob_base = 1;
struct packet_msg_short *msg_short_pkt = msg_info->kern_addr;
size_t pkt_size = sizeof(struct packet_msg_short);
int rc;
tmp = (PACKET_MSG_SHORT << GAUDI2_PKT_CTL_OPCODE_SHIFT) |
(1 << GAUDI2_PKT_CTL_EB_SHIFT) |
(1 << GAUDI2_PKT_CTL_MB_SHIFT) |
(sob_base << GAUDI2_PKT_SHORT_CTL_BASE_SHIFT) |
(sob_offset << GAUDI2_PKT_SHORT_CTL_ADDR_SHIFT);
msg_short_pkt->value = cpu_to_le32(sob_val);
msg_short_pkt->ctl = cpu_to_le32(tmp);
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, msg_info->dma_addr);
if (rc)
dev_err(hdev->dev,
"Failed to send msg_short packet to H/W queue %d\n", hw_queue_id);
return rc;
}
static int gaudi2_test_queue_wait_completion(struct hl_device *hdev, u32 hw_queue_id, u32 sob_val)
{
u32 sob_offset = gaudi2_test_queue_hw_queue_id_to_sob_id(hdev, hw_queue_id) * 4;
u32 sob_addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset;
u32 timeout_usec, tmp;
int rc;
if (hdev->pldm)
timeout_usec = GAUDI2_PLDM_TEST_QUEUE_WAIT_USEC;
else
timeout_usec = GAUDI2_TEST_QUEUE_WAIT_USEC;
rc = hl_poll_timeout(
hdev,
sob_addr,
tmp,
(tmp == sob_val),
1000,
timeout_usec);
if (rc == -ETIMEDOUT) {
dev_err(hdev->dev, "H/W queue %d test failed (SOB_OBJ_0 == 0x%x)\n",
hw_queue_id, tmp);
rc = -EIO;
}
return rc;
}
static int gaudi2_test_cpu_queue(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
/*
* check the capability here, as send_cpu_message() won't update the result
* value if the capability is missing
*/
if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
return hl_fw_test_cpu_queue(hdev);
}
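/* Queues sanity test: for every enabled queue, send a MSG_SHORT packet that sets
 * a reserved SOB to sob_val and then poll that SOB until it reaches the expected
 * value. The CPU queue is tested separately through the firmware.
 */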
static int gaudi2_test_queues(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
struct gaudi2_queues_test_info *msg_info;
u32 sob_val = 0x5a5a;
int i, rc;
/* send a test message on all enabled Qs */
for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ; i++) {
if (!gaudi2_is_queue_enabled(hdev, i))
continue;
msg_info = &gaudi2->queues_test_info[i - GAUDI2_QUEUE_ID_PDMA_0_0];
gaudi2_qman_set_test_mode(hdev, i, true);
gaudi2_test_queue_clear(hdev, i);
rc = gaudi2_test_queue_send_msg_short(hdev, i, sob_val, msg_info);
if (rc)
goto done;
}
rc = gaudi2_test_cpu_queue(hdev);
if (rc)
goto done;
/* verify that all messages were processed */
for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ; i++) {
if (!gaudi2_is_queue_enabled(hdev, i))
continue;
rc = gaudi2_test_queue_wait_completion(hdev, i, sob_val);
if (rc)
/* chip is not usable, no need for cleanups, just bail-out with error */
goto done;
gaudi2_test_queue_clear(hdev, i);
gaudi2_qman_set_test_mode(hdev, i, false);
}
done:
return rc;
}
static int gaudi2_compute_reset_late_init(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
size_t irq_arr_size;
int rc;
gaudi2_init_arcs(hdev);
rc = gaudi2_scrub_arcs_dccm(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to scrub arcs DCCM\n");
return rc;
}
gaudi2_init_security(hdev);
/* Unmask all IRQs since some could have been received during the soft reset */
irq_arr_size = gaudi2->num_of_valid_hw_events * sizeof(gaudi2->hw_events[0]);
return hl_fw_unmask_irq_arr(hdev, gaudi2->hw_events, irq_arr_size);
}
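/* The gaudi2_get_*_idle_status() helpers below read the relevant QM/engine
 * status registers for each engine type, mark non-idle engines in the caller's
 * bitmask and optionally append a formatted row to the engines_data buffer.
 */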
static bool gaudi2_get_edma_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
struct engines_data *e)
{
u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, dma_core_sts0, dma_core_sts1;
struct asic_fixed_properties *prop = &hdev->asic_prop;
unsigned long *mask = (unsigned long *) mask_arr;
const char *edma_fmt = "%-6d%-6d%-9s%#-14x%#-15x%#x\n";
bool is_idle = true, is_eng_idle;
int engine_idx, i, j;
u64 offset;
if (e)
hl_engine_data_sprintf(e,
"\nCORE EDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0 DMA_CORE_STS1\n"
"---- ---- ------- ------------ ------------- -------------\n");
for (i = 0; i < NUM_OF_DCORES; i++) {
for (j = 0 ; j < NUM_OF_EDMA_PER_DCORE ; j++) {
int seq = i * NUM_OF_EDMA_PER_DCORE + j;
if (!(prop->edma_enabled_mask & BIT(seq)))
continue;
engine_idx = GAUDI2_DCORE0_ENGINE_ID_EDMA_0 +
i * GAUDI2_ENGINE_ID_DCORE_OFFSET + j;
offset = i * DCORE_OFFSET + j * DCORE_EDMA_OFFSET;
dma_core_sts0 = RREG32(mmDCORE0_EDMA0_CORE_STS0 + offset);
dma_core_sts1 = RREG32(mmDCORE0_EDMA0_CORE_STS1 + offset);
qm_glbl_sts0 = RREG32(mmDCORE0_EDMA0_QM_GLBL_STS0 + offset);
qm_glbl_sts1 = RREG32(mmDCORE0_EDMA0_QM_GLBL_STS1 + offset);
qm_cgm_sts = RREG32(mmDCORE0_EDMA0_QM_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
IS_DMA_IDLE(dma_core_sts0) && !IS_DMA_HALTED(dma_core_sts1);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
if (e)
hl_engine_data_sprintf(e, edma_fmt, i, j, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, dma_core_sts0, dma_core_sts1);
}
}
return is_idle;
}
static bool gaudi2_get_pdma_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
struct engines_data *e)
{
u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, dma_core_sts0, dma_core_sts1;
unsigned long *mask = (unsigned long *) mask_arr;
const char *pdma_fmt = "%-6d%-9s%#-14x%#-15x%#x\n";
bool is_idle = true, is_eng_idle;
int engine_idx, i;
u64 offset;
if (e)
hl_engine_data_sprintf(e,
"\nPDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0 DMA_CORE_STS1\n"
"---- ------- ------------ ------------- -------------\n");
for (i = 0 ; i < NUM_OF_PDMA ; i++) {
engine_idx = GAUDI2_ENGINE_ID_PDMA_0 + i;
offset = i * PDMA_OFFSET;
dma_core_sts0 = RREG32(mmPDMA0_CORE_STS0 + offset);
dma_core_sts1 = RREG32(mmPDMA0_CORE_STS1 + offset);
qm_glbl_sts0 = RREG32(mmPDMA0_QM_GLBL_STS0 + offset);
qm_glbl_sts1 = RREG32(mmPDMA0_QM_GLBL_STS1 + offset);
qm_cgm_sts = RREG32(mmPDMA0_QM_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
IS_DMA_IDLE(dma_core_sts0) && !IS_DMA_HALTED(dma_core_sts1);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
if (e)
hl_engine_data_sprintf(e, pdma_fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, dma_core_sts0, dma_core_sts1);
}
return is_idle;
}
static bool gaudi2_get_nic_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
struct engines_data *e)
{
unsigned long *mask = (unsigned long *) mask_arr;
const char *nic_fmt = "%-5d%-9s%#-14x%#-12x\n";
u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
bool is_idle = true, is_eng_idle;
int engine_idx, i;
u64 offset = 0;
/* NIC, twelve macros in a full chip */
if (e && hdev->nic_ports_mask)
hl_engine_data_sprintf(e,
"\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n"
"--- ------- ------------ ----------\n");
for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) {
if (!(i & 1))
offset = i / 2 * NIC_OFFSET;
else
offset += NIC_QM_OFFSET;
if (!(hdev->nic_ports_mask & BIT(i)))
continue;
engine_idx = GAUDI2_ENGINE_ID_NIC0_0 + i;
qm_glbl_sts0 = RREG32(mmNIC0_QM0_GLBL_STS0 + offset);
qm_glbl_sts1 = RREG32(mmNIC0_QM0_GLBL_STS1 + offset);
qm_cgm_sts = RREG32(mmNIC0_QM0_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
if (e)
hl_engine_data_sprintf(e, nic_fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts);
}
return is_idle;
}
static bool gaudi2_get_mme_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
struct engines_data *e)
{
u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, mme_arch_sts;
unsigned long *mask = (unsigned long *) mask_arr;
const char *mme_fmt = "%-5d%-6s%-9s%#-14x%#x\n";
bool is_idle = true, is_eng_idle;
int engine_idx, i;
u64 offset;
if (e)
hl_engine_data_sprintf(e,
"\nMME Stub is_idle QM_GLBL_STS0 MME_ARCH_STATUS\n"
"--- ---- ------- ------------ ---------------\n");
/* MME, one per Dcore */
for (i = 0 ; i < NUM_OF_DCORES ; i++) {
engine_idx = GAUDI2_DCORE0_ENGINE_ID_MME + i * GAUDI2_ENGINE_ID_DCORE_OFFSET;
offset = i * DCORE_OFFSET;
qm_glbl_sts0 = RREG32(mmDCORE0_MME_QM_GLBL_STS0 + offset);
qm_glbl_sts1 = RREG32(mmDCORE0_MME_QM_GLBL_STS1 + offset);
qm_cgm_sts = RREG32(mmDCORE0_MME_QM_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts);
is_idle &= is_eng_idle;
mme_arch_sts = RREG32(mmDCORE0_MME_CTRL_LO_ARCH_STATUS + offset);
is_eng_idle &= IS_MME_IDLE(mme_arch_sts);
is_idle &= is_eng_idle;
if (e)
hl_engine_data_sprintf(e, mme_fmt, i, "N",
is_eng_idle ? "Y" : "N",
qm_glbl_sts0,
mme_arch_sts);
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
}
return is_idle;
}
static void gaudi2_is_tpc_engine_idle(struct hl_device *hdev, int dcore, int inst, u32 offset,
struct iterate_module_ctx *ctx)
{
struct gaudi2_tpc_idle_data *idle_data = ctx->data;
u32 tpc_cfg_sts, qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
bool is_eng_idle;
int engine_idx;
if ((dcore == 0) && (inst == (NUM_DCORE0_TPC - 1)))
engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_6;
else
engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_0 +
dcore * GAUDI2_ENGINE_ID_DCORE_OFFSET + inst;
tpc_cfg_sts = RREG32(mmDCORE0_TPC0_CFG_STATUS + offset);
qm_glbl_sts0 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS0 + offset);
qm_glbl_sts1 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS1 + offset);
qm_cgm_sts = RREG32(mmDCORE0_TPC0_QM_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) &&
IS_TPC_IDLE(tpc_cfg_sts);
*(idle_data->is_idle) &= is_eng_idle;
if (idle_data->mask && !is_eng_idle)
set_bit(engine_idx, idle_data->mask);
if (idle_data->e)
hl_engine_data_sprintf(idle_data->e,
idle_data->tpc_fmt, dcore, inst,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
}
static bool gaudi2_get_tpc_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
struct engines_data *e)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
unsigned long *mask = (unsigned long *) mask_arr;
bool is_idle = true;
struct gaudi2_tpc_idle_data tpc_idle_data = {
.tpc_fmt = "%-6d%-5d%-9s%#-14x%#-12x%#x\n",
.e = e,
.mask = mask,
.is_idle = &is_idle,
};
struct iterate_module_ctx tpc_iter = {
.fn = &gaudi2_is_tpc_engine_idle,
.data = &tpc_idle_data,
};
if (e && prop->tpc_enabled_mask)
hl_engine_data_sprintf(e,
"\nCORE TPC is_idle QM_GLBL_STS0 QM_CGM_STS STATUS\n"
"---- --- ------- ------------ ---------- ------\n");
gaudi2_iterate_tpcs(hdev, &tpc_iter);
return *tpc_idle_data.is_idle;
}
static bool gaudi2_get_decoder_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
struct engines_data *e)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
unsigned long *mask = (unsigned long *) mask_arr;
const char *pcie_dec_fmt = "%-10d%-9s%#x\n";
const char *dec_fmt = "%-6d%-5d%-9s%#x\n";
bool is_idle = true, is_eng_idle;
u32 dec_swreg15, dec_enabled_bit;
int engine_idx, i, j;
u64 offset;
/* Decoders, two per Dcore and two shared PCIe decoders */
if (e && (prop->decoder_enabled_mask & (~PCIE_DEC_EN_MASK)))
hl_engine_data_sprintf(e,
"\nCORE DEC is_idle VSI_CMD_SWREG15\n"
"---- --- ------- ---------------\n");
for (i = 0 ; i < NUM_OF_DCORES ; i++) {
for (j = 0 ; j < NUM_OF_DEC_PER_DCORE ; j++) {
dec_enabled_bit = 1 << (i * NUM_OF_DEC_PER_DCORE + j);
if (!(prop->decoder_enabled_mask & dec_enabled_bit))
continue;
engine_idx = GAUDI2_DCORE0_ENGINE_ID_DEC_0 +
i * GAUDI2_ENGINE_ID_DCORE_OFFSET + j;
offset = i * DCORE_OFFSET + j * DCORE_DEC_OFFSET;
dec_swreg15 = RREG32(mmDCORE0_DEC0_CMD_SWREG15 + offset);
is_eng_idle = IS_DEC_IDLE(dec_swreg15);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
if (e)
hl_engine_data_sprintf(e, dec_fmt, i, j,
is_eng_idle ? "Y" : "N", dec_swreg15);
}
}
if (e && (prop->decoder_enabled_mask & PCIE_DEC_EN_MASK))
hl_engine_data_sprintf(e,
"\nPCIe DEC is_idle VSI_CMD_SWREG15\n"
"-------- ------- ---------------\n");
/* Check shared(PCIe) decoders */
for (i = 0 ; i < NUM_OF_DEC_PER_DCORE ; i++) {
dec_enabled_bit = PCIE_DEC_SHIFT + i;
if (!(prop->decoder_enabled_mask & BIT(dec_enabled_bit)))
continue;
engine_idx = GAUDI2_PCIE_ENGINE_ID_DEC_0 + i;
offset = i * DCORE_DEC_OFFSET;
dec_swreg15 = RREG32(mmPCIE_DEC0_CMD_SWREG15 + offset);
is_eng_idle = IS_DEC_IDLE(dec_swreg15);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
if (e)
hl_engine_data_sprintf(e, pcie_dec_fmt, i,
is_eng_idle ? "Y" : "N", dec_swreg15);
}
return is_idle;
}
static bool gaudi2_get_rotator_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
struct engines_data *e)
{
const char *rot_fmt = "%-6d%-5d%-9s%#-14x%#-14x%#x\n";
unsigned long *mask = (unsigned long *) mask_arr;
u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
bool is_idle = true, is_eng_idle;
int engine_idx, i;
u64 offset;
if (e)
hl_engine_data_sprintf(e,
"\nCORE ROT is_idle QM_GLBL_STS0 QM_GLBL_STS1 QM_CGM_STS\n"
"---- --- ------- ------------ ------------ ----------\n");
for (i = 0 ; i < NUM_OF_ROT ; i++) {
engine_idx = GAUDI2_ENGINE_ID_ROT_0 + i;
offset = i * ROT_OFFSET;
qm_glbl_sts0 = RREG32(mmROT0_QM_GLBL_STS0 + offset);
qm_glbl_sts1 = RREG32(mmROT0_QM_GLBL_STS1 + offset);
qm_cgm_sts = RREG32(mmROT0_QM_CGM_STS + offset);
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts);
is_idle &= is_eng_idle;
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
if (e)
hl_engine_data_sprintf(e, rot_fmt, i, 0, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts);
}
return is_idle;
}
static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
struct engines_data *e)
{
bool is_idle = true;
is_idle &= gaudi2_get_edma_idle_status(hdev, mask_arr, mask_len, e);
is_idle &= gaudi2_get_pdma_idle_status(hdev, mask_arr, mask_len, e);
is_idle &= gaudi2_get_nic_idle_status(hdev, mask_arr, mask_len, e);
is_idle &= gaudi2_get_mme_idle_status(hdev, mask_arr, mask_len, e);
is_idle &= gaudi2_get_tpc_idle_status(hdev, mask_arr, mask_len, e);
is_idle &= gaudi2_get_decoder_idle_status(hdev, mask_arr, mask_len, e);
is_idle &= gaudi2_get_rotator_idle_status(hdev, mask_arr, mask_len, e);
return is_idle;
}
static void gaudi2_hw_queues_lock(struct hl_device *hdev)
__acquires(&gaudi2->hw_queues_lock)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
spin_lock(&gaudi2->hw_queues_lock);
}
static void gaudi2_hw_queues_unlock(struct hl_device *hdev)
__releases(&gaudi2->hw_queues_lock)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
spin_unlock(&gaudi2->hw_queues_lock);
}
static u32 gaudi2_get_pci_id(struct hl_device *hdev)
{
return hdev->pdev->device;
}
static int gaudi2_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
return hl_fw_get_eeprom_data(hdev, data, max_size);
}
static void gaudi2_update_eq_ci(struct hl_device *hdev, u32 val)
{
WREG32(mmCPU_IF_EQ_RD_OFFS, val);
}
static void *gaudi2_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (aggregate) {
*size = (u32) sizeof(gaudi2->events_stat_aggregate);
return gaudi2->events_stat_aggregate;
}
*size = (u32) sizeof(gaudi2->events_stat);
return gaudi2->events_stat;
}
static void gaudi2_mmu_vdec_dcore_prepare(struct hl_device *hdev, int dcore_id,
int dcore_vdec_id, u32 rw_asid, u32 rw_mmu_bp)
{
u32 offset = (mmDCORE0_VDEC1_BRDG_CTRL_BASE - mmDCORE0_VDEC0_BRDG_CTRL_BASE) *
dcore_vdec_id + DCORE_OFFSET * dcore_id;
WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_MMU_BP + offset, rw_mmu_bp);
WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_ASID + offset, rw_asid);
WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_MMU_BP + offset, rw_mmu_bp);
WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_ASID + offset, rw_asid);
WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_MMU_BP + offset, rw_mmu_bp);
WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_ASID + offset, rw_asid);
WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_MMU_BP + offset, rw_mmu_bp);
WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_ASID + offset, rw_asid);
WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_MMU_BP + offset, rw_mmu_bp);
WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_ASID + offset, rw_asid);
}
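/* Per-Dcore MMU preparation: clear MMU bypass and set the ASID for the enabled
 * EDMAs, the Sync Manager, the MME (including its SBTE and WB ports) and the
 * enabled decoders of the given Dcore.
 */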
static void gaudi2_mmu_dcore_prepare(struct hl_device *hdev, int dcore_id, u32 asid)
{
u32 rw_asid = (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_SHIFT) |
(asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_SHIFT);
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 dcore_offset = dcore_id * DCORE_OFFSET;
u32 vdec_id, i, ports_offset, reg_val;
u8 edma_seq_base;
/* EDMA */
edma_seq_base = dcore_id * NUM_OF_EDMA_PER_DCORE;
if (prop->edma_enabled_mask & BIT(edma_seq_base)) {
WREG32(mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0);
WREG32(mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_ASID + dcore_offset, rw_asid);
WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP + dcore_offset, 0);
WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_ASID + dcore_offset, rw_asid);
}
if (prop->edma_enabled_mask & BIT(edma_seq_base + 1)) {
WREG32(mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0);
WREG32(mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_ASID + dcore_offset, rw_asid);
WREG32(mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_ASID + dcore_offset, rw_asid);
WREG32(mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_MMU_BP + dcore_offset, 0);
}
/* Sync Mngr */
WREG32(mmDCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV + dcore_offset, asid);
/*
* Sync Managers on dcores 1 - 3 are exposed to the user, so they must use the
* user ASID for any access type
*/
if (dcore_id > 0) {
reg_val = (asid << DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID_RD_SHIFT) |
(asid << DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID_WR_SHIFT);
WREG32(mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID + dcore_offset, reg_val);
WREG32(mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_MMU_BP + dcore_offset, 0);
}
WREG32(mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_MMU_BP + dcore_offset, 0);
WREG32(mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_ASID + dcore_offset, rw_asid);
for (i = 0 ; i < NUM_OF_MME_SBTE_PORTS ; i++) {
ports_offset = i * DCORE_MME_SBTE_OFFSET;
WREG32(mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_MMU_BP +
dcore_offset + ports_offset, 0);
WREG32(mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_ASID +
dcore_offset + ports_offset, rw_asid);
}
for (i = 0 ; i < NUM_OF_MME_WB_PORTS ; i++) {
ports_offset = i * DCORE_MME_WB_OFFSET;
WREG32(mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_MMU_BP +
dcore_offset + ports_offset, 0);
WREG32(mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_ASID +
dcore_offset + ports_offset, rw_asid);
}
WREG32(mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0);
WREG32(mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_ASID + dcore_offset, rw_asid);
/*
* Decoders
*/
for (vdec_id = 0 ; vdec_id < NUM_OF_DEC_PER_DCORE ; vdec_id++) {
if (prop->decoder_enabled_mask & BIT(dcore_id * NUM_OF_DEC_PER_DCORE + vdec_id))
gaudi2_mmu_vdec_dcore_prepare(hdev, dcore_id, vdec_id, rw_asid, 0);
}
}
static void gudi2_mmu_vdec_shared_prepare(struct hl_device *hdev,
int shared_vdec_id, u32 rw_asid, u32 rw_mmu_bp)
{
u32 offset = (mmPCIE_VDEC1_BRDG_CTRL_BASE - mmPCIE_VDEC0_BRDG_CTRL_BASE) * shared_vdec_id;
WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_MMU_BP + offset, rw_mmu_bp);
WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_ASID + offset, rw_asid);
WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_MMU_BP + offset, rw_mmu_bp);
WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_ASID + offset, rw_asid);
WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_MMU_BP + offset, rw_mmu_bp);
WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_ASID + offset, rw_asid);
WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_MMU_BP + offset, rw_mmu_bp);
WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_ASID + offset, rw_asid);
WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_MMU_BP + offset, rw_mmu_bp);
WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_ASID + offset, rw_asid);
}
static void gudi2_mmu_arc_farm_arc_dup_eng_prepare(struct hl_device *hdev, int arc_farm_id,
u32 rw_asid, u32 rw_mmu_bp)
{
u32 offset = (mmARC_FARM_ARC1_DUP_ENG_BASE - mmARC_FARM_ARC0_DUP_ENG_BASE) * arc_farm_id;
WREG32(mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_MMU_BP + offset, rw_mmu_bp);
WREG32(mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_ASID + offset, rw_asid);
}
static void gaudi2_arc_mmu_prepare(struct hl_device *hdev, u32 cpu_id, u32 asid)
{
u32 reg_base, reg_offset, reg_val = 0;
reg_base = gaudi2_arc_blocks_bases[cpu_id];
/* Enable MMU and configure asid for all relevant ARC regions */
reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_ARC_REGION_CFG_MMU_BP_MASK, 0);
reg_val |= FIELD_PREP(ARC_FARM_ARC0_AUX_ARC_REGION_CFG_0_ASID_MASK, asid);
reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION3_GENERAL);
WREG32(reg_base + reg_offset, reg_val);
reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION4_HBM0_FW);
WREG32(reg_base + reg_offset, reg_val);
reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION5_HBM1_GC_DATA);
WREG32(reg_base + reg_offset, reg_val);
reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION6_HBM2_GC_DATA);
WREG32(reg_base + reg_offset, reg_val);
reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION7_HBM3_GC_DATA);
WREG32(reg_base + reg_offset, reg_val);
reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION9_PCIE);
WREG32(reg_base + reg_offset, reg_val);
reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION10_GENERAL);
WREG32(reg_base + reg_offset, reg_val);
reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION11_GENERAL);
WREG32(reg_base + reg_offset, reg_val);
reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION12_GENERAL);
WREG32(reg_base + reg_offset, reg_val);
reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION13_GENERAL);
WREG32(reg_base + reg_offset, reg_val);
reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION14_GENERAL);
WREG32(reg_base + reg_offset, reg_val);
}
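/* If the boot CPU firmware is in use, delegate the ARC ASID configuration to the
 * firmware. Otherwise, configure the scheduler ARCs and the ARC of every enabled
 * queue directly.
 */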
static int gaudi2_arc_mmu_prepare_all(struct hl_device *hdev, u32 asid)
{
int i;
if (hdev->fw_components & FW_TYPE_BOOT_CPU)
return hl_fw_cpucp_engine_core_asid_set(hdev, asid);
for (i = CPU_ID_SCHED_ARC0 ; i < NUM_OF_ARC_FARMS_ARC ; i++)
gaudi2_arc_mmu_prepare(hdev, i, asid);
for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i += 4) {
if (!gaudi2_is_queue_enabled(hdev, i))
continue;
gaudi2_arc_mmu_prepare(hdev, gaudi2_queue_id_to_arc_id[i], asid);
}
return 0;
}
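/* Prepare the MMU attributes of blocks that are shared between all Dcores:
 * PDMAs, rotators, the shared PCIe decoders, the ARC farm DUP engines and the
 * ARC cores themselves.
 */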
static int gaudi2_mmu_shared_prepare(struct hl_device *hdev, u32 asid)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 rw_asid, offset;
int rc, i;
rw_asid = FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_MASK, asid) |
FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_MASK, asid);
WREG32(mmPDMA0_QM_AXUSER_NONSECURED_HB_ASID, rw_asid);
WREG32(mmPDMA0_QM_AXUSER_NONSECURED_HB_MMU_BP, 0);
WREG32(mmPDMA0_CORE_CTX_AXUSER_HB_ASID, rw_asid);
WREG32(mmPDMA0_CORE_CTX_AXUSER_HB_MMU_BP, 0);
WREG32(mmPDMA1_QM_AXUSER_NONSECURED_HB_ASID, rw_asid);
WREG32(mmPDMA1_QM_AXUSER_NONSECURED_HB_MMU_BP, 0);
WREG32(mmPDMA1_CORE_CTX_AXUSER_HB_ASID, rw_asid);
WREG32(mmPDMA1_CORE_CTX_AXUSER_HB_MMU_BP, 0);
/* ROT */
for (i = 0 ; i < NUM_OF_ROT ; i++) {
offset = i * ROT_OFFSET;
WREG32(mmROT0_QM_AXUSER_NONSECURED_HB_ASID + offset, rw_asid);
WREG32(mmROT0_QM_AXUSER_NONSECURED_HB_MMU_BP + offset, 0);
RMWREG32(mmROT0_CPL_QUEUE_AWUSER + offset, asid, MMUBP_ASID_MASK);
RMWREG32(mmROT0_DESC_HBW_ARUSER_LO + offset, asid, MMUBP_ASID_MASK);
RMWREG32(mmROT0_DESC_HBW_AWUSER_LO + offset, asid, MMUBP_ASID_MASK);
}
/* Shared Decoders are the last bits in the decoders mask */
if (prop->decoder_enabled_mask & BIT(NUM_OF_DCORES * NUM_OF_DEC_PER_DCORE + 0))
gudi2_mmu_vdec_shared_prepare(hdev, 0, rw_asid, 0);
if (prop->decoder_enabled_mask & BIT(NUM_OF_DCORES * NUM_OF_DEC_PER_DCORE + 1))
gudi2_mmu_vdec_shared_prepare(hdev, 1, rw_asid, 0);
/* arc farm arc dup eng */
for (i = 0 ; i < NUM_OF_ARC_FARMS_ARC ; i++)
gudi2_mmu_arc_farm_arc_dup_eng_prepare(hdev, i, rw_asid, 0);
rc = gaudi2_arc_mmu_prepare_all(hdev, asid);
if (rc)
return rc;
return 0;
}
static void gaudi2_tpc_mmu_prepare(struct hl_device *hdev, int dcore, int inst, u32 offset,
struct iterate_module_ctx *ctx)
{
struct gaudi2_tpc_mmu_data *mmu_data = ctx->data;
WREG32(mmDCORE0_TPC0_CFG_AXUSER_HB_MMU_BP + offset, 0);
WREG32(mmDCORE0_TPC0_CFG_AXUSER_HB_ASID + offset, mmu_data->rw_asid);
WREG32(mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_MMU_BP + offset, 0);
WREG32(mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_ASID + offset, mmu_data->rw_asid);
}
/* zero the MMUBP and set the ASID */
static int gaudi2_mmu_prepare(struct hl_device *hdev, u32 asid)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
struct gaudi2_tpc_mmu_data tpc_mmu_data;
struct iterate_module_ctx tpc_iter = {
.fn = &gaudi2_tpc_mmu_prepare,
.data = &tpc_mmu_data,
};
int rc, i;
if (asid & ~DCORE0_HMMU0_STLB_ASID_ASID_MASK) {
dev_crit(hdev->dev, "asid %u is too big\n", asid);
return -EINVAL;
}
if (!(gaudi2->hw_cap_initialized & HW_CAP_MMU_MASK))
return 0;
rc = gaudi2_mmu_shared_prepare(hdev, asid);
if (rc)
return rc;
/* configure DCORE MMUs */
tpc_mmu_data.rw_asid = (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_SHIFT) |
(asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_SHIFT);
gaudi2_iterate_tpcs(hdev, &tpc_iter);
for (i = 0 ; i < NUM_OF_DCORES ; i++)
gaudi2_mmu_dcore_prepare(hdev, i, asid);
return 0;
}
static inline bool is_info_event(u32 event)
{
switch (event) {
case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE:
case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S ... GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
/* return in case of a NIC status event - these events are received periodically
 * and are not an indication of an error.
*/
case GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG0 ... GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1:
return true;
default:
return false;
}
}
static void gaudi2_print_event(struct hl_device *hdev, u16 event_type,
bool ratelimited, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (ratelimited)
dev_err_ratelimited(hdev->dev, "%s: %pV\n",
gaudi2_irq_map_table[event_type].valid ?
gaudi2_irq_map_table[event_type].name : "N/A Event", &vaf);
else
dev_err(hdev->dev, "%s: %pV\n",
gaudi2_irq_map_table[event_type].valid ?
gaudi2_irq_map_table[event_type].name : "N/A Event", &vaf);
va_end(args);
}
static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type,
struct hl_eq_ecc_data *ecc_data)
{
u64 ecc_address = 0, ecc_syndrom = 0;
u8 memory_wrapper_idx = 0;
ecc_address = le64_to_cpu(ecc_data->ecc_address);
ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
memory_wrapper_idx = ecc_data->memory_wrapper_idx;
gaudi2_print_event(hdev, event_type, !ecc_data->is_critical,
"ECC error detected. address: %#llx. Syndrom: %#llx. block id %u. critical %u.",
ecc_address, ecc_syndrom, memory_wrapper_idx, ecc_data->is_critical);
return !!ecc_data->is_critical;
}
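/* Dump the lower QM state that is useful for debugging a QMAN error: the CQ and
 * ARC CQ pointers/sizes and the currently executing CP instruction.
 */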
static void print_lower_qman_data_on_err(struct hl_device *hdev, u64 qman_base)
{
u32 lo, hi, cq_ptr_size, arc_cq_ptr_size;
u64 cq_ptr, arc_cq_ptr, cp_current_inst;
lo = RREG32(qman_base + QM_CQ_PTR_LO_4_OFFSET);
hi = RREG32(qman_base + QM_CQ_PTR_HI_4_OFFSET);
cq_ptr = ((u64) hi) << 32 | lo;
cq_ptr_size = RREG32(qman_base + QM_CQ_TSIZE_4_OFFSET);
lo = RREG32(qman_base + QM_ARC_CQ_PTR_LO_OFFSET);
hi = RREG32(qman_base + QM_ARC_CQ_PTR_HI_OFFSET);
arc_cq_ptr = ((u64) hi) << 32 | lo;
arc_cq_ptr_size = RREG32(qman_base + QM_ARC_CQ_TSIZE_OFFSET);
lo = RREG32(qman_base + QM_CP_CURRENT_INST_LO_4_OFFSET);
hi = RREG32(qman_base + QM_CP_CURRENT_INST_HI_4_OFFSET);
cp_current_inst = ((u64) hi) << 32 | lo;
dev_info(hdev->dev,
"LowerQM. CQ: {ptr %#llx, size %u}, ARC_CQ: {ptr %#llx, size %u}, CP: {instruction %#llx}\n",
cq_ptr, cq_ptr_size, arc_cq_ptr, arc_cq_ptr_size, cp_current_inst);
}
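/* Scan the GLBL_ERR_STS register of every stream (plus the lower CP) and the ARB
 * error cause register, print every asserted error cause and return the total
 * number of errors found.
 */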
static int gaudi2_handle_qman_err_generic(struct hl_device *hdev, u16 event_type,
u64 qman_base, u32 qid_base)
{
u32 i, j, glbl_sts_val, arb_err_val, num_error_causes, error_count = 0;
u64 glbl_sts_addr, arb_err_addr;
char reg_desc[32];
glbl_sts_addr = qman_base + (mmDCORE0_TPC0_QM_GLBL_ERR_STS_0 - mmDCORE0_TPC0_QM_BASE);
arb_err_addr = qman_base + (mmDCORE0_TPC0_QM_ARB_ERR_CAUSE - mmDCORE0_TPC0_QM_BASE);
/* Iterate through all stream GLBL_ERR_STS registers + Lower CP */
for (i = 0 ; i < QMAN_STREAMS + 1 ; i++) {
glbl_sts_val = RREG32(glbl_sts_addr + 4 * i);
if (!glbl_sts_val)
continue;
if (i == QMAN_STREAMS) {
snprintf(reg_desc, ARRAY_SIZE(reg_desc), "LowerQM");
num_error_causes = GAUDI2_NUM_OF_LOWER_QM_ERR_CAUSE;
} else {
snprintf(reg_desc, ARRAY_SIZE(reg_desc), "stream%u", i);
num_error_causes = GAUDI2_NUM_OF_QM_ERR_CAUSE;
}
for (j = 0 ; j < num_error_causes ; j++)
if (glbl_sts_val & BIT(j)) {
gaudi2_print_event(hdev, event_type, true,
"%s. err cause: %s", reg_desc,
i == QMAN_STREAMS ?
gaudi2_lower_qman_error_cause[j] :
gaudi2_qman_error_cause[j]);
error_count++;
}
if (i == QMAN_STREAMS)
print_lower_qman_data_on_err(hdev, qman_base);
}
arb_err_val = RREG32(arb_err_addr);
if (!arb_err_val)
goto out;
for (j = 0 ; j < GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE ; j++) {
if (arb_err_val & BIT(j)) {
gaudi2_print_event(hdev, event_type, true,
"ARB_ERR. err cause: %s",
gaudi2_qman_arb_error_cause[j]);
error_count++;
}
}
out:
return error_count;
}
static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev,
u64 rtr_mstr_if_base_addr, bool is_write, char *name,
enum gaudi2_engine_id id, u64 *event_mask)
{
u32 razwi_hi, razwi_lo, razwi_xy;
u16 eng_id = id;
u8 rd_wr_flag;
if (is_write) {
razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HI);
razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_LO);
razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_XY);
rd_wr_flag = HL_RAZWI_WRITE;
} else {
razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HI);
razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_LO);
razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_XY);
rd_wr_flag = HL_RAZWI_READ;
}
hl_handle_razwi(hdev, (u64)razwi_hi << 32 | razwi_lo, &eng_id, 1,
rd_wr_flag | HL_RAZWI_HBW, event_mask);
dev_err_ratelimited(hdev->dev,
"%s-RAZWI SHARED RR HBW %s error, address %#llx, Initiator coordinates 0x%x\n",
name, is_write ? "WR" : "RD", (u64)razwi_hi << 32 | razwi_lo, razwi_xy);
}
static void gaudi2_razwi_rr_lbw_shared_printf_info(struct hl_device *hdev,
u64 rtr_mstr_if_base_addr, bool is_write, char *name,
enum gaudi2_engine_id id, u64 *event_mask)
{
u64 razwi_addr = CFG_BASE;
u32 razwi_xy;
u16 eng_id = id;
u8 rd_wr_flag;
if (is_write) {
razwi_addr += RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI);
razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_XY);
rd_wr_flag = HL_RAZWI_WRITE;
} else {
razwi_addr += RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI);
razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_XY);
rd_wr_flag = HL_RAZWI_READ;
}
hl_handle_razwi(hdev, razwi_addr, &eng_id, 1, rd_wr_flag | HL_RAZWI_LBW, event_mask);
dev_err_ratelimited(hdev->dev,
"%s-RAZWI SHARED RR LBW %s error, mstr_if 0x%llx, captured address 0x%llX Initiator coordinates 0x%x\n",
name, is_write ? "WR" : "RD", rtr_mstr_if_base_addr, razwi_addr,
razwi_xy);
}
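/* Translate a RAZWI source module and its index into the global engine id that
 * is used when recording the RAZWI information.
 */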
static enum gaudi2_engine_id gaudi2_razwi_calc_engine_id(struct hl_device *hdev,
enum razwi_event_sources module, u8 module_idx)
{
switch (module) {
case RAZWI_TPC:
if (module_idx == (NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES))
return GAUDI2_DCORE0_ENGINE_ID_TPC_6;
return (((module_idx / NUM_OF_TPC_PER_DCORE) * ENGINE_ID_DCORE_OFFSET) +
(module_idx % NUM_OF_TPC_PER_DCORE) +
(GAUDI2_DCORE0_ENGINE_ID_TPC_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0));
case RAZWI_MME:
return ((GAUDI2_DCORE0_ENGINE_ID_MME - GAUDI2_DCORE0_ENGINE_ID_EDMA_0) +
(module_idx * ENGINE_ID_DCORE_OFFSET));
case RAZWI_EDMA:
return (((module_idx / NUM_OF_EDMA_PER_DCORE) * ENGINE_ID_DCORE_OFFSET) +
(module_idx % NUM_OF_EDMA_PER_DCORE));
case RAZWI_PDMA:
return (GAUDI2_ENGINE_ID_PDMA_0 + module_idx);
case RAZWI_NIC:
return (GAUDI2_ENGINE_ID_NIC0_0 + (NIC_NUMBER_OF_QM_PER_MACRO * module_idx));
case RAZWI_DEC:
if (module_idx == 8)
return GAUDI2_PCIE_ENGINE_ID_DEC_0;
if (module_idx == 9)
return GAUDI2_PCIE_ENGINE_ID_DEC_1;
return (((module_idx / NUM_OF_DEC_PER_DCORE) * ENGINE_ID_DCORE_OFFSET) +
(module_idx % NUM_OF_DEC_PER_DCORE) +
(GAUDI2_DCORE0_ENGINE_ID_DEC_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0));
case RAZWI_ROT:
return GAUDI2_ENGINE_ID_ROT_0 + module_idx;
default:
return GAUDI2_ENGINE_ID_SIZE;
}
}
/*
* This function handles RR (Range Register) hit events
* raised by initiators, not PSOC RAZWI.
*/
static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev,
enum razwi_event_sources module, u8 module_idx,
u8 module_sub_idx, u64 *event_mask)
{
bool via_sft = false;
u32 hbw_rtr_id, lbw_rtr_id, dcore_id, dcore_rtr_id, eng_id, binned_idx;
u64 hbw_rtr_mstr_if_base_addr, lbw_rtr_mstr_if_base_addr;
u32 hbw_shrd_aw = 0, hbw_shrd_ar = 0;
u32 lbw_shrd_aw = 0, lbw_shrd_ar = 0;
char initiator_name[64];
switch (module) {
case RAZWI_TPC:
sprintf(initiator_name, "TPC_%u", module_idx);
if (hdev->tpc_binning) {
binned_idx = __ffs(hdev->tpc_binning);
if (binned_idx == module_idx)
module_idx = TPC_ID_DCORE0_TPC6;
}
hbw_rtr_id = gaudi2_tpc_initiator_hbw_rtr_id[module_idx];
if (hl_is_fw_sw_ver_below(hdev, 1, 9) &&
!hdev->asic_prop.fw_security_enabled &&
((module_idx == 0) || (module_idx == 1)))
lbw_rtr_id = DCORE0_RTR0;
else
lbw_rtr_id = gaudi2_tpc_initiator_lbw_rtr_id[module_idx];
break;
case RAZWI_MME:
sprintf(initiator_name, "MME_%u", module_idx);
switch (module_sub_idx) {
case MME_WAP0:
hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap0;
break;
case MME_WAP1:
hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap1;
break;
case MME_WRITE:
hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].write;
break;
case MME_READ:
hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].read;
break;
case MME_SBTE0:
hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte0;
break;
case MME_SBTE1:
hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte1;
break;
case MME_SBTE2:
hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte2;
break;
case MME_SBTE3:
hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte3;
break;
case MME_SBTE4:
hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte4;
break;
default:
return;
}
lbw_rtr_id = hbw_rtr_id;
break;
case RAZWI_EDMA:
hbw_rtr_mstr_if_base_addr = gaudi2_edma_initiator_hbw_sft[module_idx];
dcore_id = module_idx / NUM_OF_EDMA_PER_DCORE;
/* SFT has separate MSTR_IF for LBW, only there we can
* read the LBW razwi related registers
*/
lbw_rtr_mstr_if_base_addr = mmSFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_BASE +
dcore_id * SFT_DCORE_OFFSET;
via_sft = true;
sprintf(initiator_name, "EDMA_%u", module_idx);
break;
case RAZWI_PDMA:
hbw_rtr_id = gaudi2_pdma_initiator_hbw_rtr_id[module_idx];
lbw_rtr_id = gaudi2_pdma_initiator_lbw_rtr_id[module_idx];
sprintf(initiator_name, "PDMA_%u", module_idx);
break;
case RAZWI_NIC:
hbw_rtr_id = gaudi2_nic_initiator_hbw_rtr_id[module_idx];
lbw_rtr_id = gaudi2_nic_initiator_lbw_rtr_id[module_idx];
sprintf(initiator_name, "NIC_%u", module_idx);
break;
case RAZWI_DEC:
sprintf(initiator_name, "DEC_%u", module_idx);
if (hdev->decoder_binning) {
binned_idx = __ffs(hdev->decoder_binning);
if (binned_idx == module_idx)
module_idx = DEC_ID_PCIE_VDEC1;
}
hbw_rtr_id = gaudi2_dec_initiator_hbw_rtr_id[module_idx];
lbw_rtr_id = gaudi2_dec_initiator_lbw_rtr_id[module_idx];
break;
case RAZWI_ROT:
hbw_rtr_id = gaudi2_rot_initiator_hbw_rtr_id[module_idx];
lbw_rtr_id = gaudi2_rot_initiator_lbw_rtr_id[module_idx];
sprintf(initiator_name, "ROT_%u", module_idx);
break;
default:
return;
}
/* Find router mstr_if register base */
if (!via_sft) {
dcore_id = hbw_rtr_id / NUM_OF_RTR_PER_DCORE;
dcore_rtr_id = hbw_rtr_id % NUM_OF_RTR_PER_DCORE;
hbw_rtr_mstr_if_base_addr = mmDCORE0_RTR0_CTRL_BASE +
dcore_id * DCORE_OFFSET +
dcore_rtr_id * DCORE_RTR_OFFSET +
RTR_MSTR_IF_OFFSET;
lbw_rtr_mstr_if_base_addr = hbw_rtr_mstr_if_base_addr +
(((s32)lbw_rtr_id - hbw_rtr_id) * DCORE_RTR_OFFSET);
}
/* Find out event cause by reading "RAZWI_HAPPENED" registers */
hbw_shrd_aw = RREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED);
hbw_shrd_ar = RREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED);
lbw_shrd_aw = RREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED);
lbw_shrd_ar = RREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED);
eng_id = gaudi2_razwi_calc_engine_id(hdev, module, module_idx);
if (hbw_shrd_aw) {
gaudi2_razwi_rr_hbw_shared_printf_info(hdev, hbw_rtr_mstr_if_base_addr, true,
initiator_name, eng_id, event_mask);
/* Clear event indication */
WREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED, hbw_shrd_aw);
}
if (hbw_shrd_ar) {
gaudi2_razwi_rr_hbw_shared_printf_info(hdev, hbw_rtr_mstr_if_base_addr, false,
initiator_name, eng_id, event_mask);
/* Clear event indication */
WREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED, hbw_shrd_ar);
}
if (lbw_shrd_aw) {
gaudi2_razwi_rr_lbw_shared_printf_info(hdev, lbw_rtr_mstr_if_base_addr, true,
initiator_name, eng_id, event_mask);
/* Clear event indication */
WREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED, lbw_shrd_aw);
}
if (lbw_shrd_ar) {
gaudi2_razwi_rr_lbw_shared_printf_info(hdev, lbw_rtr_mstr_if_base_addr, false,
initiator_name, eng_id, event_mask);
/* Clear event indication */
WREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED, lbw_shrd_ar);
}
}
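/* Check the RAZWI indications of every enabled initiator (TPC, MME, EDMA, PDMA,
 * NIC, DEC and ROT), without updating an event mask.
 */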
static void gaudi2_check_if_razwi_happened(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u8 mod_idx, sub_mod;
/* check all TPCs */
for (mod_idx = 0 ; mod_idx < (NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1) ; mod_idx++) {
if (prop->tpc_enabled_mask & BIT(mod_idx))
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, mod_idx, 0, NULL);
}
/* check all MMEs */
for (mod_idx = 0 ; mod_idx < (NUM_OF_MME_PER_DCORE * NUM_OF_DCORES) ; mod_idx++)
for (sub_mod = MME_WAP0 ; sub_mod < MME_INITIATORS_MAX ; sub_mod++)
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mod_idx,
sub_mod, NULL);
/* check all EDMAs */
for (mod_idx = 0 ; mod_idx < (NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES) ; mod_idx++)
if (prop->edma_enabled_mask & BIT(mod_idx))
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, mod_idx, 0, NULL);
/* check all PDMAs */
for (mod_idx = 0 ; mod_idx < NUM_OF_PDMA ; mod_idx++)
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, mod_idx, 0, NULL);
/* check all NICs */
for (mod_idx = 0 ; mod_idx < NIC_NUMBER_OF_PORTS ; mod_idx++)
if (hdev->nic_ports_mask & BIT(mod_idx))
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_NIC, mod_idx >> 1, 0,
NULL);
/* check all DECs */
for (mod_idx = 0 ; mod_idx < NUMBER_OF_DEC ; mod_idx++)
if (prop->decoder_enabled_mask & BIT(mod_idx))
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, mod_idx, 0, NULL);
/* check all ROTs */
for (mod_idx = 0 ; mod_idx < NUM_OF_ROT ; mod_idx++)
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL);
}
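/* Find all engines whose AXUSER X/Y coordinates match the captured value, fill
 * in their engine ids and router-control bases, and build a printable engine
 * name string. Returns the number of matching engines.
 */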
static int gaudi2_psoc_razwi_get_engines(struct gaudi2_razwi_info *razwi_info, u32 array_size,
u32 axuser_xy, u32 *base, u16 *eng_id,
char *eng_name)
{
int i, num_of_eng = 0;
u16 str_size = 0;
for (i = 0 ; i < array_size ; i++) {
if (axuser_xy != razwi_info[i].axuser_xy)
continue;
eng_id[num_of_eng] = razwi_info[i].eng_id;
base[num_of_eng] = razwi_info[i].rtr_ctrl;
if (!num_of_eng)
str_size += snprintf(eng_name + str_size,
PSOC_RAZWI_ENG_STR_SIZE - str_size, "%s",
razwi_info[i].eng_name);
else
str_size += snprintf(eng_name + str_size,
PSOC_RAZWI_ENG_STR_SIZE - str_size, " or %s",
razwi_info[i].eng_name);
num_of_eng++;
}
return num_of_eng;
}
static bool gaudi2_handle_psoc_razwi_happened(struct hl_device *hdev, u32 razwi_reg,
u64 *event_mask)
{
u32 axuser_xy = RAZWI_GET_AXUSER_XY(razwi_reg), addr_hi = 0, addr_lo = 0;
u32 base[PSOC_RAZWI_MAX_ENG_PER_RTR];
u16 num_of_eng, eng_id[PSOC_RAZWI_MAX_ENG_PER_RTR];
char eng_name_str[PSOC_RAZWI_ENG_STR_SIZE];
bool razwi_happened = false;
u64 addr;
int i;
num_of_eng = gaudi2_psoc_razwi_get_engines(common_razwi_info, ARRAY_SIZE(common_razwi_info),
axuser_xy, base, eng_id, eng_name_str);
/* If there is no match for the XY coordinates, try to find them in the MME razwi table */
if (!num_of_eng) {
axuser_xy = RAZWI_GET_AXUSER_LOW_XY(razwi_reg);
num_of_eng = gaudi2_psoc_razwi_get_engines(mme_razwi_info,
ARRAY_SIZE(mme_razwi_info),
axuser_xy, base, eng_id,
eng_name_str);
}
for (i = 0 ; i < num_of_eng ; i++) {
if (RREG32(base[i] + DEC_RAZWI_HBW_AW_SET)) {
addr_hi = RREG32(base[i] + DEC_RAZWI_HBW_AW_ADDR_HI);
addr_lo = RREG32(base[i] + DEC_RAZWI_HBW_AW_ADDR_LO);
addr = ((u64)addr_hi << 32) + addr_lo;
if (addr) {
dev_err(hdev->dev,
"PSOC HBW AW RAZWI: %s, address (aligned to 128 byte): 0x%llX\n",
eng_name_str, addr);
hl_handle_razwi(hdev, addr, &eng_id[0],
num_of_eng, HL_RAZWI_HBW | HL_RAZWI_WRITE, event_mask);
razwi_happened = true;
}
}
if (RREG32(base[i] + DEC_RAZWI_HBW_AR_SET)) {
addr_hi = RREG32(base[i] + DEC_RAZWI_HBW_AR_ADDR_HI);
addr_lo = RREG32(base[i] + DEC_RAZWI_HBW_AR_ADDR_LO);
addr = ((u64)addr_hi << 32) + addr_lo;
if (addr) {
dev_err(hdev->dev,
"PSOC HBW AR RAZWI: %s, address (aligned to 128 byte): 0x%llX\n",
eng_name_str, addr);
hl_handle_razwi(hdev, addr, &eng_id[0],
num_of_eng, HL_RAZWI_HBW | HL_RAZWI_READ, event_mask);
razwi_happened = true;
}
}
if (RREG32(base[i] + DEC_RAZWI_LBW_AW_SET)) {
addr_lo = RREG32(base[i] + DEC_RAZWI_LBW_AW_ADDR);
if (addr_lo) {
dev_err(hdev->dev,
"PSOC LBW AW RAZWI: %s, address (aligned to 128 byte): 0x%X\n",
eng_name_str, addr_lo);
hl_handle_razwi(hdev, addr_lo, &eng_id[0],
num_of_eng, HL_RAZWI_LBW | HL_RAZWI_WRITE, event_mask);
razwi_happened = true;
}
}
if (RREG32(base[i] + DEC_RAZWI_LBW_AR_SET)) {
addr_lo = RREG32(base[i] + DEC_RAZWI_LBW_AR_ADDR);
if (addr_lo) {
dev_err(hdev->dev,
"PSOC LBW AR RAZWI: %s, address (aligned to 128 byte): 0x%X\n",
eng_name_str, addr_lo);
hl_handle_razwi(hdev, addr_lo, &eng_id[0],
num_of_eng, HL_RAZWI_LBW | HL_RAZWI_READ, event_mask);
razwi_happened = true;
}
}
/* In the common case the loop will break when there is only one engine id, or
 * when several engines share the same router. The exceptional case is a PSOC
 * RAZWI from EDMA, where it is possible to get an axuser id that fits 2 routers
 * (the 2 interfaces of the SFT router). In that case the first router might not
 * hold the info and we will need to iterate over the other router.
*/
if (razwi_happened)
break;
}
return razwi_happened;
}
/* PSOC RAZWI interrupt occurs only when trying to access a bad address */
static int gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev, u64 *event_mask)
{
u32 razwi_mask_info, razwi_intr = 0, error_count = 0;
if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX)) {
razwi_intr = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT);
if (!razwi_intr)
return 0;
}
razwi_mask_info = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_MASK_INFO);
dev_err_ratelimited(hdev->dev,
"PSOC RAZWI interrupt: Mask %d, AR %d, AW %d, AXUSER_L 0x%x AXUSER_H 0x%x\n",
FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_MASK, razwi_mask_info),
FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_MASK, razwi_mask_info),
FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_MASK, razwi_mask_info),
FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_MASK, razwi_mask_info),
FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_MASK, razwi_mask_info));
if (gaudi2_handle_psoc_razwi_happened(hdev, razwi_mask_info, event_mask))
error_count++;
else
dev_err_ratelimited(hdev->dev,
"PSOC RAZWI interrupt: invalid razwi info (0x%x)\n",
razwi_mask_info);
/* Clear Interrupts only on pldm or if f/w doesn't handle interrupts */
if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX))
WREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT, razwi_intr);
return error_count;
}
static int _gaudi2_handle_qm_sei_err(struct hl_device *hdev, u64 qman_base, u16 event_type)
{
u32 i, sts_val, sts_clr_val = 0, error_count = 0;
sts_val = RREG32(qman_base + QM_SEI_STATUS_OFFSET);
for (i = 0 ; i < GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", gaudi2_qm_sei_error_cause[i]);
sts_clr_val |= BIT(i);
error_count++;
}
}
WREG32(qman_base + QM_SEI_STATUS_OFFSET, sts_clr_val);
return error_count;
}
static int gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type,
bool extended_err_check, u64 *event_mask)
{
enum razwi_event_sources module;
u32 error_count = 0;
u64 qman_base;
u8 index;
switch (event_type) {
case GAUDI2_EVENT_TPC0_AXI_ERR_RSP ... GAUDI2_EVENT_TPC23_AXI_ERR_RSP:
index = event_type - GAUDI2_EVENT_TPC0_AXI_ERR_RSP;
qman_base = mmDCORE0_TPC0_QM_BASE +
(index / NUM_OF_TPC_PER_DCORE) * DCORE_OFFSET +
(index % NUM_OF_TPC_PER_DCORE) * DCORE_TPC_OFFSET;
module = RAZWI_TPC;
break;
case GAUDI2_EVENT_TPC24_AXI_ERR_RSP:
qman_base = mmDCORE0_TPC6_QM_BASE;
module = RAZWI_TPC;
break;
case GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE:
case GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE:
case GAUDI2_EVENT_MME2_CTRL_AXI_ERROR_RESPONSE:
case GAUDI2_EVENT_MME3_CTRL_AXI_ERROR_RESPONSE:
index = (event_type - GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE) /
(GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE -
GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE);
qman_base = mmDCORE0_MME_QM_BASE + index * DCORE_OFFSET;
module = RAZWI_MME;
break;
case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP:
case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP:
index = event_type - GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP;
qman_base = mmPDMA0_QM_BASE + index * PDMA_OFFSET;
module = RAZWI_PDMA;
break;
case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE:
case GAUDI2_EVENT_ROTATOR1_AXI_ERROR_RESPONSE:
index = event_type - GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE;
qman_base = mmROT0_QM_BASE + index * ROT_OFFSET;
module = RAZWI_ROT;
break;
default:
return 0;
}
error_count = _gaudi2_handle_qm_sei_err(hdev, qman_base, event_type);
/* There is a single event per NIC macro, so we should check both of its QMAN blocks */
if (event_type >= GAUDI2_EVENT_NIC0_AXI_ERROR_RESPONSE &&
event_type <= GAUDI2_EVENT_NIC11_AXI_ERROR_RESPONSE)
error_count += _gaudi2_handle_qm_sei_err(hdev,
qman_base + NIC_QM_OFFSET, event_type);
if (extended_err_check) {
/* check if RAZWI happened */
gaudi2_ack_module_razwi_event_handler(hdev, module, 0, 0, event_mask);
hl_check_for_glbl_errors(hdev);
}
return error_count;
}
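/* Map the QM error event to the base queue id and QMAN block of the faulting
 * engine and run the generic QMAN error handler on it.
 */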
static int gaudi2_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
u32 qid_base, error_count = 0;
u64 qman_base;
u8 index = 0;
switch (event_type) {
case GAUDI2_EVENT_TPC0_QM ... GAUDI2_EVENT_TPC5_QM:
index = event_type - GAUDI2_EVENT_TPC0_QM;
qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmDCORE0_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
break;
case GAUDI2_EVENT_TPC6_QM ... GAUDI2_EVENT_TPC11_QM:
index = event_type - GAUDI2_EVENT_TPC6_QM;
qid_base = GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmDCORE1_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
break;
case GAUDI2_EVENT_TPC12_QM ... GAUDI2_EVENT_TPC17_QM:
index = event_type - GAUDI2_EVENT_TPC12_QM;
qid_base = GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmDCORE2_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
break;
case GAUDI2_EVENT_TPC18_QM ... GAUDI2_EVENT_TPC23_QM:
index = event_type - GAUDI2_EVENT_TPC18_QM;
qid_base = GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmDCORE3_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
break;
case GAUDI2_EVENT_TPC24_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_6_0;
qman_base = mmDCORE0_TPC6_QM_BASE;
break;
case GAUDI2_EVENT_MME0_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE0_MME_0_0;
qman_base = mmDCORE0_MME_QM_BASE;
break;
case GAUDI2_EVENT_MME1_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE1_MME_0_0;
qman_base = mmDCORE1_MME_QM_BASE;
break;
case GAUDI2_EVENT_MME2_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE2_MME_0_0;
qman_base = mmDCORE2_MME_QM_BASE;
break;
case GAUDI2_EVENT_MME3_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE3_MME_0_0;
qman_base = mmDCORE3_MME_QM_BASE;
break;
case GAUDI2_EVENT_HDMA0_QM:
index = 0;
qid_base = GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0;
qman_base = mmDCORE0_EDMA0_QM_BASE;
break;
case GAUDI2_EVENT_HDMA1_QM:
index = 1;
qid_base = GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0;
qman_base = mmDCORE0_EDMA1_QM_BASE;
break;
case GAUDI2_EVENT_HDMA2_QM:
index = 2;
qid_base = GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0;
qman_base = mmDCORE1_EDMA0_QM_BASE;
break;
case GAUDI2_EVENT_HDMA3_QM:
index = 3;
qid_base = GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0;
qman_base = mmDCORE1_EDMA1_QM_BASE;
break;
case GAUDI2_EVENT_HDMA4_QM:
index = 4;
qid_base = GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0;
qman_base = mmDCORE2_EDMA0_QM_BASE;
break;
case GAUDI2_EVENT_HDMA5_QM:
index = 5;
qid_base = GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0;
qman_base = mmDCORE2_EDMA1_QM_BASE;
break;
case GAUDI2_EVENT_HDMA6_QM:
index = 6;
qid_base = GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0;
qman_base = mmDCORE3_EDMA0_QM_BASE;
break;
case GAUDI2_EVENT_HDMA7_QM:
index = 7;
qid_base = GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0;
qman_base = mmDCORE3_EDMA1_QM_BASE;
break;
case GAUDI2_EVENT_PDMA0_QM:
qid_base = GAUDI2_QUEUE_ID_PDMA_0_0;
qman_base = mmPDMA0_QM_BASE;
break;
case GAUDI2_EVENT_PDMA1_QM:
qid_base = GAUDI2_QUEUE_ID_PDMA_1_0;
qman_base = mmPDMA1_QM_BASE;
break;
case GAUDI2_EVENT_ROTATOR0_ROT0_QM:
qid_base = GAUDI2_QUEUE_ID_ROT_0_0;
qman_base = mmROT0_QM_BASE;
break;
case GAUDI2_EVENT_ROTATOR1_ROT1_QM:
qid_base = GAUDI2_QUEUE_ID_ROT_1_0;
qman_base = mmROT1_QM_BASE;
break;
default:
return 0;
}
error_count = gaudi2_handle_qman_err_generic(hdev, event_type, qman_base, qid_base);
/* Handle EDMA QM SEI here because there is no AXI error response event for EDMA */
if (event_type >= GAUDI2_EVENT_HDMA2_QM && event_type <= GAUDI2_EVENT_HDMA5_QM) {
error_count += _gaudi2_handle_qm_sei_err(hdev, qman_base, event_type);
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, index, 0, event_mask);
}
hl_check_for_glbl_errors(hdev);
return error_count;
}
static int gaudi2_handle_arc_farm_sei_err(struct hl_device *hdev, u16 event_type)
{
u32 i, sts_val, sts_clr_val, error_count = 0, arc_farm;
for (arc_farm = 0 ; arc_farm < NUM_OF_ARC_FARMS_ARC ; arc_farm++) {
sts_clr_val = 0;
sts_val = RREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS +
(arc_farm * ARC_FARM_OFFSET));
for (i = 0 ; i < GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
gaudi2_print_event(hdev, event_type, true,
"ARC FARM ARC %u err cause: %s",
arc_farm, gaudi2_arc_sei_error_cause[i]);
sts_clr_val |= BIT(i);
error_count++;
}
}
WREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR + (arc_farm * ARC_FARM_OFFSET),
sts_clr_val);
}
hl_check_for_glbl_errors(hdev);
return error_count;
}
static int gaudi2_handle_cpu_sei_err(struct hl_device *hdev, u16 event_type)
{
u32 i, sts_val, sts_clr_val = 0, error_count = 0;
sts_val = RREG32(mmCPU_IF_CPU_SEI_INTR_STS);
for (i = 0 ; i < GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", gaudi2_cpu_sei_error_cause[i]);
sts_clr_val |= BIT(i);
error_count++;
}
}
hl_check_for_glbl_errors(hdev);
WREG32(mmCPU_IF_CPU_SEI_INTR_CLR, sts_clr_val);
return error_count;
}
static int gaudi2_handle_rot_err(struct hl_device *hdev, u8 rot_index, u16 event_type,
struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause,
u64 *event_mask)
{
u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data);
u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_ROT_ERR_CAUSE ; i++)
if (intr_cause_data & BIT(i)) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", guadi2_rot_error_cause[i]);
error_count++;
}
/* check if RAZWI happened */
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, rot_index, 0, event_mask);
hl_check_for_glbl_errors(hdev);
return error_count;
}
static int gaudi2_tpc_ack_interrupts(struct hl_device *hdev, u8 tpc_index, u16 event_type,
struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause,
u64 *event_mask)
{
u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data);
u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_TPC_INTR_CAUSE ; i++)
if (intr_cause_data & BIT(i)) {
gaudi2_print_event(hdev, event_type, true,
"interrupt cause: %s", gaudi2_tpc_interrupts_cause[i]);
error_count++;
}
/* check if RAZWI happened */
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, tpc_index, 0, event_mask);
hl_check_for_glbl_errors(hdev);
return error_count;
}
static int gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, u16 event_type,
u64 *event_mask)
{
u32 sts_addr, sts_val, sts_clr_val = 0, error_count = 0;
int i;
if (dec_index < NUM_OF_VDEC_PER_DCORE * NUM_OF_DCORES)
/* DCORE DEC */
sts_addr = mmDCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR +
DCORE_OFFSET * (dec_index / NUM_OF_DEC_PER_DCORE) +
DCORE_VDEC_OFFSET * (dec_index % NUM_OF_DEC_PER_DCORE);
else
/* PCIE DEC */
sts_addr = mmPCIE_VDEC0_BRDG_CTRL_CAUSE_INTR + PCIE_VDEC_OFFSET *
(dec_index - NUM_OF_VDEC_PER_DCORE * NUM_OF_DCORES);
sts_val = RREG32(sts_addr);
for (i = 0 ; i < GAUDI2_NUM_OF_DEC_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", gaudi2_dec_error_cause[i]);
sts_clr_val |= BIT(i);
error_count++;
}
}
/* check if RAZWI happened */
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, dec_index, 0, event_mask);
hl_check_for_glbl_errors(hdev);
/* Write 1 to clear errors */
WREG32(sts_addr, sts_clr_val);
return error_count;
}
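/* Handle an MME control error: print the asserted interrupt causes, check
* for RAZWI on the MME initiators and clear the handled status bits.
*/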
static int gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, u16 event_type,
u64 *event_mask)
{
u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0, error_count = 0;
int i;
sts_addr = mmDCORE0_MME_CTRL_LO_INTR_CAUSE + DCORE_OFFSET * mme_index;
sts_clr_addr = mmDCORE0_MME_CTRL_LO_INTR_CLEAR + DCORE_OFFSET * mme_index;
sts_val = RREG32(sts_addr);
for (i = 0 ; i < GAUDI2_NUM_OF_MME_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", guadi2_mme_error_cause[i]);
sts_clr_val |= BIT(i);
error_count++;
}
}
/* check if RAZWI happened */
for (i = MME_WRITE ; i < MME_INITIATORS_MAX ; i++)
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, i, event_mask);
hl_check_for_glbl_errors(hdev);
WREG32(sts_clr_addr, sts_clr_val);
return error_count;
}
static int gaudi2_handle_mme_sbte_err(struct hl_device *hdev, u16 event_type,
u64 intr_cause_data)
{
int i, error_count = 0;
for (i = 0 ; i < GAUDI2_NUM_OF_MME_SBTE_ERR_CAUSE ; i++)
if (intr_cause_data & BIT(i)) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", guadi2_mme_sbte_error_cause[i]);
error_count++;
}
hl_check_for_glbl_errors(hdev);
return error_count;
}
static int gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index, u16 event_type,
u64 *event_mask)
{
u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0, error_count = 0;
int i;
sts_addr = mmDCORE0_MME_ACC_INTR_CAUSE + DCORE_OFFSET * mme_index;
sts_clr_addr = mmDCORE0_MME_ACC_INTR_CLEAR + DCORE_OFFSET * mme_index;
sts_val = RREG32(sts_addr);
for (i = 0 ; i < GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", guadi2_mme_wap_error_cause[i]);
sts_clr_val |= BIT(i);
error_count++;
}
}
/* check if RAZWI happened on WAP0/1 */
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP0, event_mask);
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP1, event_mask);
hl_check_for_glbl_errors(hdev);
WREG32(sts_clr_addr, sts_clr_val);
return error_count;
}
static int gaudi2_handle_kdma_core_event(struct hl_device *hdev, u16 event_type,
u64 intr_cause_data)
{
u32 error_count = 0;
int i;
/* If an AXI read or write error is received, an error is reported and
* an interrupt message is sent. Due to a HW erratum, when reading the
* cause register of the KDMA engine, the reported error is always HBW
* even if the actual error was caused by an LBW KDMA transaction.
*/
for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++)
if (intr_cause_data & BIT(i)) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", gaudi2_kdma_core_interrupts_cause[i]);
error_count++;
}
hl_check_for_glbl_errors(hdev);
return error_count;
}
static int gaudi2_handle_dma_core_event(struct hl_device *hdev, u16 event_type, u64 intr_cause)
{
u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++)
if (intr_cause & BIT(i)) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", gaudi2_dma_core_interrupts_cause[i]);
error_count++;
}
hl_check_for_glbl_errors(hdev);
return error_count;
}
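/* Check the HBW/LBW read and write RAZWI_HAPPENED indications of the PCIE
* shared master interface, print the captured information and clear every
* indication that is set.
*/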
static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev, u64 *event_mask)
{
u32 mstr_if_base_addr = mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_BASE, razwi_happened_addr;
razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED;
if (RREG32(razwi_happened_addr)) {
gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE",
GAUDI2_ENGINE_ID_PCIE, event_mask);
WREG32(razwi_happened_addr, 0x1);
}
razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED;
if (RREG32(razwi_happened_addr)) {
gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE",
GAUDI2_ENGINE_ID_PCIE, event_mask);
WREG32(razwi_happened_addr, 0x1);
}
razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED;
if (RREG32(razwi_happened_addr)) {
gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE",
GAUDI2_ENGINE_ID_PCIE, event_mask);
WREG32(razwi_happened_addr, 0x1);
}
razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED;
if (RREG32(razwi_happened_addr)) {
gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE",
GAUDI2_ENGINE_ID_PCIE, event_mask);
WREG32(razwi_happened_addr, 0x1);
}
}
static int gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u16 event_type,
u64 intr_cause_data, u64 *event_mask)
{
u32 error_count = 0;
int i;
gaudi2_print_event(hdev, event_type, true,
"intr_cause_data: %#llx", intr_cause_data);
for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE ; i++) {
if (!(intr_cause_data & BIT_ULL(i)))
continue;
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", gaudi2_pcie_addr_dec_error_cause[i]);
error_count++;
/*
* Always check for LBW and HBW additional info as the indication itself is
* sometimes missing
*/
}
hl_check_for_glbl_errors(hdev);
gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask);
return error_count;
}
static int gaudi2_handle_pif_fatal(struct hl_device *hdev, u16 event_type,
u64 intr_cause_data)
{
u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE ; i++) {
if (intr_cause_data & BIT_ULL(i)) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", gaudi2_pmmu_fatal_interrupts_cause[i]);
error_count++;
}
}
return error_count;
}
static int gaudi2_handle_hif_fatal(struct hl_device *hdev, u16 event_type, u64 intr_cause_data)
{
u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE ; i++) {
if (intr_cause_data & BIT_ULL(i)) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", gaudi2_hif_fatal_interrupts_cause[i]);
error_count++;
}
}
return error_count;
}
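/* If the MMU captured a page fault, rebuild the faulting virtual address
* from the capture registers (descrambling it for HMMU), report it and
* clear the capture-valid indication.
*/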
static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool is_pmmu,
u64 *event_mask)
{
u32 valid, val;
u64 addr;
valid = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID));
if (!(valid & DCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID_PAGE_ERR_VALID_ENTRY_MASK))
return;
val = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE));
addr = val & DCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA_63_32_MASK;
addr <<= 32;
addr |= RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA));
if (is_pmmu) {
dev_err_ratelimited(hdev->dev, "PMMU page fault on va 0x%llx\n", addr);
} else {
addr = gaudi2_mmu_descramble_addr(hdev, addr);
addr &= HW_UNSCRAMBLED_BITS_MASK;
dev_err_ratelimited(hdev->dev, "HMMU page fault on va range 0x%llx - 0x%llx\n",
addr, addr + ~HW_UNSCRAMBLED_BITS_MASK);
}
hl_handle_page_fault(hdev, addr, 0, is_pmmu, event_mask);
WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID), 0);
}
static void gaudi2_handle_access_error(struct hl_device *hdev, u64 mmu_base, bool is_pmmu)
{
u32 valid, val;
u64 addr;
valid = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID));
if (!(valid & DCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID_ACCESS_ERR_VALID_ENTRY_MASK))
return;
val = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE));
addr = val & DCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE_VA_63_32_MASK;
addr <<= 32;
addr |= RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE_VA));
if (!is_pmmu)
addr = gaudi2_mmu_descramble_addr(hdev, addr);
dev_err_ratelimited(hdev->dev, "%s access error on va 0x%llx\n",
is_pmmu ? "PMMU" : "HMMU", addr);
WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID), 0);
}
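/* Common HMMU/PMMU SPI-SEI handling: read the cause register, dispatch the
* page-error (bit 0) and access-error (bit 1) handlers, then clear the
* cause bits and the corresponding interrupt.
*/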
static int gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, u16 event_type,
u64 mmu_base, bool is_pmmu, u64 *event_mask)
{
u32 spi_sei_cause, interrupt_clr = 0x0, error_count = 0;
int i;
spi_sei_cause = RREG32(mmu_base + MMU_SPI_SEI_CAUSE_OFFSET);
for (i = 0 ; i < GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE ; i++) {
if (spi_sei_cause & BIT(i)) {
gaudi2_print_event(hdev, event_type, true,
"err cause: %s", gaudi2_mmu_spi_sei[i].cause);
if (i == 0)
gaudi2_handle_page_error(hdev, mmu_base, is_pmmu, event_mask);
else if (i == 1)
gaudi2_handle_access_error(hdev, mmu_base, is_pmmu);
if (gaudi2_mmu_spi_sei[i].clear_bit >= 0)
interrupt_clr |= BIT(gaudi2_mmu_spi_sei[i].clear_bit);
error_count++;
}
}
/* Clear cause */
WREG32_AND(mmu_base + MMU_SPI_SEI_CAUSE_OFFSET, ~spi_sei_cause);
/* Clear interrupt */
WREG32(mmu_base + MMU_INTERRUPT_CLR_OFFSET, interrupt_clr);
return error_count;
}
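/* Handle a sync manager error: decode the SM SEI cause (reporting the first
* asserted cause together with its log field) and the CQ security
* interrupt, clearing each indication that was handled.
*/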
static int gaudi2_handle_sm_err(struct hl_device *hdev, u16 event_type, u8 sm_index)
{
u32 sei_cause_addr, sei_cause_val, sei_cause_cause, sei_cause_log,
cq_intr_addr, cq_intr_val, cq_intr_queue_index, error_count = 0;
int i;
sei_cause_addr = mmDCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE + DCORE_OFFSET * sm_index;
cq_intr_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_INTR + DCORE_OFFSET * sm_index;
sei_cause_val = RREG32(sei_cause_addr);
sei_cause_cause = FIELD_GET(DCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE_CAUSE_MASK, sei_cause_val);
cq_intr_val = RREG32(cq_intr_addr);
/* SEI interrupt */
if (sei_cause_cause) {
/* There are corresponding SEI_CAUSE_log bits for every SEI_CAUSE_cause bit */
sei_cause_log = FIELD_GET(DCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE_LOG_MASK,
sei_cause_val);
for (i = 0 ; i < GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE ; i++) {
if (!(sei_cause_cause & BIT(i)))
continue;
gaudi2_print_event(hdev, event_type, true,
"err cause: %s. %s: 0x%X",
gaudi2_sm_sei_cause[i].cause_name,
gaudi2_sm_sei_cause[i].log_name,
sei_cause_log);
error_count++;
break;
}
/* Clear SM_SEI_CAUSE */
WREG32(sei_cause_addr, 0);
}
/* CQ interrupt */
if (cq_intr_val & DCORE0_SYNC_MNGR_GLBL_CQ_INTR_CQ_SEC_INTR_MASK) {
cq_intr_queue_index =
FIELD_GET(DCORE0_SYNC_MNGR_GLBL_CQ_INTR_CQ_INTR_QUEUE_INDEX_MASK,
cq_intr_val);
dev_err_ratelimited(hdev->dev, "SM%u err. err cause: CQ_INTR. queue index: %u\n",
sm_index, cq_intr_queue_index);
error_count++;
/* Clear CQ_INTR */
WREG32(cq_intr_addr, 0);
}
hl_check_for_glbl_errors(hdev);
return error_count;
}
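/* Translate an HMMU event type to the dcore and in-dcore index of the HMMU
* instance that raised it, and return that instance's MMU block base
* address (ULONG_MAX for an unrecognized event type).
*/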
static u64 get_hmmu_base(u16 event_type)
{
u8 dcore, index_in_dcore;
switch (event_type) {
case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU0_SPI_BASE ... GAUDI2_EVENT_HMMU0_SECURITY_ERROR:
dcore = 0;
index_in_dcore = 0;
break;
case GAUDI2_EVENT_HMMU_1_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU1_SPI_BASE ... GAUDI2_EVENT_HMMU1_SECURITY_ERROR:
dcore = 1;
index_in_dcore = 0;
break;
case GAUDI2_EVENT_HMMU_2_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU2_SPI_BASE ... GAUDI2_EVENT_HMMU2_SECURITY_ERROR:
dcore = 0;
index_in_dcore = 1;
break;
case GAUDI2_EVENT_HMMU_3_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU3_SPI_BASE ... GAUDI2_EVENT_HMMU3_SECURITY_ERROR:
dcore = 1;
index_in_dcore = 1;
break;
case GAUDI2_EVENT_HMMU_4_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU4_SPI_BASE ... GAUDI2_EVENT_HMMU4_SECURITY_ERROR:
dcore = 3;
index_in_dcore = 2;
break;
case GAUDI2_EVENT_HMMU_5_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU5_SPI_BASE ... GAUDI2_EVENT_HMMU5_SECURITY_ERROR:
dcore = 2;
index_in_dcore = 2;
break;
case GAUDI2_EVENT_HMMU_6_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU6_SPI_BASE ... GAUDI2_EVENT_HMMU6_SECURITY_ERROR:
dcore = 3;
index_in_dcore = 3;
break;
case GAUDI2_EVENT_HMMU_7_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU7_SPI_BASE ... GAUDI2_EVENT_HMMU7_SECURITY_ERROR:
dcore = 2;
index_in_dcore = 3;
break;
case GAUDI2_EVENT_HMMU_8_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU8_SPI_BASE ... GAUDI2_EVENT_HMMU8_SECURITY_ERROR:
dcore = 0;
index_in_dcore = 2;
break;
case GAUDI2_EVENT_HMMU_9_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU9_SPI_BASE ... GAUDI2_EVENT_HMMU9_SECURITY_ERROR:
dcore = 1;
index_in_dcore = 2;
break;
case GAUDI2_EVENT_HMMU_10_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU10_SPI_BASE ... GAUDI2_EVENT_HMMU10_SECURITY_ERROR:
dcore = 0;
index_in_dcore = 3;
break;
case GAUDI2_EVENT_HMMU_11_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU11_SPI_BASE ... GAUDI2_EVENT_HMMU11_SECURITY_ERROR:
dcore = 1;
index_in_dcore = 3;
break;
case GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU12_SPI_BASE ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR:
dcore = 3;
index_in_dcore = 0;
break;
case GAUDI2_EVENT_HMMU_13_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU13_SPI_BASE ... GAUDI2_EVENT_HMMU13_SECURITY_ERROR:
dcore = 2;
index_in_dcore = 0;
break;
case GAUDI2_EVENT_HMMU_14_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU14_SPI_BASE ... GAUDI2_EVENT_HMMU14_SECURITY_ERROR:
dcore = 3;
index_in_dcore = 1;
break;
case GAUDI2_EVENT_HMMU_15_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU15_SPI_BASE ... GAUDI2_EVENT_HMMU15_SECURITY_ERROR:
dcore = 2;
index_in_dcore = 1;
break;
default:
return ULONG_MAX;
}
return mmDCORE0_HMMU0_MMU_BASE + dcore * DCORE_OFFSET + index_in_dcore * DCORE_HMMU_OFFSET;
}
static int gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
bool is_pmmu = false;
u32 error_count = 0;
u64 mmu_base;
switch (event_type) {
case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP:
case GAUDI2_EVENT_HMMU0_SPI_BASE ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR:
mmu_base = get_hmmu_base(event_type);
break;
case GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_PMMU0_SECURITY_ERROR:
case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0:
is_pmmu = true;
mmu_base = mmPMMU_HBW_MMU_BASE;
break;
default:
return 0;
}
if (mmu_base == ULONG_MAX)
return 0;
error_count = gaudi2_handle_mmu_spi_sei_generic(hdev, event_type, mmu_base,
is_pmmu, event_mask);
hl_check_for_glbl_errors(hdev);
return error_count;
}
/* returns true if hard reset is required (ECC DERR or Read parity), false otherwise (ECC SERR) */
static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
struct hl_eq_hbm_sei_read_err_intr_info *rd_err_data, u32 err_cnt)
{
u32 addr, beat, beat_shift;
bool rc = false;
dev_err_ratelimited(hdev->dev,
"READ ERROR count: ECC SERR: %d, ECC DERR: %d, RD_PARITY: %d\n",
FIELD_GET(HBM_ECC_SERR_CNTR_MASK, err_cnt),
FIELD_GET(HBM_ECC_DERR_CNTR_MASK, err_cnt),
FIELD_GET(HBM_RD_PARITY_CNTR_MASK, err_cnt));
addr = le32_to_cpu(rd_err_data->dbg_rd_err_addr.rd_addr_val);
dev_err_ratelimited(hdev->dev,
"READ ERROR address: sid(%u), bg(%u), ba(%u), col(%u), row(%u)\n",
FIELD_GET(HBM_RD_ADDR_SID_MASK, addr),
FIELD_GET(HBM_RD_ADDR_BG_MASK, addr),
FIELD_GET(HBM_RD_ADDR_BA_MASK, addr),
FIELD_GET(HBM_RD_ADDR_COL_MASK, addr),
FIELD_GET(HBM_RD_ADDR_ROW_MASK, addr));
/* For each beat (RDQS edge), look for possible errors and print relevant info */
for (beat = 0 ; beat < 4 ; beat++) {
if (le32_to_cpu(rd_err_data->dbg_rd_err_misc) &
(HBM_RD_ERR_SERR_BEAT0_MASK << beat))
dev_err_ratelimited(hdev->dev, "Beat%d ECC SERR: DM: %#x, Syndrome: %#x\n",
beat,
le32_to_cpu(rd_err_data->dbg_rd_err_dm),
le32_to_cpu(rd_err_data->dbg_rd_err_syndrome));
if (le32_to_cpu(rd_err_data->dbg_rd_err_misc) &
(HBM_RD_ERR_DERR_BEAT0_MASK << beat)) {
dev_err_ratelimited(hdev->dev, "Beat%d ECC DERR: DM: %#x, Syndrome: %#x\n",
beat,
le32_to_cpu(rd_err_data->dbg_rd_err_dm),
le32_to_cpu(rd_err_data->dbg_rd_err_syndrome));
rc |= true;
}
beat_shift = beat * HBM_RD_ERR_BEAT_SHIFT;
if (le32_to_cpu(rd_err_data->dbg_rd_err_misc) &
(HBM_RD_ERR_PAR_ERR_BEAT0_MASK << beat_shift)) {
dev_err_ratelimited(hdev->dev,
"Beat%d read PARITY: DM: %#x, PAR data: %#x\n",
beat,
le32_to_cpu(rd_err_data->dbg_rd_err_dm),
(le32_to_cpu(rd_err_data->dbg_rd_err_misc) &
(HBM_RD_ERR_PAR_DATA_BEAT0_MASK << beat_shift)) >>
(HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT + beat_shift));
rc |= true;
}
dev_err_ratelimited(hdev->dev, "Beat%d DQ data:\n", beat);
dev_err_ratelimited(hdev->dev, "\t0x%08x\n",
le32_to_cpu(rd_err_data->dbg_rd_err_data[beat * 2]));
dev_err_ratelimited(hdev->dev, "\t0x%08x\n",
le32_to_cpu(rd_err_data->dbg_rd_err_data[beat * 2 + 1]));
}
return rc;
}
static void gaudi2_hbm_sei_print_wr_par_info(struct hl_device *hdev,
struct hl_eq_hbm_sei_wr_par_intr_info *wr_par_err_data, u32 err_cnt)
{
struct hbm_sei_wr_cmd_address *wr_cmd_addr = wr_par_err_data->dbg_last_wr_cmds;
u32 i, curr_addr, derr = wr_par_err_data->dbg_derr;
dev_err_ratelimited(hdev->dev, "WRITE PARITY ERROR count: %d\n", err_cnt);
dev_err_ratelimited(hdev->dev, "CK-0 DERR: 0x%02x, CK-1 DERR: 0x%02x\n",
derr & 0x3, derr & 0xc);
/* JIRA H6-3286 - the following prints may not be valid */
dev_err_ratelimited(hdev->dev, "Last latched write commands addresses:\n");
for (i = 0 ; i < HBM_WR_PAR_CMD_LIFO_LEN ; i++) {
curr_addr = le32_to_cpu(wr_cmd_addr[i].dbg_wr_cmd_addr);
dev_err_ratelimited(hdev->dev,
"\twrite cmd[%u]: Address: SID(%u) BG(%u) BA(%u) COL(%u).\n",
i,
FIELD_GET(WR_PAR_LAST_CMD_SID_MASK, curr_addr),
FIELD_GET(WR_PAR_LAST_CMD_BG_MASK, curr_addr),
FIELD_GET(WR_PAR_LAST_CMD_BA_MASK, curr_addr),
FIELD_GET(WR_PAR_LAST_CMD_COL_MASK, curr_addr));
}
}
static void gaudi2_hbm_sei_print_ca_par_info(struct hl_device *hdev,
struct hl_eq_hbm_sei_ca_par_intr_info *ca_par_err_data, u32 err_cnt)
{
__le32 *col_cmd = ca_par_err_data->dbg_col;
__le16 *row_cmd = ca_par_err_data->dbg_row;
u32 i;
dev_err_ratelimited(hdev->dev, "CA ERROR count: %d\n", err_cnt);
dev_err_ratelimited(hdev->dev, "Last latched C&R bus commands:\n");
for (i = 0 ; i < HBM_CA_ERR_CMD_LIFO_LEN ; i++)
dev_err_ratelimited(hdev->dev, "cmd%u: ROW(0x%04x) COL(0x%05x)\n", i,
le16_to_cpu(row_cmd[i]) & (u16)GENMASK(13, 0),
le32_to_cpu(col_cmd[i]) & (u32)GENMASK(17, 0));
}
/* Returns true if hard reset is needed or false otherwise */
static bool gaudi2_handle_hbm_mc_sei_err(struct hl_device *hdev, u16 event_type,
struct hl_eq_hbm_sei_data *sei_data)
{
bool require_hard_reset = false;
u32 hbm_id, mc_id, cause_idx;
hbm_id = (event_type - GAUDI2_EVENT_HBM0_MC0_SEI_SEVERE) / 4;
mc_id = ((event_type - GAUDI2_EVENT_HBM0_MC0_SEI_SEVERE) / 2) % 2;
cause_idx = sei_data->hdr.sei_cause;
if (cause_idx > GAUDI2_NUM_OF_HBM_SEI_CAUSE - 1) {
gaudi2_print_event(hdev, event_type, true,
"err cause: Invalid HBM SEI event cause (%d) provided by FW", cause_idx);
return true;
}
gaudi2_print_event(hdev, event_type, !sei_data->hdr.is_critical,
"System %s Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s",
sei_data->hdr.is_critical ? "Critical" : "Non-critical",
hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
hbm_mc_sei_cause[cause_idx]);
/* Print error-specific info */
switch (cause_idx) {
case HBM_SEI_CATTRIP:
require_hard_reset = true;
break;
case HBM_SEI_CMD_PARITY_EVEN:
gaudi2_hbm_sei_print_ca_par_info(hdev, &sei_data->ca_parity_even_info,
le32_to_cpu(sei_data->hdr.cnt));
require_hard_reset = true;
break;
case HBM_SEI_CMD_PARITY_ODD:
gaudi2_hbm_sei_print_ca_par_info(hdev, &sei_data->ca_parity_odd_info,
le32_to_cpu(sei_data->hdr.cnt));
require_hard_reset = true;
break;
case HBM_SEI_WRITE_DATA_PARITY_ERR:
gaudi2_hbm_sei_print_wr_par_info(hdev, &sei_data->wr_parity_info,
le32_to_cpu(sei_data->hdr.cnt));
require_hard_reset = true;
break;
case HBM_SEI_READ_ERR:
/* Unlike other SEI events, read error requires further processing of the
* raw data in order to determine the root cause.
*/
require_hard_reset = gaudi2_hbm_sei_handle_read_err(hdev,
&sei_data->read_err_info,
le32_to_cpu(sei_data->hdr.cnt));
break;
default:
break;
}
require_hard_reset |= !!sei_data->hdr.is_critical;
return require_hard_reset;
}
static int gaudi2_handle_hbm_cattrip(struct hl_device *hdev, u16 event_type,
u64 intr_cause_data)
{
if (intr_cause_data) {
gaudi2_print_event(hdev, event_type, true,
"temperature error cause: %#llx", intr_cause_data);
return 1;
}
return 0;
}
static int gaudi2_handle_hbm_mc_spi(struct hl_device *hdev, u64 intr_cause_data)
{
u32 i, error_count = 0;
for (i = 0 ; i < GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE ; i++)
if (intr_cause_data & hbm_mc_spi[i].mask) {
dev_dbg(hdev->dev, "HBM spi event: notification cause(%s)\n",
hbm_mc_spi[i].cause);
error_count++;
}
return error_count;
}
static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
ktime_t zero_time = ktime_set(0, 0);
mutex_lock(&hdev->clk_throttling.lock);
switch (event_type) {
case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S:
hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER;
hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
dev_dbg_ratelimited(hdev->dev, "Clock throttling due to power consumption\n");
break;
case GAUDI2_EVENT_CPU_FIX_POWER_ENV_E:
hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
dev_dbg_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n");
break;
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S:
hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n");
break;
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n");
break;
default:
dev_err(hdev->dev, "Received invalid clock change event %d\n", event_type);
break;
}
mutex_unlock(&hdev->clk_throttling.lock);
}
static void gaudi2_print_out_of_sync_info(struct hl_device *hdev, u16 event_type,
struct cpucp_pkt_sync_err *sync_err)
{
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
gaudi2_print_event(hdev, event_type, false,
"FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d",
le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci),
q->pi, atomic_read(&q->ci));
}
static int gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev, u16 event_type)
{
u32 p2p_intr, msix_gw_intr, error_count = 0;
p2p_intr = RREG32(mmPCIE_WRAP_P2P_INTR);
msix_gw_intr = RREG32(mmPCIE_WRAP_MSIX_GW_INTR);
if (p2p_intr) {
gaudi2_print_event(hdev, event_type, true,
"pcie p2p transaction terminated due to security, req_id(0x%x)",
RREG32(mmPCIE_WRAP_P2P_REQ_ID));
WREG32(mmPCIE_WRAP_P2P_INTR, 0x1);
error_count++;
}
if (msix_gw_intr) {
gaudi2_print_event(hdev, event_type, true,
"pcie msi-x gen denied due to vector num check failure, vec(0x%X)",
RREG32(mmPCIE_WRAP_MSIX_GW_VEC));
WREG32(mmPCIE_WRAP_MSIX_GW_INTR, 0x1);
error_count++;
}
return error_count;
}
static int gaudi2_handle_pcie_drain(struct hl_device *hdev,
struct hl_eq_pcie_drain_ind_data *drain_data)
{
u64 lbw_rd, lbw_wr, hbw_rd, hbw_wr, cause, error_count = 0;
cause = le64_to_cpu(drain_data->intr_cause.intr_cause_data);
lbw_rd = le64_to_cpu(drain_data->drain_rd_addr_lbw);
lbw_wr = le64_to_cpu(drain_data->drain_wr_addr_lbw);
hbw_rd = le64_to_cpu(drain_data->drain_rd_addr_hbw);
hbw_wr = le64_to_cpu(drain_data->drain_wr_addr_hbw);
if (cause & BIT_ULL(0)) {
dev_err_ratelimited(hdev->dev,
"PCIE AXI drain LBW completed, read_err %u, write_err %u\n",
!!lbw_rd, !!lbw_wr);
error_count++;
}
if (cause & BIT_ULL(1)) {
dev_err_ratelimited(hdev->dev,
"PCIE AXI drain HBW completed, raddr %#llx, waddr %#llx\n",
hbw_rd, hbw_wr);
error_count++;
}
return error_count;
}
static int gaudi2_handle_psoc_drain(struct hl_device *hdev, u64 intr_cause_data)
{
u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE ; i++) {
if (intr_cause_data & BIT_ULL(i)) {
dev_err_ratelimited(hdev->dev, "PSOC %s completed\n",
gaudi2_psoc_axi_drain_interrupts_cause[i]);
error_count++;
}
}
hl_check_for_glbl_errors(hdev);
return error_count;
}
static void gaudi2_print_cpu_pkt_failure_info(struct hl_device *hdev, u16 event_type,
struct cpucp_pkt_sync_err *sync_err)
{
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
gaudi2_print_event(hdev, event_type, false,
"FW reported sanity check failure, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d",
le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
}
static int hl_arc_event_handle(struct hl_device *hdev, u16 event_type,
struct hl_eq_engine_arc_intr_data *data)
{
struct hl_engine_arc_dccm_queue_full_irq *q;
u32 intr_type, engine_id;
u64 payload;
intr_type = le32_to_cpu(data->intr_type);
engine_id = le32_to_cpu(data->engine_id);
payload = le64_to_cpu(data->payload);
switch (intr_type) {
case ENGINE_ARC_DCCM_QUEUE_FULL_IRQ:
q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload;
gaudi2_print_event(hdev, event_type, true,
"ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u",
engine_id, intr_type, q->queue_index);
return 1;
default:
gaudi2_print_event(hdev, event_type, true, "Unknown ARC event type");
return 0;
}
}
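/* Main event-queue dispatcher: decode the event type from the EQ entry
* header, invoke the matching handler, accumulate the error count and the
* notifier event mask, unmask the interrupt when required and escalate to
* a device reset for fatal or critical events.
*/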
static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
bool reset_required = false, is_critical = false;
u32 index, ctl, reset_flags = 0, error_count = 0;
u64 event_mask = 0;
u16 event_type;
ctl = le32_to_cpu(eq_entry->hdr.ctl);
event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT);
if (event_type >= GAUDI2_EVENT_SIZE) {
dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
event_type, GAUDI2_EVENT_SIZE - 1);
return;
}
gaudi2->events_stat[event_type]++;
gaudi2->events_stat_aggregate[event_type]++;
switch (event_type) {
case GAUDI2_EVENT_PCIE_CORE_SERR ... GAUDI2_EVENT_ARC0_ECC_DERR:
fallthrough;
case GAUDI2_EVENT_ROTATOR0_SERR ... GAUDI2_EVENT_ROTATOR1_DERR:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
reset_required = gaudi2_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
is_critical = eq_entry->ecc_data.is_critical;
error_count++;
break;
case GAUDI2_EVENT_TPC0_QM ... GAUDI2_EVENT_PDMA1_QM:
fallthrough;
case GAUDI2_EVENT_ROTATOR0_ROT0_QM ... GAUDI2_EVENT_ROTATOR1_ROT1_QM:
fallthrough;
case GAUDI2_EVENT_NIC0_QM0 ... GAUDI2_EVENT_NIC11_QM1:
error_count = gaudi2_handle_qman_err(hdev, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_ARC_AXI_ERROR_RESPONSE_0:
error_count = gaudi2_handle_arc_farm_sei_err(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_CPU_AXI_ERR_RSP:
error_count = gaudi2_handle_cpu_sei_err(hdev, event_type);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;
break;
case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP:
case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP:
error_count = gaudi2_handle_qm_sei_err(hdev, event_type, true, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE:
case GAUDI2_EVENT_ROTATOR1_AXI_ERROR_RESPONSE:
index = event_type - GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE;
error_count = gaudi2_handle_rot_err(hdev, index, event_type,
&eq_entry->razwi_with_intr_cause, &event_mask);
error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_TPC0_AXI_ERR_RSP ... GAUDI2_EVENT_TPC24_AXI_ERR_RSP:
index = event_type - GAUDI2_EVENT_TPC0_AXI_ERR_RSP;
error_count = gaudi2_tpc_ack_interrupts(hdev, index, event_type,
&eq_entry->razwi_with_intr_cause, &event_mask);
error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE ... GAUDI2_EVENT_DEC9_AXI_ERR_RSPONSE:
index = event_type - GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE;
error_count = gaudi2_handle_dec_err(hdev, index, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_TPC0_KERNEL_ERR:
case GAUDI2_EVENT_TPC1_KERNEL_ERR:
case GAUDI2_EVENT_TPC2_KERNEL_ERR:
case GAUDI2_EVENT_TPC3_KERNEL_ERR:
case GAUDI2_EVENT_TPC4_KERNEL_ERR:
case GAUDI2_EVENT_TPC5_KERNEL_ERR:
case GAUDI2_EVENT_TPC6_KERNEL_ERR:
case GAUDI2_EVENT_TPC7_KERNEL_ERR:
case GAUDI2_EVENT_TPC8_KERNEL_ERR:
case GAUDI2_EVENT_TPC9_KERNEL_ERR:
case GAUDI2_EVENT_TPC10_KERNEL_ERR:
case GAUDI2_EVENT_TPC11_KERNEL_ERR:
case GAUDI2_EVENT_TPC12_KERNEL_ERR:
case GAUDI2_EVENT_TPC13_KERNEL_ERR:
case GAUDI2_EVENT_TPC14_KERNEL_ERR:
case GAUDI2_EVENT_TPC15_KERNEL_ERR:
case GAUDI2_EVENT_TPC16_KERNEL_ERR:
case GAUDI2_EVENT_TPC17_KERNEL_ERR:
case GAUDI2_EVENT_TPC18_KERNEL_ERR:
case GAUDI2_EVENT_TPC19_KERNEL_ERR:
case GAUDI2_EVENT_TPC20_KERNEL_ERR:
case GAUDI2_EVENT_TPC21_KERNEL_ERR:
case GAUDI2_EVENT_TPC22_KERNEL_ERR:
case GAUDI2_EVENT_TPC23_KERNEL_ERR:
case GAUDI2_EVENT_TPC24_KERNEL_ERR:
index = (event_type - GAUDI2_EVENT_TPC0_KERNEL_ERR) /
(GAUDI2_EVENT_TPC1_KERNEL_ERR - GAUDI2_EVENT_TPC0_KERNEL_ERR);
error_count = gaudi2_tpc_ack_interrupts(hdev, index, event_type,
&eq_entry->razwi_with_intr_cause, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_DEC0_SPI:
case GAUDI2_EVENT_DEC1_SPI:
case GAUDI2_EVENT_DEC2_SPI:
case GAUDI2_EVENT_DEC3_SPI:
case GAUDI2_EVENT_DEC4_SPI:
case GAUDI2_EVENT_DEC5_SPI:
case GAUDI2_EVENT_DEC6_SPI:
case GAUDI2_EVENT_DEC7_SPI:
case GAUDI2_EVENT_DEC8_SPI:
case GAUDI2_EVENT_DEC9_SPI:
index = (event_type - GAUDI2_EVENT_DEC0_SPI) /
(GAUDI2_EVENT_DEC1_SPI - GAUDI2_EVENT_DEC0_SPI);
error_count = gaudi2_handle_dec_err(hdev, index, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE:
case GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE:
case GAUDI2_EVENT_MME2_CTRL_AXI_ERROR_RESPONSE:
case GAUDI2_EVENT_MME3_CTRL_AXI_ERROR_RESPONSE:
index = (event_type - GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE) /
(GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE -
GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE);
error_count = gaudi2_handle_mme_err(hdev, index, event_type, &event_mask);
error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_MME0_QMAN_SW_ERROR:
case GAUDI2_EVENT_MME1_QMAN_SW_ERROR:
case GAUDI2_EVENT_MME2_QMAN_SW_ERROR:
case GAUDI2_EVENT_MME3_QMAN_SW_ERROR:
index = (event_type - GAUDI2_EVENT_MME0_QMAN_SW_ERROR) /
(GAUDI2_EVENT_MME1_QMAN_SW_ERROR -
GAUDI2_EVENT_MME0_QMAN_SW_ERROR);
error_count = gaudi2_handle_mme_err(hdev, index, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID:
case GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID:
case GAUDI2_EVENT_MME2_WAP_SOURCE_RESULT_INVALID:
case GAUDI2_EVENT_MME3_WAP_SOURCE_RESULT_INVALID:
index = (event_type - GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID) /
(GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID -
GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID);
error_count = gaudi2_handle_mme_wap_err(hdev, index, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_KDMA_CH0_AXI_ERR_RSP:
case GAUDI2_EVENT_KDMA0_CORE:
error_count = gaudi2_handle_kdma_core_event(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_HDMA2_CORE ... GAUDI2_EVENT_HDMA5_CORE:
error_count = gaudi2_handle_dma_core_event(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_PDMA0_CORE ... GAUDI2_EVENT_PDMA1_CORE:
error_count = gaudi2_handle_dma_core_event(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_PCIE_ADDR_DEC_ERR:
error_count = gaudi2_print_pcie_addr_dec_info(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data), &event_mask);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR:
case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP:
case GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_PMMU0_SECURITY_ERROR:
case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0:
error_count = gaudi2_handle_mmu_spi_sei_err(hdev, event_type, &event_mask);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_HIF0_FATAL ... GAUDI2_EVENT_HIF12_FATAL:
error_count = gaudi2_handle_hif_fatal(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PMMU_FATAL_0:
error_count = gaudi2_handle_pif_fatal(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC63_RAZWI_OR_PID_MIN_MAX_INTERRUPT:
error_count = gaudi2_ack_psoc_razwi_event_handler(hdev, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_HBM0_MC0_SEI_SEVERE ... GAUDI2_EVENT_HBM5_MC1_SEI_NON_SEVERE:
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
if (gaudi2_handle_hbm_mc_sei_err(hdev, event_type, &eq_entry->sei_data)) {
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
reset_required = true;
}
error_count++;
break;
case GAUDI2_EVENT_HBM_CATTRIP_0 ... GAUDI2_EVENT_HBM_CATTRIP_5:
error_count = gaudi2_handle_hbm_cattrip(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_HBM0_MC0_SPI ... GAUDI2_EVENT_HBM5_MC1_SPI:
error_count = gaudi2_handle_hbm_mc_spi(hdev,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_DRAIN_COMPLETE:
error_count = gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC59_RPM_ERROR_OR_DRAIN:
error_count = gaudi2_handle_psoc_drain(hdev,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_AXI_ECC:
error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_L2_RAM_ECC:
error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME0_SBTE4_AXI_ERR_RSP:
case GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME1_SBTE4_AXI_ERR_RSP:
case GAUDI2_EVENT_MME2_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME2_SBTE4_AXI_ERR_RSP:
case GAUDI2_EVENT_MME3_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME3_SBTE4_AXI_ERR_RSP:
error_count = gaudi2_handle_mme_sbte_err(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_VM0_ALARM_A ... GAUDI2_EVENT_VM3_ALARM_B:
error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC_AXI_ERR_RSP:
error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC_PRSTN_FALL:
error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_APB_TIMEOUT:
error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_FATAL_ERR:
error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_TPC0_BMON_SPMU:
case GAUDI2_EVENT_TPC1_BMON_SPMU:
case GAUDI2_EVENT_TPC2_BMON_SPMU:
case GAUDI2_EVENT_TPC3_BMON_SPMU:
case GAUDI2_EVENT_TPC4_BMON_SPMU:
case GAUDI2_EVENT_TPC5_BMON_SPMU:
case GAUDI2_EVENT_TPC6_BMON_SPMU:
case GAUDI2_EVENT_TPC7_BMON_SPMU:
case GAUDI2_EVENT_TPC8_BMON_SPMU:
case GAUDI2_EVENT_TPC9_BMON_SPMU:
case GAUDI2_EVENT_TPC10_BMON_SPMU:
case GAUDI2_EVENT_TPC11_BMON_SPMU:
case GAUDI2_EVENT_TPC12_BMON_SPMU:
case GAUDI2_EVENT_TPC13_BMON_SPMU:
case GAUDI2_EVENT_TPC14_BMON_SPMU:
case GAUDI2_EVENT_TPC15_BMON_SPMU:
case GAUDI2_EVENT_TPC16_BMON_SPMU:
case GAUDI2_EVENT_TPC17_BMON_SPMU:
case GAUDI2_EVENT_TPC18_BMON_SPMU:
case GAUDI2_EVENT_TPC19_BMON_SPMU:
case GAUDI2_EVENT_TPC20_BMON_SPMU:
case GAUDI2_EVENT_TPC21_BMON_SPMU:
case GAUDI2_EVENT_TPC22_BMON_SPMU:
case GAUDI2_EVENT_TPC23_BMON_SPMU:
case GAUDI2_EVENT_TPC24_BMON_SPMU:
case GAUDI2_EVENT_MME0_CTRL_BMON_SPMU:
case GAUDI2_EVENT_MME0_SBTE_BMON_SPMU:
case GAUDI2_EVENT_MME0_WAP_BMON_SPMU:
case GAUDI2_EVENT_MME1_CTRL_BMON_SPMU:
case GAUDI2_EVENT_MME1_SBTE_BMON_SPMU:
case GAUDI2_EVENT_MME1_WAP_BMON_SPMU:
case GAUDI2_EVENT_MME2_CTRL_BMON_SPMU:
case GAUDI2_EVENT_MME2_SBTE_BMON_SPMU:
case GAUDI2_EVENT_MME2_WAP_BMON_SPMU:
case GAUDI2_EVENT_MME3_CTRL_BMON_SPMU:
case GAUDI2_EVENT_MME3_SBTE_BMON_SPMU:
case GAUDI2_EVENT_MME3_WAP_BMON_SPMU:
case GAUDI2_EVENT_HDMA2_BM_SPMU ... GAUDI2_EVENT_PDMA1_BM_SPMU:
fallthrough;
case GAUDI2_EVENT_DEC0_BMON_SPMU:
case GAUDI2_EVENT_DEC1_BMON_SPMU:
case GAUDI2_EVENT_DEC2_BMON_SPMU:
case GAUDI2_EVENT_DEC3_BMON_SPMU:
case GAUDI2_EVENT_DEC4_BMON_SPMU:
case GAUDI2_EVENT_DEC5_BMON_SPMU:
case GAUDI2_EVENT_DEC6_BMON_SPMU:
case GAUDI2_EVENT_DEC7_BMON_SPMU:
case GAUDI2_EVENT_DEC8_BMON_SPMU:
case GAUDI2_EVENT_DEC9_BMON_SPMU:
case GAUDI2_EVENT_ROTATOR0_BMON_SPMU ... GAUDI2_EVENT_SM3_BMON_SPMU:
error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S:
case GAUDI2_EVENT_CPU_FIX_POWER_ENV_E:
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S:
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
gaudi2_print_clk_change_info(hdev, event_type, &event_mask);
error_count = GAUDI2_NA_EVENT_CAUSE;
break;
case GAUDI2_EVENT_CPU_PKT_QUEUE_OUT_SYNC:
gaudi2_print_out_of_sync_info(hdev, event_type, &eq_entry->pkt_sync_err);
error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_FLR_REQUESTED:
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
error_count = GAUDI2_NA_EVENT_CAUSE;
/* Do nothing - FW will handle it */
break;
case GAUDI2_EVENT_PCIE_P2P_MSIX:
error_count = gaudi2_handle_pcie_p2p_msix(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE ... GAUDI2_EVENT_SM3_AXI_ERROR_RESPONSE:
index = event_type - GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE;
error_count = gaudi2_handle_sm_err(hdev, event_type, index);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_PSOC_MME_PLL_LOCK_ERR ... GAUDI2_EVENT_DCORE2_HBM_PLL_LOCK_ERR:
error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE:
dev_info(hdev->dev, "CPLD shutdown cause, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_EVENT:
dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED:
gaudi2_print_cpu_pkt_failure_info(hdev, event_type, &eq_entry->pkt_sync_err);
error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_ARC_DCCM_FULL:
error_count = hl_arc_event_handle(hdev, event_type, &eq_entry->arc_data);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED:
case GAUDI2_EVENT_CPU_DEV_RESET_REQ:
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
error_count = GAUDI2_NA_EVENT_CAUSE;
is_critical = true;
break;
default:
if (gaudi2_irq_map_table[event_type].valid) {
dev_err_ratelimited(hdev->dev, "Cannot find handler for event %d\n",
event_type);
error_count = GAUDI2_NA_EVENT_CAUSE;
}
}
/* Make sure to dump an error in case no error cause was printed so far.
* Note that although we have counted the errors, we use this number as
* a boolean.
*/
if (error_count == GAUDI2_NA_EVENT_CAUSE && !is_info_event(event_type))
gaudi2_print_event(hdev, event_type, true, "%d", event_type);
else if (error_count == 0)
gaudi2_print_event(hdev, event_type, true,
"No error cause for H/W event %u", event_type);
if ((gaudi2_irq_map_table[event_type].reset != EVENT_RESET_TYPE_NONE) ||
reset_required) {
if (reset_required ||
(gaudi2_irq_map_table[event_type].reset == EVENT_RESET_TYPE_HARD))
reset_flags |= HL_DRV_RESET_HARD;
if (hdev->hard_reset_on_fw_events ||
(hdev->asic_prop.fw_security_enabled && is_critical))
goto reset_device;
}
/* Send unmask irq only for interrupts not classified as MSG */
if (!gaudi2_irq_map_table[event_type].msg)
hl_fw_unmask_irq(hdev, event_type);
if (event_mask)
hl_notifier_event_send_all(hdev, event_mask);
return;
reset_device:
if (hdev->asic_prop.fw_security_enabled && is_critical) {
reset_flags |= HL_DRV_RESET_BYPASS_REQ_TO_FW;
event_mask |= HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
} else {
reset_flags |= HL_DRV_RESET_DELAY;
}
/* escalate general hw errors to critical/fatal error */
if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR)
hl_handle_critical_hw_err(hdev, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
hl_device_cond_reset(hdev, reset_flags, event_mask);
}
static int gaudi2_memset_memory_chunk_using_edma_qm(struct hl_device *hdev,
struct packet_lin_dma *lin_dma_pkt, dma_addr_t pkt_dma_addr,
u32 hw_queue_id, u32 size, u64 addr, u32 val)
{
u32 ctl, pkt_size;
int rc = 0;
ctl = FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA);
ctl |= FIELD_PREP(GAUDI2_PKT_LIN_DMA_CTL_MEMSET_MASK, 1);
ctl |= FIELD_PREP(GAUDI2_PKT_LIN_DMA_CTL_WRCOMP_MASK, 1);
ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 1);
lin_dma_pkt->ctl = cpu_to_le32(ctl);
lin_dma_pkt->src_addr = cpu_to_le64(val);
lin_dma_pkt->dst_addr = cpu_to_le64(addr);
lin_dma_pkt->tsize = cpu_to_le32(size);
pkt_size = sizeof(struct packet_lin_dma);
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, pkt_dma_addr);
if (rc)
dev_err(hdev->dev, "Failed to send lin dma packet to H/W queue %d\n",
hw_queue_id);
return rc;
}
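/* Fill a device memory range with a given value by submitting LIN_DMA
* memset packets to the enabled EDMA QMANs: temporarily enable MMU bypass
* and SOB-based write completion, split the range into 2GB chunks, poll
* the SOB until all packets have completed and then restore the original
* EDMA configuration.
*/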
static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, u64 val)
{
u32 edma_queues_id[] = {GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0,
GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0,
GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0,
GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0};
u32 chunk_size, dcore, edma_idx, sob_offset, sob_addr, comp_val,
old_mmubp, mmubp, num_of_pkts, busy, pkt_size;
u64 comp_addr, cur_addr = addr, end_addr = addr + size;
struct asic_fixed_properties *prop = &hdev->asic_prop;
void *lin_dma_pkts_arr;
dma_addr_t pkt_dma_addr;
int rc = 0, dma_num = 0;
if (prop->edma_enabled_mask == 0) {
dev_info(hdev->dev, "non of the EDMA engines is enabled - skip dram scrubbing\n");
return -EIO;
}
sob_offset = hdev->asic_prop.first_available_user_sob[0] * 4;
sob_addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset;
comp_addr = CFG_BASE + sob_addr;
comp_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1) |
FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1);
mmubp = FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_WR_MASK, 1) |
FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_RD_MASK, 1);
/* Calculate how many lin dma pkts we'll need */
num_of_pkts = div64_u64(round_up(size, SZ_2G), SZ_2G);
pkt_size = sizeof(struct packet_lin_dma);
lin_dma_pkts_arr = hl_asic_dma_alloc_coherent(hdev, pkt_size * num_of_pkts,
&pkt_dma_addr, GFP_KERNEL);
if (!lin_dma_pkts_arr)
return -ENOMEM;
/*
* Set MMU bypass for the scrubbing - all EDMAs are configured the same, so
* save only the first one to restore later.
* Also set the SOB address for all EDMA cores for completion.
* Set the QM as trusted to allow it to access physical addresses with MMU
* bypass.
*/
old_mmubp = RREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP);
for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) {
u32 edma_offset = dcore * DCORE_OFFSET + edma_idx * DCORE_EDMA_OFFSET;
u32 edma_bit = dcore * NUM_OF_EDMA_PER_DCORE + edma_idx;
if (!(prop->edma_enabled_mask & BIT(edma_bit)))
continue;
WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP +
edma_offset, mmubp);
WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_LO + edma_offset,
lower_32_bits(comp_addr));
WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_HI + edma_offset,
upper_32_bits(comp_addr));
WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_WDATA + edma_offset,
comp_val);
gaudi2_qman_set_test_mode(hdev,
edma_queues_id[dcore] + 4 * edma_idx, true);
}
}
WREG32(sob_addr, 0);
while (cur_addr < end_addr) {
for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) {
u32 edma_bit = dcore * NUM_OF_EDMA_PER_DCORE + edma_idx;
if (!(prop->edma_enabled_mask & BIT(edma_bit)))
continue;
chunk_size = min_t(u64, SZ_2G, end_addr - cur_addr);
rc = gaudi2_memset_memory_chunk_using_edma_qm(hdev,
(struct packet_lin_dma *)lin_dma_pkts_arr + dma_num,
pkt_dma_addr + dma_num * pkt_size,
edma_queues_id[dcore] + edma_idx * 4,
chunk_size, cur_addr, val);
if (rc)
goto end;
dma_num++;
cur_addr += chunk_size;
if (cur_addr == end_addr)
break;
}
}
}
rc = hl_poll_timeout(hdev, sob_addr, busy, (busy == dma_num), 1000, 1000000);
if (rc) {
dev_err(hdev->dev, "DMA Timeout during HBM scrubbing\n");
goto end;
}
end:
for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) {
for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) {
u32 edma_offset = dcore * DCORE_OFFSET + edma_idx * DCORE_EDMA_OFFSET;
u32 edma_bit = dcore * NUM_OF_EDMA_PER_DCORE + edma_idx;
if (!(prop->edma_enabled_mask & BIT(edma_bit)))
continue;
WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP + edma_offset, old_mmubp);
WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_LO + edma_offset, 0);
WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_HI + edma_offset, 0);
WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_WDATA + edma_offset, 0);
gaudi2_qman_set_test_mode(hdev,
edma_queues_id[dcore] + 4 * edma_idx, false);
}
}
WREG32(sob_addr, 0);
hl_asic_dma_free_coherent(hdev, pkt_size * num_of_pkts, lin_dma_pkts_arr, pkt_dma_addr);
return rc;
}
static int gaudi2_scrub_device_dram(struct hl_device *hdev, u64 val)
{
int rc;
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 size = prop->dram_end_address - prop->dram_user_base_address;
rc = gaudi2_memset_device_memory(hdev, prop->dram_user_base_address, size, val);
if (rc)
dev_err(hdev->dev, "Failed to scrub dram, address: 0x%llx size: %llu\n",
prop->dram_user_base_address, size);
return rc;
}
static int gaudi2_scrub_device_mem(struct hl_device *hdev)
{
int rc;
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 val = hdev->memory_scrub_val;
u64 addr, size;
if (!hdev->memory_scrub)
return 0;
/* scrub SRAM */
addr = prop->sram_user_base_address;
size = hdev->pldm ? 0x10000 : (prop->sram_size - SRAM_USER_BASE_OFFSET);
dev_dbg(hdev->dev, "Scrubbing SRAM: 0x%09llx - 0x%09llx, val: 0x%llx\n",
addr, addr + size, val);
rc = gaudi2_memset_device_memory(hdev, addr, size, val);
if (rc) {
dev_err(hdev->dev, "scrubbing SRAM failed (%d)\n", rc);
return rc;
}
/* scrub DRAM */
rc = gaudi2_scrub_device_dram(hdev, val);
if (rc) {
dev_err(hdev->dev, "scrubbing DRAM failed (%d)\n", rc);
return rc;
}
return 0;
}
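/* Reset the user-available sync manager resources on all dcores: zero the
* CQ configuration registers, write the protection bit to the monitor
* status registers while zeroing the monitor configuration, and clear the
* SOBs. The final read flushes the posted writes.
*/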
static void gaudi2_restore_user_sm_registers(struct hl_device *hdev)
{
u64 addr, mon_sts_addr, mon_cfg_addr, cq_lbw_l_addr, cq_lbw_h_addr,
cq_lbw_data_addr, cq_base_l_addr, cq_base_h_addr, cq_size_addr;
u32 val, size, offset;
int dcore_id;
offset = hdev->asic_prop.first_available_cq[0] * 4;
cq_lbw_l_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0 + offset;
cq_lbw_h_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 + offset;
cq_lbw_data_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0 + offset;
cq_base_l_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 + offset;
cq_base_h_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0 + offset;
cq_size_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0 + offset;
size = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 -
(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0 + offset);
/* memset dcore0 CQ registers */
gaudi2_memset_device_lbw(hdev, cq_lbw_l_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_lbw_h_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_lbw_data_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_base_l_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_base_h_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_size_addr, size, 0);
cq_lbw_l_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0 + DCORE_OFFSET;
cq_lbw_h_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 + DCORE_OFFSET;
cq_lbw_data_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0 + DCORE_OFFSET;
cq_base_l_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 + DCORE_OFFSET;
cq_base_h_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0 + DCORE_OFFSET;
cq_size_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0 + DCORE_OFFSET;
size = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 - mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0;
for (dcore_id = 1 ; dcore_id < NUM_OF_DCORES ; dcore_id++) {
gaudi2_memset_device_lbw(hdev, cq_lbw_l_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_lbw_h_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_lbw_data_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_base_l_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_base_h_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_size_addr, size, 0);
cq_lbw_l_addr += DCORE_OFFSET;
cq_lbw_h_addr += DCORE_OFFSET;
cq_lbw_data_addr += DCORE_OFFSET;
cq_base_l_addr += DCORE_OFFSET;
cq_base_h_addr += DCORE_OFFSET;
cq_size_addr += DCORE_OFFSET;
}
offset = hdev->asic_prop.first_available_user_mon[0] * 4;
addr = mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0 + offset;
val = 1 << DCORE0_SYNC_MNGR_OBJS_MON_STATUS_PROT_SHIFT;
size = mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_0 - (mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0 + offset);
/* memset dcore0 monitors */
gaudi2_memset_device_lbw(hdev, addr, size, val);
addr = mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + offset;
gaudi2_memset_device_lbw(hdev, addr, size, 0);
mon_sts_addr = mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0 + DCORE_OFFSET;
mon_cfg_addr = mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + DCORE_OFFSET;
size = mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_0 - mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0;
for (dcore_id = 1 ; dcore_id < NUM_OF_DCORES ; dcore_id++) {
gaudi2_memset_device_lbw(hdev, mon_sts_addr, size, val);
gaudi2_memset_device_lbw(hdev, mon_cfg_addr, size, 0);
mon_sts_addr += DCORE_OFFSET;
mon_cfg_addr += DCORE_OFFSET;
}
offset = hdev->asic_prop.first_available_user_sob[0] * 4;
addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + offset;
val = 0;
size = mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 -
(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + offset);
/* memset dcore0 sobs */
gaudi2_memset_device_lbw(hdev, addr, size, val);
addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + DCORE_OFFSET;
size = mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 - mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0;
for (dcore_id = 1 ; dcore_id < NUM_OF_DCORES ; dcore_id++) {
gaudi2_memset_device_lbw(hdev, addr, size, val);
addr += DCORE_OFFSET;
}
/* Flush all WREG to prevent race */
val = RREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + offset);
}
static void gaudi2_restore_user_qm_registers(struct hl_device *hdev)
{
u32 reg_base, hw_queue_id;
for (hw_queue_id = GAUDI2_QUEUE_ID_PDMA_0_0 ; hw_queue_id <= GAUDI2_QUEUE_ID_ROT_1_0;
hw_queue_id += NUM_OF_PQ_PER_QMAN) {
if (!gaudi2_is_queue_enabled(hdev, hw_queue_id))
continue;
gaudi2_clear_qm_fence_counters_common(hdev, hw_queue_id, false);
reg_base = gaudi2_qm_blocks_bases[hw_queue_id];
WREG32(reg_base + QM_ARB_CFG_0_OFFSET, 0);
}
/* Flush all WREG to prevent race */
RREG32(mmPDMA0_QM_ARB_CFG_0);
}
static void gaudi2_restore_nic_qm_registers(struct hl_device *hdev)
{
u32 reg_base, hw_queue_id;
for (hw_queue_id = GAUDI2_QUEUE_ID_NIC_0_0 ; hw_queue_id <= GAUDI2_QUEUE_ID_NIC_23_3;
hw_queue_id += NUM_OF_PQ_PER_QMAN) {
if (!gaudi2_is_queue_enabled(hdev, hw_queue_id))
continue;
gaudi2_clear_qm_fence_counters_common(hdev, hw_queue_id, false);
reg_base = gaudi2_qm_blocks_bases[hw_queue_id];
WREG32(reg_base + QM_ARB_CFG_0_OFFSET, 0);
}
/* Flush all WREG to prevent race */
RREG32(mmPDMA0_QM_ARB_CFG_0);
}
static int gaudi2_context_switch(struct hl_device *hdev, u32 asid)
{
return 0;
}
static void gaudi2_restore_phase_topology(struct hl_device *hdev)
{
}
static void gaudi2_init_block_instances(struct hl_device *hdev, u32 block_idx,
struct dup_block_ctx *cfg_ctx)
{
u64 block_base = cfg_ctx->base + block_idx * cfg_ctx->block_off;
u8 seq;
int i;
for (i = 0 ; i < cfg_ctx->instances ; i++) {
seq = block_idx * cfg_ctx->instances + i;
/* skip disabled instance */
if (!(cfg_ctx->enabled_mask & BIT_ULL(seq)))
continue;
cfg_ctx->instance_cfg_fn(hdev, block_base + i * cfg_ctx->instance_off,
cfg_ctx->data);
}
}
static void gaudi2_init_blocks_with_mask(struct hl_device *hdev, struct dup_block_ctx *cfg_ctx,
u64 mask)
{
int i;
cfg_ctx->enabled_mask = mask;
for (i = 0 ; i < cfg_ctx->blocks ; i++)
gaudi2_init_block_instances(hdev, i, cfg_ctx);
}
void gaudi2_init_blocks(struct hl_device *hdev, struct dup_block_ctx *cfg_ctx)
{
gaudi2_init_blocks_with_mask(hdev, cfg_ctx, U64_MAX);
}
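/* DebugFS helper that reads device memory through KDMA: allocate a 2MB host
* bounce buffer, map it in the device MMU under the compute context, move
* the requested range 2MB at a time with KDMA jobs, copy each chunk into
* the caller's blob and finally tear the mapping down.
*/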
static int gaudi2_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, void *blob_addr)
{
void *host_mem_virtual_addr;
dma_addr_t host_mem_dma_addr;
u64 reserved_va_base;
u32 pos, size_left, size_to_dma;
struct hl_ctx *ctx;
int rc = 0;
/* Fetch the ctx */
ctx = hl_get_compute_ctx(hdev);
if (!ctx) {
dev_err(hdev->dev, "No ctx available\n");
return -EINVAL;
}
/* Allocate buffers for read and for poll */
host_mem_virtual_addr = hl_asic_dma_alloc_coherent(hdev, SZ_2M, &host_mem_dma_addr,
GFP_KERNEL | __GFP_ZERO);
if (host_mem_virtual_addr == NULL) {
dev_err(hdev->dev, "Failed to allocate memory for KDMA read\n");
rc = -ENOMEM;
goto put_ctx;
}
/* Reserve VM region on asic side */
reserved_va_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST, SZ_2M,
HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
if (!reserved_va_base) {
dev_err(hdev->dev, "Failed to reserve vmem on asic\n");
rc = -ENOMEM;
goto free_data_buffer;
}
/* Create mapping on asic side */
mutex_lock(&hdev->mmu_lock);
rc = hl_mmu_map_contiguous(ctx, reserved_va_base, host_mem_dma_addr, SZ_2M);
if (rc) {
dev_err(hdev->dev, "Failed to create mapping on asic mmu\n");
goto unreserve_va;
}
rc = hl_mmu_invalidate_cache_range(hdev, false,
MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV,
ctx->asid, reserved_va_base, SZ_2M);
if (rc) {
hl_mmu_unmap_contiguous(ctx, reserved_va_base, SZ_2M);
goto unreserve_va;
}
mutex_unlock(&hdev->mmu_lock);
/* Enable MMU on KDMA */
gaudi2_kdma_set_mmbp_asid(hdev, false, ctx->asid);
pos = 0;
size_left = size;
size_to_dma = SZ_2M;
while (size_left > 0) {
if (size_left < SZ_2M)
size_to_dma = size_left;
rc = gaudi2_send_job_to_kdma(hdev, addr, reserved_va_base, size_to_dma, false);
if (rc)
break;
memcpy(blob_addr + pos, host_mem_virtual_addr, size_to_dma);
if (size_left <= SZ_2M)
break;
pos += SZ_2M;
addr += SZ_2M;
size_left -= SZ_2M;
}
gaudi2_kdma_set_mmbp_asid(hdev, true, HL_KERNEL_ASID_ID);
mutex_lock(&hdev->mmu_lock);
rc = hl_mmu_unmap_contiguous(ctx, reserved_va_base, SZ_2M);
if (rc)
goto unreserve_va;
rc = hl_mmu_invalidate_cache_range(hdev, false, MMU_OP_USERPTR,
ctx->asid, reserved_va_base, SZ_2M);
unreserve_va:
mutex_unlock(&hdev->mmu_lock);
hl_unreserve_va_block(hdev, ctx, reserved_va_base, SZ_2M);
free_data_buffer:
hl_asic_dma_free_coherent(hdev, SZ_2M, host_mem_virtual_addr, host_mem_dma_addr);
put_ctx:
hl_ctx_put(ctx);
return rc;
}
static int gaudi2_internal_cb_pool_init(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int min_alloc_order, rc;
if (!(gaudi2->hw_cap_initialized & HW_CAP_PMMU))
return 0;
hdev->internal_cb_pool_virt_addr = hl_asic_dma_alloc_coherent(hdev,
HOST_SPACE_INTERNAL_CB_SZ,
&hdev->internal_cb_pool_dma_addr,
GFP_KERNEL | __GFP_ZERO);
if (!hdev->internal_cb_pool_virt_addr)
return -ENOMEM;
min_alloc_order = ilog2(min(gaudi2_get_signal_cb_size(hdev),
gaudi2_get_wait_cb_size(hdev)));
hdev->internal_cb_pool = gen_pool_create(min_alloc_order, -1);
if (!hdev->internal_cb_pool) {
dev_err(hdev->dev, "Failed to create internal CB pool\n");
rc = -ENOMEM;
goto free_internal_cb_pool;
}
rc = gen_pool_add(hdev->internal_cb_pool, (uintptr_t) hdev->internal_cb_pool_virt_addr,
HOST_SPACE_INTERNAL_CB_SZ, -1);
if (rc) {
dev_err(hdev->dev, "Failed to add memory to internal CB pool\n");
rc = -EFAULT;
goto destroy_internal_cb_pool;
}
hdev->internal_cb_va_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
HOST_SPACE_INTERNAL_CB_SZ, HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
if (!hdev->internal_cb_va_base) {
rc = -ENOMEM;
goto destroy_internal_cb_pool;
}
mutex_lock(&hdev->mmu_lock);
rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base, hdev->internal_cb_pool_dma_addr,
HOST_SPACE_INTERNAL_CB_SZ);
if (rc)
goto unreserve_internal_cb_pool;
rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
if (rc)
goto unmap_internal_cb_pool;
mutex_unlock(&hdev->mmu_lock);
return 0;
unmap_internal_cb_pool:
hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
unreserve_internal_cb_pool:
mutex_unlock(&hdev->mmu_lock);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
destroy_internal_cb_pool:
gen_pool_destroy(hdev->internal_cb_pool);
free_internal_cb_pool:
hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr,
hdev->internal_cb_pool_dma_addr);
return rc;
}
static void gaudi2_internal_cb_pool_fini(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (!(gaudi2->hw_cap_initialized & HW_CAP_PMMU))
return;
mutex_lock(&hdev->mmu_lock);
hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
mutex_unlock(&hdev->mmu_lock);
gen_pool_destroy(hdev->internal_cb_pool);
hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr,
hdev->internal_cb_pool_dma_addr);
}
static void gaudi2_restore_user_registers(struct hl_device *hdev)
{
gaudi2_restore_user_sm_registers(hdev);
gaudi2_restore_user_qm_registers(hdev);
}
static int gaudi2_map_virtual_msix_doorbell_memory(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int rc;
rc = hl_mmu_map_page(ctx, RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START,
gaudi2->virt_msix_db_dma_addr, prop->pmmu.page_size, true);
if (rc)
dev_err(hdev->dev, "Failed to map VA %#llx for virtual MSI-X doorbell memory\n",
RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START);
return rc;
}
static void gaudi2_unmap_virtual_msix_doorbell_memory(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
rc = hl_mmu_unmap_page(ctx, RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START,
prop->pmmu.page_size, true);
if (rc)
dev_err(hdev->dev, "Failed to unmap VA %#llx of virtual MSI-X doorbell memory\n",
RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START);
}
static int gaudi2_ctx_init(struct hl_ctx *ctx)
{
int rc;
rc = gaudi2_mmu_prepare(ctx->hdev, ctx->asid);
if (rc)
return rc;
/* No need to clear user registers if the device has just
 * performed a reset; in that case we restore only the NIC QM registers
 */
if (ctx->hdev->reset_upon_device_release)
gaudi2_restore_nic_qm_registers(ctx->hdev);
else
gaudi2_restore_user_registers(ctx->hdev);
rc = gaudi2_internal_cb_pool_init(ctx->hdev, ctx);
if (rc)
return rc;
rc = gaudi2_map_virtual_msix_doorbell_memory(ctx);
if (rc)
gaudi2_internal_cb_pool_fini(ctx->hdev, ctx);
return rc;
}
static void gaudi2_ctx_fini(struct hl_ctx *ctx)
{
if (ctx->asid == HL_KERNEL_ASID_ID)
return;
gaudi2_internal_cb_pool_fini(ctx->hdev, ctx);
gaudi2_unmap_virtual_msix_doorbell_memory(ctx);
}
static int gaudi2_pre_schedule_cs(struct hl_cs *cs)
{
struct hl_device *hdev = cs->ctx->hdev;
int index = cs->sequence & (hdev->asic_prop.max_pending_cs - 1);
u32 mon_payload, sob_id, mon_id;
if (!cs_needs_completion(cs))
return 0;
/*
 * The first 64 SOB/MON are reserved for the driver for the QMAN auto-completion
 * mechanism. Each SOB/MON pair is used for a pending CS with the same
 * cyclic index. The SOB value is increased when each of the CS jobs is
 * completed. When the SOB reaches the number of CS jobs, the monitor
 * generates an MSI-X interrupt.
 */
sob_id = mon_id = index;
mon_payload = (1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
(1 << CQ_ENTRY_READY_SHIFT) | index;
gaudi2_arm_cq_monitor(hdev, sob_id, mon_id, GAUDI2_RESERVED_CQ_CS_COMPLETION, mon_payload,
cs->jobs_cnt);
return 0;
}
static u32 gaudi2_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
{
return HL_INVALID_QUEUE;
}
static u32 gaudi2_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id, u32 size, bool eb)
{
struct hl_cb *cb = data;
struct packet_msg_short *pkt;
u32 value, ctl, pkt_size = sizeof(*pkt);
pkt = (struct packet_msg_short *) (uintptr_t) (cb->kernel_address + size);
memset(pkt, 0, pkt_size);
/* Inc by 1, Mode ADD */
value = FIELD_PREP(GAUDI2_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK, 1);
value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_SOB_MOD_MASK, 1);
ctl = FIELD_PREP(GAUDI2_PKT_SHORT_CTL_ADDR_MASK, sob_id * 4);
ctl |= FIELD_PREP(GAUDI2_PKT_SHORT_CTL_BASE_MASK, 1); /* SOB base */
ctl |= FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, eb);
ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
return size + pkt_size;
}
static u32 gaudi2_add_mon_msg_short(struct packet_msg_short *pkt, u32 value, u16 addr)
{
u32 ctl, pkt_size = sizeof(*pkt);
memset(pkt, 0, pkt_size);
ctl = FIELD_PREP(GAUDI2_PKT_SHORT_CTL_ADDR_MASK, addr);
ctl |= FIELD_PREP(GAUDI2_PKT_SHORT_CTL_BASE_MASK, 0); /* MON base */
ctl |= FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0);
ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 0);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
return pkt_size;
}
static u32 gaudi2_add_arm_monitor_pkt(struct hl_device *hdev, struct packet_msg_short *pkt,
u16 sob_base, u8 sob_mask, u16 sob_val, u16 addr)
{
u32 ctl, value, pkt_size = sizeof(*pkt);
u8 mask;
if (hl_gen_sob_mask(sob_base, sob_mask, &mask)) {
dev_err(hdev->dev, "sob_base %u (mask %#x) is not valid\n", sob_base, sob_mask);
return 0;
}
memset(pkt, 0, pkt_size);
value = FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_base / 8);
value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_SYNC_VAL_MASK, sob_val);
value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_MODE_MASK, 0); /* GREATER OR EQUAL */
value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_MASK_MASK, mask);
ctl = FIELD_PREP(GAUDI2_PKT_SHORT_CTL_ADDR_MASK, addr);
ctl |= FIELD_PREP(GAUDI2_PKT_SHORT_CTL_BASE_MASK, 0); /* MON base */
ctl |= FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0);
ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
return pkt_size;
}
static u32 gaudi2_add_fence_pkt(struct packet_fence *pkt)
{
u32 ctl, cfg, pkt_size = sizeof(*pkt);
memset(pkt, 0, pkt_size);
cfg = FIELD_PREP(GAUDI2_PKT_FENCE_CFG_DEC_VAL_MASK, 1);
cfg |= FIELD_PREP(GAUDI2_PKT_FENCE_CFG_TARGET_VAL_MASK, 1);
cfg |= FIELD_PREP(GAUDI2_PKT_FENCE_CFG_ID_MASK, 2);
ctl = FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_FENCE);
ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0);
ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 1);
pkt->cfg = cpu_to_le32(cfg);
pkt->ctl = cpu_to_le32(ctl);
return pkt_size;
}
static u32 gaudi2_gen_wait_cb(struct hl_device *hdev, struct hl_gen_wait_properties *prop)
{
struct hl_cb *cb = prop->data;
void *buf = (void *) (uintptr_t) (cb->kernel_address);
u64 monitor_base, fence_addr = 0;
u32 stream_index, size = prop->size;
u16 msg_addr_offset;
stream_index = prop->q_idx % 4;
fence_addr = CFG_BASE + gaudi2_qm_blocks_bases[prop->q_idx] +
QM_FENCE2_OFFSET + stream_index * 4;
/*
* monitor_base should be the content of the base0 address registers,
* so it will be added to the msg short offsets
*/
monitor_base = mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0;
/* First monitor config packet: low address of the sync */
msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + prop->mon_id * 4) -
monitor_base;
size += gaudi2_add_mon_msg_short(buf + size, (u32) fence_addr, msg_addr_offset);
/* Second monitor config packet: high address of the sync */
msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + prop->mon_id * 4) -
monitor_base;
size += gaudi2_add_mon_msg_short(buf + size, (u32) (fence_addr >> 32), msg_addr_offset);
/*
* Third monitor config packet: the payload, i.e. what to write when the
* sync triggers
*/
msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + prop->mon_id * 4) -
monitor_base;
size += gaudi2_add_mon_msg_short(buf + size, 1, msg_addr_offset);
/* Fourth monitor config packet: bind the monitor to a sync object */
msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 + prop->mon_id * 4) - monitor_base;
size += gaudi2_add_arm_monitor_pkt(hdev, buf + size, prop->sob_base, prop->sob_mask,
prop->sob_val, msg_addr_offset);
/* Fence packet */
size += gaudi2_add_fence_pkt(buf + size);
return size;
}
static void gaudi2_reset_sob(struct hl_device *hdev, void *data)
{
struct hl_hw_sob *hw_sob = data;
dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx, hw_sob->sob_id);
WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + hw_sob->sob_id * 4, 0);
kref_init(&hw_sob->kref);
}
static void gaudi2_reset_sob_group(struct hl_device *hdev, u16 sob_group)
{
}
static u64 gaudi2_get_device_time(struct hl_device *hdev)
{
u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
}
static int gaudi2_collective_wait_init_cs(struct hl_cs *cs)
{
return 0;
}
static int gaudi2_collective_wait_create_jobs(struct hl_device *hdev, struct hl_ctx *ctx,
struct hl_cs *cs, u32 wait_queue_id,
u32 collective_engine_id, u32 encaps_signal_offset)
{
return -EINVAL;
}
/*
 * gaudi2_mmu_scramble_addr - converts a DRAM (non power of 2) page-size aligned
 * address to a DMMU page-size (64MB) address before mapping it in the MMU.
 * The operation is performed on both the virtual and physical addresses.
 * For a device with 6 HBMs the scramble is:
 * (addr[47:0] / 48M) * 64M + addr % 48M + addr[63:48]
 *
 * Example:
 * ==============================================================================
 * Allocated DRAM   Reserved VA      Scrambled VA for MMU mapping    Scrambled PA
 * phys address                                                      in MMU last HOP
 * ==============================================================================
 * PA1 0x3000000    VA1 0x9C000000   SVA1= (VA1/48M)*64M 0xD0000000  <- PA1/48M 0x1
 * PA2 0x9000000    VA2 0x9F000000   SVA2= (VA2/48M)*64M 0xD4000000  <- PA2/48M 0x3
 * ==============================================================================
 */
static u64 gaudi2_mmu_scramble_addr(struct hl_device *hdev, u64 raw_addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 divisor, mod_va;
u64 div_va;
/* accept any address in the DRAM address space */
if (hl_mem_area_inside_range(raw_addr, sizeof(raw_addr), DRAM_PHYS_BASE,
VA_HBM_SPACE_END)) {
divisor = prop->num_functional_hbms * GAUDI2_HBM_MMU_SCRM_MEM_SIZE;
div_va = div_u64_rem(raw_addr & GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK, divisor, &mod_va);
return (raw_addr & ~GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK) |
(div_va << GAUDI2_HBM_MMU_SCRM_DIV_SHIFT) |
(mod_va << GAUDI2_HBM_MMU_SCRM_MOD_SHIFT);
}
return raw_addr;
}
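/*
 * Illustrative arithmetic for the example rows above (assuming 6 functional
 * HBMs, so that the divisor num_functional_hbms * GAUDI2_HBM_MMU_SCRM_MEM_SIZE
 * works out to 48MB, as stated in the comment):
 * VA1 0x9C000000: 0x9C000000 / 48MB = 52, remainder 0
 *                 -> SVA1 = 52 * 64MB = 0xD0000000
 * PA1 0x3000000:  0x3000000 / 48MB = 1 -> scrambled PA index 0x1 in the last HOP
 */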
static u64 gaudi2_mmu_descramble_addr(struct hl_device *hdev, u64 scrambled_addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 divisor, mod_va;
u64 div_va;
/* accept any address in the DRAM address space */
if (hl_mem_area_inside_range(scrambled_addr, sizeof(scrambled_addr), DRAM_PHYS_BASE,
VA_HBM_SPACE_END)) {
divisor = prop->num_functional_hbms * GAUDI2_HBM_MMU_SCRM_MEM_SIZE;
div_va = div_u64_rem(scrambled_addr & GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK,
PAGE_SIZE_64MB, &mod_va);
return ((scrambled_addr & ~GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK) +
(div_va * divisor + mod_va));
}
return scrambled_addr;
}
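/*
 * Sanity check (illustrative): descrambling inverts the scramble above.
 * For SVA1 = 0xD0000000 with a 64MB DMMU page and a 48MB divisor:
 * 0xD0000000 / 64MB = 52, remainder 0 -> 52 * 48MB + 0 = 0x9C000000 = VA1.
 */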
static u32 gaudi2_get_dec_base_addr(struct hl_device *hdev, u32 core_id)
{
u32 base = 0, dcore_id, dec_id;
if (core_id >= NUMBER_OF_DEC) {
dev_err(hdev->dev, "Unexpected core number %d for DEC\n", core_id);
goto out;
}
if (core_id < 8) {
dcore_id = core_id / NUM_OF_DEC_PER_DCORE;
dec_id = core_id % NUM_OF_DEC_PER_DCORE;
base = mmDCORE0_DEC0_CMD_BASE + dcore_id * DCORE_OFFSET +
dec_id * DCORE_VDEC_OFFSET;
} else {
/* PCIe Shared Decoder */
base = mmPCIE_DEC0_CMD_BASE + ((core_id % 8) * PCIE_VDEC_OFFSET);
}
out:
return base;
}
static int gaudi2_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
u32 *block_size, u32 *block_id)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int i;
for (i = 0 ; i < NUM_USER_MAPPED_BLOCKS ; i++) {
if (block_addr == CFG_BASE + gaudi2->mapped_blocks[i].address) {
*block_id = i;
if (block_size)
*block_size = gaudi2->mapped_blocks[i].size;
return 0;
}
}
dev_err(hdev->dev, "Invalid block address %#llx", block_addr);
return -EINVAL;
}
static int gaudi2_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
u32 block_id, u32 block_size)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u64 offset_in_bar;
u64 address;
int rc;
if (block_id >= NUM_USER_MAPPED_BLOCKS) {
dev_err(hdev->dev, "Invalid block id %u", block_id);
return -EINVAL;
}
/* we allow mapping only an entire block */
if (block_size != gaudi2->mapped_blocks[block_id].size) {
dev_err(hdev->dev, "Invalid block size %u", block_size);
return -EINVAL;
}
offset_in_bar = CFG_BASE + gaudi2->mapped_blocks[block_id].address - STM_FLASH_BASE_ADDR;
address = pci_resource_start(hdev->pdev, SRAM_CFG_BAR_ID) + offset_in_bar;
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE);
rc = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
block_size, vma->vm_page_prot);
if (rc)
dev_err(hdev->dev, "remap_pfn_range error %d", rc);
return rc;
}
static void gaudi2_enable_events_from_fw(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
u32 irq_handler_offset = le32_to_cpu(dyn_regs->gic_host_ints_irq);
if (gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)
WREG32(irq_handler_offset,
gaudi2_irq_map_table[GAUDI2_EVENT_CPU_INTS_REGISTER].cpu_id);
}
static int gaudi2_get_mmu_base(struct hl_device *hdev, u64 mmu_id, u32 *mmu_base)
{
switch (mmu_id) {
case HW_CAP_DCORE0_DMMU0:
*mmu_base = mmDCORE0_HMMU0_MMU_BASE;
break;
case HW_CAP_DCORE0_DMMU1:
*mmu_base = mmDCORE0_HMMU1_MMU_BASE;
break;
case HW_CAP_DCORE0_DMMU2:
*mmu_base = mmDCORE0_HMMU2_MMU_BASE;
break;
case HW_CAP_DCORE0_DMMU3:
*mmu_base = mmDCORE0_HMMU3_MMU_BASE;
break;
case HW_CAP_DCORE1_DMMU0:
*mmu_base = mmDCORE1_HMMU0_MMU_BASE;
break;
case HW_CAP_DCORE1_DMMU1:
*mmu_base = mmDCORE1_HMMU1_MMU_BASE;
break;
case HW_CAP_DCORE1_DMMU2:
*mmu_base = mmDCORE1_HMMU2_MMU_BASE;
break;
case HW_CAP_DCORE1_DMMU3:
*mmu_base = mmDCORE1_HMMU3_MMU_BASE;
break;
case HW_CAP_DCORE2_DMMU0:
*mmu_base = mmDCORE2_HMMU0_MMU_BASE;
break;
case HW_CAP_DCORE2_DMMU1:
*mmu_base = mmDCORE2_HMMU1_MMU_BASE;
break;
case HW_CAP_DCORE2_DMMU2:
*mmu_base = mmDCORE2_HMMU2_MMU_BASE;
break;
case HW_CAP_DCORE2_DMMU3:
*mmu_base = mmDCORE2_HMMU3_MMU_BASE;
break;
case HW_CAP_DCORE3_DMMU0:
*mmu_base = mmDCORE3_HMMU0_MMU_BASE;
break;
case HW_CAP_DCORE3_DMMU1:
*mmu_base = mmDCORE3_HMMU1_MMU_BASE;
break;
case HW_CAP_DCORE3_DMMU2:
*mmu_base = mmDCORE3_HMMU2_MMU_BASE;
break;
case HW_CAP_DCORE3_DMMU3:
*mmu_base = mmDCORE3_HMMU3_MMU_BASE;
break;
case HW_CAP_PMMU:
*mmu_base = mmPMMU_HBW_MMU_BASE;
break;
default:
return -EINVAL;
}
return 0;
}
static void gaudi2_ack_mmu_error(struct hl_device *hdev, u64 mmu_id)
{
bool is_pmmu = (mmu_id == HW_CAP_PMMU);
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 mmu_base;
if (!(gaudi2->hw_cap_initialized & mmu_id))
return;
if (gaudi2_get_mmu_base(hdev, mmu_id, &mmu_base))
return;
gaudi2_handle_page_error(hdev, mmu_base, is_pmmu, NULL);
gaudi2_handle_access_error(hdev, mmu_base, is_pmmu);
}
static int gaudi2_ack_mmu_page_fault_or_access_error(struct hl_device *hdev, u64 mmu_cap_mask)
{
u32 i, mmu_id, num_of_hmmus = NUM_OF_HMMU_PER_DCORE * NUM_OF_DCORES;
/* check all HMMUs */
for (i = 0 ; i < num_of_hmmus ; i++) {
mmu_id = HW_CAP_DCORE0_DMMU0 << i;
if (mmu_cap_mask & mmu_id)
gaudi2_ack_mmu_error(hdev, mmu_id);
}
/* check PMMU */
if (mmu_cap_mask & HW_CAP_PMMU)
gaudi2_ack_mmu_error(hdev, HW_CAP_PMMU);
return 0;
}
static void gaudi2_get_msi_info(__le32 *table)
{
table[CPUCP_EVENT_QUEUE_MSI_TYPE] = cpu_to_le32(GAUDI2_EVENT_QUEUE_MSIX_IDX);
}
static int gaudi2_map_pll_idx_to_fw_idx(u32 pll_idx)
{
switch (pll_idx) {
case HL_GAUDI2_CPU_PLL: return CPU_PLL;
case HL_GAUDI2_PCI_PLL: return PCI_PLL;
case HL_GAUDI2_NIC_PLL: return NIC_PLL;
case HL_GAUDI2_DMA_PLL: return DMA_PLL;
case HL_GAUDI2_MESH_PLL: return MESH_PLL;
case HL_GAUDI2_MME_PLL: return MME_PLL;
case HL_GAUDI2_TPC_PLL: return TPC_PLL;
case HL_GAUDI2_IF_PLL: return IF_PLL;
case HL_GAUDI2_SRAM_PLL: return SRAM_PLL;
case HL_GAUDI2_HBM_PLL: return HBM_PLL;
case HL_GAUDI2_VID_PLL: return VID_PLL;
case HL_GAUDI2_MSS_PLL: return MSS_PLL;
default: return -EINVAL;
}
}
static int gaudi2_gen_sync_to_engine_map(struct hl_device *hdev, struct hl_sync_to_engine_map *map)
{
/* Not implemented */
return 0;
}
static int gaudi2_monitor_valid(struct hl_mon_state_dump *mon)
{
/* Not implemented */
return 0;
}
static int gaudi2_print_single_monitor(char **buf, size_t *size, size_t *offset,
struct hl_device *hdev, struct hl_mon_state_dump *mon)
{
/* Not implemented */
return 0;
}
static int gaudi2_print_fences_single_engine(struct hl_device *hdev, u64 base_offset,
u64 status_base_offset, enum hl_sync_engine_type engine_type,
u32 engine_id, char **buf, size_t *size, size_t *offset)
{
/* Not implemented */
return 0;
}
static struct hl_state_dump_specs_funcs gaudi2_state_dump_funcs = {
.monitor_valid = gaudi2_monitor_valid,
.print_single_monitor = gaudi2_print_single_monitor,
.gen_sync_to_engine_map = gaudi2_gen_sync_to_engine_map,
.print_fences_single_engine = gaudi2_print_fences_single_engine,
};
static void gaudi2_state_dump_init(struct hl_device *hdev)
{
/* Not implemented */
hdev->state_dump_specs.props = gaudi2_state_dump_specs_props;
hdev->state_dump_specs.funcs = gaudi2_state_dump_funcs;
}
static u32 gaudi2_get_sob_addr(struct hl_device *hdev, u32 sob_id)
{
return 0;
}
static u32 *gaudi2_get_stream_master_qid_arr(void)
{
return NULL;
}
static void gaudi2_add_device_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp,
struct attribute_group *dev_vrm_attr_grp)
{
hl_sysfs_add_dev_clk_attr(hdev, dev_clk_attr_grp);
hl_sysfs_add_dev_vrm_attr(hdev, dev_vrm_attr_grp);
}
static int gaudi2_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
u32 page_size, u32 *real_page_size, bool is_dram_addr)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
/* for host pages the page size must be a multiple of the MMU page size */
if (!is_dram_addr) {
if (page_size % mmu_prop->page_size)
goto page_size_err;
*real_page_size = mmu_prop->page_size;
return 0;
}
if ((page_size % prop->dram_page_size) || (prop->dram_page_size > mmu_prop->page_size))
goto page_size_err;
/*
 * The MMU page size is different from the DRAM page size (more precisely, the
 * DMMU page is greater than the DRAM page size).
 * For this reason, work with the DRAM page size and let the MMU scrambling
 * routine handle this mismatch when calculating the address to place in the
 * MMU page table (in that case, also make sure that the dram_page_size is not
 * greater than the MMU page size).
 */
*real_page_size = prop->dram_page_size;
return 0;
page_size_err:
dev_err(hdev->dev, "page size of %u is not %uKB aligned, can't map\n",
page_size, mmu_prop->page_size >> 10);
return -EFAULT;
}
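/*
 * Example (illustrative, assuming the 48MB non-power-of-2 DRAM page size and
 * 64MB DMMU page implied by the scramble comment above): a 96MB DRAM map
 * request passes the checks here and gets a real page size of 48MB, so it is
 * mapped as two 48MB pages, each of which the scramble routine then places on
 * a 64MB-aligned DMMU page.
 */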
static int gaudi2_get_monitor_dump(struct hl_device *hdev, void *data)
{
return -EOPNOTSUPP;
}
int gaudi2_send_device_activity(struct hl_device *hdev, bool open)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
return hl_fw_send_device_activity(hdev, open);
}
static const struct hl_asic_funcs gaudi2_funcs = {
.early_init = gaudi2_early_init,
.early_fini = gaudi2_early_fini,
.late_init = gaudi2_late_init,
.late_fini = gaudi2_late_fini,
.sw_init = gaudi2_sw_init,
.sw_fini = gaudi2_sw_fini,
.hw_init = gaudi2_hw_init,
.hw_fini = gaudi2_hw_fini,
.halt_engines = gaudi2_halt_engines,
.suspend = gaudi2_suspend,
.resume = gaudi2_resume,
.mmap = gaudi2_mmap,
.ring_doorbell = gaudi2_ring_doorbell,
.pqe_write = gaudi2_pqe_write,
.asic_dma_alloc_coherent = gaudi2_dma_alloc_coherent,
.asic_dma_free_coherent = gaudi2_dma_free_coherent,
.scrub_device_mem = gaudi2_scrub_device_mem,
.scrub_device_dram = gaudi2_scrub_device_dram,
.get_int_queue_base = NULL,
.test_queues = gaudi2_test_queues,
.asic_dma_pool_zalloc = gaudi2_dma_pool_zalloc,
.asic_dma_pool_free = gaudi2_dma_pool_free,
.cpu_accessible_dma_pool_alloc = gaudi2_cpu_accessible_dma_pool_alloc,
.cpu_accessible_dma_pool_free = gaudi2_cpu_accessible_dma_pool_free,
.asic_dma_unmap_single = gaudi2_dma_unmap_single,
.asic_dma_map_single = gaudi2_dma_map_single,
.hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
.cs_parser = gaudi2_cs_parser,
.asic_dma_map_sgtable = hl_dma_map_sgtable,
.add_end_of_cb_packets = NULL,
.update_eq_ci = gaudi2_update_eq_ci,
.context_switch = gaudi2_context_switch,
.restore_phase_topology = gaudi2_restore_phase_topology,
.debugfs_read_dma = gaudi2_debugfs_read_dma,
.add_device_attr = gaudi2_add_device_attr,
.handle_eqe = gaudi2_handle_eqe,
.get_events_stat = gaudi2_get_events_stat,
.read_pte = NULL,
.write_pte = NULL,
.mmu_invalidate_cache = gaudi2_mmu_invalidate_cache,
.mmu_invalidate_cache_range = gaudi2_mmu_invalidate_cache_range,
.mmu_prefetch_cache_range = NULL,
.send_heartbeat = gaudi2_send_heartbeat,
.debug_coresight = gaudi2_debug_coresight,
.is_device_idle = gaudi2_is_device_idle,
.compute_reset_late_init = gaudi2_compute_reset_late_init,
.hw_queues_lock = gaudi2_hw_queues_lock,
.hw_queues_unlock = gaudi2_hw_queues_unlock,
.get_pci_id = gaudi2_get_pci_id,
.get_eeprom_data = gaudi2_get_eeprom_data,
.get_monitor_dump = gaudi2_get_monitor_dump,
.send_cpu_message = gaudi2_send_cpu_message,
.pci_bars_map = gaudi2_pci_bars_map,
.init_iatu = gaudi2_init_iatu,
.rreg = hl_rreg,
.wreg = hl_wreg,
.halt_coresight = gaudi2_halt_coresight,
.ctx_init = gaudi2_ctx_init,
.ctx_fini = gaudi2_ctx_fini,
.pre_schedule_cs = gaudi2_pre_schedule_cs,
.get_queue_id_for_cq = gaudi2_get_queue_id_for_cq,
.load_firmware_to_device = NULL,
.load_boot_fit_to_device = NULL,
.get_signal_cb_size = gaudi2_get_signal_cb_size,
.get_wait_cb_size = gaudi2_get_wait_cb_size,
.gen_signal_cb = gaudi2_gen_signal_cb,
.gen_wait_cb = gaudi2_gen_wait_cb,
.reset_sob = gaudi2_reset_sob,
.reset_sob_group = gaudi2_reset_sob_group,
.get_device_time = gaudi2_get_device_time,
.pb_print_security_errors = gaudi2_pb_print_security_errors,
.collective_wait_init_cs = gaudi2_collective_wait_init_cs,
.collective_wait_create_jobs = gaudi2_collective_wait_create_jobs,
.get_dec_base_addr = gaudi2_get_dec_base_addr,
.scramble_addr = gaudi2_mmu_scramble_addr,
.descramble_addr = gaudi2_mmu_descramble_addr,
.ack_protection_bits_errors = gaudi2_ack_protection_bits_errors,
.get_hw_block_id = gaudi2_get_hw_block_id,
.hw_block_mmap = gaudi2_block_mmap,
.enable_events_from_fw = gaudi2_enable_events_from_fw,
.ack_mmu_errors = gaudi2_ack_mmu_page_fault_or_access_error,
.get_msi_info = gaudi2_get_msi_info,
.map_pll_idx_to_fw_idx = gaudi2_map_pll_idx_to_fw_idx,
.init_firmware_preload_params = gaudi2_init_firmware_preload_params,
.init_firmware_loader = gaudi2_init_firmware_loader,
.init_cpu_scrambler_dram = gaudi2_init_scrambler_hbm,
.state_dump_init = gaudi2_state_dump_init,
.get_sob_addr = &gaudi2_get_sob_addr,
.set_pci_memory_regions = gaudi2_set_pci_memory_regions,
.get_stream_master_qid_arr = gaudi2_get_stream_master_qid_arr,
.check_if_razwi_happened = gaudi2_check_if_razwi_happened,
.mmu_get_real_page_size = gaudi2_mmu_get_real_page_size,
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = gaudi2_set_hbm_bar_base,
.set_engine_cores = gaudi2_set_engine_cores,
.set_engines = gaudi2_set_engines,
.send_device_activity = gaudi2_send_device_activity,
.set_dram_properties = gaudi2_set_dram_properties,
.set_binning_masks = gaudi2_set_binning_masks,
};
void gaudi2_set_asic_funcs(struct hl_device *hdev)
{
hdev->asic_funcs = &gaudi2_funcs;
}
| linux-master | drivers/accel/habanalabs/gaudi2/gaudi2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "gaudi2_coresight_regs.h"
#include <uapi/drm/habanalabs_accel.h>
#define GAUDI2_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 2000)
#define SPMU_MAX_COUNTERS 6
#define COMPONENT_ID_INVALID ((u32)(-1))
#define MAX_BMONS_PER_UNIT 8
enum gaudi2_hif_hmmu_id {
HMMU_ID_DCORE0_HMMU0,
HMMU_ID_DCORE0_HMMU1,
HMMU_ID_DCORE0_HMMU2,
HMMU_ID_DCORE0_HMMU3,
HMMU_ID_DCORE1_HMMU0,
HMMU_ID_DCORE1_HMMU1,
HMMU_ID_DCORE1_HMMU2,
HMMU_ID_DCORE1_HMMU3,
HMMU_ID_DCORE2_HMMU0,
HMMU_ID_DCORE2_HMMU1,
HMMU_ID_DCORE2_HMMU2,
HMMU_ID_DCORE2_HMMU3,
HMMU_ID_DCORE3_HMMU0,
HMMU_ID_DCORE3_HMMU1,
HMMU_ID_DCORE3_HMMU2,
HMMU_ID_DCORE3_HMMU3,
HMMU_ID_SIZE,
};
enum gaudi2_xbar_edge_id {
XBAR_EDGE_ID_DCORE0,
XBAR_EDGE_ID_DCORE1,
XBAR_EDGE_ID_DCORE2,
XBAR_EDGE_ID_DCORE3,
XBAR_EDGE_ID_SIZE
};
/**
* struct component_config_offsets - per cs_dbg unit - view of all related component indices
* @funnel_id: funnel id - index in debug_funnel_regs
* @etf_id: etf id - index in debug_etf_regs
* @stm_id: stm id - index in debug_stm_regs
* @spmu_id: spmu_id - index in debug_spmu_regs
* @bmon_count: number of bmons per unit
* @bmon_ids: array of bmon ids (max size - MAX_BMONS_PER_UNIT) - indices in debug_bmon_regs
*/
struct component_config_offsets {
u32 funnel_id;
u32 etf_id;
u32 stm_id;
u32 spmu_id;
u32 bmon_count;
u32 bmon_ids[MAX_BMONS_PER_UNIT];
};
static u64 debug_stm_regs[GAUDI2_STM_LAST + 1] = {
[GAUDI2_STM_DCORE0_TPC0_EML] = mmDCORE0_TPC0_EML_STM_BASE,
[GAUDI2_STM_DCORE0_TPC1_EML] = mmDCORE0_TPC1_EML_STM_BASE,
[GAUDI2_STM_DCORE0_TPC2_EML] = mmDCORE0_TPC2_EML_STM_BASE,
[GAUDI2_STM_DCORE0_TPC3_EML] = mmDCORE0_TPC3_EML_STM_BASE,
[GAUDI2_STM_DCORE0_TPC4_EML] = mmDCORE0_TPC4_EML_STM_BASE,
[GAUDI2_STM_DCORE0_TPC5_EML] = mmDCORE0_TPC5_EML_STM_BASE,
[GAUDI2_STM_DCORE0_TPC6_EML] = mmDCORE0_TPC6_EML_STM_BASE,
[GAUDI2_STM_DCORE1_TPC0_EML] = mmDCORE1_TPC0_EML_STM_BASE,
[GAUDI2_STM_DCORE1_TPC1_EML] = mmDCORE1_TPC1_EML_STM_BASE,
[GAUDI2_STM_DCORE1_TPC2_EML] = mmDCORE1_TPC2_EML_STM_BASE,
[GAUDI2_STM_DCORE1_TPC3_EML] = mmDCORE1_TPC3_EML_STM_BASE,
[GAUDI2_STM_DCORE1_TPC4_EML] = mmDCORE1_TPC4_EML_STM_BASE,
[GAUDI2_STM_DCORE1_TPC5_EML] = mmDCORE1_TPC5_EML_STM_BASE,
[GAUDI2_STM_DCORE2_TPC0_EML] = mmDCORE2_TPC0_EML_STM_BASE,
[GAUDI2_STM_DCORE2_TPC1_EML] = mmDCORE2_TPC1_EML_STM_BASE,
[GAUDI2_STM_DCORE2_TPC2_EML] = mmDCORE2_TPC2_EML_STM_BASE,
[GAUDI2_STM_DCORE2_TPC3_EML] = mmDCORE2_TPC3_EML_STM_BASE,
[GAUDI2_STM_DCORE2_TPC4_EML] = mmDCORE2_TPC4_EML_STM_BASE,
[GAUDI2_STM_DCORE2_TPC5_EML] = mmDCORE2_TPC5_EML_STM_BASE,
[GAUDI2_STM_DCORE3_TPC0_EML] = mmDCORE3_TPC0_EML_STM_BASE,
[GAUDI2_STM_DCORE3_TPC1_EML] = mmDCORE3_TPC1_EML_STM_BASE,
[GAUDI2_STM_DCORE3_TPC2_EML] = mmDCORE3_TPC2_EML_STM_BASE,
[GAUDI2_STM_DCORE3_TPC3_EML] = mmDCORE3_TPC3_EML_STM_BASE,
[GAUDI2_STM_DCORE3_TPC4_EML] = mmDCORE3_TPC4_EML_STM_BASE,
[GAUDI2_STM_DCORE3_TPC5_EML] = mmDCORE3_TPC5_EML_STM_BASE,
[GAUDI2_STM_DCORE0_HMMU0_CS] = mmDCORE0_HMMU0_CS_STM_BASE,
[GAUDI2_STM_DCORE0_HMMU1_CS] = mmDCORE0_HMMU1_CS_STM_BASE,
[GAUDI2_STM_DCORE0_HMMU2_CS] = mmDCORE0_HMMU2_CS_STM_BASE,
[GAUDI2_STM_DCORE0_HMMU3_CS] = mmDCORE0_HMMU3_CS_STM_BASE,
[GAUDI2_STM_DCORE0_MME_CTRL] = mmDCORE0_MME_CTRL_STM_BASE,
[GAUDI2_STM_DCORE0_MME_SBTE0] = mmDCORE0_MME_SBTE0_STM_BASE,
[GAUDI2_STM_DCORE0_MME_SBTE1] = mmDCORE0_MME_SBTE1_STM_BASE,
[GAUDI2_STM_DCORE0_MME_SBTE2] = mmDCORE0_MME_SBTE2_STM_BASE,
[GAUDI2_STM_DCORE0_MME_SBTE3] = mmDCORE0_MME_SBTE3_STM_BASE,
[GAUDI2_STM_DCORE0_MME_SBTE4] = mmDCORE0_MME_SBTE4_STM_BASE,
[GAUDI2_STM_DCORE0_MME_ACC] = mmDCORE0_MME_ACC_STM_BASE,
[GAUDI2_STM_DCORE0_SM] = mmDCORE0_SM_STM_BASE,
[GAUDI2_STM_DCORE0_EDMA0_CS] = mmDCORE0_EDMA0_CS_STM_BASE,
[GAUDI2_STM_DCORE0_EDMA1_CS] = mmDCORE0_EDMA1_CS_STM_BASE,
[GAUDI2_STM_DCORE0_VDEC0_CS] = mmDCORE0_VDEC0_CS_STM_BASE,
[GAUDI2_STM_DCORE0_VDEC1_CS] = mmDCORE0_VDEC1_CS_STM_BASE,
[GAUDI2_STM_DCORE1_HMMU0_CS] = mmDCORE1_HMMU0_CS_STM_BASE,
[GAUDI2_STM_DCORE1_HMMU1_CS] = mmDCORE1_HMMU1_CS_STM_BASE,
[GAUDI2_STM_DCORE1_HMMU2_CS] = mmDCORE1_HMMU2_CS_STM_BASE,
[GAUDI2_STM_DCORE1_HMMU3_CS] = mmDCORE1_HMMU3_CS_STM_BASE,
[GAUDI2_STM_DCORE1_MME_CTRL] = mmDCORE1_MME_CTRL_STM_BASE,
[GAUDI2_STM_DCORE1_MME_SBTE0] = mmDCORE1_MME_SBTE0_STM_BASE,
[GAUDI2_STM_DCORE1_MME_SBTE1] = mmDCORE1_MME_SBTE1_STM_BASE,
[GAUDI2_STM_DCORE1_MME_SBTE2] = mmDCORE1_MME_SBTE2_STM_BASE,
[GAUDI2_STM_DCORE1_MME_SBTE3] = mmDCORE1_MME_SBTE3_STM_BASE,
[GAUDI2_STM_DCORE1_MME_SBTE4] = mmDCORE1_MME_SBTE4_STM_BASE,
[GAUDI2_STM_DCORE1_MME_ACC] = mmDCORE1_MME_ACC_STM_BASE,
[GAUDI2_STM_DCORE1_SM] = mmDCORE1_SM_STM_BASE,
[GAUDI2_STM_DCORE1_EDMA0_CS] = mmDCORE1_EDMA0_CS_STM_BASE,
[GAUDI2_STM_DCORE1_EDMA1_CS] = mmDCORE1_EDMA1_CS_STM_BASE,
[GAUDI2_STM_DCORE1_VDEC0_CS] = mmDCORE1_VDEC0_CS_STM_BASE,
[GAUDI2_STM_DCORE1_VDEC1_CS] = mmDCORE1_VDEC1_CS_STM_BASE,
[GAUDI2_STM_DCORE2_HMMU0_CS] = mmDCORE2_HMMU0_CS_STM_BASE,
[GAUDI2_STM_DCORE2_HMMU1_CS] = mmDCORE2_HMMU1_CS_STM_BASE,
[GAUDI2_STM_DCORE2_HMMU2_CS] = mmDCORE2_HMMU2_CS_STM_BASE,
[GAUDI2_STM_DCORE2_HMMU3_CS] = mmDCORE2_HMMU3_CS_STM_BASE,
[GAUDI2_STM_DCORE2_MME_CTRL] = mmDCORE2_MME_CTRL_STM_BASE,
[GAUDI2_STM_DCORE2_MME_SBTE0] = mmDCORE2_MME_SBTE0_STM_BASE,
[GAUDI2_STM_DCORE2_MME_SBTE1] = mmDCORE2_MME_SBTE1_STM_BASE,
[GAUDI2_STM_DCORE2_MME_SBTE2] = mmDCORE2_MME_SBTE2_STM_BASE,
[GAUDI2_STM_DCORE2_MME_SBTE3] = mmDCORE2_MME_SBTE3_STM_BASE,
[GAUDI2_STM_DCORE2_MME_SBTE4] = mmDCORE2_MME_SBTE4_STM_BASE,
[GAUDI2_STM_DCORE2_MME_ACC] = mmDCORE2_MME_ACC_STM_BASE,
[GAUDI2_STM_DCORE2_SM] = mmDCORE2_SM_STM_BASE,
[GAUDI2_STM_DCORE2_EDMA0_CS] = mmDCORE2_EDMA0_CS_STM_BASE,
[GAUDI2_STM_DCORE2_EDMA1_CS] = mmDCORE2_EDMA1_CS_STM_BASE,
[GAUDI2_STM_DCORE2_VDEC0_CS] = mmDCORE2_VDEC0_CS_STM_BASE,
[GAUDI2_STM_DCORE2_VDEC1_CS] = mmDCORE2_VDEC1_CS_STM_BASE,
[GAUDI2_STM_DCORE3_HMMU0_CS] = mmDCORE3_HMMU0_CS_STM_BASE,
[GAUDI2_STM_DCORE3_HMMU1_CS] = mmDCORE3_HMMU1_CS_STM_BASE,
[GAUDI2_STM_DCORE3_HMMU2_CS] = mmDCORE3_HMMU2_CS_STM_BASE,
[GAUDI2_STM_DCORE3_HMMU3_CS] = mmDCORE3_HMMU3_CS_STM_BASE,
[GAUDI2_STM_DCORE3_MME_CTRL] = mmDCORE3_MME_CTRL_STM_BASE,
[GAUDI2_STM_DCORE3_MME_SBTE0] = mmDCORE3_MME_SBTE0_STM_BASE,
[GAUDI2_STM_DCORE3_MME_SBTE1] = mmDCORE3_MME_SBTE1_STM_BASE,
[GAUDI2_STM_DCORE3_MME_SBTE2] = mmDCORE3_MME_SBTE2_STM_BASE,
[GAUDI2_STM_DCORE3_MME_SBTE3] = mmDCORE3_MME_SBTE3_STM_BASE,
[GAUDI2_STM_DCORE3_MME_SBTE4] = mmDCORE3_MME_SBTE4_STM_BASE,
[GAUDI2_STM_DCORE3_MME_ACC] = mmDCORE3_MME_ACC_STM_BASE,
[GAUDI2_STM_DCORE3_SM] = mmDCORE3_SM_STM_BASE,
[GAUDI2_STM_DCORE3_EDMA0_CS] = mmDCORE3_EDMA0_CS_STM_BASE,
[GAUDI2_STM_DCORE3_EDMA1_CS] = mmDCORE3_EDMA1_CS_STM_BASE,
[GAUDI2_STM_DCORE3_VDEC0_CS] = mmDCORE3_VDEC0_CS_STM_BASE,
[GAUDI2_STM_DCORE3_VDEC1_CS] = mmDCORE3_VDEC1_CS_STM_BASE,
[GAUDI2_STM_PCIE] = mmPCIE_STM_BASE,
[GAUDI2_STM_PSOC] = mmPSOC_STM_BASE,
[GAUDI2_STM_PSOC_ARC0_CS] = mmPSOC_ARC0_CS_STM_BASE,
[GAUDI2_STM_PSOC_ARC1_CS] = mmPSOC_ARC1_CS_STM_BASE,
[GAUDI2_STM_PDMA0_CS] = mmPDMA0_CS_STM_BASE,
[GAUDI2_STM_PDMA1_CS] = mmPDMA1_CS_STM_BASE,
[GAUDI2_STM_CPU] = mmCPU_STM_BASE,
[GAUDI2_STM_PMMU_CS] = mmPMMU_CS_STM_BASE,
[GAUDI2_STM_ROT0_CS] = mmROT0_CS_STM_BASE,
[GAUDI2_STM_ROT1_CS] = mmROT1_CS_STM_BASE,
[GAUDI2_STM_ARC_FARM_CS] = mmARC_FARM_CS_STM_BASE,
[GAUDI2_STM_KDMA_CS] = mmKDMA_CS_STM_BASE,
[GAUDI2_STM_PCIE_VDEC0_CS] = mmPCIE_VDEC0_CS_STM_BASE,
[GAUDI2_STM_PCIE_VDEC1_CS] = mmPCIE_VDEC1_CS_STM_BASE,
[GAUDI2_STM_HBM0_MC0_CS] = mmHBM0_MC0_CS_STM_BASE,
[GAUDI2_STM_HBM0_MC1_CS] = mmHBM0_MC1_CS_STM_BASE,
[GAUDI2_STM_HBM1_MC0_CS] = mmHBM1_MC0_CS_STM_BASE,
[GAUDI2_STM_HBM1_MC1_CS] = mmHBM1_MC1_CS_STM_BASE,
[GAUDI2_STM_HBM2_MC0_CS] = mmHBM2_MC0_CS_STM_BASE,
[GAUDI2_STM_HBM2_MC1_CS] = mmHBM2_MC1_CS_STM_BASE,
[GAUDI2_STM_HBM3_MC0_CS] = mmHBM3_MC0_CS_STM_BASE,
[GAUDI2_STM_HBM3_MC1_CS] = mmHBM3_MC1_CS_STM_BASE,
[GAUDI2_STM_HBM4_MC0_CS] = mmHBM4_MC0_CS_STM_BASE,
[GAUDI2_STM_HBM4_MC1_CS] = mmHBM4_MC1_CS_STM_BASE,
[GAUDI2_STM_HBM5_MC0_CS] = mmHBM5_MC0_CS_STM_BASE,
[GAUDI2_STM_HBM5_MC1_CS] = mmHBM5_MC1_CS_STM_BASE,
[GAUDI2_STM_NIC0_DBG_0] = mmNIC0_DBG_STM_0_BASE,
[GAUDI2_STM_NIC0_DBG_1] = mmNIC0_DBG_STM_1_BASE,
[GAUDI2_STM_NIC1_DBG_0] = mmNIC1_DBG_STM_0_BASE,
[GAUDI2_STM_NIC1_DBG_1] = mmNIC1_DBG_STM_1_BASE,
[GAUDI2_STM_NIC2_DBG_0] = mmNIC2_DBG_STM_0_BASE,
[GAUDI2_STM_NIC2_DBG_1] = mmNIC2_DBG_STM_1_BASE,
[GAUDI2_STM_NIC3_DBG_0] = mmNIC3_DBG_STM_0_BASE,
[GAUDI2_STM_NIC3_DBG_1] = mmNIC3_DBG_STM_1_BASE,
[GAUDI2_STM_NIC4_DBG_0] = mmNIC4_DBG_STM_0_BASE,
[GAUDI2_STM_NIC4_DBG_1] = mmNIC4_DBG_STM_1_BASE,
[GAUDI2_STM_NIC5_DBG_0] = mmNIC5_DBG_STM_0_BASE,
[GAUDI2_STM_NIC5_DBG_1] = mmNIC5_DBG_STM_1_BASE,
[GAUDI2_STM_NIC6_DBG_0] = mmNIC6_DBG_STM_0_BASE,
[GAUDI2_STM_NIC6_DBG_1] = mmNIC6_DBG_STM_1_BASE,
[GAUDI2_STM_NIC7_DBG_0] = mmNIC7_DBG_STM_0_BASE,
[GAUDI2_STM_NIC7_DBG_1] = mmNIC7_DBG_STM_1_BASE,
[GAUDI2_STM_NIC8_DBG_0] = mmNIC8_DBG_STM_0_BASE,
[GAUDI2_STM_NIC8_DBG_1] = mmNIC8_DBG_STM_1_BASE,
[GAUDI2_STM_NIC9_DBG_0] = mmNIC9_DBG_STM_0_BASE,
[GAUDI2_STM_NIC9_DBG_1] = mmNIC9_DBG_STM_1_BASE,
[GAUDI2_STM_NIC10_DBG_0] = mmNIC10_DBG_STM_0_BASE,
[GAUDI2_STM_NIC10_DBG_1] = mmNIC10_DBG_STM_1_BASE,
[GAUDI2_STM_NIC11_DBG_0] = mmNIC11_DBG_STM_0_BASE,
[GAUDI2_STM_NIC11_DBG_1] = mmNIC11_DBG_STM_1_BASE
};
static u64 debug_etf_regs[GAUDI2_ETF_LAST + 1] = {
[GAUDI2_ETF_DCORE0_TPC0_EML] = mmDCORE0_TPC0_EML_ETF_BASE,
[GAUDI2_ETF_DCORE0_TPC1_EML] = mmDCORE0_TPC1_EML_ETF_BASE,
[GAUDI2_ETF_DCORE0_TPC2_EML] = mmDCORE0_TPC2_EML_ETF_BASE,
[GAUDI2_ETF_DCORE0_TPC3_EML] = mmDCORE0_TPC3_EML_ETF_BASE,
[GAUDI2_ETF_DCORE0_TPC4_EML] = mmDCORE0_TPC4_EML_ETF_BASE,
[GAUDI2_ETF_DCORE0_TPC5_EML] = mmDCORE0_TPC5_EML_ETF_BASE,
[GAUDI2_ETF_DCORE0_TPC6_EML] = mmDCORE0_TPC6_EML_ETF_BASE,
[GAUDI2_ETF_DCORE1_TPC0_EML] = mmDCORE1_TPC0_EML_ETF_BASE,
[GAUDI2_ETF_DCORE1_TPC1_EML] = mmDCORE1_TPC1_EML_ETF_BASE,
[GAUDI2_ETF_DCORE1_TPC2_EML] = mmDCORE1_TPC2_EML_ETF_BASE,
[GAUDI2_ETF_DCORE1_TPC3_EML] = mmDCORE1_TPC3_EML_ETF_BASE,
[GAUDI2_ETF_DCORE1_TPC4_EML] = mmDCORE1_TPC4_EML_ETF_BASE,
[GAUDI2_ETF_DCORE1_TPC5_EML] = mmDCORE1_TPC5_EML_ETF_BASE,
[GAUDI2_ETF_DCORE2_TPC0_EML] = mmDCORE2_TPC0_EML_ETF_BASE,
[GAUDI2_ETF_DCORE2_TPC1_EML] = mmDCORE2_TPC1_EML_ETF_BASE,
[GAUDI2_ETF_DCORE2_TPC2_EML] = mmDCORE2_TPC2_EML_ETF_BASE,
[GAUDI2_ETF_DCORE2_TPC3_EML] = mmDCORE2_TPC3_EML_ETF_BASE,
[GAUDI2_ETF_DCORE2_TPC4_EML] = mmDCORE2_TPC4_EML_ETF_BASE,
[GAUDI2_ETF_DCORE2_TPC5_EML] = mmDCORE2_TPC5_EML_ETF_BASE,
[GAUDI2_ETF_DCORE3_TPC0_EML] = mmDCORE3_TPC0_EML_ETF_BASE,
[GAUDI2_ETF_DCORE3_TPC1_EML] = mmDCORE3_TPC1_EML_ETF_BASE,
[GAUDI2_ETF_DCORE3_TPC2_EML] = mmDCORE3_TPC2_EML_ETF_BASE,
[GAUDI2_ETF_DCORE3_TPC3_EML] = mmDCORE3_TPC3_EML_ETF_BASE,
[GAUDI2_ETF_DCORE3_TPC4_EML] = mmDCORE3_TPC4_EML_ETF_BASE,
[GAUDI2_ETF_DCORE3_TPC5_EML] = mmDCORE3_TPC5_EML_ETF_BASE,
[GAUDI2_ETF_DCORE0_HMMU0_CS] = mmDCORE0_HMMU0_CS_ETF_BASE,
[GAUDI2_ETF_DCORE0_HMMU1_CS] = mmDCORE0_HMMU1_CS_ETF_BASE,
[GAUDI2_ETF_DCORE0_HMMU2_CS] = mmDCORE0_HMMU2_CS_ETF_BASE,
[GAUDI2_ETF_DCORE0_HMMU3_CS] = mmDCORE0_HMMU3_CS_ETF_BASE,
[GAUDI2_ETF_DCORE0_MME_CTRL] = mmDCORE0_MME_CTRL_ETF_BASE,
[GAUDI2_ETF_DCORE0_MME_SBTE0] = mmDCORE0_MME_SBTE0_ETF_BASE,
[GAUDI2_ETF_DCORE0_MME_SBTE1] = mmDCORE0_MME_SBTE1_ETF_BASE,
[GAUDI2_ETF_DCORE0_MME_SBTE2] = mmDCORE0_MME_SBTE2_ETF_BASE,
[GAUDI2_ETF_DCORE0_MME_SBTE3] = mmDCORE0_MME_SBTE3_ETF_BASE,
[GAUDI2_ETF_DCORE0_MME_SBTE4] = mmDCORE0_MME_SBTE4_ETF_BASE,
[GAUDI2_ETF_DCORE0_MME_ACC] = mmDCORE0_MME_ACC_ETF_BASE,
[GAUDI2_ETF_DCORE0_SM] = mmDCORE0_SM_ETF_BASE,
[GAUDI2_ETF_DCORE0_EDMA0_CS] = mmDCORE0_EDMA0_CS_ETF_BASE,
[GAUDI2_ETF_DCORE0_EDMA1_CS] = mmDCORE0_EDMA1_CS_ETF_BASE,
[GAUDI2_ETF_DCORE0_VDEC0_CS] = mmDCORE0_VDEC0_CS_ETF_BASE,
[GAUDI2_ETF_DCORE0_VDEC1_CS] = mmDCORE0_VDEC1_CS_ETF_BASE,
[GAUDI2_ETF_DCORE1_HMMU0_CS] = mmDCORE1_HMMU0_CS_ETF_BASE,
[GAUDI2_ETF_DCORE1_HMMU1_CS] = mmDCORE1_HMMU1_CS_ETF_BASE,
[GAUDI2_ETF_DCORE1_HMMU2_CS] = mmDCORE1_HMMU2_CS_ETF_BASE,
[GAUDI2_ETF_DCORE1_HMMU3_CS] = mmDCORE1_HMMU3_CS_ETF_BASE,
[GAUDI2_ETF_DCORE1_MME_CTRL] = mmDCORE1_MME_CTRL_ETF_BASE,
[GAUDI2_ETF_DCORE1_MME_SBTE0] = mmDCORE1_MME_SBTE0_ETF_BASE,
[GAUDI2_ETF_DCORE1_MME_SBTE1] = mmDCORE1_MME_SBTE1_ETF_BASE,
[GAUDI2_ETF_DCORE1_MME_SBTE2] = mmDCORE1_MME_SBTE2_ETF_BASE,
[GAUDI2_ETF_DCORE1_MME_SBTE3] = mmDCORE1_MME_SBTE3_ETF_BASE,
[GAUDI2_ETF_DCORE1_MME_SBTE4] = mmDCORE1_MME_SBTE4_ETF_BASE,
[GAUDI2_ETF_DCORE1_MME_ACC] = mmDCORE1_MME_ACC_ETF_BASE,
[GAUDI2_ETF_DCORE1_SM] = mmDCORE1_SM_ETF_BASE,
[GAUDI2_ETF_DCORE1_EDMA0_CS] = mmDCORE1_EDMA0_CS_ETF_BASE,
[GAUDI2_ETF_DCORE1_EDMA1_CS] = mmDCORE1_EDMA1_CS_ETF_BASE,
[GAUDI2_ETF_DCORE1_VDEC0_CS] = mmDCORE1_VDEC0_CS_ETF_BASE,
[GAUDI2_ETF_DCORE1_VDEC1_CS] = mmDCORE1_VDEC1_CS_ETF_BASE,
[GAUDI2_ETF_DCORE2_HMMU0_CS] = mmDCORE2_HMMU0_CS_ETF_BASE,
[GAUDI2_ETF_DCORE2_HMMU1_CS] = mmDCORE2_HMMU1_CS_ETF_BASE,
[GAUDI2_ETF_DCORE2_HMMU2_CS] = mmDCORE2_HMMU2_CS_ETF_BASE,
[GAUDI2_ETF_DCORE2_HMMU3_CS] = mmDCORE2_HMMU3_CS_ETF_BASE,
[GAUDI2_ETF_DCORE2_MME_CTRL] = mmDCORE2_MME_CTRL_ETF_BASE,
[GAUDI2_ETF_DCORE2_MME_SBTE0] = mmDCORE2_MME_SBTE0_ETF_BASE,
[GAUDI2_ETF_DCORE2_MME_SBTE1] = mmDCORE2_MME_SBTE1_ETF_BASE,
[GAUDI2_ETF_DCORE2_MME_SBTE2] = mmDCORE2_MME_SBTE2_ETF_BASE,
[GAUDI2_ETF_DCORE2_MME_SBTE3] = mmDCORE2_MME_SBTE3_ETF_BASE,
[GAUDI2_ETF_DCORE2_MME_SBTE4] = mmDCORE2_MME_SBTE4_ETF_BASE,
[GAUDI2_ETF_DCORE2_MME_ACC] = mmDCORE2_MME_ACC_ETF_BASE,
[GAUDI2_ETF_DCORE2_SM] = mmDCORE2_SM_ETF_BASE,
[GAUDI2_ETF_DCORE2_EDMA0_CS] = mmDCORE2_EDMA0_CS_ETF_BASE,
[GAUDI2_ETF_DCORE2_EDMA1_CS] = mmDCORE2_EDMA1_CS_ETF_BASE,
[GAUDI2_ETF_DCORE2_VDEC0_CS] = mmDCORE2_VDEC0_CS_ETF_BASE,
[GAUDI2_ETF_DCORE2_VDEC1_CS] = mmDCORE2_VDEC1_CS_ETF_BASE,
[GAUDI2_ETF_DCORE3_HMMU0_CS] = mmDCORE3_HMMU0_CS_ETF_BASE,
[GAUDI2_ETF_DCORE3_HMMU1_CS] = mmDCORE3_HMMU1_CS_ETF_BASE,
[GAUDI2_ETF_DCORE3_HMMU2_CS] = mmDCORE3_HMMU2_CS_ETF_BASE,
[GAUDI2_ETF_DCORE3_HMMU3_CS] = mmDCORE3_HMMU3_CS_ETF_BASE,
[GAUDI2_ETF_DCORE3_MME_CTRL] = mmDCORE3_MME_CTRL_ETF_BASE,
[GAUDI2_ETF_DCORE3_MME_SBTE0] = mmDCORE3_MME_SBTE0_ETF_BASE,
[GAUDI2_ETF_DCORE3_MME_SBTE1] = mmDCORE3_MME_SBTE1_ETF_BASE,
[GAUDI2_ETF_DCORE3_MME_SBTE2] = mmDCORE3_MME_SBTE2_ETF_BASE,
[GAUDI2_ETF_DCORE3_MME_SBTE3] = mmDCORE3_MME_SBTE3_ETF_BASE,
[GAUDI2_ETF_DCORE3_MME_SBTE4] = mmDCORE3_MME_SBTE4_ETF_BASE,
[GAUDI2_ETF_DCORE3_MME_ACC] = mmDCORE3_MME_ACC_ETF_BASE,
[GAUDI2_ETF_DCORE3_SM] = mmDCORE3_SM_ETF_BASE,
[GAUDI2_ETF_DCORE3_EDMA0_CS] = mmDCORE3_EDMA0_CS_ETF_BASE,
[GAUDI2_ETF_DCORE3_EDMA1_CS] = mmDCORE3_EDMA1_CS_ETF_BASE,
[GAUDI2_ETF_DCORE3_VDEC0_CS] = mmDCORE3_VDEC0_CS_ETF_BASE,
[GAUDI2_ETF_DCORE3_VDEC1_CS] = mmDCORE3_VDEC1_CS_ETF_BASE,
[GAUDI2_ETF_PCIE] = mmPCIE_ETF_BASE,
[GAUDI2_ETF_PSOC] = mmPSOC_ETF_BASE,
[GAUDI2_ETF_PSOC_ARC0_CS] = mmPSOC_ARC0_CS_ETF_BASE,
[GAUDI2_ETF_PSOC_ARC1_CS] = mmPSOC_ARC1_CS_ETF_BASE,
[GAUDI2_ETF_PDMA0_CS] = mmPDMA0_CS_ETF_BASE,
[GAUDI2_ETF_PDMA1_CS] = mmPDMA1_CS_ETF_BASE,
[GAUDI2_ETF_CPU_0] = mmCPU_ETF_0_BASE,
[GAUDI2_ETF_CPU_1] = mmCPU_ETF_1_BASE,
[GAUDI2_ETF_CPU_TRACE] = mmCPU_ETF_TRACE_BASE,
[GAUDI2_ETF_PMMU_CS] = mmPMMU_CS_ETF_BASE,
[GAUDI2_ETF_ROT0_CS] = mmROT0_CS_ETF_BASE,
[GAUDI2_ETF_ROT1_CS] = mmROT1_CS_ETF_BASE,
[GAUDI2_ETF_ARC_FARM_CS] = mmARC_FARM_CS_ETF_BASE,
[GAUDI2_ETF_KDMA_CS] = mmKDMA_CS_ETF_BASE,
[GAUDI2_ETF_PCIE_VDEC0_CS] = mmPCIE_VDEC0_CS_ETF_BASE,
[GAUDI2_ETF_PCIE_VDEC1_CS] = mmPCIE_VDEC1_CS_ETF_BASE,
[GAUDI2_ETF_HBM0_MC0_CS] = mmHBM0_MC0_CS_ETF_BASE,
[GAUDI2_ETF_HBM0_MC1_CS] = mmHBM0_MC1_CS_ETF_BASE,
[GAUDI2_ETF_HBM1_MC0_CS] = mmHBM1_MC0_CS_ETF_BASE,
[GAUDI2_ETF_HBM1_MC1_CS] = mmHBM1_MC1_CS_ETF_BASE,
[GAUDI2_ETF_HBM2_MC0_CS] = mmHBM2_MC0_CS_ETF_BASE,
[GAUDI2_ETF_HBM2_MC1_CS] = mmHBM2_MC1_CS_ETF_BASE,
[GAUDI2_ETF_HBM3_MC0_CS] = mmHBM3_MC0_CS_ETF_BASE,
[GAUDI2_ETF_HBM3_MC1_CS] = mmHBM3_MC1_CS_ETF_BASE,
[GAUDI2_ETF_HBM4_MC0_CS] = mmHBM4_MC0_CS_ETF_BASE,
[GAUDI2_ETF_HBM4_MC1_CS] = mmHBM4_MC1_CS_ETF_BASE,
[GAUDI2_ETF_HBM5_MC0_CS] = mmHBM5_MC0_CS_ETF_BASE,
[GAUDI2_ETF_HBM5_MC1_CS] = mmHBM5_MC1_CS_ETF_BASE,
[GAUDI2_ETF_NIC0_DBG_0] = mmNIC0_DBG_ETF_0_BASE,
[GAUDI2_ETF_NIC0_DBG_1] = mmNIC0_DBG_ETF_1_BASE,
[GAUDI2_ETF_NIC1_DBG_0] = mmNIC1_DBG_ETF_0_BASE,
[GAUDI2_ETF_NIC1_DBG_1] = mmNIC1_DBG_ETF_1_BASE,
[GAUDI2_ETF_NIC2_DBG_0] = mmNIC2_DBG_ETF_0_BASE,
[GAUDI2_ETF_NIC2_DBG_1] = mmNIC2_DBG_ETF_1_BASE,
[GAUDI2_ETF_NIC3_DBG_0] = mmNIC3_DBG_ETF_0_BASE,
[GAUDI2_ETF_NIC3_DBG_1] = mmNIC3_DBG_ETF_1_BASE,
[GAUDI2_ETF_NIC4_DBG_0] = mmNIC4_DBG_ETF_0_BASE,
[GAUDI2_ETF_NIC4_DBG_1] = mmNIC4_DBG_ETF_1_BASE,
[GAUDI2_ETF_NIC5_DBG_0] = mmNIC5_DBG_ETF_0_BASE,
[GAUDI2_ETF_NIC5_DBG_1] = mmNIC5_DBG_ETF_1_BASE,
[GAUDI2_ETF_NIC6_DBG_0] = mmNIC6_DBG_ETF_0_BASE,
[GAUDI2_ETF_NIC6_DBG_1] = mmNIC6_DBG_ETF_1_BASE,
[GAUDI2_ETF_NIC7_DBG_0] = mmNIC7_DBG_ETF_0_BASE,
[GAUDI2_ETF_NIC7_DBG_1] = mmNIC7_DBG_ETF_1_BASE,
[GAUDI2_ETF_NIC8_DBG_0] = mmNIC8_DBG_ETF_0_BASE,
[GAUDI2_ETF_NIC8_DBG_1] = mmNIC8_DBG_ETF_1_BASE,
[GAUDI2_ETF_NIC9_DBG_0] = mmNIC9_DBG_ETF_0_BASE,
[GAUDI2_ETF_NIC9_DBG_1] = mmNIC9_DBG_ETF_1_BASE,
[GAUDI2_ETF_NIC10_DBG_0] = mmNIC10_DBG_ETF_0_BASE,
[GAUDI2_ETF_NIC10_DBG_1] = mmNIC10_DBG_ETF_1_BASE,
[GAUDI2_ETF_NIC11_DBG_0] = mmNIC11_DBG_ETF_0_BASE,
[GAUDI2_ETF_NIC11_DBG_1] = mmNIC11_DBG_ETF_1_BASE
};
static u64 debug_funnel_regs[GAUDI2_FUNNEL_LAST + 1] = {
[GAUDI2_FUNNEL_DCORE0_TPC0_EML] = mmDCORE0_TPC0_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_TPC1_EML] = mmDCORE0_TPC1_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_TPC2_EML] = mmDCORE0_TPC2_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_TPC3_EML] = mmDCORE0_TPC3_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_TPC4_EML] = mmDCORE0_TPC4_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_TPC5_EML] = mmDCORE0_TPC5_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_TPC6_EML] = mmDCORE0_TPC6_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_TPC0_EML] = mmDCORE1_TPC0_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_TPC1_EML] = mmDCORE1_TPC1_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_TPC2_EML] = mmDCORE1_TPC2_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_TPC3_EML] = mmDCORE1_TPC3_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_TPC4_EML] = mmDCORE1_TPC4_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_TPC5_EML] = mmDCORE1_TPC5_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_TPC0_EML] = mmDCORE2_TPC0_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_TPC1_EML] = mmDCORE2_TPC1_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_TPC2_EML] = mmDCORE2_TPC2_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_TPC3_EML] = mmDCORE2_TPC3_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_TPC4_EML] = mmDCORE2_TPC4_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_TPC5_EML] = mmDCORE2_TPC5_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_TPC0_EML] = mmDCORE3_TPC0_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_TPC1_EML] = mmDCORE3_TPC1_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_TPC2_EML] = mmDCORE3_TPC2_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_TPC3_EML] = mmDCORE3_TPC3_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_TPC4_EML] = mmDCORE3_TPC4_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_TPC5_EML] = mmDCORE3_TPC5_EML_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_XFT] = mmDCORE0_XFT_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_TFT0] = mmDCORE0_TFT0_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_TFT1] = mmDCORE0_TFT1_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_TFT2] = mmDCORE0_TFT2_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_RTR0] = mmDCORE0_RTR0_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_RTR1] = mmDCORE0_RTR1_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_RTR2] = mmDCORE0_RTR2_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_RTR3] = mmDCORE0_RTR3_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_RTR4] = mmDCORE0_RTR4_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_MIF0] = mmDCORE0_MIF0_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_RTR5] = mmDCORE0_RTR5_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_MIF1] = mmDCORE0_MIF1_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_RTR6] = mmDCORE0_RTR6_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_MIF2] = mmDCORE0_MIF2_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_RTR7] = mmDCORE0_RTR7_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_MIF3] = mmDCORE0_MIF3_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_XFT] = mmDCORE1_XFT_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_TFT0] = mmDCORE1_TFT0_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_TFT1] = mmDCORE1_TFT1_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_TFT2] = mmDCORE1_TFT2_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_RTR0] = mmDCORE1_RTR0_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_MIF0] = mmDCORE1_MIF0_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_RTR1] = mmDCORE1_RTR1_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_MIF1] = mmDCORE1_MIF1_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_RTR2] = mmDCORE1_RTR2_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_MIF2] = mmDCORE1_MIF2_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_RTR3] = mmDCORE1_RTR3_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_MIF3] = mmDCORE1_MIF3_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_RTR4] = mmDCORE1_RTR4_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_RTR5] = mmDCORE1_RTR5_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_RTR6] = mmDCORE1_RTR6_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_RTR7] = mmDCORE1_RTR7_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_XFT] = mmDCORE2_XFT_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_TFT0] = mmDCORE2_TFT0_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_TFT1] = mmDCORE2_TFT1_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_TFT2] = mmDCORE2_TFT2_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_RTR0] = mmDCORE2_RTR0_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_RTR1] = mmDCORE2_RTR1_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_RTR2] = mmDCORE2_RTR2_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_RTR3] = mmDCORE2_RTR3_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_RTR4] = mmDCORE2_RTR4_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_MIF0] = mmDCORE2_MIF0_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_RTR5] = mmDCORE2_RTR5_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_MIF1] = mmDCORE2_MIF1_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_RTR6] = mmDCORE2_RTR6_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_MIF2] = mmDCORE2_MIF2_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_RTR7] = mmDCORE2_RTR7_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_MIF3] = mmDCORE2_MIF3_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_XFT] = mmDCORE3_XFT_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_TFT0] = mmDCORE3_TFT0_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_TFT1] = mmDCORE3_TFT1_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_TFT2] = mmDCORE3_TFT2_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_RTR0] = mmDCORE3_RTR0_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_MIF0] = mmDCORE3_MIF0_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_RTR1] = mmDCORE3_RTR1_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_MIF1] = mmDCORE3_MIF1_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_RTR2] = mmDCORE3_RTR2_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_MIF2] = mmDCORE3_MIF2_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_RTR3] = mmDCORE3_RTR3_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_MIF3] = mmDCORE3_MIF3_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_RTR4] = mmDCORE3_RTR4_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_RTR5] = mmDCORE3_RTR5_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_RTR6] = mmDCORE3_RTR6_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_RTR7] = mmDCORE3_RTR7_FUNNEL_BASE,
[GAUDI2_FUNNEL_PSOC] = mmPSOC_FUNNEL_BASE,
[GAUDI2_FUNNEL_PSOC_ARC0] = mmPSOC_ARC0_FUNNEL_BASE,
[GAUDI2_FUNNEL_PSOC_ARC1] = mmPSOC_ARC1_FUNNEL_BASE,
[GAUDI2_FUNNEL_XDMA] = mmXDMA_FUNNEL_BASE,
[GAUDI2_FUNNEL_CPU] = mmCPU_FUNNEL_BASE,
[GAUDI2_FUNNEL_PMMU] = mmPMMU_FUNNEL_BASE,
[GAUDI2_FUNNEL_PMMU_DEC] = mmPMMU_FUNNEL_DEC_BASE,
[GAUDI2_FUNNEL_DCORE0_XBAR_MID] = mmDCORE0_XBAR_MID_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE0_XBAR_EDGE] = mmDCORE0_XBAR_EDGE_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_XBAR_MID] = mmDCORE1_XBAR_MID_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE1_XBAR_EDGE] = mmDCORE1_XBAR_EDGE_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_XBAR_MID] = mmDCORE2_XBAR_MID_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE2_XBAR_EDGE] = mmDCORE2_XBAR_EDGE_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_XBAR_MID] = mmDCORE3_XBAR_MID_FUNNEL_BASE,
[GAUDI2_FUNNEL_DCORE3_XBAR_EDGE] = mmDCORE3_XBAR_EDGE_FUNNEL_BASE,
[GAUDI2_FUNNEL_ARC_FARM] = mmARC_FARM_FUNNEL_BASE,
[GAUDI2_FUNNEL_HBM0_MC0] = mmHBM0_MC0_FUNNEL_BASE,
[GAUDI2_FUNNEL_HBM0_MC1] = mmHBM0_MC1_FUNNEL_BASE,
[GAUDI2_FUNNEL_HBM1_MC0] = mmHBM1_MC0_FUNNEL_BASE,
[GAUDI2_FUNNEL_HBM1_MC1] = mmHBM1_MC1_FUNNEL_BASE,
[GAUDI2_FUNNEL_HBM2_MC0] = mmHBM2_MC0_FUNNEL_BASE,
[GAUDI2_FUNNEL_HBM2_MC1] = mmHBM2_MC1_FUNNEL_BASE,
[GAUDI2_FUNNEL_HBM3_MC0] = mmHBM3_MC0_FUNNEL_BASE,
[GAUDI2_FUNNEL_HBM3_MC1] = mmHBM3_MC1_FUNNEL_BASE,
[GAUDI2_FUNNEL_HBM4_MC0] = mmHBM4_MC0_FUNNEL_BASE,
[GAUDI2_FUNNEL_HBM4_MC1] = mmHBM4_MC1_FUNNEL_BASE,
[GAUDI2_FUNNEL_HBM5_MC0] = mmHBM5_MC0_FUNNEL_BASE,
[GAUDI2_FUNNEL_HBM5_MC1] = mmHBM5_MC1_FUNNEL_BASE,
[GAUDI2_FUNNEL_NIC0_DBG_TX] = mmNIC0_DBG_FUNNEL_TX_BASE,
[GAUDI2_FUNNEL_NIC0_DBG_NCH] = mmNIC0_DBG_FUNNEL_NCH_BASE,
[GAUDI2_FUNNEL_NIC1_DBG_TX] = mmNIC1_DBG_FUNNEL_TX_BASE,
[GAUDI2_FUNNEL_NIC1_DBG_NCH] = mmNIC1_DBG_FUNNEL_NCH_BASE,
[GAUDI2_FUNNEL_NIC2_DBG_TX] = mmNIC2_DBG_FUNNEL_TX_BASE,
[GAUDI2_FUNNEL_NIC2_DBG_NCH] = mmNIC2_DBG_FUNNEL_NCH_BASE,
[GAUDI2_FUNNEL_NIC3_DBG_TX] = mmNIC3_DBG_FUNNEL_TX_BASE,
[GAUDI2_FUNNEL_NIC3_DBG_NCH] = mmNIC3_DBG_FUNNEL_NCH_BASE,
[GAUDI2_FUNNEL_NIC4_DBG_TX] = mmNIC4_DBG_FUNNEL_TX_BASE,
[GAUDI2_FUNNEL_NIC4_DBG_NCH] = mmNIC4_DBG_FUNNEL_NCH_BASE,
[GAUDI2_FUNNEL_NIC5_DBG_TX] = mmNIC5_DBG_FUNNEL_TX_BASE,
[GAUDI2_FUNNEL_NIC5_DBG_NCH] = mmNIC5_DBG_FUNNEL_NCH_BASE,
[GAUDI2_FUNNEL_NIC6_DBG_TX] = mmNIC6_DBG_FUNNEL_TX_BASE,
[GAUDI2_FUNNEL_NIC6_DBG_NCH] = mmNIC6_DBG_FUNNEL_NCH_BASE,
[GAUDI2_FUNNEL_NIC7_DBG_TX] = mmNIC7_DBG_FUNNEL_TX_BASE,
[GAUDI2_FUNNEL_NIC7_DBG_NCH] = mmNIC7_DBG_FUNNEL_NCH_BASE,
[GAUDI2_FUNNEL_NIC8_DBG_TX] = mmNIC8_DBG_FUNNEL_TX_BASE,
[GAUDI2_FUNNEL_NIC8_DBG_NCH] = mmNIC8_DBG_FUNNEL_NCH_BASE,
[GAUDI2_FUNNEL_NIC9_DBG_TX] = mmNIC9_DBG_FUNNEL_TX_BASE,
[GAUDI2_FUNNEL_NIC9_DBG_NCH] = mmNIC9_DBG_FUNNEL_NCH_BASE,
[GAUDI2_FUNNEL_NIC10_DBG_TX] = mmNIC10_DBG_FUNNEL_TX_BASE,
[GAUDI2_FUNNEL_NIC10_DBG_NCH] = mmNIC10_DBG_FUNNEL_NCH_BASE,
[GAUDI2_FUNNEL_NIC11_DBG_TX] = mmNIC11_DBG_FUNNEL_TX_BASE,
[GAUDI2_FUNNEL_NIC11_DBG_NCH] = mmNIC11_DBG_FUNNEL_NCH_BASE
};
static u64 debug_bmon_regs[GAUDI2_BMON_LAST + 1] = {
[GAUDI2_BMON_DCORE0_TPC0_EML_0] = mmDCORE0_TPC0_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE0_TPC0_EML_1] = mmDCORE0_TPC0_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE0_TPC0_EML_2] = mmDCORE0_TPC0_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE0_TPC0_EML_3] = mmDCORE0_TPC0_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE0_TPC1_EML_0] = mmDCORE0_TPC1_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE0_TPC1_EML_1] = mmDCORE0_TPC1_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE0_TPC1_EML_2] = mmDCORE0_TPC1_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE0_TPC1_EML_3] = mmDCORE0_TPC1_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE0_TPC2_EML_0] = mmDCORE0_TPC2_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE0_TPC2_EML_1] = mmDCORE0_TPC2_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE0_TPC2_EML_2] = mmDCORE0_TPC2_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE0_TPC2_EML_3] = mmDCORE0_TPC2_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE0_TPC3_EML_0] = mmDCORE0_TPC3_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE0_TPC3_EML_1] = mmDCORE0_TPC3_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE0_TPC3_EML_2] = mmDCORE0_TPC3_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE0_TPC3_EML_3] = mmDCORE0_TPC3_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE0_TPC4_EML_0] = mmDCORE0_TPC4_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE0_TPC4_EML_1] = mmDCORE0_TPC4_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE0_TPC4_EML_2] = mmDCORE0_TPC4_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE0_TPC4_EML_3] = mmDCORE0_TPC4_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE0_TPC5_EML_0] = mmDCORE0_TPC5_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE0_TPC5_EML_1] = mmDCORE0_TPC5_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE0_TPC5_EML_2] = mmDCORE0_TPC5_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE0_TPC5_EML_3] = mmDCORE0_TPC5_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE0_TPC6_EML_0] = mmDCORE0_TPC6_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE0_TPC6_EML_1] = mmDCORE0_TPC6_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE0_TPC6_EML_2] = mmDCORE0_TPC6_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE0_TPC6_EML_3] = mmDCORE0_TPC6_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE1_TPC0_EML_0] = mmDCORE1_TPC0_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE1_TPC0_EML_1] = mmDCORE1_TPC0_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE1_TPC0_EML_2] = mmDCORE1_TPC0_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE1_TPC0_EML_3] = mmDCORE1_TPC0_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE1_TPC1_EML_0] = mmDCORE1_TPC1_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE1_TPC1_EML_1] = mmDCORE1_TPC1_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE1_TPC1_EML_2] = mmDCORE1_TPC1_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE1_TPC1_EML_3] = mmDCORE1_TPC1_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE1_TPC2_EML_0] = mmDCORE1_TPC2_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE1_TPC2_EML_1] = mmDCORE1_TPC2_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE1_TPC2_EML_2] = mmDCORE1_TPC2_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE1_TPC2_EML_3] = mmDCORE1_TPC2_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE1_TPC3_EML_0] = mmDCORE1_TPC3_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE1_TPC3_EML_1] = mmDCORE1_TPC3_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE1_TPC3_EML_2] = mmDCORE1_TPC3_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE1_TPC3_EML_3] = mmDCORE1_TPC3_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE1_TPC4_EML_0] = mmDCORE1_TPC4_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE1_TPC4_EML_1] = mmDCORE1_TPC4_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE1_TPC4_EML_2] = mmDCORE1_TPC4_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE1_TPC4_EML_3] = mmDCORE1_TPC4_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE1_TPC5_EML_0] = mmDCORE1_TPC5_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE1_TPC5_EML_1] = mmDCORE1_TPC5_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE1_TPC5_EML_2] = mmDCORE1_TPC5_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE1_TPC5_EML_3] = mmDCORE1_TPC5_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE2_TPC0_EML_0] = mmDCORE2_TPC0_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE2_TPC0_EML_1] = mmDCORE2_TPC0_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE2_TPC0_EML_2] = mmDCORE2_TPC0_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE2_TPC0_EML_3] = mmDCORE2_TPC0_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE2_TPC1_EML_0] = mmDCORE2_TPC1_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE2_TPC1_EML_1] = mmDCORE2_TPC1_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE2_TPC1_EML_2] = mmDCORE2_TPC1_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE2_TPC1_EML_3] = mmDCORE2_TPC1_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE2_TPC2_EML_0] = mmDCORE2_TPC2_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE2_TPC2_EML_1] = mmDCORE2_TPC2_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE2_TPC2_EML_2] = mmDCORE2_TPC2_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE2_TPC2_EML_3] = mmDCORE2_TPC2_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE2_TPC3_EML_0] = mmDCORE2_TPC3_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE2_TPC3_EML_1] = mmDCORE2_TPC3_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE2_TPC3_EML_2] = mmDCORE2_TPC3_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE2_TPC3_EML_3] = mmDCORE2_TPC3_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE2_TPC4_EML_0] = mmDCORE2_TPC4_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE2_TPC4_EML_1] = mmDCORE2_TPC4_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE2_TPC4_EML_2] = mmDCORE2_TPC4_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE2_TPC4_EML_3] = mmDCORE2_TPC4_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE2_TPC5_EML_0] = mmDCORE2_TPC5_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE2_TPC5_EML_1] = mmDCORE2_TPC5_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE2_TPC5_EML_2] = mmDCORE2_TPC5_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE2_TPC5_EML_3] = mmDCORE2_TPC5_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE3_TPC0_EML_0] = mmDCORE3_TPC0_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE3_TPC0_EML_1] = mmDCORE3_TPC0_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE3_TPC0_EML_2] = mmDCORE3_TPC0_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE3_TPC0_EML_3] = mmDCORE3_TPC0_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE3_TPC1_EML_0] = mmDCORE3_TPC1_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE3_TPC1_EML_1] = mmDCORE3_TPC1_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE3_TPC1_EML_2] = mmDCORE3_TPC1_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE3_TPC1_EML_3] = mmDCORE3_TPC1_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE3_TPC2_EML_0] = mmDCORE3_TPC2_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE3_TPC2_EML_1] = mmDCORE3_TPC2_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE3_TPC2_EML_2] = mmDCORE3_TPC2_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE3_TPC2_EML_3] = mmDCORE3_TPC2_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE3_TPC3_EML_0] = mmDCORE3_TPC3_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE3_TPC3_EML_1] = mmDCORE3_TPC3_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE3_TPC3_EML_2] = mmDCORE3_TPC3_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE3_TPC3_EML_3] = mmDCORE3_TPC3_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE3_TPC4_EML_0] = mmDCORE3_TPC4_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE3_TPC4_EML_1] = mmDCORE3_TPC4_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE3_TPC4_EML_2] = mmDCORE3_TPC4_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE3_TPC4_EML_3] = mmDCORE3_TPC4_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE3_TPC5_EML_0] = mmDCORE3_TPC5_EML_BUSMON_0_BASE,
[GAUDI2_BMON_DCORE3_TPC5_EML_1] = mmDCORE3_TPC5_EML_BUSMON_1_BASE,
[GAUDI2_BMON_DCORE3_TPC5_EML_2] = mmDCORE3_TPC5_EML_BUSMON_2_BASE,
[GAUDI2_BMON_DCORE3_TPC5_EML_3] = mmDCORE3_TPC5_EML_BUSMON_3_BASE,
[GAUDI2_BMON_DCORE0_HMMU0_0] = mmDCORE0_HMMU0_BMON_0_BASE,
[GAUDI2_BMON_DCORE0_HMMU0_1] = mmDCORE0_HMMU0_BMON_1_BASE,
[GAUDI2_BMON_DCORE0_HMMU0_3] = mmDCORE0_HMMU0_BMON_3_BASE,
[GAUDI2_BMON_DCORE0_HMMU0_2] = mmDCORE0_HMMU0_BMON_2_BASE,
[GAUDI2_BMON_DCORE0_HMMU0_4] = mmDCORE0_HMMU0_BMON_4_BASE,
[GAUDI2_BMON_DCORE0_HMMU1_0] = mmDCORE0_HMMU1_BMON_0_BASE,
[GAUDI2_BMON_DCORE0_HMMU1_1] = mmDCORE0_HMMU1_BMON_1_BASE,
[GAUDI2_BMON_DCORE0_HMMU1_3] = mmDCORE0_HMMU1_BMON_3_BASE,
[GAUDI2_BMON_DCORE0_HMMU1_2] = mmDCORE0_HMMU1_BMON_2_BASE,
[GAUDI2_BMON_DCORE0_HMMU1_4] = mmDCORE0_HMMU1_BMON_4_BASE,
[GAUDI2_BMON_DCORE0_HMMU2_0] = mmDCORE0_HMMU2_BMON_0_BASE,
[GAUDI2_BMON_DCORE0_HMMU2_1] = mmDCORE0_HMMU2_BMON_1_BASE,
[GAUDI2_BMON_DCORE0_HMMU2_3] = mmDCORE0_HMMU2_BMON_3_BASE,
[GAUDI2_BMON_DCORE0_HMMU2_2] = mmDCORE0_HMMU2_BMON_2_BASE,
[GAUDI2_BMON_DCORE0_HMMU2_4] = mmDCORE0_HMMU2_BMON_4_BASE,
[GAUDI2_BMON_DCORE0_HMMU3_0] = mmDCORE0_HMMU3_BMON_0_BASE,
[GAUDI2_BMON_DCORE0_HMMU3_1] = mmDCORE0_HMMU3_BMON_1_BASE,
[GAUDI2_BMON_DCORE0_HMMU3_3] = mmDCORE0_HMMU3_BMON_3_BASE,
[GAUDI2_BMON_DCORE0_HMMU3_2] = mmDCORE0_HMMU3_BMON_2_BASE,
[GAUDI2_BMON_DCORE0_HMMU3_4] = mmDCORE0_HMMU3_BMON_4_BASE,
[GAUDI2_BMON_DCORE0_MME_CTRL_0] = mmDCORE0_MME_CTRL_BMON0_BASE,
[GAUDI2_BMON_DCORE0_MME_CTRL_1] = mmDCORE0_MME_CTRL_BMON1_BASE,
[GAUDI2_BMON_DCORE0_MME_CTRL_2] = mmDCORE0_MME_CTRL_BMON2_BASE,
[GAUDI2_BMON_DCORE0_MME_CTRL_3] = mmDCORE0_MME_CTRL_BMON3_BASE,
[GAUDI2_BMON_DCORE0_MME_SBTE0_0] = mmDCORE0_MME_SBTE0_BMON0_BASE,
[GAUDI2_BMON_DCORE0_MME_SBTE1_0] = mmDCORE0_MME_SBTE1_BMON0_BASE,
[GAUDI2_BMON_DCORE0_MME_SBTE2_0] = mmDCORE0_MME_SBTE2_BMON0_BASE,
[GAUDI2_BMON_DCORE0_MME_SBTE3_0] = mmDCORE0_MME_SBTE3_BMON0_BASE,
[GAUDI2_BMON_DCORE0_MME_SBTE4_0] = mmDCORE0_MME_SBTE4_BMON0_BASE,
[GAUDI2_BMON_DCORE0_MME_ACC_0] = mmDCORE0_MME_ACC_BMON0_BASE,
[GAUDI2_BMON_DCORE0_MME_ACC_1] = mmDCORE0_MME_ACC_BMON1_BASE,
[GAUDI2_BMON_DCORE0_SM] = mmDCORE0_SM_BMON_BASE,
[GAUDI2_BMON_DCORE0_SM_1] = mmDCORE0_SM_BMON1_BASE,
[GAUDI2_BMON_DCORE0_EDMA0_0] = mmDCORE0_EDMA0_BMON_0_BASE,
[GAUDI2_BMON_DCORE0_EDMA0_1] = mmDCORE0_EDMA0_BMON_1_BASE,
[GAUDI2_BMON_DCORE0_EDMA1_0] = mmDCORE0_EDMA1_BMON_0_BASE,
[GAUDI2_BMON_DCORE0_EDMA1_1] = mmDCORE0_EDMA1_BMON_1_BASE,
[GAUDI2_BMON_DCORE0_VDEC0_0] = mmDCORE0_VDEC0_BMON_0_BASE,
[GAUDI2_BMON_DCORE0_VDEC0_1] = mmDCORE0_VDEC0_BMON_1_BASE,
[GAUDI2_BMON_DCORE0_VDEC0_2] = mmDCORE0_VDEC0_BMON_2_BASE,
[GAUDI2_BMON_DCORE0_VDEC1_0] = mmDCORE0_VDEC1_BMON_0_BASE,
[GAUDI2_BMON_DCORE0_VDEC1_1] = mmDCORE0_VDEC1_BMON_1_BASE,
[GAUDI2_BMON_DCORE0_VDEC1_2] = mmDCORE0_VDEC1_BMON_2_BASE,
[GAUDI2_BMON_DCORE1_HMMU0_0] = mmDCORE1_HMMU0_BMON_0_BASE,
[GAUDI2_BMON_DCORE1_HMMU0_1] = mmDCORE1_HMMU0_BMON_1_BASE,
[GAUDI2_BMON_DCORE1_HMMU0_3] = mmDCORE1_HMMU0_BMON_3_BASE,
[GAUDI2_BMON_DCORE1_HMMU0_2] = mmDCORE1_HMMU0_BMON_2_BASE,
[GAUDI2_BMON_DCORE1_HMMU0_4] = mmDCORE1_HMMU0_BMON_4_BASE,
[GAUDI2_BMON_DCORE1_HMMU1_0] = mmDCORE1_HMMU1_BMON_0_BASE,
[GAUDI2_BMON_DCORE1_HMMU1_1] = mmDCORE1_HMMU1_BMON_1_BASE,
[GAUDI2_BMON_DCORE1_HMMU1_3] = mmDCORE1_HMMU1_BMON_3_BASE,
[GAUDI2_BMON_DCORE1_HMMU1_2] = mmDCORE1_HMMU1_BMON_2_BASE,
[GAUDI2_BMON_DCORE1_HMMU1_4] = mmDCORE1_HMMU1_BMON_4_BASE,
[GAUDI2_BMON_DCORE1_HMMU2_0] = mmDCORE1_HMMU2_BMON_0_BASE,
[GAUDI2_BMON_DCORE1_HMMU2_1] = mmDCORE1_HMMU2_BMON_1_BASE,
[GAUDI2_BMON_DCORE1_HMMU2_3] = mmDCORE1_HMMU2_BMON_3_BASE,
[GAUDI2_BMON_DCORE1_HMMU2_2] = mmDCORE1_HMMU2_BMON_2_BASE,
[GAUDI2_BMON_DCORE1_HMMU2_4] = mmDCORE1_HMMU2_BMON_4_BASE,
[GAUDI2_BMON_DCORE1_HMMU3_0] = mmDCORE1_HMMU3_BMON_0_BASE,
[GAUDI2_BMON_DCORE1_HMMU3_1] = mmDCORE1_HMMU3_BMON_1_BASE,
[GAUDI2_BMON_DCORE1_HMMU3_3] = mmDCORE1_HMMU3_BMON_3_BASE,
[GAUDI2_BMON_DCORE1_HMMU3_2] = mmDCORE1_HMMU3_BMON_2_BASE,
[GAUDI2_BMON_DCORE1_HMMU3_4] = mmDCORE1_HMMU3_BMON_4_BASE,
[GAUDI2_BMON_DCORE1_MME_CTRL_0] = mmDCORE1_MME_CTRL_BMON0_BASE,
[GAUDI2_BMON_DCORE1_MME_CTRL_1] = mmDCORE1_MME_CTRL_BMON1_BASE,
[GAUDI2_BMON_DCORE1_MME_CTRL_2] = mmDCORE1_MME_CTRL_BMON2_BASE,
[GAUDI2_BMON_DCORE1_MME_CTRL_3] = mmDCORE1_MME_CTRL_BMON3_BASE,
[GAUDI2_BMON_DCORE1_MME_SBTE0_0] = mmDCORE1_MME_SBTE0_BMON0_BASE,
[GAUDI2_BMON_DCORE1_MME_SBTE1_0] = mmDCORE1_MME_SBTE1_BMON0_BASE,
[GAUDI2_BMON_DCORE1_MME_SBTE2_0] = mmDCORE1_MME_SBTE2_BMON0_BASE,
[GAUDI2_BMON_DCORE1_MME_SBTE3_0] = mmDCORE1_MME_SBTE3_BMON0_BASE,
[GAUDI2_BMON_DCORE1_MME_SBTE4_0] = mmDCORE1_MME_SBTE4_BMON0_BASE,
[GAUDI2_BMON_DCORE1_MME_ACC_0] = mmDCORE1_MME_ACC_BMON0_BASE,
[GAUDI2_BMON_DCORE1_MME_ACC_1] = mmDCORE1_MME_ACC_BMON1_BASE,
[GAUDI2_BMON_DCORE1_SM] = mmDCORE1_SM_BMON_BASE,
[GAUDI2_BMON_DCORE1_SM_1] = mmDCORE1_SM_BMON1_BASE,
[GAUDI2_BMON_DCORE1_EDMA0_0] = mmDCORE1_EDMA0_BMON_0_BASE,
[GAUDI2_BMON_DCORE1_EDMA0_1] = mmDCORE1_EDMA0_BMON_1_BASE,
[GAUDI2_BMON_DCORE1_EDMA1_0] = mmDCORE1_EDMA1_BMON_0_BASE,
[GAUDI2_BMON_DCORE1_EDMA1_1] = mmDCORE1_EDMA1_BMON_1_BASE,
[GAUDI2_BMON_DCORE1_VDEC0_0] = mmDCORE1_VDEC0_BMON_0_BASE,
[GAUDI2_BMON_DCORE1_VDEC0_1] = mmDCORE1_VDEC0_BMON_1_BASE,
[GAUDI2_BMON_DCORE1_VDEC0_2] = mmDCORE1_VDEC0_BMON_2_BASE,
[GAUDI2_BMON_DCORE1_VDEC1_0] = mmDCORE1_VDEC1_BMON_0_BASE,
[GAUDI2_BMON_DCORE1_VDEC1_1] = mmDCORE1_VDEC1_BMON_1_BASE,
[GAUDI2_BMON_DCORE1_VDEC1_2] = mmDCORE1_VDEC1_BMON_2_BASE,
[GAUDI2_BMON_DCORE2_HMMU0_0] = mmDCORE2_HMMU0_BMON_0_BASE,
[GAUDI2_BMON_DCORE2_HMMU0_1] = mmDCORE2_HMMU0_BMON_1_BASE,
[GAUDI2_BMON_DCORE2_HMMU0_3] = mmDCORE2_HMMU0_BMON_3_BASE,
[GAUDI2_BMON_DCORE2_HMMU0_2] = mmDCORE2_HMMU0_BMON_2_BASE,
[GAUDI2_BMON_DCORE2_HMMU0_4] = mmDCORE2_HMMU0_BMON_4_BASE,
[GAUDI2_BMON_DCORE2_HMMU1_0] = mmDCORE2_HMMU1_BMON_0_BASE,
[GAUDI2_BMON_DCORE2_HMMU1_1] = mmDCORE2_HMMU1_BMON_1_BASE,
[GAUDI2_BMON_DCORE2_HMMU1_3] = mmDCORE2_HMMU1_BMON_3_BASE,
[GAUDI2_BMON_DCORE2_HMMU1_2] = mmDCORE2_HMMU1_BMON_2_BASE,
[GAUDI2_BMON_DCORE2_HMMU1_4] = mmDCORE2_HMMU1_BMON_4_BASE,
[GAUDI2_BMON_DCORE2_HMMU2_0] = mmDCORE2_HMMU2_BMON_0_BASE,
[GAUDI2_BMON_DCORE2_HMMU2_1] = mmDCORE2_HMMU2_BMON_1_BASE,
[GAUDI2_BMON_DCORE2_HMMU2_3] = mmDCORE2_HMMU2_BMON_3_BASE,
[GAUDI2_BMON_DCORE2_HMMU2_2] = mmDCORE2_HMMU2_BMON_2_BASE,
[GAUDI2_BMON_DCORE2_HMMU2_4] = mmDCORE2_HMMU2_BMON_4_BASE,
[GAUDI2_BMON_DCORE2_HMMU3_0] = mmDCORE2_HMMU3_BMON_0_BASE,
[GAUDI2_BMON_DCORE2_HMMU3_1] = mmDCORE2_HMMU3_BMON_1_BASE,
[GAUDI2_BMON_DCORE2_HMMU3_3] = mmDCORE2_HMMU3_BMON_3_BASE,
[GAUDI2_BMON_DCORE2_HMMU3_2] = mmDCORE2_HMMU3_BMON_2_BASE,
[GAUDI2_BMON_DCORE2_HMMU3_4] = mmDCORE2_HMMU3_BMON_4_BASE,
[GAUDI2_BMON_DCORE2_MME_CTRL_0] = mmDCORE2_MME_CTRL_BMON0_BASE,
[GAUDI2_BMON_DCORE2_MME_CTRL_1] = mmDCORE2_MME_CTRL_BMON1_BASE,
[GAUDI2_BMON_DCORE2_MME_CTRL_2] = mmDCORE2_MME_CTRL_BMON2_BASE,
[GAUDI2_BMON_DCORE2_MME_CTRL_3] = mmDCORE2_MME_CTRL_BMON3_BASE,
[GAUDI2_BMON_DCORE2_MME_SBTE0_0] = mmDCORE2_MME_SBTE0_BMON0_BASE,
[GAUDI2_BMON_DCORE2_MME_SBTE1_0] = mmDCORE2_MME_SBTE1_BMON0_BASE,
[GAUDI2_BMON_DCORE2_MME_SBTE2_0] = mmDCORE2_MME_SBTE2_BMON0_BASE,
[GAUDI2_BMON_DCORE2_MME_SBTE3_0] = mmDCORE2_MME_SBTE3_BMON0_BASE,
[GAUDI2_BMON_DCORE2_MME_SBTE4_0] = mmDCORE2_MME_SBTE4_BMON0_BASE,
[GAUDI2_BMON_DCORE2_MME_ACC_0] = mmDCORE2_MME_ACC_BMON0_BASE,
[GAUDI2_BMON_DCORE2_MME_ACC_1] = mmDCORE2_MME_ACC_BMON1_BASE,
[GAUDI2_BMON_DCORE2_SM] = mmDCORE2_SM_BMON_BASE,
[GAUDI2_BMON_DCORE2_SM_1] = mmDCORE2_SM_BMON1_BASE,
[GAUDI2_BMON_DCORE2_EDMA0_0] = mmDCORE2_EDMA0_BMON_0_BASE,
[GAUDI2_BMON_DCORE2_EDMA0_1] = mmDCORE2_EDMA0_BMON_1_BASE,
[GAUDI2_BMON_DCORE2_EDMA1_0] = mmDCORE2_EDMA1_BMON_0_BASE,
[GAUDI2_BMON_DCORE2_EDMA1_1] = mmDCORE2_EDMA1_BMON_1_BASE,
[GAUDI2_BMON_DCORE2_VDEC0_0] = mmDCORE2_VDEC0_BMON_0_BASE,
[GAUDI2_BMON_DCORE2_VDEC0_1] = mmDCORE2_VDEC0_BMON_1_BASE,
[GAUDI2_BMON_DCORE2_VDEC0_2] = mmDCORE2_VDEC0_BMON_2_BASE,
[GAUDI2_BMON_DCORE2_VDEC1_0] = mmDCORE2_VDEC1_BMON_0_BASE,
[GAUDI2_BMON_DCORE2_VDEC1_1] = mmDCORE2_VDEC1_BMON_1_BASE,
[GAUDI2_BMON_DCORE2_VDEC1_2] = mmDCORE2_VDEC1_BMON_2_BASE,
[GAUDI2_BMON_DCORE3_HMMU0_0] = mmDCORE3_HMMU0_BMON_0_BASE,
[GAUDI2_BMON_DCORE3_HMMU0_1] = mmDCORE3_HMMU0_BMON_1_BASE,
[GAUDI2_BMON_DCORE3_HMMU0_3] = mmDCORE3_HMMU0_BMON_3_BASE,
[GAUDI2_BMON_DCORE3_HMMU0_2] = mmDCORE3_HMMU0_BMON_2_BASE,
[GAUDI2_BMON_DCORE3_HMMU0_4] = mmDCORE3_HMMU0_BMON_4_BASE,
[GAUDI2_BMON_DCORE3_HMMU1_0] = mmDCORE3_HMMU1_BMON_0_BASE,
[GAUDI2_BMON_DCORE3_HMMU1_1] = mmDCORE3_HMMU1_BMON_1_BASE,
[GAUDI2_BMON_DCORE3_HMMU1_3] = mmDCORE3_HMMU1_BMON_3_BASE,
[GAUDI2_BMON_DCORE3_HMMU1_2] = mmDCORE3_HMMU1_BMON_2_BASE,
[GAUDI2_BMON_DCORE3_HMMU1_4] = mmDCORE3_HMMU1_BMON_4_BASE,
[GAUDI2_BMON_DCORE3_HMMU2_0] = mmDCORE3_HMMU2_BMON_0_BASE,
[GAUDI2_BMON_DCORE3_HMMU2_1] = mmDCORE3_HMMU2_BMON_1_BASE,
[GAUDI2_BMON_DCORE3_HMMU2_3] = mmDCORE3_HMMU2_BMON_3_BASE,
[GAUDI2_BMON_DCORE3_HMMU2_2] = mmDCORE3_HMMU2_BMON_2_BASE,
[GAUDI2_BMON_DCORE3_HMMU2_4] = mmDCORE3_HMMU2_BMON_4_BASE,
[GAUDI2_BMON_DCORE3_HMMU3_0] = mmDCORE3_HMMU3_BMON_0_BASE,
[GAUDI2_BMON_DCORE3_HMMU3_1] = mmDCORE3_HMMU3_BMON_1_BASE,
[GAUDI2_BMON_DCORE3_HMMU3_3] = mmDCORE3_HMMU3_BMON_3_BASE,
[GAUDI2_BMON_DCORE3_HMMU3_2] = mmDCORE3_HMMU3_BMON_2_BASE,
[GAUDI2_BMON_DCORE3_HMMU3_4] = mmDCORE3_HMMU3_BMON_4_BASE,
[GAUDI2_BMON_DCORE3_MME_CTRL_0] = mmDCORE3_MME_CTRL_BMON0_BASE,
[GAUDI2_BMON_DCORE3_MME_CTRL_1] = mmDCORE3_MME_CTRL_BMON1_BASE,
[GAUDI2_BMON_DCORE3_MME_CTRL_2] = mmDCORE3_MME_CTRL_BMON2_BASE,
[GAUDI2_BMON_DCORE3_MME_CTRL_3] = mmDCORE3_MME_CTRL_BMON3_BASE,
[GAUDI2_BMON_DCORE3_MME_SBTE0_0] = mmDCORE3_MME_SBTE0_BMON0_BASE,
[GAUDI2_BMON_DCORE3_MME_SBTE1_0] = mmDCORE3_MME_SBTE1_BMON0_BASE,
[GAUDI2_BMON_DCORE3_MME_SBTE2_0] = mmDCORE3_MME_SBTE2_BMON0_BASE,
[GAUDI2_BMON_DCORE3_MME_SBTE3_0] = mmDCORE3_MME_SBTE3_BMON0_BASE,
[GAUDI2_BMON_DCORE3_MME_SBTE4_0] = mmDCORE3_MME_SBTE4_BMON0_BASE,
[GAUDI2_BMON_DCORE3_MME_ACC_0] = mmDCORE3_MME_ACC_BMON0_BASE,
[GAUDI2_BMON_DCORE3_MME_ACC_1] = mmDCORE3_MME_ACC_BMON1_BASE,
[GAUDI2_BMON_DCORE3_SM] = mmDCORE3_SM_BMON_BASE,
[GAUDI2_BMON_DCORE3_SM_1] = mmDCORE3_SM_BMON1_BASE,
[GAUDI2_BMON_DCORE3_EDMA0_0] = mmDCORE3_EDMA0_BMON_0_BASE,
[GAUDI2_BMON_DCORE3_EDMA0_1] = mmDCORE3_EDMA0_BMON_1_BASE,
[GAUDI2_BMON_DCORE3_EDMA1_0] = mmDCORE3_EDMA1_BMON_0_BASE,
[GAUDI2_BMON_DCORE3_EDMA1_1] = mmDCORE3_EDMA1_BMON_1_BASE,
[GAUDI2_BMON_DCORE3_VDEC0_0] = mmDCORE3_VDEC0_BMON_0_BASE,
[GAUDI2_BMON_DCORE3_VDEC0_1] = mmDCORE3_VDEC0_BMON_1_BASE,
[GAUDI2_BMON_DCORE3_VDEC0_2] = mmDCORE3_VDEC0_BMON_2_BASE,
[GAUDI2_BMON_DCORE3_VDEC1_0] = mmDCORE3_VDEC1_BMON_0_BASE,
[GAUDI2_BMON_DCORE3_VDEC1_1] = mmDCORE3_VDEC1_BMON_1_BASE,
[GAUDI2_BMON_DCORE3_VDEC1_2] = mmDCORE3_VDEC1_BMON_2_BASE,
[GAUDI2_BMON_PCIE_MSTR_WR] = mmPCIE_BMON_MSTR_WR_BASE,
[GAUDI2_BMON_PCIE_MSTR_RD] = mmPCIE_BMON_MSTR_RD_BASE,
[GAUDI2_BMON_PCIE_SLV_WR] = mmPCIE_BMON_SLV_WR_BASE,
[GAUDI2_BMON_PCIE_SLV_RD] = mmPCIE_BMON_SLV_RD_BASE,
[GAUDI2_BMON_PSOC_ARC0_0] = mmPSOC_ARC0_BMON_0_BASE,
[GAUDI2_BMON_PSOC_ARC0_1] = mmPSOC_ARC0_BMON_1_BASE,
[GAUDI2_BMON_PSOC_ARC1_0] = mmPSOC_ARC1_BMON_0_BASE,
[GAUDI2_BMON_PSOC_ARC1_1] = mmPSOC_ARC1_BMON_1_BASE,
[GAUDI2_BMON_PDMA0_0] = mmPDMA0_BMON_0_BASE,
[GAUDI2_BMON_PDMA0_1] = mmPDMA0_BMON_1_BASE,
[GAUDI2_BMON_PDMA1_0] = mmPDMA1_BMON_0_BASE,
[GAUDI2_BMON_PDMA1_1] = mmPDMA1_BMON_1_BASE,
[GAUDI2_BMON_CPU_WR] = mmCPU_WR_BMON_BASE,
[GAUDI2_BMON_CPU_RD] = mmCPU_RD_BMON_BASE,
[GAUDI2_BMON_PMMU_0] = mmPMMU_BMON_0_BASE,
[GAUDI2_BMON_PMMU_1] = mmPMMU_BMON_1_BASE,
[GAUDI2_BMON_PMMU_2] = mmPMMU_BMON_2_BASE,
[GAUDI2_BMON_PMMU_3] = mmPMMU_BMON_3_BASE,
[GAUDI2_BMON_PMMU_4] = mmPMMU_BMON_4_BASE,
[GAUDI2_BMON_ROT0_0] = mmROT0_BMON_0_BASE,
[GAUDI2_BMON_ROT0_1] = mmROT0_BMON_1_BASE,
[GAUDI2_BMON_ROT0_2] = mmROT0_BMON_2_BASE,
[GAUDI2_BMON_ROT0_3] = mmROT0_BMON_3_BASE,
[GAUDI2_BMON_ROT1_0] = mmROT1_BMON_0_BASE,
[GAUDI2_BMON_ROT1_1] = mmROT1_BMON_1_BASE,
[GAUDI2_BMON_ROT1_2] = mmROT1_BMON_2_BASE,
[GAUDI2_BMON_ROT1_3] = mmROT1_BMON_3_BASE,
[GAUDI2_BMON_ARC_FARM_0] = mmARC_FARM_BMON_0_BASE,
[GAUDI2_BMON_ARC_FARM_1] = mmARC_FARM_BMON_1_BASE,
[GAUDI2_BMON_ARC_FARM_2] = mmARC_FARM_BMON_2_BASE,
[GAUDI2_BMON_ARC_FARM_3] = mmARC_FARM_BMON_3_BASE,
[GAUDI2_BMON_KDMA_0] = mmKDMA_BMON_0_BASE,
[GAUDI2_BMON_KDMA_1] = mmKDMA_BMON_1_BASE,
[GAUDI2_BMON_KDMA_2] = mmKDMA_BMON_2_BASE,
[GAUDI2_BMON_KDMA_3] = mmKDMA_BMON_3_BASE,
[GAUDI2_BMON_PCIE_VDEC0_0] = mmPCIE_VDEC0_BMON_0_BASE,
[GAUDI2_BMON_PCIE_VDEC0_1] = mmPCIE_VDEC0_BMON_1_BASE,
[GAUDI2_BMON_PCIE_VDEC0_2] = mmPCIE_VDEC0_BMON_2_BASE,
[GAUDI2_BMON_PCIE_VDEC1_0] = mmPCIE_VDEC1_BMON_0_BASE,
[GAUDI2_BMON_PCIE_VDEC1_1] = mmPCIE_VDEC1_BMON_1_BASE,
[GAUDI2_BMON_PCIE_VDEC1_2] = mmPCIE_VDEC1_BMON_2_BASE,
[GAUDI2_BMON_NIC0_DBG_0_0] = mmNIC0_DBG_BMON0_0_BASE,
[GAUDI2_BMON_NIC0_DBG_1_0] = mmNIC0_DBG_BMON1_0_BASE,
[GAUDI2_BMON_NIC0_DBG_2_0] = mmNIC0_DBG_BMON2_0_BASE,
[GAUDI2_BMON_NIC0_DBG_0_1] = mmNIC0_DBG_BMON0_1_BASE,
[GAUDI2_BMON_NIC0_DBG_1_1] = mmNIC0_DBG_BMON1_1_BASE,
[GAUDI2_BMON_NIC0_DBG_2_1] = mmNIC0_DBG_BMON2_1_BASE,
[GAUDI2_BMON_NIC1_DBG_0_0] = mmNIC1_DBG_BMON0_0_BASE,
[GAUDI2_BMON_NIC1_DBG_1_0] = mmNIC1_DBG_BMON1_0_BASE,
[GAUDI2_BMON_NIC1_DBG_2_0] = mmNIC1_DBG_BMON2_0_BASE,
[GAUDI2_BMON_NIC1_DBG_0_1] = mmNIC1_DBG_BMON0_1_BASE,
[GAUDI2_BMON_NIC1_DBG_1_1] = mmNIC1_DBG_BMON1_1_BASE,
[GAUDI2_BMON_NIC1_DBG_2_1] = mmNIC1_DBG_BMON2_1_BASE,
[GAUDI2_BMON_NIC2_DBG_0_0] = mmNIC2_DBG_BMON0_0_BASE,
[GAUDI2_BMON_NIC2_DBG_1_0] = mmNIC2_DBG_BMON1_0_BASE,
[GAUDI2_BMON_NIC2_DBG_2_0] = mmNIC2_DBG_BMON2_0_BASE,
[GAUDI2_BMON_NIC2_DBG_0_1] = mmNIC2_DBG_BMON0_1_BASE,
[GAUDI2_BMON_NIC2_DBG_1_1] = mmNIC2_DBG_BMON1_1_BASE,
[GAUDI2_BMON_NIC2_DBG_2_1] = mmNIC2_DBG_BMON2_1_BASE,
[GAUDI2_BMON_NIC3_DBG_0_0] = mmNIC3_DBG_BMON0_0_BASE,
[GAUDI2_BMON_NIC3_DBG_1_0] = mmNIC3_DBG_BMON1_0_BASE,
[GAUDI2_BMON_NIC3_DBG_2_0] = mmNIC3_DBG_BMON2_0_BASE,
[GAUDI2_BMON_NIC3_DBG_0_1] = mmNIC3_DBG_BMON0_1_BASE,
[GAUDI2_BMON_NIC3_DBG_1_1] = mmNIC3_DBG_BMON1_1_BASE,
[GAUDI2_BMON_NIC3_DBG_2_1] = mmNIC3_DBG_BMON2_1_BASE,
[GAUDI2_BMON_NIC4_DBG_0_0] = mmNIC4_DBG_BMON0_0_BASE,
[GAUDI2_BMON_NIC4_DBG_1_0] = mmNIC4_DBG_BMON1_0_BASE,
[GAUDI2_BMON_NIC4_DBG_2_0] = mmNIC4_DBG_BMON2_0_BASE,
[GAUDI2_BMON_NIC4_DBG_0_1] = mmNIC4_DBG_BMON0_1_BASE,
[GAUDI2_BMON_NIC4_DBG_1_1] = mmNIC4_DBG_BMON1_1_BASE,
[GAUDI2_BMON_NIC4_DBG_2_1] = mmNIC4_DBG_BMON2_1_BASE,
[GAUDI2_BMON_NIC5_DBG_0_0] = mmNIC5_DBG_BMON0_0_BASE,
[GAUDI2_BMON_NIC5_DBG_1_0] = mmNIC5_DBG_BMON1_0_BASE,
[GAUDI2_BMON_NIC5_DBG_2_0] = mmNIC5_DBG_BMON2_0_BASE,
[GAUDI2_BMON_NIC5_DBG_0_1] = mmNIC5_DBG_BMON0_1_BASE,
[GAUDI2_BMON_NIC5_DBG_1_1] = mmNIC5_DBG_BMON1_1_BASE,
[GAUDI2_BMON_NIC5_DBG_2_1] = mmNIC5_DBG_BMON2_1_BASE,
[GAUDI2_BMON_NIC6_DBG_0_0] = mmNIC6_DBG_BMON0_0_BASE,
[GAUDI2_BMON_NIC6_DBG_1_0] = mmNIC6_DBG_BMON1_0_BASE,
[GAUDI2_BMON_NIC6_DBG_2_0] = mmNIC6_DBG_BMON2_0_BASE,
[GAUDI2_BMON_NIC6_DBG_0_1] = mmNIC6_DBG_BMON0_1_BASE,
[GAUDI2_BMON_NIC6_DBG_1_1] = mmNIC6_DBG_BMON1_1_BASE,
[GAUDI2_BMON_NIC6_DBG_2_1] = mmNIC6_DBG_BMON2_1_BASE,
[GAUDI2_BMON_NIC7_DBG_0_0] = mmNIC7_DBG_BMON0_0_BASE,
[GAUDI2_BMON_NIC7_DBG_1_0] = mmNIC7_DBG_BMON1_0_BASE,
[GAUDI2_BMON_NIC7_DBG_2_0] = mmNIC7_DBG_BMON2_0_BASE,
[GAUDI2_BMON_NIC7_DBG_0_1] = mmNIC7_DBG_BMON0_1_BASE,
[GAUDI2_BMON_NIC7_DBG_1_1] = mmNIC7_DBG_BMON1_1_BASE,
[GAUDI2_BMON_NIC7_DBG_2_1] = mmNIC7_DBG_BMON2_1_BASE,
[GAUDI2_BMON_NIC8_DBG_0_0] = mmNIC8_DBG_BMON0_0_BASE,
[GAUDI2_BMON_NIC8_DBG_1_0] = mmNIC8_DBG_BMON1_0_BASE,
[GAUDI2_BMON_NIC8_DBG_2_0] = mmNIC8_DBG_BMON2_0_BASE,
[GAUDI2_BMON_NIC8_DBG_0_1] = mmNIC8_DBG_BMON0_1_BASE,
[GAUDI2_BMON_NIC8_DBG_1_1] = mmNIC8_DBG_BMON1_1_BASE,
[GAUDI2_BMON_NIC8_DBG_2_1] = mmNIC8_DBG_BMON2_1_BASE,
[GAUDI2_BMON_NIC9_DBG_0_0] = mmNIC9_DBG_BMON0_0_BASE,
[GAUDI2_BMON_NIC9_DBG_1_0] = mmNIC9_DBG_BMON1_0_BASE,
[GAUDI2_BMON_NIC9_DBG_2_0] = mmNIC9_DBG_BMON2_0_BASE,
[GAUDI2_BMON_NIC9_DBG_0_1] = mmNIC9_DBG_BMON0_1_BASE,
[GAUDI2_BMON_NIC9_DBG_1_1] = mmNIC9_DBG_BMON1_1_BASE,
[GAUDI2_BMON_NIC9_DBG_2_1] = mmNIC9_DBG_BMON2_1_BASE,
[GAUDI2_BMON_NIC10_DBG_0_0] = mmNIC10_DBG_BMON0_0_BASE,
[GAUDI2_BMON_NIC10_DBG_1_0] = mmNIC10_DBG_BMON1_0_BASE,
[GAUDI2_BMON_NIC10_DBG_2_0] = mmNIC10_DBG_BMON2_0_BASE,
[GAUDI2_BMON_NIC10_DBG_0_1] = mmNIC10_DBG_BMON0_1_BASE,
[GAUDI2_BMON_NIC10_DBG_1_1] = mmNIC10_DBG_BMON1_1_BASE,
[GAUDI2_BMON_NIC10_DBG_2_1] = mmNIC10_DBG_BMON2_1_BASE,
[GAUDI2_BMON_NIC11_DBG_0_0] = mmNIC11_DBG_BMON0_0_BASE,
[GAUDI2_BMON_NIC11_DBG_1_0] = mmNIC11_DBG_BMON1_0_BASE,
[GAUDI2_BMON_NIC11_DBG_2_0] = mmNIC11_DBG_BMON2_0_BASE,
[GAUDI2_BMON_NIC11_DBG_0_1] = mmNIC11_DBG_BMON0_1_BASE,
[GAUDI2_BMON_NIC11_DBG_1_1] = mmNIC11_DBG_BMON1_1_BASE,
[GAUDI2_BMON_NIC11_DBG_2_1] = mmNIC11_DBG_BMON2_1_BASE
};
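
/* SPMU register base address for each GAUDI2_SPMU_* component ID */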
static u64 debug_spmu_regs[GAUDI2_SPMU_LAST + 1] = {
[GAUDI2_SPMU_DCORE0_TPC0_EML] = mmDCORE0_TPC0_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_TPC1_EML] = mmDCORE0_TPC1_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_TPC2_EML] = mmDCORE0_TPC2_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_TPC3_EML] = mmDCORE0_TPC3_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_TPC4_EML] = mmDCORE0_TPC4_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_TPC5_EML] = mmDCORE0_TPC5_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_TPC6_EML] = mmDCORE0_TPC6_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_TPC0_EML] = mmDCORE1_TPC0_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_TPC1_EML] = mmDCORE1_TPC1_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_TPC2_EML] = mmDCORE1_TPC2_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_TPC3_EML] = mmDCORE1_TPC3_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_TPC4_EML] = mmDCORE1_TPC4_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_TPC5_EML] = mmDCORE1_TPC5_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_TPC0_EML] = mmDCORE2_TPC0_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_TPC1_EML] = mmDCORE2_TPC1_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_TPC2_EML] = mmDCORE2_TPC2_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_TPC3_EML] = mmDCORE2_TPC3_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_TPC4_EML] = mmDCORE2_TPC4_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_TPC5_EML] = mmDCORE2_TPC5_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_TPC0_EML] = mmDCORE3_TPC0_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_TPC1_EML] = mmDCORE3_TPC1_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_TPC2_EML] = mmDCORE3_TPC2_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_TPC3_EML] = mmDCORE3_TPC3_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_TPC4_EML] = mmDCORE3_TPC4_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_TPC5_EML] = mmDCORE3_TPC5_EML_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_HMMU0_CS] = mmDCORE0_HMMU0_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_HMMU1_CS] = mmDCORE0_HMMU1_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_HMMU2_CS] = mmDCORE0_HMMU2_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_HMMU3_CS] = mmDCORE0_HMMU3_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_MME_CTRL] = mmDCORE0_MME_CTRL_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_MME_SBTE0] = mmDCORE0_MME_SBTE0_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_MME_SBTE1] = mmDCORE0_MME_SBTE1_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_MME_SBTE2] = mmDCORE0_MME_SBTE2_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_MME_SBTE3] = mmDCORE0_MME_SBTE3_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_MME_SBTE4] = mmDCORE0_MME_SBTE4_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_MME_ACC] = mmDCORE0_MME_ACC_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_SM] = mmDCORE0_SM_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_EDMA0_CS] = mmDCORE0_EDMA0_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_EDMA1_CS] = mmDCORE0_EDMA1_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_VDEC0_CS] = mmDCORE0_VDEC0_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE0_VDEC1_CS] = mmDCORE0_VDEC1_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_HMMU0_CS] = mmDCORE1_HMMU0_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_HMMU1_CS] = mmDCORE1_HMMU1_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_HMMU2_CS] = mmDCORE1_HMMU2_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_HMMU3_CS] = mmDCORE1_HMMU3_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_MME_CTRL] = mmDCORE1_MME_CTRL_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_MME_SBTE0] = mmDCORE1_MME_SBTE0_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_MME_SBTE1] = mmDCORE1_MME_SBTE1_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_MME_SBTE2] = mmDCORE1_MME_SBTE2_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_MME_SBTE3] = mmDCORE1_MME_SBTE3_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_MME_SBTE4] = mmDCORE1_MME_SBTE4_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_MME_ACC] = mmDCORE1_MME_ACC_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_SM] = mmDCORE1_SM_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_EDMA0_CS] = mmDCORE1_EDMA0_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_EDMA1_CS] = mmDCORE1_EDMA1_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_VDEC0_CS] = mmDCORE1_VDEC0_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE1_VDEC1_CS] = mmDCORE1_VDEC1_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_HMMU0_CS] = mmDCORE2_HMMU0_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_HMMU1_CS] = mmDCORE2_HMMU1_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_HMMU2_CS] = mmDCORE2_HMMU2_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_HMMU3_CS] = mmDCORE2_HMMU3_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_MME_CTRL] = mmDCORE2_MME_CTRL_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_MME_SBTE0] = mmDCORE2_MME_SBTE0_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_MME_SBTE1] = mmDCORE2_MME_SBTE1_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_MME_SBTE2] = mmDCORE2_MME_SBTE2_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_MME_SBTE3] = mmDCORE2_MME_SBTE3_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_MME_SBTE4] = mmDCORE2_MME_SBTE4_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_MME_ACC] = mmDCORE2_MME_ACC_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_SM] = mmDCORE2_SM_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_EDMA0_CS] = mmDCORE2_EDMA0_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_EDMA1_CS] = mmDCORE2_EDMA1_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_VDEC0_CS] = mmDCORE2_VDEC0_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE2_VDEC1_CS] = mmDCORE2_VDEC1_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_HMMU0_CS] = mmDCORE3_HMMU0_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_HMMU1_CS] = mmDCORE3_HMMU1_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_HMMU2_CS] = mmDCORE3_HMMU2_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_HMMU3_CS] = mmDCORE3_HMMU3_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_MME_CTRL] = mmDCORE3_MME_CTRL_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_MME_SBTE0] = mmDCORE3_MME_SBTE0_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_MME_SBTE1] = mmDCORE3_MME_SBTE1_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_MME_SBTE2] = mmDCORE3_MME_SBTE2_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_MME_SBTE3] = mmDCORE3_MME_SBTE3_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_MME_SBTE4] = mmDCORE3_MME_SBTE4_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_MME_ACC] = mmDCORE3_MME_ACC_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_SM] = mmDCORE3_SM_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_EDMA0_CS] = mmDCORE3_EDMA0_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_EDMA1_CS] = mmDCORE3_EDMA1_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_VDEC0_CS] = mmDCORE3_VDEC0_CS_SPMU_BASE,
[GAUDI2_SPMU_DCORE3_VDEC1_CS] = mmDCORE3_VDEC1_CS_SPMU_BASE,
[GAUDI2_SPMU_PCIE] = mmPCIE_SPMU_BASE,
[GAUDI2_SPMU_PSOC_ARC0_CS] = mmPSOC_ARC0_CS_SPMU_BASE,
[GAUDI2_SPMU_PSOC_ARC1_CS] = mmPSOC_ARC1_CS_SPMU_BASE,
[GAUDI2_SPMU_PDMA0_CS] = mmPDMA0_CS_SPMU_BASE,
[GAUDI2_SPMU_PDMA1_CS] = mmPDMA1_CS_SPMU_BASE,
[GAUDI2_SPMU_PMMU_CS] = mmPMMU_CS_SPMU_BASE,
[GAUDI2_SPMU_ROT0_CS] = mmROT0_CS_SPMU_BASE,
[GAUDI2_SPMU_ROT1_CS] = mmROT1_CS_SPMU_BASE,
[GAUDI2_SPMU_ARC_FARM_CS] = mmARC_FARM_CS_SPMU_BASE,
[GAUDI2_SPMU_KDMA_CS] = mmKDMA_CS_SPMU_BASE,
[GAUDI2_SPMU_PCIE_VDEC0_CS] = mmPCIE_VDEC0_CS_SPMU_BASE,
[GAUDI2_SPMU_PCIE_VDEC1_CS] = mmPCIE_VDEC1_CS_SPMU_BASE,
[GAUDI2_SPMU_HBM0_MC0_CS] = mmHBM0_MC0_CS_SPMU_BASE,
[GAUDI2_SPMU_HBM0_MC1_CS] = mmHBM0_MC1_CS_SPMU_BASE,
[GAUDI2_SPMU_HBM1_MC0_CS] = mmHBM1_MC0_CS_SPMU_BASE,
[GAUDI2_SPMU_HBM1_MC1_CS] = mmHBM1_MC1_CS_SPMU_BASE,
[GAUDI2_SPMU_HBM2_MC0_CS] = mmHBM2_MC0_CS_SPMU_BASE,
[GAUDI2_SPMU_HBM2_MC1_CS] = mmHBM2_MC1_CS_SPMU_BASE,
[GAUDI2_SPMU_HBM3_MC0_CS] = mmHBM3_MC0_CS_SPMU_BASE,
[GAUDI2_SPMU_HBM3_MC1_CS] = mmHBM3_MC1_CS_SPMU_BASE,
[GAUDI2_SPMU_HBM4_MC0_CS] = mmHBM4_MC0_CS_SPMU_BASE,
[GAUDI2_SPMU_HBM4_MC1_CS] = mmHBM4_MC1_CS_SPMU_BASE,
[GAUDI2_SPMU_HBM5_MC0_CS] = mmHBM5_MC0_CS_SPMU_BASE,
[GAUDI2_SPMU_HBM5_MC1_CS] = mmHBM5_MC1_CS_SPMU_BASE,
[GAUDI2_SPMU_NIC0_DBG_0] = mmNIC0_DBG_SPMU_0_BASE,
[GAUDI2_SPMU_NIC0_DBG_1] = mmNIC0_DBG_SPMU_1_BASE,
[GAUDI2_SPMU_NIC1_DBG_0] = mmNIC1_DBG_SPMU_0_BASE,
[GAUDI2_SPMU_NIC1_DBG_1] = mmNIC1_DBG_SPMU_1_BASE,
[GAUDI2_SPMU_NIC2_DBG_0] = mmNIC2_DBG_SPMU_0_BASE,
[GAUDI2_SPMU_NIC2_DBG_1] = mmNIC2_DBG_SPMU_1_BASE,
[GAUDI2_SPMU_NIC3_DBG_0] = mmNIC3_DBG_SPMU_0_BASE,
[GAUDI2_SPMU_NIC3_DBG_1] = mmNIC3_DBG_SPMU_1_BASE,
[GAUDI2_SPMU_NIC4_DBG_0] = mmNIC4_DBG_SPMU_0_BASE,
[GAUDI2_SPMU_NIC4_DBG_1] = mmNIC4_DBG_SPMU_1_BASE,
[GAUDI2_SPMU_NIC5_DBG_0] = mmNIC5_DBG_SPMU_0_BASE,
[GAUDI2_SPMU_NIC5_DBG_1] = mmNIC5_DBG_SPMU_1_BASE,
[GAUDI2_SPMU_NIC6_DBG_0] = mmNIC6_DBG_SPMU_0_BASE,
[GAUDI2_SPMU_NIC6_DBG_1] = mmNIC6_DBG_SPMU_1_BASE,
[GAUDI2_SPMU_NIC7_DBG_0] = mmNIC7_DBG_SPMU_0_BASE,
[GAUDI2_SPMU_NIC7_DBG_1] = mmNIC7_DBG_SPMU_1_BASE,
[GAUDI2_SPMU_NIC8_DBG_0] = mmNIC8_DBG_SPMU_0_BASE,
[GAUDI2_SPMU_NIC8_DBG_1] = mmNIC8_DBG_SPMU_1_BASE,
[GAUDI2_SPMU_NIC9_DBG_0] = mmNIC9_DBG_SPMU_0_BASE,
[GAUDI2_SPMU_NIC9_DBG_1] = mmNIC9_DBG_SPMU_1_BASE,
[GAUDI2_SPMU_NIC10_DBG_0] = mmNIC10_DBG_SPMU_0_BASE,
[GAUDI2_SPMU_NIC10_DBG_1] = mmNIC10_DBG_SPMU_1_BASE,
[GAUDI2_SPMU_NIC11_DBG_0] = mmNIC11_DBG_SPMU_0_BASE,
[GAUDI2_SPMU_NIC11_DBG_1] = mmNIC11_DBG_SPMU_1_BASE
};
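
/*
 * Per-XBAR-edge CoreSight component IDs (funnel/ETF/STM/SPMU/BMONs) used when
 * handling binned XBAR edges. Components that are not present for an entry
 * are marked COMPONENT_ID_INVALID.
 */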
static struct component_config_offsets xbar_edge_binning_cfg_table[XBAR_EDGE_ID_SIZE] = {
[XBAR_EDGE_ID_DCORE0] = {
.funnel_id = GAUDI2_FUNNEL_DCORE0_XBAR_EDGE,
.etf_id = COMPONENT_ID_INVALID,
.stm_id = COMPONENT_ID_INVALID,
.spmu_id = COMPONENT_ID_INVALID,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
[XBAR_EDGE_ID_DCORE1] = {
.funnel_id = GAUDI2_FUNNEL_DCORE1_XBAR_EDGE,
.etf_id = COMPONENT_ID_INVALID,
.stm_id = COMPONENT_ID_INVALID,
.spmu_id = COMPONENT_ID_INVALID,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
[XBAR_EDGE_ID_DCORE2] = {
.funnel_id = GAUDI2_FUNNEL_DCORE2_XBAR_EDGE,
.etf_id = COMPONENT_ID_INVALID,
.stm_id = COMPONENT_ID_INVALID,
.spmu_id = COMPONENT_ID_INVALID,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
[XBAR_EDGE_ID_DCORE3] = {
.funnel_id = GAUDI2_FUNNEL_DCORE3_XBAR_EDGE,
.etf_id = COMPONENT_ID_INVALID,
.stm_id = COMPONENT_ID_INVALID,
.spmu_id = COMPONENT_ID_INVALID,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
};
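
/* Per-HMMU CoreSight component IDs used when handling binned HMMUs */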
static struct component_config_offsets hmmu_binning_cfg_table[HMMU_ID_SIZE] = {
[HMMU_ID_DCORE0_HMMU0] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE0_HMMU0_CS,
.stm_id = GAUDI2_STM_DCORE0_HMMU0_CS,
.spmu_id = GAUDI2_SPMU_DCORE0_HMMU0_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE0_HMMU0_0,
GAUDI2_BMON_DCORE0_HMMU0_1,
GAUDI2_BMON_DCORE0_HMMU0_2,
GAUDI2_BMON_DCORE0_HMMU0_3,
GAUDI2_BMON_DCORE0_HMMU0_4,
}
},
[HMMU_ID_DCORE0_HMMU1] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE0_HMMU1_CS,
.stm_id = GAUDI2_STM_DCORE0_HMMU1_CS,
.spmu_id = GAUDI2_SPMU_DCORE0_HMMU1_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE0_HMMU1_0,
GAUDI2_BMON_DCORE0_HMMU1_1,
GAUDI2_BMON_DCORE0_HMMU1_2,
GAUDI2_BMON_DCORE0_HMMU1_3,
GAUDI2_BMON_DCORE0_HMMU1_4,
}
},
[HMMU_ID_DCORE0_HMMU2] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE0_HMMU2_CS,
.stm_id = GAUDI2_STM_DCORE0_HMMU2_CS,
.spmu_id = GAUDI2_SPMU_DCORE0_HMMU2_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE0_HMMU2_0,
GAUDI2_BMON_DCORE0_HMMU2_1,
GAUDI2_BMON_DCORE0_HMMU2_2,
GAUDI2_BMON_DCORE0_HMMU2_3,
GAUDI2_BMON_DCORE0_HMMU2_4,
}
},
[HMMU_ID_DCORE0_HMMU3] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE0_HMMU3_CS,
.stm_id = GAUDI2_STM_DCORE0_HMMU3_CS,
.spmu_id = GAUDI2_SPMU_DCORE0_HMMU3_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE0_HMMU3_0,
GAUDI2_BMON_DCORE0_HMMU3_1,
GAUDI2_BMON_DCORE0_HMMU3_2,
GAUDI2_BMON_DCORE0_HMMU3_3,
GAUDI2_BMON_DCORE0_HMMU3_4,
}
},
[HMMU_ID_DCORE1_HMMU0] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE1_HMMU0_CS,
.stm_id = GAUDI2_STM_DCORE1_HMMU0_CS,
.spmu_id = GAUDI2_SPMU_DCORE1_HMMU0_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE1_HMMU0_0,
GAUDI2_BMON_DCORE1_HMMU0_1,
GAUDI2_BMON_DCORE1_HMMU0_2,
GAUDI2_BMON_DCORE1_HMMU0_3,
GAUDI2_BMON_DCORE1_HMMU0_4,
}
},
[HMMU_ID_DCORE1_HMMU1] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE1_HMMU1_CS,
.stm_id = GAUDI2_STM_DCORE1_HMMU1_CS,
.spmu_id = GAUDI2_SPMU_DCORE1_HMMU1_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE1_HMMU1_0,
GAUDI2_BMON_DCORE1_HMMU1_1,
GAUDI2_BMON_DCORE1_HMMU1_2,
GAUDI2_BMON_DCORE1_HMMU1_3,
GAUDI2_BMON_DCORE1_HMMU1_4,
}
},
[HMMU_ID_DCORE1_HMMU2] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE1_HMMU2_CS,
.stm_id = GAUDI2_STM_DCORE1_HMMU2_CS,
.spmu_id = GAUDI2_SPMU_DCORE1_HMMU2_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE1_HMMU2_0,
GAUDI2_BMON_DCORE1_HMMU2_1,
GAUDI2_BMON_DCORE1_HMMU2_2,
GAUDI2_BMON_DCORE1_HMMU2_3,
GAUDI2_BMON_DCORE1_HMMU2_4,
}
},
[HMMU_ID_DCORE1_HMMU3] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE1_HMMU3_CS,
.stm_id = GAUDI2_STM_DCORE1_HMMU3_CS,
.spmu_id = GAUDI2_SPMU_DCORE1_HMMU3_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE1_HMMU3_0,
GAUDI2_BMON_DCORE1_HMMU3_1,
GAUDI2_BMON_DCORE1_HMMU3_2,
GAUDI2_BMON_DCORE1_HMMU3_3,
GAUDI2_BMON_DCORE1_HMMU3_4,
}
},
[HMMU_ID_DCORE2_HMMU0] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE2_HMMU0_CS,
.stm_id = GAUDI2_STM_DCORE2_HMMU0_CS,
.spmu_id = GAUDI2_SPMU_DCORE2_HMMU0_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE2_HMMU0_0,
GAUDI2_BMON_DCORE2_HMMU0_1,
GAUDI2_BMON_DCORE2_HMMU0_2,
GAUDI2_BMON_DCORE2_HMMU0_3,
GAUDI2_BMON_DCORE2_HMMU0_4,
}
},
[HMMU_ID_DCORE2_HMMU1] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE2_HMMU1_CS,
.stm_id = GAUDI2_STM_DCORE2_HMMU1_CS,
.spmu_id = GAUDI2_SPMU_DCORE2_HMMU1_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE2_HMMU1_0,
GAUDI2_BMON_DCORE2_HMMU1_1,
GAUDI2_BMON_DCORE2_HMMU1_2,
GAUDI2_BMON_DCORE2_HMMU1_3,
GAUDI2_BMON_DCORE2_HMMU1_4,
}
},
[HMMU_ID_DCORE2_HMMU2] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE2_HMMU2_CS,
.stm_id = GAUDI2_STM_DCORE2_HMMU2_CS,
.spmu_id = GAUDI2_SPMU_DCORE2_HMMU2_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE2_HMMU2_0,
GAUDI2_BMON_DCORE2_HMMU2_1,
GAUDI2_BMON_DCORE2_HMMU2_2,
GAUDI2_BMON_DCORE2_HMMU2_3,
GAUDI2_BMON_DCORE2_HMMU2_4,
}
},
[HMMU_ID_DCORE2_HMMU3] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE2_HMMU3_CS,
.stm_id = GAUDI2_STM_DCORE2_HMMU3_CS,
.spmu_id = GAUDI2_SPMU_DCORE2_HMMU3_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE2_HMMU3_0,
GAUDI2_BMON_DCORE2_HMMU3_1,
GAUDI2_BMON_DCORE2_HMMU3_2,
GAUDI2_BMON_DCORE2_HMMU3_3,
GAUDI2_BMON_DCORE2_HMMU3_4,
}
},
[HMMU_ID_DCORE3_HMMU0] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE3_HMMU0_CS,
.stm_id = GAUDI2_STM_DCORE3_HMMU0_CS,
.spmu_id = GAUDI2_SPMU_DCORE3_HMMU0_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE3_HMMU0_0,
GAUDI2_BMON_DCORE3_HMMU0_1,
GAUDI2_BMON_DCORE3_HMMU0_2,
GAUDI2_BMON_DCORE3_HMMU0_3,
GAUDI2_BMON_DCORE3_HMMU0_4,
}
},
[HMMU_ID_DCORE3_HMMU1] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE3_HMMU1_CS,
.stm_id = GAUDI2_STM_DCORE3_HMMU1_CS,
.spmu_id = GAUDI2_SPMU_DCORE3_HMMU1_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE3_HMMU1_0,
GAUDI2_BMON_DCORE3_HMMU1_1,
GAUDI2_BMON_DCORE3_HMMU1_2,
GAUDI2_BMON_DCORE3_HMMU1_3,
GAUDI2_BMON_DCORE3_HMMU1_4,
}
},
[HMMU_ID_DCORE3_HMMU2] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE3_HMMU2_CS,
.stm_id = GAUDI2_STM_DCORE3_HMMU2_CS,
.spmu_id = GAUDI2_SPMU_DCORE3_HMMU2_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE3_HMMU2_0,
GAUDI2_BMON_DCORE3_HMMU2_1,
GAUDI2_BMON_DCORE3_HMMU2_2,
GAUDI2_BMON_DCORE3_HMMU2_3,
GAUDI2_BMON_DCORE3_HMMU2_4,
}
},
[HMMU_ID_DCORE3_HMMU3] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE3_HMMU3_CS,
.stm_id = GAUDI2_STM_DCORE3_HMMU3_CS,
.spmu_id = GAUDI2_SPMU_DCORE3_HMMU3_CS,
.bmon_count = 5,
.bmon_ids = {
GAUDI2_BMON_DCORE3_HMMU3_0,
GAUDI2_BMON_DCORE3_HMMU3_1,
GAUDI2_BMON_DCORE3_HMMU3_2,
GAUDI2_BMON_DCORE3_HMMU3_3,
GAUDI2_BMON_DCORE3_HMMU3_4,
}
},
};
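
/* Per-HBM CoreSight component IDs (memory controller 0), used when handling binned HBMs */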
static struct component_config_offsets hbm_mc0_binning_cfg_table[HBM_ID_SIZE] = {
[HBM_ID0] = {
.funnel_id = GAUDI2_FUNNEL_HBM0_MC0,
.etf_id = GAUDI2_ETF_HBM0_MC0_CS,
.stm_id = GAUDI2_STM_HBM0_MC0_CS,
.spmu_id = GAUDI2_SPMU_HBM0_MC0_CS,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
[HBM_ID1] = {
.funnel_id = GAUDI2_FUNNEL_HBM1_MC0,
.etf_id = GAUDI2_ETF_HBM1_MC0_CS,
.stm_id = GAUDI2_STM_HBM1_MC0_CS,
.spmu_id = GAUDI2_SPMU_HBM1_MC0_CS,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
[HBM_ID2] = {
.funnel_id = GAUDI2_FUNNEL_HBM2_MC0,
.etf_id = GAUDI2_ETF_HBM2_MC0_CS,
.stm_id = GAUDI2_STM_HBM2_MC0_CS,
.spmu_id = GAUDI2_SPMU_HBM2_MC0_CS,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
[HBM_ID3] = {
.funnel_id = GAUDI2_FUNNEL_HBM3_MC0,
.etf_id = GAUDI2_ETF_HBM3_MC0_CS,
.stm_id = GAUDI2_STM_HBM3_MC0_CS,
.spmu_id = GAUDI2_SPMU_HBM3_MC0_CS,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
[HBM_ID4] = {
.funnel_id = GAUDI2_FUNNEL_HBM4_MC0,
.etf_id = GAUDI2_ETF_HBM4_MC0_CS,
.stm_id = GAUDI2_STM_HBM4_MC0_CS,
.spmu_id = GAUDI2_SPMU_HBM4_MC0_CS,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
[HBM_ID5] = {
.funnel_id = GAUDI2_FUNNEL_HBM5_MC0,
.etf_id = GAUDI2_ETF_HBM5_MC0_CS,
.stm_id = GAUDI2_STM_HBM5_MC0_CS,
.spmu_id = GAUDI2_SPMU_HBM5_MC0_CS,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
};
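
/* Per-HBM CoreSight component IDs (memory controller 1), used when handling binned HBMs */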
static struct component_config_offsets hbm_mc1_binning_cfg_table[HBM_ID_SIZE] = {
[HBM_ID0] = {
.funnel_id = GAUDI2_FUNNEL_HBM0_MC1,
.etf_id = GAUDI2_ETF_HBM0_MC1_CS,
.stm_id = GAUDI2_STM_HBM0_MC1_CS,
.spmu_id = GAUDI2_SPMU_HBM0_MC1_CS,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
[HBM_ID1] = {
.funnel_id = GAUDI2_FUNNEL_HBM1_MC1,
.etf_id = GAUDI2_ETF_HBM1_MC1_CS,
.stm_id = GAUDI2_STM_HBM1_MC1_CS,
.spmu_id = GAUDI2_SPMU_HBM1_MC1_CS,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
[HBM_ID2] = {
.funnel_id = GAUDI2_FUNNEL_HBM2_MC1,
.etf_id = GAUDI2_ETF_HBM2_MC1_CS,
.stm_id = GAUDI2_STM_HBM2_MC1_CS,
.spmu_id = GAUDI2_SPMU_HBM2_MC1_CS,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
[HBM_ID3] = {
.funnel_id = GAUDI2_FUNNEL_HBM3_MC1,
.etf_id = GAUDI2_ETF_HBM3_MC1_CS,
.stm_id = GAUDI2_STM_HBM3_MC1_CS,
.spmu_id = GAUDI2_SPMU_HBM3_MC1_CS,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
[HBM_ID4] = {
.funnel_id = GAUDI2_FUNNEL_HBM4_MC1,
.etf_id = GAUDI2_ETF_HBM4_MC1_CS,
.stm_id = GAUDI2_STM_HBM4_MC1_CS,
.spmu_id = GAUDI2_SPMU_HBM4_MC1_CS,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
[HBM_ID5] = {
.funnel_id = GAUDI2_FUNNEL_HBM5_MC1,
.etf_id = GAUDI2_ETF_HBM5_MC1_CS,
.stm_id = GAUDI2_STM_HBM5_MC1_CS,
.spmu_id = GAUDI2_SPMU_HBM5_MC1_CS,
.bmon_count = 0,
.bmon_ids = {COMPONENT_ID_INVALID}
},
};
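
/* Per-decoder CoreSight component IDs used when handling binned video decoders */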
static struct component_config_offsets decoder_binning_cfg_table[DEC_ID_SIZE] = {
[DEC_ID_DCORE0_DEC0] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE0_VDEC0_CS,
.stm_id = GAUDI2_STM_DCORE0_VDEC0_CS,
.spmu_id = GAUDI2_SPMU_DCORE0_VDEC0_CS,
.bmon_count = 3,
.bmon_ids = {
GAUDI2_BMON_DCORE0_VDEC0_0,
GAUDI2_BMON_DCORE0_VDEC0_1,
GAUDI2_BMON_DCORE0_VDEC0_2,
}
},
[DEC_ID_DCORE0_DEC1] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE0_VDEC1_CS,
.stm_id = GAUDI2_STM_DCORE0_VDEC1_CS,
.spmu_id = GAUDI2_SPMU_DCORE0_VDEC1_CS,
.bmon_count = 3,
.bmon_ids = {
GAUDI2_BMON_DCORE0_VDEC1_0,
GAUDI2_BMON_DCORE0_VDEC1_1,
GAUDI2_BMON_DCORE0_VDEC1_2,
}
},
[DEC_ID_DCORE1_DEC0] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE1_VDEC0_CS,
.stm_id = GAUDI2_STM_DCORE1_VDEC0_CS,
.spmu_id = GAUDI2_SPMU_DCORE1_VDEC0_CS,
.bmon_count = 3,
.bmon_ids = {
GAUDI2_BMON_DCORE1_VDEC0_0,
GAUDI2_BMON_DCORE1_VDEC0_1,
GAUDI2_BMON_DCORE1_VDEC0_2,
}
},
[DEC_ID_DCORE1_DEC1] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE1_VDEC1_CS,
.stm_id = GAUDI2_STM_DCORE1_VDEC1_CS,
.spmu_id = GAUDI2_SPMU_DCORE1_VDEC1_CS,
.bmon_count = 3,
.bmon_ids = {
GAUDI2_BMON_DCORE1_VDEC1_0,
GAUDI2_BMON_DCORE1_VDEC1_1,
GAUDI2_BMON_DCORE1_VDEC1_2,
}
},
[DEC_ID_DCORE2_DEC0] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE2_VDEC0_CS,
.stm_id = GAUDI2_STM_DCORE2_VDEC0_CS,
.spmu_id = GAUDI2_SPMU_DCORE2_VDEC0_CS,
.bmon_count = 3,
.bmon_ids = {
GAUDI2_BMON_DCORE2_VDEC0_0,
GAUDI2_BMON_DCORE2_VDEC0_1,
GAUDI2_BMON_DCORE2_VDEC0_2,
}
},
[DEC_ID_DCORE2_DEC1] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE2_VDEC1_CS,
.stm_id = GAUDI2_STM_DCORE2_VDEC1_CS,
.spmu_id = GAUDI2_SPMU_DCORE2_VDEC1_CS,
.bmon_count = 3,
.bmon_ids = {
GAUDI2_BMON_DCORE2_VDEC1_0,
GAUDI2_BMON_DCORE2_VDEC1_1,
GAUDI2_BMON_DCORE2_VDEC1_2,
}
},
[DEC_ID_DCORE3_DEC0] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE3_VDEC0_CS,
.stm_id = GAUDI2_STM_DCORE3_VDEC0_CS,
.spmu_id = GAUDI2_SPMU_DCORE3_VDEC0_CS,
.bmon_count = 3,
.bmon_ids = {
GAUDI2_BMON_DCORE3_VDEC0_0,
GAUDI2_BMON_DCORE3_VDEC0_1,
GAUDI2_BMON_DCORE3_VDEC0_2,
}
},
[DEC_ID_DCORE3_DEC1] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE3_VDEC1_CS,
.stm_id = GAUDI2_STM_DCORE3_VDEC1_CS,
.spmu_id = GAUDI2_SPMU_DCORE3_VDEC1_CS,
.bmon_count = 3,
.bmon_ids = {
GAUDI2_BMON_DCORE3_VDEC1_0,
GAUDI2_BMON_DCORE3_VDEC1_1,
GAUDI2_BMON_DCORE3_VDEC1_2,
}
},
[DEC_ID_PCIE_VDEC0] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_PCIE_VDEC0_CS,
.stm_id = GAUDI2_STM_PCIE_VDEC0_CS,
.spmu_id = GAUDI2_SPMU_PCIE_VDEC0_CS,
.bmon_count = 3,
.bmon_ids = {
GAUDI2_BMON_PCIE_VDEC0_0,
GAUDI2_BMON_PCIE_VDEC0_1,
GAUDI2_BMON_PCIE_VDEC0_2,
}
},
[DEC_ID_PCIE_VDEC1] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_PCIE_VDEC1_CS,
.stm_id = GAUDI2_STM_PCIE_VDEC1_CS,
.spmu_id = GAUDI2_SPMU_PCIE_VDEC1_CS,
.bmon_count = 3,
.bmon_ids = {
GAUDI2_BMON_PCIE_VDEC1_0,
GAUDI2_BMON_PCIE_VDEC1_1,
GAUDI2_BMON_PCIE_VDEC1_2,
}
},
};
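
/* Per-EDMA CoreSight component IDs used when handling binned EDMA engines */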
static struct component_config_offsets edma_binning_cfg_table[EDMA_ID_SIZE] = {
[EDMA_ID_DCORE0_INSTANCE0] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE0_EDMA0_CS,
.stm_id = GAUDI2_STM_DCORE0_EDMA0_CS,
.spmu_id = GAUDI2_SPMU_DCORE0_EDMA0_CS,
.bmon_count = 2,
.bmon_ids = {
GAUDI2_BMON_DCORE0_EDMA0_0,
GAUDI2_BMON_DCORE0_EDMA0_1,
}
},
[EDMA_ID_DCORE0_INSTANCE1] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE0_EDMA1_CS,
.stm_id = GAUDI2_STM_DCORE0_EDMA1_CS,
.spmu_id = GAUDI2_SPMU_DCORE0_EDMA1_CS,
.bmon_count = 2,
.bmon_ids = {
GAUDI2_BMON_DCORE0_EDMA1_0,
GAUDI2_BMON_DCORE0_EDMA1_1,
}
},
[EDMA_ID_DCORE1_INSTANCE0] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE1_EDMA0_CS,
.stm_id = GAUDI2_STM_DCORE1_EDMA0_CS,
.spmu_id = GAUDI2_SPMU_DCORE1_EDMA0_CS,
.bmon_count = 2,
.bmon_ids = {
GAUDI2_BMON_DCORE1_EDMA0_0,
GAUDI2_BMON_DCORE1_EDMA0_1,
}
},
[EDMA_ID_DCORE1_INSTANCE1] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE1_EDMA1_CS,
.stm_id = GAUDI2_STM_DCORE1_EDMA1_CS,
.spmu_id = GAUDI2_SPMU_DCORE1_EDMA1_CS,
.bmon_count = 2,
.bmon_ids = {
GAUDI2_BMON_DCORE1_EDMA1_0,
GAUDI2_BMON_DCORE1_EDMA1_1,
}
},
[EDMA_ID_DCORE2_INSTANCE0] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE2_EDMA0_CS,
.stm_id = GAUDI2_STM_DCORE2_EDMA0_CS,
.spmu_id = GAUDI2_SPMU_DCORE2_EDMA0_CS,
.bmon_count = 2,
.bmon_ids = {
GAUDI2_BMON_DCORE2_EDMA0_0,
GAUDI2_BMON_DCORE2_EDMA0_1,
}
},
[EDMA_ID_DCORE2_INSTANCE1] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE2_EDMA1_CS,
.stm_id = GAUDI2_STM_DCORE2_EDMA1_CS,
.spmu_id = GAUDI2_SPMU_DCORE2_EDMA1_CS,
.bmon_count = 2,
.bmon_ids = {
GAUDI2_BMON_DCORE2_EDMA1_0,
GAUDI2_BMON_DCORE2_EDMA1_1,
}
},
[EDMA_ID_DCORE3_INSTANCE0] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE3_EDMA0_CS,
.stm_id = GAUDI2_STM_DCORE3_EDMA0_CS,
.spmu_id = GAUDI2_SPMU_DCORE3_EDMA0_CS,
.bmon_count = 2,
.bmon_ids = {
GAUDI2_BMON_DCORE3_EDMA0_0,
GAUDI2_BMON_DCORE3_EDMA0_1,
}
},
[EDMA_ID_DCORE3_INSTANCE1] = {
.funnel_id = COMPONENT_ID_INVALID,
.etf_id = GAUDI2_ETF_DCORE3_EDMA1_CS,
.stm_id = GAUDI2_STM_DCORE3_EDMA1_CS,
.spmu_id = GAUDI2_SPMU_DCORE3_EDMA1_CS,
.bmon_count = 2,
.bmon_ids = {
GAUDI2_BMON_DCORE3_EDMA1_0,
GAUDI2_BMON_DCORE3_EDMA1_1,
}
},
};
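
/* Per-TPC CoreSight component IDs used when handling binned TPC engines */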
static struct component_config_offsets tpc_binning_cfg_table[TPC_ID_SIZE] = {
[TPC_ID_DCORE0_TPC0] = {
.funnel_id = GAUDI2_FUNNEL_DCORE0_TPC0_EML,
.etf_id = GAUDI2_ETF_DCORE0_TPC0_EML,
.stm_id = GAUDI2_STM_DCORE0_TPC0_EML,
.spmu_id = GAUDI2_SPMU_DCORE0_TPC0_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE0_TPC0_EML_0,
GAUDI2_BMON_DCORE0_TPC0_EML_1,
GAUDI2_BMON_DCORE0_TPC0_EML_2,
GAUDI2_BMON_DCORE0_TPC0_EML_3,
}
},
[TPC_ID_DCORE0_TPC1] = {
.funnel_id = GAUDI2_FUNNEL_DCORE0_TPC1_EML,
.etf_id = GAUDI2_ETF_DCORE0_TPC1_EML,
.stm_id = GAUDI2_STM_DCORE0_TPC1_EML,
.spmu_id = GAUDI2_SPMU_DCORE0_TPC1_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE0_TPC1_EML_0,
GAUDI2_BMON_DCORE0_TPC1_EML_1,
GAUDI2_BMON_DCORE0_TPC1_EML_2,
GAUDI2_BMON_DCORE0_TPC1_EML_3,
}
},
[TPC_ID_DCORE0_TPC2] = {
.funnel_id = GAUDI2_FUNNEL_DCORE0_TPC2_EML,
.etf_id = GAUDI2_ETF_DCORE0_TPC2_EML,
.stm_id = GAUDI2_STM_DCORE0_TPC2_EML,
.spmu_id = GAUDI2_SPMU_DCORE0_TPC2_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE0_TPC2_EML_0,
GAUDI2_BMON_DCORE0_TPC2_EML_1,
GAUDI2_BMON_DCORE0_TPC2_EML_2,
GAUDI2_BMON_DCORE0_TPC2_EML_3,
}
},
[TPC_ID_DCORE0_TPC3] = {
.funnel_id = GAUDI2_FUNNEL_DCORE0_TPC3_EML,
.etf_id = GAUDI2_ETF_DCORE0_TPC3_EML,
.stm_id = GAUDI2_STM_DCORE0_TPC3_EML,
.spmu_id = GAUDI2_SPMU_DCORE0_TPC3_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE0_TPC3_EML_0,
GAUDI2_BMON_DCORE0_TPC3_EML_1,
GAUDI2_BMON_DCORE0_TPC3_EML_2,
GAUDI2_BMON_DCORE0_TPC3_EML_3,
}
},
[TPC_ID_DCORE0_TPC4] = {
.funnel_id = GAUDI2_FUNNEL_DCORE0_TPC4_EML,
.etf_id = GAUDI2_ETF_DCORE0_TPC4_EML,
.stm_id = GAUDI2_STM_DCORE0_TPC4_EML,
.spmu_id = GAUDI2_SPMU_DCORE0_TPC4_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE0_TPC4_EML_0,
GAUDI2_BMON_DCORE0_TPC4_EML_1,
GAUDI2_BMON_DCORE0_TPC4_EML_2,
GAUDI2_BMON_DCORE0_TPC4_EML_3,
}
},
[TPC_ID_DCORE0_TPC5] = {
.funnel_id = GAUDI2_FUNNEL_DCORE0_TPC5_EML,
.etf_id = GAUDI2_ETF_DCORE0_TPC5_EML,
.stm_id = GAUDI2_STM_DCORE0_TPC5_EML,
.spmu_id = GAUDI2_SPMU_DCORE0_TPC5_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE0_TPC5_EML_0,
GAUDI2_BMON_DCORE0_TPC5_EML_1,
GAUDI2_BMON_DCORE0_TPC5_EML_2,
GAUDI2_BMON_DCORE0_TPC5_EML_3,
}
},
[TPC_ID_DCORE1_TPC0] = {
.funnel_id = GAUDI2_FUNNEL_DCORE1_TPC0_EML,
.etf_id = GAUDI2_ETF_DCORE1_TPC0_EML,
.stm_id = GAUDI2_STM_DCORE1_TPC0_EML,
.spmu_id = GAUDI2_SPMU_DCORE1_TPC0_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE1_TPC0_EML_0,
GAUDI2_BMON_DCORE1_TPC0_EML_1,
GAUDI2_BMON_DCORE1_TPC0_EML_2,
GAUDI2_BMON_DCORE1_TPC0_EML_3,
}
},
[TPC_ID_DCORE1_TPC1] = {
.funnel_id = GAUDI2_FUNNEL_DCORE1_TPC1_EML,
.etf_id = GAUDI2_ETF_DCORE1_TPC1_EML,
.stm_id = GAUDI2_STM_DCORE1_TPC1_EML,
.spmu_id = GAUDI2_SPMU_DCORE1_TPC1_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE1_TPC1_EML_0,
GAUDI2_BMON_DCORE1_TPC1_EML_1,
GAUDI2_BMON_DCORE1_TPC1_EML_2,
GAUDI2_BMON_DCORE1_TPC1_EML_3,
}
},
[TPC_ID_DCORE1_TPC2] = {
.funnel_id = GAUDI2_FUNNEL_DCORE1_TPC2_EML,
.etf_id = GAUDI2_ETF_DCORE1_TPC2_EML,
.stm_id = GAUDI2_STM_DCORE1_TPC2_EML,
.spmu_id = GAUDI2_SPMU_DCORE1_TPC2_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE1_TPC2_EML_0,
GAUDI2_BMON_DCORE1_TPC2_EML_1,
GAUDI2_BMON_DCORE1_TPC2_EML_2,
GAUDI2_BMON_DCORE1_TPC2_EML_3,
}
},
[TPC_ID_DCORE1_TPC3] = {
.funnel_id = GAUDI2_FUNNEL_DCORE1_TPC3_EML,
.etf_id = GAUDI2_ETF_DCORE1_TPC3_EML,
.stm_id = GAUDI2_STM_DCORE1_TPC3_EML,
.spmu_id = GAUDI2_SPMU_DCORE1_TPC3_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE1_TPC3_EML_0,
GAUDI2_BMON_DCORE1_TPC3_EML_1,
GAUDI2_BMON_DCORE1_TPC3_EML_2,
GAUDI2_BMON_DCORE1_TPC3_EML_3,
}
},
[TPC_ID_DCORE1_TPC4] = {
.funnel_id = GAUDI2_FUNNEL_DCORE1_TPC4_EML,
.etf_id = GAUDI2_ETF_DCORE1_TPC4_EML,
.stm_id = GAUDI2_STM_DCORE1_TPC4_EML,
.spmu_id = GAUDI2_SPMU_DCORE1_TPC4_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE1_TPC4_EML_0,
GAUDI2_BMON_DCORE1_TPC4_EML_1,
GAUDI2_BMON_DCORE1_TPC4_EML_2,
GAUDI2_BMON_DCORE1_TPC4_EML_3,
}
},
[TPC_ID_DCORE1_TPC5] = {
.funnel_id = GAUDI2_FUNNEL_DCORE1_TPC5_EML,
.etf_id = GAUDI2_ETF_DCORE1_TPC5_EML,
.stm_id = GAUDI2_STM_DCORE1_TPC5_EML,
.spmu_id = GAUDI2_SPMU_DCORE1_TPC5_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE1_TPC5_EML_0,
GAUDI2_BMON_DCORE1_TPC5_EML_1,
GAUDI2_BMON_DCORE1_TPC5_EML_2,
GAUDI2_BMON_DCORE1_TPC5_EML_3,
}
},
[TPC_ID_DCORE2_TPC0] = {
.funnel_id = GAUDI2_FUNNEL_DCORE2_TPC0_EML,
.etf_id = GAUDI2_ETF_DCORE2_TPC0_EML,
.stm_id = GAUDI2_STM_DCORE2_TPC0_EML,
.spmu_id = GAUDI2_SPMU_DCORE2_TPC0_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE2_TPC0_EML_0,
GAUDI2_BMON_DCORE2_TPC0_EML_1,
GAUDI2_BMON_DCORE2_TPC0_EML_2,
GAUDI2_BMON_DCORE2_TPC0_EML_3,
}
},
[TPC_ID_DCORE2_TPC1] = {
.funnel_id = GAUDI2_FUNNEL_DCORE2_TPC1_EML,
.etf_id = GAUDI2_ETF_DCORE2_TPC1_EML,
.stm_id = GAUDI2_STM_DCORE2_TPC1_EML,
.spmu_id = GAUDI2_SPMU_DCORE2_TPC1_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE2_TPC1_EML_0,
GAUDI2_BMON_DCORE2_TPC1_EML_1,
GAUDI2_BMON_DCORE2_TPC1_EML_2,
GAUDI2_BMON_DCORE2_TPC1_EML_3,
}
},
[TPC_ID_DCORE2_TPC2] = {
.funnel_id = GAUDI2_FUNNEL_DCORE2_TPC2_EML,
.etf_id = GAUDI2_ETF_DCORE2_TPC2_EML,
.stm_id = GAUDI2_STM_DCORE2_TPC2_EML,
.spmu_id = GAUDI2_SPMU_DCORE2_TPC2_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE2_TPC2_EML_0,
GAUDI2_BMON_DCORE2_TPC2_EML_1,
GAUDI2_BMON_DCORE2_TPC2_EML_2,
GAUDI2_BMON_DCORE2_TPC2_EML_3,
}
},
[TPC_ID_DCORE2_TPC3] = {
.funnel_id = GAUDI2_FUNNEL_DCORE2_TPC3_EML,
.etf_id = GAUDI2_ETF_DCORE2_TPC3_EML,
.stm_id = GAUDI2_STM_DCORE2_TPC3_EML,
.spmu_id = GAUDI2_SPMU_DCORE2_TPC3_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE2_TPC3_EML_0,
GAUDI2_BMON_DCORE2_TPC3_EML_1,
GAUDI2_BMON_DCORE2_TPC3_EML_2,
GAUDI2_BMON_DCORE2_TPC3_EML_3,
}
},
[TPC_ID_DCORE2_TPC4] = {
.funnel_id = GAUDI2_FUNNEL_DCORE2_TPC4_EML,
.etf_id = GAUDI2_ETF_DCORE2_TPC4_EML,
.stm_id = GAUDI2_STM_DCORE2_TPC4_EML,
.spmu_id = GAUDI2_SPMU_DCORE2_TPC4_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE2_TPC4_EML_0,
GAUDI2_BMON_DCORE2_TPC4_EML_1,
GAUDI2_BMON_DCORE2_TPC4_EML_2,
GAUDI2_BMON_DCORE2_TPC4_EML_3,
}
},
[TPC_ID_DCORE2_TPC5] = {
.funnel_id = GAUDI2_FUNNEL_DCORE2_TPC5_EML,
.etf_id = GAUDI2_ETF_DCORE2_TPC5_EML,
.stm_id = GAUDI2_STM_DCORE2_TPC5_EML,
.spmu_id = GAUDI2_SPMU_DCORE2_TPC5_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE2_TPC5_EML_0,
GAUDI2_BMON_DCORE2_TPC5_EML_1,
GAUDI2_BMON_DCORE2_TPC5_EML_2,
GAUDI2_BMON_DCORE2_TPC5_EML_3,
}
},
[TPC_ID_DCORE3_TPC0] = {
.funnel_id = GAUDI2_FUNNEL_DCORE3_TPC0_EML,
.etf_id = GAUDI2_ETF_DCORE3_TPC0_EML,
.stm_id = GAUDI2_STM_DCORE3_TPC0_EML,
.spmu_id = GAUDI2_SPMU_DCORE3_TPC0_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE3_TPC0_EML_0,
GAUDI2_BMON_DCORE3_TPC0_EML_1,
GAUDI2_BMON_DCORE3_TPC0_EML_2,
GAUDI2_BMON_DCORE3_TPC0_EML_3,
}
},
[TPC_ID_DCORE3_TPC1] = {
.funnel_id = GAUDI2_FUNNEL_DCORE3_TPC1_EML,
.etf_id = GAUDI2_ETF_DCORE3_TPC1_EML,
.stm_id = GAUDI2_STM_DCORE3_TPC1_EML,
.spmu_id = GAUDI2_SPMU_DCORE3_TPC1_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE3_TPC1_EML_0,
GAUDI2_BMON_DCORE3_TPC1_EML_1,
GAUDI2_BMON_DCORE3_TPC1_EML_2,
GAUDI2_BMON_DCORE3_TPC1_EML_3,
}
},
[TPC_ID_DCORE3_TPC2] = {
.funnel_id = GAUDI2_FUNNEL_DCORE3_TPC2_EML,
.etf_id = GAUDI2_ETF_DCORE3_TPC2_EML,
.stm_id = GAUDI2_STM_DCORE3_TPC2_EML,
.spmu_id = GAUDI2_SPMU_DCORE3_TPC2_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE3_TPC2_EML_0,
GAUDI2_BMON_DCORE3_TPC2_EML_1,
GAUDI2_BMON_DCORE3_TPC2_EML_2,
GAUDI2_BMON_DCORE3_TPC2_EML_3,
}
},
[TPC_ID_DCORE3_TPC3] = {
.funnel_id = GAUDI2_FUNNEL_DCORE3_TPC3_EML,
.etf_id = GAUDI2_ETF_DCORE3_TPC3_EML,
.stm_id = GAUDI2_STM_DCORE3_TPC3_EML,
.spmu_id = GAUDI2_SPMU_DCORE3_TPC3_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE3_TPC3_EML_0,
GAUDI2_BMON_DCORE3_TPC3_EML_1,
GAUDI2_BMON_DCORE3_TPC3_EML_2,
GAUDI2_BMON_DCORE3_TPC3_EML_3,
}
},
[TPC_ID_DCORE3_TPC4] = {
.funnel_id = GAUDI2_FUNNEL_DCORE3_TPC4_EML,
.etf_id = GAUDI2_ETF_DCORE3_TPC4_EML,
.stm_id = GAUDI2_STM_DCORE3_TPC4_EML,
.spmu_id = GAUDI2_SPMU_DCORE3_TPC4_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE3_TPC4_EML_0,
GAUDI2_BMON_DCORE3_TPC4_EML_1,
GAUDI2_BMON_DCORE3_TPC4_EML_2,
GAUDI2_BMON_DCORE3_TPC4_EML_3,
}
},
[TPC_ID_DCORE3_TPC5] = {
.funnel_id = GAUDI2_FUNNEL_DCORE3_TPC5_EML,
.etf_id = GAUDI2_ETF_DCORE3_TPC5_EML,
.stm_id = GAUDI2_STM_DCORE3_TPC5_EML,
.spmu_id = GAUDI2_SPMU_DCORE3_TPC5_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE3_TPC5_EML_0,
GAUDI2_BMON_DCORE3_TPC5_EML_1,
GAUDI2_BMON_DCORE3_TPC5_EML_2,
GAUDI2_BMON_DCORE3_TPC5_EML_3,
}
},
[TPC_ID_DCORE0_TPC6] = {
.funnel_id = GAUDI2_FUNNEL_DCORE0_TPC6_EML,
.etf_id = GAUDI2_ETF_DCORE0_TPC6_EML,
.stm_id = GAUDI2_STM_DCORE0_TPC6_EML,
.spmu_id = GAUDI2_SPMU_DCORE0_TPC6_EML,
.bmon_count = 4,
.bmon_ids = {
GAUDI2_BMON_DCORE0_TPC6_EML_0,
GAUDI2_BMON_DCORE0_TPC6_EML_1,
GAUDI2_BMON_DCORE0_TPC6_EML_2,
GAUDI2_BMON_DCORE0_TPC6_EML_3,
}
}
};
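
/*
 * Poll @addr until bit @position is set (@up is true) or cleared (@up is
 * false), using a longer timeout on pldm platforms.
 */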
static int gaudi2_coresight_timeout(struct hl_device *hdev, u64 addr,
int position, bool up)
{
int rc;
u32 val, timeout_usec;
if (hdev->pldm)
timeout_usec = GAUDI2_PLDM_CORESIGHT_TIMEOUT_USEC;
else
timeout_usec = CORESIGHT_TIMEOUT_USEC;
rc = hl_poll_timeout(
hdev,
addr,
val,
up ? val & BIT(position) : !(val & BIT(position)),
1000,
timeout_usec);
if (rc)
dev_err(hdev->dev,
"Timeout while waiting for coresight, addr: 0x%llx, position: %d, up: %d\n",
addr, position, up);
return rc;
}
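
/*
 * Unlock a CoreSight unit by writing the unlock key to its unlock register
 * and waiting for the lock-status bit to clear.
 */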
static int gaudi2_unlock_coresight_unit(struct hl_device *hdev,
const u64 base_reg)
{
int rc = 0;
WREG32(base_reg + mmCORESIGHT_UNLOCK_REGISTER_OFFSET, CORESIGHT_UNLOCK);
rc = gaudi2_coresight_timeout(hdev, base_reg + mmCORESIGHT_UNLOCK_STATUS_REGISTER_OFFSET,
1, 0);
if (rc)
dev_err(hdev->dev,
"Failed to unlock register base addr: 0x%llx , position: 1, up: 0\n",
base_reg);
return rc;
}
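
/* Enable or disable an STM (system trace macrocell) unit per the user's debug parameters */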
static int gaudi2_config_stm(struct hl_device *hdev, struct hl_debug_params *params)
{
struct hl_debug_params_stm *input;
u64 base_reg;
u32 frequency;
u32 read_reg;
int rc;
if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
dev_err(hdev->dev, "Invalid register index in STM\n");
return -EINVAL;
}
base_reg = debug_stm_regs[params->reg_idx];
	/*
	 * If the base register is 0x0, ignore this configuration.
	 */
if (!base_reg)
return 0;
	/* On pldm, check whether this is a stub component by reading the
	 * STMDMAIDR register (offset 0xCFC); a read value of 0x0 indicates
	 * a stub component, in which case the configuration is skipped.
	 */
read_reg = RREG32(base_reg + mmSTM_STMDMAIDR_OFFSET);
if (hdev->pldm && read_reg == 0x0)
return 0;
rc = gaudi2_unlock_coresight_unit(hdev, base_reg);
if (rc)
return -EIO;
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
WREG32(base_reg + mmSTM_STMTCSR_OFFSET, 0x80004);
/* dummy read for pldm to flush outstanding writes */
if (hdev->pldm)
RREG32(base_reg + mmSTM_STMTCSR_OFFSET);
WREG32(base_reg + mmSTM_STMHEMCR_OFFSET, 7);
WREG32(base_reg + mmSTM_STMHEBSR_OFFSET, 0);
WREG32(base_reg + mmSTM_STMHEER_OFFSET, lower_32_bits(input->he_mask));
WREG32(base_reg + mmSTM_STMHEBSR_OFFSET, 1);
WREG32(base_reg + mmSTM_STMHEER_OFFSET, upper_32_bits(input->he_mask));
WREG32(base_reg + mmSTM_STMSPTRIGCSR_OFFSET, 0x10);
WREG32(base_reg + mmSTM_STMSPSCR_OFFSET, 0);
WREG32(base_reg + mmSTM_STMSPER_OFFSET, lower_32_bits(input->sp_mask));
WREG32(base_reg + mmSTM_STMITATBID_OFFSET, input->id);
WREG32(base_reg + mmSTM_STMHEMASTR_OFFSET, 0x80);
frequency = hdev->asic_prop.psoc_timestamp_frequency;
if (frequency == 0)
frequency = input->frequency;
WREG32(base_reg + mmSTM_STMTSFREQR_OFFSET, frequency);
WREG32(base_reg + mmSTM_STMSYNCR_OFFSET, 0x7FF);
WREG32(base_reg + mmSTM_STMTCSR_OFFSET, 0x27 | (input->id << 16));
} else {
WREG32(base_reg + mmSTM_STMTCSR_OFFSET, 4);
WREG32(base_reg + mmSTM_STMHEMCR_OFFSET, 0);
WREG32(base_reg + mmSTM_STMHEBSR_OFFSET, 1);
WREG32(base_reg + mmSTM_STMHEER_OFFSET, 0);
WREG32(base_reg + mmSTM_STMHETER_OFFSET, 0);
WREG32(base_reg + mmSTM_STMHEBSR_OFFSET, 0);
WREG32(base_reg + mmSTM_STMSPTER_OFFSET, 0);
WREG32(base_reg + mmSTM_STMSPER_OFFSET, 0);
WREG32(base_reg + mmSTM_STMHEMASTR_OFFSET, 0x80);
WREG32(base_reg + mmSTM_STMSPTRIGCSR_OFFSET, 0);
WREG32(base_reg + mmSTM_STMSPSCR_OFFSET, 0);
WREG32(base_reg + mmSTM_STMSPMSCR_OFFSET, 0);
WREG32(base_reg + mmSTM_STMTSFREQR_OFFSET, 0);
rc = gaudi2_coresight_timeout(hdev, base_reg + mmSTM_STMTCSR_OFFSET, 23, false);
if (rc) {
dev_err(hdev->dev, "Failed to disable STM on timeout, error %d\n", rc);
return rc;
}
WREG32(base_reg + mmSTM_STMTCSR_OFFSET, 4);
}
return 0;
}
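
/* Enable or disable an ETF (embedded trace FIFO) unit per the user's debug parameters */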
static int gaudi2_config_etf(struct hl_device *hdev, struct hl_debug_params *params)
{
struct hl_debug_params_etf *input;
u64 base_reg;
u32 read_reg;
u32 val;
int rc;
if (params->reg_idx >= ARRAY_SIZE(debug_etf_regs)) {
dev_err(hdev->dev, "Invalid register index in ETF\n");
return -EINVAL;
}
base_reg = debug_etf_regs[params->reg_idx];
	/*
	 * If the base register is 0x0, ignore this configuration.
	 */
if (!base_reg)
return 0;
	/* On pldm, check whether the unit is a stub by reading the ETF STS
	 * register; a read value of 0x0 indicates a stub, in which case the
	 * configuration is skipped and 0 (success) is returned.
	 */
read_reg = RREG32(base_reg + mmETF_STS_OFFSET);
if (hdev->pldm && read_reg == 0x0)
return 0;
rc = gaudi2_unlock_coresight_unit(hdev, base_reg);
if (rc)
return -EIO;
val = RREG32(base_reg + mmETF_FFCR_OFFSET);
val |= 0x1000;
WREG32(base_reg + mmETF_FFCR_OFFSET, val);
val |= 0x40;
WREG32(base_reg + mmETF_FFCR_OFFSET, val);
rc = gaudi2_coresight_timeout(hdev, base_reg + mmETF_FFCR_OFFSET, 6, false);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETF on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
rc = gaudi2_coresight_timeout(hdev, base_reg + mmETF_STS_OFFSET, 2, true);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETF on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
WREG32(base_reg + mmETF_CTL_OFFSET, 0);
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
WREG32(base_reg + mmETF_BUFWM_OFFSET, 0x3FFC);
WREG32(base_reg + mmETF_MODE_OFFSET, input->sink_mode);
WREG32(base_reg + mmETF_FFCR_OFFSET, 0x4001);
WREG32(base_reg + mmETF_PSCR_OFFSET, 0x10);
WREG32(base_reg + mmETF_CTL_OFFSET, 1);
} else {
WREG32(base_reg + mmETF_BUFWM_OFFSET, 0);
WREG32(base_reg + mmETF_MODE_OFFSET, 0);
WREG32(base_reg + mmETF_FFCR_OFFSET, 0);
}
return 0;
}
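
/*
 * Check that the ETR buffer does not wrap around and resides entirely inside
 * a device-accessible range: the PMMU, huge-PMMU or DMMU virtual ranges when
 * the PMMU is initialized, DRAM otherwise, or SRAM.
 */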
static bool gaudi2_etr_validate_address(struct hl_device *hdev, u64 addr, u64 size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (addr > (addr + size)) {
dev_err(hdev->dev, "ETR buffer size %llu overflow\n", size);
return false;
}
if (gaudi2->hw_cap_initialized & HW_CAP_PMMU) {
if (hl_mem_area_inside_range(addr, size,
prop->pmmu.start_addr,
prop->pmmu.end_addr))
return true;
if (hl_mem_area_inside_range(addr, size,
prop->pmmu_huge.start_addr,
prop->pmmu_huge.end_addr))
return true;
if (hl_mem_area_inside_range(addr, size,
prop->dmmu.start_addr,
prop->dmmu.end_addr))
return true;
} else {
if (hl_mem_area_inside_range(addr, size,
prop->dram_user_base_address,
prop->dram_end_address))
return true;
}
if (hl_mem_area_inside_range(addr, size,
prop->sram_user_base_address,
prop->sram_end_address))
return true;
if (!(gaudi2->hw_cap_initialized & HW_CAP_PMMU))
dev_err(hdev->dev, "ETR buffer should be in SRAM/DRAM\n");
return false;
}
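
/*
 * Enable or disable the PSOC ETR (embedded trace router). On disable, the
 * 64-bit write pointer is reported back in params->output when the output
 * buffer is large enough to hold it.
 */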
static int gaudi2_config_etr(struct hl_device *hdev, struct hl_ctx *ctx,
struct hl_debug_params *params)
{
struct hl_debug_params_etr *input;
u64 msb;
u32 val;
int rc;
rc = gaudi2_unlock_coresight_unit(hdev, mmPSOC_ETR_BASE);
if (rc)
return -EIO;
val = RREG32(mmPSOC_ETR_FFCR);
val |= 0x1000;
WREG32(mmPSOC_ETR_FFCR, val);
val |= 0x40;
WREG32(mmPSOC_ETR_FFCR, val);
rc = gaudi2_coresight_timeout(hdev, mmPSOC_ETR_FFCR, 6, false);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
rc = gaudi2_coresight_timeout(hdev, mmPSOC_ETR_STS, 2, true);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
WREG32(mmPSOC_ETR_CTL, 0);
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
if (input->buffer_size == 0) {
dev_err(hdev->dev, "ETR buffer size should be bigger than 0\n");
return -EINVAL;
}
if (!gaudi2_etr_validate_address(hdev, input->buffer_address, input->buffer_size)) {
dev_err(hdev->dev, "ETR buffer address is invalid\n");
return -EINVAL;
}
RMWREG32(mmPSOC_GLOBAL_CONF_TRACE_AWUSER, ctx->asid, MMUBP_ASID_MASK);
RMWREG32(mmPSOC_GLOBAL_CONF_TRACE_ARUSER, ctx->asid, MMUBP_ASID_MASK);
msb = upper_32_bits(input->buffer_address) >> 8;
WREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR, msb);
WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
WREG32(mmPSOC_ETR_MODE, input->sink_mode);
		/* write the protection bits only if security is disabled */
if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
/* make ETR not privileged */
val = FIELD_PREP(PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK, 0);
/* make ETR non-secured (inverted logic) */
val |= FIELD_PREP(PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK, 1);
/* burst size 16 */
val |= FIELD_PREP(PSOC_ETR_AXICTL_WRBURSTLEN_MASK, 0xF);
WREG32(mmPSOC_ETR_AXICTL, val);
}
WREG32(mmPSOC_ETR_DBALO, lower_32_bits(input->buffer_address));
WREG32(mmPSOC_ETR_DBAHI, upper_32_bits(input->buffer_address));
WREG32(mmPSOC_ETR_FFCR, 3);
WREG32(mmPSOC_ETR_PSCR, 0x10);
WREG32(mmPSOC_ETR_CTL, 1);
} else {
WREG32(mmPSOC_ETR_BUFWM, 0);
WREG32(mmPSOC_ETR_RSZ, 0x400);
WREG32(mmPSOC_ETR_DBALO, 0);
WREG32(mmPSOC_ETR_DBAHI, 0);
WREG32(mmPSOC_ETR_PSCR, 0);
WREG32(mmPSOC_ETR_MODE, 0);
WREG32(mmPSOC_ETR_FFCR, 0);
if (params->output_size >= sizeof(u64)) {
u32 rwp, rwphi;
/*
* The trace buffer address is 64 bits wide. The end of
* the buffer is set in the RWP register (lower 32
* bits), and in the RWPHI register (upper 8 bits).
* The 24 msb of the 64-bit address are stored in a
* global configuration register.
*/
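			/*
			 * Example (hypothetical value): for the 64-bit write
			 * pointer 0xF123456789ABCDEF, RWP holds 0x89ABCDEF,
			 * RWPHI holds 0x67 and the global configuration
			 * register holds 0xF12345.
			 */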
rwp = RREG32(mmPSOC_ETR_RWP);
rwphi = RREG32(mmPSOC_ETR_RWPHI) & 0xff;
msb = RREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR);
*(u64 *) params->output = ((u64) msb << 40) | ((u64) rwphi << 32) | rwp;
}
}
return 0;
}
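
/* Enable or disable a trace funnel unit */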
static int gaudi2_config_funnel(struct hl_device *hdev, struct hl_debug_params *params)
{
u64 base_reg;
u32 val = params->enable ? 0xFFF : 0;
u32 read_reg;
int rc = 0;
if (params->reg_idx >= ARRAY_SIZE(debug_funnel_regs)) {
dev_err(hdev->dev, "Invalid register index in FUNNEL\n");
return -EINVAL;
}
base_reg = debug_funnel_regs[params->reg_idx];
	/*
	 * If the base register is 0x0, ignore this configuration.
	 */
if (!base_reg)
return 0;
	/* On pldm, check whether the unit is a stub by reading the DEVID
	 * value; a read value of 0x0 indicates a stub, in which case the
	 * configuration is skipped and 0 (success) is returned.
	 */
read_reg = RREG32(base_reg + mmFUNNEL_DEVID_OFFSET);
if (hdev->pldm && read_reg == 0x0)
return 0;
rc = gaudi2_unlock_coresight_unit(hdev, base_reg);
if (rc)
return -EIO;
WREG32(base_reg, val);
return 0;
}
static int gaudi2_config_bmon(struct hl_device *hdev, struct hl_debug_params *params)
{
struct hl_debug_params_bmon *input;
u64 base_reg;
u32 read_reg;
if (params->reg_idx >= ARRAY_SIZE(debug_bmon_regs)) {
dev_err(hdev->dev, "Invalid register index in BMON\n");
return -EINVAL;
}
base_reg = debug_bmon_regs[params->reg_idx];
/*
 * if the base register is 0x0, skip this configuration
 */
if (!base_reg)
return 0;
/* In pldm we need to check that the unit is not a stub.
 * To do so, read the Control Register (offset 0x0): if it
 * reads back as 0x0, the unit is a stub, so we skip the
 * configuration and return 0 (success).
 */
read_reg = RREG32(base_reg + mmBMON_CR_OFFSET);
if (hdev->pldm && read_reg == 0x0)
return 0;
WREG32(base_reg + mmBMON_ATTREN_OFFSET, 1);
/* dummy read for pldm to flush outstanding writes */
if (hdev->pldm)
RREG32(base_reg + mmBMON_ATTREN_OFFSET);
/* Write-only register: reset AXIMON */
WREG32(base_reg + mmBMON_RESET_OFFSET, 0x1);
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
WREG32(base_reg + mmBMON_ADDRL_S0_OFFSET, lower_32_bits(input->start_addr0));
WREG32(base_reg + mmBMON_ADDRH_S0_OFFSET, upper_32_bits(input->start_addr0));
WREG32(base_reg + mmBMON_ADDRL_E0_OFFSET, lower_32_bits(input->addr_mask0));
WREG32(base_reg + mmBMON_ADDRH_E0_OFFSET, upper_32_bits(input->addr_mask0));
WREG32(base_reg + mmBMON_ADDRL_S1_OFFSET, lower_32_bits(input->start_addr1));
WREG32(base_reg + mmBMON_ADDRH_S1_OFFSET, upper_32_bits(input->start_addr1));
WREG32(base_reg + mmBMON_ADDRL_E1_OFFSET, lower_32_bits(input->addr_mask1));
WREG32(base_reg + mmBMON_ADDRH_E1_OFFSET, upper_32_bits(input->addr_mask1));
WREG32(base_reg + mmBMON_ADDRL_S2_OFFSET, lower_32_bits(input->start_addr2));
WREG32(base_reg + mmBMON_ADDRH_S2_OFFSET, upper_32_bits(input->start_addr2));
WREG32(base_reg + mmBMON_ADDRL_E2_OFFSET, lower_32_bits(input->end_addr2));
WREG32(base_reg + mmBMON_ADDRH_E2_OFFSET, upper_32_bits(input->end_addr2));
WREG32(base_reg + mmBMON_ADDRL_S3_OFFSET, lower_32_bits(input->start_addr3));
WREG32(base_reg + mmBMON_ADDRH_S3_OFFSET, upper_32_bits(input->start_addr3));
WREG32(base_reg + mmBMON_ADDRL_E3_OFFSET, lower_32_bits(input->end_addr3));
WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, upper_32_bits(input->end_addr3));
WREG32(base_reg + mmBMON_IDL_OFFSET, 0x0);
WREG32(base_reg + mmBMON_IDH_OFFSET, 0x0);
WREG32(base_reg + mmBMON_ATTREN_OFFSET, 0);
WREG32(base_reg + mmBMON_BW_WIN_OFFSET, input->bw_win);
WREG32(base_reg + mmBMON_WIN_CAPTURE_OFFSET, input->win_capture);
WREG32(base_reg + mmBMON_REDUCTION_OFFSET, 0x1 | (13 << 8));
WREG32(base_reg + mmBMON_STM_TRC_OFFSET, 0x7 | (input->id << 8));
WREG32(base_reg + mmBMON_CR_OFFSET, input->control);
} else {
WREG32(base_reg + mmBMON_ADDRL_S0_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRH_S0_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRL_E0_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRH_E0_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRL_S1_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRH_S1_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRL_E1_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRH_E1_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRL_S2_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRH_S2_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRL_E2_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRH_E2_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRL_S3_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRH_S3_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRL_E3_OFFSET, 0);
WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, 0);
WREG32(base_reg + mmBMON_REDUCTION_OFFSET, 0);
WREG32(base_reg + mmBMON_STM_TRC_OFFSET, 0x7 | (0xA << 8));
WREG32(base_reg + mmBMON_CR_OFFSET, 0x77 | 0xf << 24);
}
return 0;
}
static int gaudi2_config_spmu(struct hl_device *hdev, struct hl_debug_params *params)
{
struct hl_debug_params_spmu *input = params->input;
u32 output_arr_len;
u32 cycle_cnt_idx;
u32 overflow_idx;
u32 events_num;
u32 event_mask;
u64 base_reg;
u32 read_reg;
u64 *output;
int i;
if (params->reg_idx >= ARRAY_SIZE(debug_spmu_regs)) {
dev_err(hdev->dev, "Invalid register index in SPMU\n");
return -EINVAL;
}
base_reg = debug_spmu_regs[params->reg_idx];
/*
 * if the base register is 0x0, skip this configuration
 */
if (!base_reg)
return 0;
/* In pldm we need to check that the unit is not a stub.
 * To do so, read PMTRC (at offset 0x200): if it reads back
 * as 0x0, the unit is a stub, so we skip the configuration
 * and return 0 (success).
 */
read_reg = RREG32(base_reg + mmSPMU_PMCR_EL0_OFFSET);
if (hdev->pldm && read_reg == 0x0)
return 0;
if (params->enable) {
input = params->input;
if (!input)
return -EINVAL;
if (input->event_types_num > SPMU_MAX_COUNTERS) {
dev_err(hdev->dev, "too many event types values for SPMU enable\n");
return -EINVAL;
}
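/*
 * The PMCR_EL0 write sequence below appears to follow the Arm PMU
 * layout (bit 0 = enable, bit 1 = event-counter reset, bit 2 =
 * cycle-counter reset): reset the counters, program the event types
 * and trace controls while the PMU is disabled, then re-enable it
 * with the final write. This is an interpretation based on the
 * register naming, not taken from the spec of this block.
 */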
WREG32(base_reg + mmSPMU_PMCR_EL0_OFFSET, 0x41013046);
WREG32(base_reg + mmSPMU_PMCR_EL0_OFFSET, 0x41013040);
/* dummy read for pldm to flush outstanding writes */
if (hdev->pldm)
RREG32(base_reg);
for (i = 0 ; i < input->event_types_num ; i++)
WREG32(base_reg + mmSPMU_PMEVTYPER0_EL0_OFFSET + i * 4,
input->event_types[i]);
WREG32(base_reg + mmSPMU_PMTRC_OFFSET, input->pmtrc_val);
WREG32(base_reg + mmSPMU_TRC_CTRL_HOST_OFFSET, input->trc_ctrl_host_val);
WREG32(base_reg + mmSPMU_TRC_EN_HOST_OFFSET, input->trc_en_host_val);
WREG32(base_reg + mmSPMU_PMCR_EL0_OFFSET, 0x41013041);
/*
* set enabled events mask based on input->event_types_num
*/
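/*
 * Bit 31 is assumed to additionally enable the dedicated cycle
 * counter, following the PMCNTENSET_EL0 convention suggested by the
 * register name.
 */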
event_mask = 0x80000000;
event_mask |= GENMASK(input->event_types_num, 0);
WREG32(base_reg + mmSPMU_PMCNTENSET_EL0_OFFSET, event_mask);
} else {
output = params->output;
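/*
 * Output buffer layout (in u64 units): entries [0..events_num-1] hold
 * the event counter values, entry [len-2] holds the overflow status
 * and entry [len-1] holds the 64-bit cycle count.
 */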
output_arr_len = params->output_size / 8;
events_num = output_arr_len - 2;
overflow_idx = output_arr_len - 2;
cycle_cnt_idx = output_arr_len - 1;
WREG32(base_reg + mmSPMU_PMCR_EL0_OFFSET, 0x41013040);
if (output && output_arr_len > 2) {
if (events_num > SPMU_MAX_COUNTERS) {
dev_err(hdev->dev, "too many events values for SPMU disable\n");
return -EINVAL;
}
for (i = 0 ; i < events_num ; i++) {
const u64 performance_counter_offset =
base_reg + mmSPMU_PMEVCNTR0_EL0_OFFSET + (i * 8);
output[i] = RREG32(performance_counter_offset);
}
output[overflow_idx] = RREG32(base_reg + mmSPMU_PMOVSSET_EL0_OFFSET);
output[cycle_cnt_idx] = RREG32(base_reg + mmSPMU_PMCCNTR_H_EL0_OFFSET);
output[cycle_cnt_idx] <<= 32;
output[cycle_cnt_idx] |= RREG32(base_reg + mmSPMU_PMCCNTR_L_EL0_OFFSET);
}
WREG32(base_reg + mmSPMU_PMOVSSET_EL0_OFFSET, 0);
/* restore PMTRC to its reset value */
WREG32(base_reg + mmSPMU_PMTRC_OFFSET, 0x100400);
}
return 0;
}
int gaudi2_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data)
{
struct hl_debug_params *params = data;
int rc = 0;
switch (params->op) {
case HL_DEBUG_OP_STM:
rc = gaudi2_config_stm(hdev, params);
break;
case HL_DEBUG_OP_ETF:
rc = gaudi2_config_etf(hdev, params);
break;
case HL_DEBUG_OP_ETR:
rc = gaudi2_config_etr(hdev, ctx, params);
break;
case HL_DEBUG_OP_FUNNEL:
rc = gaudi2_config_funnel(hdev, params);
break;
case HL_DEBUG_OP_BMON:
rc = gaudi2_config_bmon(hdev, params);
break;
case HL_DEBUG_OP_SPMU:
rc = gaudi2_config_spmu(hdev, params);
break;
case HL_DEBUG_OP_TIMESTAMP:
/* Do nothing as this opcode is deprecated */
break;
default:
dev_err(hdev->dev, "Unknown coresight id %d\n", params->op);
return -EINVAL;
}
return rc;
}
void gaudi2_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_debug_params params = {};
int i, rc;
/* in pldm attempting to access stubbed etfs can cause problems */
if (!hdev->pldm)
for (i = GAUDI2_ETF_FIRST ; i <= GAUDI2_ETF_LAST ; i++) {
params.reg_idx = i;
rc = gaudi2_config_etf(hdev, &params);
if (rc)
dev_err(hdev->dev, "halt ETF failed, %d/%d\n", rc, i);
}
rc = gaudi2_config_etr(hdev, ctx, &params);
if (rc)
dev_err(hdev->dev, "halt ETR failed, %d\n", rc);
}
static int gaudi2_coresight_set_disabled_components(struct hl_device *hdev, u32 unit_count,
u32 enabled_mask,
const struct component_config_offsets *binning_table)
{
u32 component_idx = 0;
u32 disabled_mask;
u32 full_mask;
/* if there are no units, there is nothing to do */
if (!unit_count)
return 0;
full_mask = GENMASK(unit_count - 1, 0);
/* set a bit in the disabled mask for every disabled unit */
disabled_mask = (~enabled_mask) & full_mask;
while (disabled_mask) {
u32 component_mask = 1 << component_idx;
if (component_idx >= unit_count) {
dev_err(hdev->dev, "index is out of range index(%u) >= units_count(%u)\n",
component_idx, unit_count);
return -EINVAL;
}
/*
 * If the component's bit is set in the disabled mask, the driver
 * needs to zero the offsets at the appropriate indices in the
 * following arrays:
 * debug_funnel_regs - offsets for all cs_dbg FUNNELs
 * debug_etf_regs - offsets for all cs_dbg ETFs
 * debug_stm_regs - offsets for all cs_dbg STMs
 * debug_spmu_regs - offsets for all cs_dbg SPMUs
 * debug_bmon_regs - offsets for all cs_dbg BMONs
 * An index value of COMPONENT_ID_INVALID means the current
 * component has no such register.
 */
if (disabled_mask & component_mask) {
u32 bmon_idx;
const struct component_config_offsets *binned_component =
&(binning_table[component_idx]);
if (binned_component->funnel_id != COMPONENT_ID_INVALID)
debug_funnel_regs[binned_component->funnel_id] = 0x0;
if (binned_component->etf_id != COMPONENT_ID_INVALID)
debug_etf_regs[binned_component->etf_id] = 0x0;
if (binned_component->stm_id != COMPONENT_ID_INVALID)
debug_stm_regs[binned_component->stm_id] = 0x0;
if (binned_component->spmu_id != COMPONENT_ID_INVALID)
debug_spmu_regs[binned_component->spmu_id] = 0x0;
for (bmon_idx = 0; bmon_idx < binned_component->bmon_count; bmon_idx++)
debug_bmon_regs[binned_component->bmon_ids[bmon_idx]] = 0x0;
/*
 * clear the handled bit in the disabled mask
 */
disabled_mask &= ~component_mask;
}
component_idx++;
}
return 0;
}
int gaudi2_coresight_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int ret;
/*
 * Mask out all the disabled/binned offsets, so when the user requests
 * to configure a binned or masked-out component, the driver will skip
 * programming it (this happens when the offset value is set to 0x0).
 * The zeroing is done in gaudi2_coresight_set_disabled_components().
 */
/* Set TPC disabled components */
ret = gaudi2_coresight_set_disabled_components(hdev, TPC_ID_SIZE, prop->tpc_enabled_mask,
tpc_binning_cfg_table);
if (ret) {
dev_err(hdev->dev, "Failed to set disabled cs_dbg units for tpc coresight\n");
return ret;
}
/* Set decoder disabled components */
ret = gaudi2_coresight_set_disabled_components(hdev, DEC_ID_SIZE,
prop->decoder_enabled_mask, decoder_binning_cfg_table);
if (ret) {
dev_err(hdev->dev, "Failed to set disabled cs_dbg units for decoder coresight\n");
return ret;
}
/* Set HBM (MC0 and MC1) disabled components */
ret = gaudi2_coresight_set_disabled_components(hdev, HBM_ID_SIZE, prop->dram_enabled_mask,
hbm_mc0_binning_cfg_table);
if (ret) {
dev_err(hdev->dev, "Failed to set disabled cs_dbg units for hbm mc0 coresight\n");
return ret;
}
ret = gaudi2_coresight_set_disabled_components(hdev, HBM_ID_SIZE, prop->dram_enabled_mask,
hbm_mc1_binning_cfg_table);
if (ret) {
dev_err(hdev->dev, "Failed to set disabled cs_dbg units for hbm mc1 coresight\n");
return ret;
}
/* Set HIF_HMMU disabled components */
ret = gaudi2_coresight_set_disabled_components(hdev, HMMU_ID_SIZE,
prop->hmmu_hif_enabled_mask, hmmu_binning_cfg_table);
if (ret) {
dev_err(hdev->dev, "Failed to set disabled cs_dbg units for hmmu coresight\n");
return ret;
}
/* Set XBAR_EDGE disabled components */
ret = gaudi2_coresight_set_disabled_components(hdev, XBAR_EDGE_ID_SIZE,
prop->xbar_edge_enabled_mask, xbar_edge_binning_cfg_table);
if (ret) {
dev_err(hdev->dev, "Failed to set disabled cs_dbg units for xbar_edge coresight\n");
return ret;
}
/* Set EDMA disabled components */
ret = gaudi2_coresight_set_disabled_components(hdev, EDMA_ID_SIZE, prop->edma_enabled_mask,
edma_binning_cfg_table);
if (ret) {
dev_err(hdev->dev, "Failed to set disabled cs_dbg units for edma coresight\n");
return ret;
}
return 0;
}
| linux-master | drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "gaudi2P.h"
#include "../include/gaudi2/asic_reg/gaudi2_regs.h"
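/*
 * Setting a register's bit in the glbl_sec shadow array marks that
 * register as non-secured, i.e. accessible to the user - hence the
 * "unset security bit" name (an interpretation of the habanalabs
 * protection-bits convention).
 */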
#define UNSET_GLBL_SEC_BIT(array, b) ((array)[((b) / 32)] |= (1 << ((b) % 32)))
#define SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_RD PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_RD_MASK
#define SPECIAL_GLBL_ERR_CAUSE_APB_SEC_RD PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_SEC_RD_MASK
#define SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_WR PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_WR_MASK
#define SPECIAL_GLBL_ERR_CAUSE_APB_SEC_WR PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_SEC_WR_MASK
#define SPECIAL_GLBL_ERR_CAUSE_EXT_SEC_WR PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_EXT_SEC_WR_MASK
#define SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_RD \
PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_RD_MASK
#define SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_WR \
PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_WR_MASK
#define SPECIAL_GLBL_ERR_CAUSE_EXT_UNMAPPED_WR \
PDMA0_CORE_SPECIAL_GLBL_ERR_CAUSE_EXT_UNMAPPED_WR_MASK
/* LBW RR */
#define SFT_NUM_OF_LBW_RTR 1
#define SFT_LBW_RTR_OFFSET 0
#define RR_LBW_LONG_MASK 0x7FFFFFFull
#define RR_LBW_SHORT_MASK 0x7FFF000ull
/* HBW RR */
#define SFT_NUM_OF_HBW_RTR 2
#define RR_HBW_SHORT_LO_MASK 0xFFFFFFFF000ull
#define RR_HBW_SHORT_HI_MASK 0xF00000000000ull
#define RR_HBW_LONG_LO_MASK 0xFFFFFFFF000ull
#define RR_HBW_LONG_HI_MASK 0xFFFFF00000000000ull
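/*
 * Each rr_config entry presumably describes one range-register (RR)
 * window: the address range it covers, the RR index to program and
 * whether it is a long or short range (cf. the LONG/SHORT masks above).
 */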
struct rr_config {
u64 min;
u64 max;
u32 index;
u8 type;
};
struct gaudi2_atypical_bp_blocks {
u32 mm_block_base_addr;
u32 block_size;
u32 glbl_sec_offset;
u32 glbl_sec_length;
};
static const struct gaudi2_atypical_bp_blocks gaudi2_pb_dcr0_sm_objs = {
mmDCORE0_SYNC_MNGR_OBJS_BASE,
128 * 1024,
SM_OBJS_PROT_BITS_OFFS,
640
};
static const u32 gaudi2_pb_sft0[] = {
mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE,
mmSFT0_HBW_RTR_IF0_RTR_H3_BASE,
mmSFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
mmSFT0_HBW_RTR_IF0_ADDR_DEC_HBW_BASE,
mmSFT0_HBW_RTR_IF1_RTR_CTRL_BASE,
mmSFT0_HBW_RTR_IF1_RTR_H3_BASE,
mmSFT0_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
mmSFT0_HBW_RTR_IF1_ADDR_DEC_HBW_BASE,
mmSFT0_LBW_RTR_IF_RTR_CTRL_BASE,
mmSFT0_LBW_RTR_IF_RTR_H3_BASE,
mmSFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_BASE,
mmSFT0_LBW_RTR_IF_ADDR_DEC_HBW_BASE,
mmSFT0_BASE,
};
static const u32 gaudi2_pb_dcr0_hif[] = {
mmDCORE0_HIF0_BASE,
};
static const u32 gaudi2_pb_dcr0_rtr0[] = {
mmDCORE0_RTR0_CTRL_BASE,
mmDCORE0_RTR0_H3_BASE,
mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE,
mmDCORE0_RTR0_ADD_DEC_HBW_BASE,
mmDCORE0_RTR0_BASE,
mmDCORE0_RTR0_DBG_ADDR_BASE,
};
static const u32 gaudi2_pb_dcr0_hmmu0[] = {
mmDCORE0_HMMU0_MMU_BASE,
mmDCORE0_HMMU0_MSTR_IF_RR_SHRD_HBW_BASE,
mmDCORE0_HMMU0_SCRAMB_OUT_BASE,
mmDCORE0_HMMU0_STLB_BASE,
};
static const u32 gaudi2_pb_cpu_if[] = {
mmCPU_IF_BASE,
};
static const u32 gaudi2_pb_cpu[] = {
mmCPU_CA53_CFG_BASE,
mmCPU_MSTR_IF_RR_SHRD_HBW_BASE,
};
static const u32 gaudi2_pb_kdma[] = {
mmARC_FARM_KDMA_BASE,
mmARC_FARM_KDMA_MSTR_IF_RR_SHRD_HBW_BASE,
};
static const u32 gaudi2_pb_pdma0[] = {
mmPDMA0_CORE_BASE,
mmPDMA0_MSTR_IF_RR_SHRD_HBW_BASE,
mmPDMA0_QM_BASE,
};
static const u32 gaudi2_pb_pdma0_arc[] = {
mmPDMA0_QM_ARC_AUX_BASE,
};
static const struct range gaudi2_pb_pdma0_arc_unsecured_regs[] = {
{mmPDMA0_QM_ARC_AUX_RUN_HALT_REQ, mmPDMA0_QM_ARC_AUX_RUN_HALT_ACK},
{mmPDMA0_QM_ARC_AUX_CLUSTER_NUM, mmPDMA0_QM_ARC_AUX_WAKE_UP_EVENT},
{mmPDMA0_QM_ARC_AUX_ARC_RST_REQ, mmPDMA0_QM_ARC_AUX_CID_OFFSET_7},
{mmPDMA0_QM_ARC_AUX_SCRATCHPAD_0, mmPDMA0_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT},
{mmPDMA0_QM_ARC_AUX_CBU_EARLY_BRESP_EN, mmPDMA0_QM_ARC_AUX_CBU_EARLY_BRESP_EN},
{mmPDMA0_QM_ARC_AUX_LBU_EARLY_BRESP_EN, mmPDMA0_QM_ARC_AUX_LBU_EARLY_BRESP_EN},
{mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0, mmPDMA0_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG},
{mmPDMA0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT, mmPDMA0_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI},
{mmPDMA0_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT, mmPDMA0_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN},
};
static const u32 gaudi2_pb_pdma0_unsecured_regs[] = {
mmPDMA0_CORE_CTX_AXUSER_HB_WR_REDUCTION,
mmPDMA0_CORE_CTX_WR_COMP_ADDR_HI,
mmPDMA0_CORE_CTX_WR_COMP_ADDR_LO,
mmPDMA0_CORE_CTX_WR_COMP_WDATA,
mmPDMA0_CORE_CTX_SRC_BASE_LO,
mmPDMA0_CORE_CTX_SRC_BASE_HI,
mmPDMA0_CORE_CTX_DST_BASE_LO,
mmPDMA0_CORE_CTX_DST_BASE_HI,
mmPDMA0_CORE_CTX_SRC_TSIZE_0,
mmPDMA0_CORE_CTX_SRC_TSIZE_1,
mmPDMA0_CORE_CTX_SRC_TSIZE_2,
mmPDMA0_CORE_CTX_SRC_TSIZE_3,
mmPDMA0_CORE_CTX_SRC_TSIZE_4,
mmPDMA0_CORE_CTX_SRC_STRIDE_1,
mmPDMA0_CORE_CTX_SRC_STRIDE_2,
mmPDMA0_CORE_CTX_SRC_STRIDE_3,
mmPDMA0_CORE_CTX_SRC_STRIDE_4,
mmPDMA0_CORE_CTX_SRC_OFFSET_LO,
mmPDMA0_CORE_CTX_SRC_OFFSET_HI,
mmPDMA0_CORE_CTX_DST_TSIZE_0,
mmPDMA0_CORE_CTX_DST_TSIZE_1,
mmPDMA0_CORE_CTX_DST_TSIZE_2,
mmPDMA0_CORE_CTX_DST_TSIZE_3,
mmPDMA0_CORE_CTX_DST_TSIZE_4,
mmPDMA0_CORE_CTX_DST_STRIDE_1,
mmPDMA0_CORE_CTX_DST_STRIDE_2,
mmPDMA0_CORE_CTX_DST_STRIDE_3,
mmPDMA0_CORE_CTX_DST_STRIDE_4,
mmPDMA0_CORE_CTX_DST_OFFSET_LO,
mmPDMA0_CORE_CTX_DST_OFFSET_HI,
mmPDMA0_CORE_CTX_COMMIT,
mmPDMA0_CORE_CTX_CTRL,
mmPDMA0_CORE_CTX_TE_NUMROWS,
mmPDMA0_CORE_CTX_IDX,
mmPDMA0_CORE_CTX_IDX_INC,
mmPDMA0_QM_CQ_CFG0_0,
mmPDMA0_QM_CQ_CFG0_1,
mmPDMA0_QM_CQ_CFG0_2,
mmPDMA0_QM_CQ_CFG0_3,
mmPDMA0_QM_CQ_CFG0_4,
mmPDMA0_QM_CP_FENCE0_RDATA_0,
mmPDMA0_QM_CP_FENCE0_RDATA_1,
mmPDMA0_QM_CP_FENCE0_RDATA_2,
mmPDMA0_QM_CP_FENCE0_RDATA_3,
mmPDMA0_QM_CP_FENCE0_RDATA_4,
mmPDMA0_QM_CP_FENCE1_RDATA_0,
mmPDMA0_QM_CP_FENCE1_RDATA_1,
mmPDMA0_QM_CP_FENCE1_RDATA_2,
mmPDMA0_QM_CP_FENCE1_RDATA_3,
mmPDMA0_QM_CP_FENCE1_RDATA_4,
mmPDMA0_QM_CP_FENCE2_RDATA_0,
mmPDMA0_QM_CP_FENCE2_RDATA_1,
mmPDMA0_QM_CP_FENCE2_RDATA_2,
mmPDMA0_QM_CP_FENCE2_RDATA_3,
mmPDMA0_QM_CP_FENCE2_RDATA_4,
mmPDMA0_QM_CP_FENCE3_RDATA_0,
mmPDMA0_QM_CP_FENCE3_RDATA_1,
mmPDMA0_QM_CP_FENCE3_RDATA_2,
mmPDMA0_QM_CP_FENCE3_RDATA_3,
mmPDMA0_QM_CP_FENCE3_RDATA_4,
mmPDMA0_QM_CP_FENCE0_CNT_0,
mmPDMA0_QM_CP_FENCE0_CNT_1,
mmPDMA0_QM_CP_FENCE0_CNT_2,
mmPDMA0_QM_CP_FENCE0_CNT_3,
mmPDMA0_QM_CP_FENCE0_CNT_4,
mmPDMA0_QM_CP_FENCE1_CNT_0,
mmPDMA0_QM_CP_FENCE1_CNT_1,
mmPDMA0_QM_CP_FENCE1_CNT_2,
mmPDMA0_QM_CP_FENCE1_CNT_3,
mmPDMA0_QM_CP_FENCE1_CNT_4,
mmPDMA0_QM_CP_FENCE2_CNT_0,
mmPDMA0_QM_CP_FENCE2_CNT_1,
mmPDMA0_QM_CP_FENCE2_CNT_2,
mmPDMA0_QM_CP_FENCE2_CNT_3,
mmPDMA0_QM_CP_FENCE2_CNT_4,
mmPDMA0_QM_CP_FENCE3_CNT_0,
mmPDMA0_QM_CP_FENCE3_CNT_1,
mmPDMA0_QM_CP_FENCE3_CNT_2,
mmPDMA0_QM_CP_FENCE3_CNT_3,
mmPDMA0_QM_CP_FENCE3_CNT_4,
mmPDMA0_QM_CQ_PTR_LO_0,
mmPDMA0_QM_CQ_PTR_HI_0,
mmPDMA0_QM_CQ_TSIZE_0,
mmPDMA0_QM_CQ_CTL_0,
mmPDMA0_QM_CQ_PTR_LO_1,
mmPDMA0_QM_CQ_PTR_HI_1,
mmPDMA0_QM_CQ_TSIZE_1,
mmPDMA0_QM_CQ_CTL_1,
mmPDMA0_QM_CQ_PTR_LO_2,
mmPDMA0_QM_CQ_PTR_HI_2,
mmPDMA0_QM_CQ_TSIZE_2,
mmPDMA0_QM_CQ_CTL_2,
mmPDMA0_QM_CQ_PTR_LO_3,
mmPDMA0_QM_CQ_PTR_HI_3,
mmPDMA0_QM_CQ_TSIZE_3,
mmPDMA0_QM_CQ_CTL_3,
mmPDMA0_QM_CQ_PTR_LO_4,
mmPDMA0_QM_CQ_PTR_HI_4,
mmPDMA0_QM_CQ_TSIZE_4,
mmPDMA0_QM_CQ_CTL_4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR0_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR0_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR1_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR1_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR2_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR2_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR3_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR3_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR4_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR4_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR5_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR5_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR6_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR6_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR7_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR7_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR8_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR8_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR9_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR9_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR10_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR10_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR11_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR11_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR12_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR12_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR13_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR13_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR14_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR14_BASE + 4,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR15_BASE,
mmPDMA0_QM_QMAN_WR64_BASE_ADDR15_BASE + 4,
mmPDMA0_QM_ARC_CQ_PTR_LO,
mmPDMA0_QM_ARC_CQ_PTR_LO_STS,
mmPDMA0_QM_ARC_CQ_PTR_HI,
mmPDMA0_QM_ARC_CQ_PTR_HI_STS,
mmPDMA0_QM_ARB_CFG_0,
mmPDMA0_QM_ARB_MST_QUIET_PER,
mmPDMA0_QM_ARB_CHOICE_Q_PUSH,
mmPDMA0_QM_ARB_WRR_WEIGHT_0,
mmPDMA0_QM_ARB_WRR_WEIGHT_1,
mmPDMA0_QM_ARB_WRR_WEIGHT_2,
mmPDMA0_QM_ARB_WRR_WEIGHT_3,
mmPDMA0_QM_ARB_BASE_LO,
mmPDMA0_QM_ARB_BASE_HI,
mmPDMA0_QM_ARB_MST_SLAVE_EN,
mmPDMA0_QM_ARB_MST_SLAVE_EN_1,
mmPDMA0_QM_ARB_MST_CRED_INC,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_0,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_1,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_2,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_3,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_4,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_5,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_6,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_7,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_8,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_9,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_10,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_11,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_12,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_13,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_14,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_15,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_16,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_17,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_18,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_19,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_20,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_21,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_22,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_23,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_24,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_25,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_26,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_27,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_28,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_29,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_30,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_31,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_32,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_33,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_34,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_35,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_36,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_37,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_38,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_39,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_40,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_41,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_42,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_43,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_44,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_45,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_46,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_47,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_48,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_49,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_50,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_51,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_52,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_53,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_54,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_55,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_56,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_57,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_58,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_59,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_60,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_61,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_62,
mmPDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_63,
mmPDMA0_QM_ARB_SLV_ID,
mmPDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST,
mmPDMA0_QM_ARC_CQ_CFG0,
mmPDMA0_QM_CQ_IFIFO_CI_0,
mmPDMA0_QM_CQ_IFIFO_CI_1,
mmPDMA0_QM_CQ_IFIFO_CI_2,
mmPDMA0_QM_CQ_IFIFO_CI_3,
mmPDMA0_QM_CQ_IFIFO_CI_4,
mmPDMA0_QM_ARC_CQ_IFIFO_CI,
mmPDMA0_QM_CQ_CTL_CI_0,
mmPDMA0_QM_CQ_CTL_CI_1,
mmPDMA0_QM_CQ_CTL_CI_2,
mmPDMA0_QM_CQ_CTL_CI_3,
mmPDMA0_QM_CQ_CTL_CI_4,
mmPDMA0_QM_ARC_CQ_CTL_CI,
mmPDMA0_QM_ARC_CQ_TSIZE,
mmPDMA0_QM_ARC_CQ_CTL,
mmPDMA0_QM_CP_SWITCH_WD_SET,
mmPDMA0_QM_CP_EXT_SWITCH,
mmPDMA0_QM_CP_PRED_0,
mmPDMA0_QM_CP_PRED_1,
mmPDMA0_QM_CP_PRED_2,
mmPDMA0_QM_CP_PRED_3,
mmPDMA0_QM_CP_PRED_4,
mmPDMA0_QM_CP_PRED_UPEN_0,
mmPDMA0_QM_CP_PRED_UPEN_1,
mmPDMA0_QM_CP_PRED_UPEN_2,
mmPDMA0_QM_CP_PRED_UPEN_3,
mmPDMA0_QM_CP_PRED_UPEN_4,
mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_0,
mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_1,
mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_2,
mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_3,
mmPDMA0_QM_CP_MSG_BASE0_ADDR_LO_4,
mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_0,
mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_1,
mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_2,
mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_3,
mmPDMA0_QM_CP_MSG_BASE0_ADDR_HI_4,
mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_0,
mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_1,
mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_2,
mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_3,
mmPDMA0_QM_CP_MSG_BASE1_ADDR_LO_4,
mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_0,
mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_1,
mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_2,
mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_3,
mmPDMA0_QM_CP_MSG_BASE1_ADDR_HI_4,
mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_0,
mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_1,
mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_2,
mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_3,
mmPDMA0_QM_CP_MSG_BASE2_ADDR_LO_4,
mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_0,
mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_1,
mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_2,
mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_3,
mmPDMA0_QM_CP_MSG_BASE2_ADDR_HI_4,
mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_0,
mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_1,
mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_2,
mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_3,
mmPDMA0_QM_CP_MSG_BASE3_ADDR_LO_4,
mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_0,
mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_1,
mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_2,
mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_3,
mmPDMA0_QM_CP_MSG_BASE3_ADDR_HI_4,
mmPDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_LO,
mmPDMA0_QM_ARC_CQ_CTL_MSG_BASE_LO,
mmPDMA0_QM_CQ_IFIFO_MSG_BASE_LO,
mmPDMA0_QM_CQ_CTL_MSG_BASE_LO
};
static const u32 gaudi2_pb_dcr0_edma0[] = {
mmDCORE0_EDMA0_CORE_BASE,
mmDCORE0_EDMA0_MSTR_IF_RR_SHRD_HBW_BASE,
mmDCORE0_EDMA0_QM_BASE,
};
static const u32 gaudi2_pb_dcr0_edma0_arc[] = {
mmDCORE0_EDMA0_QM_ARC_AUX_BASE,
};
static const struct range gaudi2_pb_dcr0_edma0_arc_unsecured_regs[] = {
{mmDCORE0_EDMA0_QM_ARC_AUX_RUN_HALT_REQ, mmDCORE0_EDMA0_QM_ARC_AUX_RUN_HALT_ACK},
{mmDCORE0_EDMA0_QM_ARC_AUX_CLUSTER_NUM, mmDCORE0_EDMA0_QM_ARC_AUX_WAKE_UP_EVENT},
{mmDCORE0_EDMA0_QM_ARC_AUX_ARC_RST_REQ, mmDCORE0_EDMA0_QM_ARC_AUX_CID_OFFSET_7},
{mmDCORE0_EDMA0_QM_ARC_AUX_SCRATCHPAD_0, mmDCORE0_EDMA0_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT},
{mmDCORE0_EDMA0_QM_ARC_AUX_CBU_EARLY_BRESP_EN,
mmDCORE0_EDMA0_QM_ARC_AUX_CBU_EARLY_BRESP_EN},
{mmDCORE0_EDMA0_QM_ARC_AUX_LBU_EARLY_BRESP_EN,
mmDCORE0_EDMA0_QM_ARC_AUX_LBU_EARLY_BRESP_EN},
{mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0,
mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG},
{mmDCORE0_EDMA0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT,
mmDCORE0_EDMA0_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI},
{mmDCORE0_EDMA0_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT,
mmDCORE0_EDMA0_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN},
};
static const u32 gaudi2_pb_dcr0_edma0_unsecured_regs[] = {
mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_WR_REDUCTION,
mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_HI,
mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_LO,
mmDCORE0_EDMA0_CORE_CTX_WR_COMP_WDATA,
mmDCORE0_EDMA0_CORE_CTX_SRC_BASE_LO,
mmDCORE0_EDMA0_CORE_CTX_SRC_BASE_HI,
mmDCORE0_EDMA0_CORE_CTX_DST_BASE_LO,
mmDCORE0_EDMA0_CORE_CTX_DST_BASE_HI,
mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_0,
mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_1,
mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_2,
mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_3,
mmDCORE0_EDMA0_CORE_CTX_SRC_TSIZE_4,
mmDCORE0_EDMA0_CORE_CTX_SRC_STRIDE_1,
mmDCORE0_EDMA0_CORE_CTX_SRC_STRIDE_2,
mmDCORE0_EDMA0_CORE_CTX_SRC_STRIDE_3,
mmDCORE0_EDMA0_CORE_CTX_SRC_STRIDE_4,
mmDCORE0_EDMA0_CORE_CTX_SRC_OFFSET_LO,
mmDCORE0_EDMA0_CORE_CTX_SRC_OFFSET_HI,
mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_0,
mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_1,
mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_2,
mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_3,
mmDCORE0_EDMA0_CORE_CTX_DST_TSIZE_4,
mmDCORE0_EDMA0_CORE_CTX_DST_STRIDE_1,
mmDCORE0_EDMA0_CORE_CTX_DST_STRIDE_2,
mmDCORE0_EDMA0_CORE_CTX_DST_STRIDE_3,
mmDCORE0_EDMA0_CORE_CTX_DST_STRIDE_4,
mmDCORE0_EDMA0_CORE_CTX_DST_OFFSET_LO,
mmDCORE0_EDMA0_CORE_CTX_DST_OFFSET_HI,
mmDCORE0_EDMA0_CORE_CTX_COMMIT,
mmDCORE0_EDMA0_CORE_CTX_CTRL,
mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS,
mmDCORE0_EDMA0_CORE_CTX_IDX,
mmDCORE0_EDMA0_CORE_CTX_IDX_INC,
mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG,
mmDCORE0_EDMA0_QM_CQ_CFG0_0,
mmDCORE0_EDMA0_QM_CQ_CFG0_1,
mmDCORE0_EDMA0_QM_CQ_CFG0_2,
mmDCORE0_EDMA0_QM_CQ_CFG0_3,
mmDCORE0_EDMA0_QM_CQ_CFG0_4,
mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_0,
mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_1,
mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_2,
mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_3,
mmDCORE0_EDMA0_QM_CP_FENCE0_RDATA_4,
mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_0,
mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_1,
mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_2,
mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_3,
mmDCORE0_EDMA0_QM_CP_FENCE1_RDATA_4,
mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_0,
mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_1,
mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_2,
mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_3,
mmDCORE0_EDMA0_QM_CP_FENCE2_RDATA_4,
mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_0,
mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_1,
mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_2,
mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_3,
mmDCORE0_EDMA0_QM_CP_FENCE3_RDATA_4,
mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_0,
mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_1,
mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_2,
mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_3,
mmDCORE0_EDMA0_QM_CP_FENCE0_CNT_4,
mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_0,
mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_1,
mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_2,
mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_3,
mmDCORE0_EDMA0_QM_CP_FENCE1_CNT_4,
mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_0,
mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_1,
mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_2,
mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_3,
mmDCORE0_EDMA0_QM_CP_FENCE2_CNT_4,
mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_0,
mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_1,
mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_2,
mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_3,
mmDCORE0_EDMA0_QM_CP_FENCE3_CNT_4,
mmDCORE0_EDMA0_QM_CQ_PTR_LO_0,
mmDCORE0_EDMA0_QM_CQ_PTR_HI_0,
mmDCORE0_EDMA0_QM_CQ_TSIZE_0,
mmDCORE0_EDMA0_QM_CQ_CTL_0,
mmDCORE0_EDMA0_QM_CQ_PTR_LO_1,
mmDCORE0_EDMA0_QM_CQ_PTR_HI_1,
mmDCORE0_EDMA0_QM_CQ_TSIZE_1,
mmDCORE0_EDMA0_QM_CQ_CTL_1,
mmDCORE0_EDMA0_QM_CQ_PTR_LO_2,
mmDCORE0_EDMA0_QM_CQ_PTR_HI_2,
mmDCORE0_EDMA0_QM_CQ_TSIZE_2,
mmDCORE0_EDMA0_QM_CQ_CTL_2,
mmDCORE0_EDMA0_QM_CQ_PTR_LO_3,
mmDCORE0_EDMA0_QM_CQ_PTR_HI_3,
mmDCORE0_EDMA0_QM_CQ_TSIZE_3,
mmDCORE0_EDMA0_QM_CQ_CTL_3,
mmDCORE0_EDMA0_QM_CQ_PTR_LO_4,
mmDCORE0_EDMA0_QM_CQ_PTR_HI_4,
mmDCORE0_EDMA0_QM_CQ_TSIZE_4,
mmDCORE0_EDMA0_QM_CQ_CTL_4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR0_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR0_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR1_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR1_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR2_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR2_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR3_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR3_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR4_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR4_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR5_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR5_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR6_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR6_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR7_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR7_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR8_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR8_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR9_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR9_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR10_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR10_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR11_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR11_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR12_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR12_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR13_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR13_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR14_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR14_BASE + 4,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR15_BASE,
mmDCORE0_EDMA0_QM_QMAN_WR64_BASE_ADDR15_BASE + 4,
mmDCORE0_EDMA0_QM_ARC_CQ_PTR_LO,
mmDCORE0_EDMA0_QM_ARC_CQ_PTR_LO_STS,
mmDCORE0_EDMA0_QM_ARC_CQ_PTR_HI,
mmDCORE0_EDMA0_QM_ARC_CQ_PTR_HI_STS,
mmDCORE0_EDMA0_QM_ARB_CFG_0,
mmDCORE0_EDMA0_QM_ARB_MST_QUIET_PER,
mmDCORE0_EDMA0_QM_ARB_CHOICE_Q_PUSH,
mmDCORE0_EDMA0_QM_ARB_WRR_WEIGHT_0,
mmDCORE0_EDMA0_QM_ARB_WRR_WEIGHT_1,
mmDCORE0_EDMA0_QM_ARB_WRR_WEIGHT_2,
mmDCORE0_EDMA0_QM_ARB_WRR_WEIGHT_3,
mmDCORE0_EDMA0_QM_ARB_BASE_LO,
mmDCORE0_EDMA0_QM_ARB_BASE_HI,
mmDCORE0_EDMA0_QM_ARB_MST_SLAVE_EN,
mmDCORE0_EDMA0_QM_ARB_MST_SLAVE_EN_1,
mmDCORE0_EDMA0_QM_ARB_MST_CRED_INC,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_0,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_1,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_2,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_3,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_4,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_5,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_6,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_7,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_8,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_9,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_10,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_11,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_12,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_13,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_14,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_15,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_16,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_17,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_18,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_19,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_20,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_21,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_22,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_23,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_24,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_25,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_26,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_27,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_28,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_29,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_30,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_31,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_32,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_33,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_34,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_35,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_36,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_37,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_38,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_39,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_40,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_41,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_42,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_43,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_44,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_45,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_46,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_47,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_48,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_49,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_50,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_51,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_52,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_53,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_54,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_55,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_56,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_57,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_58,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_59,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_60,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_61,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_62,
mmDCORE0_EDMA0_QM_ARB_MST_CHOICE_PUSH_OFST_63,
mmDCORE0_EDMA0_QM_ARB_SLV_ID,
mmDCORE0_EDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST,
mmDCORE0_EDMA0_QM_ARC_CQ_CFG0,
mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_0,
mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_1,
mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_2,
mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_3,
mmDCORE0_EDMA0_QM_CQ_IFIFO_CI_4,
mmDCORE0_EDMA0_QM_ARC_CQ_IFIFO_CI,
mmDCORE0_EDMA0_QM_CQ_CTL_CI_0,
mmDCORE0_EDMA0_QM_CQ_CTL_CI_1,
mmDCORE0_EDMA0_QM_CQ_CTL_CI_2,
mmDCORE0_EDMA0_QM_CQ_CTL_CI_3,
mmDCORE0_EDMA0_QM_CQ_CTL_CI_4,
mmDCORE0_EDMA0_QM_ARC_CQ_CTL_CI,
mmDCORE0_EDMA0_QM_ARC_CQ_TSIZE,
mmDCORE0_EDMA0_QM_ARC_CQ_CTL,
mmDCORE0_EDMA0_QM_CP_SWITCH_WD_SET,
mmDCORE0_EDMA0_QM_CP_EXT_SWITCH,
mmDCORE0_EDMA0_QM_CP_PRED_0,
mmDCORE0_EDMA0_QM_CP_PRED_1,
mmDCORE0_EDMA0_QM_CP_PRED_2,
mmDCORE0_EDMA0_QM_CP_PRED_3,
mmDCORE0_EDMA0_QM_CP_PRED_4,
mmDCORE0_EDMA0_QM_CP_PRED_UPEN_0,
mmDCORE0_EDMA0_QM_CP_PRED_UPEN_1,
mmDCORE0_EDMA0_QM_CP_PRED_UPEN_2,
mmDCORE0_EDMA0_QM_CP_PRED_UPEN_3,
mmDCORE0_EDMA0_QM_CP_PRED_UPEN_4,
mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_0,
mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_1,
mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_2,
mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_3,
mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_LO_4,
mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_0,
mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_1,
mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_2,
mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_3,
mmDCORE0_EDMA0_QM_CP_MSG_BASE0_ADDR_HI_4,
mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_0,
mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_1,
mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_2,
mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_3,
mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_LO_4,
mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_0,
mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_1,
mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_2,
mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_3,
mmDCORE0_EDMA0_QM_CP_MSG_BASE1_ADDR_HI_4,
mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_0,
mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_1,
mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_2,
mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_3,
mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_LO_4,
mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_0,
mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_1,
mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_2,
mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_3,
mmDCORE0_EDMA0_QM_CP_MSG_BASE2_ADDR_HI_4,
mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_0,
mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_1,
mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_2,
mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_3,
mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_LO_4,
mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_0,
mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_1,
mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_2,
mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_3,
mmDCORE0_EDMA0_QM_CP_MSG_BASE3_ADDR_HI_4,
mmDCORE0_EDMA0_QM_ARC_CQ_IFIFO_MSG_BASE_LO,
mmDCORE0_EDMA0_QM_ARC_CQ_CTL_MSG_BASE_LO,
mmDCORE0_EDMA0_QM_CQ_IFIFO_MSG_BASE_LO,
mmDCORE0_EDMA0_QM_CQ_CTL_MSG_BASE_LO
};
static const u32 gaudi2_pb_dcr0_mme_sbte[] = {
mmDCORE0_MME_SBTE0_BASE,
mmDCORE0_MME_SBTE0_MSTR_IF_RR_SHRD_HBW_BASE,
};
static const u32 gaudi2_pb_dcr0_mme_qm[] = {
mmDCORE0_MME_QM_BASE,
};
static const u32 gaudi2_pb_dcr0_mme_eng[] = {
mmDCORE0_MME_ACC_BASE,
mmDCORE0_MME_CTRL_HI_BASE,
mmDCORE0_MME_CTRL_LO_BASE,
mmDCORE0_MME_CTRL_MSTR_IF_RR_SHRD_HBW_BASE,
mmDCORE0_MME_WB0_MSTR_IF_RR_SHRD_HBW_BASE,
mmDCORE0_MME_WB1_MSTR_IF_RR_SHRD_HBW_BASE,
};
static const u32 gaudi2_pb_dcr0_mme_arc[] = {
mmDCORE0_MME_QM_ARC_AUX_BASE,
mmDCORE0_MME_QM_ARC_DUP_ENG_BASE,
};
static const struct range gaudi2_pb_dcr0_mme_arc_unsecured_regs[] = {
{mmDCORE0_MME_QM_ARC_AUX_RUN_HALT_REQ, mmDCORE0_MME_QM_ARC_AUX_RUN_HALT_ACK},
{mmDCORE0_MME_QM_ARC_AUX_CLUSTER_NUM, mmDCORE0_MME_QM_ARC_AUX_WAKE_UP_EVENT},
{mmDCORE0_MME_QM_ARC_AUX_ARC_RST_REQ, mmDCORE0_MME_QM_ARC_AUX_CID_OFFSET_7},
{mmDCORE0_MME_QM_ARC_AUX_SCRATCHPAD_0, mmDCORE0_MME_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT},
{mmDCORE0_MME_QM_ARC_AUX_CBU_EARLY_BRESP_EN, mmDCORE0_MME_QM_ARC_AUX_CBU_EARLY_BRESP_EN},
{mmDCORE0_MME_QM_ARC_AUX_LBU_EARLY_BRESP_EN, mmDCORE0_MME_QM_ARC_AUX_LBU_EARLY_BRESP_EN},
{mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0,
mmDCORE0_MME_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG},
{mmDCORE0_MME_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT,
mmDCORE0_MME_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI},
{mmDCORE0_MME_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT,
mmDCORE0_MME_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN},
{mmDCORE0_MME_QM_ARC_DUP_ENG_DUP_TPC_ENG_ADDR_0,
mmDCORE0_MME_QM_ARC_DUP_ENG_ARC_CID_OFFSET_63},
{mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_HB_STRONG_ORDER,
mmDCORE0_MME_QM_ARC_DUP_ENG_AXUSER_LB_OVRD},
};
static const u32 gaudi2_pb_dcr0_mme_qm_unsecured_regs[] = {
mmDCORE0_MME_QM_CQ_CFG0_0,
mmDCORE0_MME_QM_CQ_CFG0_1,
mmDCORE0_MME_QM_CQ_CFG0_2,
mmDCORE0_MME_QM_CQ_CFG0_3,
mmDCORE0_MME_QM_CQ_CFG0_4,
mmDCORE0_MME_QM_CP_FENCE0_RDATA_0,
mmDCORE0_MME_QM_CP_FENCE0_RDATA_1,
mmDCORE0_MME_QM_CP_FENCE0_RDATA_2,
mmDCORE0_MME_QM_CP_FENCE0_RDATA_3,
mmDCORE0_MME_QM_CP_FENCE0_RDATA_4,
mmDCORE0_MME_QM_CP_FENCE1_RDATA_0,
mmDCORE0_MME_QM_CP_FENCE1_RDATA_1,
mmDCORE0_MME_QM_CP_FENCE1_RDATA_2,
mmDCORE0_MME_QM_CP_FENCE1_RDATA_3,
mmDCORE0_MME_QM_CP_FENCE1_RDATA_4,
mmDCORE0_MME_QM_CP_FENCE2_RDATA_0,
mmDCORE0_MME_QM_CP_FENCE2_RDATA_1,
mmDCORE0_MME_QM_CP_FENCE2_RDATA_2,
mmDCORE0_MME_QM_CP_FENCE2_RDATA_3,
mmDCORE0_MME_QM_CP_FENCE2_RDATA_4,
mmDCORE0_MME_QM_CP_FENCE3_RDATA_0,
mmDCORE0_MME_QM_CP_FENCE3_RDATA_1,
mmDCORE0_MME_QM_CP_FENCE3_RDATA_2,
mmDCORE0_MME_QM_CP_FENCE3_RDATA_3,
mmDCORE0_MME_QM_CP_FENCE3_RDATA_4,
mmDCORE0_MME_QM_CP_FENCE0_CNT_0,
mmDCORE0_MME_QM_CP_FENCE0_CNT_1,
mmDCORE0_MME_QM_CP_FENCE0_CNT_2,
mmDCORE0_MME_QM_CP_FENCE0_CNT_3,
mmDCORE0_MME_QM_CP_FENCE0_CNT_4,
mmDCORE0_MME_QM_CP_FENCE1_CNT_0,
mmDCORE0_MME_QM_CP_FENCE1_CNT_1,
mmDCORE0_MME_QM_CP_FENCE1_CNT_2,
mmDCORE0_MME_QM_CP_FENCE1_CNT_3,
mmDCORE0_MME_QM_CP_FENCE1_CNT_4,
mmDCORE0_MME_QM_CP_FENCE2_CNT_0,
mmDCORE0_MME_QM_CP_FENCE2_CNT_1,
mmDCORE0_MME_QM_CP_FENCE2_CNT_2,
mmDCORE0_MME_QM_CP_FENCE2_CNT_3,
mmDCORE0_MME_QM_CP_FENCE2_CNT_4,
mmDCORE0_MME_QM_CP_FENCE3_CNT_0,
mmDCORE0_MME_QM_CP_FENCE3_CNT_1,
mmDCORE0_MME_QM_CP_FENCE3_CNT_2,
mmDCORE0_MME_QM_CP_FENCE3_CNT_3,
mmDCORE0_MME_QM_CP_FENCE3_CNT_4,
mmDCORE0_MME_QM_CQ_PTR_LO_0,
mmDCORE0_MME_QM_CQ_PTR_HI_0,
mmDCORE0_MME_QM_CQ_TSIZE_0,
mmDCORE0_MME_QM_CQ_CTL_0,
mmDCORE0_MME_QM_CQ_PTR_LO_1,
mmDCORE0_MME_QM_CQ_PTR_HI_1,
mmDCORE0_MME_QM_CQ_TSIZE_1,
mmDCORE0_MME_QM_CQ_CTL_1,
mmDCORE0_MME_QM_CQ_PTR_LO_2,
mmDCORE0_MME_QM_CQ_PTR_HI_2,
mmDCORE0_MME_QM_CQ_TSIZE_2,
mmDCORE0_MME_QM_CQ_CTL_2,
mmDCORE0_MME_QM_CQ_PTR_LO_3,
mmDCORE0_MME_QM_CQ_PTR_HI_3,
mmDCORE0_MME_QM_CQ_TSIZE_3,
mmDCORE0_MME_QM_CQ_CTL_3,
mmDCORE0_MME_QM_CQ_PTR_LO_4,
mmDCORE0_MME_QM_CQ_PTR_HI_4,
mmDCORE0_MME_QM_CQ_TSIZE_4,
mmDCORE0_MME_QM_CQ_CTL_4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR0_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR0_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR1_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR1_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR2_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR2_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR3_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR3_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR4_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR4_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR5_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR5_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR6_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR6_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR7_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR7_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR8_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR8_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR9_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR9_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR10_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR10_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR11_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR11_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR12_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR12_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR13_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR13_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR14_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR14_BASE + 4,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR15_BASE,
mmDCORE0_MME_QM_QMAN_WR64_BASE_ADDR15_BASE + 4,
mmDCORE0_MME_QM_ARC_CQ_PTR_LO,
mmDCORE0_MME_QM_ARC_CQ_PTR_LO_STS,
mmDCORE0_MME_QM_ARC_CQ_PTR_HI,
mmDCORE0_MME_QM_ARC_CQ_PTR_HI_STS,
mmDCORE0_MME_QM_ARB_CFG_0,
mmDCORE0_MME_QM_ARB_MST_QUIET_PER,
mmDCORE0_MME_QM_ARB_CHOICE_Q_PUSH,
mmDCORE0_MME_QM_ARB_WRR_WEIGHT_0,
mmDCORE0_MME_QM_ARB_WRR_WEIGHT_1,
mmDCORE0_MME_QM_ARB_WRR_WEIGHT_2,
mmDCORE0_MME_QM_ARB_WRR_WEIGHT_3,
mmDCORE0_MME_QM_ARB_BASE_LO,
mmDCORE0_MME_QM_ARB_BASE_HI,
mmDCORE0_MME_QM_ARB_MST_SLAVE_EN,
mmDCORE0_MME_QM_ARB_MST_SLAVE_EN_1,
mmDCORE0_MME_QM_ARB_MST_CRED_INC,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_0,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_1,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_2,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_3,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_4,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_5,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_6,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_7,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_8,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_9,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_10,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_11,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_12,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_13,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_14,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_15,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_16,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_17,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_18,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_19,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_20,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_21,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_22,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_23,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_24,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_25,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_26,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_27,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_28,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_29,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_30,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_31,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_32,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_33,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_34,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_35,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_36,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_37,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_38,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_39,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_40,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_41,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_42,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_43,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_44,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_45,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_46,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_47,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_48,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_49,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_50,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_51,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_52,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_53,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_54,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_55,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_56,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_57,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_58,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_59,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_60,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_61,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_62,
mmDCORE0_MME_QM_ARB_MST_CHOICE_PUSH_OFST_63,
mmDCORE0_MME_QM_ARB_SLV_ID,
mmDCORE0_MME_QM_ARB_SLV_MASTER_INC_CRED_OFST,
mmDCORE0_MME_QM_ARC_CQ_CFG0,
mmDCORE0_MME_QM_CQ_IFIFO_CI_0,
mmDCORE0_MME_QM_CQ_IFIFO_CI_1,
mmDCORE0_MME_QM_CQ_IFIFO_CI_2,
mmDCORE0_MME_QM_CQ_IFIFO_CI_3,
mmDCORE0_MME_QM_CQ_IFIFO_CI_4,
mmDCORE0_MME_QM_ARC_CQ_IFIFO_CI,
mmDCORE0_MME_QM_CQ_CTL_CI_0,
mmDCORE0_MME_QM_CQ_CTL_CI_1,
mmDCORE0_MME_QM_CQ_CTL_CI_2,
mmDCORE0_MME_QM_CQ_CTL_CI_3,
mmDCORE0_MME_QM_CQ_CTL_CI_4,
mmDCORE0_MME_QM_ARC_CQ_CTL_CI,
mmDCORE0_MME_QM_ARC_CQ_TSIZE,
mmDCORE0_MME_QM_ARC_CQ_CTL,
mmDCORE0_MME_QM_CP_SWITCH_WD_SET,
mmDCORE0_MME_QM_CP_EXT_SWITCH,
mmDCORE0_MME_QM_CP_PRED_0,
mmDCORE0_MME_QM_CP_PRED_1,
mmDCORE0_MME_QM_CP_PRED_2,
mmDCORE0_MME_QM_CP_PRED_3,
mmDCORE0_MME_QM_CP_PRED_4,
mmDCORE0_MME_QM_CP_PRED_UPEN_0,
mmDCORE0_MME_QM_CP_PRED_UPEN_1,
mmDCORE0_MME_QM_CP_PRED_UPEN_2,
mmDCORE0_MME_QM_CP_PRED_UPEN_3,
mmDCORE0_MME_QM_CP_PRED_UPEN_4,
mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_0,
mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_1,
mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_2,
mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_3,
mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_LO_4,
mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_0,
mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_1,
mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_2,
mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_3,
mmDCORE0_MME_QM_CP_MSG_BASE0_ADDR_HI_4,
mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_0,
mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_1,
mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_2,
mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_3,
mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_LO_4,
mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_0,
mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_1,
mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_2,
mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_3,
mmDCORE0_MME_QM_CP_MSG_BASE1_ADDR_HI_4,
mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_0,
mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_1,
mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_2,
mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_3,
mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_LO_4,
mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_0,
mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_1,
mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_2,
mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_3,
mmDCORE0_MME_QM_CP_MSG_BASE2_ADDR_HI_4,
mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_0,
mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_1,
mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_2,
mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_3,
mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_LO_4,
mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_0,
mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_1,
mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_2,
mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_3,
mmDCORE0_MME_QM_CP_MSG_BASE3_ADDR_HI_4,
mmDCORE0_MME_QM_ARC_CQ_IFIFO_MSG_BASE_LO,
mmDCORE0_MME_QM_ARC_CQ_CTL_MSG_BASE_LO,
mmDCORE0_MME_QM_CQ_IFIFO_MSG_BASE_LO,
mmDCORE0_MME_QM_CQ_CTL_MSG_BASE_LO
};
static const u32 gaudi2_pb_dcr0_mme_eng_unsecured_regs[] = {
mmDCORE0_MME_CTRL_LO_CMD,
mmDCORE0_MME_CTRL_LO_AGU,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_ROI_BASE_OFFSET_4,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_ROI_BASE_OFFSET_4,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_BRAINS_LOW,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_BRAINS_HIGH,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_HEADER_LOW,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_HEADER_HIGH,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_EUS_MASTER,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_EUS_SLAVE,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_KERNEL_SIZE_MINUS_1,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_LOW,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_HIGH,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_OUTER_LOOP,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_NUM_ITERATIONS_MINUS_1,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SB_REPEAT,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_FP8_BIAS,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_RATE_LIMITER,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_USER_DATA,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_PERF_EVT_IN,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_PERF_EVT_OUT,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_PCU,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SLAVE_SYNC_OBJ0_ADDR,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SLAVE_SYNC_OBJ1_ADDR,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_POWER_LOOP,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE0_MASTER,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE1_MASTER,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE2_MASTER,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE3_MASTER,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE0_SLAVE,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE1_SLAVE,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE2_SLAVE,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SPARE3_SLAVE,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_WKL_ID,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_ROI_BASE_OFFSET_4,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_ROI_BASE_OFFSET_4,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_ROI_BASE_OFFSET_4,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_ROI_BASE_OFFSET_4,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_ROI_BASE_OFFSET_4,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_ROI_BASE_OFFSET_4,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_3,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_VALID_ELEMENTS_4,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_3,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_LOOP_STRIDE_4,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_ROI_SIZE_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_ROI_SIZE_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_ROI_SIZE_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_ROI_SIZE_3,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_SPATIAL_STRIDES_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_SPATIAL_STRIDES_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_SPATIAL_STRIDES_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_SPATIAL_STRIDES_3,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_START_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_START_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_START_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_START_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_ROI_BASE_OFFSET_4,
mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_COUT1_LOW,
mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_COUT1_HIGH,
mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_COUT0_LOW,
mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_COUT0_HIGH,
mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_A_LOW,
mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_A_HIGH,
mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_B_LOW,
mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_B_HIGH,
mmDCORE0_MME_CTRL_LO_ARCH_STATUS,
mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0,
mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0,
mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL0,
mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR1,
mmDCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_VAL1,
mmDCORE0_MME_CTRL_LO_ARCH_A_SS,
mmDCORE0_MME_CTRL_LO_ARCH_B_SS,
mmDCORE0_MME_CTRL_LO_ARCH_COUT_SS,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_ROI_BASE_OFFSET_4,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_ROI_BASE_OFFSET_4,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_3,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_VALID_ELEMENTS_4,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_3,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_LOOP_STRIDE_4,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_ROI_SIZE_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_ROI_SIZE_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_ROI_SIZE_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_ROI_SIZE_3,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_SPATIAL_STRIDES_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_SPATIAL_STRIDES_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_SPATIAL_STRIDES_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_SPATIAL_STRIDES_3,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_START_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_START_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_START_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_START_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_BASE_ADDR_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_A_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_COUT_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_MASTER_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_MASTER_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN1_SLAVE_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_MASTER_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN2_SLAVE_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_MASTER_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN3_SLAVE_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_MASTER_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_MASTER_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_MASTER_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT1_SLAVE_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_BASE,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_COUT0_SLAVE_ROI_BASE_OFFSET_4,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN4_SLAVE_ROI_BASE_OFFSET_4,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_3,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_VALID_ELEMENTS_4,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_3,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_LOOP_STRIDE_4,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_ROI_SIZE_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_ROI_SIZE_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_ROI_SIZE_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_ROI_SIZE_3,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_SPATIAL_STRIDES_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_SPATIAL_STRIDES_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_SPATIAL_STRIDES_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_SPATIAL_STRIDES_3,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_START_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_START_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_START_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_TENSOR_B_START_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_0,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_1,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_2,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_3,
mmDCORE0_MME_CTRL_LO_ARCH_AGU_IN0_SLAVE_ROI_BASE_OFFSET_4,
mmDCORE0_MME_ACC_AP_LFSR_POLY,
mmDCORE0_MME_ACC_AP_LFSR_SEED_WDATA,
mmDCORE0_MME_ACC_AP_LFSR_SEED_SEL,
mmDCORE0_MME_ACC_AP_LFSR_SEED_RDATA,
mmDCORE0_MME_ACC_AP_LFSR_CLOSE_CGATE_DLY,
mmDCORE0_MME_ACC_WBC_SRC_BP,
};
static const u32 gaudi2_pb_dcr0_tpc0[] = {
mmDCORE0_TPC0_QM_BASE,
mmDCORE0_TPC0_CFG_BASE,
mmDCORE0_TPC0_MSTR_IF_RR_SHRD_HBW_BASE,
};
static const u32 gaudi2_pb_dcr0_tpc0_arc[] = {
mmDCORE0_TPC0_QM_ARC_AUX_BASE,
};
static const struct range gaudi2_pb_dcr0_tpc0_arc_unsecured_regs[] = {
{mmDCORE0_TPC0_QM_ARC_AUX_RUN_HALT_REQ, mmDCORE0_TPC0_QM_ARC_AUX_RUN_HALT_ACK},
{mmDCORE0_TPC0_QM_ARC_AUX_CLUSTER_NUM, mmDCORE0_TPC0_QM_ARC_AUX_WAKE_UP_EVENT},
{mmDCORE0_TPC0_QM_ARC_AUX_ARC_RST_REQ, mmDCORE0_TPC0_QM_ARC_AUX_CID_OFFSET_7},
{mmDCORE0_TPC0_QM_ARC_AUX_SCRATCHPAD_0, mmDCORE0_TPC0_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT},
{mmDCORE0_TPC0_QM_ARC_AUX_CBU_EARLY_BRESP_EN, mmDCORE0_TPC0_QM_ARC_AUX_CBU_EARLY_BRESP_EN},
{mmDCORE0_TPC0_QM_ARC_AUX_LBU_EARLY_BRESP_EN, mmDCORE0_TPC0_QM_ARC_AUX_LBU_EARLY_BRESP_EN},
{mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0,
mmDCORE0_TPC0_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG},
{mmDCORE0_TPC0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT,
mmDCORE0_TPC0_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI},
{mmDCORE0_TPC0_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT,
mmDCORE0_TPC0_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN},
};
static const u32 gaudi2_pb_dcr0_tpc0_unsecured_regs[] = {
mmDCORE0_TPC0_QM_CQ_CFG0_0,
mmDCORE0_TPC0_QM_CQ_CFG0_1,
mmDCORE0_TPC0_QM_CQ_CFG0_2,
mmDCORE0_TPC0_QM_CQ_CFG0_3,
mmDCORE0_TPC0_QM_CQ_CFG0_4,
mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_0,
mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_1,
mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_2,
mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_3,
mmDCORE0_TPC0_QM_CP_FENCE0_RDATA_4,
mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_0,
mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_1,
mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_2,
mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_3,
mmDCORE0_TPC0_QM_CP_FENCE1_RDATA_4,
mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_0,
mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_1,
mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_2,
mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_3,
mmDCORE0_TPC0_QM_CP_FENCE2_RDATA_4,
mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_0,
mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_1,
mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_2,
mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_3,
mmDCORE0_TPC0_QM_CP_FENCE3_RDATA_4,
mmDCORE0_TPC0_QM_CP_FENCE0_CNT_0,
mmDCORE0_TPC0_QM_CP_FENCE0_CNT_1,
mmDCORE0_TPC0_QM_CP_FENCE0_CNT_2,
mmDCORE0_TPC0_QM_CP_FENCE0_CNT_3,
mmDCORE0_TPC0_QM_CP_FENCE0_CNT_4,
mmDCORE0_TPC0_QM_CP_FENCE1_CNT_0,
mmDCORE0_TPC0_QM_CP_FENCE1_CNT_1,
mmDCORE0_TPC0_QM_CP_FENCE1_CNT_2,
mmDCORE0_TPC0_QM_CP_FENCE1_CNT_3,
mmDCORE0_TPC0_QM_CP_FENCE1_CNT_4,
mmDCORE0_TPC0_QM_CP_FENCE2_CNT_0,
mmDCORE0_TPC0_QM_CP_FENCE2_CNT_1,
mmDCORE0_TPC0_QM_CP_FENCE2_CNT_2,
mmDCORE0_TPC0_QM_CP_FENCE2_CNT_3,
mmDCORE0_TPC0_QM_CP_FENCE2_CNT_4,
mmDCORE0_TPC0_QM_CP_FENCE3_CNT_0,
mmDCORE0_TPC0_QM_CP_FENCE3_CNT_1,
mmDCORE0_TPC0_QM_CP_FENCE3_CNT_2,
mmDCORE0_TPC0_QM_CP_FENCE3_CNT_3,
mmDCORE0_TPC0_QM_CP_FENCE3_CNT_4,
mmDCORE0_TPC0_QM_CQ_PTR_LO_0,
mmDCORE0_TPC0_QM_CQ_PTR_HI_0,
mmDCORE0_TPC0_QM_CQ_TSIZE_0,
mmDCORE0_TPC0_QM_CQ_CTL_0,
mmDCORE0_TPC0_QM_CQ_PTR_LO_1,
mmDCORE0_TPC0_QM_CQ_PTR_HI_1,
mmDCORE0_TPC0_QM_CQ_TSIZE_1,
mmDCORE0_TPC0_QM_CQ_CTL_1,
mmDCORE0_TPC0_QM_CQ_PTR_LO_2,
mmDCORE0_TPC0_QM_CQ_PTR_HI_2,
mmDCORE0_TPC0_QM_CQ_TSIZE_2,
mmDCORE0_TPC0_QM_CQ_CTL_2,
mmDCORE0_TPC0_QM_CQ_PTR_LO_3,
mmDCORE0_TPC0_QM_CQ_PTR_HI_3,
mmDCORE0_TPC0_QM_CQ_TSIZE_3,
mmDCORE0_TPC0_QM_CQ_CTL_3,
mmDCORE0_TPC0_QM_CQ_PTR_LO_4,
mmDCORE0_TPC0_QM_CQ_PTR_HI_4,
mmDCORE0_TPC0_QM_CQ_TSIZE_4,
mmDCORE0_TPC0_QM_CQ_CTL_4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR0_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR0_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR1_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR1_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR2_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR2_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR3_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR3_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR4_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR4_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR5_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR5_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR6_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR6_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR7_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR7_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR8_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR8_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR9_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR9_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR10_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR10_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR11_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR11_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR12_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR12_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR13_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR13_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR14_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR14_BASE + 4,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR15_BASE,
mmDCORE0_TPC0_QM_QMAN_WR64_BASE_ADDR15_BASE + 4,
mmDCORE0_TPC0_QM_ARC_CQ_PTR_LO,
mmDCORE0_TPC0_QM_ARC_CQ_PTR_LO_STS,
mmDCORE0_TPC0_QM_ARC_CQ_PTR_HI,
mmDCORE0_TPC0_QM_ARC_CQ_PTR_HI_STS,
mmDCORE0_TPC0_QM_ARB_CFG_0,
mmDCORE0_TPC0_QM_ARB_MST_QUIET_PER,
mmDCORE0_TPC0_QM_ARB_CHOICE_Q_PUSH,
mmDCORE0_TPC0_QM_ARB_WRR_WEIGHT_0,
mmDCORE0_TPC0_QM_ARB_WRR_WEIGHT_1,
mmDCORE0_TPC0_QM_ARB_WRR_WEIGHT_2,
mmDCORE0_TPC0_QM_ARB_WRR_WEIGHT_3,
mmDCORE0_TPC0_QM_ARB_BASE_LO,
mmDCORE0_TPC0_QM_ARB_BASE_HI,
mmDCORE0_TPC0_QM_ARB_MST_SLAVE_EN,
mmDCORE0_TPC0_QM_ARB_MST_SLAVE_EN_1,
mmDCORE0_TPC0_QM_ARB_MST_CRED_INC,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_0,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_1,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_2,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_3,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_4,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_5,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_6,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_7,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_8,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_9,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_10,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_11,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_12,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_13,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_14,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_15,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_16,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_17,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_18,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_19,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_20,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_21,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_22,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_23,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_24,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_25,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_26,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_27,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_28,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_29,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_30,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_31,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_32,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_33,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_34,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_35,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_36,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_37,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_38,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_39,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_40,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_41,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_42,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_43,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_44,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_45,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_46,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_47,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_48,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_49,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_50,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_51,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_52,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_53,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_54,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_55,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_56,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_57,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_58,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_59,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_60,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_61,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_62,
mmDCORE0_TPC0_QM_ARB_MST_CHOICE_PUSH_OFST_63,
mmDCORE0_TPC0_QM_ARB_SLV_ID,
mmDCORE0_TPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST,
mmDCORE0_TPC0_QM_ARC_CQ_CFG0,
mmDCORE0_TPC0_QM_CQ_IFIFO_CI_0,
mmDCORE0_TPC0_QM_CQ_IFIFO_CI_1,
mmDCORE0_TPC0_QM_CQ_IFIFO_CI_2,
mmDCORE0_TPC0_QM_CQ_IFIFO_CI_3,
mmDCORE0_TPC0_QM_CQ_IFIFO_CI_4,
mmDCORE0_TPC0_QM_ARC_CQ_IFIFO_CI,
mmDCORE0_TPC0_QM_CQ_CTL_CI_0,
mmDCORE0_TPC0_QM_CQ_CTL_CI_1,
mmDCORE0_TPC0_QM_CQ_CTL_CI_2,
mmDCORE0_TPC0_QM_CQ_CTL_CI_3,
mmDCORE0_TPC0_QM_CQ_CTL_CI_4,
mmDCORE0_TPC0_QM_ARC_CQ_CTL_CI,
mmDCORE0_TPC0_QM_ARC_CQ_TSIZE,
mmDCORE0_TPC0_QM_ARC_CQ_CTL,
mmDCORE0_TPC0_QM_CP_SWITCH_WD_SET,
mmDCORE0_TPC0_QM_CP_EXT_SWITCH,
mmDCORE0_TPC0_QM_CP_PRED_0,
mmDCORE0_TPC0_QM_CP_PRED_1,
mmDCORE0_TPC0_QM_CP_PRED_2,
mmDCORE0_TPC0_QM_CP_PRED_3,
mmDCORE0_TPC0_QM_CP_PRED_4,
mmDCORE0_TPC0_QM_CP_PRED_UPEN_0,
mmDCORE0_TPC0_QM_CP_PRED_UPEN_1,
mmDCORE0_TPC0_QM_CP_PRED_UPEN_2,
mmDCORE0_TPC0_QM_CP_PRED_UPEN_3,
mmDCORE0_TPC0_QM_CP_PRED_UPEN_4,
mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_0,
mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_1,
mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_2,
mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_3,
mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_LO_4,
mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_0,
mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_1,
mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_2,
mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_3,
mmDCORE0_TPC0_QM_CP_MSG_BASE0_ADDR_HI_4,
mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_0,
mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_1,
mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_2,
mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_3,
mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_LO_4,
mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_0,
mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_1,
mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_2,
mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_3,
mmDCORE0_TPC0_QM_CP_MSG_BASE1_ADDR_HI_4,
mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_0,
mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_1,
mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_2,
mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_3,
mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_LO_4,
mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_0,
mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_1,
mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_2,
mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_3,
mmDCORE0_TPC0_QM_CP_MSG_BASE2_ADDR_HI_4,
mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_0,
mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_1,
mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_2,
mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_3,
mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_LO_4,
mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_0,
mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_1,
mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_2,
mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_3,
mmDCORE0_TPC0_QM_CP_MSG_BASE3_ADDR_HI_4,
mmDCORE0_TPC0_QM_ARC_CQ_IFIFO_MSG_BASE_LO,
mmDCORE0_TPC0_QM_ARC_CQ_CTL_MSG_BASE_LO,
mmDCORE0_TPC0_QM_CQ_IFIFO_MSG_BASE_LO,
mmDCORE0_TPC0_QM_CQ_CTL_MSG_BASE_LO,
mmDCORE0_TPC0_CFG_QM_SYNC_OBJECT_MESSAGE,
mmDCORE0_TPC0_CFG_QM_SYNC_OBJECT_ADDR,
mmDCORE0_TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW,
mmDCORE0_TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH,
mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_0,
mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_0,
mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_1,
mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_1,
mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_2,
mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_2,
mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_3,
mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_3,
mmDCORE0_TPC0_CFG_QM_TID_BASE_DIM_4,
mmDCORE0_TPC0_CFG_QM_TID_SIZE_DIM_4,
mmDCORE0_TPC0_CFG_QM_KERNEL_CONFIG,
mmDCORE0_TPC0_CFG_QM_KERNEL_ID,
mmDCORE0_TPC0_CFG_QM_POWER_LOOP,
mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_0,
mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_1,
mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_2,
mmDCORE0_TPC0_CFG_TSB_CFG_MTRR_2_3,
mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_LO,
mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE2_ADDR_HI,
mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_LO,
mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE2_ADDR_HI,
mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_LO,
mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE2_ADDR_HI,
mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_LO,
mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE2_ADDR_HI,
mmDCORE0_TPC0_CFG_FP8_143_BIAS,
mmDCORE0_TPC0_CFG_ROUND_CSR,
mmDCORE0_TPC0_CFG_CONV_ROUND_CSR,
mmDCORE0_TPC0_CFG_SEMAPHORE,
mmDCORE0_TPC0_CFG_LFSR_POLYNOM,
mmDCORE0_TPC0_CFG_STATUS,
mmDCORE0_TPC0_CFG_TPC_CMD,
mmDCORE0_TPC0_CFG_TPC_EXECUTE,
mmDCORE0_TPC0_CFG_TPC_DCACHE_L0CD,
mmDCORE0_TPC0_CFG_ICACHE_BASE_ADDERESS_LOW,
mmDCORE0_TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH,
mmDCORE0_TPC0_CFG_RD_RATE_LIMIT,
mmDCORE0_TPC0_CFG_WR_RATE_LIMIT,
mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO,
mmDCORE0_TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI,
mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO,
mmDCORE0_TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI,
mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO,
mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI,
mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO,
mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI,
mmDCORE0_TPC0_CFG_KERNEL_KERNEL_CONFIG,
mmDCORE0_TPC0_CFG_KERNEL_SRF_0,
mmDCORE0_TPC0_CFG_KERNEL_SRF_1,
mmDCORE0_TPC0_CFG_KERNEL_SRF_2,
mmDCORE0_TPC0_CFG_KERNEL_SRF_3,
mmDCORE0_TPC0_CFG_KERNEL_SRF_4,
mmDCORE0_TPC0_CFG_KERNEL_SRF_5,
mmDCORE0_TPC0_CFG_KERNEL_SRF_6,
mmDCORE0_TPC0_CFG_KERNEL_SRF_7,
mmDCORE0_TPC0_CFG_KERNEL_SRF_8,
mmDCORE0_TPC0_CFG_KERNEL_SRF_9,
mmDCORE0_TPC0_CFG_KERNEL_SRF_10,
mmDCORE0_TPC0_CFG_KERNEL_SRF_11,
mmDCORE0_TPC0_CFG_KERNEL_SRF_12,
mmDCORE0_TPC0_CFG_KERNEL_SRF_13,
mmDCORE0_TPC0_CFG_KERNEL_SRF_14,
mmDCORE0_TPC0_CFG_KERNEL_SRF_15,
mmDCORE0_TPC0_CFG_KERNEL_SRF_16,
mmDCORE0_TPC0_CFG_KERNEL_SRF_17,
mmDCORE0_TPC0_CFG_KERNEL_SRF_18,
mmDCORE0_TPC0_CFG_KERNEL_SRF_19,
mmDCORE0_TPC0_CFG_KERNEL_SRF_20,
mmDCORE0_TPC0_CFG_KERNEL_SRF_21,
mmDCORE0_TPC0_CFG_KERNEL_SRF_22,
mmDCORE0_TPC0_CFG_KERNEL_SRF_23,
mmDCORE0_TPC0_CFG_KERNEL_SRF_24,
mmDCORE0_TPC0_CFG_KERNEL_SRF_25,
mmDCORE0_TPC0_CFG_KERNEL_SRF_26,
mmDCORE0_TPC0_CFG_KERNEL_SRF_27,
mmDCORE0_TPC0_CFG_KERNEL_SRF_28,
mmDCORE0_TPC0_CFG_KERNEL_SRF_29,
mmDCORE0_TPC0_CFG_KERNEL_SRF_30,
mmDCORE0_TPC0_CFG_KERNEL_SRF_31,
mmDCORE0_TPC0_CFG_TPC_SB_L0CD,
mmDCORE0_TPC0_CFG_TPC_ID,
mmDCORE0_TPC0_CFG_QM_KERNEL_ID_INC,
mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_0,
mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_1,
mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_2,
mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_3,
mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_4,
mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SPARE_0,
mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SPARE_1,
mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SPARE_2,
mmDCORE0_TPC0_CFG_SPECIAL_GLBL_SPARE_3
};
static const u32 gaudi2_pb_dcr0_tpc0_ktensor_unsecured_regs[] = {
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_PREF_STRIDE,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_STRIDE_HIGH,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_STRIDE_HIGH,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_STRIDE_HIGH,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_STRIDE_HIGH,
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_STRIDE_HIGH,
};
static const u32 gaudi2_pb_dcr0_tpc0_qtensor_unsecured_regs[] = {
mmDCORE0_TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_PADDING_VALUE,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_PREF_STRIDE,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_STRIDE_HIGH,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_STRIDE_HIGH,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_STRIDE_HIGH,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_STRIDE_HIGH,
mmDCORE0_TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_STRIDE_HIGH,
};
static const u32 gaudi2_pb_dcr0_sram0[] = {
mmDCORE0_SRAM0_BANK_BASE,
mmDCORE0_SRAM0_DBG_CNT_N_HBW_DBG_CNT_BASE,
mmDCORE0_SRAM0_RTR_BASE,
};
static const u32 gaudi2_pb_dcr0_sm_mstr_if[] = {
mmDCORE0_SYNC_MNGR_MSTR_IF_RR_SHRD_HBW_BASE,
};
static const u32 gaudi2_pb_dcr0_sm_glbl[] = {
mmDCORE0_SYNC_MNGR_GLBL_BASE,
};
static const u32 gaudi2_pb_dcr1_sm_glbl[] = {
mmDCORE1_SYNC_MNGR_GLBL_BASE,
};
static const struct range gaudi2_pb_dcr0_sm_glbl_unsecured_regs[] = {
{mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63},
{mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63},
{mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63},
{mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_63},
{mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_1, mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_63},
{mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_1, mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_63},
{mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_1, mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_63},
{mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_63},
};
static const struct range gaudi2_pb_dcr_x_sm_glbl_unsecured_regs[] = {
{mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63},
{mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63},
{mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63},
{mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_63},
{mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_0, mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_63},
{mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_0, mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_63},
{mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_0, mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_63},
{mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_63},
};
static const u32 gaudi2_pb_arc_sched[] = {
mmARC_FARM_ARC0_AUX_BASE,
mmARC_FARM_ARC0_DUP_ENG_BASE,
mmARC_FARM_ARC0_ACP_ENG_BASE,
};
static const struct range gaudi2_pb_arc_sched_unsecured_regs[] = {
{mmARC_FARM_ARC0_AUX_RUN_HALT_REQ, mmARC_FARM_ARC0_AUX_RUN_HALT_ACK},
{mmARC_FARM_ARC0_AUX_CLUSTER_NUM, mmARC_FARM_ARC0_AUX_WAKE_UP_EVENT},
{mmARC_FARM_ARC0_AUX_ARC_RST_REQ, mmARC_FARM_ARC0_AUX_CID_OFFSET_7},
{mmARC_FARM_ARC0_AUX_SCRATCHPAD_0, mmARC_FARM_ARC0_AUX_INFLIGHT_LBU_RD_CNT},
{mmARC_FARM_ARC0_AUX_CBU_EARLY_BRESP_EN, mmARC_FARM_ARC0_AUX_CBU_EARLY_BRESP_EN},
{mmARC_FARM_ARC0_AUX_LBU_EARLY_BRESP_EN, mmARC_FARM_ARC0_AUX_LBU_EARLY_BRESP_EN},
{mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_0, mmARC_FARM_ARC0_AUX_DCCM_QUEUE_ALERT_MSG},
{mmARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_CNT, mmARC_FARM_ARC0_AUX_QMAN_ARC_CQ_SHADOW_CI},
{mmARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_WR_IF_CNT, mmARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN},
{mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_0, mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_63},
{mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_STRONG_ORDER, mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_OVRD},
{mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_0, mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_REG},
};
static const u32 gaudi2_pb_xbar_mid[] = {
mmXBAR_MID_0_BASE,
};
static const u32 gaudi2_pb_xbar_mid_unsecured_regs[] = {
mmXBAR_MID_0_UPSCALE,
mmXBAR_MID_0_DOWN_CONV,
mmXBAR_MID_0_DOWN_CONV_LFSR_EN,
mmXBAR_MID_0_DOWN_CONV_LFSR_SET_VLD,
mmXBAR_MID_0_DOWN_CONV_LFSR_SET_VALUE,
mmXBAR_MID_0_DOWN_CONV_LFSR_CFG_POLY,
};
static const u32 gaudi2_pb_xbar_edge[] = {
mmXBAR_EDGE_0_BASE,
};
static const u32 gaudi2_pb_xbar_edge_unsecured_regs[] = {
mmXBAR_EDGE_0_UPSCALE,
mmXBAR_EDGE_0_DOWN_CONV,
mmXBAR_EDGE_0_DOWN_CONV_LFSR_EN,
mmXBAR_EDGE_0_DOWN_CONV_LFSR_SET_VLD,
mmXBAR_EDGE_0_DOWN_CONV_LFSR_SET_VALUE,
mmXBAR_EDGE_0_DOWN_CONV_LFSR_CFG_POLY,
};
static const u32 gaudi2_pb_nic0[] = {
mmNIC0_TMR_BASE,
mmNIC0_RXB_CORE_BASE,
mmNIC0_RXE0_BASE,
mmNIC0_RXE1_BASE,
mmNIC0_RXE0_AXUSER_AXUSER_CQ0_BASE,
mmNIC0_RXE1_AXUSER_AXUSER_CQ0_BASE,
mmNIC0_TXS0_BASE,
mmNIC0_TXS1_BASE,
mmNIC0_TXE0_BASE,
mmNIC0_TXE1_BASE,
mmNIC0_TXB_BASE,
mmNIC0_MSTR_IF_RR_SHRD_HBW_BASE,
};
static const u32 gaudi2_pb_nic0_qm_qpc[] = {
mmNIC0_QM0_BASE,
mmNIC0_QPC0_BASE,
};
static const u32 gaudi2_pb_nic0_qm_arc_aux0[] = {
mmNIC0_QM_ARC_AUX0_BASE,
};
static const struct range gaudi2_pb_nic0_qm_arc_aux0_unsecured_regs[] = {
{mmNIC0_QM_ARC_AUX0_RUN_HALT_REQ, mmNIC0_QM_ARC_AUX0_RUN_HALT_ACK},
{mmNIC0_QM_ARC_AUX0_CLUSTER_NUM, mmNIC0_QM_ARC_AUX0_WAKE_UP_EVENT},
{mmNIC0_QM_ARC_AUX0_ARC_RST_REQ, mmNIC0_QM_ARC_AUX0_CID_OFFSET_7},
{mmNIC0_QM_ARC_AUX0_SCRATCHPAD_0, mmNIC0_QM_ARC_AUX0_INFLIGHT_LBU_RD_CNT},
{mmNIC0_QM_ARC_AUX0_CBU_EARLY_BRESP_EN, mmNIC0_QM_ARC_AUX0_CBU_EARLY_BRESP_EN},
{mmNIC0_QM_ARC_AUX0_LBU_EARLY_BRESP_EN, mmNIC0_QM_ARC_AUX0_LBU_EARLY_BRESP_EN},
{mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_BASE_ADDR_0, mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_ALERT_MSG},
{mmNIC0_QM_ARC_AUX0_DCCM_Q_PUSH_FIFO_CNT, mmNIC0_QM_ARC_AUX0_QMAN_ARC_CQ_SHADOW_CI},
{mmNIC0_QM_ARC_AUX0_ARC_AXI_ORDERING_WR_IF_CNT, mmNIC0_QM_ARC_AUX0_MME_ARC_UPPER_DCCM_EN},
};
static const u32 gaudi2_pb_nic0_umr[] = {
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE,
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 1, /* UMR0_1 */
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 2, /* UMR0_2 */
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 3, /* UMR0_3 */
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 4, /* UMR0_4 */
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 5, /* UMR0_5 */
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 6, /* UMR0_6 */
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 7, /* UMR0_7 */
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 8, /* UMR0_8 */
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 9, /* UMR0_9 */
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 10, /* UMR0_10 */
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 11, /* UMR0_11 */
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 12, /* UMR0_12 */
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 13, /* UMR0_13 */
mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + HL_BLOCK_SIZE * 14, /* UMR0_14 */
};
static const struct range gaudi2_pb_nic0_umr_unsecured_regs[] = {
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32,
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 1, /* UMR0_1 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 1},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 2, /* UMR0_2 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 2},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 3, /* UMR0_3 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 3},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 4, /* UMR0_4 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 4},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 5, /* UMR0_5 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 5},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 6, /* UMR0_6 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 6},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 7, /* UMR0_7 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 7},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 8, /* UMR0_8 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 8},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 9, /* UMR0_9 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 9},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 10, /* UMR0_10 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 10},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 11, /* UMR0_11 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 11},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 12, /* UMR0_12 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 12},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 13, /* UMR0_13 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 13},
{mmNIC0_UMR0_0_UNSECURE_DOORBELL0_UNSECURE_DB_FIRST32 + HL_BLOCK_SIZE * 14, /* UMR0_14 */
mmNIC0_UMR0_0_COMPLETION_QUEUE_CI_1_CQ_CONSUMER_INDEX + HL_BLOCK_SIZE * 14},
};
/*
* mmNIC0_QPC0_LINEAR_WQE_QPN and mmNIC0_QPC0_MULTI_STRIDE_WQE_QPN are 32-bit
* registers, and since the user writes in 64-bit chunks we must unsecure the
* following 32 bits as well (which is why the next 4 bytes were also added to
* the table). In the RTL, as part of ECO (2874), writing to those next 4 bytes
* triggers a write to the SPECIAL_GLBL_SPARE register, so it must be
* unsecured as well.
*/
#define mmNIC0_QPC0_LINEAR_WQE_RSV (mmNIC0_QPC0_LINEAR_WQE_QPN + 4)
#define mmNIC0_QPC0_MULTI_STRIDE_WQE_RSV (mmNIC0_QPC0_MULTI_STRIDE_WQE_QPN + 4)
#define mmNIC0_QPC0_SPECIAL_GLBL_SPARE 0x541FF60
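/*
 * Illustrative sketch only, not driver code: a 64-bit descriptor write that
 * starts at the 32-bit QPN register also lands on the following 4 bytes, so
 * both halves of the pair have to be unsecured. Plain C with no kernel
 * dependencies; the helper name and parameters are invented for this example.
 */
static inline void example_write_wqe_qpn_pair(volatile unsigned int *qpn_reg,
					      unsigned long long wqe_qword)
{
	/* The low 32 bits hit the QPN register itself... */
	qpn_reg[0] = (unsigned int)wqe_qword;
	/* ...and the high 32 bits hit QPN + 4, i.e. the _RSV alias above. */
	qpn_reg[1] = (unsigned int)(wqe_qword >> 32);
}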
static const u32 gaudi2_pb_nic0_qm_qpc_unsecured_regs[] = {
mmNIC0_QPC0_LINEAR_WQE_STATIC_0,
mmNIC0_QPC0_LINEAR_WQE_STATIC_1,
mmNIC0_QPC0_LINEAR_WQE_STATIC_2,
mmNIC0_QPC0_LINEAR_WQE_STATIC_3,
mmNIC0_QPC0_LINEAR_WQE_STATIC_4,
mmNIC0_QPC0_LINEAR_WQE_STATIC_5,
mmNIC0_QPC0_LINEAR_WQE_STATIC_6,
mmNIC0_QPC0_LINEAR_WQE_STATIC_7,
mmNIC0_QPC0_LINEAR_WQE_STATIC_8,
mmNIC0_QPC0_LINEAR_WQE_STATIC_9,
mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_0,
mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_1,
mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_2,
mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_3,
mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_4,
mmNIC0_QPC0_LINEAR_WQE_DYNAMIC_5,
mmNIC0_QPC0_LINEAR_WQE_QPN,
mmNIC0_QPC0_LINEAR_WQE_RSV,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_0,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_1,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_2,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_3,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_4,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_5,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_6,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_7,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_8,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_9,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_10,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_11,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_12,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_13,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_14,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_15,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_16,
mmNIC0_QPC0_MULTI_STRIDE_WQE_STATIC_17,
mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_0,
mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_1,
mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_2,
mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_3,
mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_4,
mmNIC0_QPC0_MULTI_STRIDE_WQE_DYNAMIC_5,
mmNIC0_QPC0_MULTI_STRIDE_WQE_QPN,
mmNIC0_QPC0_MULTI_STRIDE_WQE_RSV,
mmNIC0_QPC0_QMAN_DOORBELL,
mmNIC0_QPC0_QMAN_DOORBELL_QPN,
mmNIC0_QPC0_SPECIAL_GLBL_SPARE,
mmNIC0_QM0_CQ_CFG0_0,
mmNIC0_QM0_CQ_CFG0_1,
mmNIC0_QM0_CQ_CFG0_2,
mmNIC0_QM0_CQ_CFG0_3,
mmNIC0_QM0_CQ_CFG0_4,
mmNIC0_QM0_CP_FENCE0_RDATA_0,
mmNIC0_QM0_CP_FENCE0_RDATA_1,
mmNIC0_QM0_CP_FENCE0_RDATA_2,
mmNIC0_QM0_CP_FENCE0_RDATA_3,
mmNIC0_QM0_CP_FENCE0_RDATA_4,
mmNIC0_QM0_CP_FENCE1_RDATA_0,
mmNIC0_QM0_CP_FENCE1_RDATA_1,
mmNIC0_QM0_CP_FENCE1_RDATA_2,
mmNIC0_QM0_CP_FENCE1_RDATA_3,
mmNIC0_QM0_CP_FENCE1_RDATA_4,
mmNIC0_QM0_CP_FENCE2_RDATA_0,
mmNIC0_QM0_CP_FENCE2_RDATA_1,
mmNIC0_QM0_CP_FENCE2_RDATA_2,
mmNIC0_QM0_CP_FENCE2_RDATA_3,
mmNIC0_QM0_CP_FENCE2_RDATA_4,
mmNIC0_QM0_CP_FENCE3_RDATA_0,
mmNIC0_QM0_CP_FENCE3_RDATA_1,
mmNIC0_QM0_CP_FENCE3_RDATA_2,
mmNIC0_QM0_CP_FENCE3_RDATA_3,
mmNIC0_QM0_CP_FENCE3_RDATA_4,
mmNIC0_QM0_CP_FENCE0_CNT_0,
mmNIC0_QM0_CP_FENCE0_CNT_1,
mmNIC0_QM0_CP_FENCE0_CNT_2,
mmNIC0_QM0_CP_FENCE0_CNT_3,
mmNIC0_QM0_CP_FENCE0_CNT_4,
mmNIC0_QM0_CP_FENCE1_CNT_0,
mmNIC0_QM0_CP_FENCE1_CNT_1,
mmNIC0_QM0_CP_FENCE1_CNT_2,
mmNIC0_QM0_CP_FENCE1_CNT_3,
mmNIC0_QM0_CP_FENCE1_CNT_4,
mmNIC0_QM0_CP_FENCE2_CNT_0,
mmNIC0_QM0_CP_FENCE2_CNT_1,
mmNIC0_QM0_CP_FENCE2_CNT_2,
mmNIC0_QM0_CP_FENCE2_CNT_3,
mmNIC0_QM0_CP_FENCE2_CNT_4,
mmNIC0_QM0_CP_FENCE3_CNT_0,
mmNIC0_QM0_CP_FENCE3_CNT_1,
mmNIC0_QM0_CP_FENCE3_CNT_2,
mmNIC0_QM0_CP_FENCE3_CNT_3,
mmNIC0_QM0_CP_FENCE3_CNT_4,
mmNIC0_QM0_CQ_PTR_LO_0,
mmNIC0_QM0_CQ_PTR_HI_0,
mmNIC0_QM0_CQ_TSIZE_0,
mmNIC0_QM0_CQ_CTL_0,
mmNIC0_QM0_CQ_PTR_LO_1,
mmNIC0_QM0_CQ_PTR_HI_1,
mmNIC0_QM0_CQ_TSIZE_1,
mmNIC0_QM0_CQ_CTL_1,
mmNIC0_QM0_CQ_PTR_LO_2,
mmNIC0_QM0_CQ_PTR_HI_2,
mmNIC0_QM0_CQ_TSIZE_2,
mmNIC0_QM0_CQ_CTL_2,
mmNIC0_QM0_CQ_PTR_LO_3,
mmNIC0_QM0_CQ_PTR_HI_3,
mmNIC0_QM0_CQ_TSIZE_3,
mmNIC0_QM0_CQ_CTL_3,
mmNIC0_QM0_CQ_PTR_LO_4,
mmNIC0_QM0_CQ_PTR_HI_4,
mmNIC0_QM0_CQ_TSIZE_4,
mmNIC0_QM0_CQ_CTL_4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR0_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR0_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR1_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR1_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR2_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR2_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR3_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR3_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR4_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR4_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR5_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR5_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR6_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR6_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR7_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR7_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR8_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR8_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR9_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR9_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR10_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR10_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR11_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR11_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR12_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR12_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR13_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR13_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR14_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR14_BASE + 4,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR15_BASE,
mmNIC0_QM0_QMAN_WR64_BASE_ADDR15_BASE + 4,
mmNIC0_QM0_ARC_CQ_PTR_LO,
mmNIC0_QM0_ARC_CQ_PTR_LO_STS,
mmNIC0_QM0_ARC_CQ_PTR_HI,
mmNIC0_QM0_ARC_CQ_PTR_HI_STS,
mmNIC0_QM0_ARB_CFG_0,
mmNIC0_QM0_ARB_MST_QUIET_PER,
mmNIC0_QM0_ARB_CHOICE_Q_PUSH,
mmNIC0_QM0_ARB_WRR_WEIGHT_0,
mmNIC0_QM0_ARB_WRR_WEIGHT_1,
mmNIC0_QM0_ARB_WRR_WEIGHT_2,
mmNIC0_QM0_ARB_WRR_WEIGHT_3,
mmNIC0_QM0_ARB_BASE_LO,
mmNIC0_QM0_ARB_BASE_HI,
mmNIC0_QM0_ARB_MST_SLAVE_EN,
mmNIC0_QM0_ARB_MST_SLAVE_EN_1,
mmNIC0_QM0_ARB_MST_CRED_INC,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_0,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_1,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_2,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_3,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_4,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_5,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_6,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_7,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_8,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_9,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_10,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_11,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_12,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_13,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_14,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_15,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_16,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_17,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_18,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_19,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_20,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_21,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_22,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_23,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_24,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_25,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_26,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_27,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_28,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_29,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_30,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_31,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_32,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_33,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_34,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_35,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_36,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_37,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_38,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_39,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_40,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_41,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_42,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_43,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_44,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_45,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_46,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_47,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_48,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_49,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_50,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_51,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_52,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_53,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_54,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_55,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_56,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_57,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_58,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_59,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_60,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_61,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_62,
mmNIC0_QM0_ARB_MST_CHOICE_PUSH_OFST_63,
mmNIC0_QM0_ARB_SLV_ID,
mmNIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST,
mmNIC0_QM0_ARC_CQ_CFG0,
mmNIC0_QM0_CQ_IFIFO_CI_0,
mmNIC0_QM0_CQ_IFIFO_CI_1,
mmNIC0_QM0_CQ_IFIFO_CI_2,
mmNIC0_QM0_CQ_IFIFO_CI_3,
mmNIC0_QM0_CQ_IFIFO_CI_4,
mmNIC0_QM0_ARC_CQ_IFIFO_CI,
mmNIC0_QM0_CQ_CTL_CI_0,
mmNIC0_QM0_CQ_CTL_CI_1,
mmNIC0_QM0_CQ_CTL_CI_2,
mmNIC0_QM0_CQ_CTL_CI_3,
mmNIC0_QM0_CQ_CTL_CI_4,
mmNIC0_QM0_ARC_CQ_CTL_CI,
mmNIC0_QM0_ARC_CQ_TSIZE,
mmNIC0_QM0_ARC_CQ_CTL,
mmNIC0_QM0_CP_SWITCH_WD_SET,
mmNIC0_QM0_CP_EXT_SWITCH,
mmNIC0_QM0_CP_PRED_0,
mmNIC0_QM0_CP_PRED_1,
mmNIC0_QM0_CP_PRED_2,
mmNIC0_QM0_CP_PRED_3,
mmNIC0_QM0_CP_PRED_4,
mmNIC0_QM0_CP_PRED_UPEN_0,
mmNIC0_QM0_CP_PRED_UPEN_1,
mmNIC0_QM0_CP_PRED_UPEN_2,
mmNIC0_QM0_CP_PRED_UPEN_3,
mmNIC0_QM0_CP_PRED_UPEN_4,
mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0,
mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_1,
mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_2,
mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_3,
mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_4,
mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0,
mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_1,
mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_2,
mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_3,
mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_4,
mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0,
mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_1,
mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_2,
mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_3,
mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_4,
mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0,
mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_1,
mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_2,
mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_3,
mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_4,
mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0,
mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_1,
mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2,
mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_3,
mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_4,
mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0,
mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_1,
mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_2,
mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_3,
mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_4,
mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0,
mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_1,
mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_2,
mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_3,
mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_4,
mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0,
mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_1,
mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_2,
mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_3,
mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_4,
mmNIC0_QM0_ARC_CQ_IFIFO_MSG_BASE_LO,
mmNIC0_QM0_ARC_CQ_CTL_MSG_BASE_LO,
mmNIC0_QM0_CQ_IFIFO_MSG_BASE_LO,
mmNIC0_QM0_CQ_CTL_MSG_BASE_LO
};
static const u32 gaudi2_pb_rot0[] = {
mmROT0_BASE,
mmROT0_MSTR_IF_RR_SHRD_HBW_BASE,
mmROT0_QM_BASE,
};
static const u32 gaudi2_pb_rot0_arc[] = {
mmROT0_QM_ARC_AUX_BASE
};
static const struct range gaudi2_pb_rot0_arc_unsecured_regs[] = {
{mmROT0_QM_ARC_AUX_RUN_HALT_REQ, mmROT0_QM_ARC_AUX_RUN_HALT_ACK},
{mmROT0_QM_ARC_AUX_CLUSTER_NUM, mmROT0_QM_ARC_AUX_WAKE_UP_EVENT},
{mmROT0_QM_ARC_AUX_ARC_RST_REQ, mmROT0_QM_ARC_AUX_CID_OFFSET_7},
{mmROT0_QM_ARC_AUX_SCRATCHPAD_0, mmROT0_QM_ARC_AUX_INFLIGHT_LBU_RD_CNT},
{mmROT0_QM_ARC_AUX_CBU_EARLY_BRESP_EN, mmROT0_QM_ARC_AUX_CBU_EARLY_BRESP_EN},
{mmROT0_QM_ARC_AUX_LBU_EARLY_BRESP_EN, mmROT0_QM_ARC_AUX_LBU_EARLY_BRESP_EN},
{mmROT0_QM_ARC_AUX_DCCM_QUEUE_BASE_ADDR_0, mmROT0_QM_ARC_AUX_DCCM_QUEUE_ALERT_MSG},
{mmROT0_QM_ARC_AUX_DCCM_Q_PUSH_FIFO_CNT, mmROT0_QM_ARC_AUX_QMAN_ARC_CQ_SHADOW_CI},
{mmROT0_QM_ARC_AUX_ARC_AXI_ORDERING_WR_IF_CNT, mmROT0_QM_ARC_AUX_MME_ARC_UPPER_DCCM_EN},
};
static const u32 gaudi2_pb_rot0_unsecured_regs[] = {
mmROT0_QM_CQ_CFG0_0,
mmROT0_QM_CQ_CFG0_1,
mmROT0_QM_CQ_CFG0_2,
mmROT0_QM_CQ_CFG0_3,
mmROT0_QM_CQ_CFG0_4,
mmROT0_QM_CP_FENCE0_RDATA_0,
mmROT0_QM_CP_FENCE0_RDATA_1,
mmROT0_QM_CP_FENCE0_RDATA_2,
mmROT0_QM_CP_FENCE0_RDATA_3,
mmROT0_QM_CP_FENCE0_RDATA_4,
mmROT0_QM_CP_FENCE1_RDATA_0,
mmROT0_QM_CP_FENCE1_RDATA_1,
mmROT0_QM_CP_FENCE1_RDATA_2,
mmROT0_QM_CP_FENCE1_RDATA_3,
mmROT0_QM_CP_FENCE1_RDATA_4,
mmROT0_QM_CP_FENCE2_RDATA_0,
mmROT0_QM_CP_FENCE2_RDATA_1,
mmROT0_QM_CP_FENCE2_RDATA_2,
mmROT0_QM_CP_FENCE2_RDATA_3,
mmROT0_QM_CP_FENCE2_RDATA_4,
mmROT0_QM_CP_FENCE3_RDATA_0,
mmROT0_QM_CP_FENCE3_RDATA_1,
mmROT0_QM_CP_FENCE3_RDATA_2,
mmROT0_QM_CP_FENCE3_RDATA_3,
mmROT0_QM_CP_FENCE3_RDATA_4,
mmROT0_QM_CP_FENCE0_CNT_0,
mmROT0_QM_CP_FENCE0_CNT_1,
mmROT0_QM_CP_FENCE0_CNT_2,
mmROT0_QM_CP_FENCE0_CNT_3,
mmROT0_QM_CP_FENCE0_CNT_4,
mmROT0_QM_CP_FENCE1_CNT_0,
mmROT0_QM_CP_FENCE1_CNT_1,
mmROT0_QM_CP_FENCE1_CNT_2,
mmROT0_QM_CP_FENCE1_CNT_3,
mmROT0_QM_CP_FENCE1_CNT_4,
mmROT0_QM_CP_FENCE2_CNT_0,
mmROT0_QM_CP_FENCE2_CNT_1,
mmROT0_QM_CP_FENCE2_CNT_2,
mmROT0_QM_CP_FENCE2_CNT_3,
mmROT0_QM_CP_FENCE2_CNT_4,
mmROT0_QM_CP_FENCE3_CNT_0,
mmROT0_QM_CP_FENCE3_CNT_1,
mmROT0_QM_CP_FENCE3_CNT_2,
mmROT0_QM_CP_FENCE3_CNT_3,
mmROT0_QM_CP_FENCE3_CNT_4,
mmROT0_QM_CQ_PTR_LO_0,
mmROT0_QM_CQ_PTR_HI_0,
mmROT0_QM_CQ_TSIZE_0,
mmROT0_QM_CQ_CTL_0,
mmROT0_QM_CQ_PTR_LO_1,
mmROT0_QM_CQ_PTR_HI_1,
mmROT0_QM_CQ_TSIZE_1,
mmROT0_QM_CQ_CTL_1,
mmROT0_QM_CQ_PTR_LO_2,
mmROT0_QM_CQ_PTR_HI_2,
mmROT0_QM_CQ_TSIZE_2,
mmROT0_QM_CQ_CTL_2,
mmROT0_QM_CQ_PTR_LO_3,
mmROT0_QM_CQ_PTR_HI_3,
mmROT0_QM_CQ_TSIZE_3,
mmROT0_QM_CQ_CTL_3,
mmROT0_QM_CQ_PTR_LO_4,
mmROT0_QM_CQ_PTR_HI_4,
mmROT0_QM_CQ_TSIZE_4,
mmROT0_QM_CQ_CTL_4,
mmROT0_QM_QMAN_WR64_BASE_ADDR0_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR0_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR1_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR1_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR2_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR2_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR3_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR3_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR4_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR4_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR5_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR5_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR6_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR6_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR7_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR7_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR8_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR8_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR9_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR9_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR10_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR10_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR11_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR11_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR12_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR12_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR13_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR13_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR14_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR14_BASE + 4,
mmROT0_QM_QMAN_WR64_BASE_ADDR15_BASE,
mmROT0_QM_QMAN_WR64_BASE_ADDR15_BASE + 4,
mmROT0_QM_ARC_CQ_PTR_LO,
mmROT0_QM_ARC_CQ_PTR_LO_STS,
mmROT0_QM_ARC_CQ_PTR_HI,
mmROT0_QM_ARC_CQ_PTR_HI_STS,
mmROT0_QM_ARB_CFG_0,
mmROT0_QM_ARB_MST_QUIET_PER,
mmROT0_QM_ARB_CHOICE_Q_PUSH,
mmROT0_QM_ARB_WRR_WEIGHT_0,
mmROT0_QM_ARB_WRR_WEIGHT_1,
mmROT0_QM_ARB_WRR_WEIGHT_2,
mmROT0_QM_ARB_WRR_WEIGHT_3,
mmROT0_QM_ARB_BASE_LO,
mmROT0_QM_ARB_BASE_HI,
mmROT0_QM_ARB_MST_SLAVE_EN,
mmROT0_QM_ARB_MST_SLAVE_EN_1,
mmROT0_QM_ARB_MST_CRED_INC,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_0,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_1,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_2,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_3,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_4,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_5,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_6,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_7,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_8,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_9,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_10,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_11,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_12,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_13,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_14,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_15,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_16,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_17,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_18,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_19,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_20,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_21,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_22,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_23,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_24,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_25,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_26,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_27,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_28,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_29,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_30,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_31,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_32,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_33,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_34,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_35,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_36,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_37,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_38,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_39,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_40,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_41,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_42,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_43,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_44,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_45,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_46,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_47,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_48,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_49,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_50,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_51,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_52,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_53,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_54,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_55,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_56,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_57,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_58,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_59,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_60,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_61,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_62,
mmROT0_QM_ARB_MST_CHOICE_PUSH_OFST_63,
mmROT0_QM_ARB_SLV_ID,
mmROT0_QM_ARB_SLV_MASTER_INC_CRED_OFST,
mmROT0_QM_ARC_CQ_CFG0,
mmROT0_QM_CQ_IFIFO_CI_0,
mmROT0_QM_CQ_IFIFO_CI_1,
mmROT0_QM_CQ_IFIFO_CI_2,
mmROT0_QM_CQ_IFIFO_CI_3,
mmROT0_QM_CQ_IFIFO_CI_4,
mmROT0_QM_ARC_CQ_IFIFO_CI,
mmROT0_QM_CQ_CTL_CI_0,
mmROT0_QM_CQ_CTL_CI_1,
mmROT0_QM_CQ_CTL_CI_2,
mmROT0_QM_CQ_CTL_CI_3,
mmROT0_QM_CQ_CTL_CI_4,
mmROT0_QM_ARC_CQ_CTL_CI,
mmROT0_QM_ARC_CQ_TSIZE,
mmROT0_QM_ARC_CQ_CTL,
mmROT0_QM_CP_SWITCH_WD_SET,
mmROT0_QM_CP_EXT_SWITCH,
mmROT0_QM_CP_PRED_0,
mmROT0_QM_CP_PRED_1,
mmROT0_QM_CP_PRED_2,
mmROT0_QM_CP_PRED_3,
mmROT0_QM_CP_PRED_4,
mmROT0_QM_CP_PRED_UPEN_0,
mmROT0_QM_CP_PRED_UPEN_1,
mmROT0_QM_CP_PRED_UPEN_2,
mmROT0_QM_CP_PRED_UPEN_3,
mmROT0_QM_CP_PRED_UPEN_4,
mmROT0_QM_CP_MSG_BASE0_ADDR_LO_0,
mmROT0_QM_CP_MSG_BASE0_ADDR_LO_1,
mmROT0_QM_CP_MSG_BASE0_ADDR_LO_2,
mmROT0_QM_CP_MSG_BASE0_ADDR_LO_3,
mmROT0_QM_CP_MSG_BASE0_ADDR_LO_4,
mmROT0_QM_CP_MSG_BASE0_ADDR_HI_0,
mmROT0_QM_CP_MSG_BASE0_ADDR_HI_1,
mmROT0_QM_CP_MSG_BASE0_ADDR_HI_2,
mmROT0_QM_CP_MSG_BASE0_ADDR_HI_3,
mmROT0_QM_CP_MSG_BASE0_ADDR_HI_4,
mmROT0_QM_CP_MSG_BASE1_ADDR_LO_0,
mmROT0_QM_CP_MSG_BASE1_ADDR_LO_1,
mmROT0_QM_CP_MSG_BASE1_ADDR_LO_2,
mmROT0_QM_CP_MSG_BASE1_ADDR_LO_3,
mmROT0_QM_CP_MSG_BASE1_ADDR_LO_4,
mmROT0_QM_CP_MSG_BASE1_ADDR_HI_0,
mmROT0_QM_CP_MSG_BASE1_ADDR_HI_1,
mmROT0_QM_CP_MSG_BASE1_ADDR_HI_2,
mmROT0_QM_CP_MSG_BASE1_ADDR_HI_3,
mmROT0_QM_CP_MSG_BASE1_ADDR_HI_4,
mmROT0_QM_CP_MSG_BASE2_ADDR_LO_0,
mmROT0_QM_CP_MSG_BASE2_ADDR_LO_1,
mmROT0_QM_CP_MSG_BASE2_ADDR_LO_2,
mmROT0_QM_CP_MSG_BASE2_ADDR_LO_3,
mmROT0_QM_CP_MSG_BASE2_ADDR_LO_4,
mmROT0_QM_CP_MSG_BASE2_ADDR_HI_0,
mmROT0_QM_CP_MSG_BASE2_ADDR_HI_1,
mmROT0_QM_CP_MSG_BASE2_ADDR_HI_2,
mmROT0_QM_CP_MSG_BASE2_ADDR_HI_3,
mmROT0_QM_CP_MSG_BASE2_ADDR_HI_4,
mmROT0_QM_CP_MSG_BASE3_ADDR_LO_0,
mmROT0_QM_CP_MSG_BASE3_ADDR_LO_1,
mmROT0_QM_CP_MSG_BASE3_ADDR_LO_2,
mmROT0_QM_CP_MSG_BASE3_ADDR_LO_3,
mmROT0_QM_CP_MSG_BASE3_ADDR_LO_4,
mmROT0_QM_CP_MSG_BASE3_ADDR_HI_0,
mmROT0_QM_CP_MSG_BASE3_ADDR_HI_1,
mmROT0_QM_CP_MSG_BASE3_ADDR_HI_2,
mmROT0_QM_CP_MSG_BASE3_ADDR_HI_3,
mmROT0_QM_CP_MSG_BASE3_ADDR_HI_4,
mmROT0_QM_ARC_CQ_IFIFO_MSG_BASE_LO,
mmROT0_QM_ARC_CQ_CTL_MSG_BASE_LO,
mmROT0_QM_CQ_IFIFO_MSG_BASE_LO,
mmROT0_QM_CQ_CTL_MSG_BASE_LO,
mmROT0_DESC_CONTEXT_ID,
mmROT0_DESC_IN_IMG_START_ADDR_L,
mmROT0_DESC_IN_IMG_START_ADDR_H,
mmROT0_DESC_OUT_IMG_START_ADDR_L,
mmROT0_DESC_OUT_IMG_START_ADDR_H,
mmROT0_DESC_CFG,
mmROT0_DESC_IM_READ_SLOPE,
mmROT0_DESC_SIN_D,
mmROT0_DESC_COS_D,
mmROT0_DESC_IN_IMG,
mmROT0_DESC_IN_STRIDE,
mmROT0_DESC_IN_STRIPE,
mmROT0_DESC_IN_CENTER,
mmROT0_DESC_OUT_IMG,
mmROT0_DESC_OUT_STRIDE,
mmROT0_DESC_OUT_STRIPE,
mmROT0_DESC_OUT_CENTER,
mmROT0_DESC_BACKGROUND,
mmROT0_DESC_CPL_MSG_EN,
mmROT0_DESC_IDLE_STATE,
mmROT0_DESC_CPL_MSG_ADDR,
mmROT0_DESC_CPL_MSG_DATA,
mmROT0_DESC_X_I_START_OFFSET,
mmROT0_DESC_X_I_START_OFFSET_FLIP,
mmROT0_DESC_X_I_FIRST,
mmROT0_DESC_Y_I_FIRST,
mmROT0_DESC_Y_I,
mmROT0_DESC_OUT_STRIPE_SIZE,
mmROT0_DESC_RSB_CFG_0,
mmROT0_DESC_RSB_PAD_VAL,
mmROT0_DESC_OWM_CFG,
mmROT0_DESC_CTRL_CFG,
mmROT0_DESC_PIXEL_PAD,
mmROT0_DESC_PREC_SHIFT,
mmROT0_DESC_MAX_VAL,
mmROT0_DESC_A0_M11,
mmROT0_DESC_A1_M12,
mmROT0_DESC_A2,
mmROT0_DESC_B0_M21,
mmROT0_DESC_B1_M22,
mmROT0_DESC_B2,
mmROT0_DESC_C0,
mmROT0_DESC_C1,
mmROT0_DESC_C2,
mmROT0_DESC_D0,
mmROT0_DESC_D1,
mmROT0_DESC_D2,
mmROT0_DESC_INV_PROC_SIZE_M_1,
mmROT0_DESC_MESH_IMG_START_ADDR_L,
mmROT0_DESC_MESH_IMG_START_ADDR_H,
mmROT0_DESC_MESH_IMG,
mmROT0_DESC_MESH_STRIDE,
mmROT0_DESC_MESH_STRIPE,
mmROT0_DESC_MESH_CTRL,
mmROT0_DESC_MESH_GH,
mmROT0_DESC_MESH_GV,
mmROT0_DESC_MRSB_CFG_0,
mmROT0_DESC_MRSB_PAD_VAL,
mmROT0_DESC_BUF_CFG,
mmROT0_DESC_CID_OFFSET,
mmROT0_DESC_PUSH_DESC
};
static const u32 gaudi2_pb_psoc_global_conf[] = {
mmPSOC_GLOBAL_CONF_BASE
};
static const u32 gaudi2_pb_psoc[] = {
mmPSOC_EFUSE_BASE,
mmPSOC_BTL_BASE,
mmPSOC_CS_TRACE_BASE,
mmPSOC_DFT_EFUSE_BASE,
mmPSOC_PID_BASE,
mmPSOC_ARC0_CFG_BASE,
mmPSOC_ARC0_MSTR_IF_RR_SHRD_HBW_BASE,
mmPSOC_ARC0_AUX_BASE,
mmPSOC_ARC1_CFG_BASE,
mmPSOC_ARC1_MSTR_IF_RR_SHRD_HBW_BASE,
mmPSOC_ARC1_AUX_BASE,
mmJT_MSTR_IF_RR_SHRD_HBW_BASE,
mmSMI_MSTR_IF_RR_SHRD_HBW_BASE,
mmI2C_S_MSTR_IF_RR_SHRD_HBW_BASE,
mmPSOC_SVID0_BASE,
mmPSOC_SVID1_BASE,
mmPSOC_SVID2_BASE,
mmPSOC_AVS0_BASE,
mmPSOC_AVS1_BASE,
mmPSOC_AVS2_BASE,
mmPSOC_PWM0_BASE,
mmPSOC_PWM1_BASE,
mmPSOC_MSTR_IF_RR_SHRD_HBW_BASE,
};
static const u32 gaudi2_pb_pmmu[] = {
mmPMMU_HBW_MMU_BASE,
mmPMMU_HBW_STLB_BASE,
mmPMMU_HBW_MSTR_IF_RR_SHRD_HBW_BASE,
mmPMMU_PIF_BASE,
};
static const u32 gaudi2_pb_psoc_pll[] = {
mmPSOC_MME_PLL_CTRL_BASE,
mmPSOC_CPU_PLL_CTRL_BASE,
mmPSOC_VID_PLL_CTRL_BASE
};
static const u32 gaudi2_pb_pmmu_pll[] = {
mmPMMU_MME_PLL_CTRL_BASE,
mmPMMU_VID_PLL_CTRL_BASE
};
static const u32 gaudi2_pb_xbar_pll[] = {
mmDCORE0_XBAR_DMA_PLL_CTRL_BASE,
mmDCORE0_XBAR_MMU_PLL_CTRL_BASE,
mmDCORE0_XBAR_IF_PLL_CTRL_BASE,
mmDCORE0_XBAR_MESH_PLL_CTRL_BASE,
mmDCORE1_XBAR_DMA_PLL_CTRL_BASE,
mmDCORE1_XBAR_MMU_PLL_CTRL_BASE,
mmDCORE1_XBAR_IF_PLL_CTRL_BASE,
mmDCORE1_XBAR_MESH_PLL_CTRL_BASE,
mmDCORE1_XBAR_HBM_PLL_CTRL_BASE,
mmDCORE2_XBAR_DMA_PLL_CTRL_BASE,
mmDCORE2_XBAR_MMU_PLL_CTRL_BASE,
mmDCORE2_XBAR_IF_PLL_CTRL_BASE,
mmDCORE2_XBAR_BANK_PLL_CTRL_BASE,
mmDCORE2_XBAR_HBM_PLL_CTRL_BASE,
mmDCORE3_XBAR_DMA_PLL_CTRL_BASE,
mmDCORE3_XBAR_MMU_PLL_CTRL_BASE,
mmDCORE3_XBAR_IF_PLL_CTRL_BASE,
mmDCORE3_XBAR_BANK_PLL_CTRL_BASE
};
static const u32 gaudi2_pb_xft_pll[] = {
mmDCORE0_HBM_PLL_CTRL_BASE,
mmDCORE0_TPC_PLL_CTRL_BASE,
mmDCORE0_PCI_PLL_CTRL_BASE,
mmDCORE1_HBM_PLL_CTRL_BASE,
mmDCORE1_TPC_PLL_CTRL_BASE,
mmDCORE1_NIC_PLL_CTRL_BASE,
mmDCORE2_HBM_PLL_CTRL_BASE,
mmDCORE2_TPC_PLL_CTRL_BASE,
mmDCORE3_HBM_PLL_CTRL_BASE,
mmDCORE3_TPC_PLL_CTRL_BASE,
mmDCORE3_NIC_PLL_CTRL_BASE,
};
static const u32 gaudi2_pb_pcie[] = {
mmPCIE_ELBI_RR_MSTR_IF_RR_SHRD_HBW_BASE,
mmPCIE_LBW_RR_MSTR_IF_RR_SHRD_HBW_BASE,
mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_BASE,
mmPCIE_WRAP_BASE,
};
static const u32 gaudi2_pb_pcie_unsecured_regs[] = {
mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0,
};
static const u32 gaudi2_pb_thermal_sensor0[] = {
mmDCORE0_XFT_BASE,
mmDCORE0_TSTDVS_BASE,
};
static const u32 gaudi2_pb_hbm[] = {
mmHBM0_MC0_BASE,
mmHBM0_MC1_BASE,
};
static const u32 gaudi2_pb_mme_qm_arc_acp_eng[] = {
mmDCORE0_MME_QM_ARC_ACP_ENG_BASE,
};
static const struct range gaudi2_pb_mme_qm_arc_acp_eng_unsecured_regs[] = {
{mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_PI_REG_0, mmDCORE0_MME_QM_ARC_ACP_ENG_ACP_DBG_REG},
};
struct gaudi2_tpc_pb_data {
struct hl_block_glbl_sec *glbl_sec;
u32 block_array_size;
};
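/*
 * Per-instance callback for gaudi2_iterate_tpcs(): applies the glbl_sec table
 * prepared in gaudi2_init_pb_tpc() to the TPC instance at 'offset'.
 */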
static void gaudi2_config_tpcs_glbl_sec(struct hl_device *hdev, int dcore, int inst, u32 offset,
struct iterate_module_ctx *ctx)
{
struct gaudi2_tpc_pb_data *pb_data = ctx->data;
hl_config_glbl_sec(hdev, gaudi2_pb_dcr0_tpc0, pb_data->glbl_sec,
offset, pb_data->block_array_size);
}
static int gaudi2_init_pb_tpc(struct hl_device *hdev)
{
u32 stride, kernel_tensor_stride, qm_tensor_stride, block_array_size;
struct gaudi2_tpc_pb_data tpc_pb_data;
struct hl_block_glbl_sec *glbl_sec;
struct iterate_module_ctx tpc_iter;
int i;
block_array_size = ARRAY_SIZE(gaudi2_pb_dcr0_tpc0);
glbl_sec = kcalloc(block_array_size, sizeof(struct hl_block_glbl_sec), GFP_KERNEL);
if (!glbl_sec)
return -ENOMEM;
kernel_tensor_stride = mmDCORE0_TPC0_CFG_KERNEL_TENSOR_1_BASE -
mmDCORE0_TPC0_CFG_KERNEL_TENSOR_0_BASE;
qm_tensor_stride = mmDCORE0_TPC0_CFG_QM_TENSOR_1_BASE - mmDCORE0_TPC0_CFG_QM_TENSOR_0_BASE;
hl_secure_block(hdev, glbl_sec, block_array_size);
hl_unsecure_registers(hdev, gaudi2_pb_dcr0_tpc0_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_unsecured_regs),
0, gaudi2_pb_dcr0_tpc0, glbl_sec,
block_array_size);
/* Unsecure all TPC kernel tensors */
for (i = 0 ; i < TPC_NUM_OF_KERNEL_TENSORS ; i++)
hl_unsecure_registers(hdev,
gaudi2_pb_dcr0_tpc0_ktensor_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_ktensor_unsecured_regs),
i * kernel_tensor_stride, gaudi2_pb_dcr0_tpc0,
glbl_sec, block_array_size);
/* Unsecure all TPC QM tensors */
for (i = 0 ; i < TPC_NUM_OF_QM_TENSORS ; i++)
hl_unsecure_registers(hdev,
gaudi2_pb_dcr0_tpc0_qtensor_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_qtensor_unsecured_regs),
i * qm_tensor_stride,
gaudi2_pb_dcr0_tpc0, glbl_sec, block_array_size);
/* unsecure all 32 TPC QM SRF regs */
stride = mmDCORE0_TPC0_CFG_QM_SRF_1 - mmDCORE0_TPC0_CFG_QM_SRF_0;
for (i = 0 ; i < 32 ; i++)
hl_unsecure_register(hdev, mmDCORE0_TPC0_CFG_QM_SRF_0,
i * stride, gaudi2_pb_dcr0_tpc0, glbl_sec,
block_array_size);
/* unsecure the 4 TPC LOCK VALUE regs */
stride = mmDCORE0_TPC0_CFG_TPC_LOCK_VALUE_1 - mmDCORE0_TPC0_CFG_TPC_LOCK_VALUE_0;
for (i = 0 ; i < 4 ; i++)
hl_unsecure_register(hdev, mmDCORE0_TPC0_CFG_TPC_LOCK_VALUE_0,
i * stride, gaudi2_pb_dcr0_tpc0, glbl_sec,
block_array_size);
/* prepare data for TPC iterator */
tpc_pb_data.glbl_sec = glbl_sec;
tpc_pb_data.block_array_size = block_array_size;
tpc_iter.fn = &gaudi2_config_tpcs_glbl_sec;
tpc_iter.data = &tpc_pb_data;
gaudi2_iterate_tpcs(hdev, &tpc_iter);
kfree(glbl_sec);
return 0;
}
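/*
 * A minimal plain-C sketch of the stride pattern used above, under the
 * assumption (the real hl_unsecure_register() implementation is not part of
 * this file) that each 32-bit register in a block maps to one bit of a
 * "secured" bitmap and that clearing the bit for reg_base + i * stride
 * un-secures instance i of a repeated register.
 */
static inline void example_unsecure_strided(unsigned int *sec_bitmap, unsigned int block_base,
					    unsigned int reg_base, unsigned int stride,
					    unsigned int instances)
{
	unsigned int i, bit;

	for (i = 0 ; i < instances ; i++) {
		/* One protection bit per 4-byte register, relative to the block base. */
		bit = (reg_base + i * stride - block_base) / 4;
		sec_bitmap[bit / 32] &= ~(1u << (bit % 32));
	}
}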
struct gaudi2_tpc_arc_pb_data {
u32 unsecured_regs_arr_size;
u32 arc_regs_arr_size;
};
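/*
 * Per-instance callback for gaudi2_iterate_tpcs(): sets up the protection-bit
 * ranges of the TPC ARC AUX block at 'offset' and reports the result through
 * ctx->rc.
 */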
static void gaudi2_config_tpcs_pb_ranges(struct hl_device *hdev, int dcore, int inst, u32 offset,
struct iterate_module_ctx *ctx)
{
struct gaudi2_tpc_arc_pb_data *pb_data = ctx->data;
ctx->rc = hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA, 1,
offset, gaudi2_pb_dcr0_tpc0_arc,
pb_data->arc_regs_arr_size,
gaudi2_pb_dcr0_tpc0_arc_unsecured_regs,
pb_data->unsecured_regs_arr_size);
}
static int gaudi2_init_pb_tpc_arc(struct hl_device *hdev)
{
struct gaudi2_tpc_arc_pb_data tpc_arc_pb_data;
struct iterate_module_ctx tpc_iter;
tpc_arc_pb_data.arc_regs_arr_size = ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_arc);
tpc_arc_pb_data.unsecured_regs_arr_size =
ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_arc_unsecured_regs);
tpc_iter.fn = &gaudi2_config_tpcs_pb_ranges;
tpc_iter.data = &tpc_arc_pb_data;
gaudi2_iterate_tpcs(hdev, &tpc_iter);
return tpc_iter.rc;
}
static int gaudi2_init_pb_sm_objs(struct hl_device *hdev)
{
int i, j, glbl_sec_array_len = gaudi2_pb_dcr0_sm_objs.glbl_sec_length;
u32 sec_entry, *sec_array, array_base, first_sob, first_mon;
array_base = gaudi2_pb_dcr0_sm_objs.mm_block_base_addr +
gaudi2_pb_dcr0_sm_objs.glbl_sec_offset;
sec_array = kcalloc(glbl_sec_array_len, sizeof(u32), GFP_KERNEL);
if (!sec_array)
return -ENOMEM;
first_sob = GAUDI2_RESERVED_SOB_NUMBER;
first_mon = GAUDI2_RESERVED_MON_NUMBER;
/* 8192 SOB_OBJs skipping first GAUDI2_MAX_PENDING_CS of them */
for (j = i = first_sob ; i < DCORE_NUM_OF_SOB ; i++, j++)
UNSET_GLBL_SEC_BIT(sec_array, j);
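/*
 * From here on 'j' is a running bit index into sec_array: it now points one
 * past the last SOB, and each "j += i" below (with i == first_mon) skips the
 * reserved prefix of the next monitor register bank before its loop starts.
 */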
/* 2048 MON_PAY ADDR_L skipping first GAUDI2_MAX_PENDING_CS of them */
for (i = first_mon, j += i ; i < DCORE_NUM_OF_MONITORS ; i++, j++)
UNSET_GLBL_SEC_BIT(sec_array, j);
/* 2048 MON_PAY ADDR_H skipping first GAUDI2_MAX_PENDING_CS of them */
for (i = first_mon, j += i ; i < DCORE_NUM_OF_MONITORS ; i++, j++)
UNSET_GLBL_SEC_BIT(sec_array, j);
/* 2048 MON_PAY DATA skipping first GAUDI2_MAX_PENDING_CS of them */
for (i = first_mon, j += i ; i < DCORE_NUM_OF_MONITORS ; i++, j++)
UNSET_GLBL_SEC_BIT(sec_array, j);
/* 2048 MON_ARM skipping first GAUDI2_MAX_PENDING_CS of them */
for (i = first_mon, j += i ; i < DCORE_NUM_OF_MONITORS ; i++, j++)
UNSET_GLBL_SEC_BIT(sec_array, j);
/* 2048 MON_CONFIG skipping first GAUDI2_MAX_PENDING_CS of them */
for (i = first_mon, j += i ; i < DCORE_NUM_OF_MONITORS ; i++, j++)
UNSET_GLBL_SEC_BIT(sec_array, j);
/* 2048 MON_STATUS skipping first GAUDI2_MAX_PENDING_CS of them */
for (i = first_mon, j += i ; i < DCORE_NUM_OF_MONITORS ; i++, j++)
UNSET_GLBL_SEC_BIT(sec_array, j);
/* Unsecure selected Dcore0 registers */
for (i = 0 ; i < glbl_sec_array_len ; i++) {
sec_entry = array_base + i * sizeof(u32);
WREG32(sec_entry, sec_array[i]);
}
/* Unsecure Dcore1 - Dcore3 registers */
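	/* (writing all-ones sets every protection bit, i.e. leaves all Dcore1-3 SM objects unsecured) */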
memset(sec_array, -1, glbl_sec_array_len * sizeof(u32));
for (i = 1 ; i < NUM_OF_DCORES ; i++) {
for (j = 0 ; j < glbl_sec_array_len ; j++) {
sec_entry = DCORE_OFFSET * i + array_base + j * sizeof(u32);
WREG32(sec_entry, sec_array[j]);
}
}
kfree(sec_array);
return 0;
}
static void gaudi2_write_lbw_range_register(struct hl_device *hdev, u64 base, void *data)
{
u32 reg_min_offset, reg_max_offset, write_min, write_max;
struct rr_config *rr_cfg = (struct rr_config *) data;
switch (rr_cfg->type) {
case RR_TYPE_SHORT:
reg_min_offset = RR_LBW_SEC_RANGE_MIN_SHORT_0_OFFSET;
reg_max_offset = RR_LBW_SEC_RANGE_MAX_SHORT_0_OFFSET;
break;
case RR_TYPE_LONG:
reg_min_offset = RR_LBW_SEC_RANGE_MIN_0_OFFSET;
reg_max_offset = RR_LBW_SEC_RANGE_MAX_0_OFFSET;
break;
case RR_TYPE_SHORT_PRIV:
reg_min_offset = RR_LBW_PRIV_RANGE_MIN_SHORT_0_OFFSET;
reg_max_offset = RR_LBW_PRIV_RANGE_MAX_SHORT_0_OFFSET;
break;
case RR_TYPE_LONG_PRIV:
reg_min_offset = RR_LBW_PRIV_RANGE_MIN_0_OFFSET;
reg_max_offset = RR_LBW_PRIV_RANGE_MAX_0_OFFSET;
break;
default:
dev_err(hdev->dev, "Invalid LBW RR type %u\n", rr_cfg->type);
return;
}
reg_min_offset += rr_cfg->index * sizeof(u32);
reg_max_offset += rr_cfg->index * sizeof(u32);
if (rr_cfg->type == RR_TYPE_SHORT || rr_cfg->type == RR_TYPE_SHORT_PRIV) {
write_min = FIELD_GET(RR_LBW_SHORT_MASK, lower_32_bits(rr_cfg->min));
write_max = FIELD_GET(RR_LBW_SHORT_MASK, lower_32_bits(rr_cfg->max));
} else {
write_min = FIELD_GET(RR_LBW_LONG_MASK, lower_32_bits(rr_cfg->min));
write_max = FIELD_GET(RR_LBW_LONG_MASK, lower_32_bits(rr_cfg->max));
}
/* Configure LBW RR:
* Both RR types start blocking from base address 0x1000007FF8000000
* SHORT RRs address bits [26:12]
* LONG RRs address bits [26:0]
*/
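	/*
	 * For example, assuming RR_LBW_SHORT_MASK covers bits [26:12] as
	 * documented above, the FIELD_GET() calls keep address bits [26:12] of
	 * the min/max addresses and shift them down to bit 0 before they are
	 * written to the range registers below (mask values assumed from the
	 * bit ranges in the comment above).
	 */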
WREG32(base + reg_min_offset, write_min);
WREG32(base + reg_max_offset, write_max);
}
void gaudi2_write_rr_to_all_lbw_rtrs(struct hl_device *hdev, u8 rr_type, u32 rr_index, u64 min_val,
u64 max_val)
{
struct dup_block_ctx block_ctx;
struct rr_config rr_cfg;
if ((rr_type == RR_TYPE_SHORT || rr_type == RR_TYPE_SHORT_PRIV) &&
rr_index >= NUM_SHORT_LBW_RR) {
dev_err(hdev->dev, "invalid short LBW %s range register index: %u",
rr_type == RR_TYPE_SHORT ? "secure" : "privileged", rr_index);
return;
}
if ((rr_type == RR_TYPE_LONG || rr_type == RR_TYPE_LONG_PRIV) &&
rr_index >= NUM_LONG_LBW_RR) {
dev_err(hdev->dev, "invalid long LBW %s range register index: %u",
rr_type == RR_TYPE_LONG ? "secure" : "privileged", rr_index);
return;
}
rr_cfg.type = rr_type;
rr_cfg.index = rr_index;
rr_cfg.min = min_val;
rr_cfg.max = max_val;
block_ctx.instance_cfg_fn = &gaudi2_write_lbw_range_register;
block_ctx.data = &rr_cfg;
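	/*
	 * The same range configuration is replayed on every LBW router in the
	 * device: the SFT routers, the per-DCORE routers and the three PCIE
	 * master interfaces configured below.
	 */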
/* SFT */
block_ctx.base = mmSFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_LBW_BASE;
block_ctx.blocks = NUM_OF_SFT;
block_ctx.block_off = SFT_OFFSET;
block_ctx.instances = SFT_NUM_OF_LBW_RTR;
block_ctx.instance_off = SFT_LBW_RTR_OFFSET;
gaudi2_init_blocks(hdev, &block_ctx);
/* SIF */
block_ctx.base = mmDCORE0_RTR0_MSTR_IF_RR_SHRD_LBW_BASE;
block_ctx.blocks = NUM_OF_DCORES;
block_ctx.block_off = DCORE_OFFSET;
block_ctx.instances = NUM_OF_RTR_PER_DCORE;
block_ctx.instance_off = DCORE_RTR_OFFSET;
gaudi2_init_blocks(hdev, &block_ctx);
block_ctx.blocks = 1;
block_ctx.block_off = 0;
block_ctx.instances = 1;
block_ctx.instance_off = 0;
/* PCIE ELBI */
block_ctx.base = mmPCIE_ELBI_RR_MSTR_IF_RR_SHRD_LBW_BASE;
gaudi2_init_blocks(hdev, &block_ctx);
/* PCIE MSTR */
block_ctx.base = mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_LBW_BASE;
gaudi2_init_blocks(hdev, &block_ctx);
/* PCIE LBW */
block_ctx.base = mmPCIE_LBW_RR_MSTR_IF_RR_SHRD_LBW_BASE;
gaudi2_init_blocks(hdev, &block_ctx);
}
static void gaudi2_init_lbw_range_registers_secure(struct hl_device *hdev)
{
int i;
	/* Up to 14 14-bit address regs.
*
* - range 0: NIC0_CFG
* - range 1: NIC1_CFG
* - range 2: NIC2_CFG
* - range 3: NIC3_CFG
* - range 4: NIC4_CFG
* - range 5: NIC5_CFG
* - range 6: NIC6_CFG
* - range 7: NIC7_CFG
* - range 8: NIC8_CFG
* - range 9: NIC9_CFG
* - range 10: NIC10_CFG
* - range 11: NIC11_CFG + *_DBG (not including TPC_DBG)
*
* If F/W security is not enabled:
* - ranges 12,13: PSOC_CFG (excluding PSOC_TIMESTAMP)
*/
u64 lbw_range_min_short[] = {
mmNIC0_TX_AXUSER_BASE,
mmNIC1_TX_AXUSER_BASE,
mmNIC2_TX_AXUSER_BASE,
mmNIC3_TX_AXUSER_BASE,
mmNIC4_TX_AXUSER_BASE,
mmNIC5_TX_AXUSER_BASE,
mmNIC6_TX_AXUSER_BASE,
mmNIC7_TX_AXUSER_BASE,
mmNIC8_TX_AXUSER_BASE,
mmNIC9_TX_AXUSER_BASE,
mmNIC10_TX_AXUSER_BASE,
mmNIC11_TX_AXUSER_BASE,
mmPSOC_I2C_M0_BASE,
mmPSOC_EFUSE_BASE
};
u64 lbw_range_max_short[] = {
mmNIC0_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
mmNIC1_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
mmNIC2_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
mmNIC3_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
mmNIC4_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
mmNIC5_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
mmNIC6_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
mmNIC7_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
mmNIC8_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
mmNIC9_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
mmNIC10_MAC_CH3_MAC_PCS_BASE + HL_BLOCK_SIZE,
mmNIC11_DBG_FUNNEL_NCH_BASE + HL_BLOCK_SIZE,
mmPSOC_WDOG_BASE + HL_BLOCK_SIZE,
mmSVID2_AC_BASE + HL_BLOCK_SIZE
};
	/* Up to 4 26-bit address regs.
*
* - range 0: TPC_DBG
* - range 1: PCIE_DBI.MSIX_DOORBELL_OFF
* - range 2/3: used in soft reset to block access to several blocks and are cleared here
*/
u64 lbw_range_min_long[] = {
mmDCORE0_TPC0_ROM_TABLE_BASE,
mmPCIE_DBI_MSIX_DOORBELL_OFF,
0x0,
0x0
};
u64 lbw_range_max_long[] = {
mmDCORE3_TPC5_EML_CS_BASE + HL_BLOCK_SIZE,
mmPCIE_DBI_MSIX_DOORBELL_OFF + 0x4,
0x0,
0x0
};
/* write short range registers to all lbw rtrs */
for (i = 0 ; i < ARRAY_SIZE(lbw_range_min_short) ; i++) {
if ((lbw_range_min_short[i] == mmPSOC_I2C_M0_BASE ||
lbw_range_min_short[i] == mmPSOC_EFUSE_BASE) &&
hdev->asic_prop.fw_security_enabled)
continue;
gaudi2_write_rr_to_all_lbw_rtrs(hdev, RR_TYPE_SHORT, i,
lbw_range_min_short[i], lbw_range_max_short[i]);
}
/* write long range registers to all lbw rtrs */
for (i = 0 ; i < ARRAY_SIZE(lbw_range_min_long) ; i++) {
gaudi2_write_rr_to_all_lbw_rtrs(hdev, RR_TYPE_LONG, i,
lbw_range_min_long[i], lbw_range_max_long[i]);
}
}
static void gaudi2_init_lbw_range_registers(struct hl_device *hdev)
{
gaudi2_init_lbw_range_registers_secure(hdev);
}
static void gaudi2_write_hbw_range_register(struct hl_device *hdev, u64 base, void *data)
{
u32 min_lo_reg_offset, min_hi_reg_offset, max_lo_reg_offset, max_hi_reg_offset;
struct rr_config *rr_cfg = (struct rr_config *) data;
u64 val_min, val_max;
switch (rr_cfg->type) {
case RR_TYPE_SHORT:
min_lo_reg_offset = RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_LO_0_OFFSET;
min_hi_reg_offset = RR_SHRD_HBW_SEC_RANGE_MIN_SHORT_HI_0_OFFSET;
max_lo_reg_offset = RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_LO_0_OFFSET;
max_hi_reg_offset = RR_SHRD_HBW_SEC_RANGE_MAX_SHORT_HI_0_OFFSET;
break;
case RR_TYPE_LONG:
min_lo_reg_offset = RR_SHRD_HBW_SEC_RANGE_MIN_LO_0_OFFSET;
min_hi_reg_offset = RR_SHRD_HBW_SEC_RANGE_MIN_HI_0_OFFSET;
max_lo_reg_offset = RR_SHRD_HBW_SEC_RANGE_MAX_LO_0_OFFSET;
max_hi_reg_offset = RR_SHRD_HBW_SEC_RANGE_MAX_HI_0_OFFSET;
break;
case RR_TYPE_SHORT_PRIV:
min_lo_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_LO_0_OFFSET;
min_hi_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MIN_SHORT_HI_0_OFFSET;
max_lo_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_LO_0_OFFSET;
max_hi_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MAX_SHORT_HI_0_OFFSET;
break;
case RR_TYPE_LONG_PRIV:
min_lo_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MIN_LO_0_OFFSET;
min_hi_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MIN_HI_0_OFFSET;
max_lo_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MAX_LO_0_OFFSET;
max_hi_reg_offset = RR_SHRD_HBW_PRIV_RANGE_MAX_HI_0_OFFSET;
break;
default:
dev_err(hdev->dev, "Invalid HBW RR type %u\n", rr_cfg->type);
return;
}
min_lo_reg_offset += rr_cfg->index * sizeof(u32);
min_hi_reg_offset += rr_cfg->index * sizeof(u32);
max_lo_reg_offset += rr_cfg->index * sizeof(u32);
max_hi_reg_offset += rr_cfg->index * sizeof(u32);
if (rr_cfg->type == RR_TYPE_SHORT || rr_cfg->type == RR_TYPE_SHORT_PRIV) {
val_min = FIELD_GET(RR_HBW_SHORT_HI_MASK, rr_cfg->min) |
FIELD_GET(RR_HBW_SHORT_LO_MASK, rr_cfg->min);
val_max = FIELD_GET(RR_HBW_SHORT_HI_MASK, rr_cfg->max) |
FIELD_GET(RR_HBW_SHORT_LO_MASK, rr_cfg->max);
} else {
val_min = FIELD_GET(RR_HBW_LONG_HI_MASK, rr_cfg->min) |
FIELD_GET(RR_HBW_LONG_LO_MASK, rr_cfg->min);
val_max = FIELD_GET(RR_HBW_LONG_HI_MASK, rr_cfg->max) |
FIELD_GET(RR_HBW_LONG_LO_MASK, rr_cfg->max);
}
/* Configure HBW RR:
* SHORT RRs (0x1000_<36bits>000) - HI: address bits [47:44], LO: address bits [43:12]
* LONG RRs (0x<52bits>000) - HI: address bits [63:44], LO: address bits [43:12]
*/
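	/*
	 * val_min/val_max pack the HI and LO address fields into a single
	 * 64-bit value; its lower and upper 32-bit halves are then written to
	 * the LO and HI halves of the range register pair respectively.
	 */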
WREG32(base + min_lo_reg_offset, lower_32_bits(val_min));
WREG32(base + min_hi_reg_offset, upper_32_bits(val_min));
WREG32(base + max_lo_reg_offset, lower_32_bits(val_max));
WREG32(base + max_hi_reg_offset, upper_32_bits(val_max));
}
static void gaudi2_write_hbw_rr_to_all_mstr_if(struct hl_device *hdev, u8 rr_type, u32 rr_index,
u64 min_val, u64 max_val)
{
struct dup_block_ctx block_ctx;
struct rr_config rr_cfg;
if ((rr_type == RR_TYPE_SHORT || rr_type == RR_TYPE_SHORT_PRIV) &&
rr_index >= NUM_SHORT_HBW_RR) {
dev_err(hdev->dev, "invalid short HBW %s range register index: %u",
rr_type == RR_TYPE_SHORT ? "secure" : "privileged", rr_index);
return;
}
if ((rr_type == RR_TYPE_LONG || rr_type == RR_TYPE_LONG_PRIV) &&
rr_index >= NUM_LONG_HBW_RR) {
dev_err(hdev->dev, "invalid long HBW %s range register index: %u",
rr_type == RR_TYPE_LONG ? "secure" : "privileged", rr_index);
return;
}
rr_cfg.type = rr_type;
rr_cfg.index = rr_index;
rr_cfg.min = min_val;
rr_cfg.max = max_val;
block_ctx.instance_cfg_fn = &gaudi2_write_hbw_range_register;
block_ctx.data = &rr_cfg;
/* SFT */
block_ctx.base = mmSFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE;
block_ctx.blocks = NUM_OF_SFT;
block_ctx.block_off = SFT_OFFSET;
block_ctx.instances = SFT_NUM_OF_HBW_RTR;
block_ctx.instance_off = SFT_IF_RTR_OFFSET;
gaudi2_init_blocks(hdev, &block_ctx);
/* SIF */
block_ctx.base = mmDCORE0_RTR0_MSTR_IF_RR_SHRD_HBW_BASE;
block_ctx.blocks = NUM_OF_DCORES;
block_ctx.block_off = DCORE_OFFSET;
block_ctx.instances = NUM_OF_RTR_PER_DCORE;
block_ctx.instance_off = DCORE_RTR_OFFSET;
gaudi2_init_blocks(hdev, &block_ctx);
/* PCIE MSTR */
block_ctx.base = mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_BASE;
block_ctx.blocks = 1;
block_ctx.block_off = 0;
block_ctx.instances = 1;
block_ctx.instance_off = 0;
gaudi2_init_blocks(hdev, &block_ctx);
}
static void gaudi2_init_hbw_range_registers(struct hl_device *hdev)
{
int i;
/* Up to 6 short RR (0x1000_<36bits>000) and 4 long RR (0x<52bits>000).
*
* - short range 0:
* SPI Flash, ARC0/1 ICCM/DCCM, Secure Boot ROM, PSOC_FW/Scratchpad/PCIE_FW SRAM
*/
u64 hbw_range_min_short[] = {
SPI_FLASH_BASE_ADDR
};
u64 hbw_range_max_short[] = {
PCIE_FW_SRAM_ADDR + PCIE_FW_SRAM_SIZE
};
for (i = 0 ; i < ARRAY_SIZE(hbw_range_min_short) ; i++) {
gaudi2_write_hbw_rr_to_all_mstr_if(hdev, RR_TYPE_SHORT, i, hbw_range_min_short[i],
hbw_range_max_short[i]);
}
}
static void gaudi2_write_mmu_range_register(struct hl_device *hdev, u64 base,
struct rr_config *rr_cfg)
{
u32 min_lo_reg_offset, min_hi_reg_offset, max_lo_reg_offset, max_hi_reg_offset;
switch (rr_cfg->type) {
case RR_TYPE_LONG:
min_lo_reg_offset = MMU_RR_SEC_MIN_31_0_0_OFFSET;
min_hi_reg_offset = MMU_RR_SEC_MIN_63_32_0_OFFSET;
max_lo_reg_offset = MMU_RR_SEC_MAX_31_0_0_OFFSET;
max_hi_reg_offset = MMU_RR_SEC_MAX_63_32_0_OFFSET;
break;
case RR_TYPE_LONG_PRIV:
min_lo_reg_offset = MMU_RR_PRIV_MIN_31_0_0_OFFSET;
min_hi_reg_offset = MMU_RR_PRIV_MIN_63_32_0_OFFSET;
max_lo_reg_offset = MMU_RR_PRIV_MAX_31_0_0_OFFSET;
max_hi_reg_offset = MMU_RR_PRIV_MAX_63_32_0_OFFSET;
break;
default:
dev_err(hdev->dev, "Invalid MMU RR type %u\n", rr_cfg->type);
return;
}
min_lo_reg_offset += rr_cfg->index * sizeof(u32);
min_hi_reg_offset += rr_cfg->index * sizeof(u32);
max_lo_reg_offset += rr_cfg->index * sizeof(u32);
max_hi_reg_offset += rr_cfg->index * sizeof(u32);
/* Configure MMU RR (address bits [63:0]) */
WREG32(base + min_lo_reg_offset, lower_32_bits(rr_cfg->min));
WREG32(base + min_hi_reg_offset, upper_32_bits(rr_cfg->min));
WREG32(base + max_lo_reg_offset, lower_32_bits(rr_cfg->max));
WREG32(base + max_hi_reg_offset, upper_32_bits(rr_cfg->max));
}
static void gaudi2_init_mmu_range_registers(struct hl_device *hdev)
{
u32 dcore_id, hmmu_id, hmmu_base;
struct rr_config rr_cfg;
/* Up to 8 ranges [63:0].
*
* - range 0: Reserved HBM area for F/W and driver
*/
/* The RRs are located after the HMMU so need to use the scrambled addresses */
rr_cfg.min = hdev->asic_funcs->scramble_addr(hdev, DRAM_PHYS_BASE);
rr_cfg.max = hdev->asic_funcs->scramble_addr(hdev, hdev->asic_prop.dram_user_base_address);
rr_cfg.index = 0;
rr_cfg.type = RR_TYPE_LONG;
for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) {
for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE; hmmu_id++) {
if (!gaudi2_is_hmmu_enabled(hdev, dcore_id, hmmu_id))
continue;
hmmu_base = mmDCORE0_HMMU0_MMU_BASE + dcore_id * DCORE_OFFSET +
hmmu_id * DCORE_HMMU_OFFSET;
gaudi2_write_mmu_range_register(hdev, hmmu_base, &rr_cfg);
}
}
}
/**
 * gaudi2_init_range_registers - Initialize range registers of all initiators
*
* @hdev: pointer to hl_device structure
*/
static void gaudi2_init_range_registers(struct hl_device *hdev)
{
gaudi2_init_lbw_range_registers(hdev);
gaudi2_init_hbw_range_registers(hdev);
gaudi2_init_mmu_range_registers(hdev);
}
/**
 * gaudi2_init_protection_bits - Initialize protection bits of specific registers
*
* @hdev: pointer to hl_device structure
*
 * All protection bits are 1 by default, meaning not protected. Each bit that
 * belongs to a protected register must be set to 0.
*
*/
static int gaudi2_init_protection_bits(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 instance_offset;
int rc = 0;
u8 i;
/* SFT */
instance_offset = mmSFT1_HBW_RTR_IF0_RTR_CTRL_BASE - mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE;
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA, 4, instance_offset,
gaudi2_pb_sft0, ARRAY_SIZE(gaudi2_pb_sft0),
NULL, HL_PB_NA);
/* HIF */
instance_offset = mmDCORE0_HIF1_BASE - mmDCORE0_HIF0_BASE;
rc |= hl_init_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET,
NUM_OF_HIF_PER_DCORE, instance_offset,
gaudi2_pb_dcr0_hif, ARRAY_SIZE(gaudi2_pb_dcr0_hif),
NULL, HL_PB_NA, prop->hmmu_hif_enabled_mask);
/* RTR */
instance_offset = mmDCORE0_RTR1_CTRL_BASE - mmDCORE0_RTR0_CTRL_BASE;
rc |= hl_init_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, 8, instance_offset,
gaudi2_pb_dcr0_rtr0, ARRAY_SIZE(gaudi2_pb_dcr0_rtr0),
NULL, HL_PB_NA);
/* HMMU */
rc |= hl_init_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET,
NUM_OF_HMMU_PER_DCORE, DCORE_HMMU_OFFSET,
gaudi2_pb_dcr0_hmmu0, ARRAY_SIZE(gaudi2_pb_dcr0_hmmu0),
NULL, HL_PB_NA, prop->hmmu_hif_enabled_mask);
/* CPU.
* Except for CPU_IF, skip when security is enabled in F/W, because the blocks are protected
* by privileged RR.
*/
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_cpu_if, ARRAY_SIZE(gaudi2_pb_cpu_if),
NULL, HL_PB_NA);
if (!hdev->asic_prop.fw_security_enabled)
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_cpu, ARRAY_SIZE(gaudi2_pb_cpu),
NULL, HL_PB_NA);
/* KDMA */
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_kdma, ARRAY_SIZE(gaudi2_pb_kdma),
NULL, HL_PB_NA);
/* PDMA */
instance_offset = mmPDMA1_CORE_BASE - mmPDMA0_CORE_BASE;
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA, 2, instance_offset,
gaudi2_pb_pdma0, ARRAY_SIZE(gaudi2_pb_pdma0),
gaudi2_pb_pdma0_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_pdma0_unsecured_regs));
/* ARC PDMA */
rc |= hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA, 2,
instance_offset, gaudi2_pb_pdma0_arc,
ARRAY_SIZE(gaudi2_pb_pdma0_arc),
gaudi2_pb_pdma0_arc_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_pdma0_arc_unsecured_regs));
/* EDMA */
instance_offset = mmDCORE0_EDMA1_CORE_BASE - mmDCORE0_EDMA0_CORE_BASE;
rc |= hl_init_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET, 2,
instance_offset, gaudi2_pb_dcr0_edma0,
ARRAY_SIZE(gaudi2_pb_dcr0_edma0),
gaudi2_pb_dcr0_edma0_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_dcr0_edma0_unsecured_regs),
prop->edma_enabled_mask);
/* ARC EDMA */
rc |= hl_init_pb_ranges_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET, 2,
instance_offset, gaudi2_pb_dcr0_edma0_arc,
ARRAY_SIZE(gaudi2_pb_dcr0_edma0_arc),
gaudi2_pb_dcr0_edma0_arc_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_dcr0_edma0_arc_unsecured_regs),
prop->edma_enabled_mask);
/* MME */
instance_offset = mmDCORE0_MME_SBTE1_BASE - mmDCORE0_MME_SBTE0_BASE;
for (i = 0 ; i < NUM_OF_DCORES * NUM_OF_MME_PER_DCORE ; i++) {
/* MME SBTE */
rc |= hl_init_pb_single_dcore(hdev, (DCORE_OFFSET * i), 5,
instance_offset, gaudi2_pb_dcr0_mme_sbte,
ARRAY_SIZE(gaudi2_pb_dcr0_mme_sbte), NULL,
HL_PB_NA);
/* MME */
rc |= hl_init_pb_single_dcore(hdev, (DCORE_OFFSET * i),
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_mme_eng,
ARRAY_SIZE(gaudi2_pb_dcr0_mme_eng),
gaudi2_pb_dcr0_mme_eng_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_dcr0_mme_eng_unsecured_regs));
}
/*
 * We use a separate iteration for the case in which we want to configure a
 * stubbed MME's ARC/QMAN.
*/
for (i = 0 ; i < NUM_OF_DCORES * NUM_OF_MME_PER_DCORE ; i++) {
/* MME QM */
rc |= hl_init_pb_single_dcore(hdev, (DCORE_OFFSET * i),
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_mme_qm,
ARRAY_SIZE(gaudi2_pb_dcr0_mme_qm),
gaudi2_pb_dcr0_mme_qm_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_dcr0_mme_qm_unsecured_regs));
/* ARC MME */
rc |= hl_init_pb_ranges_single_dcore(hdev, (DCORE_OFFSET * i),
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_mme_arc,
ARRAY_SIZE(gaudi2_pb_dcr0_mme_arc),
gaudi2_pb_dcr0_mme_arc_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_dcr0_mme_arc_unsecured_regs));
}
/* MME QM ARC ACP ENG */
rc |= hl_init_pb_ranges_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_mme_qm_arc_acp_eng,
ARRAY_SIZE(gaudi2_pb_mme_qm_arc_acp_eng),
gaudi2_pb_mme_qm_arc_acp_eng_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_mme_qm_arc_acp_eng_unsecured_regs),
(BIT(NUM_OF_DCORES * NUM_OF_MME_PER_DCORE) - 1));
/* TPC */
rc |= gaudi2_init_pb_tpc(hdev);
rc |= gaudi2_init_pb_tpc_arc(hdev);
/* SRAM */
instance_offset = mmDCORE0_SRAM1_BANK_BASE - mmDCORE0_SRAM0_BANK_BASE;
rc |= hl_init_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, 8, instance_offset,
gaudi2_pb_dcr0_sram0, ARRAY_SIZE(gaudi2_pb_dcr0_sram0),
NULL, HL_PB_NA);
/* Sync Manager MSTR IF */
rc |= hl_init_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_sm_mstr_if,
ARRAY_SIZE(gaudi2_pb_dcr0_sm_mstr_if),
NULL, HL_PB_NA);
/* Sync Manager GLBL */
/* Secure Dcore0 CQ0 registers */
rc |= hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_sm_glbl,
ARRAY_SIZE(gaudi2_pb_dcr0_sm_glbl),
gaudi2_pb_dcr0_sm_glbl_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_dcr0_sm_glbl_unsecured_regs));
/* Unsecure all other CQ registers */
rc |= hl_init_pb_ranges(hdev, NUM_OF_DCORES - 1, DCORE_OFFSET,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr1_sm_glbl,
ARRAY_SIZE(gaudi2_pb_dcr1_sm_glbl),
gaudi2_pb_dcr_x_sm_glbl_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_dcr_x_sm_glbl_unsecured_regs));
/* PSOC.
* Except for PSOC_GLOBAL_CONF, skip when security is enabled in F/W, because the blocks are
* protected by privileged RR.
*/
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_psoc_global_conf, ARRAY_SIZE(gaudi2_pb_psoc_global_conf),
NULL, HL_PB_NA);
if (!hdev->asic_prop.fw_security_enabled)
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_psoc, ARRAY_SIZE(gaudi2_pb_psoc),
NULL, HL_PB_NA);
/* PMMU */
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_pmmu, ARRAY_SIZE(gaudi2_pb_pmmu),
NULL, HL_PB_NA);
/* PLL.
* Skip PSOC/XFT PLL when security is enabled in F/W, because these blocks are protected by
* privileged RR.
*/
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_pmmu_pll, ARRAY_SIZE(gaudi2_pb_pmmu_pll),
NULL, HL_PB_NA);
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_xbar_pll, ARRAY_SIZE(gaudi2_pb_xbar_pll),
NULL, HL_PB_NA);
if (!hdev->asic_prop.fw_security_enabled) {
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_psoc_pll, ARRAY_SIZE(gaudi2_pb_psoc_pll),
NULL, HL_PB_NA);
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_xft_pll, ARRAY_SIZE(gaudi2_pb_xft_pll),
NULL, HL_PB_NA);
}
/* PCIE */
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_pcie, ARRAY_SIZE(gaudi2_pb_pcie),
gaudi2_pb_pcie_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_pcie_unsecured_regs));
/* Thermal Sensor.
* Skip when security is enabled in F/W, because the blocks are protected by privileged RR.
*/
if (!hdev->asic_prop.fw_security_enabled) {
instance_offset = mmDCORE1_XFT_BASE - mmDCORE0_XFT_BASE;
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA, 4, instance_offset,
gaudi2_pb_thermal_sensor0,
ARRAY_SIZE(gaudi2_pb_thermal_sensor0), NULL, HL_PB_NA);
}
/* Scheduler ARCs */
instance_offset = mmARC_FARM_ARC1_AUX_BASE - mmARC_FARM_ARC0_AUX_BASE;
rc |= hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA,
NUM_OF_ARC_FARMS_ARC,
instance_offset, gaudi2_pb_arc_sched,
ARRAY_SIZE(gaudi2_pb_arc_sched),
gaudi2_pb_arc_sched_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_arc_sched_unsecured_regs));
/* XBAR MIDs */
instance_offset = mmXBAR_MID_1_BASE - mmXBAR_MID_0_BASE;
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_XBAR,
instance_offset, gaudi2_pb_xbar_mid,
ARRAY_SIZE(gaudi2_pb_xbar_mid),
gaudi2_pb_xbar_mid_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_xbar_mid_unsecured_regs));
/* XBAR EDGEs */
instance_offset = mmXBAR_EDGE_1_BASE - mmXBAR_EDGE_0_BASE;
rc |= hl_init_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_XBAR,
instance_offset, gaudi2_pb_xbar_edge,
ARRAY_SIZE(gaudi2_pb_xbar_edge),
gaudi2_pb_xbar_edge_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_xbar_edge_unsecured_regs),
prop->xbar_edge_enabled_mask);
/* NIC */
rc |= hl_init_pb_with_mask(hdev, NIC_NUMBER_OF_MACROS, NIC_OFFSET,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_nic0, ARRAY_SIZE(gaudi2_pb_nic0),
NULL, HL_PB_NA, hdev->nic_ports_mask);
/* NIC QM and QPC */
rc |= hl_init_pb_with_mask(hdev, NIC_NUMBER_OF_MACROS, NIC_OFFSET,
NIC_NUMBER_OF_QM_PER_MACRO, NIC_QM_OFFSET,
gaudi2_pb_nic0_qm_qpc, ARRAY_SIZE(gaudi2_pb_nic0_qm_qpc),
gaudi2_pb_nic0_qm_qpc_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_nic0_qm_qpc_unsecured_regs),
hdev->nic_ports_mask);
/* NIC QM ARC */
rc |= hl_init_pb_ranges_with_mask(hdev, NIC_NUMBER_OF_MACROS,
NIC_OFFSET, NIC_NUMBER_OF_QM_PER_MACRO, NIC_QM_OFFSET,
gaudi2_pb_nic0_qm_arc_aux0,
ARRAY_SIZE(gaudi2_pb_nic0_qm_arc_aux0),
gaudi2_pb_nic0_qm_arc_aux0_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_nic0_qm_arc_aux0_unsecured_regs),
hdev->nic_ports_mask);
/* NIC UMR */
rc |= hl_init_pb_ranges_with_mask(hdev, NIC_NUMBER_OF_MACROS,
NIC_OFFSET, NIC_NUMBER_OF_QM_PER_MACRO, NIC_QM_OFFSET,
gaudi2_pb_nic0_umr,
ARRAY_SIZE(gaudi2_pb_nic0_umr),
gaudi2_pb_nic0_umr_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_nic0_umr_unsecured_regs),
hdev->nic_ports_mask);
/* Rotators */
instance_offset = mmROT1_BASE - mmROT0_BASE;
rc |= hl_init_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_ROT,
instance_offset, gaudi2_pb_rot0,
ARRAY_SIZE(gaudi2_pb_rot0),
gaudi2_pb_rot0_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_rot0_unsecured_regs),
(BIT(NUM_OF_ROT) - 1));
/* Rotators ARCS */
rc |= hl_init_pb_ranges_with_mask(hdev, HL_PB_SHARED,
HL_PB_NA, NUM_OF_ROT, instance_offset,
gaudi2_pb_rot0_arc, ARRAY_SIZE(gaudi2_pb_rot0_arc),
gaudi2_pb_rot0_arc_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_rot0_arc_unsecured_regs),
(BIT(NUM_OF_ROT) - 1));
rc |= gaudi2_init_pb_sm_objs(hdev);
return rc;
}
/**
* gaudi2_init_security - Initialize security model
*
* @hdev: pointer to hl_device structure
*
* Initialize the security model of the device
* That includes range registers and protection bit per register.
*/
int gaudi2_init_security(struct hl_device *hdev)
{
int rc;
rc = gaudi2_init_protection_bits(hdev);
if (rc)
return rc;
gaudi2_init_range_registers(hdev);
return 0;
}
struct gaudi2_ack_pb_tpc_data {
u32 tpc_regs_array_size;
u32 arc_tpc_regs_array_size;
};
static void gaudi2_ack_pb_tpc_config(struct hl_device *hdev, int dcore, int inst, u32 offset,
struct iterate_module_ctx *ctx)
{
struct gaudi2_ack_pb_tpc_data *pb_data = ctx->data;
hl_ack_pb_single_dcore(hdev, offset, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_tpc0, pb_data->tpc_regs_array_size);
hl_ack_pb_single_dcore(hdev, offset, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_tpc0_arc, pb_data->arc_tpc_regs_array_size);
}
static void gaudi2_ack_pb_tpc(struct hl_device *hdev)
{
struct iterate_module_ctx tpc_iter = {
.fn = &gaudi2_ack_pb_tpc_config,
};
struct gaudi2_ack_pb_tpc_data data;
data.tpc_regs_array_size = ARRAY_SIZE(gaudi2_pb_dcr0_tpc0);
data.arc_tpc_regs_array_size = ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_arc);
tpc_iter.data = &data;
gaudi2_iterate_tpcs(hdev, &tpc_iter);
}
/**
* gaudi2_ack_protection_bits_errors - scan all blocks having protection bits
* and for every protection error found, display the appropriate error message
* and clear the error.
*
* @hdev: pointer to hl_device structure
*
 * All protection bits are 1 by default, meaning not protected. Each bit that
 * belongs to a protected register must be set to 0.
*
*/
void gaudi2_ack_protection_bits_errors(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 instance_offset;
u8 i;
/* SFT */
instance_offset = mmSFT1_HBW_RTR_IF0_RTR_CTRL_BASE - mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE;
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, 4, instance_offset,
gaudi2_pb_sft0, ARRAY_SIZE(gaudi2_pb_sft0));
/* HIF */
instance_offset = mmDCORE0_HIF1_BASE - mmDCORE0_HIF0_BASE;
hl_ack_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET,
NUM_OF_HIF_PER_DCORE, instance_offset,
gaudi2_pb_dcr0_hif, ARRAY_SIZE(gaudi2_pb_dcr0_hif),
prop->hmmu_hif_enabled_mask);
/* RTR */
instance_offset = mmDCORE0_RTR1_CTRL_BASE - mmDCORE0_RTR0_CTRL_BASE;
hl_ack_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, 8, instance_offset,
gaudi2_pb_dcr0_rtr0, ARRAY_SIZE(gaudi2_pb_dcr0_rtr0));
/* HMMU */
hl_ack_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET,
NUM_OF_HMMU_PER_DCORE, DCORE_HMMU_OFFSET,
gaudi2_pb_dcr0_hmmu0, ARRAY_SIZE(gaudi2_pb_dcr0_hmmu0),
prop->hmmu_hif_enabled_mask);
/* CPU.
* Except for CPU_IF, skip when security is enabled in F/W, because the blocks are protected
* by privileged RR.
*/
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_cpu_if, ARRAY_SIZE(gaudi2_pb_cpu_if));
if (!hdev->asic_prop.fw_security_enabled)
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_cpu, ARRAY_SIZE(gaudi2_pb_cpu));
/* KDMA */
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_kdma, ARRAY_SIZE(gaudi2_pb_kdma));
/* PDMA */
instance_offset = mmPDMA1_CORE_BASE - mmPDMA0_CORE_BASE;
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, 2, instance_offset,
gaudi2_pb_pdma0, ARRAY_SIZE(gaudi2_pb_pdma0));
/* ARC PDMA */
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, 2, instance_offset,
gaudi2_pb_pdma0_arc, ARRAY_SIZE(gaudi2_pb_pdma0_arc));
/* EDMA */
instance_offset = mmDCORE0_EDMA1_CORE_BASE - mmDCORE0_EDMA0_CORE_BASE;
hl_ack_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET, 2,
instance_offset, gaudi2_pb_dcr0_edma0,
ARRAY_SIZE(gaudi2_pb_dcr0_edma0),
prop->edma_enabled_mask);
/* ARC EDMA */
hl_ack_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET, 2,
instance_offset, gaudi2_pb_dcr0_edma0_arc,
ARRAY_SIZE(gaudi2_pb_dcr0_edma0_arc),
prop->edma_enabled_mask);
/* MME */
instance_offset = mmDCORE0_MME_SBTE1_BASE - mmDCORE0_MME_SBTE0_BASE;
for (i = 0 ; i < NUM_OF_DCORES * NUM_OF_MME_PER_DCORE ; i++) {
/* MME SBTE */
hl_ack_pb_single_dcore(hdev, (DCORE_OFFSET * i), 5,
instance_offset, gaudi2_pb_dcr0_mme_sbte,
ARRAY_SIZE(gaudi2_pb_dcr0_mme_sbte));
/* MME */
hl_ack_pb_single_dcore(hdev, (DCORE_OFFSET * i),
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_mme_eng,
ARRAY_SIZE(gaudi2_pb_dcr0_mme_eng));
}
/*
 * We use a separate iteration for the case in which we want to configure a
 * stubbed MME's ARC/QMAN.
*/
for (i = 0 ; i < NUM_OF_DCORES * NUM_OF_MME_PER_DCORE ; i++) {
/* MME QM */
hl_ack_pb_single_dcore(hdev, (DCORE_OFFSET * i),
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_mme_qm,
ARRAY_SIZE(gaudi2_pb_dcr0_mme_qm));
/* ARC MME */
hl_ack_pb_single_dcore(hdev, (DCORE_OFFSET * i),
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_mme_arc,
ARRAY_SIZE(gaudi2_pb_dcr0_mme_arc));
}
/* MME QM ARC ACP ENG */
hl_ack_pb_with_mask(hdev, NUM_OF_DCORES, DCORE_OFFSET,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_mme_qm_arc_acp_eng,
ARRAY_SIZE(gaudi2_pb_mme_qm_arc_acp_eng),
(BIT(NUM_OF_DCORES * NUM_OF_MME_PER_DCORE) - 1));
/* TPC */
gaudi2_ack_pb_tpc(hdev);
/* SRAM */
instance_offset = mmDCORE0_SRAM1_BANK_BASE - mmDCORE0_SRAM0_BANK_BASE;
hl_ack_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, 8, instance_offset,
gaudi2_pb_dcr0_sram0, ARRAY_SIZE(gaudi2_pb_dcr0_sram0));
/* Sync Manager MSTR IF */
hl_ack_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_sm_mstr_if, ARRAY_SIZE(gaudi2_pb_dcr0_sm_mstr_if));
/* Sync Manager */
hl_ack_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_sm_glbl, ARRAY_SIZE(gaudi2_pb_dcr0_sm_glbl));
hl_ack_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_sm_mstr_if, ARRAY_SIZE(gaudi2_pb_dcr0_sm_mstr_if));
/* PSOC.
* Except for PSOC_GLOBAL_CONF, skip when security is enabled in F/W, because the blocks are
* protected by privileged RR.
*/
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_psoc_global_conf, ARRAY_SIZE(gaudi2_pb_psoc_global_conf));
if (!hdev->asic_prop.fw_security_enabled)
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_psoc, ARRAY_SIZE(gaudi2_pb_psoc));
/* PMMU */
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_pmmu, ARRAY_SIZE(gaudi2_pb_pmmu));
/* PLL.
* Skip PSOC/XFT PLL when security is enabled in F/W, because these blocks are protected by
* privileged RR.
*/
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_pmmu_pll, ARRAY_SIZE(gaudi2_pb_pmmu_pll));
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_xbar_pll, ARRAY_SIZE(gaudi2_pb_xbar_pll));
if (!hdev->asic_prop.fw_security_enabled) {
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_psoc_pll, ARRAY_SIZE(gaudi2_pb_psoc_pll));
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_xft_pll, ARRAY_SIZE(gaudi2_pb_xft_pll));
}
/* PCIE */
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_pcie, ARRAY_SIZE(gaudi2_pb_pcie));
/* Thermal Sensor.
* Skip when security is enabled in F/W, because the blocks are protected by privileged RR.
*/
if (!hdev->asic_prop.fw_security_enabled) {
instance_offset = mmDCORE1_XFT_BASE - mmDCORE0_XFT_BASE;
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, 4, instance_offset,
gaudi2_pb_thermal_sensor0, ARRAY_SIZE(gaudi2_pb_thermal_sensor0));
}
/* HBM */
instance_offset = mmHBM1_MC0_BASE - mmHBM0_MC0_BASE;
hl_ack_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, GAUDI2_HBM_NUM,
instance_offset, gaudi2_pb_hbm,
ARRAY_SIZE(gaudi2_pb_hbm), prop->dram_enabled_mask);
/* Scheduler ARCs */
instance_offset = mmARC_FARM_ARC1_AUX_BASE - mmARC_FARM_ARC0_AUX_BASE;
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_ARC_FARMS_ARC,
instance_offset, gaudi2_pb_arc_sched,
ARRAY_SIZE(gaudi2_pb_arc_sched));
/* XBAR MIDs */
instance_offset = mmXBAR_MID_1_BASE - mmXBAR_MID_0_BASE;
hl_ack_pb(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_XBAR,
instance_offset, gaudi2_pb_xbar_mid,
ARRAY_SIZE(gaudi2_pb_xbar_mid));
/* XBAR EDGEs */
instance_offset = mmXBAR_EDGE_1_BASE - mmXBAR_EDGE_0_BASE;
hl_ack_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_XBAR,
instance_offset, gaudi2_pb_xbar_edge,
ARRAY_SIZE(gaudi2_pb_xbar_edge), prop->xbar_edge_enabled_mask);
/* NIC */
hl_ack_pb_with_mask(hdev, NIC_NUMBER_OF_MACROS, NIC_OFFSET, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_nic0, ARRAY_SIZE(gaudi2_pb_nic0), hdev->nic_ports_mask);
/* NIC QM and QPC */
hl_ack_pb_with_mask(hdev, NIC_NUMBER_OF_MACROS, NIC_OFFSET, NIC_NUMBER_OF_QM_PER_MACRO,
NIC_QM_OFFSET, gaudi2_pb_nic0_qm_qpc, ARRAY_SIZE(gaudi2_pb_nic0_qm_qpc),
hdev->nic_ports_mask);
/* NIC QM ARC */
hl_ack_pb_with_mask(hdev, NIC_NUMBER_OF_MACROS, NIC_OFFSET, NIC_NUMBER_OF_QM_PER_MACRO,
NIC_QM_OFFSET, gaudi2_pb_nic0_qm_arc_aux0,
ARRAY_SIZE(gaudi2_pb_nic0_qm_arc_aux0), hdev->nic_ports_mask);
/* NIC UMR */
hl_ack_pb_with_mask(hdev, NIC_NUMBER_OF_MACROS, NIC_OFFSET, NIC_NUMBER_OF_QM_PER_MACRO,
NIC_QM_OFFSET, gaudi2_pb_nic0_umr, ARRAY_SIZE(gaudi2_pb_nic0_umr),
hdev->nic_ports_mask);
/* Rotators */
instance_offset = mmROT1_BASE - mmROT0_BASE;
hl_ack_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_ROT, instance_offset,
gaudi2_pb_rot0, ARRAY_SIZE(gaudi2_pb_rot0), (BIT(NUM_OF_ROT) - 1));
/* Rotators ARCS */
hl_ack_pb_with_mask(hdev, HL_PB_SHARED, HL_PB_NA, NUM_OF_ROT, instance_offset,
gaudi2_pb_rot0_arc, ARRAY_SIZE(gaudi2_pb_rot0_arc), (BIT(NUM_OF_ROT) - 1));
}
/*
* Print PB security errors
*/
void gaudi2_pb_print_security_errors(struct hl_device *hdev, u32 block_addr, u32 cause,
u32 offended_addr)
{
int i = 0;
const char *error_format =
"Security error at block 0x%x, offending address 0x%x\n"
"Cause 0x%x: %s %s %s %s %s %s %s %s\n";
char *mcause[8] = {"Unknown", "", "", "", "", "", "", "" };
if (!cause)
return;
if (cause & SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_RD)
mcause[i++] = "APB_PRIV_RD";
if (cause & SPECIAL_GLBL_ERR_CAUSE_APB_SEC_RD)
mcause[i++] = "APB_SEC_RD";
if (cause & SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_RD)
mcause[i++] = "APB_UNMAPPED_RD";
if (cause & SPECIAL_GLBL_ERR_CAUSE_APB_PRIV_WR)
mcause[i++] = "APB_PRIV_WR";
if (cause & SPECIAL_GLBL_ERR_CAUSE_APB_SEC_WR)
mcause[i++] = "APB_SEC_WR";
if (cause & SPECIAL_GLBL_ERR_CAUSE_APB_UNMAPPED_WR)
mcause[i++] = "APB_UNMAPPED_WR";
if (cause & SPECIAL_GLBL_ERR_CAUSE_EXT_SEC_WR)
mcause[i++] = "EXT_SEC_WR";
if (cause & SPECIAL_GLBL_ERR_CAUSE_EXT_UNMAPPED_WR)
mcause[i++] = "APB_EXT_UNMAPPED_WR";
dev_err_ratelimited(hdev->dev, error_format, block_addr, offended_addr,
cause, mcause[0], mcause[1], mcause[2], mcause[3],
mcause[4], mcause[5], mcause[6], mcause[7]);
}
| linux-master | drivers/accel/habanalabs/gaudi2/gaudi2_security.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include <linux/circ_buf.h>
#include <linux/highmem.h>
#include "ivpu_drv.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"
#define IVPU_MMU_IDR0_REF 0x080f3e0f
#define IVPU_MMU_IDR0_REF_SIMICS 0x080f3e1f
#define IVPU_MMU_IDR1_REF 0x0e739d18
#define IVPU_MMU_IDR3_REF 0x0000003c
#define IVPU_MMU_IDR5_REF 0x00040070
#define IVPU_MMU_IDR5_REF_SIMICS 0x00000075
#define IVPU_MMU_IDR5_REF_FPGA 0x00800075
#define IVPU_MMU_CDTAB_ENT_SIZE 64
#define IVPU_MMU_CDTAB_ENT_COUNT_LOG2 8 /* 256 entries */
#define IVPU_MMU_CDTAB_ENT_COUNT ((u32)1 << IVPU_MMU_CDTAB_ENT_COUNT_LOG2)
#define IVPU_MMU_STREAM_ID0 0
#define IVPU_MMU_STREAM_ID3 3
#define IVPU_MMU_STRTAB_ENT_SIZE 64
#define IVPU_MMU_STRTAB_ENT_COUNT 4
#define IVPU_MMU_STRTAB_CFG_LOG2SIZE 2
#define IVPU_MMU_STRTAB_CFG IVPU_MMU_STRTAB_CFG_LOG2SIZE
#define IVPU_MMU_Q_COUNT_LOG2 4 /* 16 entries */
#define IVPU_MMU_Q_COUNT ((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
#define IVPU_MMU_Q_WRAP_BIT (IVPU_MMU_Q_COUNT << 1)
#define IVPU_MMU_Q_WRAP_MASK (IVPU_MMU_Q_WRAP_BIT - 1)
#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
#define IVPU_MMU_Q_IDX(val) ((val) & IVPU_MMU_Q_IDX_MASK)
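/*
 * Producer/consumer counters are kept modulo twice the queue depth: the low
 * IVPU_MMU_Q_COUNT_LOG2 bits select the ring slot (IVPU_MMU_Q_IDX()) and the
 * extra wrap bit toggles on every pass over the ring, which is what allows a
 * full queue to be told apart from an empty one.
 */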
#define IVPU_MMU_CMDQ_CMD_SIZE 16
#define IVPU_MMU_CMDQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)
#define IVPU_MMU_EVTQ_CMD_SIZE 32
#define IVPU_MMU_EVTQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_EVTQ_CMD_SIZE)
#define IVPU_MMU_CMD_OPCODE GENMASK(7, 0)
#define IVPU_MMU_CMD_SYNC_0_CS GENMASK(13, 12)
#define IVPU_MMU_CMD_SYNC_0_MSH GENMASK(23, 22)
#define IVPU_MMU_CMD_SYNC_0_MSI_ATTR GENMASK(27, 24)
#define IVPU_MMU_CMD_SYNC_0_MSI_DATA GENMASK(63, 32)
#define IVPU_MMU_CMD_CFGI_0_SSEC BIT(10)
#define IVPU_MMU_CMD_CFGI_0_SSV BIT(11)
#define IVPU_MMU_CMD_CFGI_0_SSID GENMASK(31, 12)
#define IVPU_MMU_CMD_CFGI_0_SID GENMASK(63, 32)
#define IVPU_MMU_CMD_CFGI_1_RANGE GENMASK(4, 0)
#define IVPU_MMU_CMD_TLBI_0_ASID GENMASK(63, 48)
#define IVPU_MMU_CMD_TLBI_0_VMID GENMASK(47, 32)
#define CMD_PREFETCH_CFG 0x1
#define CMD_CFGI_STE 0x3
#define CMD_CFGI_ALL 0x4
#define CMD_CFGI_CD 0x5
#define CMD_CFGI_CD_ALL 0x6
#define CMD_TLBI_NH_ASID 0x11
#define CMD_TLBI_EL2_ALL 0x20
#define CMD_TLBI_NSNH_ALL 0x30
#define CMD_SYNC 0x46
#define IVPU_MMU_EVT_F_UUT 0x01
#define IVPU_MMU_EVT_C_BAD_STREAMID 0x02
#define IVPU_MMU_EVT_F_STE_FETCH 0x03
#define IVPU_MMU_EVT_C_BAD_STE 0x04
#define IVPU_MMU_EVT_F_BAD_ATS_TREQ 0x05
#define IVPU_MMU_EVT_F_STREAM_DISABLED 0x06
#define IVPU_MMU_EVT_F_TRANSL_FORBIDDEN 0x07
#define IVPU_MMU_EVT_C_BAD_SUBSTREAMID 0x08
#define IVPU_MMU_EVT_F_CD_FETCH 0x09
#define IVPU_MMU_EVT_C_BAD_CD 0x0a
#define IVPU_MMU_EVT_F_WALK_EABT 0x0b
#define IVPU_MMU_EVT_F_TRANSLATION 0x10
#define IVPU_MMU_EVT_F_ADDR_SIZE 0x11
#define IVPU_MMU_EVT_F_ACCESS 0x12
#define IVPU_MMU_EVT_F_PERMISSION 0x13
#define IVPU_MMU_EVT_F_TLB_CONFLICT 0x20
#define IVPU_MMU_EVT_F_CFG_CONFLICT 0x21
#define IVPU_MMU_EVT_E_PAGE_REQUEST 0x24
#define IVPU_MMU_EVT_F_VMS_FETCH 0x25
#define IVPU_MMU_EVT_OP_MASK GENMASK_ULL(7, 0)
#define IVPU_MMU_EVT_SSID_MASK GENMASK_ULL(31, 12)
#define IVPU_MMU_Q_BASE_RWA BIT(62)
#define IVPU_MMU_Q_BASE_ADDR_MASK GENMASK_ULL(51, 5)
#define IVPU_MMU_STRTAB_BASE_RA BIT(62)
#define IVPU_MMU_STRTAB_BASE_ADDR_MASK GENMASK_ULL(51, 6)
#define IVPU_MMU_IRQ_EVTQ_EN BIT(2)
#define IVPU_MMU_IRQ_GERROR_EN BIT(0)
#define IVPU_MMU_CR0_ATSCHK BIT(4)
#define IVPU_MMU_CR0_CMDQEN BIT(3)
#define IVPU_MMU_CR0_EVTQEN BIT(2)
#define IVPU_MMU_CR0_PRIQEN BIT(1)
#define IVPU_MMU_CR0_SMMUEN BIT(0)
#define IVPU_MMU_CR1_TABLE_SH GENMASK(11, 10)
#define IVPU_MMU_CR1_TABLE_OC GENMASK(9, 8)
#define IVPU_MMU_CR1_TABLE_IC GENMASK(7, 6)
#define IVPU_MMU_CR1_QUEUE_SH GENMASK(5, 4)
#define IVPU_MMU_CR1_QUEUE_OC GENMASK(3, 2)
#define IVPU_MMU_CR1_QUEUE_IC GENMASK(1, 0)
#define IVPU_MMU_CACHE_NC 0
#define IVPU_MMU_CACHE_WB 1
#define IVPU_MMU_CACHE_WT 2
#define IVPU_MMU_SH_NSH 0
#define IVPU_MMU_SH_OSH 2
#define IVPU_MMU_SH_ISH 3
#define IVPU_MMU_CMDQ_OP GENMASK_ULL(7, 0)
#define IVPU_MMU_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
#define IVPU_MMU_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
#define IVPU_MMU_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
#define IVPU_MMU_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10)
#define IVPU_MMU_CD_0_TCR_SH0 GENMASK_ULL(13, 12)
#define IVPU_MMU_CD_0_TCR_EPD0 BIT_ULL(14)
#define IVPU_MMU_CD_0_TCR_EPD1 BIT_ULL(30)
#define IVPU_MMU_CD_0_ENDI BIT(15)
#define IVPU_MMU_CD_0_V BIT(31)
#define IVPU_MMU_CD_0_TCR_IPS GENMASK_ULL(34, 32)
#define IVPU_MMU_CD_0_TCR_TBI0 BIT_ULL(38)
#define IVPU_MMU_CD_0_AA64 BIT(41)
#define IVPU_MMU_CD_0_S BIT(44)
#define IVPU_MMU_CD_0_R BIT(45)
#define IVPU_MMU_CD_0_A BIT(46)
#define IVPU_MMU_CD_0_ASET BIT(47)
#define IVPU_MMU_CD_0_ASID GENMASK_ULL(63, 48)
#define IVPU_MMU_T0SZ_48BIT 16
#define IVPU_MMU_T0SZ_38BIT 26
#define IVPU_MMU_IPS_48BIT 5
#define IVPU_MMU_IPS_44BIT 4
#define IVPU_MMU_IPS_42BIT 3
#define IVPU_MMU_IPS_40BIT 2
#define IVPU_MMU_IPS_36BIT 1
#define IVPU_MMU_IPS_32BIT 0
#define IVPU_MMU_CD_1_TTB0_MASK GENMASK_ULL(51, 4)
#define IVPU_MMU_STE_0_S1CDMAX GENMASK_ULL(63, 59)
#define IVPU_MMU_STE_0_S1FMT GENMASK_ULL(5, 4)
#define IVPU_MMU_STE_0_S1FMT_LINEAR 0
#define IVPU_MMU_STE_DWORDS 8
#define IVPU_MMU_STE_0_CFG_S1_TRANS 5
#define IVPU_MMU_STE_0_CFG GENMASK_ULL(3, 1)
#define IVPU_MMU_STE_0_S1CTXPTR_MASK GENMASK_ULL(51, 6)
#define IVPU_MMU_STE_0_V BIT(0)
#define IVPU_MMU_STE_1_STRW_NSEL1 0ul
#define IVPU_MMU_STE_1_CONT GENMASK_ULL(16, 13)
#define IVPU_MMU_STE_1_STRW GENMASK_ULL(31, 30)
#define IVPU_MMU_STE_1_PRIVCFG GENMASK_ULL(49, 48)
#define IVPU_MMU_STE_1_PRIVCFG_UNPRIV 2ul
#define IVPU_MMU_STE_1_INSTCFG GENMASK_ULL(51, 50)
#define IVPU_MMU_STE_1_INSTCFG_DATA 2ul
#define IVPU_MMU_STE_1_MEV BIT(19)
#define IVPU_MMU_STE_1_S1STALLD BIT(27)
#define IVPU_MMU_STE_1_S1C_CACHE_NC 0ul
#define IVPU_MMU_STE_1_S1C_CACHE_WBRA 1ul
#define IVPU_MMU_STE_1_S1C_CACHE_WT 2ul
#define IVPU_MMU_STE_1_S1C_CACHE_WB 3ul
#define IVPU_MMU_STE_1_S1CIR GENMASK_ULL(3, 2)
#define IVPU_MMU_STE_1_S1COR GENMASK_ULL(5, 4)
#define IVPU_MMU_STE_1_S1CSH GENMASK_ULL(7, 6)
#define IVPU_MMU_STE_1_S1DSS GENMASK_ULL(1, 0)
#define IVPU_MMU_STE_1_S1DSS_TERMINATE 0x0
#define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC)
#define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC)
#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT)))
static char *ivpu_mmu_event_to_str(u32 cmd)
{
switch (cmd) {
case IVPU_MMU_EVT_F_UUT:
return "Unsupported Upstream Transaction";
case IVPU_MMU_EVT_C_BAD_STREAMID:
return "Transaction StreamID out of range";
case IVPU_MMU_EVT_F_STE_FETCH:
return "Fetch of STE caused external abort";
case IVPU_MMU_EVT_C_BAD_STE:
return "Used STE invalid";
case IVPU_MMU_EVT_F_BAD_ATS_TREQ:
return "Address Request disallowed for a StreamID";
case IVPU_MMU_EVT_F_STREAM_DISABLED:
return "Transaction marks non-substream disabled";
case IVPU_MMU_EVT_F_TRANSL_FORBIDDEN:
return "MMU bypass is disallowed for this StreamID";
case IVPU_MMU_EVT_C_BAD_SUBSTREAMID:
return "Invalid StreamID";
case IVPU_MMU_EVT_F_CD_FETCH:
return "Fetch of CD caused external abort";
case IVPU_MMU_EVT_C_BAD_CD:
return "Fetched CD invalid";
case IVPU_MMU_EVT_F_WALK_EABT:
return " An external abort occurred fetching a TLB";
case IVPU_MMU_EVT_F_TRANSLATION:
return "Translation fault";
case IVPU_MMU_EVT_F_ADDR_SIZE:
return " Output address caused address size fault";
case IVPU_MMU_EVT_F_ACCESS:
return "Access flag fault";
case IVPU_MMU_EVT_F_PERMISSION:
return "Permission fault occurred on page access";
case IVPU_MMU_EVT_F_TLB_CONFLICT:
return "A TLB conflict";
case IVPU_MMU_EVT_F_CFG_CONFLICT:
return "A configuration cache conflict";
case IVPU_MMU_EVT_E_PAGE_REQUEST:
return "Page request hint from a client device";
case IVPU_MMU_EVT_F_VMS_FETCH:
return "Fetch of VMS caused external abort";
default:
return "Unknown CMDQ command";
}
}
static void ivpu_mmu_config_check(struct ivpu_device *vdev)
{
u32 val_ref;
u32 val;
if (ivpu_is_simics(vdev))
val_ref = IVPU_MMU_IDR0_REF_SIMICS;
else
val_ref = IVPU_MMU_IDR0_REF;
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR0);
if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR1);
if (val != IVPU_MMU_IDR1_REF)
ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR3);
if (val != IVPU_MMU_IDR3_REF)
ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
if (ivpu_is_simics(vdev))
val_ref = IVPU_MMU_IDR5_REF_SIMICS;
else if (ivpu_is_fpga(vdev))
val_ref = IVPU_MMU_IDR5_REF_FPGA;
else
val_ref = IVPU_MMU_IDR5_REF;
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR5);
if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
}
static int ivpu_mmu_cdtab_alloc(struct ivpu_device *vdev)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
size_t size = IVPU_MMU_CDTAB_ENT_COUNT * IVPU_MMU_CDTAB_ENT_SIZE;
cdtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &cdtab->dma, GFP_KERNEL);
if (!cdtab->base)
return -ENOMEM;
ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size);
return 0;
}
static int ivpu_mmu_strtab_alloc(struct ivpu_device *vdev)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
struct ivpu_mmu_strtab *strtab = &mmu->strtab;
size_t size = IVPU_MMU_STRTAB_ENT_COUNT * IVPU_MMU_STRTAB_ENT_SIZE;
strtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &strtab->dma, GFP_KERNEL);
if (!strtab->base)
return -ENOMEM;
strtab->base_cfg = IVPU_MMU_STRTAB_CFG;
strtab->dma_q = IVPU_MMU_STRTAB_BASE_RA;
strtab->dma_q |= strtab->dma & IVPU_MMU_STRTAB_BASE_ADDR_MASK;
ivpu_dbg(vdev, MMU, "STRTAB alloc: dma=%pad dma_q=%pad size=%zu\n",
&strtab->dma, &strtab->dma_q, size);
return 0;
}
static int ivpu_mmu_cmdq_alloc(struct ivpu_device *vdev)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
struct ivpu_mmu_queue *q = &mmu->cmdq;
q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL);
if (!q->base)
return -ENOMEM;
q->dma_q = IVPU_MMU_Q_BASE_RWA;
q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;
ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n",
&q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE);
return 0;
}
static int ivpu_mmu_evtq_alloc(struct ivpu_device *vdev)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
struct ivpu_mmu_queue *q = &mmu->evtq;
q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL);
if (!q->base)
return -ENOMEM;
q->dma_q = IVPU_MMU_Q_BASE_RWA;
q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;
ivpu_dbg(vdev, MMU, "EVTQ alloc: dma=%pad dma_q=%pad size=%u\n",
&q->dma, &q->dma_q, IVPU_MMU_EVTQ_SIZE);
return 0;
}
static int ivpu_mmu_structs_alloc(struct ivpu_device *vdev)
{
int ret;
ret = ivpu_mmu_cdtab_alloc(vdev);
if (ret) {
ivpu_err(vdev, "Failed to allocate cdtab: %d\n", ret);
return ret;
}
ret = ivpu_mmu_strtab_alloc(vdev);
if (ret) {
ivpu_err(vdev, "Failed to allocate strtab: %d\n", ret);
return ret;
}
ret = ivpu_mmu_cmdq_alloc(vdev);
if (ret) {
ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret);
return ret;
}
ret = ivpu_mmu_evtq_alloc(vdev);
if (ret)
ivpu_err(vdev, "Failed to allocate evtq: %d\n", ret);
return ret;
}
static int ivpu_mmu_reg_write(struct ivpu_device *vdev, u32 reg, u32 val)
{
u32 reg_ack = reg + 4; /* ACK register is 4B after base register */
u32 val_ack;
int ret;
REGV_WR32(reg, val);
ret = REGV_POLL(reg_ack, val_ack, (val == val_ack), IVPU_MMU_REG_TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Failed to write register 0x%x\n", reg);
return ret;
}
static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
{
u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
int ret;
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, 0);
if (ret)
return ret;
return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, irq_ctrl);
}
static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
{
struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
IVPU_MMU_QUEUE_TIMEOUT_US);
}
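/*
 * Each MMU command is a 16-byte entry made of two 64-bit words. The helper
 * below copies the two words into the next free slot of the circular command
 * queue and advances the software producer counter; the command only reaches
 * the hardware once ivpu_mmu_cmdq_sync() flushes the queue memory and updates
 * the PROD register.
 */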
static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
{
struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
u64 *queue_buffer = q->base;
int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) {
ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
return -EBUSY;
}
queue_buffer[idx] = data0;
queue_buffer[idx + 1] = data1;
q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);
return 0;
}
static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
{
struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
u64 val;
int ret;
val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC) |
FIELD_PREP(IVPU_MMU_CMD_SYNC_0_CS, 0x2) |
FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSH, 0x3) |
FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSI_ATTR, 0xf);
ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
if (ret)
return ret;
clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod);
ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
if (ret)
ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret);
return ret;
}
static int ivpu_mmu_cmdq_write_cfgi_all(struct ivpu_device *vdev)
{
u64 data0 = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_CFGI_ALL);
u64 data1 = FIELD_PREP(IVPU_MMU_CMD_CFGI_1_RANGE, 0x1f);
return ivpu_mmu_cmdq_cmd_write(vdev, "CFGI_ALL", data0, data1);
}
static int ivpu_mmu_cmdq_write_tlbi_nh_asid(struct ivpu_device *vdev, u16 ssid)
{
u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NH_ASID) |
FIELD_PREP(IVPU_MMU_CMD_TLBI_0_ASID, ssid);
return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NH_ASID", val, 0);
}
static int ivpu_mmu_cmdq_write_tlbi_nsnh_all(struct ivpu_device *vdev)
{
u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NSNH_ALL);
return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NSNH_ALL", val, 0);
}
static int ivpu_mmu_reset(struct ivpu_device *vdev)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
u32 val;
int ret;
memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);
clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
mmu->cmdq.prod = 0;
mmu->cmdq.cons = 0;
memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
mmu->evtq.prod = 0;
mmu->evtq.cons = 0;
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0);
if (ret)
return ret;
val = FIELD_PREP(IVPU_MMU_CR1_TABLE_SH, IVPU_MMU_SH_ISH) |
FIELD_PREP(IVPU_MMU_CR1_TABLE_OC, IVPU_MMU_CACHE_WB) |
FIELD_PREP(IVPU_MMU_CR1_TABLE_IC, IVPU_MMU_CACHE_WB) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
REGV_WR32(VPU_37XX_HOST_MMU_CR1, val);
REGV_WR64(VPU_37XX_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
REGV_WR32(VPU_37XX_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, 0);
REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_CONS, 0);
val = IVPU_MMU_CR0_CMDQEN;
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
if (ret)
return ret;
ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
if (ret)
return ret;
ret = ivpu_mmu_cmdq_sync(vdev);
if (ret)
return ret;
REGV_WR64(VPU_37XX_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC, 0);
REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, 0);
val |= IVPU_MMU_CR0_EVTQEN;
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
val |= IVPU_MMU_CR0_ATSCHK;
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
ret = ivpu_mmu_irqs_setup(vdev);
if (ret)
return ret;
val |= IVPU_MMU_CR0_SMMUEN;
return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
}
static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
struct ivpu_mmu_strtab *strtab = &mmu->strtab;
struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE);
u64 str[2];
str[0] = FIELD_PREP(IVPU_MMU_STE_0_CFG, IVPU_MMU_STE_0_CFG_S1_TRANS) |
FIELD_PREP(IVPU_MMU_STE_0_S1CDMAX, IVPU_MMU_CDTAB_ENT_COUNT_LOG2) |
FIELD_PREP(IVPU_MMU_STE_0_S1FMT, IVPU_MMU_STE_0_S1FMT_LINEAR) |
IVPU_MMU_STE_0_V |
(cdtab->dma & IVPU_MMU_STE_0_S1CTXPTR_MASK);
str[1] = FIELD_PREP(IVPU_MMU_STE_1_S1DSS, IVPU_MMU_STE_1_S1DSS_TERMINATE) |
FIELD_PREP(IVPU_MMU_STE_1_S1CIR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
FIELD_PREP(IVPU_MMU_STE_1_S1COR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
FIELD_PREP(IVPU_MMU_STE_1_S1CSH, IVPU_MMU_SH_NSH) |
FIELD_PREP(IVPU_MMU_STE_1_PRIVCFG, IVPU_MMU_STE_1_PRIVCFG_UNPRIV) |
FIELD_PREP(IVPU_MMU_STE_1_INSTCFG, IVPU_MMU_STE_1_INSTCFG_DATA) |
FIELD_PREP(IVPU_MMU_STE_1_STRW, IVPU_MMU_STE_1_STRW_NSEL1) |
FIELD_PREP(IVPU_MMU_STE_1_CONT, IVPU_MMU_STRTAB_CFG_LOG2SIZE) |
IVPU_MMU_STE_1_MEV |
IVPU_MMU_STE_1_S1STALLD;
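	/* Publish word 1 first; word 0 carries the V(alid) bit, so writing it
	 * last makes the STE visible to the MMU only once it is complete.
	 */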
WRITE_ONCE(entry[1], str[1]);
WRITE_ONCE(entry[0], str[0]);
clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);
ivpu_dbg(vdev, MMU, "STRTAB write entry (SSID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]);
}
static int ivpu_mmu_strtab_init(struct ivpu_device *vdev)
{
ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID0);
ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID3);
return 0;
}
int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
int ret = 0;
mutex_lock(&mmu->lock);
if (!mmu->on)
goto unlock;
ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid);
if (ret)
goto unlock;
ret = ivpu_mmu_cmdq_sync(vdev);
unlock:
mutex_unlock(&mmu->lock);
return ret;
}
static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
u64 *entry;
u64 cd[4];
int ret = 0;
if (ssid > IVPU_MMU_CDTAB_ENT_COUNT)
return -EINVAL;
entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
if (cd_dma != 0) {
cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
IVPU_MMU_CD_0_TCR_EPD1 |
IVPU_MMU_CD_0_AA64 |
IVPU_MMU_CD_0_R |
IVPU_MMU_CD_0_ASET |
IVPU_MMU_CD_0_V;
cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
cd[2] = 0;
cd[3] = 0x0000000000007444;
/* For global context generate memory fault on VPU */
if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
cd[0] |= IVPU_MMU_CD_0_A;
} else {
memset(cd, 0, sizeof(cd));
}
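	/* Words 1-3 are written first; word 0 carries the V(alid) bit, so the
	 * context descriptor only becomes valid once it is fully populated.
	 */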
WRITE_ONCE(entry[1], cd[1]);
WRITE_ONCE(entry[2], cd[2]);
WRITE_ONCE(entry[3], cd[3]);
WRITE_ONCE(entry[0], cd[0]);
clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
mutex_lock(&mmu->lock);
if (!mmu->on)
goto unlock;
ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
if (ret)
goto unlock;
ret = ivpu_mmu_cmdq_sync(vdev);
unlock:
mutex_unlock(&mmu->lock);
return ret;
}
static int ivpu_mmu_cd_add_gbl(struct ivpu_device *vdev)
{
int ret;
ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma);
if (ret)
ivpu_err(vdev, "Failed to add global CD entry: %d\n", ret);
return ret;
}
static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma)
{
int ret;
if (ssid == 0) {
ivpu_err(vdev, "Invalid SSID: %u\n", ssid);
return -EINVAL;
}
ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma);
if (ret)
ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret);
return ret;
}
int ivpu_mmu_init(struct ivpu_device *vdev)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
int ret;
ivpu_dbg(vdev, MMU, "Init..\n");
drmm_mutex_init(&vdev->drm, &mmu->lock);
ivpu_mmu_config_check(vdev);
ret = ivpu_mmu_structs_alloc(vdev);
if (ret)
return ret;
ret = ivpu_mmu_strtab_init(vdev);
if (ret) {
ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
return ret;
}
ret = ivpu_mmu_cd_add_gbl(vdev);
if (ret) {
ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
return ret;
}
ret = ivpu_mmu_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
return ret;
}
ivpu_dbg(vdev, MMU, "Init done\n");
return 0;
}
int ivpu_mmu_enable(struct ivpu_device *vdev)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
int ret;
mutex_lock(&mmu->lock);
mmu->on = true;
ret = ivpu_mmu_reset(vdev);
if (ret) {
ivpu_err(vdev, "Failed to reset MMU: %d\n", ret);
goto err;
}
ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
if (ret)
goto err;
ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
if (ret)
goto err;
ret = ivpu_mmu_cmdq_sync(vdev);
if (ret)
goto err;
mutex_unlock(&mmu->lock);
return 0;
err:
mmu->on = false;
mutex_unlock(&mmu->lock);
return ret;
}
void ivpu_mmu_disable(struct ivpu_device *vdev)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
mutex_lock(&mmu->lock);
mmu->on = false;
mutex_unlock(&mmu->lock);
}
static void ivpu_mmu_dump_event(struct ivpu_device *vdev, u32 *event)
{
u32 ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
u32 op = FIELD_GET(IVPU_MMU_EVT_OP_MASK, event[0]);
u64 fetch_addr = ((u64)event[7]) << 32 | event[6];
u64 in_addr = ((u64)event[5]) << 32 | event[4];
u32 sid = event[1];
ivpu_err(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
op, ivpu_mmu_event_to_str(op), ssid, sid, event[2], event[3], in_addr, fetch_addr);
}
static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
{
struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
evtq->prod = REGV_RD32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC);
if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
return NULL;
clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
return evt;
}
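/*
 * A standalone sketch of the emptiness check that ivpu_mmu_get_event() above
 * performs with CIRC_CNT(): for a power-of-two queue the number of pending
 * entries is the wrapped difference of the producer and consumer indices.
 * The queue depth below is illustrative, not the real EVTQ size.
 */
#define SKETCH_EVTQ_COUNT	16U	/* must be a power of two */
static unsigned int sketch_evtq_pending(unsigned int prod, unsigned int cons)
{
	return (prod - cons) & (SKETCH_EVTQ_COUNT - 1);	/* 0 means the queue is empty */
}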
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
bool schedule_recovery = false;
u32 *event;
u32 ssid;
ivpu_dbg(vdev, IRQ, "MMU event queue\n");
while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
ivpu_mmu_dump_event(vdev, event);
ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
schedule_recovery = true;
else
ivpu_mmu_user_context_mark_invalid(vdev, ssid);
}
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
}
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
{
u32 gerror_val, gerrorn_val, active;
ivpu_dbg(vdev, IRQ, "MMU error\n");
gerror_val = REGV_RD32(VPU_37XX_HOST_MMU_GERROR);
gerrorn_val = REGV_RD32(VPU_37XX_HOST_MMU_GERRORN);
active = gerror_val ^ gerrorn_val;
if (!(active & IVPU_MMU_GERROR_ERR_MASK))
return;
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active))
ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
REGV_WR32(VPU_37XX_HOST_MMU_GERRORN, gerror_val);
}
int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
{
return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
}
void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
{
ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
}
| linux-master | drivers/accel/ivpu/ivpu_mmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include <drm/drm_file.h>
#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <uapi/drm/ivpu_accel.h>
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
#define CMD_BUF_IDX 0
#define JOB_ID_JOB_MASK GENMASK(7, 0)
#define JOB_ID_CONTEXT_MASK GENMASK(31, 8)
#define JOB_MAX_BUFFER_COUNT 65535
static unsigned int ivpu_tdr_timeout_ms;
module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, uint, 0644);
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
{
ivpu_hw_reg_db_set(vdev, cmdq->db_id);
}
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
{
struct ivpu_device *vdev = file_priv->vdev;
struct vpu_job_queue_header *jobq_header;
struct ivpu_cmdq *cmdq;
cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
return NULL;
cmdq->mem = ivpu_bo_alloc_internal(vdev, 0, SZ_4K, DRM_IVPU_BO_WC);
if (!cmdq->mem)
goto cmdq_free;
cmdq->db_id = file_priv->ctx.id + engine * ivpu_get_context_count(vdev);
cmdq->entry_count = (u32)((cmdq->mem->base.size - sizeof(struct vpu_job_queue_header)) /
sizeof(struct vpu_job_queue_entry));
cmdq->jobq = (struct vpu_job_queue *)cmdq->mem->kvaddr;
jobq_header = &cmdq->jobq->header;
jobq_header->engine_idx = engine;
jobq_header->head = 0;
jobq_header->tail = 0;
wmb(); /* Flush WC buffer for jobq->header */
return cmdq;
cmdq_free:
kfree(cmdq);
return NULL;
}
static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
if (!cmdq)
return;
ivpu_bo_free_internal(cmdq->mem);
kfree(cmdq);
}
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
int ret;
lockdep_assert_held(&file_priv->lock);
if (!cmdq) {
cmdq = ivpu_cmdq_alloc(file_priv, engine);
if (!cmdq)
return NULL;
file_priv->cmdq[engine] = cmdq;
}
if (cmdq->db_registered)
return cmdq;
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
cmdq->mem->vpu_addr, cmdq->mem->base.size);
if (ret)
return NULL;
cmdq->db_registered = true;
return cmdq;
}
static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine)
{
struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
lockdep_assert_held(&file_priv->lock);
if (cmdq) {
file_priv->cmdq[engine] = NULL;
if (cmdq->db_registered)
ivpu_jsm_unregister_db(file_priv->vdev, cmdq->db_id);
ivpu_cmdq_free(file_priv, cmdq);
}
}
void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv)
{
int i;
mutex_lock(&file_priv->lock);
for (i = 0; i < IVPU_NUM_ENGINES; i++)
ivpu_cmdq_release_locked(file_priv, i);
mutex_unlock(&file_priv->lock);
}
/*
* Mark the doorbell as unregistered and reset job queue pointers.
* This function needs to be called when the VPU hardware is restarted
* and the FW loses the job queue state. The next time a job queue is used,
* it will be registered again.
*/
static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
{
struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
lockdep_assert_held(&file_priv->lock);
if (cmdq) {
cmdq->db_registered = false;
cmdq->jobq->header.head = 0;
cmdq->jobq->header.tail = 0;
wmb(); /* Flush WC buffer for jobq header */
}
}
static void ivpu_cmdq_reset_all(struct ivpu_file_priv *file_priv)
{
int i;
mutex_lock(&file_priv->lock);
for (i = 0; i < IVPU_NUM_ENGINES; i++)
ivpu_cmdq_reset_locked(file_priv, i);
mutex_unlock(&file_priv->lock);
}
void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
{
struct ivpu_file_priv *file_priv;
unsigned long ctx_id;
xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
if (!file_priv)
continue;
ivpu_cmdq_reset_all(file_priv);
ivpu_file_priv_put(&file_priv);
}
}
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
{
struct ivpu_device *vdev = job->vdev;
struct vpu_job_queue_header *header = &cmdq->jobq->header;
struct vpu_job_queue_entry *entry;
u32 tail = READ_ONCE(header->tail);
u32 next_entry = (tail + 1) % cmdq->entry_count;
/* Check if there is space left in job queue */
if (next_entry == header->head) {
ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
return -EBUSY;
}
entry = &cmdq->jobq->job[tail];
entry->batch_buf_addr = job->cmd_buf_vpu_addr;
entry->job_id = job->job_id;
entry->flags = 0;
wmb(); /* Ensure that tail is updated after filling entry */
header->tail = next_entry;
wmb(); /* Flush WC buffer for jobq header */
return 0;
}
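/*
 * ivpu_cmdq_push_job() above treats the job queue as a classic ring buffer:
 * it is considered full when advancing the tail would collide with the head,
 * so one slot always stays unused. A standalone sketch of that invariant
 * (the structure and entry count are illustrative, not the VPU job queue):
 */
struct sketch_ring {
	unsigned int head;
	unsigned int tail;
	unsigned int count;
};
static int sketch_ring_push(struct sketch_ring *r)
{
	unsigned int next = (r->tail + 1) % r->count;
	if (next == r->head)
		return -1;	/* full: usable capacity is count - 1 entries */
	/* ... fill the slot at r->tail here ... */
	r->tail = next;
	return 0;
}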
struct ivpu_fence {
struct dma_fence base;
spinlock_t lock; /* protects base */
struct ivpu_device *vdev;
};
static inline struct ivpu_fence *to_vpu_fence(struct dma_fence *fence)
{
return container_of(fence, struct ivpu_fence, base);
}
static const char *ivpu_fence_get_driver_name(struct dma_fence *fence)
{
return DRIVER_NAME;
}
static const char *ivpu_fence_get_timeline_name(struct dma_fence *fence)
{
struct ivpu_fence *ivpu_fence = to_vpu_fence(fence);
return dev_name(ivpu_fence->vdev->drm.dev);
}
static const struct dma_fence_ops ivpu_fence_ops = {
.get_driver_name = ivpu_fence_get_driver_name,
.get_timeline_name = ivpu_fence_get_timeline_name,
};
static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
{
struct ivpu_fence *fence;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return NULL;
fence->vdev = vdev;
spin_lock_init(&fence->lock);
dma_fence_init(&fence->base, &ivpu_fence_ops, &fence->lock, dma_fence_context_alloc(1), 1);
return &fence->base;
}
static void job_get(struct ivpu_job *job, struct ivpu_job **link)
{
struct ivpu_device *vdev = job->vdev;
kref_get(&job->ref);
*link = job;
ivpu_dbg(vdev, KREF, "Job get: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
}
static void job_release(struct kref *ref)
{
struct ivpu_job *job = container_of(ref, struct ivpu_job, ref);
struct ivpu_device *vdev = job->vdev;
u32 i;
for (i = 0; i < job->bo_count; i++)
if (job->bos[i])
drm_gem_object_put(&job->bos[i]->base);
dma_fence_put(job->done_fence);
ivpu_file_priv_put(&job->file_priv);
ivpu_dbg(vdev, KREF, "Job released: id %u\n", job->job_id);
kfree(job);
/* Allow the VPU to get suspended, must be called after ivpu_file_priv_put() */
ivpu_rpm_put(vdev);
}
static void job_put(struct ivpu_job *job)
{
struct ivpu_device *vdev = job->vdev;
ivpu_dbg(vdev, KREF, "Job put: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
kref_put(&job->ref, job_release);
}
static struct ivpu_job *
ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_job *job;
int ret;
ret = ivpu_rpm_get(vdev);
if (ret < 0)
return NULL;
job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
if (!job)
goto err_rpm_put;
kref_init(&job->ref);
job->vdev = vdev;
job->engine_idx = engine_idx;
job->bo_count = bo_count;
job->done_fence = ivpu_fence_create(vdev);
if (!job->done_fence) {
ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
goto err_free_job;
}
job->file_priv = ivpu_file_priv_get(file_priv);
ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
return job;
err_free_job:
kfree(job);
err_rpm_put:
ivpu_rpm_put(vdev);
return NULL;
}
static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
struct ivpu_job *job;
job = xa_erase(&vdev->submitted_jobs_xa, job_id);
if (!job)
return -ENOENT;
if (job->file_priv->has_mmu_faults)
job_status = VPU_JSM_STATUS_ABORTED;
job->bos[CMD_BUF_IDX]->job_status = job_status;
dma_fence_signal(job->done_fence);
ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
job_put(job);
return 0;
}
static void ivpu_job_done_message(struct ivpu_device *vdev, void *msg)
{
struct vpu_ipc_msg_payload_job_done *payload;
struct vpu_jsm_msg *job_ret_msg = msg;
int ret;
payload = (struct vpu_ipc_msg_payload_job_done *)&job_ret_msg->payload;
ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
if (ret)
ivpu_err(vdev, "Failed to finish job %d: %d\n", payload->job_id, ret);
}
void ivpu_jobs_abort_all(struct ivpu_device *vdev)
{
struct ivpu_job *job;
unsigned long id;
xa_for_each(&vdev->submitted_jobs_xa, id, job)
ivpu_job_done(vdev, id, VPU_JSM_STATUS_ABORTED);
}
static int ivpu_direct_job_submission(struct ivpu_job *job)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = job->vdev;
struct xa_limit job_id_range;
struct ivpu_cmdq *cmdq;
int ret;
mutex_lock(&file_priv->lock);
cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
if (!cmdq) {
ivpu_warn(vdev, "Failed get job queue, ctx %d engine %d\n",
file_priv->ctx.id, job->engine_idx);
ret = -EINVAL;
goto err_unlock;
}
job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
job_get(job, &job);
ret = xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
if (ret) {
ivpu_warn_ratelimited(vdev, "Failed to allocate job id: %d\n", ret);
goto err_job_put;
}
ret = ivpu_cmdq_push_job(cmdq, job);
if (ret)
goto err_xa_erase;
ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n",
job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id,
job->engine_idx, cmdq->jobq->header.tail);
if (ivpu_test_mode == IVPU_TEST_MODE_NULL_HW) {
ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
cmdq->jobq->header.head = cmdq->jobq->header.tail;
wmb(); /* Flush WC buffer for jobq header */
} else {
ivpu_cmdq_ring_db(vdev, cmdq);
}
mutex_unlock(&file_priv->lock);
return 0;
err_xa_erase:
xa_erase(&vdev->submitted_jobs_xa, job->job_id);
err_job_put:
job_put(job);
err_unlock:
mutex_unlock(&file_priv->lock);
return ret;
}
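/*
 * Job IDs allocated above embed the context in the upper 24 bits and an 8-bit
 * per-context index in the lower bits, so the xa_alloc() limit becomes a
 * contiguous [min, min | 0xff] window. A standalone sketch of that range
 * computation, assuming the same GENMASK(31, 8) / GENMASK(7, 0) split:
 */
static void sketch_job_id_range(unsigned int ctx_id,
				unsigned int *min, unsigned int *max)
{
	*min = (ctx_id - 1) << 8;	/* FIELD_PREP(JOB_ID_CONTEXT_MASK, ctx_id - 1) */
	*max = *min | 0xff;		/* min | JOB_ID_JOB_MASK */
}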
static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
u32 buf_count, u32 commands_offset)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
struct ww_acquire_ctx acquire_ctx;
enum dma_resv_usage usage;
struct ivpu_bo *bo;
int ret;
u32 i;
for (i = 0; i < buf_count; i++) {
struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);
if (!obj)
return -ENOENT;
job->bos[i] = to_ivpu_bo(obj);
ret = ivpu_bo_pin(job->bos[i]);
if (ret)
return ret;
}
bo = job->bos[CMD_BUF_IDX];
if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ)) {
ivpu_warn(vdev, "Buffer is already in use\n");
return -EBUSY;
}
if (commands_offset >= bo->base.size) {
ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
return -EINVAL;
}
job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
&acquire_ctx);
if (ret) {
ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
return ret;
}
for (i = 0; i < buf_count; i++) {
ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1);
if (ret) {
ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
goto unlock_reservations;
}
}
for (i = 0; i < buf_count; i++) {
usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP;
dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, usage);
}
unlock_reservations:
drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);
wmb(); /* Flush write combining buffers */
return ret;
}
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_submit *params = data;
struct ivpu_job *job;
u32 *buf_handles;
int idx, ret;
if (params->engine > DRM_IVPU_ENGINE_COPY)
return -EINVAL;
if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
return -EINVAL;
if (!IS_ALIGNED(params->commands_offset, 8))
return -EINVAL;
if (!file_priv->ctx.id)
return -EINVAL;
if (file_priv->has_mmu_faults)
return -EBADFD;
buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL);
if (!buf_handles)
return -ENOMEM;
ret = copy_from_user(buf_handles,
(void __user *)params->buffers_ptr,
params->buffer_count * sizeof(u32));
if (ret) {
ret = -EFAULT;
goto free_handles;
}
if (!drm_dev_enter(&vdev->drm, &idx)) {
ret = -ENODEV;
goto free_handles;
}
ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
file_priv->ctx.id, params->buffer_count);
job = ivpu_create_job(file_priv, params->engine, params->buffer_count);
if (!job) {
ivpu_err(vdev, "Failed to create job\n");
ret = -ENOMEM;
goto dev_exit;
}
ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
params->commands_offset);
if (ret) {
ivpu_err(vdev, "Failed to prepare job, ret %d\n", ret);
goto job_put;
}
ret = ivpu_direct_job_submission(job);
if (ret) {
dma_fence_signal(job->done_fence);
ivpu_err(vdev, "Failed to submit job to the HW, ret %d\n", ret);
}
job_put:
job_put(job);
dev_exit:
drm_dev_exit(idx);
free_handles:
kfree(buf_handles);
return ret;
}
static int ivpu_job_done_thread(void *arg)
{
struct ivpu_device *vdev = (struct ivpu_device *)arg;
struct ivpu_ipc_consumer cons;
struct vpu_jsm_msg jsm_msg;
bool jobs_submitted;
unsigned int timeout;
int ret;
ivpu_dbg(vdev, JOB, "Started %s\n", __func__);
ivpu_ipc_consumer_add(vdev, &cons, VPU_IPC_CHAN_JOB_RET);
while (!kthread_should_stop()) {
timeout = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
jobs_submitted = !xa_empty(&vdev->submitted_jobs_xa);
ret = ivpu_ipc_receive(vdev, &cons, NULL, &jsm_msg, timeout);
if (!ret) {
ivpu_job_done_message(vdev, &jsm_msg);
} else if (ret == -ETIMEDOUT) {
if (jobs_submitted && !xa_empty(&vdev->submitted_jobs_xa)) {
ivpu_err(vdev, "TDR detected, timeout %d ms", timeout);
ivpu_hw_diagnose_failure(vdev);
ivpu_pm_schedule_recovery(vdev);
}
}
}
ivpu_ipc_consumer_del(vdev, &cons);
ivpu_jobs_abort_all(vdev);
ivpu_dbg(vdev, JOB, "Stopped %s\n", __func__);
return 0;
}
int ivpu_job_done_thread_init(struct ivpu_device *vdev)
{
struct task_struct *thread;
thread = kthread_run(&ivpu_job_done_thread, (void *)vdev, "ivpu_job_done_thread");
if (IS_ERR(thread)) {
ivpu_err(vdev, "Failed to start job completion thread\n");
return -EIO;
}
get_task_struct(thread);
wake_up_process(thread);
vdev->job_done_thread = thread;
return 0;
}
void ivpu_job_done_thread_fini(struct ivpu_device *vdev)
{
kthread_stop(vdev->job_done_thread);
put_task_struct(vdev->job_done_thread);
}
| linux-master | drivers/accel/ivpu/ivpu_job.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
#define IPC_MAX_RX_MSG 128
#define IS_KTHREAD() (get_current()->flags & PF_KTHREAD)
struct ivpu_ipc_tx_buf {
struct ivpu_ipc_hdr ipc;
struct vpu_jsm_msg jsm;
};
struct ivpu_ipc_rx_msg {
struct list_head link;
struct ivpu_ipc_hdr *ipc_hdr;
struct vpu_jsm_msg *jsm_msg;
};
static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
{
ivpu_dbg(vdev, IPC,
"%s: vpu:0x%x (data_addr:0x%08x, data_size:0x%x, channel:0x%x, src_node:0x%x, dst_node:0x%x, status:0x%x)",
c, vpu_addr, ipc_hdr->data_addr, ipc_hdr->data_size, ipc_hdr->channel,
ipc_hdr->src_node, ipc_hdr->dst_node, ipc_hdr->status);
}
static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c,
struct vpu_jsm_msg *jsm_msg, u32 vpu_addr)
{
u32 *payload = (u32 *)&jsm_msg->payload;
ivpu_dbg(vdev, JSM,
"%s: vpu:0x%08x (type:0x%x, status:0x%x, id: 0x%x, result: 0x%x, payload:0x%x 0x%x 0x%x 0x%x 0x%x)\n",
c, vpu_addr, jsm_msg->type, jsm_msg->status, jsm_msg->request_id, jsm_msg->result,
payload[0], payload[1], payload[2], payload[3], payload[4]);
}
static void
ivpu_ipc_rx_mark_free(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
struct vpu_jsm_msg *jsm_msg)
{
ipc_hdr->status = IVPU_IPC_HDR_FREE;
if (jsm_msg)
jsm_msg->status = VPU_JSM_MSG_FREE;
wmb(); /* Flush WC buffers for message statuses */
}
static void ivpu_ipc_mem_fini(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
ivpu_bo_free_internal(ipc->mem_rx);
ivpu_bo_free_internal(ipc->mem_tx);
}
static int
ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct vpu_jsm_msg *req)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_tx_buf *tx_buf;
u32 tx_buf_vpu_addr;
u32 jsm_vpu_addr;
tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf));
if (!tx_buf_vpu_addr) {
ivpu_err(vdev, "Failed to reserve IPC buffer, size %ld\n",
sizeof(*tx_buf));
return -ENOMEM;
}
tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr);
if (drm_WARN_ON(&vdev->drm, !tx_buf)) {
gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf));
return -EIO;
}
jsm_vpu_addr = tx_buf_vpu_addr + offsetof(struct ivpu_ipc_tx_buf, jsm);
if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE)
ivpu_warn(vdev, "IPC message vpu:0x%x not released by firmware\n",
tx_buf_vpu_addr);
if (tx_buf->jsm.status != VPU_JSM_MSG_FREE)
ivpu_warn(vdev, "JSM message vpu:0x%x not released by firmware\n",
jsm_vpu_addr);
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->ipc.data_addr = jsm_vpu_addr;
/* TODO: Set data_size to actual JSM message size, not union of all messages */
tx_buf->ipc.data_size = sizeof(*req);
tx_buf->ipc.channel = cons->channel;
tx_buf->ipc.src_node = 0;
tx_buf->ipc.dst_node = 1;
tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED;
tx_buf->jsm.type = req->type;
tx_buf->jsm.status = VPU_JSM_MSG_ALLOCATED;
tx_buf->jsm.payload = req->payload;
req->request_id = atomic_inc_return(&ipc->request_id);
tx_buf->jsm.request_id = req->request_id;
cons->request_id = req->request_id;
wmb(); /* Flush WC buffers for IPC, JSM msgs */
cons->tx_vpu_addr = tx_buf_vpu_addr;
ivpu_jsm_msg_dump(vdev, "TX", &tx_buf->jsm, jsm_vpu_addr);
ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr);
return 0;
}
static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
if (vpu_addr)
gen_pool_free(ipc->mm_tx, vpu_addr, sizeof(struct ivpu_ipc_tx_buf));
}
static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
{
ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr);
}
void
ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, u32 channel)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
INIT_LIST_HEAD(&cons->link);
cons->channel = channel;
cons->tx_vpu_addr = 0;
cons->request_id = 0;
spin_lock_init(&cons->rx_msg_lock);
INIT_LIST_HEAD(&cons->rx_msg_list);
init_waitqueue_head(&cons->rx_msg_wq);
spin_lock_irq(&ipc->cons_list_lock);
list_add_tail(&cons->link, &ipc->cons_list);
spin_unlock_irq(&ipc->cons_list_lock);
}
void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_rx_msg *rx_msg, *r;
spin_lock_irq(&ipc->cons_list_lock);
list_del(&cons->link);
spin_unlock_irq(&ipc->cons_list_lock);
spin_lock_irq(&cons->rx_msg_lock);
list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) {
list_del(&rx_msg->link);
ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
atomic_dec(&ipc->rx_msg_count);
kfree(rx_msg);
}
spin_unlock_irq(&cons->rx_msg_lock);
ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
}
static int
ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
int ret;
mutex_lock(&ipc->lock);
if (!ipc->on) {
ret = -EAGAIN;
goto unlock;
}
ret = ivpu_ipc_tx_prepare(vdev, cons, req);
if (ret)
goto unlock;
ivpu_ipc_tx(vdev, cons->tx_vpu_addr);
unlock:
mutex_unlock(&ipc->lock);
return ret;
}
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_hdr *ipc_buf,
struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_rx_msg *rx_msg;
int wait_ret, ret = 0;
wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq,
(IS_KTHREAD() && kthread_should_stop()) ||
!list_empty(&cons->rx_msg_list),
msecs_to_jiffies(timeout_ms));
if (IS_KTHREAD() && kthread_should_stop())
return -EINTR;
if (wait_ret == 0)
return -ETIMEDOUT;
if (wait_ret < 0)
return -ERESTARTSYS;
spin_lock_irq(&cons->rx_msg_lock);
rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
if (!rx_msg) {
spin_unlock_irq(&cons->rx_msg_lock);
return -EAGAIN;
}
list_del(&rx_msg->link);
spin_unlock_irq(&cons->rx_msg_lock);
if (ipc_buf)
memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf));
if (rx_msg->jsm_msg) {
u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*ipc_payload));
if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
ret = -EBADMSG;
}
if (ipc_payload)
memcpy(ipc_payload, rx_msg->jsm_msg, size);
}
ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
atomic_dec(&ipc->rx_msg_count);
kfree(rx_msg);
return ret;
}
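/*
 * ivpu_ipc_receive() above triages the three outcomes of
 * wait_event_interruptible_timeout(): negative means a signal interrupted the
 * sleep, zero means the timeout expired with the condition still false, and a
 * positive value means the condition was observed. A standalone sketch of that
 * mapping; the numeric error values mirror -ERESTARTSYS and -ETIMEDOUT.
 */
static int sketch_wait_result_to_errno(long wait_ret)
{
	if (wait_ret < 0)
		return -512;	/* -ERESTARTSYS: interrupted by a signal */
	if (wait_ret == 0)
		return -110;	/* -ETIMEDOUT: condition never became true */
	return 0;		/* condition became true before the timeout */
}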
static int
ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
enum vpu_ipc_msg_type expected_resp_type,
struct vpu_jsm_msg *resp, u32 channel,
unsigned long timeout_ms)
{
struct ivpu_ipc_consumer cons;
int ret;
ivpu_ipc_consumer_add(vdev, &cons, channel);
ret = ivpu_ipc_send(vdev, &cons, req);
if (ret) {
ivpu_warn(vdev, "IPC send failed: %d\n", ret);
goto consumer_del;
}
ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms);
if (ret) {
ivpu_warn(vdev, "IPC receive failed: type 0x%x, ret %d\n", req->type, ret);
goto consumer_del;
}
if (resp->type != expected_resp_type) {
ivpu_warn(vdev, "Invalid JSM response type: 0x%x\n", resp->type);
ret = -EBADE;
}
consumer_del:
ivpu_ipc_consumer_del(vdev, &cons);
return ret;
}
int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
enum vpu_ipc_msg_type expected_resp_type,
struct vpu_jsm_msg *resp, u32 channel,
unsigned long timeout_ms)
{
struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
struct vpu_jsm_msg hb_resp;
int ret, hb_ret;
ret = ivpu_rpm_get(vdev);
if (ret < 0)
return ret;
ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp_type, resp,
channel, timeout_ms);
if (ret != -ETIMEDOUT)
goto rpm_put;
hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
vdev->timeout.jsm);
if (hb_ret == -ETIMEDOUT) {
ivpu_hw_diagnose_failure(vdev);
ivpu_pm_schedule_recovery(vdev);
}
rpm_put:
ivpu_rpm_put(vdev);
return ret;
}
static bool
ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
if (cons->channel != ipc_hdr->channel)
return false;
if (!jsm_msg || jsm_msg->request_id == cons->request_id)
return true;
return false;
}
static void
ivpu_ipc_dispatch(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_rx_msg *rx_msg;
unsigned long flags;
lockdep_assert_held(&ipc->cons_list_lock);
rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
if (!rx_msg) {
ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
return;
}
atomic_inc(&ipc->rx_msg_count);
rx_msg->ipc_hdr = ipc_hdr;
rx_msg->jsm_msg = jsm_msg;
spin_lock_irqsave(&cons->rx_msg_lock, flags);
list_add_tail(&rx_msg->link, &cons->rx_msg_list);
spin_unlock_irqrestore(&cons->rx_msg_lock, flags);
wake_up(&cons->rx_msg_wq);
}
int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_consumer *cons;
struct ivpu_ipc_hdr *ipc_hdr;
struct vpu_jsm_msg *jsm_msg;
unsigned long flags;
bool dispatched;
u32 vpu_addr;
/*
* The driver needs to purge all messages from the IPC FIFO to clear the IPC interrupt.
* Unless the FIFO is drained to zero, no further IPC interrupts will be generated.
*/
while (ivpu_hw_reg_ipc_rx_count_get(vdev)) {
vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev);
if (vpu_addr == REG_IO_ERROR) {
ivpu_err(vdev, "Failed to read IPC rx addr register\n");
return -EIO;
}
ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
if (!ipc_hdr) {
ivpu_warn(vdev, "IPC msg 0x%x out of range\n", vpu_addr);
continue;
}
ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr);
jsm_msg = NULL;
if (ipc_hdr->channel != IVPU_IPC_CHAN_BOOT_MSG) {
jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr);
if (!jsm_msg) {
ivpu_warn(vdev, "JSM msg 0x%x out of range\n", ipc_hdr->data_addr);
ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL);
continue;
}
ivpu_jsm_msg_dump(vdev, "RX", jsm_msg, ipc_hdr->data_addr);
}
if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) {
ivpu_warn(vdev, "IPC RX msg dropped, msg count %d\n", IPC_MAX_RX_MSG);
ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
continue;
}
dispatched = false;
spin_lock_irqsave(&ipc->cons_list_lock, flags);
list_for_each_entry(cons, &ipc->cons_list, link) {
if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
ivpu_ipc_dispatch(vdev, cons, ipc_hdr, jsm_msg);
dispatched = true;
break;
}
}
spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
if (!dispatched) {
ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr);
ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
}
}
return 0;
}
int ivpu_ipc_init(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
int ret = -ENOMEM;
ipc->mem_tx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
if (!ipc->mem_tx)
return ret;
ipc->mem_rx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
if (!ipc->mem_rx)
goto err_free_tx;
ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT),
-1, "TX_IPC_JSM");
if (IS_ERR(ipc->mm_tx)) {
ret = PTR_ERR(ipc->mm_tx);
ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx);
goto err_free_rx;
}
ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ipc->mem_tx->base.size, -1);
if (ret) {
ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret);
goto err_free_rx;
}
INIT_LIST_HEAD(&ipc->cons_list);
spin_lock_init(&ipc->cons_list_lock);
drmm_mutex_init(&vdev->drm, &ipc->lock);
ivpu_ipc_reset(vdev);
return 0;
err_free_rx:
ivpu_bo_free_internal(ipc->mem_rx);
err_free_tx:
ivpu_bo_free_internal(ipc->mem_tx);
return ret;
}
void ivpu_ipc_fini(struct ivpu_device *vdev)
{
ivpu_ipc_mem_fini(vdev);
}
void ivpu_ipc_enable(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
mutex_lock(&ipc->lock);
ipc->on = true;
mutex_unlock(&ipc->lock);
}
void ivpu_ipc_disable(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_consumer *cons, *c;
unsigned long flags;
mutex_lock(&ipc->lock);
ipc->on = false;
mutex_unlock(&ipc->lock);
spin_lock_irqsave(&ipc->cons_list_lock, flags);
list_for_each_entry_safe(cons, c, &ipc->cons_list, link)
wake_up(&cons->rx_msg_wq);
spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
}
void ivpu_ipc_reset(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
mutex_lock(&ipc->lock);
memset(ipc->mem_tx->kvaddr, 0, ipc->mem_tx->base.size);
memset(ipc->mem_rx->kvaddr, 0, ipc->mem_rx->base.size);
wmb(); /* Flush WC buffers for TX and RX rings */
mutex_unlock(&ipc->lock);
}
| linux-master | drivers/accel/ivpu/ivpu_ipc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include <linux/firmware.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include "vpu_boot_api.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_pm.h"
#define FW_GLOBAL_MEM_START (2ull * SZ_1G)
#define FW_GLOBAL_MEM_END (3ull * SZ_1G)
#define FW_SHARED_MEM_SIZE SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */
#define FW_SHARED_MEM_ALIGNMENT SZ_128K /* VPU MTRR limitation */
#define FW_RUNTIME_MAX_SIZE SZ_512M
#define FW_SHAVE_NN_MAX_SIZE SZ_2M
#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START)
#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
#define FW_VERSION_HEADER_SIZE SZ_4K
#define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
#define WATCHDOG_MSS_REDIRECT 32
#define WATCHDOG_NCE_REDIRECT 33
#define ADDR_TO_L2_CACHE_CFG(addr) ((addr) >> 31)
#define IVPU_FW_CHECK_API(vdev, fw_hdr, name, min_major) \
ivpu_fw_check_api(vdev, fw_hdr, #name, \
VPU_##name##_API_VER_INDEX, \
VPU_##name##_API_VER_MAJOR, \
VPU_##name##_API_VER_MINOR, min_major)
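/*
 * IVPU_FW_CHECK_API() above relies on preprocessor stringification (#) and
 * token pasting (##) to turn a bare token such as BOOT into both the "BOOT"
 * string and the VPU_BOOT_* identifiers. A standalone sketch of the same
 * trick with made-up names:
 */
#define SKETCH_LIMIT_FOO	42
#define SKETCH_CHECK(name, val) \
	sketch_check(#name, SKETCH_LIMIT_##name, (val))
static int sketch_check(const char *name, int limit, int val)
{
	return val <= limit;	/* name and limit both derive from one macro token */
}
/* SKETCH_CHECK(FOO, 7) expands to sketch_check("FOO", SKETCH_LIMIT_FOO, (7)). */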
static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
/* TODO: Remove mtl_vpu.bin from names after transition to generation based FW names */
static struct {
int gen;
const char *name;
} fw_names[] = {
{ IVPU_HW_37XX, "vpu_37xx.bin" },
{ IVPU_HW_37XX, "mtl_vpu.bin" },
{ IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
{ IVPU_HW_40XX, "vpu_40xx.bin" },
{ IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
};
static int ivpu_fw_request(struct ivpu_device *vdev)
{
int ret = -ENOENT;
int i;
if (ivpu_firmware) {
ret = request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);
if (!ret)
vdev->fw->name = ivpu_firmware;
return ret;
}
for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
if (fw_names[i].gen != ivpu_hw_gen(vdev))
continue;
ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev);
if (!ret) {
vdev->fw->name = fw_names[i].name;
return 0;
}
}
ivpu_err(vdev, "Failed to request firmware: %d\n", ret);
return ret;
}
static int
ivpu_fw_check_api(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr,
const char *str, int index, u16 expected_major, u16 expected_minor,
u16 min_major)
{
u16 major = (u16)(fw_hdr->api_version[index] >> 16);
u16 minor = (u16)(fw_hdr->api_version[index]);
if (major < min_major) {
ivpu_err(vdev, "Incompatible FW %s API version: %d.%d, required %d.0 or later\n",
str, major, minor, min_major);
return -EINVAL;
}
if (major != expected_major) {
ivpu_warn(vdev, "Major FW %s API version different: %d.%d (expected %d.%d)\n",
str, major, minor, expected_major, expected_minor);
}
ivpu_dbg(vdev, FW_BOOT, "FW %s API version: %d.%d (expected %d.%d)\n",
str, major, minor, expected_major, expected_minor);
return 0;
}
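/*
 * The version words checked above use a 16.16 layout: the major version sits
 * in the upper half of the 32-bit API word and the minor version in the lower
 * half. A standalone sketch of that split (the sample constant is made up):
 */
static void sketch_split_api_version(unsigned int packed,
				     unsigned short *major, unsigned short *minor)
{
	*major = (unsigned short)(packed >> 16);
	*minor = (unsigned short)packed;
}
/* Example: 0x00030002 decodes as API version 3.2. */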
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data;
u64 runtime_addr, image_load_addr, runtime_size, image_size;
if (fw->file->size <= FW_FILE_IMAGE_OFFSET) {
ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size);
return -EINVAL;
}
if (fw_hdr->header_version != VPU_FW_HEADER_VERSION) {
ivpu_err(vdev, "Invalid firmware header version: %u\n", fw_hdr->header_version);
return -EINVAL;
}
runtime_addr = fw_hdr->boot_params_load_address;
runtime_size = fw_hdr->runtime_size;
image_load_addr = fw_hdr->image_load_address;
image_size = fw_hdr->image_size;
if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) {
ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr);
return -EINVAL;
}
if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) {
ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size);
return -EINVAL;
}
if (FW_FILE_IMAGE_OFFSET + image_size > fw->file->size) {
ivpu_err(vdev, "Invalid image size: %llu\n", image_size);
return -EINVAL;
}
if (image_load_addr < runtime_addr ||
image_load_addr + image_size > runtime_addr + runtime_size) {
ivpu_err(vdev, "Invalid firmware load address size: 0x%llx and size %llu\n",
image_load_addr, image_size);
return -EINVAL;
}
if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
return -EINVAL;
}
if (fw_hdr->entry_point < image_load_addr ||
fw_hdr->entry_point >= image_load_addr + image_size) {
ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
return -EINVAL;
}
ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
fw_hdr->header_version, fw_hdr->image_format);
ivpu_info(vdev, "Firmware: %s, version: %s", fw->name,
(const char *)fw_hdr + VPU_FW_HEADER_SIZE);
if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3))
return -EINVAL;
if (IVPU_FW_CHECK_API(vdev, fw_hdr, JSM, 3))
return -EINVAL;
fw->runtime_addr = runtime_addr;
fw->runtime_size = runtime_size;
fw->image_load_offset = image_load_addr - runtime_addr;
fw->image_size = image_size;
fw->shave_nn_size = PAGE_ALIGN(fw_hdr->shave_nn_fw_size);
fw->cold_boot_entry_point = fw_hdr->entry_point;
fw->entry_point = fw->cold_boot_entry_point;
fw->trace_level = min_t(u32, ivpu_log_level, IVPU_FW_LOG_FATAL);
fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
fw->trace_hw_component_mask = -1;
ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
fw->runtime_addr, image_load_addr, fw->entry_point);
return 0;
}
static void ivpu_fw_release(struct ivpu_device *vdev)
{
release_firmware(vdev->fw->file);
}
static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT);
u64 size = FW_SHARED_MEM_SIZE;
if (start + size > FW_GLOBAL_MEM_END) {
ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size);
return -EINVAL;
}
ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
return 0;
}
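/*
 * The shared-region base computed above is rounded up with ALIGN() to the
 * 128K MTRR granularity. For a power-of-two alignment that reduces to the
 * usual add-then-mask idiom, sketched standalone below (values illustrative):
 */
static unsigned long long sketch_align_up(unsigned long long x,
					  unsigned long long a)
{
	return (x + a - 1) & ~(a - 1);	/* a must be a power of two */
}
/* Example: sketch_align_up(0x90001000, 0x20000) == 0x90020000. */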
static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
int log_verb_size;
int ret;
ret = ivpu_fw_update_global_range(vdev);
if (ret)
return ret;
fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
if (!fw->mem) {
ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
return -ENOMEM;
}
fw->mem_log_crit = ivpu_bo_alloc_internal(vdev, 0, IVPU_FW_CRITICAL_BUFFER_SIZE,
DRM_IVPU_BO_CACHED);
if (!fw->mem_log_crit) {
ivpu_err(vdev, "Failed to allocate critical log buffer\n");
ret = -ENOMEM;
goto err_free_fw_mem;
}
if (ivpu_log_level <= IVPU_FW_LOG_INFO)
log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE;
else
log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;
fw->mem_log_verb = ivpu_bo_alloc_internal(vdev, 0, log_verb_size, DRM_IVPU_BO_CACHED);
if (!fw->mem_log_verb) {
ivpu_err(vdev, "Failed to allocate verbose log buffer\n");
ret = -ENOMEM;
goto err_free_log_crit;
}
if (fw->shave_nn_size) {
fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
if (!fw->mem_shave_nn) {
ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
ret = -ENOMEM;
goto err_free_log_verb;
}
}
return 0;
err_free_log_verb:
ivpu_bo_free_internal(fw->mem_log_verb);
err_free_log_crit:
ivpu_bo_free_internal(fw->mem_log_crit);
err_free_fw_mem:
ivpu_bo_free_internal(fw->mem);
return ret;
}
static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
if (fw->mem_shave_nn) {
ivpu_bo_free_internal(fw->mem_shave_nn);
fw->mem_shave_nn = NULL;
}
ivpu_bo_free_internal(fw->mem_log_verb);
ivpu_bo_free_internal(fw->mem_log_crit);
ivpu_bo_free_internal(fw->mem);
fw->mem_log_verb = NULL;
fw->mem_log_crit = NULL;
fw->mem = NULL;
}
int ivpu_fw_init(struct ivpu_device *vdev)
{
int ret;
ret = ivpu_fw_request(vdev);
if (ret)
return ret;
ret = ivpu_fw_parse(vdev);
if (ret)
goto err_fw_release;
ret = ivpu_fw_mem_init(vdev);
if (ret)
goto err_fw_release;
return 0;
err_fw_release:
ivpu_fw_release(vdev);
return ret;
}
void ivpu_fw_fini(struct ivpu_device *vdev)
{
ivpu_fw_mem_fini(vdev);
ivpu_fw_release(vdev);
}
int ivpu_fw_load(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
u64 image_end_offset = fw->image_load_offset + fw->image_size;
memset(fw->mem->kvaddr, 0, fw->image_load_offset);
memcpy(fw->mem->kvaddr + fw->image_load_offset,
fw->file->data + FW_FILE_IMAGE_OFFSET, fw->image_size);
if (IVPU_WA(clear_runtime_mem)) {
u8 *start = fw->mem->kvaddr + image_end_offset;
u64 size = fw->mem->base.size - image_end_offset;
memset(start, 0, size);
}
wmb(); /* Flush WC buffers after writing fw->mem */
return 0;
}
static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
{
ivpu_dbg(vdev, FW_BOOT, "boot_params.magic = 0x%x\n",
boot_params->magic);
ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_id = 0x%x\n",
boot_params->vpu_id);
ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_count = 0x%x\n",
boot_params->vpu_count);
ivpu_dbg(vdev, FW_BOOT, "boot_params.frequency = %u\n",
boot_params->frequency);
ivpu_dbg(vdev, FW_BOOT, "boot_params.perf_clk_frequency = %u\n",
boot_params->perf_clk_frequency);
ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_start = 0x%llx\n",
boot_params->ipc_header_area_start);
ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_size = 0x%x\n",
boot_params->ipc_header_area_size);
ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_base = 0x%llx\n",
boot_params->shared_region_base);
ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_size = 0x%x\n",
boot_params->shared_region_size);
ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_start = 0x%llx\n",
boot_params->ipc_payload_area_start);
ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_size = 0x%x\n",
boot_params->ipc_payload_area_size);
ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_base = 0x%llx\n",
boot_params->global_aliased_pio_base);
ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_size = 0x%x\n",
boot_params->global_aliased_pio_size);
ivpu_dbg(vdev, FW_BOOT, "boot_params.autoconfig = 0x%x\n",
boot_params->autoconfig);
ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 0x%x\n",
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use);
ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n",
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg);
ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n",
boot_params->global_memory_allocator_base);
ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n",
boot_params->global_memory_allocator_size);
ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n",
boot_params->shave_nn_fw_base);
ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_mss = 0x%x\n",
boot_params->watchdog_irq_mss);
ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n",
boot_params->watchdog_irq_nce);
ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n",
boot_params->host_to_vpu_irq);
ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n",
boot_params->job_done_irq);
ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n",
boot_params->host_version_id);
ivpu_dbg(vdev, FW_BOOT, "boot_params.si_stepping = 0x%x\n",
boot_params->si_stepping);
ivpu_dbg(vdev, FW_BOOT, "boot_params.device_id = 0x%llx\n",
boot_params->device_id);
ivpu_dbg(vdev, FW_BOOT, "boot_params.feature_exclusion = 0x%llx\n",
boot_params->feature_exclusion);
ivpu_dbg(vdev, FW_BOOT, "boot_params.sku = 0x%llx\n",
boot_params->sku);
ivpu_dbg(vdev, FW_BOOT, "boot_params.min_freq_pll_ratio = 0x%x\n",
boot_params->min_freq_pll_ratio);
ivpu_dbg(vdev, FW_BOOT, "boot_params.pn_freq_pll_ratio = 0x%x\n",
boot_params->pn_freq_pll_ratio);
ivpu_dbg(vdev, FW_BOOT, "boot_params.max_freq_pll_ratio = 0x%x\n",
boot_params->max_freq_pll_ratio);
ivpu_dbg(vdev, FW_BOOT, "boot_params.default_trace_level = 0x%x\n",
boot_params->default_trace_level);
ivpu_dbg(vdev, FW_BOOT, "boot_params.tracing_buff_message_format_mask = 0x%llx\n",
boot_params->tracing_buff_message_format_mask);
ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_destination_mask = 0x%x\n",
boot_params->trace_destination_mask);
ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_hw_component_mask = 0x%llx\n",
boot_params->trace_hw_component_mask);
ivpu_dbg(vdev, FW_BOOT, "boot_params.boot_type = 0x%x\n",
boot_params->boot_type);
ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_base = 0x%llx\n",
boot_params->punit_telemetry_sram_base);
ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_size = 0x%llx\n",
boot_params->punit_telemetry_sram_size);
ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n",
boot_params->vpu_telemetry_enable);
}
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
{
struct ivpu_bo *ipc_mem_rx = vdev->ipc->mem_rx;
/* In case of warm boot only the save/restore return address has to be reset */
if (!ivpu_fw_is_cold_boot(vdev)) {
boot_params->save_restore_ret_address = 0;
vdev->pm->is_warmboot = true;
return;
}
vdev->pm->is_warmboot = false;
boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev);
/*
* Uncached region of VPU address space, covers IPC buffers, job queues
* and log buffers, programmable to L2$ Uncached by VPU MTRR
*/
boot_params->shared_region_base = vdev->hw->ranges.global.start;
boot_params->shared_region_size = vdev->hw->ranges.global.end -
vdev->hw->ranges.global.start;
boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;
boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;
boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
/* Allow configuration for L2C_PAGE_TABLE with boot param value */
boot_params->autoconfig = 1;
/* Enable L2 cache for first 2GB of high memory */
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.shave.start);
if (vdev->fw->mem_shave_nn)
boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;
boot_params->watchdog_irq_mss = WATCHDOG_MSS_REDIRECT;
boot_params->watchdog_irq_nce = WATCHDOG_NCE_REDIRECT;
boot_params->si_stepping = ivpu_revision(vdev);
boot_params->device_id = ivpu_device_id(vdev);
boot_params->feature_exclusion = vdev->hw->tile_fuse;
boot_params->sku = vdev->hw->sku;
boot_params->min_freq_pll_ratio = vdev->hw->pll.min_ratio;
boot_params->pn_freq_pll_ratio = vdev->hw->pll.pn_ratio;
boot_params->max_freq_pll_ratio = vdev->hw->pll.max_ratio;
boot_params->default_trace_level = vdev->fw->trace_level;
boot_params->tracing_buff_message_format_mask = BIT(VPU_TRACING_FORMAT_STRING);
boot_params->trace_destination_mask = vdev->fw->trace_destination_mask;
boot_params->trace_hw_component_mask = vdev->fw->trace_hw_component_mask;
boot_params->crit_tracing_buff_addr = vdev->fw->mem_log_crit->vpu_addr;
boot_params->crit_tracing_buff_size = vdev->fw->mem_log_crit->base.size;
boot_params->verbose_tracing_buff_addr = vdev->fw->mem_log_verb->vpu_addr;
boot_params->verbose_tracing_buff_size = vdev->fw->mem_log_verb->base.size;
boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
wmb(); /* Flush WC buffers after writing bootparams */
ivpu_fw_boot_params_print(vdev, boot_params);
}
| linux-master | drivers/accel/ivpu/ivpu_fw.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/set_memory.h>
#include <linux/xarray.h>
#include <drm/drm_cache.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>
#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
MODULE_IMPORT_NS(DMA_BUF);
static const struct drm_gem_object_funcs ivpu_gem_funcs;
static struct lock_class_key prime_bo_lock_class_key;
static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
{
/* Pages are managed by the underlying dma-buf */
return 0;
}
static void prime_free_pages_locked(struct ivpu_bo *bo)
{
/* Pages are managed by the underlying dma-buf */
}
static int prime_map_pages_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
struct sg_table *sgt;
sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
return PTR_ERR(sgt);
}
bo->sgt = sgt;
return 0;
}
static void prime_unmap_pages_locked(struct ivpu_bo *bo)
{
dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
bo->sgt = NULL;
}
static const struct ivpu_bo_ops prime_ops = {
.type = IVPU_BO_TYPE_PRIME,
.name = "prime",
.alloc_pages = prime_alloc_pages_locked,
.free_pages = prime_free_pages_locked,
.map_pages = prime_map_pages_locked,
.unmap_pages = prime_unmap_pages_locked,
};
static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
{
int npages = bo->base.size >> PAGE_SHIFT;
struct page **pages;
pages = drm_gem_get_pages(&bo->base);
if (IS_ERR(pages))
return PTR_ERR(pages);
if (bo->flags & DRM_IVPU_BO_WC)
set_pages_array_wc(pages, npages);
else if (bo->flags & DRM_IVPU_BO_UNCACHED)
set_pages_array_uc(pages, npages);
bo->pages = pages;
return 0;
}
static void shmem_free_pages_locked(struct ivpu_bo *bo)
{
if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);
drm_gem_put_pages(&bo->base, bo->pages, true, false);
bo->pages = NULL;
}
static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
{
int npages = bo->base.size >> PAGE_SHIFT;
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
struct sg_table *sgt;
int ret;
sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
if (IS_ERR(sgt)) {
ivpu_err(vdev, "Failed to allocate sgtable\n");
return PTR_ERR(sgt);
}
ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret) {
ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
goto err_free_sgt;
}
bo->sgt = sgt;
return 0;
err_free_sgt:
kfree(sgt);
return ret;
}
static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(bo->sgt);
kfree(bo->sgt);
bo->sgt = NULL;
}
static const struct ivpu_bo_ops shmem_ops = {
.type = IVPU_BO_TYPE_SHMEM,
.name = "shmem",
.alloc_pages = shmem_alloc_pages_locked,
.free_pages = shmem_free_pages_locked,
.map_pages = ivpu_bo_map_pages_locked,
.unmap_pages = ivpu_bo_unmap_pages_locked,
};
static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
{
unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
struct page **pages;
int ret;
pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
if (!pages)
return -ENOMEM;
for (i = 0; i < npages; i++) {
pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
if (!pages[i]) {
ret = -ENOMEM;
goto err_free_pages;
}
cond_resched();
}
bo->pages = pages;
return 0;
err_free_pages:
while (i--)
put_page(pages[i]);
kvfree(pages);
return ret;
}
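/*
 * internal_alloc_pages_locked() above unwinds a partially filled page array
 * with the "while (i--)" idiom, releasing only the entries allocated before
 * the failure. A standalone sketch of that unwind order with the allocation
 * and release callbacks left abstract:
 */
static int sketch_alloc_array(void **slots, unsigned int n,
			      void *(*alloc_one)(void), void (*free_one)(void *))
{
	unsigned int i;
	for (i = 0; i < n; i++) {
		slots[i] = alloc_one();
		if (!slots[i])
			goto err_unwind;
	}
	return 0;
err_unwind:
	while (i--)	/* frees slots[i - 1] .. slots[0], skips the failed slot */
		free_one(slots[i]);
	return -1;
}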
static void internal_free_pages_locked(struct ivpu_bo *bo)
{
unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);
for (i = 0; i < npages; i++)
put_page(bo->pages[i]);
kvfree(bo->pages);
bo->pages = NULL;
}
static const struct ivpu_bo_ops internal_ops = {
.type = IVPU_BO_TYPE_INTERNAL,
.name = "internal",
.alloc_pages = internal_alloc_pages_locked,
.free_pages = internal_free_pages_locked,
.map_pages = ivpu_bo_map_pages_locked,
.unmap_pages = ivpu_bo_unmap_pages_locked,
};
static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
int ret;
lockdep_assert_held(&bo->lock);
drm_WARN_ON(&vdev->drm, bo->sgt);
ret = bo->ops->alloc_pages(bo);
if (ret) {
ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
return ret;
}
ret = bo->ops->map_pages(bo);
if (ret) {
ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
goto err_free_pages;
}
return ret;
err_free_pages:
bo->ops->free_pages(bo);
return ret;
}
static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
{
mutex_lock(&bo->lock);
WARN_ON(!bo->sgt);
bo->ops->unmap_pages(bo);
WARN_ON(bo->sgt);
bo->ops->free_pages(bo);
WARN_ON(bo->pages);
mutex_unlock(&bo->lock);
}
/*
* ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
*
* This function pins physical memory pages, then maps the physical pages
* to IOMMU address space and finally updates the VPU MMU page tables
* to allow the VPU to translate VPU address to IOMMU address.
*/
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
int ret = 0;
mutex_lock(&bo->lock);
if (!bo->vpu_addr) {
ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n",
bo->ctx->id, bo->handle);
ret = -EINVAL;
goto unlock;
}
if (!bo->sgt) {
ret = ivpu_bo_alloc_and_map_pages_locked(bo);
if (ret)
goto unlock;
}
if (!bo->mmu_mapped) {
ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
ivpu_bo_is_snooped(bo));
if (ret) {
ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
goto unlock;
}
bo->mmu_mapped = true;
}
unlock:
mutex_unlock(&bo->lock);
return ret;
}
static int
ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
const struct ivpu_addr_range *range)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
int ret;
if (!range) {
if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
range = &vdev->hw->ranges.shave;
else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
range = &vdev->hw->ranges.dma;
else
range = &vdev->hw->ranges.user;
}
mutex_lock(&ctx->lock);
ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node);
if (!ret) {
bo->ctx = ctx;
bo->vpu_addr = bo->mm_node.start;
list_add_tail(&bo->ctx_node, &ctx->bo_list);
}
mutex_unlock(&ctx->lock);
return ret;
}
static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
struct ivpu_mmu_context *ctx = bo->ctx;
ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
mutex_lock(&bo->lock);
if (bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->sgt);
ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
bo->mmu_mapped = false;
}
mutex_lock(&ctx->lock);
list_del(&bo->ctx_node);
bo->vpu_addr = 0;
bo->ctx = NULL;
ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
mutex_unlock(&ctx->lock);
mutex_unlock(&bo->lock);
}
void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx)
{
struct ivpu_bo *bo, *tmp;
list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
ivpu_bo_free_vpu_addr(bo);
}
static struct ivpu_bo *
ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
u64 size, u32 flags, const struct ivpu_bo_ops *ops,
const struct ivpu_addr_range *range, u64 user_ptr)
{
struct ivpu_bo *bo;
int ret = 0;
if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
return ERR_PTR(-EINVAL);
switch (flags & DRM_IVPU_BO_CACHE_MASK) {
case DRM_IVPU_BO_CACHED:
case DRM_IVPU_BO_UNCACHED:
case DRM_IVPU_BO_WC:
break;
default:
return ERR_PTR(-EINVAL);
}
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);
mutex_init(&bo->lock);
bo->base.funcs = &ivpu_gem_funcs;
bo->flags = flags;
bo->ops = ops;
bo->user_ptr = user_ptr;
if (ops->type == IVPU_BO_TYPE_SHMEM)
ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
else
drm_gem_private_object_init(&vdev->drm, &bo->base, size);
if (ret) {
ivpu_err(vdev, "Failed to initialize drm object\n");
goto err_free;
}
if (flags & DRM_IVPU_BO_MAPPABLE) {
ret = drm_gem_create_mmap_offset(&bo->base);
if (ret) {
ivpu_err(vdev, "Failed to allocate mmap offset\n");
goto err_release;
}
}
if (mmu_context) {
ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
if (ret) {
ivpu_err(vdev, "Failed to add BO to context: %d\n", ret);
goto err_release;
}
}
return bo;
err_release:
drm_gem_object_release(&bo->base);
err_free:
kfree(bo);
return ERR_PTR(ret);
}
static void ivpu_bo_free(struct drm_gem_object *obj)
{
struct ivpu_bo *bo = to_ivpu_bo(obj);
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
if (bo->ctx)
ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
else
ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n",
(bool)bo->sgt, bo->mmu_mapped);
drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
vunmap(bo->kvaddr);
if (bo->ctx)
ivpu_bo_free_vpu_addr(bo);
if (bo->sgt)
ivpu_bo_unmap_and_free_pages(bo);
if (bo->base.import_attach)
drm_prime_gem_destroy(&bo->base, bo->sgt);
drm_gem_object_release(&bo->base);
mutex_destroy(&bo->lock);
kfree(bo);
}
static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct ivpu_bo *bo = to_ivpu_bo(obj);
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name);
if (obj->import_attach) {
/* Drop the reference drm_gem_mmap_obj() acquired. */
drm_gem_object_put(obj);
vma->vm_private_data = NULL;
return dma_buf_mmap(obj->dma_buf, vma, 0);
}
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND);
vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));
return 0;
}
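/*
 * Prime export hook - backing pages are allocated and DMA-mapped lazily
 * on first use and wrapped in an sg_table for the importer.
 */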
static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
{
struct ivpu_bo *bo = to_ivpu_bo(obj);
loff_t npages = obj->size >> PAGE_SHIFT;
int ret = 0;
mutex_lock(&bo->lock);
if (!bo->sgt)
ret = ivpu_bo_alloc_and_map_pages_locked(bo);
mutex_unlock(&bo->lock);
if (ret)
return ERR_PTR(ret);
return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
}
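/*
 * CPU page fault handler - allocate the backing pages on demand and
 * insert the PFN for the faulting address into the user mapping.
 */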
static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct ivpu_bo *bo = to_ivpu_bo(obj);
loff_t npages = obj->size >> PAGE_SHIFT;
pgoff_t page_offset;
struct page *page;
vm_fault_t ret;
int err;
mutex_lock(&bo->lock);
if (!bo->sgt) {
err = ivpu_bo_alloc_and_map_pages_locked(bo);
if (err) {
ret = vmf_error(err);
goto unlock;
}
}
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (page_offset >= npages) {
ret = VM_FAULT_SIGBUS;
} else {
page = bo->pages[page_offset];
ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
}
unlock:
mutex_unlock(&bo->lock);
return ret;
}
static const struct vm_operations_struct ivpu_vm_ops = {
.fault = ivpu_vm_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.free = ivpu_bo_free,
.mmap = ivpu_bo_mmap,
.vm_ops = &ivpu_vm_ops,
.get_sg_table = ivpu_bo_get_sg_table,
};
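/*
 * DRM_IVPU_BO_CREATE - allocate a shmem-backed buffer in the caller's
 * MMU context and return its GEM handle and VPU address.
 */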
int
ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_bo_create *args = data;
u64 size = PAGE_ALIGN(args->size);
struct ivpu_bo *bo;
int ret;
if (args->flags & ~DRM_IVPU_BO_FLAGS)
return -EINVAL;
if (size == 0)
return -EINVAL;
bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
bo, file_priv->ctx.id, args->size, args->flags);
return PTR_ERR(bo);
}
ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
if (!ret) {
args->vpu_addr = bo->vpu_addr;
args->handle = bo->handle;
}
ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
	 file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags);
/* Log before dropping the allocation reference - the put may free the BO if handle creation failed */
drm_gem_object_put(&bo->base);
return ret;
}
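/*
 * Allocate a kernel-owned buffer in the global context, optionally at a
 * fixed VPU address. The buffer is pinned, its caching attributes are
 * applied and it is vmapped for driver access via bo->kvaddr.
 */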
struct ivpu_bo *
ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
{
const struct ivpu_addr_range *range;
struct ivpu_addr_range fixed_range;
struct ivpu_bo *bo;
pgprot_t prot;
int ret;
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
if (vpu_addr) {
fixed_range.start = vpu_addr;
fixed_range.end = vpu_addr + size;
range = &fixed_range;
} else {
range = &vdev->hw->ranges.global;
}
bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
bo, vpu_addr, size, flags);
return NULL;
}
ret = ivpu_bo_pin(bo);
if (ret)
goto err_put;
if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);
if (bo->flags & DRM_IVPU_BO_WC)
set_pages_array_wc(bo->pages, bo->base.size >> PAGE_SHIFT);
else if (bo->flags & DRM_IVPU_BO_UNCACHED)
set_pages_array_uc(bo->pages, bo->base.size >> PAGE_SHIFT);
prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
if (!bo->kvaddr) {
ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
goto err_put;
}
ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
bo->vpu_addr, bo->base.size, flags);
return bo;
err_put:
drm_gem_object_put(&bo->base);
return NULL;
}
void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
drm_gem_object_put(&bo->base);
}
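/*
 * dma-buf import - attach to the foreign buffer, take a reference on it
 * and wrap it in an ivpu_bo backed by the prime ops.
 */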
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
struct ivpu_device *vdev = to_ivpu_device(dev);
struct dma_buf_attachment *attach;
struct ivpu_bo *bo;
attach = dma_buf_attach(buf, dev->dev);
if (IS_ERR(attach))
return ERR_CAST(attach);
get_dma_buf(buf);
bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
goto err_detach;
}
lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);
bo->base.import_attach = attach;
return &bo->base;
err_detach:
dma_buf_detach(buf, attach);
dma_buf_put(buf);
return ERR_CAST(bo);
}
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = to_ivpu_device(dev);
struct drm_ivpu_bo_info *args = data;
struct drm_gem_object *obj;
struct ivpu_bo *bo;
int ret = 0;
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
bo = to_ivpu_bo(obj);
mutex_lock(&bo->lock);
if (!bo->ctx) {
ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
if (ret) {
ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
goto unlock;
}
}
args->flags = bo->flags;
args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
args->vpu_addr = bo->vpu_addr;
args->size = obj->size;
unlock:
mutex_unlock(&bo->lock);
drm_gem_object_put(obj);
return ret;
}
int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_ivpu_bo_wait *args = data;
struct drm_gem_object *obj;
unsigned long timeout;
long ret;
timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -EINVAL;
ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
if (ret == 0) {
ret = -ETIMEDOUT;
} else if (ret > 0) {
ret = 0;
args->job_status = to_ivpu_bo(obj)->job_status;
}
drm_gem_object_put(obj);
return ret;
}
static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
unsigned long dma_refcount = 0;
if (bo->base.dma_buf && bo->base.dma_buf->file)
dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);
drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
}
void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
{
struct ivpu_device *vdev = to_ivpu_device(dev);
struct ivpu_file_priv *file_priv;
unsigned long ctx_id;
struct ivpu_bo *bo;
drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
"ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");
mutex_lock(&vdev->gctx.lock);
list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
ivpu_bo_print_info(bo, p);
mutex_unlock(&vdev->gctx.lock);
xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
if (!file_priv)
continue;
mutex_lock(&file_priv->ctx.lock);
list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
ivpu_bo_print_info(bo, p);
mutex_unlock(&file_priv->ctx.lock);
ivpu_file_priv_put(&file_priv);
}
}
void ivpu_bo_list_print(struct drm_device *dev)
{
struct drm_printer p = drm_info_printer(dev->dev);
ivpu_bo_list(dev, &p);
}
| linux-master | drivers/accel/ivpu/ivpu_gem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include "vpu_boot_api.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"
#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
__stringify(DRM_IVPU_DRIVER_MINOR) "."
#endif
static const struct drm_driver driver;
static struct lock_class_key submitted_jobs_xa_lock_class_key;
int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
int ivpu_test_mode;
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode: 0 - normal operation, 1 - fw unit test, 2 - null hw");
u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency");
u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");
bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
kref_get(&file_priv->ref);
ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
file_priv->ctx.id, kref_read(&file_priv->ref));
return file_priv;
}
struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
{
struct ivpu_file_priv *file_priv;
xa_lock_irq(&vdev->context_xa);
file_priv = xa_load(&vdev->context_xa, id);
/* file_priv may still be in context_xa during file_priv_release() */
if (file_priv && !kref_get_unless_zero(&file_priv->ref))
file_priv = NULL;
xa_unlock_irq(&vdev->context_xa);
if (file_priv)
ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
file_priv->ctx.id, kref_read(&file_priv->ref));
return file_priv;
}
static void file_priv_release(struct kref *ref)
{
struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
struct ivpu_device *vdev = file_priv->vdev;
ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);
ivpu_cmdq_release_all(file_priv);
ivpu_bo_remove_all_bos_from_context(&file_priv->ctx);
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
mutex_destroy(&file_priv->lock);
kfree(file_priv);
}
void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
struct ivpu_file_priv *file_priv = *link;
struct ivpu_device *vdev = file_priv->vdev;
drm_WARN_ON(&vdev->drm, !file_priv);
ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
file_priv->ctx.id, kref_read(&file_priv->ref));
*link = NULL;
kref_put(&file_priv->ref, file_priv_release);
}
static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
switch (args->index) {
case DRM_IVPU_CAP_METRIC_STREAMER:
args->value = 0;
break;
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
args->value = 1;
break;
default:
return -EINVAL;
}
return 0;
}
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
struct drm_ivpu_param *args = data;
int ret = 0;
int idx;
if (!drm_dev_enter(dev, &idx))
return -ENODEV;
switch (args->param) {
case DRM_IVPU_PARAM_DEVICE_ID:
args->value = pdev->device;
break;
case DRM_IVPU_PARAM_DEVICE_REVISION:
args->value = pdev->revision;
break;
case DRM_IVPU_PARAM_PLATFORM_TYPE:
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
args->value = ivpu_hw_reg_pll_freq_get(vdev);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);
break;
case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
args->value = vdev->hw->ranges.user.start;
break;
case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
args->value = file_priv->priority;
break;
case DRM_IVPU_PARAM_CONTEXT_ID:
args->value = file_priv->ctx.id;
break;
case DRM_IVPU_PARAM_FW_API_VERSION:
if (args->index < VPU_FW_API_VER_NUM) {
struct vpu_firmware_header *fw_hdr;
fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
args->value = fw_hdr->api_version[args->index];
} else {
ret = -EINVAL;
}
break;
case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
break;
case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
break;
case DRM_IVPU_PARAM_TILE_CONFIG:
args->value = vdev->hw->tile_fuse;
break;
case DRM_IVPU_PARAM_SKU:
args->value = vdev->hw->sku;
break;
case DRM_IVPU_PARAM_CAPABILITIES:
ret = ivpu_get_capabilities(vdev, args);
break;
default:
ret = -EINVAL;
break;
}
drm_dev_exit(idx);
return ret;
}
static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct drm_ivpu_param *args = data;
int ret = 0;
switch (args->param) {
case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME)
file_priv->priority = args->value;
else
ret = -EINVAL;
break;
default:
ret = -EINVAL;
}
return ret;
}
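/*
 * DRM open - reserve a context ID, set up the per-file private data and
 * its user MMU context, then publish it in the context xarray.
 */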
static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
struct ivpu_device *vdev = to_ivpu_device(dev);
struct ivpu_file_priv *file_priv;
u32 ctx_id;
void *old;
int ret;
ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
if (ret) {
ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
return ret;
}
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv) {
ret = -ENOMEM;
goto err_xa_erase;
}
file_priv->vdev = vdev;
file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL;
kref_init(&file_priv->ref);
mutex_init(&file_priv->lock);
ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
if (ret)
goto err_mutex_destroy;
old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
if (xa_is_err(old)) {
ret = xa_err(old);
ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
goto err_ctx_fini;
}
ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
ctx_id, current->comm, task_pid_nr(current));
file->driver_priv = file_priv;
return 0;
err_ctx_fini:
ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
err_mutex_destroy:
mutex_destroy(&file_priv->lock);
kfree(file_priv);
err_xa_erase:
xa_erase_irq(&vdev->context_xa, ctx_id);
return ret;
}
static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = to_ivpu_device(dev);
ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
file_priv->ctx.id, current->comm, task_pid_nr(current));
ivpu_file_priv_put(&file_priv);
}
static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
};
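/*
 * Poll the boot IPC channel until the firmware posts its ready message
 * or the platform-specific boot timeout expires.
 */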
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
struct ivpu_ipc_consumer cons;
struct ivpu_ipc_hdr ipc_hdr;
unsigned long timeout;
int ret;
if (ivpu_test_mode == IVPU_TEST_MODE_FW_TEST)
return 0;
ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG);
timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
while (1) {
ret = ivpu_ipc_irq_handler(vdev);
if (ret)
break;
ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
break;
cond_resched();
}
ivpu_ipc_consumer_del(vdev, &cons);
if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n",
ipc_hdr.data_addr);
return -EIO;
}
if (!ret)
ivpu_info(vdev, "VPU ready message received successfully\n");
else
ivpu_hw_diagnose_failure(vdev);
return ret;
}
/**
* ivpu_boot() - Start VPU firmware
* @vdev: VPU device
*
* This function is paired with ivpu_shutdown() but it doesn't power up the
* VPU because power up has to be called very early in ivpu_probe().
*/
int ivpu_boot(struct ivpu_device *vdev)
{
int ret;
/* Update boot params located at first 4KB of FW memory */
ivpu_fw_boot_params_setup(vdev, vdev->fw->mem->kvaddr);
ret = ivpu_hw_boot_fw(vdev);
if (ret) {
ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
return ret;
}
ret = ivpu_wait_for_ready(vdev);
if (ret) {
ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
return ret;
}
ivpu_hw_irq_clear(vdev);
enable_irq(vdev->irq);
ivpu_hw_irq_enable(vdev);
ivpu_ipc_enable(vdev);
return 0;
}
int ivpu_shutdown(struct ivpu_device *vdev)
{
int ret;
ivpu_hw_irq_disable(vdev);
disable_irq(vdev->irq);
ivpu_ipc_disable(vdev);
ivpu_mmu_disable(vdev);
ret = ivpu_hw_power_down(vdev);
if (ret)
ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);
return ret;
}
static const struct file_operations ivpu_fops = {
.owner = THIS_MODULE,
DRM_ACCEL_FOPS,
};
static const struct drm_driver driver = {
.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,
.open = ivpu_open,
.postclose = ivpu_postclose,
.gem_prime_import = ivpu_gem_prime_import,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = ivpu_debugfs_init,
#endif
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
.fops = &ivpu_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRM_IVPU_DRIVER_MAJOR,
.minor = DRM_IVPU_DRIVER_MINOR,
};
static int ivpu_irq_init(struct ivpu_device *vdev)
{
struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
int ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret < 0) {
ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
return ret;
}
vdev->irq = pci_irq_vector(pdev, 0);
ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
if (ret)
ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
return ret;
}
static int ivpu_pci_init(struct ivpu_device *vdev)
{
struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
struct resource *bar0 = &pdev->resource[0];
struct resource *bar4 = &pdev->resource[4];
int ret;
ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
if (IS_ERR(vdev->regv)) {
ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
return PTR_ERR(vdev->regv);
}
ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
if (IS_ERR(vdev->regb)) {
ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
return PTR_ERR(vdev->regb);
}
ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
if (ret) {
ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
return ret;
}
dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);
/* Clear any pending errors */
pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
/* VPU 37XX does not require the default 10 ms D3hot delay */
if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
pdev->d3hot_delay = 0;
ret = pcim_enable_device(pdev);
if (ret) {
ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
return ret;
}
pci_set_master(pdev);
return 0;
}
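/*
 * One-time device bring-up: PCI and IRQ setup, early HW info, power up,
 * MMU, firmware, IPC, PM and the job-done thread, finished by the first
 * firmware boot.
 */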
static int ivpu_dev_init(struct ivpu_device *vdev)
{
int ret;
vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
if (!vdev->hw)
return -ENOMEM;
vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
if (!vdev->mmu)
return -ENOMEM;
vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
if (!vdev->fw)
return -ENOMEM;
vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
if (!vdev->ipc)
return -ENOMEM;
vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
if (!vdev->pm)
return -ENOMEM;
if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
vdev->hw->ops = &ivpu_hw_40xx_ops;
vdev->hw->dma_bits = 48;
} else {
vdev->hw->ops = &ivpu_hw_37xx_ops;
vdev->hw->dma_bits = 38;
}
vdev->platform = IVPU_PLATFORM_INVALID;
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0);
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
ret = ivpu_pci_init(vdev);
if (ret) {
ivpu_err(vdev, "Failed to initialize PCI device: %d\n", ret);
goto err_xa_destroy;
}
ret = ivpu_irq_init(vdev);
if (ret) {
ivpu_err(vdev, "Failed to initialize IRQs: %d\n", ret);
goto err_xa_destroy;
}
/* Init basic HW info based on buttress registers which are accessible before power up */
ret = ivpu_hw_info_init(vdev);
if (ret) {
ivpu_err(vdev, "Failed to initialize HW info: %d\n", ret);
goto err_xa_destroy;
}
/* Power up early so the rest of init code can access VPU registers */
ret = ivpu_hw_power_up(vdev);
if (ret) {
ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
goto err_xa_destroy;
}
ret = ivpu_mmu_global_context_init(vdev);
if (ret) {
ivpu_err(vdev, "Failed to initialize global MMU context: %d\n", ret);
goto err_power_down;
}
ret = ivpu_mmu_init(vdev);
if (ret) {
ivpu_err(vdev, "Failed to initialize MMU device: %d\n", ret);
goto err_mmu_gctx_fini;
}
ret = ivpu_fw_init(vdev);
if (ret) {
ivpu_err(vdev, "Failed to initialize firmware: %d\n", ret);
goto err_mmu_gctx_fini;
}
ret = ivpu_ipc_init(vdev);
if (ret) {
ivpu_err(vdev, "Failed to initialize IPC: %d\n", ret);
goto err_fw_fini;
}
ret = ivpu_pm_init(vdev);
if (ret) {
ivpu_err(vdev, "Failed to initialize PM: %d\n", ret);
goto err_ipc_fini;
}
ret = ivpu_job_done_thread_init(vdev);
if (ret) {
ivpu_err(vdev, "Failed to initialize job done thread: %d\n", ret);
goto err_ipc_fini;
}
ret = ivpu_fw_load(vdev);
if (ret) {
ivpu_err(vdev, "Failed to load firmware: %d\n", ret);
goto err_job_done_thread_fini;
}
ret = ivpu_boot(vdev);
if (ret) {
ivpu_err(vdev, "Failed to boot: %d\n", ret);
goto err_job_done_thread_fini;
}
ivpu_pm_enable(vdev);
return 0;
err_job_done_thread_fini:
ivpu_job_done_thread_fini(vdev);
err_ipc_fini:
ivpu_ipc_fini(vdev);
err_fw_fini:
ivpu_fw_fini(vdev);
err_mmu_gctx_fini:
ivpu_mmu_global_context_fini(vdev);
err_power_down:
ivpu_hw_power_down(vdev);
if (IVPU_WA(d3hot_after_power_off))
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
err_xa_destroy:
xa_destroy(&vdev->submitted_jobs_xa);
xa_destroy(&vdev->context_xa);
return ret;
}
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_pm_disable(vdev);
ivpu_shutdown(vdev);
if (IVPU_WA(d3hot_after_power_off))
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
ivpu_job_done_thread_fini(vdev);
ivpu_pm_cancel_recovery(vdev);
ivpu_ipc_fini(vdev);
ivpu_fw_fini(vdev);
ivpu_mmu_global_context_fini(vdev);
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
xa_destroy(&vdev->submitted_jobs_xa);
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
xa_destroy(&vdev->context_xa);
}
static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ivpu_device *vdev;
int ret;
vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
if (IS_ERR(vdev))
return PTR_ERR(vdev);
pci_set_drvdata(pdev, vdev);
ret = ivpu_dev_init(vdev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize VPU device: %d\n", ret);
return ret;
}
ret = drm_dev_register(&vdev->drm, 0);
if (ret) {
dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
ivpu_dev_fini(vdev);
}
return ret;
}
static void ivpu_remove(struct pci_dev *pdev)
{
struct ivpu_device *vdev = pci_get_drvdata(pdev);
drm_dev_unplug(&vdev->drm);
ivpu_dev_fini(vdev);
}
static const struct dev_pm_ops ivpu_drv_pci_pm = {
SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};
static const struct pci_error_handlers ivpu_drv_pci_err = {
.reset_prepare = ivpu_pm_reset_prepare_cb,
.reset_done = ivpu_pm_reset_done_cb,
};
static struct pci_driver ivpu_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = ivpu_pci_ids,
.probe = ivpu_probe,
.remove = ivpu_remove,
.driver = {
.pm = &ivpu_drv_pci_pm,
},
.err_handler = &ivpu_drv_pci_err,
};
module_pci_driver(ivpu_pci_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);
| linux-master | drivers/accel/ivpu/ivpu_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include <linux/bitfield.h>
#include <linux/highmem.h>
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#define IVPU_MMU_PGD_INDEX_MASK GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF BIT(10)
#define IVPU_MMU_ENTRY_FLAG_USER BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID BIT(0)
#define IVPU_MMU_PAGE_SIZE SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))
#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)
static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
dma_addr_t pgd_dma;
pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma,
GFP_KERNEL);
if (!pgtable->pgd_dma_ptr)
return -ENOMEM;
pgtable->pgd_dma = pgd_dma;
return 0;
}
static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
if (cpu_addr)
dma_free_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, cpu_addr,
dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK);
}
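/*
 * Walk all four page table levels and free every table allocated for
 * this context, finishing with the PGD itself.
 */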
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
int pgd_idx, pud_idx, pmd_idx;
dma_addr_t pud_dma, pmd_dma, pte_dma;
u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;
for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
pud_dma = pgtable->pgd_dma_ptr[pgd_idx];
if (!pud_dma_ptr)
continue;
for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];
if (!pmd_dma_ptr)
continue;
for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];
ivpu_mmu_pgtable_free(vdev, pte_dma_ptr, pte_dma);
}
kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
}
kfree(pgtable->pmd_ptrs[pgd_idx]);
kfree(pgtable->pte_ptrs[pgd_idx]);
ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
}
ivpu_mmu_pgtable_free(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}
static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
dma_addr_t pud_dma;
if (pud_dma_ptr)
return pud_dma_ptr;
pud_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL);
if (!pud_dma_ptr)
return NULL;
drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
if (!pgtable->pmd_ptrs[pgd_idx])
goto err_free_pud_dma_ptr;
drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
if (!pgtable->pte_ptrs[pgd_idx])
goto err_free_pmd_ptrs;
pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;
return pud_dma_ptr;
err_free_pmd_ptrs:
kfree(pgtable->pmd_ptrs[pgd_idx]);
err_free_pud_dma_ptr:
ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
return NULL;
}
static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
int pud_idx)
{
u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
dma_addr_t pmd_dma;
if (pmd_dma_ptr)
return pmd_dma_ptr;
pmd_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
if (!pmd_dma_ptr)
return NULL;
drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
goto err_free_pmd_dma_ptr;
pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;
return pmd_dma_ptr;
err_free_pmd_dma_ptr:
ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
return NULL;
}
static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
int pgd_idx, int pud_idx, int pmd_idx)
{
u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
dma_addr_t pte_dma;
if (pte_dma_ptr)
return pte_dma_ptr;
pte_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
if (!pte_dma_ptr)
return NULL;
pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;
return pte_dma_ptr;
}
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
u64 *pte;
int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
/* Allocate PUD - second level page table if needed */
if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
return -ENOMEM;
/* Allocate PMD - third level page table if needed */
if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
return -ENOMEM;
/* Allocate PTE - fourth level page table if needed */
pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
if (!pte)
return -ENOMEM;
/* Update PTE */
pte[pte_idx] = dma_addr | prot;
return 0;
}
static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
dma_addr_t dma_addr, u64 prot)
{
size_t size = IVPU_MMU_CONT_PAGES_SIZE;
drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));
prot |= IVPU_MMU_ENTRY_FLAG_CONT;
while (size) {
int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
if (ret)
return ret;
size -= IVPU_MMU_PAGE_SIZE;
vpu_addr += IVPU_MMU_PAGE_SIZE;
dma_addr += IVPU_MMU_PAGE_SIZE;
}
return 0;
}
static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
/* Update PTE with dummy physical address and clear flags */
ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}
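/*
 * Flush the CPU cache lines of every page table page touched by the
 * given range so the VPU MMU observes the updated entries.
 */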
static void
ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
struct ivpu_mmu_pgtable *pgtable = &ctx->pgtable;
u64 end_addr = vpu_addr + size;
/* Align to PMD entry (2 MB) */
vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);
while (vpu_addr < end_addr) {
int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
u64 pud_end = (pgd_idx + 1) * (u64)IVPU_MMU_PUD_MAP_SIZE;
while (vpu_addr < end_addr && vpu_addr < pud_end) {
int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
u64 pmd_end = (pud_idx + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;
while (vpu_addr < end_addr && vpu_addr < pmd_end) {
int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
clflush_cache_range(pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx],
IVPU_MMU_PGTABLE_SIZE);
vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
}
clflush_cache_range(pgtable->pmd_ptrs[pgd_idx][pud_idx],
IVPU_MMU_PGTABLE_SIZE);
}
clflush_cache_range(pgtable->pud_ptrs[pgd_idx], IVPU_MMU_PGTABLE_SIZE);
}
clflush_cache_range(pgtable->pgd_dma_ptr, IVPU_MMU_PGTABLE_SIZE);
}
static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
int map_size;
int ret;
while (size) {
if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
map_size = IVPU_MMU_CONT_PAGES_SIZE;
} else {
ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
map_size = IVPU_MMU_PAGE_SIZE;
}
if (ret)
return ret;
vpu_addr += map_size;
dma_addr += map_size;
size -= map_size;
}
return 0;
}
static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
while (size) {
ivpu_mmu_context_unmap_page(ctx, vpu_addr);
vpu_addr += IVPU_MMU_PAGE_SIZE;
size -= IVPU_MMU_PAGE_SIZE;
}
}
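/*
 * Map a DMA-mapped scatter-gather table at the given VPU address. Page
 * tables are flushed per segment and the TLB is invalidated once the
 * whole table has been mapped.
 */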
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
struct scatterlist *sg;
int ret;
u64 prot;
u64 i;
if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
return -EINVAL;
/*
* VPU is only 32 bit, but DMA engine is 38 bit
* Ranges < 2 GB are reserved for VPU internal registers
* Limit range to 8 GB
*/
if (vpu_addr < SZ_2G || vpu_addr > SZ_8G)
return -EINVAL;
prot = IVPU_MMU_ENTRY_MAPPED;
if (llc_coherent)
prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;
mutex_lock(&ctx->lock);
for_each_sgtable_dma_sg(sgt, sg, i) {
dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
size_t size = sg_dma_len(sg) + sg->offset;
ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
if (ret) {
ivpu_err(vdev, "Failed to map context pages\n");
mutex_unlock(&ctx->lock);
return ret;
}
ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
vpu_addr += size;
}
mutex_unlock(&ctx->lock);
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
if (ret)
ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
return ret;
}
void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt)
{
struct scatterlist *sg;
int ret;
u64 i;
if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
ivpu_warn(vdev, "Unaligned vpu_addr: 0x%llx\n", vpu_addr);
mutex_lock(&ctx->lock);
for_each_sgtable_dma_sg(sgt, sg, i) {
size_t size = sg_dma_len(sg) + sg->offset;
ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
vpu_addr += size;
}
mutex_unlock(&ctx->lock);
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
if (ret)
ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}
int
ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
const struct ivpu_addr_range *range,
u64 size, struct drm_mm_node *node)
{
lockdep_assert_held(&ctx->lock);
if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
if (!drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
range->start, range->end, DRM_MM_INSERT_BEST))
return 0;
}
return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
range->start, range->end, DRM_MM_INSERT_BEST);
}
void
ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
lockdep_assert_held(&ctx->lock);
drm_mm_remove_node(node);
}
static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
u64 start, end;
int ret;
mutex_init(&ctx->lock);
INIT_LIST_HEAD(&ctx->bo_list);
ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
if (ret)
return ret;
if (!context_id) {
start = vdev->hw->ranges.global.start;
end = vdev->hw->ranges.shave.end;
} else {
start = vdev->hw->ranges.user.start;
end = vdev->hw->ranges.dma.end;
}
drm_mm_init(&ctx->mm, start, end - start);
ctx->id = context_id;
return 0;
}
static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
return;
mutex_destroy(&ctx->lock);
ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
drm_mm_takedown(&ctx->mm);
ctx->pgtable.pgd_dma_ptr = NULL;
ctx->pgtable.pgd_dma = 0;
}
int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
return ivpu_mmu_context_fini(vdev, &vdev->gctx);
}
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
{
struct ivpu_file_priv *file_priv;
xa_lock(&vdev->context_xa);
file_priv = xa_load(&vdev->context_xa, ssid);
if (file_priv)
file_priv->has_mmu_faults = true;
xa_unlock(&vdev->context_xa);
}
int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{
int ret;
drm_WARN_ON(&vdev->drm, !ctx_id);
ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
if (ret) {
ivpu_err(vdev, "Failed to initialize context: %d\n", ret);
return ret;
}
ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
if (ret) {
ivpu_err(vdev, "Failed to set page table: %d\n", ret);
goto err_context_fini;
}
return 0;
err_context_fini:
ivpu_mmu_context_fini(vdev, ctx);
return ret;
}
void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
drm_WARN_ON(&vdev->drm, !ctx->id);
ivpu_mmu_clear_pgtable(vdev, ctx->id);
ivpu_mmu_context_fini(vdev, ctx);
}
| linux-master | drivers/accel/ivpu/ivpu_mmu_context.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"
#define TILE_FUSE_ENABLE_BOTH 0x0
#define TILE_SKU_BOTH_MTL 0x3630
/* Work point configuration values */
#define CONFIG_1_TILE 0x01
#define CONFIG_2_TILE 0x02
#define PLL_RATIO_5_3 0x01
#define PLL_RATIO_4_3 0x02
#define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))
#define WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3)
#define WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3)
#define WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3)
#define WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3)
#define WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0)
#define PLL_REF_CLK_FREQ (50 * 1000000)
#define PLL_SIMULATION_FREQ (10 * 1000000)
#define PLL_DEFAULT_EPP_VALUE 0x80
#define TIM_SAFE_ENABLE 0xf1d0dead
#define TIM_WATCHDOG_RESET_VALUE 0xffffffff
#define TIMEOUT_US (150 * USEC_PER_MSEC)
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (500 * USEC_PER_MSEC)
#define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
#define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
(REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
static char *ivpu_platform_to_str(u32 platform)
{
switch (platform) {
case IVPU_PLATFORM_SILICON:
return "IVPU_PLATFORM_SILICON";
case IVPU_PLATFORM_SIMICS:
return "IVPU_PLATFORM_SIMICS";
case IVPU_PLATFORM_FPGA:
return "IVPU_PLATFORM_FPGA";
default:
return "Invalid platform";
}
}
static void ivpu_hw_read_platform(struct ivpu_device *vdev)
{
u32 gen_ctrl = REGV_RD32(VPU_37XX_HOST_SS_GEN_CTRL);
u32 platform = REG_GET_FLD(VPU_37XX_HOST_SS_GEN_CTRL, PS, gen_ctrl);
if (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA)
vdev->platform = platform;
else
vdev->platform = IVPU_PLATFORM_SILICON;
ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
ivpu_platform_to_str(vdev->platform), vdev->platform);
}
static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
vdev->wa.clear_runtime_mem = false;
vdev->wa.d3hot_after_power_off = true;
if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4)
vdev->wa.interrupt_clear_with_0 = true;
}
static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
{
if (ivpu_is_simics(vdev) || ivpu_is_fpga(vdev)) {
vdev->timeout.boot = 100000;
vdev->timeout.jsm = 50000;
vdev->timeout.tdr = 2000000;
vdev->timeout.reschedule_suspend = 1000;
} else {
vdev->timeout.boot = 1000;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 2000;
vdev->timeout.reschedule_suspend = 10;
}
}
static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
{
return REGB_POLL_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}
/* Send KMD initiated workpoint change */
static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
u16 target_ratio, u16 config)
{
int ret;
u32 val;
ret = ivpu_pll_wait_for_cmd_send(vdev);
if (ret) {
ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
return ret;
}
val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_CMD);
val = REG_SET_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, val);
REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_CMD, val);
ret = ivpu_pll_wait_for_cmd_send(vdev);
if (ret)
ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);
return ret;
}
static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
{
u32 exp_val = enable ? 0x1 : 0x0;
if (IVPU_WA(punit_disabled))
return 0;
return REGB_POLL_FLD(VPU_37XX_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}
static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
{
if (IVPU_WA(punit_disabled))
return 0;
return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
u32 fmin_fuse, fmax_fuse;
fmin_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMIN_FUSE);
fuse_min_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
fuse_pn_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
fmax_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMAX_FUSE);
fuse_max_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}
static int ivpu_hw_37xx_wait_for_vpuip_bar(struct ivpu_device *vdev)
{
return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}
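/*
 * Program the PLL workpoint - a non-zero target ratio powers the VPU up,
 * a zero ratio and config powers it down. Wait for PLL lock and, on
 * power up, for the VPU ready status and the VPUIP bar.
 */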
static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
{
struct ivpu_hw_info *hw = vdev->hw;
u16 target_ratio;
u16 config;
int ret;
if (IVPU_WA(punit_disabled)) {
ivpu_dbg(vdev, PM, "Skipping PLL request on %s\n",
ivpu_platform_to_str(vdev->platform));
return 0;
}
if (enable) {
target_ratio = hw->pll.pn_ratio;
config = hw->config;
} else {
target_ratio = 0;
config = 0;
}
ivpu_dbg(vdev, PM, "PLL workpoint request: config 0x%04x pll ratio 0x%x\n",
config, target_ratio);
ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config);
if (ret) {
ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
return ret;
}
ret = ivpu_pll_wait_for_lock(vdev, enable);
if (ret) {
ivpu_err(vdev, "Timed out waiting for PLL lock\n");
return ret;
}
if (enable) {
ret = ivpu_pll_wait_for_status_ready(vdev);
if (ret) {
ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
return ret;
}
ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
if (ret) {
ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
return ret;
}
}
return 0;
}
static int ivpu_pll_enable(struct ivpu_device *vdev)
{
return ivpu_pll_drive(vdev, true);
}
static int ivpu_pll_disable(struct ivpu_device *vdev)
{
return ivpu_pll_drive(vdev, false);
}
static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
{
u32 val = 0;
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}
static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);
if (enable) {
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
} else {
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
}
REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
}
static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);
if (enable) {
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
} else {
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
}
REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
}
static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
}
static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);
if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
}
static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);
if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
}
static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
return -EIO;
return 0;
}
static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QACCEPTN);
if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
return -EIO;
return 0;
}
static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QDENY);
if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
return -EIO;
return 0;
}
static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
{
ivpu_boot_host_ss_rst_clr_assert(vdev);
return ivpu_boot_noc_qreqn_check(vdev, 0x0);
}
static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
{
REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
}
static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
{
int ret;
u32 val;
val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
if (enable)
val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
else
val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
if (ret) {
ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
return ret;
}
ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
if (ret)
ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
return ret;
}
static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
{
return ivpu_boot_host_ss_axi_drive(vdev, true);
}
static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
{
int ret;
u32 val;
val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
if (enable) {
val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
} else {
val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
}
REGV_WR32(MTL_VPU_TOP_NOC_QREQN, val);
ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
if (ret) {
ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
return ret;
}
ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
if (ret)
ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
return ret;
}
static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
{
return ivpu_boot_host_ss_top_noc_drive(vdev, true);
}
static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
if (enable)
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
else
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}
static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);
if (enable)
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
else
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
/* The FPGA model (UPF) is not power aware, so skip Power Island polling */
if (ivpu_is_fpga(vdev))
return 0;
return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
}
static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);
if (enable)
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
else
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);
if (enable)
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
else
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}
static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
{
int ret;
ivpu_boot_pwr_island_trickle_drive(vdev, true);
ivpu_boot_pwr_island_drive(vdev, true);
ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
if (ret) {
ivpu_err(vdev, "Timed out waiting for power island status\n");
return ret;
}
ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0);
if (ret) {
ivpu_err(vdev, "Failed qrenqn check %d\n", ret);
return ret;
}
ivpu_boot_host_ss_clk_drive(vdev, true);
ivpu_boot_pwr_island_isolation_drive(vdev, false);
ivpu_boot_host_ss_rst_drive(vdev, true);
ivpu_boot_dpu_active_drive(vdev, true);
return ret;
}
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}
static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
{
u32 val;
val = REGV_RD32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);
val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
val = vdev->fw->entry_point >> 9;
REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
}
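/*
 * Request a D0i3 entry or exit through the buttress control register,
 * waiting for any in-progress transition before and after the request.
 */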
static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
{
int ret;
u32 val;
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
return ret;
}
val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL);
if (enable)
val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
else
val = REG_CLR_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
REGB_WR32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, val);
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
return ret;
}
static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
hw->sku = TILE_SKU_BOTH_MTL;
hw->config = WP_CONFIG_2_TILE_4_3_RATIO;
ivpu_pll_init_frequency_ratios(vdev);
ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
ivpu_hw_init_range(&hw->ranges.user, 0xc0000000, 255 * SZ_1M);
ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);
return 0;
}
static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
int ret;
u32 val;
if (IVPU_WA(punit_disabled))
return 0;
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
return ret;
}
val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Timed out waiting for RESET completion\n");
return ret;
}
static int ivpu_hw_37xx_d0i3_enable(struct ivpu_device *vdev)
{
int ret;
ret = ivpu_boot_d0i3_drive(vdev, true);
if (ret)
ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
udelay(5); /* VPU requires 5 us to complete the transition */
return ret;
}
static int ivpu_hw_37xx_d0i3_disable(struct ivpu_device *vdev)
{
int ret;
ret = ivpu_boot_d0i3_drive(vdev, false);
if (ret)
ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
return ret;
}
static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
int ret;
ivpu_hw_read_platform(vdev);
ivpu_hw_wa_init(vdev);
ivpu_hw_timeouts_init(vdev);
ret = ivpu_hw_37xx_reset(vdev);
if (ret)
ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
ret = ivpu_hw_37xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
ret = ivpu_pll_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
return ret;
}
ret = ivpu_boot_host_ss_configure(vdev);
if (ret) {
ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
return ret;
}
/*
* The control circuitry for vpu_idle indication logic powers up active.
* To ensure unnecessary low power mode signal from LRT during bring up,
* KMD disables the circuitry prior to bringing up the Main Power island.
*/
ivpu_boot_vpu_idle_gen_disable(vdev);
ret = ivpu_boot_pwr_domain_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
return ret;
}
ret = ivpu_boot_host_ss_axi_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
return ret;
}
ret = ivpu_boot_host_ss_top_noc_enable(vdev);
if (ret)
ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
return ret;
}
static int ivpu_hw_37xx_boot_fw(struct ivpu_device *vdev)
{
ivpu_boot_no_snoop_enable(vdev);
ivpu_boot_tbu_mmu_enable(vdev);
ivpu_boot_soc_cpu_boot(vdev);
return 0;
}
static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev)
{
u32 val;
if (IVPU_WA(punit_disabled))
return true;
val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_STATUS);
return REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, val) &&
REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val);
}
static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
{
int ret = 0;
if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
ivpu_err(vdev, "Failed to reset the VPU\n");
if (ivpu_pll_disable(vdev)) {
ivpu_err(vdev, "Failed to disable PLL\n");
ret = -EIO;
}
if (ivpu_hw_37xx_d0i3_enable(vdev)) {
ivpu_err(vdev, "Failed to enter D0I3\n");
ret = -EIO;
}
return ret;
}
static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
{
u32 val;
/* Enable writing and set non-zero WDT value */
REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
REGV_WR32(MTL_VPU_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
/* Enable writing and disable watchdog timer */
REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
REGV_WR32(MTL_VPU_CPU_SS_TIM_WDOG_EN, 0);
/* Now clear the timeout interrupt */
val = REGV_RD32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG);
val = REG_CLR_FLD(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
}
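/*
 * Derive the CPU clock from the PLL ratio: pll_clock = ratio * reference clock
 * (PLL_REF_CLK_FREQ), scaled by 2/4 for the 4/3 ratio config and by 2/5
 * otherwise. For example, with a 50 MHz reference a ratio of 0x50 (80) gives a
 * 4 GHz PLL clock and a 2 GHz CPU clock in the 4/3 config.
 */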
static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
if ((config & 0xff) == PLL_RATIO_4_3)
cpu_clock = pll_clock * 2 / 4;
else
cpu_clock = pll_clock * 2 / 5;
return cpu_clock;
}
/* Register indirect accesses */
static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
{
u32 pll_curr_ratio;
pll_curr_ratio = REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL);
pll_curr_ratio &= VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK;
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
}
static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
}
static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE);
}
static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
}
static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0;
u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET);
REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
}
static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}
static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
{
REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
}
static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev)
{
REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
}
static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
{
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
}
static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
ivpu_pm_schedule_recovery(vdev);
}
static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
ivpu_hw_wdt_disable(vdev);
ivpu_pm_schedule_recovery(vdev);
}
static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
ivpu_pm_schedule_recovery(vdev);
}
/* Handler for IRQs from VPU core (irqV) */
static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
{
u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
ivpu_mmu_irq_evtq_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
ivpu_ipc_irq_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
ivpu_mmu_irq_gerr_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
ivpu_hw_37xx_irq_wdt_mss_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
ivpu_hw_37xx_irq_wdt_nce_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
ivpu_hw_37xx_irq_noc_firewall_handler(vdev);
return status;
}
/* Handler for IRQs from Buttress core (irqB) */
static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
bool schedule_recovery = false;
if (status == 0)
return 0;
/* Disable global interrupt before handling local buttress interrupts */
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
REGB_WR32(VPU_37XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
REGB_WR32(VPU_37XX_BUTTRESS_UFI_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
/* This must be done after interrupts are cleared at the source. */
if (IVPU_WA(interrupt_clear_with_0))
/*
* Writing 1 triggers an interrupt, so we can't perform read update write.
* Clear local interrupt status by writing 0 to all bits.
*/
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
else
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
/* Re-enable global interrupt */
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
return status;
}
static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
{
struct ivpu_device *vdev = ptr;
u32 ret_irqv, ret_irqb;
ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
return IRQ_RETVAL(ret_irqb | ret_irqv);
}
static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
{
u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
u32 irqb = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev))
ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
ivpu_err(vdev, "WDT MSS timeout detected\n");
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
ivpu_err(vdev, "WDT NCE timeout detected\n");
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
ivpu_err(vdev, "NOC Firewall irq detected\n");
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
}
}
const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.info_init = ivpu_hw_37xx_info_init,
.power_up = ivpu_hw_37xx_power_up,
.is_idle = ivpu_hw_37xx_is_idle,
.power_down = ivpu_hw_37xx_power_down,
.boot_fw = ivpu_hw_37xx_boot_fw,
.wdt_disable = ivpu_hw_37xx_wdt_disable,
.diagnose_failure = ivpu_hw_37xx_diagnose_failure,
.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
.reg_db_set = ivpu_hw_37xx_reg_db_set,
.reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get,
.reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get,
.reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set,
.irq_clear = ivpu_hw_37xx_irq_clear,
.irq_enable = ivpu_hw_37xx_irq_enable,
.irq_disable = ivpu_hw_37xx_irq_disable,
.irq_handler = ivpu_hw_37xx_irq_handler,
};
| linux-master | drivers/accel/ivpu/ivpu_hw_37xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/reboot.h>
#include "vpu_boot_api.h"
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_fw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"
static bool ivpu_disable_recovery;
module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
MODULE_PARM_DESC(disable_recovery, "Disables recovery when VPU hang is detected");
#define PM_RESCHEDULE_LIMIT 5
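/*
 * Cold boot preparation: reset all command queue contexts and the IPC layer,
 * reload the firmware image and point the next boot at the cold-boot entry
 * point.
 */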
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
ivpu_cmdq_reset_all_contexts(vdev);
ivpu_ipc_reset(vdev);
ivpu_fw_load(vdev);
fw->entry_point = fw->cold_boot_entry_point;
}
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
struct vpu_boot_params *bp = fw->mem->kvaddr;
if (!bp->save_restore_ret_address) {
ivpu_pm_prepare_cold_boot(vdev);
return;
}
ivpu_dbg(vdev, FW_BOOT, "Save/restore entry point %llx", bp->save_restore_ret_address);
fw->entry_point = bp->save_restore_ret_address;
}
static int ivpu_suspend(struct ivpu_device *vdev)
{
int ret;
ret = ivpu_shutdown(vdev);
if (ret) {
ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
return ret;
}
return ret;
}
static int ivpu_resume(struct ivpu_device *vdev)
{
int ret;
retry:
ret = ivpu_hw_power_up(vdev);
if (ret) {
ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
return ret;
}
ret = ivpu_mmu_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
ivpu_hw_power_down(vdev);
return ret;
}
ret = ivpu_boot(vdev);
if (ret) {
ivpu_mmu_disable(vdev);
ivpu_hw_power_down(vdev);
if (!ivpu_fw_is_cold_boot(vdev)) {
ivpu_warn(vdev, "Failed to resume the FW: %d. Retrying cold boot..\n", ret);
ivpu_pm_prepare_cold_boot(vdev);
goto retry;
} else {
ivpu_err(vdev, "Failed to resume the FW: %d\n", ret);
}
}
return ret;
}
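/*
 * Recovery worker: resets the device through a PCI function-level reset,
 * retrying while the reset lock is contended (-EAGAIN), and notifies user
 * space with an IVPU_PM_EVENT uevent once done.
 */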
static void ivpu_pm_recovery_work(struct work_struct *work)
{
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
struct ivpu_device *vdev = pm->vdev;
char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
int ret;
retry:
ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
cond_resched();
goto retry;
}
if (ret && ret != -EAGAIN)
ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
}
void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
{
struct ivpu_pm_info *pm = vdev->pm;
if (ivpu_disable_recovery) {
ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n");
return;
}
if (ivpu_is_fpga(vdev)) {
ivpu_err(vdev, "Recovery not available on FPGA\n");
return;
}
/* Schedule recovery if it's not in progress */
if (atomic_cmpxchg(&pm->in_reset, 0, 1) == 0) {
ivpu_hw_irq_disable(vdev);
queue_work(system_long_wq, &pm->recovery_work);
}
}
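/*
 * System suspend: wait up to the TDR timeout for the VPU to go idle, shut it
 * down, prepare a warm boot for the next resume and put the PCI device into
 * D3hot.
 */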
int ivpu_pm_suspend_cb(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct ivpu_device *vdev = to_ivpu_device(drm);
unsigned long timeout;
ivpu_dbg(vdev, PM, "Suspend..\n");
timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
while (!ivpu_hw_is_idle(vdev)) {
cond_resched();
if (time_after_eq(jiffies, timeout)) {
ivpu_err(vdev, "Failed to enter idle on system suspend\n");
return -EBUSY;
}
}
ivpu_suspend(vdev);
ivpu_pm_prepare_warm_boot(vdev);
pci_save_state(to_pci_dev(dev));
pci_set_power_state(to_pci_dev(dev), PCI_D3hot);
ivpu_dbg(vdev, PM, "Suspend done.\n");
return 0;
}
int ivpu_pm_resume_cb(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct ivpu_device *vdev = to_ivpu_device(drm);
int ret;
ivpu_dbg(vdev, PM, "Resume..\n");
pci_set_power_state(to_pci_dev(dev), PCI_D0);
pci_restore_state(to_pci_dev(dev));
ret = ivpu_resume(vdev);
if (ret)
ivpu_err(vdev, "Failed to resume: %d\n", ret);
ivpu_dbg(vdev, PM, "Resume done.\n");
return ret;
}
int ivpu_pm_runtime_suspend_cb(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct ivpu_device *vdev = to_ivpu_device(drm);
int ret;
ivpu_dbg(vdev, PM, "Runtime suspend..\n");
if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
vdev->pm->suspend_reschedule_counter);
pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
vdev->pm->suspend_reschedule_counter--;
return -EAGAIN;
}
ret = ivpu_suspend(vdev);
if (ret)
ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret);
if (!vdev->pm->suspend_reschedule_counter) {
ivpu_warn(vdev, "VPU failed to enter idle, force suspended.\n");
ivpu_pm_prepare_cold_boot(vdev);
} else {
ivpu_pm_prepare_warm_boot(vdev);
}
vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
ivpu_dbg(vdev, PM, "Runtime suspend done.\n");
return 0;
}
int ivpu_pm_runtime_resume_cb(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct ivpu_device *vdev = to_ivpu_device(drm);
int ret;
ivpu_dbg(vdev, PM, "Runtime resume..\n");
ret = ivpu_resume(vdev);
if (ret)
ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
ivpu_dbg(vdev, PM, "Runtime resume done.\n");
return ret;
}
int ivpu_rpm_get(struct ivpu_device *vdev)
{
int ret;
ret = pm_runtime_resume_and_get(vdev->drm.dev);
if (!drm_WARN_ON(&vdev->drm, ret < 0))
vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
return ret;
}
void ivpu_rpm_put(struct ivpu_device *vdev)
{
pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);
}
void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
{
struct ivpu_device *vdev = pci_get_drvdata(pdev);
pm_runtime_get_sync(vdev->drm.dev);
ivpu_dbg(vdev, PM, "Pre-reset..\n");
atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->in_reset, 1);
ivpu_shutdown(vdev);
ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
ivpu_dbg(vdev, PM, "Pre-reset done.\n");
}
void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
{
struct ivpu_device *vdev = pci_get_drvdata(pdev);
int ret;
ivpu_dbg(vdev, PM, "Post-reset..\n");
ret = ivpu_resume(vdev);
if (ret)
ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
atomic_set(&vdev->pm->in_reset, 0);
ivpu_dbg(vdev, PM, "Post-reset done.\n");
pm_runtime_put_autosuspend(vdev->drm.dev);
}
int ivpu_pm_init(struct ivpu_device *vdev)
{
struct device *dev = vdev->drm.dev;
struct ivpu_pm_info *pm = vdev->pm;
pm->vdev = vdev;
pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
atomic_set(&pm->in_reset, 0);
INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
pm_runtime_use_autosuspend(dev);
if (ivpu_disable_recovery)
pm_runtime_set_autosuspend_delay(dev, -1);
else if (ivpu_is_silicon(vdev))
pm_runtime_set_autosuspend_delay(dev, 100);
else
pm_runtime_set_autosuspend_delay(dev, 60000);
return 0;
}
void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
{
cancel_work_sync(&vdev->pm->recovery_work);
}
void ivpu_pm_enable(struct ivpu_device *vdev)
{
struct device *dev = vdev->drm.dev;
pm_runtime_set_active(dev);
pm_runtime_allow(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
void ivpu_pm_disable(struct ivpu_device *vdev)
{
pm_runtime_get_noresume(vdev->drm.dev);
pm_runtime_forbid(vdev->drm.dev);
}
| linux-master | drivers/accel/ivpu/ivpu_pm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include <uapi/drm/ivpu_accel.h>
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
static int bo_list_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct drm_printer p = drm_seq_file_printer(s);
ivpu_bo_list(node->minor->dev, &p);
return 0;
}
static int fw_name_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
seq_printf(s, "%s\n", vdev->fw->name);
return 0;
}
static int fw_trace_capability_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
u64 trace_hw_component_mask;
u32 trace_destination_mask;
int ret;
ret = ivpu_jsm_trace_get_capability(vdev, &trace_destination_mask,
&trace_hw_component_mask);
if (!ret) {
seq_printf(s,
"trace_destination_mask: %#18x\n"
"trace_hw_component_mask: %#18llx\n",
trace_destination_mask, trace_hw_component_mask);
}
return 0;
}
static int fw_trace_config_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
	/*
* WA: VPU_JSM_MSG_TRACE_GET_CONFIG command is not working yet,
* so we use values from vdev->fw instead of calling ivpu_jsm_trace_get_config()
*/
u32 trace_level = vdev->fw->trace_level;
u32 trace_destination_mask = vdev->fw->trace_destination_mask;
u64 trace_hw_component_mask = vdev->fw->trace_hw_component_mask;
seq_printf(s,
"trace_level: %#18x\n"
"trace_destination_mask: %#18x\n"
"trace_hw_component_mask: %#18llx\n",
trace_level, trace_destination_mask, trace_hw_component_mask);
return 0;
}
static int last_bootmode_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
seq_printf(s, "%s\n", (vdev->pm->is_warmboot) ? "warmboot" : "coldboot");
return 0;
}
static int reset_counter_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_counter));
return 0;
}
static int reset_pending_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
seq_printf(s, "%d\n", atomic_read(&vdev->pm->in_reset));
return 0;
}
static const struct drm_info_list vdev_debugfs_list[] = {
{"bo_list", bo_list_show, 0},
{"fw_name", fw_name_show, 0},
{"fw_trace_capability", fw_trace_capability_show, 0},
{"fw_trace_config", fw_trace_config_show, 0},
{"last_bootmode", last_bootmode_show, 0},
{"reset_counter", reset_counter_show, 0},
{"reset_pending", reset_pending_show, 0},
};
static int fw_log_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = s->private;
struct drm_printer p = drm_seq_file_printer(s);
ivpu_fw_log_print(vdev, true, &p);
return 0;
}
static int fw_log_fops_open(struct inode *inode, struct file *file)
{
return single_open(file, fw_log_show, inode->i_private);
}
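/* Any non-empty write to the fw_log node clears the firmware log buffer. */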
static ssize_t
fw_log_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct seq_file *s = file->private_data;
struct ivpu_device *vdev = s->private;
if (!size)
return -EINVAL;
ivpu_fw_log_clear(vdev);
return size;
}
static const struct file_operations fw_log_fops = {
.owner = THIS_MODULE,
.open = fw_log_fops_open,
.write = fw_log_fops_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static ssize_t
fw_trace_destination_mask_fops_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
struct ivpu_fw_info *fw = vdev->fw;
u32 trace_destination_mask;
int ret;
ret = kstrtou32_from_user(user_buf, size, 0, &trace_destination_mask);
if (ret < 0)
return ret;
fw->trace_destination_mask = trace_destination_mask;
ivpu_jsm_trace_set_config(vdev, fw->trace_level, trace_destination_mask,
fw->trace_hw_component_mask);
return size;
}
static const struct file_operations fw_trace_destination_mask_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = fw_trace_destination_mask_fops_write,
};
static ssize_t
fw_trace_hw_comp_mask_fops_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
struct ivpu_fw_info *fw = vdev->fw;
u64 trace_hw_component_mask;
int ret;
ret = kstrtou64_from_user(user_buf, size, 0, &trace_hw_component_mask);
if (ret < 0)
return ret;
fw->trace_hw_component_mask = trace_hw_component_mask;
ivpu_jsm_trace_set_config(vdev, fw->trace_level, fw->trace_destination_mask,
trace_hw_component_mask);
return size;
}
static const struct file_operations fw_trace_hw_comp_mask_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = fw_trace_hw_comp_mask_fops_write,
};
static ssize_t
fw_trace_level_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
struct ivpu_fw_info *fw = vdev->fw;
u32 trace_level;
int ret;
ret = kstrtou32_from_user(user_buf, size, 0, &trace_level);
if (ret < 0)
return ret;
fw->trace_level = trace_level;
ivpu_jsm_trace_set_config(vdev, trace_level, fw->trace_destination_mask,
fw->trace_hw_component_mask);
return size;
}
static const struct file_operations fw_trace_level_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = fw_trace_level_fops_write,
};
static ssize_t
ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
if (!size)
return -EINVAL;
if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
return -ENODEV;
if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
return -ENODEV;
return size;
}
static ssize_t
ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
if (!size)
return -EINVAL;
ivpu_pm_schedule_recovery(vdev);
return size;
}
static const struct file_operations ivpu_force_recovery_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = ivpu_force_recovery_fn,
};
static const struct file_operations ivpu_reset_engine_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = ivpu_reset_engine_fn,
};
void ivpu_debugfs_init(struct drm_minor *minor)
{
struct ivpu_device *vdev = to_ivpu_device(minor->dev);
drm_debugfs_create_files(vdev_debugfs_list, ARRAY_SIZE(vdev_debugfs_list),
minor->debugfs_root, minor);
debugfs_create_file("force_recovery", 0200, minor->debugfs_root, vdev,
&ivpu_force_recovery_fops);
debugfs_create_file("fw_log", 0644, minor->debugfs_root, vdev,
&fw_log_fops);
debugfs_create_file("fw_trace_destination_mask", 0200, minor->debugfs_root, vdev,
&fw_trace_destination_mask_fops);
debugfs_create_file("fw_trace_hw_comp_mask", 0200, minor->debugfs_root, vdev,
&fw_trace_hw_comp_mask_fops);
debugfs_create_file("fw_trace_level", 0200, minor->debugfs_root, vdev,
&fw_trace_level_fops);
debugfs_create_file("reset_engine", 0200, minor->debugfs_root, vdev,
&ivpu_reset_engine_fops);
}
| linux-master | drivers/accel/ivpu/ivpu_debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include "ivpu_drv.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
u64 jobq_base, u32 jobq_size)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
struct vpu_jsm_msg resp;
int ret = 0;
req.payload.register_db.db_idx = db_id;
req.payload.register_db.jobq_base = jobq_base;
req.payload.register_db.jobq_size = jobq_size;
req.payload.register_db.host_ssid = ctx_id;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret) {
ivpu_err(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
return ret;
}
ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id);
return 0;
}
int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
struct vpu_jsm_msg resp;
int ret = 0;
req.payload.unregister_db.db_idx = db_id;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret) {
ivpu_warn(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
return ret;
}
ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id);
return 0;
}
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
struct vpu_jsm_msg resp;
int ret;
if (engine > VPU_ENGINE_COPY)
return -EINVAL;
req.payload.query_engine_hb.engine_idx = engine;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret) {
ivpu_err(vdev, "Failed to get heartbeat from engine %d: %d\n", engine, ret);
return ret;
}
*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
return ret;
}
int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
struct vpu_jsm_msg resp;
int ret;
if (engine > VPU_ENGINE_COPY)
return -EINVAL;
req.payload.engine_reset.engine_idx = engine;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_err(vdev, "Failed to reset engine %d: %d\n", engine, ret);
return ret;
}
int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
struct vpu_jsm_msg resp;
int ret;
if (engine > VPU_ENGINE_COPY)
return -EINVAL;
req.payload.engine_preempt.engine_idx = engine;
req.payload.engine_preempt.preempt_id = preempt_id;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_err(vdev, "Failed to preempt engine %d: %d\n", engine, ret);
return ret;
}
int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
struct vpu_jsm_msg resp;
int ret;
strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn(vdev, "Failed to send command \"%s\": ret %d\n", command, ret);
return ret;
}
int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
u64 *trace_hw_component_mask)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
struct vpu_jsm_msg resp;
int ret;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret) {
ivpu_warn(vdev, "Failed to get trace capability: %d\n", ret);
return ret;
}
*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;
return ret;
}
int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
u64 trace_hw_component_mask)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
struct vpu_jsm_msg resp;
int ret;
req.payload.trace_config.trace_level = trace_level;
req.payload.trace_config.trace_destination_mask = trace_destination_mask;
req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn(vdev, "Failed to set config: %d\n", ret);
return ret;
}
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
struct vpu_jsm_msg resp;
req.payload.ssid_release.host_ssid = host_ssid;
return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}
| linux-master | drivers/accel/ivpu/ivpu_jsm_msg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw.h"
#include "ivpu_hw_40xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_ipc.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"
#include <linux/dmi.h>
#define TILE_MAX_NUM 6
#define TILE_MAX_MASK 0x3f
#define LNL_HW_ID 0x4040
#define SKU_TILE_SHIFT 0u
#define SKU_TILE_MASK 0x0000ffffu
#define SKU_HW_ID_SHIFT 16u
#define SKU_HW_ID_MASK 0xffff0000u
#define PLL_CONFIG_DEFAULT 0x1
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_REF_CLK_FREQ (50 * 1000000)
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
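/* With the 50 MHz reference clock a work-point ratio of e.g. 0x20 (32) corresponds to 1.6 GHz */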
#define PLL_PROFILING_FREQ_DEFAULT 38400000
#define PLL_PROFILING_FREQ_HIGH 400000000
#define TIM_SAFE_ENABLE 0xf1d0dead
#define TIM_WATCHDOG_RESET_VALUE 0xffffffff
#define TIMEOUT_US (150 * USEC_PER_MSEC)
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define WEIGHTS_DEFAULT 0xf711f711u
#define WEIGHTS_ATS_DEFAULT 0x0000f711u
#define ICB_0_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
#define ICB_1_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR)))
#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
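/*
 * LOCAL_INT_MASK is a mask register: a set bit masks the corresponding
 * interrupt, so enabling writes the complement of BUTTRESS_IRQ_MASK and
 * disabling writes all ones.
 */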
#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
(REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
(REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
(REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
(REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
(REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
(REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
static char *ivpu_platform_to_str(u32 platform)
{
switch (platform) {
case IVPU_PLATFORM_SILICON:
return "IVPU_PLATFORM_SILICON";
case IVPU_PLATFORM_SIMICS:
return "IVPU_PLATFORM_SIMICS";
case IVPU_PLATFORM_FPGA:
return "IVPU_PLATFORM_FPGA";
default:
return "Invalid platform";
}
}
static const struct dmi_system_id ivpu_dmi_platform_simulation[] = {
{
.ident = "Intel Simics",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
},
},
{
.ident = "Intel Simics",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "Simics"),
},
},
{ }
};
static void ivpu_hw_read_platform(struct ivpu_device *vdev)
{
if (dmi_check_system(ivpu_dmi_platform_simulation))
vdev->platform = IVPU_PLATFORM_SIMICS;
else
vdev->platform = IVPU_PLATFORM_SILICON;
ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
ivpu_platform_to_str(vdev->platform), vdev->platform);
}
static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
vdev->wa.clear_runtime_mem = false;
if (ivpu_hw_gen(vdev) == IVPU_HW_40XX)
vdev->wa.disable_clock_relinquish = true;
}
static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
{
if (ivpu_is_fpga(vdev)) {
vdev->timeout.boot = 100000;
vdev->timeout.jsm = 50000;
vdev->timeout.tdr = 2000000;
vdev->timeout.reschedule_suspend = 1000;
} else if (ivpu_is_simics(vdev)) {
vdev->timeout.boot = 50;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 10000;
vdev->timeout.reschedule_suspend = 10;
} else {
vdev->timeout.boot = 1000;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 2000;
vdev->timeout.reschedule_suspend = 10;
}
}
static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
{
return REGB_POLL_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}
static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
u16 target_ratio, u16 epp, u16 config, u16 cdyn)
{
int ret;
u32 val;
ret = ivpu_pll_wait_for_cmd_send(vdev);
if (ret) {
ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
return ret;
}
val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0);
val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, val);
val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1);
val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, epp, val);
REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, val);
val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2);
val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CDYN, cdyn, val);
REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, val);
val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_CMD);
val = REG_SET_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, val);
REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_CMD, val);
ret = ivpu_pll_wait_for_cmd_send(vdev);
if (ret)
ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);
return ret;
}
static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
{
return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
u8 fuse_min_ratio, fuse_pn_ratio, fuse_max_ratio;
u32 fmin_fuse, fmax_fuse;
fmin_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMIN_FUSE);
fuse_min_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
fuse_pn_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
fmax_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMAX_FUSE);
fuse_max_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}
static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
{
u16 config = enable ? PLL_CONFIG_DEFAULT : 0;
u16 cdyn = enable ? PLL_CDYN_DEFAULT : 0;
u16 epp = enable ? PLL_EPP_DEFAULT : 0;
struct ivpu_hw_info *hw = vdev->hw;
u16 target_ratio = hw->pll.pn_ratio;
int ret;
ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, epp: 0x%x, config: 0x%x, cdyn: 0x%x\n",
PLL_RATIO_TO_FREQ(target_ratio), epp, config, cdyn);
ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio,
target_ratio, epp, config, cdyn);
if (ret) {
ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
return ret;
}
if (enable) {
ret = ivpu_pll_wait_for_status_ready(vdev);
if (ret) {
ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
return ret;
}
}
return 0;
}
static int ivpu_pll_enable(struct ivpu_device *vdev)
{
return ivpu_pll_drive(vdev, true);
}
static int ivpu_pll_disable(struct ivpu_device *vdev)
{
return ivpu_pll_drive(vdev, false);
}
static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);
if (enable) {
val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
} else {
val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
}
REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val);
}
static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);
if (enable) {
val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
} else {
val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
}
REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val);
}
static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
}
static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);
if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
}
static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);
if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
}
static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
return -EIO;
return 0;
}
static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);
if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
return -EIO;
return 0;
}
static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);
if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
return -EIO;
return 0;
}
static void ivpu_boot_idle_gen_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);
if (enable)
val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
else
val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val);
}
static int ivpu_boot_host_ss_check(struct ivpu_device *vdev)
{
int ret;
ret = ivpu_boot_noc_qreqn_check(vdev, 0x0);
if (ret) {
ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
return ret;
}
ret = ivpu_boot_noc_qacceptn_check(vdev, 0x0);
if (ret) {
ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
return ret;
}
ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
if (ret)
ivpu_err(vdev, "Failed qdeny check %d\n", ret);
return ret;
}
static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
{
int ret;
u32 val;
val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
if (enable)
val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
else
val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val);
ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
if (ret) {
ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
return ret;
}
ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
if (ret) {
ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
return ret;
}
if (enable) {
REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
}
return ret;
}
static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
{
return ivpu_boot_host_ss_axi_drive(vdev, true);
}
static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
{
int ret;
u32 val;
val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
if (enable) {
val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
} else {
val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
}
REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val);
ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
if (ret) {
ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
return ret;
}
ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
if (ret)
ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
return ret;
}
static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
{
return ivpu_boot_host_ss_top_noc_drive(vdev, true);
}
static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
if (enable)
val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
else
val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
if (enable)
ndelay(500);
}
static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0);
if (enable)
val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
else
val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
if (!enable)
ndelay(500);
}
static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
if (ivpu_is_fpga(vdev))
return 0;
return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU,
exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
}
static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0);
if (enable)
val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
else
val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val);
}
static int ivpu_boot_cpu_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN);
if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val))
return -EIO;
return 0;
}
static int ivpu_boot_cpu_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY);
if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val))
return -EIO;
return 0;
}
static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
{
int ret;
ivpu_boot_pwr_island_trickle_drive(vdev, true);
ivpu_boot_pwr_island_drive(vdev, true);
ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
if (ret) {
ivpu_err(vdev, "Timed out waiting for power island status\n");
return ret;
}
ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0);
if (ret) {
ivpu_err(vdev, "Failed qrenqn check %d\n", ret);
return ret;
}
ivpu_boot_host_ss_clk_drive(vdev, true);
ivpu_boot_host_ss_rst_drive(vdev, true);
ivpu_boot_pwr_island_isolation_drive(vdev, false);
return ret;
}
static int ivpu_boot_soc_cpu_drive(struct ivpu_device *vdev, bool enable)
{
int ret;
u32 val;
val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);
if (enable)
val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
else
val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val);
ret = ivpu_boot_cpu_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
if (ret) {
ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
return ret;
}
ret = ivpu_boot_cpu_noc_qdeny_check(vdev, 0x0);
if (ret)
ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
return ret;
}
static int ivpu_boot_soc_cpu_enable(struct ivpu_device *vdev)
{
return ivpu_boot_soc_cpu_drive(vdev, true);
}
static int ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
{
int ret;
u32 val;
u64 val64;
ret = ivpu_boot_soc_cpu_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
return ret;
}
val64 = vdev->fw->entry_point;
val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);
val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);
ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
ivpu_fw_is_cold_boot(vdev) ? "cold boot" : "resume");
return 0;
}
static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
{
int ret;
u32 val;
ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
return ret;
}
val = REGB_RD32(VPU_40XX_BUTTRESS_D0I3_CONTROL);
if (enable)
val = REG_SET_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val);
else
val = REG_CLR_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val);
REGB_WR32(VPU_40XX_BUTTRESS_D0I3_CONTROL, val);
ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
return ret;
}
return 0;
}
static bool ivpu_tile_disable_check(u32 config)
{
/* Allowed values: 0 or one bit from range 0-5 (6 tiles) */
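	/* e.g. 0x00 (no tile fused off) and 0x04 (tile 2 fused off) pass, 0x06 does not */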
if (config == 0)
return true;
if (config > BIT(TILE_MAX_NUM - 1))
return false;
if ((config & (config - 1)) == 0)
return true;
return false;
}
static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
u32 tile_disable;
u32 tile_enable;
u32 fuse;
fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
if (!REG_TEST_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, VALID, fuse)) {
ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse);
return -EIO;
}
tile_disable = REG_GET_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, CONFIG, fuse);
if (!ivpu_tile_disable_check(tile_disable)) {
ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", tile_disable);
return -EIO;
}
if (tile_disable)
ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n",
TILE_MAX_NUM - 1, ffs(tile_disable) - 1);
else
ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
tile_enable = (~tile_disable) & TILE_MAX_MASK;
hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
hw->tile_fuse = tile_disable;
hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
ivpu_pll_init_frequency_ratios(vdev);
ivpu_hw_init_range(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
ivpu_hw_init_range(&vdev->hw->ranges.user, 0x80000000, SZ_256M);
ivpu_hw_init_range(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
ivpu_hw_init_range(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
return 0;
}
static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
{
int ret;
u32 val;
ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Wait for *_TRIGGER timed out\n");
return ret;
}
val = REGB_RD32(VPU_40XX_BUTTRESS_IP_RESET);
val = REG_SET_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, val);
REGB_WR32(VPU_40XX_BUTTRESS_IP_RESET, val);
ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Timed out waiting for RESET completion\n");
return ret;
}
static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev)
{
int ret;
if (IVPU_WA(punit_disabled))
return 0;
ret = ivpu_boot_d0i3_drive(vdev, true);
if (ret)
ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
udelay(5); /* VPU requires 5 us to complete the transition */
return ret;
}
static int ivpu_hw_40xx_d0i3_disable(struct ivpu_device *vdev)
{
int ret;
if (IVPU_WA(punit_disabled))
return 0;
ret = ivpu_boot_d0i3_drive(vdev, false);
if (ret)
ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
return ret;
}
static void ivpu_hw_40xx_profiling_freq_reg_set(struct ivpu_device *vdev)
{
u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT)
val = REG_CLR_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val);
else
val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val);
REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val);
}
static void ivpu_hw_40xx_ats_print(struct ivpu_device *vdev)
{
ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n",
REGB_RD32(VPU_40XX_BUTTRESS_HM_ATS) ? "Enable" : "Disable");
}
static void ivpu_hw_40xx_clock_relinquish_disable(struct ivpu_device *vdev)
{
u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, DISABLE_CLK_RELINQUISH, val);
REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val);
}
static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev)
{
int ret;
ret = ivpu_hw_40xx_reset(vdev);
if (ret) {
ivpu_err(vdev, "Failed to reset HW: %d\n", ret);
return ret;
}
ivpu_hw_read_platform(vdev);
ivpu_hw_wa_init(vdev);
ivpu_hw_timeouts_init(vdev);
ret = ivpu_hw_40xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
ret = ivpu_pll_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
return ret;
}
if (IVPU_WA(disable_clock_relinquish))
ivpu_hw_40xx_clock_relinquish_disable(vdev);
ivpu_hw_40xx_profiling_freq_reg_set(vdev);
ivpu_hw_40xx_ats_print(vdev);
ret = ivpu_boot_host_ss_check(vdev);
if (ret) {
ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
return ret;
}
ivpu_boot_idle_gen_drive(vdev, false);
ret = ivpu_boot_pwr_domain_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
return ret;
}
ret = ivpu_boot_host_ss_axi_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
return ret;
}
ret = ivpu_boot_host_ss_top_noc_enable(vdev);
if (ret)
ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
return ret;
}
static int ivpu_hw_40xx_boot_fw(struct ivpu_device *vdev)
{
int ret;
ivpu_boot_no_snoop_enable(vdev);
ivpu_boot_tbu_mmu_enable(vdev);
ret = ivpu_boot_soc_cpu_boot(vdev);
if (ret)
ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);
return ret;
}
static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev)
{
u32 val;
if (IVPU_WA(punit_disabled))
return true;
val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
return REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, val) &&
REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, val);
}
static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
{
int ret = 0;
if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev))
ivpu_warn(vdev, "Failed to reset the VPU\n");
if (ivpu_pll_disable(vdev)) {
ivpu_err(vdev, "Failed to disable PLL\n");
ret = -EIO;
}
if (ivpu_hw_40xx_d0i3_enable(vdev)) {
ivpu_err(vdev, "Failed to enter D0I3\n");
ret = -EIO;
}
return ret;
}
static void ivpu_hw_40xx_wdt_disable(struct ivpu_device *vdev)
{
u32 val;
REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0);
val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG);
val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
}
/* Register indirect accesses */
static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
{
u32 pll_curr_ratio;
pll_curr_ratio = REGB_RD32(VPU_40XX_BUTTRESS_PLL_FREQ);
pll_curr_ratio &= VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK;
return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}
static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
}
static u32 ivpu_hw_40xx_reg_telemetry_size_get(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE);
}
static u32 ivpu_hw_40xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
}
static void ivpu_hw_40xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;
u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET);
REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
static u32 ivpu_hw_40xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM);
}
static u32 ivpu_hw_40xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
static void ivpu_hw_40xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}
static void ivpu_hw_40xx_irq_clear(struct ivpu_device *vdev)
{
REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
}
static void ivpu_hw_40xx_irq_enable(struct ivpu_device *vdev)
{
REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
}
static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev)
{
REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul);
}
static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
/* TODO: For LNN hang consider engine reset instead of full recovery */
ivpu_pm_schedule_recovery(vdev);
}
static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
ivpu_hw_wdt_disable(vdev);
ivpu_pm_schedule_recovery(vdev);
}
static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
ivpu_pm_schedule_recovery(vdev);
}
/* Handler for IRQs from VPU core (irqV) */
static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
{
u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
irqreturn_t ret = IRQ_NONE;
if (!status)
return IRQ_NONE;
REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
ivpu_mmu_irq_evtq_handler(vdev);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
ret |= ivpu_ipc_irq_handler(vdev);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
ivpu_mmu_irq_gerr_handler(vdev);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
ivpu_hw_40xx_irq_wdt_mss_handler(vdev);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
ivpu_hw_40xx_irq_wdt_nce_handler(vdev);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
ivpu_hw_40xx_irq_noc_firewall_handler(vdev);
return ret;
}
/* Handler for IRQs from Buttress core (irqB) */
static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
bool schedule_recovery = false;
u32 status = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
if (status == 0)
return IRQ_NONE;
/* Disable global interrupt before handling local buttress interrupts */
REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE");
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1),
REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2));
REGB_WR32(VPU_40XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, status)) {
ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG));
REGB_WR32(VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, status)) {
ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG));
REGB_WR32(VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, status)) {
ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x",
REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW),
REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH));
REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR, 0x1);
schedule_recovery = true;
}
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, status)) {
ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x",
REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW),
REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH));
REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR, 0x1);
schedule_recovery = true;
}
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, status)) {
ivpu_err(vdev, "Survivability error detected\n");
schedule_recovery = true;
}
/* This must be done after interrupts are cleared at the source. */
REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
/* Re-enable global interrupt */
REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
return IRQ_HANDLED;
}
static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr)
{
struct ivpu_device *vdev = ptr;
irqreturn_t ret = IRQ_NONE;
ret |= ivpu_hw_40xx_irqv_handler(vdev, irq);
ret |= ivpu_hw_40xx_irqb_handler(vdev, irq);
if (ret & IRQ_WAKE_THREAD)
return IRQ_WAKE_THREAD;
return ret;
}
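/*
* Illustrative note (editorial addition, not part of the original driver):
* combining the two handler results with a bitwise OR works because the
* irqreturn_t values are bit flags (IRQ_NONE is 0, IRQ_HANDLED is BIT(0),
* IRQ_WAKE_THREAD is BIT(1)), so a requested threaded wakeup can be detected
* with the mask test above while plain IRQ_HANDLED/IRQ_NONE fall through.
*/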
static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev)
{
u32 irqv = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
u32 irqb = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
if (ivpu_hw_40xx_reg_ipc_rx_count_get(vdev))
ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
ivpu_err(vdev, "WDT MSS timeout detected\n");
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
ivpu_err(vdev, "WDT NCE timeout detected\n");
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
ivpu_err(vdev, "NOC Firewall irq detected\n");
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb)) {
ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1),
REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2));
}
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, irqb))
ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG));
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, irqb))
ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG));
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, irqb))
ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW),
REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH));
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, irqb))
ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW),
REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH));
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, irqb))
ivpu_err(vdev, "Survivability error detected\n");
}
const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
.info_init = ivpu_hw_40xx_info_init,
.power_up = ivpu_hw_40xx_power_up,
.is_idle = ivpu_hw_40xx_is_idle,
.power_down = ivpu_hw_40xx_power_down,
.boot_fw = ivpu_hw_40xx_boot_fw,
.wdt_disable = ivpu_hw_40xx_wdt_disable,
.diagnose_failure = ivpu_hw_40xx_diagnose_failure,
.reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
.reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
.reg_db_set = ivpu_hw_40xx_reg_db_set,
.reg_ipc_rx_addr_get = ivpu_hw_40xx_reg_ipc_rx_addr_get,
.reg_ipc_rx_count_get = ivpu_hw_40xx_reg_ipc_rx_count_get,
.reg_ipc_tx_set = ivpu_hw_40xx_reg_ipc_tx_set,
.irq_clear = ivpu_hw_40xx_irq_clear,
.irq_enable = ivpu_hw_40xx_irq_enable,
.irq_disable = ivpu_hw_40xx_irq_disable,
.irq_handler = ivpu_hw_40xx_irq_handler,
};
| linux-master | drivers/accel/ivpu/ivpu_hw_40xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include "vpu_boot_api.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#define IVPU_FW_LOG_LINE_LENGTH 256
unsigned int ivpu_log_level = IVPU_FW_LOG_ERROR;
module_param(ivpu_log_level, uint, 0444);
MODULE_PARM_DESC(ivpu_log_level,
"VPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
" info=" __stringify(IVPU_FW_LOG_INFO)
" warn=" __stringify(IVPU_FW_LOG_WARN)
" error=" __stringify(IVPU_FW_LOG_ERROR)
" fatal=" __stringify(IVPU_FW_LOG_FATAL));
static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
struct vpu_tracing_buffer_header **log_header)
{
struct vpu_tracing_buffer_header *log;
if ((*offset + sizeof(*log)) > bo->base.size)
return -EINVAL;
log = bo->kvaddr + *offset;
if (log->vpu_canary_start != VPU_TRACING_BUFFER_CANARY)
return -EINVAL;
if (log->header_size < sizeof(*log) || log->header_size > 1024) {
ivpu_dbg(vdev, FW_BOOT, "Invalid header size 0x%x\n", log->header_size);
return -EINVAL;
}
if ((char *)log + log->size > (char *)bo->kvaddr + bo->base.size) {
ivpu_dbg(vdev, FW_BOOT, "Invalid log size 0x%x\n", log->size);
return -EINVAL;
}
*log_header = log;
*offset += log->size;
ivpu_dbg(vdev, FW_BOOT,
"FW log name \"%s\", write offset 0x%x size 0x%x, wrap count %d, hdr version %d size %d format %d, alignment %d",
log->name, log->write_index, log->size, log->wrap_count, log->header_version,
log->header_size, log->format, log->alignment);
return 0;
}
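/*
* Illustrative note (editorial addition, not part of the original driver):
* fw_log_ptr() advances *offset by the full size of the tracing buffer it
* just validated, so calling it repeatedly with the same offset variable
* walks all tracing buffers packed back to back in the buffer object until
* the canary, header or size checks fail.
*/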
static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
{
char line[IVPU_FW_LOG_LINE_LENGTH];
u32 index = 0;
if (!size || !buffer)
return;
while (size--) {
if (*buffer == '\n' || *buffer == 0) {
line[index] = 0;
if (index != 0)
drm_printf(p, "%s\n", line);
index = 0;
buffer++;
continue;
}
if (index == IVPU_FW_LOG_LINE_LENGTH - 1) {
line[index] = 0;
index = 0;
drm_printf(p, "%s\n", line);
}
if (*buffer != '\r' && (isprint(*buffer) || iscntrl(*buffer)))
line[index++] = *buffer;
buffer++;
}
line[index] = 0;
if (index != 0)
drm_printf(p, "%s\n", line);
}
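/*
* Illustrative note (editorial addition, not part of the original driver):
* buffer_print() splits the log at '\n' and NUL bytes, hard-wraps lines at
* IVPU_FW_LOG_LINE_LENGTH - 1 characters, and drops carriage returns as well
* as bytes that are neither printable nor control characters.
*/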
static void fw_log_print_buffer(struct ivpu_device *vdev, struct vpu_tracing_buffer_header *log,
const char *prefix, bool only_new_msgs, struct drm_printer *p)
{
char *log_buffer = (void *)log + log->header_size;
u32 log_size = log->size - log->header_size;
u32 log_start = log->read_index;
u32 log_end = log->write_index;
if (!(log->write_index || log->wrap_count) ||
(log->write_index == log->read_index && only_new_msgs)) {
drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
return;
}
drm_printf(p, "==== %s \"%s\" log start ====\n", prefix, log->name);
if (log->write_index > log->read_index) {
buffer_print(log_buffer + log_start, log_end - log_start, p);
} else {
buffer_print(log_buffer + log_end, log_size - log_end, p);
buffer_print(log_buffer, log_end, p);
}
drm_printf(p, "\x1b[0m");
drm_printf(p, "==== %s \"%s\" log end ====\n", prefix, log->name);
}
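/*
* Illustrative note (editorial addition, not part of the original driver):
* the two buffer_print() calls in the wrapped case above restore
* chronological order: once the ring has wrapped (write_index <= read_index),
* the oldest data sits in [write_index, log_size) and the newest in
* [0, write_index), which is exactly the order in which they are printed.
*/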
void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
{
struct vpu_tracing_buffer_header *log_header;
u32 next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
fw_log_print_buffer(vdev, log_header, "VPU critical", only_new_msgs, p);
next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
fw_log_print_buffer(vdev, log_header, "VPU verbose", only_new_msgs, p);
}
void ivpu_fw_log_clear(struct ivpu_device *vdev)
{
struct vpu_tracing_buffer_header *log_header;
u32 next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
log_header->read_index = log_header->write_index;
next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
log_header->read_index = log_header->write_index;
}
| linux-master | drivers/accel/ivpu/ivpu_fw_log.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2017 Pengutronix, Uwe Kleine-König <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "siox.h"
/*
* The lowest bit in the SIOX status word signals if the in-device watchdog is
* ok. If the bit is set, the device is functional.
*
* On writes, the in-device watchdog timer is reset whenever this bit toggles.
*/
#define SIOX_STATUS_WDG 0x01
/*
* Bits 1 to 3 of the status word read as the bitwise negation of what was
* clocked in before. The value clocked in changes in each cycle, which makes
* it possible to detect transmit/receive problems.
*/
#define SIOX_STATUS_COUNTER 0x0e
/*
* Each SIOX device has a 4-bit type number that is neither 0 nor 15. It is
* available in the upper nibble of the read status.
*
* On write these bits are don't care.
*/
#define SIOX_STATUS_TYPE 0xf0
#define CREATE_TRACE_POINTS
#include <trace/events/siox.h>
static bool siox_is_registered;
static void siox_master_lock(struct siox_master *smaster)
{
mutex_lock(&smaster->lock);
}
static void siox_master_unlock(struct siox_master *smaster)
{
mutex_unlock(&smaster->lock);
}
static inline u8 siox_status_clean(u8 status_read, u8 status_written)
{
/*
* bits 3:1 of status sample the respective bit in the status
* byte written in the previous cycle but inverted. So if you wrote the
* status word as 0xa before (counter = 0b101), it is expected to get
* back the counter bits as 0b010.
*
* So given the last status written, this function toggles those counter
* bits in the read value that were unset in the written value. That way the
* counter bits in the return value are all zero iff the bits were read back
* as expected, which simplifies error detection.
*/
return status_read ^ (~status_written & 0xe);
}
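/*
* Illustrative sketch (editorial addition, not part of the original driver):
* a worked example of siox_status_clean(). The helper below is hypothetical
* and only demonstrates the expected round trip.
*/
static inline bool siox_status_clean_example(void)
{
	u8 written = 0xa;	/* counter bits 3:1 = 0b101, watchdog bit clear */
	u8 read_back = 0x4;	/* device returns the inverted counter: 0b010 */

	/* on a good cycle the cleaned counter bits are all zero */
	return (siox_status_clean(read_back, written) & SIOX_STATUS_COUNTER) == 0;
}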
static bool siox_device_counter_error(struct siox_device *sdevice,
u8 status_clean)
{
return (status_clean & SIOX_STATUS_COUNTER) != 0;
}
static bool siox_device_type_error(struct siox_device *sdevice, u8 status_clean)
{
u8 statustype = (status_clean & SIOX_STATUS_TYPE) >> 4;
/*
* If the device knows which value the type bits should have, check
* against this value otherwise just rule out the invalid values 0b0000
* and 0b1111.
*/
if (sdevice->statustype) {
if (statustype != sdevice->statustype)
return true;
} else {
switch (statustype) {
case 0:
case 0xf:
return true;
}
}
return false;
}
static bool siox_device_wdg_error(struct siox_device *sdevice, u8 status_clean)
{
return (status_clean & SIOX_STATUS_WDG) == 0;
}
/*
* If there is a type or counter error the device is called "unsynced".
*/
bool siox_device_synced(struct siox_device *sdevice)
{
if (siox_device_type_error(sdevice, sdevice->status_read_clean))
return false;
return !siox_device_counter_error(sdevice, sdevice->status_read_clean);
}
EXPORT_SYMBOL_GPL(siox_device_synced);
/*
* A device is called "connected" if it is synced and the watchdog is not
* asserted.
*/
bool siox_device_connected(struct siox_device *sdevice)
{
if (!siox_device_synced(sdevice))
return false;
return !siox_device_wdg_error(sdevice, sdevice->status_read_clean);
}
EXPORT_SYMBOL_GPL(siox_device_connected);
static void siox_poll(struct siox_master *smaster)
{
struct siox_device *sdevice;
size_t i = smaster->setbuf_len;
unsigned int devno = 0;
int unsync_error = 0;
smaster->last_poll = jiffies;
/*
* The counter bits change every second cycle, the watchdog bit
* toggles each time.
* The counter bits hold values from [0, 6]. 7 would be possible
* theoretically but the protocol designer considered that a bad idea
* for reasons unknown today. (Maybe because the status read back
* would then have only zeros in the counter bits, which might be
* confused with a stuck-at-0 error. But for the same reason (with
* s/0/1/) 0 could be skipped.)
*/
if (++smaster->status > 0x0d)
smaster->status = 0;
memset(smaster->buf, 0, smaster->setbuf_len);
/* prepare data pushed out to devices in buf[0..setbuf_len) */
list_for_each_entry(sdevice, &smaster->devices, node) {
struct siox_driver *sdriver =
to_siox_driver(sdevice->dev.driver);
sdevice->status_written = smaster->status;
i -= sdevice->inbytes;
/*
* If the device or a previous one is unsynced, don't pet the
* watchdog. This is done to ensure that the device is kept in
* reset when something is wrong.
*/
if (!siox_device_synced(sdevice))
unsync_error = 1;
if (sdriver && !unsync_error)
sdriver->set_data(sdevice, sdevice->status_written,
&smaster->buf[i + 1]);
else
/*
* Don't trigger watchdog if there is no driver or a
* sync problem
*/
sdevice->status_written &= ~SIOX_STATUS_WDG;
smaster->buf[i] = sdevice->status_written;
trace_siox_set_data(smaster, sdevice, devno, i);
devno++;
}
smaster->pushpull(smaster, smaster->setbuf_len, smaster->buf,
smaster->getbuf_len,
smaster->buf + smaster->setbuf_len);
unsync_error = 0;
/* interpret data pulled in from devices in buf[setbuf_len..] */
devno = 0;
i = smaster->setbuf_len;
list_for_each_entry(sdevice, &smaster->devices, node) {
struct siox_driver *sdriver =
to_siox_driver(sdevice->dev.driver);
u8 status = smaster->buf[i + sdevice->outbytes - 1];
u8 status_clean;
u8 prev_status_clean = sdevice->status_read_clean;
bool synced = true;
bool connected = true;
if (!siox_device_synced(sdevice))
unsync_error = 1;
/*
* If the watchdog bit wasn't toggled in this cycle, report the
* watchdog as active to give a consistent view for drivers and
* sysfs consumers.
*/
if (!sdriver || unsync_error)
status &= ~SIOX_STATUS_WDG;
status_clean =
siox_status_clean(status,
sdevice->status_written_lastcycle);
/* Check counter and type bits */
if (siox_device_counter_error(sdevice, status_clean) ||
siox_device_type_error(sdevice, status_clean)) {
bool prev_error;
synced = false;
/* only report a new error if the last cycle was ok */
prev_error =
siox_device_counter_error(sdevice,
prev_status_clean) ||
siox_device_type_error(sdevice,
prev_status_clean);
if (!prev_error) {
sdevice->status_errors++;
sysfs_notify_dirent(sdevice->status_errors_kn);
}
}
/* If the device is unsynced report the watchdog as active */
if (!synced) {
status &= ~SIOX_STATUS_WDG;
status_clean &= ~SIOX_STATUS_WDG;
}
if (siox_device_wdg_error(sdevice, status_clean))
connected = false;
/* The watchdog state changed just now */
if ((status_clean ^ prev_status_clean) & SIOX_STATUS_WDG) {
sysfs_notify_dirent(sdevice->watchdog_kn);
if (siox_device_wdg_error(sdevice, status_clean)) {
struct kernfs_node *wd_errs =
sdevice->watchdog_errors_kn;
sdevice->watchdog_errors++;
sysfs_notify_dirent(wd_errs);
}
}
if (connected != sdevice->connected)
sysfs_notify_dirent(sdevice->connected_kn);
sdevice->status_read_clean = status_clean;
sdevice->status_written_lastcycle = sdevice->status_written;
sdevice->connected = connected;
trace_siox_get_data(smaster, sdevice, devno, status_clean, i);
/* only give data read to driver if the device is connected */
if (sdriver && connected)
sdriver->get_data(sdevice, &smaster->buf[i]);
devno++;
i += sdevice->outbytes;
}
}
static int siox_poll_thread(void *data)
{
struct siox_master *smaster = data;
signed long timeout = 0;
get_device(&smaster->dev);
for (;;) {
if (kthread_should_stop()) {
put_device(&smaster->dev);
return 0;
}
siox_master_lock(smaster);
if (smaster->active) {
unsigned long next_poll =
smaster->last_poll + smaster->poll_interval;
if (time_is_before_eq_jiffies(next_poll))
siox_poll(smaster);
timeout = smaster->poll_interval -
(jiffies - smaster->last_poll);
} else {
timeout = MAX_SCHEDULE_TIMEOUT;
}
/*
* Set the task to idle while holding the lock. This makes sure
* that we don't sleep too long when the bus is reenabled before
* schedule_timeout is reached.
*/
if (timeout > 0)
set_current_state(TASK_IDLE);
siox_master_unlock(smaster);
if (timeout > 0)
schedule_timeout(timeout);
/*
* I'm not clear if/why it is important to set the state to
* RUNNING again, but it fixes a "do not call blocking ops when
* !TASK_RUNNING;"-warning.
*/
set_current_state(TASK_RUNNING);
}
}
static int __siox_start(struct siox_master *smaster)
{
if (!(smaster->setbuf_len + smaster->getbuf_len))
return -ENODEV;
if (!smaster->buf)
return -ENOMEM;
if (smaster->active)
return 0;
smaster->active = 1;
wake_up_process(smaster->poll_thread);
return 1;
}
static int siox_start(struct siox_master *smaster)
{
int ret;
siox_master_lock(smaster);
ret = __siox_start(smaster);
siox_master_unlock(smaster);
return ret;
}
static int __siox_stop(struct siox_master *smaster)
{
if (smaster->active) {
struct siox_device *sdevice;
smaster->active = 0;
list_for_each_entry(sdevice, &smaster->devices, node) {
if (sdevice->connected)
sysfs_notify_dirent(sdevice->connected_kn);
sdevice->connected = false;
}
return 1;
}
return 0;
}
static int siox_stop(struct siox_master *smaster)
{
int ret;
siox_master_lock(smaster);
ret = __siox_stop(smaster);
siox_master_unlock(smaster);
return ret;
}
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct siox_device *sdev = to_siox_device(dev);
return sprintf(buf, "%s\n", sdev->type);
}
static DEVICE_ATTR_RO(type);
static ssize_t inbytes_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct siox_device *sdev = to_siox_device(dev);
return sprintf(buf, "%zu\n", sdev->inbytes);
}
static DEVICE_ATTR_RO(inbytes);
static ssize_t outbytes_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct siox_device *sdev = to_siox_device(dev);
return sprintf(buf, "%zu\n", sdev->outbytes);
}
static DEVICE_ATTR_RO(outbytes);
static ssize_t status_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct siox_device *sdev = to_siox_device(dev);
unsigned int status_errors;
siox_master_lock(sdev->smaster);
status_errors = sdev->status_errors;
siox_master_unlock(sdev->smaster);
return sprintf(buf, "%u\n", status_errors);
}
static DEVICE_ATTR_RO(status_errors);
static ssize_t connected_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct siox_device *sdev = to_siox_device(dev);
bool connected;
siox_master_lock(sdev->smaster);
connected = sdev->connected;
siox_master_unlock(sdev->smaster);
return sprintf(buf, "%u\n", connected);
}
static DEVICE_ATTR_RO(connected);
static ssize_t watchdog_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct siox_device *sdev = to_siox_device(dev);
u8 status;
siox_master_lock(sdev->smaster);
status = sdev->status_read_clean;
siox_master_unlock(sdev->smaster);
return sprintf(buf, "%d\n", status & SIOX_STATUS_WDG);
}
static DEVICE_ATTR_RO(watchdog);
static ssize_t watchdog_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct siox_device *sdev = to_siox_device(dev);
unsigned int watchdog_errors;
siox_master_lock(sdev->smaster);
watchdog_errors = sdev->watchdog_errors;
siox_master_unlock(sdev->smaster);
return sprintf(buf, "%u\n", watchdog_errors);
}
static DEVICE_ATTR_RO(watchdog_errors);
static struct attribute *siox_device_attrs[] = {
&dev_attr_type.attr,
&dev_attr_inbytes.attr,
&dev_attr_outbytes.attr,
&dev_attr_status_errors.attr,
&dev_attr_connected.attr,
&dev_attr_watchdog.attr,
&dev_attr_watchdog_errors.attr,
NULL
};
ATTRIBUTE_GROUPS(siox_device);
static void siox_device_release(struct device *dev)
{
struct siox_device *sdevice = to_siox_device(dev);
kfree(sdevice);
}
static struct device_type siox_device_type = {
.groups = siox_device_groups,
.release = siox_device_release,
};
static int siox_match(struct device *dev, struct device_driver *drv)
{
if (dev->type != &siox_device_type)
return 0;
/* up to now there is only a single driver so keeping this simple */
return 1;
}
static int siox_probe(struct device *dev)
{
struct siox_driver *sdriver = to_siox_driver(dev->driver);
struct siox_device *sdevice = to_siox_device(dev);
return sdriver->probe(sdevice);
}
static void siox_remove(struct device *dev)
{
struct siox_driver *sdriver =
container_of(dev->driver, struct siox_driver, driver);
struct siox_device *sdevice = to_siox_device(dev);
if (sdriver->remove)
sdriver->remove(sdevice);
}
static void siox_shutdown(struct device *dev)
{
struct siox_device *sdevice = to_siox_device(dev);
struct siox_driver *sdriver;
if (!dev->driver)
return;
sdriver = container_of(dev->driver, struct siox_driver, driver);
if (sdriver->shutdown)
sdriver->shutdown(sdevice);
}
static struct bus_type siox_bus_type = {
.name = "siox",
.match = siox_match,
.probe = siox_probe,
.remove = siox_remove,
.shutdown = siox_shutdown,
};
static ssize_t active_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct siox_master *smaster = to_siox_master(dev);
return sprintf(buf, "%d\n", smaster->active);
}
static ssize_t active_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct siox_master *smaster = to_siox_master(dev);
int ret;
int active;
ret = kstrtoint(buf, 0, &active);
if (ret < 0)
return ret;
if (active)
ret = siox_start(smaster);
else
ret = siox_stop(smaster);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR_RW(active);
static struct siox_device *siox_device_add(struct siox_master *smaster,
const char *type, size_t inbytes,
size_t outbytes, u8 statustype);
static ssize_t device_add_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct siox_master *smaster = to_siox_master(dev);
int ret;
char type[20] = "";
size_t inbytes = 0, outbytes = 0;
u8 statustype = 0;
ret = sscanf(buf, "%19s %zu %zu %hhu", type, &inbytes,
&outbytes, &statustype);
if (ret != 3 && ret != 4)
return -EINVAL;
if (strcmp(type, "siox-12x8") || inbytes != 2 || outbytes != 4)
return -EINVAL;
siox_device_add(smaster, "siox-12x8", inbytes, outbytes, statustype);
return count;
}
static DEVICE_ATTR_WO(device_add);
static void siox_device_remove(struct siox_master *smaster);
static ssize_t device_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct siox_master *smaster = to_siox_master(dev);
/* XXX? require to write <type> <inbytes> <outbytes> */
siox_device_remove(smaster);
return count;
}
static DEVICE_ATTR_WO(device_remove);
static ssize_t poll_interval_ns_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct siox_master *smaster = to_siox_master(dev);
return sprintf(buf, "%lld\n", jiffies_to_nsecs(smaster->poll_interval));
}
static ssize_t poll_interval_ns_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct siox_master *smaster = to_siox_master(dev);
int ret;
u64 val;
ret = kstrtou64(buf, 0, &val);
if (ret < 0)
return ret;
siox_master_lock(smaster);
smaster->poll_interval = nsecs_to_jiffies(val);
siox_master_unlock(smaster);
return count;
}
static DEVICE_ATTR_RW(poll_interval_ns);
static struct attribute *siox_master_attrs[] = {
&dev_attr_active.attr,
&dev_attr_device_add.attr,
&dev_attr_device_remove.attr,
&dev_attr_poll_interval_ns.attr,
NULL
};
ATTRIBUTE_GROUPS(siox_master);
static void siox_master_release(struct device *dev)
{
struct siox_master *smaster = to_siox_master(dev);
kfree(smaster);
}
static struct device_type siox_master_type = {
.groups = siox_master_groups,
.release = siox_master_release,
};
struct siox_master *siox_master_alloc(struct device *dev,
size_t size)
{
struct siox_master *smaster;
if (!dev)
return NULL;
smaster = kzalloc(sizeof(*smaster) + size, GFP_KERNEL);
if (!smaster)
return NULL;
device_initialize(&smaster->dev);
smaster->busno = -1;
smaster->dev.bus = &siox_bus_type;
smaster->dev.type = &siox_master_type;
smaster->dev.parent = dev;
smaster->poll_interval = DIV_ROUND_UP(HZ, 40);
dev_set_drvdata(&smaster->dev, &smaster[1]);
return smaster;
}
EXPORT_SYMBOL_GPL(siox_master_alloc);
int siox_master_register(struct siox_master *smaster)
{
int ret;
if (!siox_is_registered)
return -EPROBE_DEFER;
if (!smaster->pushpull)
return -EINVAL;
dev_set_name(&smaster->dev, "siox-%d", smaster->busno);
mutex_init(&smaster->lock);
INIT_LIST_HEAD(&smaster->devices);
smaster->last_poll = jiffies;
smaster->poll_thread = kthread_run(siox_poll_thread, smaster,
"siox-%d", smaster->busno);
if (IS_ERR(smaster->poll_thread)) {
smaster->active = 0;
return PTR_ERR(smaster->poll_thread);
}
ret = device_add(&smaster->dev);
if (ret)
kthread_stop(smaster->poll_thread);
return ret;
}
EXPORT_SYMBOL_GPL(siox_master_register);
void siox_master_unregister(struct siox_master *smaster)
{
/* remove device */
device_del(&smaster->dev);
siox_master_lock(smaster);
__siox_stop(smaster);
while (smaster->num_devices) {
struct siox_device *sdevice;
sdevice = container_of(smaster->devices.prev,
struct siox_device, node);
list_del(&sdevice->node);
smaster->num_devices--;
siox_master_unlock(smaster);
device_unregister(&sdevice->dev);
siox_master_lock(smaster);
}
siox_master_unlock(smaster);
put_device(&smaster->dev);
}
EXPORT_SYMBOL_GPL(siox_master_unregister);
static struct siox_device *siox_device_add(struct siox_master *smaster,
const char *type, size_t inbytes,
size_t outbytes, u8 statustype)
{
struct siox_device *sdevice;
int ret;
size_t buf_len;
sdevice = kzalloc(sizeof(*sdevice), GFP_KERNEL);
if (!sdevice)
return ERR_PTR(-ENOMEM);
sdevice->type = type;
sdevice->inbytes = inbytes;
sdevice->outbytes = outbytes;
sdevice->statustype = statustype;
sdevice->smaster = smaster;
sdevice->dev.parent = &smaster->dev;
sdevice->dev.bus = &siox_bus_type;
sdevice->dev.type = &siox_device_type;
siox_master_lock(smaster);
dev_set_name(&sdevice->dev, "siox-%d-%d",
smaster->busno, smaster->num_devices);
buf_len = smaster->setbuf_len + inbytes +
smaster->getbuf_len + outbytes;
if (smaster->buf_len < buf_len) {
u8 *buf = krealloc(smaster->buf, buf_len, GFP_KERNEL);
if (!buf) {
dev_err(&smaster->dev,
"failed to realloc buffer to %zu\n", buf_len);
ret = -ENOMEM;
goto err_buf_alloc;
}
smaster->buf_len = buf_len;
smaster->buf = buf;
}
ret = device_register(&sdevice->dev);
if (ret) {
dev_err(&smaster->dev, "failed to register device: %d\n", ret);
goto err_device_register;
}
smaster->num_devices++;
list_add_tail(&sdevice->node, &smaster->devices);
smaster->setbuf_len += sdevice->inbytes;
smaster->getbuf_len += sdevice->outbytes;
sdevice->status_errors_kn = sysfs_get_dirent(sdevice->dev.kobj.sd,
"status_errors");
sdevice->watchdog_kn = sysfs_get_dirent(sdevice->dev.kobj.sd,
"watchdog");
sdevice->watchdog_errors_kn = sysfs_get_dirent(sdevice->dev.kobj.sd,
"watchdog_errors");
sdevice->connected_kn = sysfs_get_dirent(sdevice->dev.kobj.sd,
"connected");
siox_master_unlock(smaster);
return sdevice;
err_device_register:
/* don't care to make the buffer smaller again */
put_device(&sdevice->dev);
sdevice = NULL;
err_buf_alloc:
siox_master_unlock(smaster);
kfree(sdevice);
return ERR_PTR(ret);
}
static void siox_device_remove(struct siox_master *smaster)
{
struct siox_device *sdevice;
siox_master_lock(smaster);
if (!smaster->num_devices) {
siox_master_unlock(smaster);
return;
}
sdevice = container_of(smaster->devices.prev, struct siox_device, node);
list_del(&sdevice->node);
smaster->num_devices--;
smaster->setbuf_len -= sdevice->inbytes;
smaster->getbuf_len -= sdevice->outbytes;
if (!smaster->num_devices)
__siox_stop(smaster);
siox_master_unlock(smaster);
/*
* This must be done without holding the master lock because we're
* called from device_remove_store which also holds a sysfs mutex.
* device_unregister tries to acquire the same lock.
*/
device_unregister(&sdevice->dev);
}
int __siox_driver_register(struct siox_driver *sdriver, struct module *owner)
{
int ret;
if (unlikely(!siox_is_registered))
return -EPROBE_DEFER;
if (!sdriver->probe ||
(!sdriver->set_data && !sdriver->get_data)) {
pr_err("Driver %s doesn't provide needed callbacks\n",
sdriver->driver.name);
return -EINVAL;
}
sdriver->driver.owner = owner;
sdriver->driver.bus = &siox_bus_type;
ret = driver_register(&sdriver->driver);
if (ret)
pr_err("Failed to register siox driver %s (%d)\n",
sdriver->driver.name, ret);
return ret;
}
EXPORT_SYMBOL_GPL(__siox_driver_register);
static int __init siox_init(void)
{
int ret;
ret = bus_register(&siox_bus_type);
if (ret) {
pr_err("Registration of SIOX bus type failed: %d\n", ret);
return ret;
}
siox_is_registered = true;
return 0;
}
subsys_initcall(siox_init);
static void __exit siox_exit(void)
{
bus_unregister(&siox_bus_type);
}
module_exit(siox_exit);
MODULE_AUTHOR("Uwe Kleine-Koenig <[email protected]>");
MODULE_DESCRIPTION("Eckelmann SIOX driver core");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/siox/siox-core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2017 Pengutronix, Uwe Kleine-König <[email protected]>
*/
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include "siox.h"
#define DRIVER_NAME "siox-gpio"
struct siox_gpio_ddata {
struct gpio_desc *din;
struct gpio_desc *dout;
struct gpio_desc *dclk;
struct gpio_desc *dld;
};
static unsigned int siox_clkhigh_ns = 1000;
static unsigned int siox_loadhigh_ns;
static unsigned int siox_bytegap_ns;
static int siox_gpio_pushpull(struct siox_master *smaster,
size_t setbuf_len, const u8 setbuf[],
size_t getbuf_len, u8 getbuf[])
{
struct siox_gpio_ddata *ddata = siox_master_get_devdata(smaster);
size_t i;
size_t cycles = max(setbuf_len, getbuf_len);
/* reset data and clock */
gpiod_set_value_cansleep(ddata->dout, 0);
gpiod_set_value_cansleep(ddata->dclk, 0);
gpiod_set_value_cansleep(ddata->dld, 1);
ndelay(siox_loadhigh_ns);
gpiod_set_value_cansleep(ddata->dld, 0);
for (i = 0; i < cycles; ++i) {
u8 set = 0, get = 0;
size_t j;
if (i >= cycles - setbuf_len)
set = setbuf[i - (cycles - setbuf_len)];
for (j = 0; j < 8; ++j) {
get <<= 1;
if (gpiod_get_value_cansleep(ddata->din))
get |= 1;
/* DOUT is logically inverted */
gpiod_set_value_cansleep(ddata->dout, !(set & 0x80));
set <<= 1;
gpiod_set_value_cansleep(ddata->dclk, 1);
ndelay(siox_clkhigh_ns);
gpiod_set_value_cansleep(ddata->dclk, 0);
}
if (i < getbuf_len)
getbuf[i] = get;
ndelay(siox_bytegap_ns);
}
gpiod_set_value_cansleep(ddata->dld, 1);
ndelay(siox_loadhigh_ns);
gpiod_set_value_cansleep(ddata->dld, 0);
/*
* Resetting dout isn't necessary protocol-wise, but it makes the
* signals cleaner because the dout level is then deterministic between
* cycles. Note that this only affects dout between the master and the
* first siox device; dout for the later devices depends on the output
* of the previous siox device.
*/
gpiod_set_value_cansleep(ddata->dout, 0);
return 0;
}
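/*
* Illustrative note (editorial addition, not part of the original driver):
* each byte is shifted out MSB first with DOUT driven logically inverted.
* DIN is sampled at the top of the bit loop, i.e. before DOUT is updated and
* before the DCLK high pulse, and DLD is pulsed high once before and once
* after the shifting phase.
*/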
static int siox_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct siox_gpio_ddata *ddata;
int ret;
struct siox_master *smaster;
smaster = siox_master_alloc(&pdev->dev, sizeof(*ddata));
if (!smaster) {
dev_err(dev, "failed to allocate siox master\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, smaster);
ddata = siox_master_get_devdata(smaster);
ddata->din = devm_gpiod_get(dev, "din", GPIOD_IN);
if (IS_ERR(ddata->din)) {
ret = dev_err_probe(dev, PTR_ERR(ddata->din),
"Failed to get din GPIO\n");
goto err;
}
ddata->dout = devm_gpiod_get(dev, "dout", GPIOD_OUT_LOW);
if (IS_ERR(ddata->dout)) {
ret = dev_err_probe(dev, PTR_ERR(ddata->dout),
"Failed to get dout GPIO\n");
goto err;
}
ddata->dclk = devm_gpiod_get(dev, "dclk", GPIOD_OUT_LOW);
if (IS_ERR(ddata->dclk)) {
ret = dev_err_probe(dev, PTR_ERR(ddata->dclk),
"Failed to get dclk GPIO\n");
goto err;
}
ddata->dld = devm_gpiod_get(dev, "dld", GPIOD_OUT_LOW);
if (IS_ERR(ddata->dld)) {
ret = dev_err_probe(dev, PTR_ERR(ddata->dld),
"Failed to get dld GPIO\n");
goto err;
}
smaster->pushpull = siox_gpio_pushpull;
/* XXX: determine automatically like spi does */
smaster->busno = 0;
ret = siox_master_register(smaster);
if (ret) {
dev_err_probe(dev, ret,
"Failed to register siox master\n");
err:
siox_master_put(smaster);
}
return ret;
}
static int siox_gpio_remove(struct platform_device *pdev)
{
struct siox_master *master = platform_get_drvdata(pdev);
siox_master_unregister(master);
return 0;
}
static const struct of_device_id siox_gpio_dt_ids[] = {
{ .compatible = "eckelmann,siox-gpio", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, siox_gpio_dt_ids);
static struct platform_driver siox_gpio_driver = {
.probe = siox_gpio_probe,
.remove = siox_gpio_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = siox_gpio_dt_ids,
},
};
module_platform_driver(siox_gpio_driver);
MODULE_AUTHOR("Uwe Kleine-Koenig <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/siox/siox-bus-gpio.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Driver for Amlogic Meson SPI flash controller (SPIFC)
//
// Copyright (C) 2014 Beniamino Galvani <[email protected]>
//
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
/* register map */
#define REG_CMD 0x00
#define REG_ADDR 0x04
#define REG_CTRL 0x08
#define REG_CTRL1 0x0c
#define REG_STATUS 0x10
#define REG_CTRL2 0x14
#define REG_CLOCK 0x18
#define REG_USER 0x1c
#define REG_USER1 0x20
#define REG_USER2 0x24
#define REG_USER3 0x28
#define REG_USER4 0x2c
#define REG_SLAVE 0x30
#define REG_SLAVE1 0x34
#define REG_SLAVE2 0x38
#define REG_SLAVE3 0x3c
#define REG_C0 0x40
#define REG_B8 0x60
#define REG_MAX 0x7c
/* register fields */
#define CMD_USER BIT(18)
#define CTRL_ENABLE_AHB BIT(17)
#define CLOCK_SOURCE BIT(31)
#define CLOCK_DIV_SHIFT 12
#define CLOCK_DIV_MASK (0x3f << CLOCK_DIV_SHIFT)
#define CLOCK_CNT_HIGH_SHIFT 6
#define CLOCK_CNT_HIGH_MASK (0x3f << CLOCK_CNT_HIGH_SHIFT)
#define CLOCK_CNT_LOW_SHIFT 0
#define CLOCK_CNT_LOW_MASK (0x3f << CLOCK_CNT_LOW_SHIFT)
#define USER_DIN_EN_MS BIT(0)
#define USER_CMP_MODE BIT(2)
#define USER_UC_DOUT_SEL BIT(27)
#define USER_UC_DIN_SEL BIT(28)
#define USER_UC_MASK ((BIT(5) - 1) << 27)
#define USER1_BN_UC_DOUT_SHIFT 17
#define USER1_BN_UC_DOUT_MASK (0xff << 16)
#define USER1_BN_UC_DIN_SHIFT 8
#define USER1_BN_UC_DIN_MASK (0xff << 8)
#define USER4_CS_ACT BIT(30)
#define SLAVE_TRST_DONE BIT(4)
#define SLAVE_OP_MODE BIT(30)
#define SLAVE_SW_RST BIT(31)
#define SPIFC_BUFFER_SIZE 64
/**
* struct meson_spifc - driver data for the Amlogic Meson SPI flash controller
* @master: the SPI master
* @regmap: regmap for device registers
* @clk: input clock of the built-in baud rate generator
* @dev: the device structure
*/
struct meson_spifc {
struct spi_master *master;
struct regmap *regmap;
struct clk *clk;
struct device *dev;
};
static const struct regmap_config spifc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = REG_MAX,
};
/**
* meson_spifc_wait_ready() - wait for the current operation to terminate
* @spifc: the Meson SPI device
* Return: 0 on success, a negative value on error
*/
static int meson_spifc_wait_ready(struct meson_spifc *spifc)
{
unsigned long deadline = jiffies + msecs_to_jiffies(5);
u32 data;
do {
regmap_read(spifc->regmap, REG_SLAVE, &data);
if (data & SLAVE_TRST_DONE)
return 0;
cond_resched();
} while (!time_after(jiffies, deadline));
return -ETIMEDOUT;
}
/**
* meson_spifc_drain_buffer() - copy data from device buffer to memory
* @spifc: the Meson SPI device
* @buf: the destination buffer
* @len: number of bytes to copy
*/
static void meson_spifc_drain_buffer(struct meson_spifc *spifc, u8 *buf,
int len)
{
u32 data;
int i = 0;
while (i < len) {
regmap_read(spifc->regmap, REG_C0 + i, &data);
if (len - i >= 4) {
*((u32 *)buf) = data;
buf += 4;
} else {
memcpy(buf, &data, len - i);
break;
}
i += 4;
}
}
/**
* meson_spifc_fill_buffer() - copy data from memory to device buffer
* @spifc: the Meson SPI device
* @buf: the source buffer
* @len: number of bytes to copy
*/
static void meson_spifc_fill_buffer(struct meson_spifc *spifc, const u8 *buf,
int len)
{
u32 data;
int i = 0;
while (i < len) {
if (len - i >= 4)
data = *(u32 *)buf;
else
memcpy(&data, buf, len - i);
regmap_write(spifc->regmap, REG_C0 + i, data);
buf += 4;
i += 4;
}
}
/**
* meson_spifc_setup_speed() - program the clock divider
* @spifc: the Meson SPI device
* @speed: desired speed in Hz
*/
static void meson_spifc_setup_speed(struct meson_spifc *spifc, u32 speed)
{
unsigned long parent, value;
int n;
parent = clk_get_rate(spifc->clk);
n = max_t(int, parent / speed - 1, 1);
dev_dbg(spifc->dev, "parent %lu, speed %u, n %d\n", parent,
speed, n);
value = (n << CLOCK_DIV_SHIFT) & CLOCK_DIV_MASK;
value |= (n << CLOCK_CNT_LOW_SHIFT) & CLOCK_CNT_LOW_MASK;
value |= (((n + 1) / 2 - 1) << CLOCK_CNT_HIGH_SHIFT) &
CLOCK_CNT_HIGH_MASK;
regmap_write(spifc->regmap, REG_CLOCK, value);
}
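/*
* Illustrative example (editorial addition, not part of the original driver):
* with a 50 MHz parent clock and a requested speed of 10 MHz, n becomes
* 50 / 10 - 1 = 4, so the divider and low-count fields are programmed to 4
* and the high-count field to (4 + 1) / 2 - 1 = 1.
*/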
/**
* meson_spifc_txrx() - transfer a chunk of data
* @spifc: the Meson SPI device
* @xfer: the current SPI transfer
* @offset: offset of the data to transfer
* @len: length of the data to transfer
* @last_xfer: whether this is the last transfer of the message
* @last_chunk: whether this is the last chunk of the transfer
* Return: 0 on success, a negative value on error
*/
static int meson_spifc_txrx(struct meson_spifc *spifc,
struct spi_transfer *xfer,
int offset, int len, bool last_xfer,
bool last_chunk)
{
bool keep_cs = true;
int ret;
if (xfer->tx_buf)
meson_spifc_fill_buffer(spifc, xfer->tx_buf + offset, len);
/* enable DOUT stage */
regmap_update_bits(spifc->regmap, REG_USER, USER_UC_MASK,
USER_UC_DOUT_SEL);
regmap_write(spifc->regmap, REG_USER1,
(8 * len - 1) << USER1_BN_UC_DOUT_SHIFT);
/* enable data input during DOUT */
regmap_update_bits(spifc->regmap, REG_USER, USER_DIN_EN_MS,
USER_DIN_EN_MS);
if (last_chunk) {
if (last_xfer)
keep_cs = xfer->cs_change;
else
keep_cs = !xfer->cs_change;
}
regmap_update_bits(spifc->regmap, REG_USER4, USER4_CS_ACT,
keep_cs ? USER4_CS_ACT : 0);
/* clear transition done bit */
regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_TRST_DONE, 0);
/* start transfer */
regmap_update_bits(spifc->regmap, REG_CMD, CMD_USER, CMD_USER);
ret = meson_spifc_wait_ready(spifc);
if (!ret && xfer->rx_buf)
meson_spifc_drain_buffer(spifc, xfer->rx_buf + offset, len);
return ret;
}
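/*
* Illustrative note (editorial addition, not part of the original driver):
* keep_cs stays true for all but the last chunk, so chip select is kept
* between the chunks of one transfer. Only for the last chunk does
* xfer->cs_change matter: on the final transfer of a message the chip select
* is released unless cs_change is set, on intermediate transfers it is kept
* unless cs_change is set.
*/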
/**
* meson_spifc_transfer_one() - perform a single transfer
* @master: the SPI master
* @spi: the SPI device
* @xfer: the current SPI transfer
* Return: 0 on success, a negative value on error
*/
static int meson_spifc_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct meson_spifc *spifc = spi_master_get_devdata(master);
int len, done = 0, ret = 0;
meson_spifc_setup_speed(spifc, xfer->speed_hz);
regmap_update_bits(spifc->regmap, REG_CTRL, CTRL_ENABLE_AHB, 0);
while (done < xfer->len && !ret) {
len = min_t(int, xfer->len - done, SPIFC_BUFFER_SIZE);
ret = meson_spifc_txrx(spifc, xfer, done, len,
spi_transfer_is_last(master, xfer),
done + len >= xfer->len);
done += len;
}
regmap_update_bits(spifc->regmap, REG_CTRL, CTRL_ENABLE_AHB,
CTRL_ENABLE_AHB);
return ret;
}
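/*
* Illustrative note (editorial addition, not part of the original driver):
* transfers longer than the 64-byte device buffer are split into
* SPIFC_BUFFER_SIZE chunks by the loop above, and CTRL_ENABLE_AHB is cleared
* for the duration of the transfer and set again afterwards.
*/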
/**
* meson_spifc_hw_init() - reset and initialize the SPI controller
* @spifc: the Meson SPI device
*/
static void meson_spifc_hw_init(struct meson_spifc *spifc)
{
/* reset device */
regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_SW_RST,
SLAVE_SW_RST);
/* disable compatible mode */
regmap_update_bits(spifc->regmap, REG_USER, USER_CMP_MODE, 0);
/* set master mode */
regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_OP_MODE, 0);
}
static int meson_spifc_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct meson_spifc *spifc;
void __iomem *base;
unsigned int rate;
int ret = 0;
master = spi_alloc_master(&pdev->dev, sizeof(struct meson_spifc));
if (!master)
return -ENOMEM;
platform_set_drvdata(pdev, master);
spifc = spi_master_get_devdata(master);
spifc->dev = &pdev->dev;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto out_err;
}
spifc->regmap = devm_regmap_init_mmio(spifc->dev, base,
&spifc_regmap_config);
if (IS_ERR(spifc->regmap)) {
ret = PTR_ERR(spifc->regmap);
goto out_err;
}
spifc->clk = devm_clk_get(spifc->dev, NULL);
if (IS_ERR(spifc->clk)) {
dev_err(spifc->dev, "missing clock\n");
ret = PTR_ERR(spifc->clk);
goto out_err;
}
ret = clk_prepare_enable(spifc->clk);
if (ret) {
dev_err(spifc->dev, "can't prepare clock\n");
goto out_err;
}
rate = clk_get_rate(spifc->clk);
master->num_chipselect = 1;
master->dev.of_node = pdev->dev.of_node;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->auto_runtime_pm = true;
master->transfer_one = meson_spifc_transfer_one;
master->min_speed_hz = rate >> 6;
master->max_speed_hz = rate >> 1;
meson_spifc_hw_init(spifc);
pm_runtime_set_active(spifc->dev);
pm_runtime_enable(spifc->dev);
ret = devm_spi_register_master(spifc->dev, master);
if (ret) {
dev_err(spifc->dev, "failed to register spi master\n");
goto out_clk;
}
return 0;
out_clk:
clk_disable_unprepare(spifc->clk);
pm_runtime_disable(spifc->dev);
out_err:
spi_master_put(master);
return ret;
}
static void meson_spifc_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct meson_spifc *spifc = spi_master_get_devdata(master);
pm_runtime_get_sync(&pdev->dev);
clk_disable_unprepare(spifc->clk);
pm_runtime_disable(&pdev->dev);
}
#ifdef CONFIG_PM_SLEEP
static int meson_spifc_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct meson_spifc *spifc = spi_master_get_devdata(master);
int ret;
ret = spi_master_suspend(master);
if (ret)
return ret;
if (!pm_runtime_suspended(dev))
clk_disable_unprepare(spifc->clk);
return 0;
}
static int meson_spifc_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct meson_spifc *spifc = spi_master_get_devdata(master);
int ret;
if (!pm_runtime_suspended(dev)) {
ret = clk_prepare_enable(spifc->clk);
if (ret)
return ret;
}
meson_spifc_hw_init(spifc);
ret = spi_master_resume(master);
if (ret)
clk_disable_unprepare(spifc->clk);
return ret;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int meson_spifc_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct meson_spifc *spifc = spi_master_get_devdata(master);
clk_disable_unprepare(spifc->clk);
return 0;
}
static int meson_spifc_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct meson_spifc *spifc = spi_master_get_devdata(master);
return clk_prepare_enable(spifc->clk);
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops meson_spifc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(meson_spifc_suspend, meson_spifc_resume)
SET_RUNTIME_PM_OPS(meson_spifc_runtime_suspend,
meson_spifc_runtime_resume,
NULL)
};
static const struct of_device_id meson_spifc_dt_match[] = {
{ .compatible = "amlogic,meson6-spifc", },
{ .compatible = "amlogic,meson-gxbb-spifc", },
{ },
};
MODULE_DEVICE_TABLE(of, meson_spifc_dt_match);
static struct platform_driver meson_spifc_driver = {
.probe = meson_spifc_probe,
.remove_new = meson_spifc_remove,
.driver = {
.name = "meson-spifc",
.of_match_table = of_match_ptr(meson_spifc_dt_match),
.pm = &meson_spifc_pm_ops,
},
};
module_platform_driver(meson_spifc_driver);
MODULE_AUTHOR("Beniamino Galvani <[email protected]>");
MODULE_DESCRIPTION("Amlogic Meson SPIFC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-meson-spifc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Memory-mapped interface driver for DW SPI Core
*
* Copyright (c) 2010, Octasic semiconductor.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/scatterlist.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/acpi.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include "spi-dw.h"
#define DRIVER_NAME "dw_spi_mmio"
struct dw_spi_mmio {
struct dw_spi dws;
struct clk *clk;
struct clk *pclk;
void *priv;
struct reset_control *rstc;
};
#define MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL 0x24
#define OCELOT_IF_SI_OWNER_OFFSET 4
#define JAGUAR2_IF_SI_OWNER_OFFSET 6
#define MSCC_IF_SI_OWNER_MASK GENMASK(1, 0)
#define MSCC_IF_SI_OWNER_SISL 0
#define MSCC_IF_SI_OWNER_SIBM 1
#define MSCC_IF_SI_OWNER_SIMC 2
#define MSCC_SPI_MST_SW_MODE 0x14
#define MSCC_SPI_MST_SW_MODE_SW_PIN_CTRL_MODE BIT(13)
#define MSCC_SPI_MST_SW_MODE_SW_SPI_CS(x) (x << 5)
#define SPARX5_FORCE_ENA 0xa4
#define SPARX5_FORCE_VAL 0xa8
struct dw_spi_mscc {
struct regmap *syscon;
void __iomem *spi_mst; /* Not sparx5 */
};
/*
* The Elba SoC does not use ssi; a pin override is used for cs 0,1 and
* gpios for cs 2,3, as defined in the device tree.
*
* cs: | 1 0
* bit: |---3-------2-------1-------0
* | cs1 cs1_ovr cs0 cs0_ovr
*/
#define ELBA_SPICS_REG 0x2468
#define ELBA_SPICS_OFFSET(cs) ((cs) << 1)
#define ELBA_SPICS_MASK(cs) (GENMASK(1, 0) << ELBA_SPICS_OFFSET(cs))
#define ELBA_SPICS_SET(cs, val) \
((((val) << 1) | BIT(0)) << ELBA_SPICS_OFFSET(cs))
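/*
* Illustrative example (editorial addition, not part of the original driver):
* for cs 1, ELBA_SPICS_OFFSET(1) is 2, so ELBA_SPICS_MASK(1) covers bits 3:2
* and ELBA_SPICS_SET(1, 1) evaluates to 0xc: the override enable in bit 2 and
* the chip-select value in bit 3, matching the layout sketched above.
*/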
/*
* The Designware SPI controller (referred to as master in the documentation)
* automatically deasserts chip select when the tx fifo is empty. The chip
* selects then need to be driven either as GPIOs or, for the first 4, using
* the SPI boot controller registers. The final chip select is an OR gate
* between the Designware SPI controller and the SPI boot controller.
*/
static void dw_spi_mscc_set_cs(struct spi_device *spi, bool enable)
{
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct dw_spi_mmio *dwsmmio = container_of(dws, struct dw_spi_mmio, dws);
struct dw_spi_mscc *dwsmscc = dwsmmio->priv;
u32 cs = spi_get_chipselect(spi, 0);
if (cs < 4) {
u32 sw_mode = MSCC_SPI_MST_SW_MODE_SW_PIN_CTRL_MODE;
if (!enable)
sw_mode |= MSCC_SPI_MST_SW_MODE_SW_SPI_CS(BIT(cs));
writel(sw_mode, dwsmscc->spi_mst + MSCC_SPI_MST_SW_MODE);
}
dw_spi_set_cs(spi, enable);
}
static int dw_spi_mscc_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio,
const char *cpu_syscon, u32 if_si_owner_offset)
{
struct dw_spi_mscc *dwsmscc;
dwsmscc = devm_kzalloc(&pdev->dev, sizeof(*dwsmscc), GFP_KERNEL);
if (!dwsmscc)
return -ENOMEM;
dwsmscc->spi_mst = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dwsmscc->spi_mst)) {
dev_err(&pdev->dev, "SPI_MST region map failed\n");
return PTR_ERR(dwsmscc->spi_mst);
}
dwsmscc->syscon = syscon_regmap_lookup_by_compatible(cpu_syscon);
if (IS_ERR(dwsmscc->syscon))
return PTR_ERR(dwsmscc->syscon);
/* Deassert all CS */
writel(0, dwsmscc->spi_mst + MSCC_SPI_MST_SW_MODE);
/* Select the owner of the SI interface */
regmap_update_bits(dwsmscc->syscon, MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL,
MSCC_IF_SI_OWNER_MASK << if_si_owner_offset,
MSCC_IF_SI_OWNER_SIMC << if_si_owner_offset);
dwsmmio->dws.set_cs = dw_spi_mscc_set_cs;
dwsmmio->priv = dwsmscc;
return 0;
}
static int dw_spi_mscc_ocelot_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
return dw_spi_mscc_init(pdev, dwsmmio, "mscc,ocelot-cpu-syscon",
OCELOT_IF_SI_OWNER_OFFSET);
}
static int dw_spi_mscc_jaguar2_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
return dw_spi_mscc_init(pdev, dwsmmio, "mscc,jaguar2-cpu-syscon",
JAGUAR2_IF_SI_OWNER_OFFSET);
}
/*
* The Designware SPI controller (referred to as master in the
* documentation) automatically deasserts chip select when the tx fifo
* is empty. The chip selects then need to be driven by a CS override
* register. The "enable" argument is an active-low signal.
*/
static void dw_spi_sparx5_set_cs(struct spi_device *spi, bool enable)
{
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct dw_spi_mmio *dwsmmio = container_of(dws, struct dw_spi_mmio, dws);
struct dw_spi_mscc *dwsmscc = dwsmmio->priv;
u8 cs = spi_get_chipselect(spi, 0);
if (!enable) {
/* CS override drive enable */
regmap_write(dwsmscc->syscon, SPARX5_FORCE_ENA, 1);
/* Now set CSx enabled */
regmap_write(dwsmscc->syscon, SPARX5_FORCE_VAL, ~BIT(cs));
/* Allow settle */
usleep_range(1, 5);
} else {
/* CS value */
regmap_write(dwsmscc->syscon, SPARX5_FORCE_VAL, ~0);
/* Allow settle */
usleep_range(1, 5);
/* CS override drive disable */
regmap_write(dwsmscc->syscon, SPARX5_FORCE_ENA, 0);
}
dw_spi_set_cs(spi, enable);
}
static int dw_spi_mscc_sparx5_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
const char *syscon_name = "microchip,sparx5-cpu-syscon";
struct device *dev = &pdev->dev;
struct dw_spi_mscc *dwsmscc;
if (!IS_ENABLED(CONFIG_SPI_MUX)) {
dev_err(dev, "This driver needs CONFIG_SPI_MUX\n");
return -EOPNOTSUPP;
}
dwsmscc = devm_kzalloc(dev, sizeof(*dwsmscc), GFP_KERNEL);
if (!dwsmscc)
return -ENOMEM;
dwsmscc->syscon =
syscon_regmap_lookup_by_compatible(syscon_name);
if (IS_ERR(dwsmscc->syscon)) {
dev_err(dev, "No syscon map %s\n", syscon_name);
return PTR_ERR(dwsmscc->syscon);
}
dwsmmio->dws.set_cs = dw_spi_sparx5_set_cs;
dwsmmio->priv = dwsmscc;
return 0;
}
static int dw_spi_alpine_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
dwsmmio->dws.caps = DW_SPI_CAP_CS_OVERRIDE;
return 0;
}
static int dw_spi_pssi_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
dw_spi_dma_setup_generic(&dwsmmio->dws);
return 0;
}
static int dw_spi_hssi_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
dwsmmio->dws.ip = DW_HSSI_ID;
dw_spi_dma_setup_generic(&dwsmmio->dws);
return 0;
}
static int dw_spi_intel_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
dwsmmio->dws.ip = DW_HSSI_ID;
return 0;
}
/*
* DMA-based mem ops are not configured for this device and are not tested.
*/
static int dw_spi_mountevans_imc_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
/*
* The Intel Mount Evans SoC's Integrated Management Complex DW
* apb_ssi_v4.02a controller has an erratum where a full TX FIFO can
* result in data corruption. The suggested workaround is to never
* completely fill the FIFO. The TX FIFO has a size of 32 so the
* fifo_len is set to 31.
*/
dwsmmio->dws.fifo_len = 31;
return 0;
}
static int dw_spi_canaan_k210_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
/*
* The Canaan Kendryte K210 SoC DW apb_ssi v4 spi controller is
* documented to have a 32 word deep TX and RX FIFO, which
* spi_hw_init() detects. However, when the RX FIFO is filled up to
* 32 entries (RXFLR = 32), an RX FIFO overrun error occurs. Avoid this
* problem by force setting fifo_len to 31.
*/
dwsmmio->dws.fifo_len = 31;
return 0;
}
static void dw_spi_elba_override_cs(struct regmap *syscon, int cs, int enable)
{
regmap_update_bits(syscon, ELBA_SPICS_REG, ELBA_SPICS_MASK(cs),
ELBA_SPICS_SET(cs, enable));
}
static void dw_spi_elba_set_cs(struct spi_device *spi, bool enable)
{
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct dw_spi_mmio *dwsmmio = container_of(dws, struct dw_spi_mmio, dws);
struct regmap *syscon = dwsmmio->priv;
u8 cs;
cs = spi_get_chipselect(spi, 0);
if (cs < 2)
dw_spi_elba_override_cs(syscon, spi_get_chipselect(spi, 0), enable);
/*
* The DW SPI controller needs a native CS bit selected to start
* the serial engine.
*/
spi_set_chipselect(spi, 0, 0);
dw_spi_set_cs(spi, enable);
spi_set_chipselect(spi, 0, cs);
}
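/*
* Illustrative note (editorial addition, not part of the original driver):
* dw_spi_elba_set_cs() temporarily reports chip select 0 to the DW core so
* that a native CS bit is asserted and the serial engine starts, while the
* pad is actually driven by the syscon override (cs 0/1) or a GPIO (cs 2/3).
* The original chip select number is restored afterwards.
*/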
static int dw_spi_elba_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
struct regmap *syscon;
syscon = syscon_regmap_lookup_by_phandle(dev_of_node(&pdev->dev),
"amd,pensando-elba-syscon");
if (IS_ERR(syscon))
return dev_err_probe(&pdev->dev, PTR_ERR(syscon),
"syscon regmap lookup failed\n");
dwsmmio->priv = syscon;
dwsmmio->dws.set_cs = dw_spi_elba_set_cs;
return 0;
}
static int dw_spi_mmio_probe(struct platform_device *pdev)
{
int (*init_func)(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio);
struct dw_spi_mmio *dwsmmio;
struct resource *mem;
struct dw_spi *dws;
int ret;
int num_cs;
dwsmmio = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_mmio),
GFP_KERNEL);
if (!dwsmmio)
return -ENOMEM;
dws = &dwsmmio->dws;
/* Get basic io resource and map it */
dws->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(dws->regs))
return PTR_ERR(dws->regs);
dws->paddr = mem->start;
dws->irq = platform_get_irq(pdev, 0);
if (dws->irq < 0)
return dws->irq; /* -ENXIO */
dwsmmio->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dwsmmio->clk))
return PTR_ERR(dwsmmio->clk);
ret = clk_prepare_enable(dwsmmio->clk);
if (ret)
return ret;
/* Optional clock needed to access the registers */
dwsmmio->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
if (IS_ERR(dwsmmio->pclk)) {
ret = PTR_ERR(dwsmmio->pclk);
goto out_clk;
}
ret = clk_prepare_enable(dwsmmio->pclk);
if (ret)
goto out_clk;
/* find an optional reset controller */
dwsmmio->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, "spi");
if (IS_ERR(dwsmmio->rstc)) {
ret = PTR_ERR(dwsmmio->rstc);
goto out_clk;
}
reset_control_deassert(dwsmmio->rstc);
dws->bus_num = pdev->id;
dws->max_freq = clk_get_rate(dwsmmio->clk);
if (device_property_read_u32(&pdev->dev, "reg-io-width",
&dws->reg_io_width))
dws->reg_io_width = 4;
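/* Default to four chip-selects unless the optional "num-cs" property overrides it. */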
num_cs = 4;
device_property_read_u32(&pdev->dev, "num-cs", &num_cs);
dws->num_cs = num_cs;
init_func = device_get_match_data(&pdev->dev);
if (init_func) {
ret = init_func(pdev, dwsmmio);
if (ret)
goto out;
}
pm_runtime_enable(&pdev->dev);
ret = dw_spi_add_host(&pdev->dev, dws);
if (ret)
goto out;
platform_set_drvdata(pdev, dwsmmio);
return 0;
out:
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(dwsmmio->pclk);
out_clk:
clk_disable_unprepare(dwsmmio->clk);
reset_control_assert(dwsmmio->rstc);
return ret;
}
static void dw_spi_mmio_remove(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
dw_spi_remove_host(&dwsmmio->dws);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(dwsmmio->pclk);
clk_disable_unprepare(dwsmmio->clk);
reset_control_assert(dwsmmio->rstc);
}
static const struct of_device_id dw_spi_mmio_of_match[] = {
{ .compatible = "snps,dw-apb-ssi", .data = dw_spi_pssi_init},
{ .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_ocelot_init},
{ .compatible = "mscc,jaguar2-spi", .data = dw_spi_mscc_jaguar2_init},
{ .compatible = "amazon,alpine-dw-apb-ssi", .data = dw_spi_alpine_init},
{ .compatible = "renesas,rzn1-spi", .data = dw_spi_pssi_init},
{ .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_hssi_init},
{ .compatible = "intel,keembay-ssi", .data = dw_spi_intel_init},
{ .compatible = "intel,thunderbay-ssi", .data = dw_spi_intel_init},
{
.compatible = "intel,mountevans-imc-ssi",
.data = dw_spi_mountevans_imc_init,
},
{ .compatible = "microchip,sparx5-spi", dw_spi_mscc_sparx5_init},
{ .compatible = "canaan,k210-spi", dw_spi_canaan_k210_init},
{ .compatible = "amd,pensando-elba-spi", .data = dw_spi_elba_init},
{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_spi_mmio_acpi_match[] = {
{"HISI0173", (kernel_ulong_t)dw_spi_pssi_init},
{},
};
MODULE_DEVICE_TABLE(acpi, dw_spi_mmio_acpi_match);
#endif
static struct platform_driver dw_spi_mmio_driver = {
.probe = dw_spi_mmio_probe,
.remove_new = dw_spi_mmio_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = dw_spi_mmio_of_match,
.acpi_match_table = ACPI_PTR(dw_spi_mmio_acpi_match),
},
};
module_platform_driver(dw_spi_mmio_driver);
MODULE_AUTHOR("Jean-Hugues Deschenes <[email protected]>");
MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(SPI_DW_CORE);
| linux-master | drivers/spi/spi-dw-mmio.c |
// SPDX-License-Identifier: GPL-2.0
//
// spi-mt7621.c -- MediaTek MT7621 SPI controller driver
//
// Copyright (C) 2011 Sergiy <[email protected]>
// Copyright (C) 2011-2013 Gabor Juhos <[email protected]>
// Copyright (C) 2014-2015 Felix Fietkau <[email protected]>
//
// Some parts are based on spi-orion.c:
// Author: Shadi Ammouri <[email protected]>
// Copyright (C) 2007-2008 Marvell Ltd.
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#define DRIVER_NAME "spi-mt7621"
/* in usec */
#define RALINK_SPI_WAIT_MAX_LOOP 2000
/* SPISTAT register bit field */
#define SPISTAT_BUSY BIT(0)
#define MT7621_SPI_TRANS 0x00
#define SPITRANS_BUSY BIT(16)
#define MT7621_SPI_OPCODE 0x04
#define MT7621_SPI_DATA0 0x08
#define MT7621_SPI_DATA4 0x18
#define SPI_CTL_TX_RX_CNT_MASK 0xff
#define SPI_CTL_START BIT(8)
#define MT7621_SPI_MASTER 0x28
#define MASTER_MORE_BUFMODE BIT(2)
#define MASTER_FULL_DUPLEX BIT(10)
#define MASTER_RS_CLK_SEL GENMASK(27, 16)
#define MASTER_RS_CLK_SEL_SHIFT 16
#define MASTER_RS_SLAVE_SEL GENMASK(31, 29)
#define MT7621_SPI_MOREBUF 0x2c
#define MT7621_SPI_POLAR 0x38
#define MT7621_SPI_SPACE 0x3c
#define MT7621_CPHA BIT(5)
#define MT7621_CPOL BIT(4)
#define MT7621_LSB_FIRST BIT(3)
struct mt7621_spi {
struct spi_controller *master;
void __iomem *base;
unsigned int sys_freq;
unsigned int speed;
int pending_write;
};
static inline struct mt7621_spi *spidev_to_mt7621_spi(struct spi_device *spi)
{
return spi_controller_get_devdata(spi->master);
}
static inline u32 mt7621_spi_read(struct mt7621_spi *rs, u32 reg)
{
return ioread32(rs->base + reg);
}
static inline void mt7621_spi_write(struct mt7621_spi *rs, u32 reg, u32 val)
{
iowrite32(val, rs->base + reg);
}
static void mt7621_spi_set_cs(struct spi_device *spi, int enable)
{
struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
int cs = spi_get_chipselect(spi, 0);
u32 polar = 0;
u32 master;
/*
* Select SPI device 7, enable "more buffer mode" and disable
* full-duplex (only half-duplex really works on this chip
* reliably)
*/
master = mt7621_spi_read(rs, MT7621_SPI_MASTER);
master |= MASTER_RS_SLAVE_SEL | MASTER_MORE_BUFMODE;
master &= ~MASTER_FULL_DUPLEX;
mt7621_spi_write(rs, MT7621_SPI_MASTER, master);
rs->pending_write = 0;
if (enable)
polar = BIT(cs);
mt7621_spi_write(rs, MT7621_SPI_POLAR, polar);
}
static int mt7621_spi_prepare(struct spi_device *spi, unsigned int speed)
{
struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
u32 rate;
u32 reg;
dev_dbg(&spi->dev, "speed:%u\n", speed);
rate = DIV_ROUND_UP(rs->sys_freq, speed);
dev_dbg(&spi->dev, "rate-1:%u\n", rate);
if (rate > 4097)
return -EINVAL;
if (rate < 2)
rate = 2;
reg = mt7621_spi_read(rs, MT7621_SPI_MASTER);
reg &= ~MASTER_RS_CLK_SEL;
reg |= (rate - 2) << MASTER_RS_CLK_SEL_SHIFT;
rs->speed = speed;
reg &= ~MT7621_LSB_FIRST;
if (spi->mode & SPI_LSB_FIRST)
reg |= MT7621_LSB_FIRST;
/*
* This SPI controller appears to have been tested on SPI flash only; some
* bits are swizzled in the other SPI modes, probably due to incorrect
* wiring inside the silicon. Only mode 0 works correctly.
*/
reg &= ~(MT7621_CPHA | MT7621_CPOL);
mt7621_spi_write(rs, MT7621_SPI_MASTER, reg);
return 0;
}
static inline int mt7621_spi_wait_till_ready(struct mt7621_spi *rs)
{
int i;
for (i = 0; i < RALINK_SPI_WAIT_MAX_LOOP; i++) {
u32 status;
status = mt7621_spi_read(rs, MT7621_SPI_TRANS);
if ((status & SPITRANS_BUSY) == 0)
return 0;
cpu_relax();
udelay(1);
}
return -ETIMEDOUT;
}
static void mt7621_spi_read_half_duplex(struct mt7621_spi *rs,
int rx_len, u8 *buf)
{
int tx_len;
/*
* Combine with any pending write, and perform one or more half-duplex
* transactions reading 'rx_len' bytes. Data to be written is already in
* MT7621_SPI_DATA.
*/
tx_len = rs->pending_write;
rs->pending_write = 0;
while (rx_len || tx_len) {
int i;
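/* MOREBUF fields as written below: opcode bit count << 24, RX bit count << 12, remaining TX bit count in the low bits. */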
u32 val = (min(tx_len, 4) * 8) << 24;
int rx = min(rx_len, 32);
if (tx_len > 4)
val |= (tx_len - 4) * 8;
val |= (rx * 8) << 12;
mt7621_spi_write(rs, MT7621_SPI_MOREBUF, val);
tx_len = 0;
val = mt7621_spi_read(rs, MT7621_SPI_TRANS);
val |= SPI_CTL_START;
mt7621_spi_write(rs, MT7621_SPI_TRANS, val);
mt7621_spi_wait_till_ready(rs);
for (i = 0; i < rx; i++) {
if ((i % 4) == 0)
val = mt7621_spi_read(rs, MT7621_SPI_DATA0 + i);
*buf++ = val & 0xff;
val >>= 8;
}
rx_len -= i;
}
}
static inline void mt7621_spi_flush(struct mt7621_spi *rs)
{
mt7621_spi_read_half_duplex(rs, 0, NULL);
}
static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs,
int tx_len, const u8 *buf)
{
int len = rs->pending_write;
int val = 0;
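/* Up to 36 bytes (4 opcode bytes + 32 data bytes) can be staged in the OPCODE/DATA registers before a flush is needed. */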
if (len & 3) {
val = mt7621_spi_read(rs, MT7621_SPI_OPCODE + (len & ~3));
if (len < 4) {
val <<= (4 - len) * 8;
val = swab32(val);
}
}
while (tx_len > 0) {
if (len >= 36) {
rs->pending_write = len;
mt7621_spi_flush(rs);
len = 0;
}
val |= *buf++ << (8 * (len & 3));
len++;
if ((len & 3) == 0) {
if (len == 4)
/* The byte-order of the opcode is weird! */
val = swab32(val);
mt7621_spi_write(rs, MT7621_SPI_OPCODE + len - 4, val);
val = 0;
}
tx_len -= 1;
}
if (len & 3) {
if (len < 4) {
val = swab32(val);
val >>= (4 - len) * 8;
}
mt7621_spi_write(rs, MT7621_SPI_OPCODE + (len & ~3), val);
}
rs->pending_write = len;
}
static int mt7621_spi_transfer_one_message(struct spi_controller *master,
struct spi_message *m)
{
struct mt7621_spi *rs = spi_controller_get_devdata(master);
struct spi_device *spi = m->spi;
unsigned int speed = spi->max_speed_hz;
struct spi_transfer *t = NULL;
int status = 0;
mt7621_spi_wait_till_ready(rs);
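/* Use the lowest speed requested by any transfer in this message. */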
list_for_each_entry(t, &m->transfers, transfer_list)
if (t->speed_hz < speed)
speed = t->speed_hz;
if (mt7621_spi_prepare(spi, speed)) {
status = -EIO;
goto msg_done;
}
/* Assert CS */
mt7621_spi_set_cs(spi, 1);
m->actual_length = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
if ((t->rx_buf) && (t->tx_buf)) {
/*
* This controller will shift some extra data out
* of spi_opcode if (mosi_bit_cnt > 0) &&
* (cmd_bit_cnt == 0). So the claimed full-duplex
* support is broken since we have no way to read
* the MISO value during that bit.
*/
status = -EIO;
goto msg_done;
} else if (t->rx_buf) {
mt7621_spi_read_half_duplex(rs, t->len, t->rx_buf);
} else if (t->tx_buf) {
mt7621_spi_write_half_duplex(rs, t->len, t->tx_buf);
}
m->actual_length += t->len;
}
/* Flush data and deassert CS */
mt7621_spi_flush(rs);
mt7621_spi_set_cs(spi, 0);
msg_done:
m->status = status;
spi_finalize_current_message(master);
return 0;
}
static int mt7621_spi_setup(struct spi_device *spi)
{
struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
if ((spi->max_speed_hz == 0) ||
(spi->max_speed_hz > (rs->sys_freq / 2)))
spi->max_speed_hz = rs->sys_freq / 2;
if (spi->max_speed_hz < (rs->sys_freq / 4097)) {
dev_err(&spi->dev, "setup: requested speed is too low %d Hz\n",
spi->max_speed_hz);
return -EINVAL;
}
return 0;
}
static const struct of_device_id mt7621_spi_match[] = {
{ .compatible = "ralink,mt7621-spi" },
{},
};
MODULE_DEVICE_TABLE(of, mt7621_spi_match);
static int mt7621_spi_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct spi_controller *master;
struct mt7621_spi *rs;
void __iomem *base;
struct clk *clk;
int ret;
match = of_match_device(mt7621_spi_match, &pdev->dev);
if (!match)
return -EINVAL;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(&pdev->dev, PTR_ERR(clk),
"unable to get SYS clock\n");
master = devm_spi_alloc_master(&pdev->dev, sizeof(*rs));
if (!master) {
dev_info(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
}
master->mode_bits = SPI_LSB_FIRST;
master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->setup = mt7621_spi_setup;
master->transfer_one_message = mt7621_spi_transfer_one_message;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = pdev->dev.of_node;
master->num_chipselect = 2;
dev_set_drvdata(&pdev->dev, master);
rs = spi_controller_get_devdata(master);
rs->base = base;
rs->master = master;
rs->sys_freq = clk_get_rate(clk);
rs->pending_write = 0;
dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq);
ret = device_reset(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "SPI reset failed!\n");
return ret;
}
return devm_spi_register_controller(&pdev->dev, master);
}
MODULE_ALIAS("platform:" DRIVER_NAME);
static struct platform_driver mt7621_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = mt7621_spi_match,
},
.probe = mt7621_spi_probe,
};
module_platform_driver(mt7621_spi_driver);
MODULE_DESCRIPTION("MT7621 SPI driver");
MODULE_AUTHOR("Felix Fietkau <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-mt7621.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PCI interface driver for DW SPI Core
*
* Copyright (c) 2009, 2014 Intel Corporation.
*/
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include "spi-dw.h"
#define DRIVER_NAME "dw_spi_pci"
/* HW info for MRST Clk Control Unit, 32b reg per controller */
#define MRST_SPI_CLK_BASE 100000000 /* 100m */
#define MRST_CLK_SPI_REG 0xff11d86c
#define CLK_SPI_BDIV_OFFSET 0
#define CLK_SPI_BDIV_MASK 0x00000007
#define CLK_SPI_CDIV_OFFSET 9
#define CLK_SPI_CDIV_MASK 0x00000e00
#define CLK_SPI_DISABLE_OFFSET 8
struct dw_spi_pci_desc {
int (*setup)(struct dw_spi *);
u16 num_cs;
u16 bus_num;
u32 max_freq;
};
static int dw_spi_pci_mid_init(struct dw_spi *dws)
{
void __iomem *clk_reg;
u32 clk_cdiv;
clk_reg = ioremap(MRST_CLK_SPI_REG, 16);
if (!clk_reg)
return -ENOMEM;
/* Get SPI controller operating freq info */
clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
clk_cdiv &= CLK_SPI_CDIV_MASK;
clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
iounmap(clk_reg);
dw_spi_dma_setup_mfld(dws);
return 0;
}
static int dw_spi_pci_generic_init(struct dw_spi *dws)
{
dw_spi_dma_setup_generic(dws);
return 0;
}
static struct dw_spi_pci_desc dw_spi_pci_mid_desc_1 = {
.setup = dw_spi_pci_mid_init,
.num_cs = 5,
.bus_num = 0,
};
static struct dw_spi_pci_desc dw_spi_pci_mid_desc_2 = {
.setup = dw_spi_pci_mid_init,
.num_cs = 2,
.bus_num = 1,
};
static struct dw_spi_pci_desc dw_spi_pci_ehl_desc = {
.setup = dw_spi_pci_generic_init,
.num_cs = 2,
.bus_num = -1,
.max_freq = 100000000,
};
static int dw_spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct dw_spi_pci_desc *desc = (struct dw_spi_pci_desc *)ent->driver_data;
struct dw_spi *dws;
int pci_bar = 0;
int ret;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
dws = devm_kzalloc(&pdev->dev, sizeof(*dws), GFP_KERNEL);
if (!dws)
return -ENOMEM;
/* Get basic io resource and map it */
dws->paddr = pci_resource_start(pdev, pci_bar);
pci_set_master(pdev);
ret = pcim_iomap_regions(pdev, 1 << pci_bar, pci_name(pdev));
if (ret)
return ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
dws->regs = pcim_iomap_table(pdev)[pci_bar];
dws->irq = pci_irq_vector(pdev, 0);
/*
* Platform-specific handling, such as DMA setup, clock rate and
* FIFO depth.
*/
if (desc) {
dws->num_cs = desc->num_cs;
dws->bus_num = desc->bus_num;
dws->max_freq = desc->max_freq;
if (desc->setup) {
ret = desc->setup(dws);
if (ret)
goto err_free_irq_vectors;
}
} else {
ret = -ENODEV;
goto err_free_irq_vectors;
}
ret = dw_spi_add_host(&pdev->dev, dws);
if (ret)
goto err_free_irq_vectors;
/* PCI hook and SPI hook use the same drv data */
pci_set_drvdata(pdev, dws);
dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n",
pdev->vendor, pdev->device);
pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
pm_runtime_allow(&pdev->dev);
return 0;
err_free_irq_vectors:
pci_free_irq_vectors(pdev);
return ret;
}
static void dw_spi_pci_remove(struct pci_dev *pdev)
{
struct dw_spi *dws = pci_get_drvdata(pdev);
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
dw_spi_remove_host(dws);
pci_free_irq_vectors(pdev);
}
#ifdef CONFIG_PM_SLEEP
static int dw_spi_pci_suspend(struct device *dev)
{
struct dw_spi *dws = dev_get_drvdata(dev);
return dw_spi_suspend_host(dws);
}
static int dw_spi_pci_resume(struct device *dev)
{
struct dw_spi *dws = dev_get_drvdata(dev);
return dw_spi_resume_host(dws);
}
#endif
static SIMPLE_DEV_PM_OPS(dw_spi_pci_pm_ops, dw_spi_pci_suspend, dw_spi_pci_resume);
static const struct pci_device_id dw_spi_pci_ids[] = {
/* Intel MID platform SPI controller 0 */
/*
* The access to the device 8086:0801 is disabled by HW, since it's
* exclusively used by SCU to communicate with MSIC.
*/
/* Intel MID platform SPI controller 1 */
{ PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&dw_spi_pci_mid_desc_1},
/* Intel MID platform SPI controller 2 */
{ PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&dw_spi_pci_mid_desc_2},
/* Intel Elkhart Lake PSE SPI controllers */
{ PCI_VDEVICE(INTEL, 0x4b84), (kernel_ulong_t)&dw_spi_pci_ehl_desc},
{ PCI_VDEVICE(INTEL, 0x4b85), (kernel_ulong_t)&dw_spi_pci_ehl_desc},
{ PCI_VDEVICE(INTEL, 0x4b86), (kernel_ulong_t)&dw_spi_pci_ehl_desc},
{ PCI_VDEVICE(INTEL, 0x4b87), (kernel_ulong_t)&dw_spi_pci_ehl_desc},
{},
};
MODULE_DEVICE_TABLE(pci, dw_spi_pci_ids);
static struct pci_driver dw_spi_pci_driver = {
.name = DRIVER_NAME,
.id_table = dw_spi_pci_ids,
.probe = dw_spi_pci_probe,
.remove = dw_spi_pci_remove,
.driver = {
.pm = &dw_spi_pci_pm_ops,
},
};
module_pci_driver(dw_spi_pci_driver);
MODULE_AUTHOR("Feng Tang <[email protected]>");
MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(SPI_DW_CORE);
| linux-master | drivers/spi/spi-dw-pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Amlogic A1 SPI flash controller (SPIFC)
*
* Copyright (c) 2023, SberDevices. All Rights Reserved.
*
* Author: Martin Kurbanov <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/types.h>
#define SPIFC_A1_AHB_CTRL_REG 0x0
#define SPIFC_A1_AHB_BUS_EN BIT(31)
#define SPIFC_A1_USER_CTRL0_REG 0x200
#define SPIFC_A1_USER_REQUEST_ENABLE BIT(31)
#define SPIFC_A1_USER_REQUEST_FINISH BIT(30)
#define SPIFC_A1_USER_DATA_UPDATED BIT(0)
#define SPIFC_A1_USER_CTRL1_REG 0x204
#define SPIFC_A1_USER_CMD_ENABLE BIT(30)
#define SPIFC_A1_USER_CMD_MODE GENMASK(29, 28)
#define SPIFC_A1_USER_CMD_CODE GENMASK(27, 20)
#define SPIFC_A1_USER_ADDR_ENABLE BIT(19)
#define SPIFC_A1_USER_ADDR_MODE GENMASK(18, 17)
#define SPIFC_A1_USER_ADDR_BYTES GENMASK(16, 15)
#define SPIFC_A1_USER_DOUT_ENABLE BIT(14)
#define SPIFC_A1_USER_DOUT_MODE GENMASK(11, 10)
#define SPIFC_A1_USER_DOUT_BYTES GENMASK(9, 0)
#define SPIFC_A1_USER_CTRL2_REG 0x208
#define SPIFC_A1_USER_DUMMY_ENABLE BIT(31)
#define SPIFC_A1_USER_DUMMY_MODE GENMASK(30, 29)
#define SPIFC_A1_USER_DUMMY_CLK_SYCLES GENMASK(28, 23)
#define SPIFC_A1_USER_CTRL3_REG 0x20c
#define SPIFC_A1_USER_DIN_ENABLE BIT(31)
#define SPIFC_A1_USER_DIN_MODE GENMASK(28, 27)
#define SPIFC_A1_USER_DIN_BYTES GENMASK(25, 16)
#define SPIFC_A1_USER_ADDR_REG 0x210
#define SPIFC_A1_AHB_REQ_CTRL_REG 0x214
#define SPIFC_A1_AHB_REQ_ENABLE BIT(31)
#define SPIFC_A1_ACTIMING0_REG (0x0088 << 2)
#define SPIFC_A1_TSLCH GENMASK(31, 30)
#define SPIFC_A1_TCLSH GENMASK(29, 28)
#define SPIFC_A1_TSHWL GENMASK(20, 16)
#define SPIFC_A1_TSHSL2 GENMASK(15, 12)
#define SPIFC_A1_TSHSL1 GENMASK(11, 8)
#define SPIFC_A1_TWHSL GENMASK(7, 0)
#define SPIFC_A1_DBUF_CTRL_REG 0x240
#define SPIFC_A1_DBUF_DIR BIT(31)
#define SPIFC_A1_DBUF_AUTO_UPDATE_ADDR BIT(30)
#define SPIFC_A1_DBUF_ADDR GENMASK(7, 0)
#define SPIFC_A1_DBUF_DATA_REG 0x244
#define SPIFC_A1_USER_DBUF_ADDR_REG 0x248
#define SPIFC_A1_BUFFER_SIZE 512U
#define SPIFC_A1_MAX_HZ 200000000
#define SPIFC_A1_MIN_HZ 1000000
#define SPIFC_A1_USER_CMD(op) ( \
SPIFC_A1_USER_CMD_ENABLE | \
FIELD_PREP(SPIFC_A1_USER_CMD_CODE, (op)->cmd.opcode) | \
FIELD_PREP(SPIFC_A1_USER_CMD_MODE, ilog2((op)->cmd.buswidth)))
#define SPIFC_A1_USER_ADDR(op) ( \
SPIFC_A1_USER_ADDR_ENABLE | \
FIELD_PREP(SPIFC_A1_USER_ADDR_MODE, ilog2((op)->addr.buswidth)) | \
FIELD_PREP(SPIFC_A1_USER_ADDR_BYTES, (op)->addr.nbytes - 1))
#define SPIFC_A1_USER_DUMMY(op) ( \
SPIFC_A1_USER_DUMMY_ENABLE | \
FIELD_PREP(SPIFC_A1_USER_DUMMY_MODE, ilog2((op)->dummy.buswidth)) | \
FIELD_PREP(SPIFC_A1_USER_DUMMY_CLK_SYCLES, (op)->dummy.nbytes << 3))
#define SPIFC_A1_TSLCH_VAL FIELD_PREP(SPIFC_A1_TSLCH, 1)
#define SPIFC_A1_TCLSH_VAL FIELD_PREP(SPIFC_A1_TCLSH, 1)
#define SPIFC_A1_TSHWL_VAL FIELD_PREP(SPIFC_A1_TSHWL, 7)
#define SPIFC_A1_TSHSL2_VAL FIELD_PREP(SPIFC_A1_TSHSL2, 7)
#define SPIFC_A1_TSHSL1_VAL FIELD_PREP(SPIFC_A1_TSHSL1, 7)
#define SPIFC_A1_TWHSL_VAL FIELD_PREP(SPIFC_A1_TWHSL, 2)
#define SPIFC_A1_ACTIMING0_VAL (SPIFC_A1_TSLCH_VAL | SPIFC_A1_TCLSH_VAL | \
SPIFC_A1_TSHWL_VAL | SPIFC_A1_TSHSL2_VAL | \
SPIFC_A1_TSHSL1_VAL | SPIFC_A1_TWHSL_VAL)
struct amlogic_spifc_a1 {
struct spi_controller *ctrl;
struct clk *clk;
struct device *dev;
void __iomem *base;
u32 curr_speed_hz;
};
static int amlogic_spifc_a1_request(struct amlogic_spifc_a1 *spifc, bool read)
{
u32 mask = SPIFC_A1_USER_REQUEST_FINISH |
(read ? SPIFC_A1_USER_DATA_UPDATED : 0);
u32 val;
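/* Kick off the user request and poll until it finishes (and, for reads, until the data buffer is updated). */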
writel(SPIFC_A1_USER_REQUEST_ENABLE,
spifc->base + SPIFC_A1_USER_CTRL0_REG);
return readl_poll_timeout(spifc->base + SPIFC_A1_USER_CTRL0_REG,
val, (val & mask) == mask, 0,
200 * USEC_PER_MSEC);
}
static void amlogic_spifc_a1_drain_buffer(struct amlogic_spifc_a1 *spifc,
char *buf, u32 len)
{
u32 data;
const u32 count = len / sizeof(data);
const u32 pad = len % sizeof(data);
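/* Stream whole 32-bit words out of the data buffer, then copy any trailing bytes separately. */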
writel(SPIFC_A1_DBUF_AUTO_UPDATE_ADDR,
spifc->base + SPIFC_A1_DBUF_CTRL_REG);
ioread32_rep(spifc->base + SPIFC_A1_DBUF_DATA_REG, buf, count);
if (pad) {
data = readl(spifc->base + SPIFC_A1_DBUF_DATA_REG);
memcpy(buf + len - pad, &data, pad);
}
}
static void amlogic_spifc_a1_fill_buffer(struct amlogic_spifc_a1 *spifc,
const char *buf, u32 len)
{
u32 data;
const u32 count = len / sizeof(data);
const u32 pad = len % sizeof(data);
writel(SPIFC_A1_DBUF_DIR | SPIFC_A1_DBUF_AUTO_UPDATE_ADDR,
spifc->base + SPIFC_A1_DBUF_CTRL_REG);
iowrite32_rep(spifc->base + SPIFC_A1_DBUF_DATA_REG, buf, count);
if (pad) {
memcpy(&data, buf + len - pad, pad);
writel(data, spifc->base + SPIFC_A1_DBUF_DATA_REG);
}
}
static void amlogic_spifc_a1_user_init(struct amlogic_spifc_a1 *spifc)
{
writel(0, spifc->base + SPIFC_A1_USER_CTRL0_REG);
writel(0, spifc->base + SPIFC_A1_USER_CTRL1_REG);
writel(0, spifc->base + SPIFC_A1_USER_CTRL2_REG);
writel(0, spifc->base + SPIFC_A1_USER_CTRL3_REG);
}
static void amlogic_spifc_a1_set_cmd(struct amlogic_spifc_a1 *spifc,
u32 cmd_cfg)
{
u32 val;
val = readl(spifc->base + SPIFC_A1_USER_CTRL1_REG);
val &= ~(SPIFC_A1_USER_CMD_MODE | SPIFC_A1_USER_CMD_CODE);
val |= cmd_cfg;
writel(val, spifc->base + SPIFC_A1_USER_CTRL1_REG);
}
static void amlogic_spifc_a1_set_addr(struct amlogic_spifc_a1 *spifc, u32 addr,
u32 addr_cfg)
{
u32 val;
writel(addr, spifc->base + SPIFC_A1_USER_ADDR_REG);
val = readl(spifc->base + SPIFC_A1_USER_CTRL1_REG);
val &= ~(SPIFC_A1_USER_ADDR_MODE | SPIFC_A1_USER_ADDR_BYTES);
val |= addr_cfg;
writel(val, spifc->base + SPIFC_A1_USER_CTRL1_REG);
}
static void amlogic_spifc_a1_set_dummy(struct amlogic_spifc_a1 *spifc,
u32 dummy_cfg)
{
u32 val = readl(spifc->base + SPIFC_A1_USER_CTRL2_REG);
val &= ~(SPIFC_A1_USER_DUMMY_MODE | SPIFC_A1_USER_DUMMY_CLK_SYCLES);
val |= dummy_cfg;
writel(val, spifc->base + SPIFC_A1_USER_CTRL2_REG);
}
static int amlogic_spifc_a1_read(struct amlogic_spifc_a1 *spifc, void *buf,
u32 size, u32 mode)
{
u32 val = readl(spifc->base + SPIFC_A1_USER_CTRL3_REG);
int ret;
val &= ~(SPIFC_A1_USER_DIN_MODE | SPIFC_A1_USER_DIN_BYTES);
val |= SPIFC_A1_USER_DIN_ENABLE;
val |= FIELD_PREP(SPIFC_A1_USER_DIN_MODE, mode);
val |= FIELD_PREP(SPIFC_A1_USER_DIN_BYTES, size);
writel(val, spifc->base + SPIFC_A1_USER_CTRL3_REG);
ret = amlogic_spifc_a1_request(spifc, true);
if (!ret)
amlogic_spifc_a1_drain_buffer(spifc, buf, size);
return ret;
}
static int amlogic_spifc_a1_write(struct amlogic_spifc_a1 *spifc,
const void *buf, u32 size, u32 mode)
{
u32 val;
amlogic_spifc_a1_fill_buffer(spifc, buf, size);
val = readl(spifc->base + SPIFC_A1_USER_CTRL1_REG);
val &= ~(SPIFC_A1_USER_DOUT_MODE | SPIFC_A1_USER_DOUT_BYTES);
val |= FIELD_PREP(SPIFC_A1_USER_DOUT_MODE, mode);
val |= FIELD_PREP(SPIFC_A1_USER_DOUT_BYTES, size);
val |= SPIFC_A1_USER_DOUT_ENABLE;
writel(val, spifc->base + SPIFC_A1_USER_CTRL1_REG);
return amlogic_spifc_a1_request(spifc, false);
}
static int amlogic_spifc_a1_set_freq(struct amlogic_spifc_a1 *spifc, u32 freq)
{
int ret;
if (freq == spifc->curr_speed_hz)
return 0;
ret = clk_set_rate(spifc->clk, freq);
if (ret)
return ret;
spifc->curr_speed_hz = freq;
return 0;
}
static int amlogic_spifc_a1_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct amlogic_spifc_a1 *spifc =
spi_controller_get_devdata(mem->spi->controller);
size_t data_size = op->data.nbytes;
int ret;
ret = amlogic_spifc_a1_set_freq(spifc, mem->spi->max_speed_hz);
if (ret)
return ret;
amlogic_spifc_a1_user_init(spifc);
amlogic_spifc_a1_set_cmd(spifc, SPIFC_A1_USER_CMD(op));
if (op->addr.nbytes)
amlogic_spifc_a1_set_addr(spifc, op->addr.val,
SPIFC_A1_USER_ADDR(op));
if (op->dummy.nbytes)
amlogic_spifc_a1_set_dummy(spifc, SPIFC_A1_USER_DUMMY(op));
if (data_size) {
u32 mode = ilog2(op->data.buswidth);
writel(0, spifc->base + SPIFC_A1_USER_DBUF_ADDR_REG);
if (op->data.dir == SPI_MEM_DATA_IN)
ret = amlogic_spifc_a1_read(spifc, op->data.buf.in,
data_size, mode);
else
ret = amlogic_spifc_a1_write(spifc, op->data.buf.out,
data_size, mode);
} else {
ret = amlogic_spifc_a1_request(spifc, false);
}
return ret;
}
static int amlogic_spifc_a1_adjust_op_size(struct spi_mem *mem,
struct spi_mem_op *op)
{
op->data.nbytes = min(op->data.nbytes, SPIFC_A1_BUFFER_SIZE);
return 0;
}
static void amlogic_spifc_a1_hw_init(struct amlogic_spifc_a1 *spifc)
{
u32 regv;
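/* Use only the user register interface: disable the AHB request path and the AHB bus. */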
regv = readl(spifc->base + SPIFC_A1_AHB_REQ_CTRL_REG);
regv &= ~(SPIFC_A1_AHB_REQ_ENABLE);
writel(regv, spifc->base + SPIFC_A1_AHB_REQ_CTRL_REG);
regv = readl(spifc->base + SPIFC_A1_AHB_CTRL_REG);
regv &= ~(SPIFC_A1_AHB_BUS_EN);
writel(regv, spifc->base + SPIFC_A1_AHB_CTRL_REG);
writel(SPIFC_A1_ACTIMING0_VAL, spifc->base + SPIFC_A1_ACTIMING0_REG);
writel(0, spifc->base + SPIFC_A1_USER_DBUF_ADDR_REG);
}
static const struct spi_controller_mem_ops amlogic_spifc_a1_mem_ops = {
.exec_op = amlogic_spifc_a1_exec_op,
.adjust_op_size = amlogic_spifc_a1_adjust_op_size,
};
static int amlogic_spifc_a1_probe(struct platform_device *pdev)
{
struct spi_controller *ctrl;
struct amlogic_spifc_a1 *spifc;
int ret;
ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*spifc));
if (!ctrl)
return -ENOMEM;
spifc = spi_controller_get_devdata(ctrl);
platform_set_drvdata(pdev, spifc);
spifc->dev = &pdev->dev;
spifc->ctrl = ctrl;
spifc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spifc->base))
return PTR_ERR(spifc->base);
spifc->clk = devm_clk_get_enabled(spifc->dev, NULL);
if (IS_ERR(spifc->clk))
return dev_err_probe(spifc->dev, PTR_ERR(spifc->clk),
"unable to get clock\n");
amlogic_spifc_a1_hw_init(spifc);
pm_runtime_set_autosuspend_delay(spifc->dev, 500);
pm_runtime_use_autosuspend(spifc->dev);
devm_pm_runtime_enable(spifc->dev);
ctrl->num_chipselect = 1;
ctrl->dev.of_node = pdev->dev.of_node;
ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
ctrl->auto_runtime_pm = true;
ctrl->mem_ops = &amlogic_spifc_a1_mem_ops;
ctrl->min_speed_hz = SPIFC_A1_MIN_HZ;
ctrl->max_speed_hz = SPIFC_A1_MAX_HZ;
ctrl->mode_bits = (SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD);
ret = devm_spi_register_controller(spifc->dev, ctrl);
if (ret)
return dev_err_probe(spifc->dev, ret,
"failed to register spi controller\n");
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int amlogic_spifc_a1_suspend(struct device *dev)
{
struct amlogic_spifc_a1 *spifc = dev_get_drvdata(dev);
int ret;
ret = spi_controller_suspend(spifc->ctrl);
if (ret)
return ret;
if (!pm_runtime_suspended(dev))
clk_disable_unprepare(spifc->clk);
return 0;
}
static int amlogic_spifc_a1_resume(struct device *dev)
{
struct amlogic_spifc_a1 *spifc = dev_get_drvdata(dev);
int ret = 0;
if (!pm_runtime_suspended(dev)) {
ret = clk_prepare_enable(spifc->clk);
if (ret)
return ret;
}
amlogic_spifc_a1_hw_init(spifc);
ret = spi_controller_resume(spifc->ctrl);
if (ret)
clk_disable_unprepare(spifc->clk);
return ret;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int amlogic_spifc_a1_runtime_suspend(struct device *dev)
{
struct amlogic_spifc_a1 *spifc = dev_get_drvdata(dev);
clk_disable_unprepare(spifc->clk);
return 0;
}
static int amlogic_spifc_a1_runtime_resume(struct device *dev)
{
struct amlogic_spifc_a1 *spifc = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(spifc->clk);
if (!ret)
amlogic_spifc_a1_hw_init(spifc);
return ret;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops amlogic_spifc_a1_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(amlogic_spifc_a1_suspend,
amlogic_spifc_a1_resume)
SET_RUNTIME_PM_OPS(amlogic_spifc_a1_runtime_suspend,
amlogic_spifc_a1_runtime_resume,
NULL)
};
#ifdef CONFIG_OF
static const struct of_device_id amlogic_spifc_a1_dt_match[] = {
{ .compatible = "amlogic,a1-spifc", },
{ },
};
MODULE_DEVICE_TABLE(of, amlogic_spifc_a1_dt_match);
#endif /* CONFIG_OF */
static struct platform_driver amlogic_spifc_a1_driver = {
.probe = amlogic_spifc_a1_probe,
.driver = {
.name = "amlogic-spifc-a1",
.of_match_table = of_match_ptr(amlogic_spifc_a1_dt_match),
.pm = &amlogic_spifc_a1_pm_ops,
},
};
module_platform_driver(amlogic_spifc_a1_driver);
MODULE_AUTHOR("Martin Kurbanov <[email protected]>");
MODULE_DESCRIPTION("Amlogic A1 SPIFC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-amlogic-spifc-a1.c |
// SPDX-License-Identifier: GPL-2.0
//
// RPC-IF SPI/QSPI/Octa driver
//
// Copyright (C) 2018 ~ 2019 Renesas Solutions Corp.
// Copyright (C) 2019 Macronix International Co., Ltd.
// Copyright (C) 2019 - 2020 Cogent Embedded, Inc.
//
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <memory/renesas-rpc-if.h>
#include <asm/unaligned.h>
static void rpcif_spi_mem_prepare(struct spi_device *spi_dev,
const struct spi_mem_op *spi_op,
u64 *offs, size_t *len)
{
struct rpcif *rpc = spi_controller_get_devdata(spi_dev->controller);
struct rpcif_op rpc_op = { };
rpc_op.cmd.opcode = spi_op->cmd.opcode;
rpc_op.cmd.buswidth = spi_op->cmd.buswidth;
if (spi_op->addr.nbytes) {
rpc_op.addr.buswidth = spi_op->addr.buswidth;
rpc_op.addr.nbytes = spi_op->addr.nbytes;
rpc_op.addr.val = spi_op->addr.val;
}
if (spi_op->dummy.nbytes) {
rpc_op.dummy.buswidth = spi_op->dummy.buswidth;
rpc_op.dummy.ncycles = spi_op->dummy.nbytes * 8 /
spi_op->dummy.buswidth;
}
if (spi_op->data.nbytes || (offs && len)) {
rpc_op.data.buswidth = spi_op->data.buswidth;
rpc_op.data.nbytes = spi_op->data.nbytes;
switch (spi_op->data.dir) {
case SPI_MEM_DATA_IN:
rpc_op.data.dir = RPCIF_DATA_IN;
rpc_op.data.buf.in = spi_op->data.buf.in;
break;
case SPI_MEM_DATA_OUT:
rpc_op.data.dir = RPCIF_DATA_OUT;
rpc_op.data.buf.out = spi_op->data.buf.out;
break;
case SPI_MEM_NO_DATA:
rpc_op.data.dir = RPCIF_NO_DATA;
break;
}
} else {
rpc_op.data.dir = RPCIF_NO_DATA;
}
rpcif_prepare(rpc->dev, &rpc_op, offs, len);
}
static bool rpcif_spi_mem_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (!spi_mem_default_supports_op(mem, op))
return false;
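/* The RPC-IF backend handles at most x4 bus widths and 4 address bytes. */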
if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
op->dummy.buswidth > 4 || op->cmd.buswidth > 4 ||
op->addr.nbytes > 4)
return false;
return true;
}
static ssize_t rpcif_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
struct rpcif *rpc =
spi_controller_get_devdata(desc->mem->spi->controller);
if (offs + desc->info.offset + len > U32_MAX)
return -EINVAL;
rpcif_spi_mem_prepare(desc->mem->spi, &desc->info.op_tmpl, &offs, &len);
return rpcif_dirmap_read(rpc->dev, offs, len, buf);
}
static int rpcif_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
struct rpcif *rpc =
spi_controller_get_devdata(desc->mem->spi->controller);
if (desc->info.offset + desc->info.length > U32_MAX)
return -ENOTSUPP;
if (!rpcif_spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
return -ENOTSUPP;
if (!rpc->dirmap && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
return -ENOTSUPP;
if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
return -ENOTSUPP;
return 0;
}
static int rpcif_spi_mem_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct rpcif *rpc =
spi_controller_get_devdata(mem->spi->controller);
rpcif_spi_mem_prepare(mem->spi, op, NULL, NULL);
return rpcif_manual_xfer(rpc->dev);
}
static const struct spi_controller_mem_ops rpcif_spi_mem_ops = {
.supports_op = rpcif_spi_mem_supports_op,
.exec_op = rpcif_spi_mem_exec_op,
.dirmap_create = rpcif_spi_mem_dirmap_create,
.dirmap_read = rpcif_spi_mem_dirmap_read,
};
static int rpcif_spi_probe(struct platform_device *pdev)
{
struct device *parent = pdev->dev.parent;
struct spi_controller *ctlr;
struct rpcif *rpc;
int error;
ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*rpc));
if (!ctlr)
return -ENOMEM;
rpc = spi_controller_get_devdata(ctlr);
error = rpcif_sw_init(rpc, parent);
if (error)
return error;
platform_set_drvdata(pdev, ctlr);
ctlr->dev.of_node = parent->of_node;
pm_runtime_enable(rpc->dev);
ctlr->num_chipselect = 1;
ctlr->mem_ops = &rpcif_spi_mem_ops;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_QUAD | SPI_RX_QUAD;
ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
error = rpcif_hw_init(rpc->dev, false);
if (error)
goto out_disable_rpm;
error = spi_register_controller(ctlr);
if (error) {
dev_err(&pdev->dev, "spi_register_controller failed\n");
goto out_disable_rpm;
}
return 0;
out_disable_rpm:
pm_runtime_disable(rpc->dev);
return error;
}
static void rpcif_spi_remove(struct platform_device *pdev)
{
struct spi_controller *ctlr = platform_get_drvdata(pdev);
struct rpcif *rpc = spi_controller_get_devdata(ctlr);
spi_unregister_controller(ctlr);
pm_runtime_disable(rpc->dev);
}
static int __maybe_unused rpcif_spi_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
return spi_controller_suspend(ctlr);
}
static int __maybe_unused rpcif_spi_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
return spi_controller_resume(ctlr);
}
static SIMPLE_DEV_PM_OPS(rpcif_spi_pm_ops, rpcif_spi_suspend, rpcif_spi_resume);
static struct platform_driver rpcif_spi_driver = {
.probe = rpcif_spi_probe,
.remove_new = rpcif_spi_remove,
.driver = {
.name = "rpc-if-spi",
#ifdef CONFIG_PM_SLEEP
.pm = &rpcif_spi_pm_ops,
#endif
},
};
module_platform_driver(rpcif_spi_driver);
MODULE_DESCRIPTION("Renesas RPC-IF SPI driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-rpc-if.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SPI master driver for ICP DAS LP-8841 RTC
*
* Copyright (C) 2016 Sergei Ianovich
*
* based on
*
* Dallas DS1302 RTC Support
* Copyright (C) 2002 David McCullough
* Copyright (C) 2003 - 2007 Paul Mundt
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/spi/spi.h>
#define DRIVER_NAME "spi_lp8841_rtc"
#define SPI_LP8841_RTC_CE 0x01
#define SPI_LP8841_RTC_CLK 0x02
#define SPI_LP8841_RTC_nWE 0x04
#define SPI_LP8841_RTC_MOSI 0x08
#define SPI_LP8841_RTC_MISO 0x01
/*
* REVISIT: if the SPI GPIO driver gains SPI_3WIRE and SPI_LSB_FIRST
* support, this SPI driver could be replaced by a simple GPIO driver
* providing 3 GPIO pins.
*/
struct spi_lp8841_rtc {
void *iomem;
unsigned long state;
};
static inline void
setsck(struct spi_lp8841_rtc *data, int is_on)
{
if (is_on)
data->state |= SPI_LP8841_RTC_CLK;
else
data->state &= ~SPI_LP8841_RTC_CLK;
writeb(data->state, data->iomem);
}
static inline void
setmosi(struct spi_lp8841_rtc *data, int is_on)
{
if (is_on)
data->state |= SPI_LP8841_RTC_MOSI;
else
data->state &= ~SPI_LP8841_RTC_MOSI;
writeb(data->state, data->iomem);
}
static inline int
getmiso(struct spi_lp8841_rtc *data)
{
return ioread8(data->iomem) & SPI_LP8841_RTC_MISO;
}
static inline u32
bitbang_txrx_be_cpha0_lsb(struct spi_lp8841_rtc *data,
unsigned usecs, unsigned cpol, unsigned flags,
u32 word, u8 bits)
{
/* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
u32 shift = 32 - bits;
/* clock starts at inactive polarity */
for (; likely(bits); bits--) {
/* setup LSB (to slave) on leading edge */
if ((flags & SPI_CONTROLLER_NO_TX) == 0)
setmosi(data, (word & 1));
usleep_range(usecs, usecs + 1); /* T(setup) */
/* sample LSB (from slave) on trailing edge */
word >>= 1;
if ((flags & SPI_CONTROLLER_NO_RX) == 0)
word |= (getmiso(data) << 31);
setsck(data, !cpol);
usleep_range(usecs, usecs + 1);
setsck(data, cpol);
}
word >>= shift;
return word;
}
static int
spi_lp8841_rtc_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct spi_lp8841_rtc *data = spi_master_get_devdata(master);
unsigned count = t->len;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
u8 word = 0;
int ret = 0;
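/* nWE selects the access direction: drive it low for writes, high for reads. */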
if (tx) {
data->state &= ~SPI_LP8841_RTC_nWE;
writeb(data->state, data->iomem);
while (likely(count > 0)) {
word = *tx++;
bitbang_txrx_be_cpha0_lsb(data, 1, 0,
SPI_CONTROLLER_NO_RX, word, 8);
count--;
}
} else if (rx) {
data->state |= SPI_LP8841_RTC_nWE;
writeb(data->state, data->iomem);
while (likely(count > 0)) {
word = bitbang_txrx_be_cpha0_lsb(data, 1, 0,
SPI_CONTROLLER_NO_TX, word, 8);
*rx++ = word;
count--;
}
} else {
ret = -EINVAL;
}
spi_finalize_current_transfer(master);
return ret;
}
static void
spi_lp8841_rtc_set_cs(struct spi_device *spi, bool enable)
{
struct spi_lp8841_rtc *data = spi_master_get_devdata(spi->master);
data->state = 0;
writeb(data->state, data->iomem);
if (enable) {
usleep_range(4, 5);
data->state |= SPI_LP8841_RTC_CE;
writeb(data->state, data->iomem);
usleep_range(4, 5);
}
}
static int
spi_lp8841_rtc_setup(struct spi_device *spi)
{
if ((spi->mode & SPI_CS_HIGH) == 0) {
dev_err(&spi->dev, "unsupported active low chip select\n");
return -EINVAL;
}
if ((spi->mode & SPI_LSB_FIRST) == 0) {
dev_err(&spi->dev, "unsupported MSB first mode\n");
return -EINVAL;
}
if ((spi->mode & SPI_3WIRE) == 0) {
dev_err(&spi->dev, "unsupported wiring. 3 wires required\n");
return -EINVAL;
}
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id spi_lp8841_rtc_dt_ids[] = {
{ .compatible = "icpdas,lp8841-spi-rtc" },
{ }
};
MODULE_DEVICE_TABLE(of, spi_lp8841_rtc_dt_ids);
#endif
static int
spi_lp8841_rtc_probe(struct platform_device *pdev)
{
int ret;
struct spi_master *master;
struct spi_lp8841_rtc *data;
master = spi_alloc_master(&pdev->dev, sizeof(*data));
if (!master)
return -ENOMEM;
platform_set_drvdata(pdev, master);
master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->mode_bits = SPI_CS_HIGH | SPI_3WIRE | SPI_LSB_FIRST;
master->bus_num = pdev->id;
master->num_chipselect = 1;
master->setup = spi_lp8841_rtc_setup;
master->set_cs = spi_lp8841_rtc_set_cs;
master->transfer_one = spi_lp8841_rtc_transfer_one;
master->bits_per_word_mask = SPI_BPW_MASK(8);
#ifdef CONFIG_OF
master->dev.of_node = pdev->dev.of_node;
#endif
data = spi_master_get_devdata(master);
data->iomem = devm_platform_ioremap_resource(pdev, 0);
ret = PTR_ERR_OR_ZERO(data->iomem);
if (ret) {
dev_err(&pdev->dev, "failed to get IO address\n");
goto err_put_master;
}
/* register with the SPI framework */
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "cannot register spi master\n");
goto err_put_master;
}
return ret;
err_put_master:
spi_master_put(master);
return ret;
}
MODULE_ALIAS("platform:" DRIVER_NAME);
static struct platform_driver spi_lp8841_rtc_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(spi_lp8841_rtc_dt_ids),
},
.probe = spi_lp8841_rtc_probe,
};
module_platform_driver(spi_lp8841_rtc_driver);
MODULE_DESCRIPTION("SPI master driver for ICP DAS LP-8841 RTC");
MODULE_AUTHOR("Sergei Ianovich");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-lp8841-rtc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SPI host driver using generic bitbanged GPIO
*
* Copyright (C) 2006,2008 David Brownell
* Copyright (C) 2017 Linus Walleij
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/spi_gpio.h>
/*
* This bitbanging SPI host driver should help make systems usable
* when a native hardware SPI engine is not available, perhaps because
* its driver isn't yet working or because the I/O pins it requires
* are used for other purposes.
*
* platform_device->driver_data ... points to spi_gpio
*
* spi->controller_state ... reserved for bitbang framework code
*
* spi->controller->dev.driver_data ... points to spi_gpio->bitbang
*/
struct spi_gpio {
struct spi_bitbang bitbang;
struct gpio_desc *sck;
struct gpio_desc *miso;
struct gpio_desc *mosi;
struct gpio_desc **cs_gpios;
};
/*----------------------------------------------------------------------*/
/*
* Because the overhead of going through four GPIO procedure calls
* per transferred bit can make performance a problem, this code
* is set up so that you can use it in either of two ways:
*
* - The slow generic way: set up platform_data to hold the GPIO
* numbers used for MISO/MOSI/SCK, and issue procedure calls for
* each of them. This driver can handle several such busses.
*
* - The quicker inlined way: only helps with platform GPIO code
* that inlines operations for constant GPIOs. This can give
* you tight (fast!) inner loops, but each such bus needs a
* new driver. You'll define a new C file, with Makefile and
* Kconfig support; the C code can be a total of six lines:
*
* #define DRIVER_NAME "myboard_spi2"
* #define SPI_MISO_GPIO 119
* #define SPI_MOSI_GPIO 120
* #define SPI_SCK_GPIO 121
* #define SPI_N_CHIPSEL 4
* #include "spi-gpio.c"
*/
#ifndef DRIVER_NAME
#define DRIVER_NAME "spi_gpio"
#define GENERIC_BITBANG /* vs tight inlines */
#endif
/*----------------------------------------------------------------------*/
static inline struct spi_gpio *__pure
spi_to_spi_gpio(const struct spi_device *spi)
{
const struct spi_bitbang *bang;
struct spi_gpio *spi_gpio;
bang = spi_controller_get_devdata(spi->controller);
spi_gpio = container_of(bang, struct spi_gpio, bitbang);
return spi_gpio;
}
/* These helpers are in turn called by the bitbang inlines */
static inline void setsck(const struct spi_device *spi, int is_on)
{
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
gpiod_set_value_cansleep(spi_gpio->sck, is_on);
}
static inline void setmosi(const struct spi_device *spi, int is_on)
{
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
gpiod_set_value_cansleep(spi_gpio->mosi, is_on);
}
static inline int getmiso(const struct spi_device *spi)
{
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
if (spi->mode & SPI_3WIRE)
return !!gpiod_get_value_cansleep(spi_gpio->mosi);
else
return !!gpiod_get_value_cansleep(spi_gpio->miso);
}
/*
* NOTE: this clocks "as fast as we can". It "should" be a function of the
* requested device clock. Software overhead means we usually have trouble
* reaching even one Mbit/sec (except when we can inline bitops), so for now
* we'll just assume we never need additional per-bit slowdowns.
*/
#define spidelay(nsecs) do {} while (0)
#include "spi-bitbang-txrx.h"
/*
* These functions can leverage inline expansion of GPIO calls to shrink
* costs for a txrx bit, often by factors of around ten (by instruction
* count). That is particularly visible for larger word sizes, but helps
* even with default 8-bit words.
*
* REVISIT overheads calling these functions for each word also have
* significant performance costs. Having txrx_bufs() calls that inline
* the txrx_word() logic would help performance, e.g. on larger blocks
* used with flash storage or MMC/SD. There should also be ways to make
* GCC be less stupid about reloading registers inside the I/O loops,
* even without inlined GPIO calls; __attribute__((hot)) on GCC 4.3?
*/
static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
if (unlikely(spi->mode & SPI_LSB_FIRST))
return bitbang_txrx_le_cpha0(spi, nsecs, 0, flags, word, bits);
else
return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
if (unlikely(spi->mode & SPI_LSB_FIRST))
return bitbang_txrx_le_cpha1(spi, nsecs, 0, flags, word, bits);
else
return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
if (unlikely(spi->mode & SPI_LSB_FIRST))
return bitbang_txrx_le_cpha0(spi, nsecs, 1, flags, word, bits);
else
return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
if (unlikely(spi->mode & SPI_LSB_FIRST))
return bitbang_txrx_le_cpha1(spi, nsecs, 1, flags, word, bits);
else
return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
/*
* These functions do not call setmosi or getmiso if respective flag
* (SPI_CONTROLLER_NO_RX or SPI_CONTROLLER_NO_TX) is set, so they are safe to
* call when such pin is not present or defined in the controller.
* A separate set of callbacks is defined to get the highest possible
* speed in the generic case (when both MISO and MOSI lines are
* available), as the optimiser will remove the checks when the argument
* is constant.
*/
static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
flags = spi->controller->flags;
if (unlikely(spi->mode & SPI_LSB_FIRST))
return bitbang_txrx_le_cpha0(spi, nsecs, 0, flags, word, bits);
else
return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
flags = spi->controller->flags;
if (unlikely(spi->mode & SPI_LSB_FIRST))
return bitbang_txrx_le_cpha1(spi, nsecs, 0, flags, word, bits);
else
return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
flags = spi->controller->flags;
if (unlikely(spi->mode & SPI_LSB_FIRST))
return bitbang_txrx_le_cpha0(spi, nsecs, 1, flags, word, bits);
else
return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
flags = spi->controller->flags;
if (unlikely(spi->mode & SPI_LSB_FIRST))
return bitbang_txrx_le_cpha1(spi, nsecs, 1, flags, word, bits);
else
return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
/*----------------------------------------------------------------------*/
static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
{
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
/* set initial clock line level */
if (is_active)
gpiod_set_value_cansleep(spi_gpio->sck, spi->mode & SPI_CPOL);
/* Drive chip select line, if we have one */
if (spi_gpio->cs_gpios) {
struct gpio_desc *cs = spi_gpio->cs_gpios[spi_get_chipselect(spi, 0)];
/* SPI chip selects are normally active-low */
gpiod_set_value_cansleep(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
}
}
static int spi_gpio_setup(struct spi_device *spi)
{
struct gpio_desc *cs;
int status = 0;
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
/*
* The CS GPIOs have already been
* initialized from the descriptor lookup.
*/
if (spi_gpio->cs_gpios) {
cs = spi_gpio->cs_gpios[spi_get_chipselect(spi, 0)];
if (!spi->controller_state && cs)
status = gpiod_direction_output(cs,
!(spi->mode & SPI_CS_HIGH));
}
if (!status)
status = spi_bitbang_setup(spi);
return status;
}
static int spi_gpio_set_direction(struct spi_device *spi, bool output)
{
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
int ret;
if (output)
return gpiod_direction_output(spi_gpio->mosi, 1);
/*
* Only change MOSI to an input if using 3WIRE mode.
* Otherwise, MOSI could be left floating if there is
* no pull resistor connected to the I/O pin, or could
* be left logic high if there is a pull-up. Transmitting
* logic high when only clocking MISO data in can put some
* SPI devices in to a bad state.
*/
if (spi->mode & SPI_3WIRE) {
ret = gpiod_direction_input(spi_gpio->mosi);
if (ret)
return ret;
}
/*
* Send a turnaround high impedance cycle when switching
* from output to input. Theoretically there should be
* a clock delay here, but as has been noted above, the
* nsec delay function for bit-banged GPIO is simply
* {} because bit-banging just doesn't get fast enough
* anyway.
*/
if (spi->mode & SPI_3WIRE_HIZ) {
gpiod_set_value_cansleep(spi_gpio->sck,
!(spi->mode & SPI_CPOL));
gpiod_set_value_cansleep(spi_gpio->sck,
!!(spi->mode & SPI_CPOL));
}
return 0;
}
static void spi_gpio_cleanup(struct spi_device *spi)
{
spi_bitbang_cleanup(spi);
}
/*
* It can be convenient to use this driver with pins that have alternate
* functions associated with a "native" SPI controller if a driver for that
* controller is not available, or is missing important functionality.
*
* On platforms which can do so, configure MISO with a weak pullup unless
* there's an external pullup on that signal. That saves power by avoiding
* floating signals. (A weak pulldown would save power too, but many
* drivers expect to see all-ones data as the "no target" response.)
*/
static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio)
{
spi_gpio->mosi = devm_gpiod_get_optional(dev, "mosi", GPIOD_OUT_LOW);
if (IS_ERR(spi_gpio->mosi))
return PTR_ERR(spi_gpio->mosi);
spi_gpio->miso = devm_gpiod_get_optional(dev, "miso", GPIOD_IN);
if (IS_ERR(spi_gpio->miso))
return PTR_ERR(spi_gpio->miso);
spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
return PTR_ERR_OR_ZERO(spi_gpio->sck);
}
#ifdef CONFIG_OF
static const struct of_device_id spi_gpio_dt_ids[] = {
{ .compatible = "spi-gpio" },
{}
};
MODULE_DEVICE_TABLE(of, spi_gpio_dt_ids);
static int spi_gpio_probe_dt(struct platform_device *pdev,
struct spi_controller *host)
{
host->dev.of_node = pdev->dev.of_node;
host->use_gpio_descriptors = true;
return 0;
}
#else
static inline int spi_gpio_probe_dt(struct platform_device *pdev,
struct spi_controller *host)
{
return 0;
}
#endif
static int spi_gpio_probe_pdata(struct platform_device *pdev,
struct spi_controller *host)
{
struct device *dev = &pdev->dev;
struct spi_gpio_platform_data *pdata = dev_get_platdata(dev);
struct spi_gpio *spi_gpio = spi_controller_get_devdata(host);
int i;
#ifdef GENERIC_BITBANG
if (!pdata || !pdata->num_chipselect)
return -ENODEV;
#endif
/*
* The host needs to think there is a chipselect even if not
* connected
*/
host->num_chipselect = pdata->num_chipselect ?: 1;
spi_gpio->cs_gpios = devm_kcalloc(dev, host->num_chipselect,
sizeof(*spi_gpio->cs_gpios),
GFP_KERNEL);
if (!spi_gpio->cs_gpios)
return -ENOMEM;
for (i = 0; i < host->num_chipselect; i++) {
spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", i,
GPIOD_OUT_HIGH);
if (IS_ERR(spi_gpio->cs_gpios[i]))
return PTR_ERR(spi_gpio->cs_gpios[i]);
}
return 0;
}
static int spi_gpio_probe(struct platform_device *pdev)
{
int status;
struct spi_controller *host;
struct spi_gpio *spi_gpio;
struct device *dev = &pdev->dev;
struct spi_bitbang *bb;
host = devm_spi_alloc_host(dev, sizeof(*spi_gpio));
if (!host)
return -ENOMEM;
if (pdev->dev.of_node)
status = spi_gpio_probe_dt(pdev, host);
else
status = spi_gpio_probe_pdata(pdev, host);
if (status)
return status;
spi_gpio = spi_controller_get_devdata(host);
status = spi_gpio_request(dev, spi_gpio);
if (status)
return status;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
host->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
SPI_CS_HIGH | SPI_LSB_FIRST;
if (!spi_gpio->mosi) {
/* HW configuration without MOSI pin
*
* No setting SPI_CONTROLLER_NO_RX here - if there is only
* a MOSI pin connected the host can still do RX by
* changing the direction of the line.
*/
host->flags = SPI_CONTROLLER_NO_TX;
}
host->bus_num = pdev->id;
host->setup = spi_gpio_setup;
host->cleanup = spi_gpio_cleanup;
bb = &spi_gpio->bitbang;
bb->master = host;
/*
* There is some additional business, apart from driving the CS GPIO
* line, that we need to do on selection. This makes the local
* callback for chipselect always get called.
*/
host->flags |= SPI_CONTROLLER_GPIO_SS;
bb->chipselect = spi_gpio_chipselect;
bb->set_line_direction = spi_gpio_set_direction;
if (host->flags & SPI_CONTROLLER_NO_TX) {
bb->txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0;
bb->txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1;
bb->txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2;
bb->txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
} else {
bb->txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
bb->txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
bb->txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
bb->txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3;
}
bb->setup_transfer = spi_bitbang_setup_transfer;
status = spi_bitbang_init(&spi_gpio->bitbang);
if (status)
return status;
return devm_spi_register_controller(&pdev->dev, host);
}
MODULE_ALIAS("platform:" DRIVER_NAME);
static struct platform_driver spi_gpio_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(spi_gpio_dt_ids),
},
.probe = spi_gpio_probe,
};
module_platform_driver(spi_gpio_driver);
MODULE_DESCRIPTION("SPI host driver using generic bitbanged GPIO ");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-gpio.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003-2015 Broadcom Corporation
* All Rights Reserved
*/
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
/* SPI Configuration Register */
#define XLP_SPI_CONFIG 0x00
#define XLP_SPI_CPHA BIT(0)
#define XLP_SPI_CPOL BIT(1)
#define XLP_SPI_CS_POL BIT(2)
#define XLP_SPI_TXMISO_EN BIT(3)
#define XLP_SPI_TXMOSI_EN BIT(4)
#define XLP_SPI_RXMISO_EN BIT(5)
#define XLP_SPI_CS_LSBFE BIT(10)
#define XLP_SPI_RXCAP_EN BIT(11)
/* SPI Frequency Divider Register */
#define XLP_SPI_FDIV 0x04
/* SPI Command Register */
#define XLP_SPI_CMD 0x08
#define XLP_SPI_CMD_IDLE_MASK 0x0
#define XLP_SPI_CMD_TX_MASK 0x1
#define XLP_SPI_CMD_RX_MASK 0x2
#define XLP_SPI_CMD_TXRX_MASK 0x3
#define XLP_SPI_CMD_CONT BIT(4)
#define XLP_SPI_XFR_BITCNT_SHIFT 16
/* SPI Status Register */
#define XLP_SPI_STATUS 0x0c
#define XLP_SPI_XFR_PENDING BIT(0)
#define XLP_SPI_XFR_DONE BIT(1)
#define XLP_SPI_TX_INT BIT(2)
#define XLP_SPI_RX_INT BIT(3)
#define XLP_SPI_TX_UF BIT(4)
#define XLP_SPI_RX_OF BIT(5)
#define XLP_SPI_STAT_MASK 0x3f
/* SPI Interrupt Enable Register */
#define XLP_SPI_INTR_EN 0x10
#define XLP_SPI_INTR_DONE BIT(0)
#define XLP_SPI_INTR_TXTH BIT(1)
#define XLP_SPI_INTR_RXTH BIT(2)
#define XLP_SPI_INTR_TXUF BIT(3)
#define XLP_SPI_INTR_RXOF BIT(4)
/* SPI FIFO Threshold Register */
#define XLP_SPI_FIFO_THRESH 0x14
/* SPI FIFO Word Count Register */
#define XLP_SPI_FIFO_WCNT 0x18
#define XLP_SPI_RXFIFO_WCNT_MASK 0xf
#define XLP_SPI_TXFIFO_WCNT_MASK 0xf0
#define XLP_SPI_TXFIFO_WCNT_SHIFT 4
/* SPI Transmit Data FIFO Register */
#define XLP_SPI_TXDATA_FIFO 0x1c
/* SPI Receive Data FIFO Register */
#define XLP_SPI_RXDATA_FIFO 0x20
/* SPI System Control Register */
#define XLP_SPI_SYSCTRL 0x100
#define XLP_SPI_SYS_RESET BIT(0)
#define XLP_SPI_SYS_CLKDIS BIT(1)
#define XLP_SPI_SYS_PMEN BIT(8)
#define SPI_CS_OFFSET 0x40
#define XLP_SPI_TXRXTH 0x80
#define XLP_SPI_FIFO_SIZE 8
#define XLP_SPI_MAX_CS 4
#define XLP_SPI_DEFAULT_FREQ 133333333
#define XLP_SPI_FDIV_MIN 4
#define XLP_SPI_FDIV_MAX 65535
/*
* The SPI controller can transfer only 28 bytes reliably at a time, so
* split larger transfers into chunks of at most 28 bytes.
*/
#define XLP_SPI_XFER_SIZE 28
struct xlp_spi_priv {
struct device dev; /* device structure */
void __iomem *base; /* spi registers base address */
const u8 *tx_buf; /* tx data buffer */
u8 *rx_buf; /* rx data buffer */
int tx_len; /* tx xfer length */
int rx_len; /* rx xfer length */
int txerrors; /* TXFIFO underflow count */
int rxerrors; /* RXFIFO overflow count */
int cs; /* slave device chip select */
u32 spi_clk; /* spi clock frequency */
bool cmd_cont; /* cs active */
struct completion done; /* completion notification */
};
static inline u32 xlp_spi_reg_read(struct xlp_spi_priv *priv,
int cs, int regoff)
{
return readl(priv->base + regoff + cs * SPI_CS_OFFSET);
}
static inline void xlp_spi_reg_write(struct xlp_spi_priv *priv, int cs,
int regoff, u32 val)
{
writel(val, priv->base + regoff + cs * SPI_CS_OFFSET);
}
static inline void xlp_spi_sysctl_write(struct xlp_spi_priv *priv,
int regoff, u32 val)
{
writel(val, priv->base + regoff);
}
/*
* Setup global SPI_SYSCTRL register for all SPI channels.
*/
static void xlp_spi_sysctl_setup(struct xlp_spi_priv *xspi)
{
int cs;
for (cs = 0; cs < XLP_SPI_MAX_CS; cs++)
xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL,
XLP_SPI_SYS_RESET << cs);
xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL, XLP_SPI_SYS_PMEN);
}
static int xlp_spi_setup(struct spi_device *spi)
{
struct xlp_spi_priv *xspi;
u32 fdiv, cfg;
int cs;
xspi = spi_master_get_devdata(spi->master);
cs = spi_get_chipselect(spi, 0);
/*
* The value of fdiv must be between 4 and 65535.
*/
fdiv = DIV_ROUND_UP(xspi->spi_clk, spi->max_speed_hz);
if (fdiv > XLP_SPI_FDIV_MAX)
fdiv = XLP_SPI_FDIV_MAX;
else if (fdiv < XLP_SPI_FDIV_MIN)
fdiv = XLP_SPI_FDIV_MIN;
xlp_spi_reg_write(xspi, cs, XLP_SPI_FDIV, fdiv);
xlp_spi_reg_write(xspi, cs, XLP_SPI_FIFO_THRESH, XLP_SPI_TXRXTH);
cfg = xlp_spi_reg_read(xspi, cs, XLP_SPI_CONFIG);
if (spi->mode & SPI_CPHA)
cfg |= XLP_SPI_CPHA;
else
cfg &= ~XLP_SPI_CPHA;
if (spi->mode & SPI_CPOL)
cfg |= XLP_SPI_CPOL;
else
cfg &= ~XLP_SPI_CPOL;
if (!(spi->mode & SPI_CS_HIGH))
cfg |= XLP_SPI_CS_POL;
else
cfg &= ~XLP_SPI_CS_POL;
if (spi->mode & SPI_LSB_FIRST)
cfg |= XLP_SPI_CS_LSBFE;
else
cfg &= ~XLP_SPI_CS_LSBFE;
cfg |= XLP_SPI_TXMOSI_EN | XLP_SPI_RXMISO_EN;
if (fdiv == 4)
cfg |= XLP_SPI_RXCAP_EN;
xlp_spi_reg_write(xspi, cs, XLP_SPI_CONFIG, cfg);
return 0;
}
static void xlp_spi_read_rxfifo(struct xlp_spi_priv *xspi)
{
u32 rx_data, rxfifo_cnt;
int i, j, nbytes;
rxfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT);
rxfifo_cnt &= XLP_SPI_RXFIFO_WCNT_MASK;
while (rxfifo_cnt) {
rx_data = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_RXDATA_FIFO);
j = 0;
nbytes = min(xspi->rx_len, 4);
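/*
* The FIFO word packs the chunk big-endian style: the first byte of the
* chunk occupies the most significant of the used bytes, so unpack from
* the word's LSB upwards while walking the rx buffer backwards.
*/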
for (i = nbytes - 1; i >= 0; i--, j++)
xspi->rx_buf[i] = (rx_data >> (j * 8)) & 0xff;
xspi->rx_len -= nbytes;
xspi->rx_buf += nbytes;
rxfifo_cnt--;
}
}
static void xlp_spi_fill_txfifo(struct xlp_spi_priv *xspi)
{
u32 tx_data, txfifo_cnt;
int i, j, nbytes;
txfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT);
txfifo_cnt &= XLP_SPI_TXFIFO_WCNT_MASK;
txfifo_cnt >>= XLP_SPI_TXFIFO_WCNT_SHIFT;
while (xspi->tx_len && (txfifo_cnt < XLP_SPI_FIFO_SIZE)) {
j = 0;
tx_data = 0;
nbytes = min(xspi->tx_len, 4);
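/*
* Pack the chunk big-endian style into the 32-bit FIFO word: the first
* byte of the chunk goes into the most significant of the used bytes.
*/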
for (i = nbytes - 1; i >= 0; i--, j++)
tx_data |= xspi->tx_buf[i] << (j * 8);
xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_TXDATA_FIFO, tx_data);
xspi->tx_len -= nbytes;
xspi->tx_buf += nbytes;
txfifo_cnt++;
}
}
static irqreturn_t xlp_spi_interrupt(int irq, void *dev_id)
{
struct xlp_spi_priv *xspi = dev_id;
u32 stat;
stat = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_STATUS) &
XLP_SPI_STAT_MASK;
if (!stat)
return IRQ_NONE;
if (stat & XLP_SPI_TX_INT) {
if (xspi->tx_len)
xlp_spi_fill_txfifo(xspi);
if (stat & XLP_SPI_TX_UF)
xspi->txerrors++;
}
if (stat & XLP_SPI_RX_INT) {
if (xspi->rx_len)
xlp_spi_read_rxfifo(xspi);
if (stat & XLP_SPI_RX_OF)
xspi->rxerrors++;
}
/* write status back to clear interrupts */
xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_STATUS, stat);
if (stat & XLP_SPI_XFR_DONE)
complete(&xspi->done);
return IRQ_HANDLED;
}
static void xlp_spi_send_cmd(struct xlp_spi_priv *xspi, int xfer_len,
int cmd_cont)
{
u32 cmd = 0;
if (xspi->tx_buf)
cmd |= XLP_SPI_CMD_TX_MASK;
if (xspi->rx_buf)
cmd |= XLP_SPI_CMD_RX_MASK;
if (cmd_cont)
cmd |= XLP_SPI_CMD_CONT;
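/* the transfer bit-count field is programmed as (number of bits - 1) */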
cmd |= ((xfer_len * 8 - 1) << XLP_SPI_XFR_BITCNT_SHIFT);
xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_CMD, cmd);
}
static int xlp_spi_xfer_block(struct xlp_spi_priv *xs,
const unsigned char *tx_buf,
unsigned char *rx_buf, int xfer_len, int cmd_cont)
{
int timeout;
u32 intr_mask = 0;
xs->tx_buf = tx_buf;
xs->rx_buf = rx_buf;
xs->tx_len = (xs->tx_buf == NULL) ? 0 : xfer_len;
xs->rx_len = (xs->rx_buf == NULL) ? 0 : xfer_len;
xs->txerrors = xs->rxerrors = 0;
/* fill TXDATA_FIFO, then send the CMD */
if (xs->tx_len)
xlp_spi_fill_txfifo(xs);
xlp_spi_send_cmd(xs, xfer_len, cmd_cont);
/*
* We are getting some spurious TX interrupts, so avoid enabling
* TX interrupts when only RX is in progress.
* Enable all the interrupts in the TX case.
*/
if (xs->tx_len)
intr_mask |= XLP_SPI_INTR_TXTH | XLP_SPI_INTR_TXUF |
XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF;
else
intr_mask |= XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF;
intr_mask |= XLP_SPI_INTR_DONE;
xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, intr_mask);
timeout = wait_for_completion_timeout(&xs->done,
msecs_to_jiffies(1000));
/* Disable interrupts */
xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, 0x0);
if (!timeout) {
dev_err(&xs->dev, "xfer timedout!\n");
goto out;
}
if (xs->txerrors || xs->rxerrors)
dev_err(&xs->dev, "Over/Underflow rx %d tx %d xfer %d!\n",
xs->rxerrors, xs->txerrors, xfer_len);
return xfer_len;
out:
return -ETIMEDOUT;
}
static int xlp_spi_txrx_bufs(struct xlp_spi_priv *xs, struct spi_transfer *t)
{
int bytesleft, sz;
unsigned char *rx_buf;
const unsigned char *tx_buf;
tx_buf = t->tx_buf;
rx_buf = t->rx_buf;
bytesleft = t->len;
while (bytesleft) {
if (bytesleft > XLP_SPI_XFER_SIZE)
sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf,
XLP_SPI_XFER_SIZE, 1);
else
sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf,
bytesleft, xs->cmd_cont);
if (sz < 0)
return sz;
bytesleft -= sz;
if (tx_buf)
tx_buf += sz;
if (rx_buf)
rx_buf += sz;
}
return bytesleft;
}
static int xlp_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct xlp_spi_priv *xspi = spi_master_get_devdata(master);
int ret = 0;
xspi->cs = spi_get_chipselect(spi, 0);
xspi->dev = spi->dev;
if (spi_transfer_is_last(master, t))
xspi->cmd_cont = 0;
else
xspi->cmd_cont = 1;
if (xlp_spi_txrx_bufs(xspi, t))
ret = -EIO;
spi_finalize_current_transfer(master);
return ret;
}
static int xlp_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct xlp_spi_priv *xspi;
struct clk *clk;
int irq, err;
xspi = devm_kzalloc(&pdev->dev, sizeof(*xspi), GFP_KERNEL);
if (!xspi)
return -ENOMEM;
xspi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xspi->base))
return PTR_ERR(xspi->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
err = devm_request_irq(&pdev->dev, irq, xlp_spi_interrupt, 0,
pdev->name, xspi);
if (err) {
dev_err(&pdev->dev, "unable to request irq %d\n", irq);
return err;
}
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "could not get spi clock\n");
return PTR_ERR(clk);
}
xspi->spi_clk = clk_get_rate(clk);
master = spi_alloc_master(&pdev->dev, 0);
if (!master) {
dev_err(&pdev->dev, "could not alloc master\n");
return -ENOMEM;
}
master->bus_num = 0;
master->num_chipselect = XLP_SPI_MAX_CS;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = xlp_spi_setup;
master->transfer_one = xlp_spi_transfer_one;
master->dev.of_node = pdev->dev.of_node;
init_completion(&xspi->done);
spi_master_set_devdata(master, xspi);
xlp_spi_sysctl_setup(xspi);
/* register spi controller */
err = devm_spi_register_master(&pdev->dev, master);
if (err) {
dev_err(&pdev->dev, "spi register master failed!\n");
spi_master_put(master);
return err;
}
return 0;
}
#ifdef CONFIG_ACPI
static const struct acpi_device_id xlp_spi_acpi_match[] = {
{ "BRCM900D", 0 },
{ "CAV900D", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, xlp_spi_acpi_match);
#endif
static struct platform_driver xlp_spi_driver = {
.probe = xlp_spi_probe,
.driver = {
.name = "xlp-spi",
.acpi_match_table = ACPI_PTR(xlp_spi_acpi_match),
},
};
module_platform_driver(xlp_spi_driver);
MODULE_AUTHOR("Kamlakant Patel <[email protected]>");
MODULE_DESCRIPTION("Netlogic XLP SPI controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-xlp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Broadcom BCM63xx SPI controller support
*
* Copyright (C) 2009-2012 Florian Fainelli <[email protected]>
* Copyright (C) 2010 Tanguy Bouzeloc <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/reset.h>
/* BCM 6338/6348 SPI core */
#define SPI_6348_RSET_SIZE 64
#define SPI_6348_CMD 0x00 /* 16-bit register */
#define SPI_6348_INT_STATUS 0x02
#define SPI_6348_INT_MASK_ST 0x03
#define SPI_6348_INT_MASK 0x04
#define SPI_6348_ST 0x05
#define SPI_6348_CLK_CFG 0x06
#define SPI_6348_FILL_BYTE 0x07
#define SPI_6348_MSG_TAIL 0x09
#define SPI_6348_RX_TAIL 0x0b
#define SPI_6348_MSG_CTL 0x40 /* 8-bit register */
#define SPI_6348_MSG_CTL_WIDTH 8
#define SPI_6348_MSG_DATA 0x41
#define SPI_6348_MSG_DATA_SIZE 0x3f
#define SPI_6348_RX_DATA 0x80
#define SPI_6348_RX_DATA_SIZE 0x3f
/* BCM 3368/6358/6262/6368 SPI core */
#define SPI_6358_RSET_SIZE 1804
#define SPI_6358_MSG_CTL 0x00 /* 16-bit register */
#define SPI_6358_MSG_CTL_WIDTH 16
#define SPI_6358_MSG_DATA 0x02
#define SPI_6358_MSG_DATA_SIZE 0x21e
#define SPI_6358_RX_DATA 0x400
#define SPI_6358_RX_DATA_SIZE 0x220
#define SPI_6358_CMD 0x700 /* 16-bit register */
#define SPI_6358_INT_STATUS 0x702
#define SPI_6358_INT_MASK_ST 0x703
#define SPI_6358_INT_MASK 0x704
#define SPI_6358_ST 0x705
#define SPI_6358_CLK_CFG 0x706
#define SPI_6358_FILL_BYTE 0x707
#define SPI_6358_MSG_TAIL 0x709
#define SPI_6358_RX_TAIL 0x70B
/* Shared SPI definitions */
/* Message configuration */
#define SPI_FD_RW 0x00
#define SPI_HD_W 0x01
#define SPI_HD_R 0x02
#define SPI_BYTE_CNT_SHIFT 0
#define SPI_6348_MSG_TYPE_SHIFT 6
#define SPI_6358_MSG_TYPE_SHIFT 14
/* Command */
#define SPI_CMD_NOOP 0x00
#define SPI_CMD_SOFT_RESET 0x01
#define SPI_CMD_HARD_RESET 0x02
#define SPI_CMD_START_IMMEDIATE 0x03
#define SPI_CMD_COMMAND_SHIFT 0
#define SPI_CMD_COMMAND_MASK 0x000f
#define SPI_CMD_DEVICE_ID_SHIFT 4
#define SPI_CMD_PREPEND_BYTE_CNT_SHIFT 8
#define SPI_CMD_ONE_BYTE_SHIFT 11
#define SPI_CMD_ONE_WIRE_SHIFT 12
#define SPI_DEV_ID_0 0
#define SPI_DEV_ID_1 1
#define SPI_DEV_ID_2 2
#define SPI_DEV_ID_3 3
/* Interrupt mask */
#define SPI_INTR_CMD_DONE 0x01
#define SPI_INTR_RX_OVERFLOW 0x02
#define SPI_INTR_TX_UNDERFLOW 0x04
#define SPI_INTR_TX_OVERFLOW 0x08
#define SPI_INTR_RX_UNDERFLOW 0x10
#define SPI_INTR_CLEAR_ALL 0x1f
/* Status */
#define SPI_RX_EMPTY 0x02
#define SPI_CMD_BUSY 0x04
#define SPI_SERIAL_BUSY 0x08
/* Clock configuration */
#define SPI_CLK_20MHZ 0x00
#define SPI_CLK_0_391MHZ 0x01
#define SPI_CLK_0_781MHZ 0x02 /* default */
#define SPI_CLK_1_563MHZ 0x03
#define SPI_CLK_3_125MHZ 0x04
#define SPI_CLK_6_250MHZ 0x05
#define SPI_CLK_12_50MHZ 0x06
#define SPI_CLK_MASK 0x07
#define SPI_SSOFFTIME_MASK 0x38
#define SPI_SSOFFTIME_SHIFT 3
#define SPI_BYTE_SWAP 0x80
enum bcm63xx_regs_spi {
SPI_CMD,
SPI_INT_STATUS,
SPI_INT_MASK_ST,
SPI_INT_MASK,
SPI_ST,
SPI_CLK_CFG,
SPI_FILL_BYTE,
SPI_MSG_TAIL,
SPI_RX_TAIL,
SPI_MSG_CTL,
SPI_MSG_DATA,
SPI_RX_DATA,
SPI_MSG_TYPE_SHIFT,
SPI_MSG_CTL_WIDTH,
SPI_MSG_DATA_SIZE,
};
#define BCM63XX_SPI_MAX_PREPEND 7
#define BCM63XX_SPI_MAX_CS 8
#define BCM63XX_SPI_BUS_NUM 0
struct bcm63xx_spi {
struct completion done;
void __iomem *regs;
int irq;
/* Platform data */
const unsigned long *reg_offsets;
unsigned int fifo_size;
unsigned int msg_type_shift;
unsigned int msg_ctl_width;
/* data iomem */
u8 __iomem *tx_io;
const u8 __iomem *rx_io;
struct clk *clk;
struct platform_device *pdev;
};
static inline u8 bcm_spi_readb(struct bcm63xx_spi *bs,
unsigned int offset)
{
return readb(bs->regs + bs->reg_offsets[offset]);
}
static inline void bcm_spi_writeb(struct bcm63xx_spi *bs,
u8 value, unsigned int offset)
{
writeb(value, bs->regs + bs->reg_offsets[offset]);
}
static inline void bcm_spi_writew(struct bcm63xx_spi *bs,
u16 value, unsigned int offset)
{
#ifdef CONFIG_CPU_BIG_ENDIAN
iowrite16be(value, bs->regs + bs->reg_offsets[offset]);
#else
writew(value, bs->regs + bs->reg_offsets[offset]);
#endif
}
static const unsigned int bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
{ 20000000, SPI_CLK_20MHZ },
{ 12500000, SPI_CLK_12_50MHZ },
{ 6250000, SPI_CLK_6_250MHZ },
{ 3125000, SPI_CLK_3_125MHZ },
{ 1563000, SPI_CLK_1_563MHZ },
{ 781000, SPI_CLK_0_781MHZ },
{ 391000, SPI_CLK_0_391MHZ }
};
static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct bcm63xx_spi *bs = spi_controller_get_devdata(spi->controller);
u8 clk_cfg, reg;
int i;
/* Default to lowest clock configuration */
clk_cfg = SPI_CLK_0_391MHZ;
/* Find the closest clock configuration */
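/*
* The table is ordered fastest first, so this picks the fastest rate
* not exceeding the requested speed; e.g. an 8 MHz request falls
* through to SPI_CLK_6_250MHZ.
*/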
for (i = 0; i < SPI_CLK_MASK; i++) {
if (t->speed_hz >= bcm63xx_spi_freq_table[i][0]) {
clk_cfg = bcm63xx_spi_freq_table[i][1];
break;
}
}
/* clear existing clock configuration bits of the register */
reg = bcm_spi_readb(bs, SPI_CLK_CFG);
reg &= ~SPI_CLK_MASK;
reg |= clk_cfg;
bcm_spi_writeb(bs, reg, SPI_CLK_CFG);
dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n",
clk_cfg, t->speed_hz);
}
/* the spi->mode bits understood by this driver: */
#define MODEBITS (SPI_CPOL | SPI_CPHA)
static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
unsigned int num_transfers)
{
struct bcm63xx_spi *bs = spi_controller_get_devdata(spi->controller);
u16 msg_ctl;
u16 cmd;
unsigned int i, timeout = 0, prepend_len = 0, len = 0;
struct spi_transfer *t = first;
bool do_rx = false;
bool do_tx = false;
/* Disable the CMD_DONE interrupt */
bcm_spi_writeb(bs, 0, SPI_INT_MASK);
dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
t->tx_buf, t->rx_buf, t->len);
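/*
* If the message starts with a short tx-only transfer (at most 7 bytes,
* typically a command or address phase), it can be sent through the
* controller's prepend feature ahead of a half-duplex read.
*/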
if (num_transfers > 1 && t->tx_buf && t->len <= BCM63XX_SPI_MAX_PREPEND)
prepend_len = t->len;
/* prepare the buffer */
for (i = 0; i < num_transfers; i++) {
if (t->tx_buf) {
do_tx = true;
memcpy_toio(bs->tx_io + len, t->tx_buf, t->len);
/* don't prepend more than one tx */
if (t != first)
prepend_len = 0;
}
if (t->rx_buf) {
do_rx = true;
/* prepend is half-duplex write only */
if (t == first)
prepend_len = 0;
}
len += t->len;
t = list_entry(t->transfer_list.next, struct spi_transfer,
transfer_list);
}
reinit_completion(&bs->done);
/* Fill in the Message control register */
msg_ctl = (len << SPI_BYTE_CNT_SHIFT);
if (do_rx && do_tx && prepend_len == 0)
msg_ctl |= (SPI_FD_RW << bs->msg_type_shift);
else if (do_rx)
msg_ctl |= (SPI_HD_R << bs->msg_type_shift);
else if (do_tx)
msg_ctl |= (SPI_HD_W << bs->msg_type_shift);
switch (bs->msg_ctl_width) {
case 8:
bcm_spi_writeb(bs, msg_ctl, SPI_MSG_CTL);
break;
case 16:
bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL);
break;
}
/* Issue the transfer */
cmd = SPI_CMD_START_IMMEDIATE;
cmd |= (prepend_len << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
cmd |= (spi_get_chipselect(spi, 0) << SPI_CMD_DEVICE_ID_SHIFT);
bcm_spi_writew(bs, cmd, SPI_CMD);
/* Enable the CMD_DONE interrupt */
bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
timeout = wait_for_completion_timeout(&bs->done, HZ);
if (!timeout)
return -ETIMEDOUT;
if (!do_rx)
return 0;
len = 0;
t = first;
/* Read out all the data */
for (i = 0; i < num_transfers; i++) {
if (t->rx_buf)
memcpy_fromio(t->rx_buf, bs->rx_io + len, t->len);
if (t != first || prepend_len == 0)
len += t->len;
t = list_entry(t->transfer_list.next, struct spi_transfer,
transfer_list);
}
return 0;
}
static int bcm63xx_spi_transfer_one(struct spi_controller *host,
struct spi_message *m)
{
struct bcm63xx_spi *bs = spi_controller_get_devdata(host);
struct spi_transfer *t, *first = NULL;
struct spi_device *spi = m->spi;
int status = 0;
unsigned int n_transfers = 0, total_len = 0;
bool can_use_prepend = false;
/*
* This SPI controller does not support keeping CS active after a
* transfer.
* Work around this by merging as many transfers as we can into one
* big full-duplex transfer.
*/
list_for_each_entry(t, &m->transfers, transfer_list) {
if (!first)
first = t;
n_transfers++;
total_len += t->len;
if (n_transfers == 2 && !first->rx_buf && !t->tx_buf &&
first->len <= BCM63XX_SPI_MAX_PREPEND)
can_use_prepend = true;
else if (can_use_prepend && t->tx_buf)
can_use_prepend = false;
/* we can only transfer one fifo worth of data */
if ((can_use_prepend &&
total_len > (bs->fifo_size + BCM63XX_SPI_MAX_PREPEND)) ||
(!can_use_prepend && total_len > bs->fifo_size)) {
dev_err(&spi->dev, "unable to do transfers larger than FIFO size (%i > %i)\n",
total_len, bs->fifo_size);
status = -EINVAL;
goto exit;
}
/* all combined transfers have to have the same speed */
if (t->speed_hz != first->speed_hz) {
dev_err(&spi->dev, "unable to change speed between transfers\n");
status = -EINVAL;
goto exit;
}
/* CS will be deasserted directly after transfer */
if (t->delay.value) {
dev_err(&spi->dev, "unable to keep CS asserted after transfer\n");
status = -EINVAL;
goto exit;
}
if (t->cs_change ||
list_is_last(&t->transfer_list, &m->transfers)) {
/* configure adapter for a new transfer */
bcm63xx_spi_setup_transfer(spi, first);
/* send the data */
status = bcm63xx_txrx_bufs(spi, first, n_transfers);
if (status)
goto exit;
m->actual_length += total_len;
first = NULL;
n_transfers = 0;
total_len = 0;
can_use_prepend = false;
}
}
exit:
m->status = status;
spi_finalize_current_message(host);
return 0;
}
/* This driver supports single host mode only. Hence
* CMD_DONE is the only interrupt we care about
*/
static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
{
struct spi_controller *host = (struct spi_controller *)dev_id;
struct bcm63xx_spi *bs = spi_controller_get_devdata(host);
u8 intr;
/* Read interrupts and clear them immediately */
intr = bcm_spi_readb(bs, SPI_INT_STATUS);
bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
bcm_spi_writeb(bs, 0, SPI_INT_MASK);
/* A transfer completed */
if (intr & SPI_INTR_CMD_DONE)
complete(&bs->done);
return IRQ_HANDLED;
}
static size_t bcm63xx_spi_max_length(struct spi_device *spi)
{
struct bcm63xx_spi *bs = spi_controller_get_devdata(spi->controller);
return bs->fifo_size;
}
static const unsigned long bcm6348_spi_reg_offsets[] = {
[SPI_CMD] = SPI_6348_CMD,
[SPI_INT_STATUS] = SPI_6348_INT_STATUS,
[SPI_INT_MASK_ST] = SPI_6348_INT_MASK_ST,
[SPI_INT_MASK] = SPI_6348_INT_MASK,
[SPI_ST] = SPI_6348_ST,
[SPI_CLK_CFG] = SPI_6348_CLK_CFG,
[SPI_FILL_BYTE] = SPI_6348_FILL_BYTE,
[SPI_MSG_TAIL] = SPI_6348_MSG_TAIL,
[SPI_RX_TAIL] = SPI_6348_RX_TAIL,
[SPI_MSG_CTL] = SPI_6348_MSG_CTL,
[SPI_MSG_DATA] = SPI_6348_MSG_DATA,
[SPI_RX_DATA] = SPI_6348_RX_DATA,
[SPI_MSG_TYPE_SHIFT] = SPI_6348_MSG_TYPE_SHIFT,
[SPI_MSG_CTL_WIDTH] = SPI_6348_MSG_CTL_WIDTH,
[SPI_MSG_DATA_SIZE] = SPI_6348_MSG_DATA_SIZE,
};
static const unsigned long bcm6358_spi_reg_offsets[] = {
[SPI_CMD] = SPI_6358_CMD,
[SPI_INT_STATUS] = SPI_6358_INT_STATUS,
[SPI_INT_MASK_ST] = SPI_6358_INT_MASK_ST,
[SPI_INT_MASK] = SPI_6358_INT_MASK,
[SPI_ST] = SPI_6358_ST,
[SPI_CLK_CFG] = SPI_6358_CLK_CFG,
[SPI_FILL_BYTE] = SPI_6358_FILL_BYTE,
[SPI_MSG_TAIL] = SPI_6358_MSG_TAIL,
[SPI_RX_TAIL] = SPI_6358_RX_TAIL,
[SPI_MSG_CTL] = SPI_6358_MSG_CTL,
[SPI_MSG_DATA] = SPI_6358_MSG_DATA,
[SPI_RX_DATA] = SPI_6358_RX_DATA,
[SPI_MSG_TYPE_SHIFT] = SPI_6358_MSG_TYPE_SHIFT,
[SPI_MSG_CTL_WIDTH] = SPI_6358_MSG_CTL_WIDTH,
[SPI_MSG_DATA_SIZE] = SPI_6358_MSG_DATA_SIZE,
};
static const struct platform_device_id bcm63xx_spi_dev_match[] = {
{
.name = "bcm6348-spi",
.driver_data = (unsigned long)bcm6348_spi_reg_offsets,
},
{
.name = "bcm6358-spi",
.driver_data = (unsigned long)bcm6358_spi_reg_offsets,
},
{
},
};
static const struct of_device_id bcm63xx_spi_of_match[] = {
{ .compatible = "brcm,bcm6348-spi", .data = &bcm6348_spi_reg_offsets },
{ .compatible = "brcm,bcm6358-spi", .data = &bcm6358_spi_reg_offsets },
{ },
};
static int bcm63xx_spi_probe(struct platform_device *pdev)
{
struct resource *r;
const unsigned long *bcm63xx_spireg;
struct device *dev = &pdev->dev;
int irq, bus_num;
struct spi_controller *host;
struct clk *clk;
struct bcm63xx_spi *bs;
int ret;
u32 num_cs = BCM63XX_SPI_MAX_CS;
struct reset_control *reset;
if (dev->of_node) {
const struct of_device_id *match;
match = of_match_node(bcm63xx_spi_of_match, dev->of_node);
if (!match)
return -EINVAL;
bcm63xx_spireg = match->data;
of_property_read_u32(dev->of_node, "num-cs", &num_cs);
if (num_cs > BCM63XX_SPI_MAX_CS) {
dev_warn(dev, "unsupported number of cs (%i), reducing to 8\n",
num_cs);
num_cs = BCM63XX_SPI_MAX_CS;
}
bus_num = -1;
} else if (pdev->id_entry->driver_data) {
const struct platform_device_id *match = pdev->id_entry;
bcm63xx_spireg = (const unsigned long *)match->driver_data;
bus_num = BCM63XX_SPI_BUS_NUM;
} else {
return -EINVAL;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
clk = devm_clk_get(dev, "spi");
if (IS_ERR(clk)) {
dev_err(dev, "no clock for device\n");
return PTR_ERR(clk);
}
reset = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(reset))
return PTR_ERR(reset);
host = spi_alloc_host(dev, sizeof(*bs));
if (!host) {
dev_err(dev, "out of memory\n");
return -ENOMEM;
}
bs = spi_controller_get_devdata(host);
init_completion(&bs->done);
platform_set_drvdata(pdev, host);
bs->pdev = pdev;
bs->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(bs->regs)) {
ret = PTR_ERR(bs->regs);
goto out_err;
}
bs->irq = irq;
bs->clk = clk;
bs->reg_offsets = bcm63xx_spireg;
bs->fifo_size = bs->reg_offsets[SPI_MSG_DATA_SIZE];
ret = devm_request_irq(&pdev->dev, irq, bcm63xx_spi_interrupt, 0,
pdev->name, host);
if (ret) {
dev_err(dev, "unable to request irq\n");
goto out_err;
}
host->dev.of_node = dev->of_node;
host->bus_num = bus_num;
host->num_chipselect = num_cs;
host->transfer_one_message = bcm63xx_spi_transfer_one;
host->mode_bits = MODEBITS;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->max_transfer_size = bcm63xx_spi_max_length;
host->max_message_size = bcm63xx_spi_max_length;
host->auto_runtime_pm = true;
bs->msg_type_shift = bs->reg_offsets[SPI_MSG_TYPE_SHIFT];
bs->msg_ctl_width = bs->reg_offsets[SPI_MSG_CTL_WIDTH];
bs->tx_io = (u8 *)(bs->regs + bs->reg_offsets[SPI_MSG_DATA]);
bs->rx_io = (const u8 *)(bs->regs + bs->reg_offsets[SPI_RX_DATA]);
/* Initialize hardware */
ret = clk_prepare_enable(bs->clk);
if (ret)
goto out_err;
ret = reset_control_reset(reset);
if (ret) {
dev_err(dev, "unable to reset device: %d\n", ret);
goto out_clk_disable;
}
bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
pm_runtime_enable(&pdev->dev);
/* register and we are done */
ret = devm_spi_register_controller(dev, host);
if (ret) {
dev_err(dev, "spi register failed\n");
goto out_pm_disable;
}
dev_info(dev, "at %pr (irq %d, FIFOs size %d)\n",
r, irq, bs->fifo_size);
return 0;
out_pm_disable:
pm_runtime_disable(&pdev->dev);
out_clk_disable:
clk_disable_unprepare(clk);
out_err:
spi_controller_put(host);
return ret;
}
static void bcm63xx_spi_remove(struct platform_device *pdev)
{
struct spi_controller *host = platform_get_drvdata(pdev);
struct bcm63xx_spi *bs = spi_controller_get_devdata(host);
/* reset spi block */
bcm_spi_writeb(bs, 0, SPI_INT_MASK);
/* HW shutdown */
clk_disable_unprepare(bs->clk);
}
static int bcm63xx_spi_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct bcm63xx_spi *bs = spi_controller_get_devdata(host);
spi_controller_suspend(host);
clk_disable_unprepare(bs->clk);
return 0;
}
static int bcm63xx_spi_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct bcm63xx_spi *bs = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(bs->clk);
if (ret)
return ret;
spi_controller_resume(host);
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(bcm63xx_spi_pm_ops, bcm63xx_spi_suspend, bcm63xx_spi_resume);
static struct platform_driver bcm63xx_spi_driver = {
.driver = {
.name = "bcm63xx-spi",
.pm = &bcm63xx_spi_pm_ops,
.of_match_table = bcm63xx_spi_of_match,
},
.id_table = bcm63xx_spi_dev_match,
.probe = bcm63xx_spi_probe,
.remove_new = bcm63xx_spi_remove,
};
module_platform_driver(bcm63xx_spi_driver);
MODULE_ALIAS("platform:bcm63xx_spi");
MODULE_AUTHOR("Florian Fainelli <[email protected]>");
MODULE_AUTHOR("Tanguy Bouzeloc <[email protected]>");
MODULE_DESCRIPTION("Broadcom BCM63xx SPI Controller driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-bcm63xx.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2018 Macronix International Co., Ltd.
//
// Authors:
// Mason Yang <[email protected]>
// zhengxunli <[email protected]>
// Boris Brezillon <[email protected]>
//
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-mxic.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#define HC_CFG 0x0
#define HC_CFG_IF_CFG(x) ((x) << 27)
#define HC_CFG_DUAL_SLAVE BIT(31)
#define HC_CFG_INDIVIDUAL BIT(30)
#define HC_CFG_NIO(x) (((x) / 4) << 27)
#define HC_CFG_TYPE(s, t) ((t) << (23 + ((s) * 2)))
#define HC_CFG_TYPE_SPI_NOR 0
#define HC_CFG_TYPE_SPI_NAND 1
#define HC_CFG_TYPE_SPI_RAM 2
#define HC_CFG_TYPE_RAW_NAND 3
#define HC_CFG_SLV_ACT(x) ((x) << 21)
#define HC_CFG_CLK_PH_EN BIT(20)
#define HC_CFG_CLK_POL_INV BIT(19)
#define HC_CFG_BIG_ENDIAN BIT(18)
#define HC_CFG_DATA_PASS BIT(17)
#define HC_CFG_IDLE_SIO_LVL(x) ((x) << 16)
#define HC_CFG_MAN_START_EN BIT(3)
#define HC_CFG_MAN_START BIT(2)
#define HC_CFG_MAN_CS_EN BIT(1)
#define HC_CFG_MAN_CS_ASSERT BIT(0)
#define INT_STS 0x4
#define INT_STS_EN 0x8
#define INT_SIG_EN 0xc
#define INT_STS_ALL GENMASK(31, 0)
#define INT_RDY_PIN BIT(26)
#define INT_RDY_SR BIT(25)
#define INT_LNR_SUSP BIT(24)
#define INT_ECC_ERR BIT(17)
#define INT_CRC_ERR BIT(16)
#define INT_LWR_DIS BIT(12)
#define INT_LRD_DIS BIT(11)
#define INT_SDMA_INT BIT(10)
#define INT_DMA_FINISH BIT(9)
#define INT_RX_NOT_FULL BIT(3)
#define INT_RX_NOT_EMPTY BIT(2)
#define INT_TX_NOT_FULL BIT(1)
#define INT_TX_EMPTY BIT(0)
#define HC_EN 0x10
#define HC_EN_BIT BIT(0)
#define TXD(x) (0x14 + ((x) * 4))
#define RXD 0x24
#define SS_CTRL(s) (0x30 + ((s) * 4))
#define LRD_CFG 0x44
#define LWR_CFG 0x80
#define RWW_CFG 0x70
#define OP_READ BIT(23)
#define OP_DUMMY_CYC(x) ((x) << 17)
#define OP_ADDR_BYTES(x) ((x) << 14)
#define OP_CMD_BYTES(x) (((x) - 1) << 13)
#define OP_OCTA_CRC_EN BIT(12)
#define OP_DQS_EN BIT(11)
#define OP_ENHC_EN BIT(10)
#define OP_PREAMBLE_EN BIT(9)
#define OP_DATA_DDR BIT(8)
#define OP_DATA_BUSW(x) ((x) << 6)
#define OP_ADDR_DDR BIT(5)
#define OP_ADDR_BUSW(x) ((x) << 3)
#define OP_CMD_DDR BIT(2)
#define OP_CMD_BUSW(x) (x)
#define OP_BUSW_1 0
#define OP_BUSW_2 1
#define OP_BUSW_4 2
#define OP_BUSW_8 3
#define OCTA_CRC 0x38
#define OCTA_CRC_IN_EN(s) BIT(3 + ((s) * 16))
#define OCTA_CRC_CHUNK(s, x) ((fls((x) / 32)) << (1 + ((s) * 16)))
#define OCTA_CRC_OUT_EN(s) BIT(0 + ((s) * 16))
#define ONFI_DIN_CNT(s) (0x3c + (s))
#define LRD_CTRL 0x48
#define RWW_CTRL 0x74
#define LWR_CTRL 0x84
#define LMODE_EN BIT(31)
#define LMODE_SLV_ACT(x) ((x) << 21)
#define LMODE_CMD1(x) ((x) << 8)
#define LMODE_CMD0(x) (x)
#define LRD_ADDR 0x4c
#define LWR_ADDR 0x88
#define LRD_RANGE 0x50
#define LWR_RANGE 0x8c
#define AXI_SLV_ADDR 0x54
#define DMAC_RD_CFG 0x58
#define DMAC_WR_CFG 0x94
#define DMAC_CFG_PERIPH_EN BIT(31)
#define DMAC_CFG_ALLFLUSH_EN BIT(30)
#define DMAC_CFG_LASTFLUSH_EN BIT(29)
#define DMAC_CFG_QE(x) (((x) + 1) << 16)
#define DMAC_CFG_BURST_LEN(x) (((x) + 1) << 12)
#define DMAC_CFG_BURST_SZ(x) ((x) << 8)
#define DMAC_CFG_DIR_READ BIT(1)
#define DMAC_CFG_START BIT(0)
#define DMAC_RD_CNT 0x5c
#define DMAC_WR_CNT 0x98
#define SDMA_ADDR 0x60
#define DMAM_CFG 0x64
#define DMAM_CFG_START BIT(31)
#define DMAM_CFG_CONT BIT(30)
#define DMAM_CFG_SDMA_GAP(x) (fls((x) / 8192) << 2)
#define DMAM_CFG_DIR_READ BIT(1)
#define DMAM_CFG_EN BIT(0)
#define DMAM_CNT 0x68
#define LNR_TIMER_TH 0x6c
#define RDM_CFG0 0x78
#define RDM_CFG0_POLY(x) (x)
#define RDM_CFG1 0x7c
#define RDM_CFG1_RDM_EN BIT(31)
#define RDM_CFG1_SEED(x) (x)
#define LWR_SUSP_CTRL 0x90
#define LWR_SUSP_CTRL_EN BIT(31)
#define DMAS_CTRL 0x9c
#define DMAS_CTRL_EN BIT(31)
#define DMAS_CTRL_DIR_READ BIT(30)
#define DATA_STROB 0xa0
#define DATA_STROB_EDO_EN BIT(2)
#define DATA_STROB_INV_POL BIT(1)
#define DATA_STROB_DELAY_2CYC BIT(0)
#define IDLY_CODE(x) (0xa4 + ((x) * 4))
#define IDLY_CODE_VAL(x, v) ((v) << (((x) % 4) * 8))
#define GPIO 0xc4
#define GPIO_PT(x) BIT(3 + ((x) * 16))
#define GPIO_RESET(x) BIT(2 + ((x) * 16))
#define GPIO_HOLDB(x) BIT(1 + ((x) * 16))
#define GPIO_WPB(x) BIT((x) * 16)
#define HC_VER 0xd0
#define HW_TEST(x) (0xe0 + ((x) * 4))
struct mxic_spi {
struct device *dev;
struct clk *ps_clk;
struct clk *send_clk;
struct clk *send_dly_clk;
void __iomem *regs;
u32 cur_speed_hz;
struct {
void __iomem *map;
dma_addr_t dma;
size_t size;
} linear;
struct {
bool use_pipelined_conf;
struct nand_ecc_engine *pipelined_engine;
void *ctx;
} ecc;
};
static int mxic_spi_clk_enable(struct mxic_spi *mxic)
{
int ret;
ret = clk_prepare_enable(mxic->send_clk);
if (ret)
return ret;
ret = clk_prepare_enable(mxic->send_dly_clk);
if (ret)
goto err_send_dly_clk;
return ret;
err_send_dly_clk:
clk_disable_unprepare(mxic->send_clk);
return ret;
}
static void mxic_spi_clk_disable(struct mxic_spi *mxic)
{
clk_disable_unprepare(mxic->send_clk);
clk_disable_unprepare(mxic->send_dly_clk);
}
static void mxic_spi_set_input_delay_dqs(struct mxic_spi *mxic, u8 idly_code)
{
writel(IDLY_CODE_VAL(0, idly_code) |
IDLY_CODE_VAL(1, idly_code) |
IDLY_CODE_VAL(2, idly_code) |
IDLY_CODE_VAL(3, idly_code),
mxic->regs + IDLY_CODE(0));
writel(IDLY_CODE_VAL(4, idly_code) |
IDLY_CODE_VAL(5, idly_code) |
IDLY_CODE_VAL(6, idly_code) |
IDLY_CODE_VAL(7, idly_code),
mxic->regs + IDLY_CODE(1));
}
static int mxic_spi_clk_setup(struct mxic_spi *mxic, unsigned long freq)
{
int ret;
ret = clk_set_rate(mxic->send_clk, freq);
if (ret)
return ret;
ret = clk_set_rate(mxic->send_dly_clk, freq);
if (ret)
return ret;
/*
* The input delay code is a constant in the range 0x0 to 0x1F, in
* units of 78 ps, so the maximum input delay is 2.418 ns.
*/
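/* e.g. the 0xf code used here corresponds to about 15 * 78 ps ~= 1.17 ns */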
mxic_spi_set_input_delay_dqs(mxic, 0xf);
/*
* Phase degree = 360 * freq * output-delay
* where output-delay is a constant value 1 ns in FPGA.
*
* Get Phase degree = 360 * freq * 1 ns
* = 360 * freq * 1 sec / 1000000000
* = 9 * freq / 25000000
*/
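/*
* For example, with a 25 MHz send clock this requests a phase of
* 9 * 25000000 / 25000000 = 9 degrees, i.e. the 1 ns output delay
* expressed as a fraction of the 40 ns clock period.
*/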
ret = clk_set_phase(mxic->send_dly_clk, 9 * freq / 25000000);
if (ret)
return ret;
return 0;
}
static int mxic_spi_set_freq(struct mxic_spi *mxic, unsigned long freq)
{
int ret;
if (mxic->cur_speed_hz == freq)
return 0;
mxic_spi_clk_disable(mxic);
ret = mxic_spi_clk_setup(mxic, freq);
if (ret)
return ret;
ret = mxic_spi_clk_enable(mxic);
if (ret)
return ret;
mxic->cur_speed_hz = freq;
return 0;
}
static void mxic_spi_hw_init(struct mxic_spi *mxic)
{
writel(0, mxic->regs + DATA_STROB);
writel(INT_STS_ALL, mxic->regs + INT_STS_EN);
writel(0, mxic->regs + HC_EN);
writel(0, mxic->regs + LRD_CFG);
writel(0, mxic->regs + LRD_CTRL);
writel(HC_CFG_NIO(1) | HC_CFG_TYPE(0, HC_CFG_TYPE_SPI_NOR) |
HC_CFG_SLV_ACT(0) | HC_CFG_MAN_CS_EN | HC_CFG_IDLE_SIO_LVL(1),
mxic->regs + HC_CFG);
}
static u32 mxic_spi_prep_hc_cfg(struct spi_device *spi, u32 flags)
{
int nio = 1;
if (spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL))
nio = 8;
else if (spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
nio = 4;
else if (spi->mode & (SPI_TX_DUAL | SPI_RX_DUAL))
nio = 2;
return flags | HC_CFG_NIO(nio) |
HC_CFG_TYPE(spi_get_chipselect(spi, 0), HC_CFG_TYPE_SPI_NOR) |
HC_CFG_SLV_ACT(spi_get_chipselect(spi, 0)) | HC_CFG_IDLE_SIO_LVL(1);
}
static u32 mxic_spi_mem_prep_op_cfg(const struct spi_mem_op *op,
unsigned int data_len)
{
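/*
* Note: fls(buswidth) - 1 maps the 1/2/4/8 bus widths to the
* OP_BUSW_1/2/4/8 encodings 0..3 used below.
*/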
u32 cfg = OP_CMD_BYTES(op->cmd.nbytes) |
OP_CMD_BUSW(fls(op->cmd.buswidth) - 1) |
(op->cmd.dtr ? OP_CMD_DDR : 0);
if (op->addr.nbytes)
cfg |= OP_ADDR_BYTES(op->addr.nbytes) |
OP_ADDR_BUSW(fls(op->addr.buswidth) - 1) |
(op->addr.dtr ? OP_ADDR_DDR : 0);
if (op->dummy.nbytes)
cfg |= OP_DUMMY_CYC(op->dummy.nbytes);
/* Direct mapping data.nbytes field is not populated */
if (data_len) {
cfg |= OP_DATA_BUSW(fls(op->data.buswidth) - 1) |
(op->data.dtr ? OP_DATA_DDR : 0);
if (op->data.dir == SPI_MEM_DATA_IN) {
cfg |= OP_READ;
if (op->data.dtr)
cfg |= OP_DQS_EN;
}
}
return cfg;
}
static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf,
void *rxbuf, unsigned int len)
{
unsigned int pos = 0;
while (pos < len) {
unsigned int nbytes = len - pos;
u32 data = 0xffffffff;
u32 sts;
int ret;
if (nbytes > 4)
nbytes = 4;
if (txbuf)
memcpy(&data, txbuf + pos, nbytes);
ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
if (ret)
return ret;
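/*
* TXD(nbytes % 4): full 4-byte words go to TXD(0), while 1-3 trailing
* bytes go to TXD(1..3); the register index presumably tells the
* controller how many bytes of the word to shift out.
*/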
writel(data, mxic->regs + TXD(nbytes % 4));
ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
if (ret)
return ret;
ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
sts & INT_RX_NOT_EMPTY, 0,
USEC_PER_SEC);
if (ret)
return ret;
data = readl(mxic->regs + RXD);
if (rxbuf) {
data >>= (8 * (4 - nbytes));
memcpy(rxbuf + pos, &data, nbytes);
}
WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY);
pos += nbytes;
}
return 0;
}
static ssize_t mxic_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
struct mxic_spi *mxic = spi_master_get_devdata(desc->mem->spi->master);
int ret;
u32 sts;
if (WARN_ON(offs + desc->info.offset + len > U32_MAX))
return -EINVAL;
writel(mxic_spi_prep_hc_cfg(desc->mem->spi, 0), mxic->regs + HC_CFG);
writel(mxic_spi_mem_prep_op_cfg(&desc->info.op_tmpl, len),
mxic->regs + LRD_CFG);
writel(desc->info.offset + offs, mxic->regs + LRD_ADDR);
len = min_t(size_t, len, mxic->linear.size);
writel(len, mxic->regs + LRD_RANGE);
writel(LMODE_CMD0(desc->info.op_tmpl.cmd.opcode) |
LMODE_SLV_ACT(spi_get_chipselect(desc->mem->spi, 0)) |
LMODE_EN,
mxic->regs + LRD_CTRL);
if (mxic->ecc.use_pipelined_conf && desc->info.op_tmpl.data.ecc) {
ret = mxic_ecc_process_data_pipelined(mxic->ecc.pipelined_engine,
NAND_PAGE_READ,
mxic->linear.dma + offs);
if (ret)
return ret;
} else {
memcpy_fromio(buf, mxic->linear.map, len);
}
writel(INT_LRD_DIS, mxic->regs + INT_STS);
writel(0, mxic->regs + LRD_CTRL);
ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
sts & INT_LRD_DIS, 0, USEC_PER_SEC);
if (ret)
return ret;
return len;
}
static ssize_t mxic_spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len,
const void *buf)
{
struct mxic_spi *mxic = spi_master_get_devdata(desc->mem->spi->master);
u32 sts;
int ret;
if (WARN_ON(offs + desc->info.offset + len > U32_MAX))
return -EINVAL;
writel(mxic_spi_prep_hc_cfg(desc->mem->spi, 0), mxic->regs + HC_CFG);
writel(mxic_spi_mem_prep_op_cfg(&desc->info.op_tmpl, len),
mxic->regs + LWR_CFG);
writel(desc->info.offset + offs, mxic->regs + LWR_ADDR);
len = min_t(size_t, len, mxic->linear.size);
writel(len, mxic->regs + LWR_RANGE);
writel(LMODE_CMD0(desc->info.op_tmpl.cmd.opcode) |
LMODE_SLV_ACT(spi_get_chipselect(desc->mem->spi, 0)) |
LMODE_EN,
mxic->regs + LWR_CTRL);
if (mxic->ecc.use_pipelined_conf && desc->info.op_tmpl.data.ecc) {
ret = mxic_ecc_process_data_pipelined(mxic->ecc.pipelined_engine,
NAND_PAGE_WRITE,
mxic->linear.dma + offs);
if (ret)
return ret;
} else {
memcpy_toio(mxic->linear.map, buf, len);
}
writel(INT_LWR_DIS, mxic->regs + INT_STS);
writel(0, mxic->regs + LWR_CTRL);
ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
sts & INT_LWR_DIS, 0, USEC_PER_SEC);
if (ret)
return ret;
return len;
}
static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (op->data.buswidth > 8 || op->addr.buswidth > 8 ||
op->dummy.buswidth > 8 || op->cmd.buswidth > 8)
return false;
if (op->data.nbytes && op->dummy.nbytes &&
op->data.buswidth != op->dummy.buswidth)
return false;
if (op->addr.nbytes > 7)
return false;
return spi_mem_default_supports_op(mem, op);
}
static int mxic_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
struct mxic_spi *mxic = spi_master_get_devdata(desc->mem->spi->master);
if (!mxic->linear.map)
return -EINVAL;
if (desc->info.offset + desc->info.length > U32_MAX)
return -EINVAL;
if (!mxic_spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
return -EOPNOTSUPP;
return 0;
}
static int mxic_spi_mem_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct mxic_spi *mxic = spi_master_get_devdata(mem->spi->master);
int i, ret;
u8 addr[8], cmd[2];
ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
if (ret)
return ret;
writel(mxic_spi_prep_hc_cfg(mem->spi, HC_CFG_MAN_CS_EN),
mxic->regs + HC_CFG);
writel(HC_EN_BIT, mxic->regs + HC_EN);
writel(mxic_spi_mem_prep_op_cfg(op, op->data.nbytes),
mxic->regs + SS_CTRL(spi_get_chipselect(mem->spi, 0)));
writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
mxic->regs + HC_CFG);
for (i = 0; i < op->cmd.nbytes; i++)
cmd[i] = op->cmd.opcode >> (8 * (op->cmd.nbytes - i - 1));
ret = mxic_spi_data_xfer(mxic, cmd, NULL, op->cmd.nbytes);
if (ret)
goto out;
for (i = 0; i < op->addr.nbytes; i++)
addr[i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
ret = mxic_spi_data_xfer(mxic, addr, NULL, op->addr.nbytes);
if (ret)
goto out;
ret = mxic_spi_data_xfer(mxic, NULL, NULL, op->dummy.nbytes);
if (ret)
goto out;
ret = mxic_spi_data_xfer(mxic,
op->data.dir == SPI_MEM_DATA_OUT ?
op->data.buf.out : NULL,
op->data.dir == SPI_MEM_DATA_IN ?
op->data.buf.in : NULL,
op->data.nbytes);
out:
writel(readl(mxic->regs + HC_CFG) & ~HC_CFG_MAN_CS_ASSERT,
mxic->regs + HC_CFG);
writel(0, mxic->regs + HC_EN);
return ret;
}
static const struct spi_controller_mem_ops mxic_spi_mem_ops = {
.supports_op = mxic_spi_mem_supports_op,
.exec_op = mxic_spi_mem_exec_op,
.dirmap_create = mxic_spi_mem_dirmap_create,
.dirmap_read = mxic_spi_mem_dirmap_read,
.dirmap_write = mxic_spi_mem_dirmap_write,
};
static const struct spi_controller_mem_caps mxic_spi_mem_caps = {
.dtr = true,
.ecc = true,
};
static void mxic_spi_set_cs(struct spi_device *spi, bool lvl)
{
struct mxic_spi *mxic = spi_master_get_devdata(spi->master);
if (!lvl) {
writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_EN,
mxic->regs + HC_CFG);
writel(HC_EN_BIT, mxic->regs + HC_EN);
writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
mxic->regs + HC_CFG);
} else {
writel(readl(mxic->regs + HC_CFG) & ~HC_CFG_MAN_CS_ASSERT,
mxic->regs + HC_CFG);
writel(0, mxic->regs + HC_EN);
}
}
static int mxic_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct mxic_spi *mxic = spi_master_get_devdata(master);
unsigned int busw = OP_BUSW_1;
int ret;
if (t->rx_buf && t->tx_buf) {
if (((spi->mode & SPI_TX_QUAD) &&
!(spi->mode & SPI_RX_QUAD)) ||
((spi->mode & SPI_TX_DUAL) &&
!(spi->mode & SPI_RX_DUAL)))
return -ENOTSUPP;
}
ret = mxic_spi_set_freq(mxic, t->speed_hz);
if (ret)
return ret;
if (t->tx_buf) {
if (spi->mode & SPI_TX_QUAD)
busw = OP_BUSW_4;
else if (spi->mode & SPI_TX_DUAL)
busw = OP_BUSW_2;
} else if (t->rx_buf) {
if (spi->mode & SPI_RX_QUAD)
busw = OP_BUSW_4;
else if (spi->mode & SPI_RX_DUAL)
busw = OP_BUSW_2;
}
writel(OP_CMD_BYTES(1) | OP_CMD_BUSW(busw) |
OP_DATA_BUSW(busw) | (t->rx_buf ? OP_READ : 0),
mxic->regs + SS_CTRL(0));
ret = mxic_spi_data_xfer(mxic, t->tx_buf, t->rx_buf, t->len);
if (ret)
return ret;
spi_finalize_current_transfer(master);
return 0;
}
/* ECC wrapper */
static int mxic_spi_mem_ecc_init_ctx(struct nand_device *nand)
{
struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();
struct mxic_spi *mxic = nand->ecc.engine->priv;
mxic->ecc.use_pipelined_conf = true;
return ops->init_ctx(nand);
}
static void mxic_spi_mem_ecc_cleanup_ctx(struct nand_device *nand)
{
struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();
struct mxic_spi *mxic = nand->ecc.engine->priv;
mxic->ecc.use_pipelined_conf = false;
ops->cleanup_ctx(nand);
}
static int mxic_spi_mem_ecc_prepare_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();
return ops->prepare_io_req(nand, req);
}
static int mxic_spi_mem_ecc_finish_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops();
return ops->finish_io_req(nand, req);
}
static struct nand_ecc_engine_ops mxic_spi_mem_ecc_engine_pipelined_ops = {
.init_ctx = mxic_spi_mem_ecc_init_ctx,
.cleanup_ctx = mxic_spi_mem_ecc_cleanup_ctx,
.prepare_io_req = mxic_spi_mem_ecc_prepare_io_req,
.finish_io_req = mxic_spi_mem_ecc_finish_io_req,
};
static void mxic_spi_mem_ecc_remove(struct mxic_spi *mxic)
{
if (mxic->ecc.pipelined_engine) {
mxic_ecc_put_pipelined_engine(mxic->ecc.pipelined_engine);
nand_ecc_unregister_on_host_hw_engine(mxic->ecc.pipelined_engine);
}
}
static int mxic_spi_mem_ecc_probe(struct platform_device *pdev,
struct mxic_spi *mxic)
{
struct nand_ecc_engine *eng;
if (!mxic_ecc_get_pipelined_ops())
return -EOPNOTSUPP;
eng = mxic_ecc_get_pipelined_engine(pdev);
if (IS_ERR(eng))
return PTR_ERR(eng);
eng->dev = &pdev->dev;
eng->integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
eng->ops = &mxic_spi_mem_ecc_engine_pipelined_ops;
eng->priv = mxic;
mxic->ecc.pipelined_engine = eng;
nand_ecc_register_on_host_hw_engine(eng);
return 0;
}
static int __maybe_unused mxic_spi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct mxic_spi *mxic = spi_master_get_devdata(master);
mxic_spi_clk_disable(mxic);
clk_disable_unprepare(mxic->ps_clk);
return 0;
}
static int __maybe_unused mxic_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct mxic_spi *mxic = spi_master_get_devdata(master);
int ret;
ret = clk_prepare_enable(mxic->ps_clk);
if (ret) {
dev_err(dev, "Cannot enable ps_clock.\n");
return ret;
}
return mxic_spi_clk_enable(mxic);
}
static const struct dev_pm_ops mxic_spi_dev_pm_ops = {
SET_RUNTIME_PM_OPS(mxic_spi_runtime_suspend,
mxic_spi_runtime_resume, NULL)
};
static int mxic_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct resource *res;
struct mxic_spi *mxic;
int ret;
master = devm_spi_alloc_master(&pdev->dev, sizeof(struct mxic_spi));
if (!master)
return -ENOMEM;
platform_set_drvdata(pdev, master);
mxic = spi_master_get_devdata(master);
mxic->dev = &pdev->dev;
master->dev.of_node = pdev->dev.of_node;
mxic->ps_clk = devm_clk_get(&pdev->dev, "ps_clk");
if (IS_ERR(mxic->ps_clk))
return PTR_ERR(mxic->ps_clk);
mxic->send_clk = devm_clk_get(&pdev->dev, "send_clk");
if (IS_ERR(mxic->send_clk))
return PTR_ERR(mxic->send_clk);
mxic->send_dly_clk = devm_clk_get(&pdev->dev, "send_dly_clk");
if (IS_ERR(mxic->send_dly_clk))
return PTR_ERR(mxic->send_dly_clk);
mxic->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
if (IS_ERR(mxic->regs))
return PTR_ERR(mxic->regs);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
mxic->linear.map = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(mxic->linear.map)) {
mxic->linear.dma = res->start;
mxic->linear.size = resource_size(res);
} else {
mxic->linear.map = NULL;
}
pm_runtime_enable(&pdev->dev);
master->auto_runtime_pm = true;
master->num_chipselect = 1;
master->mem_ops = &mxic_spi_mem_ops;
master->mem_caps = &mxic_spi_mem_caps;
master->set_cs = mxic_spi_set_cs;
master->transfer_one = mxic_spi_transfer_one;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->mode_bits = SPI_CPOL | SPI_CPHA |
SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD |
SPI_RX_OCTAL | SPI_TX_OCTAL;
mxic_spi_hw_init(mxic);
ret = mxic_spi_mem_ecc_probe(pdev, mxic);
if (ret == -EPROBE_DEFER) {
pm_runtime_disable(&pdev->dev);
return ret;
}
ret = spi_register_master(master);
if (ret) {
dev_err(&pdev->dev, "spi_register_master failed\n");
pm_runtime_disable(&pdev->dev);
mxic_spi_mem_ecc_remove(mxic);
}
return ret;
}
static void mxic_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct mxic_spi *mxic = spi_master_get_devdata(master);
pm_runtime_disable(&pdev->dev);
mxic_spi_mem_ecc_remove(mxic);
spi_unregister_master(master);
}
static const struct of_device_id mxic_spi_of_ids[] = {
{ .compatible = "mxicy,mx25f0a-spi", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxic_spi_of_ids);
static struct platform_driver mxic_spi_driver = {
.probe = mxic_spi_probe,
.remove_new = mxic_spi_remove,
.driver = {
.name = "mxic-spi",
.of_match_table = mxic_spi_of_ids,
.pm = &mxic_spi_dev_pm_ops,
},
};
module_platform_driver(mxic_spi_driver);
MODULE_AUTHOR("Mason Yang <[email protected]>");
MODULE_DESCRIPTION("MX25F0A SPI controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/spi/spi-mxic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MPC512x PSC in SPI mode driver.
*
* Copyright (C) 2007,2008 Freescale Semiconductor Inc.
* Original port from 52xx driver:
* Hongjun Chen <[email protected]>
*
* Fork of mpc52xx_psc_spi.c:
* Copyright (C) 2006 TOPTICA Photonics AG., Dragos Carp
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/spi/spi.h>
#include <asm/mpc52xx_psc.h>
enum {
TYPE_MPC5121,
TYPE_MPC5125,
};
/*
* This macro abstracts the differences in the PSC register layout between
* MPC5121 (which uses a struct mpc52xx_psc) and MPC5125 (using mpc5125_psc).
*/
#define psc_addr(mps, regname) ({ \
void *__ret = NULL; \
switch (mps->type) { \
case TYPE_MPC5121: { \
struct mpc52xx_psc __iomem *psc = mps->psc; \
__ret = &psc->regname; \
}; \
break; \
case TYPE_MPC5125: { \
struct mpc5125_psc __iomem *psc = mps->psc; \
__ret = &psc->regname; \
}; \
break; \
} \
__ret; })
struct mpc512x_psc_spi {
/* driver internal data */
int type;
void __iomem *psc;
struct mpc512x_psc_fifo __iomem *fifo;
int irq;
u8 bits_per_word;
u32 mclk_rate;
struct completion txisrdone;
};
/* controller state */
struct mpc512x_psc_spi_cs {
int bits_per_word;
int speed_hz;
};
/* set clock freq, clock ramp, bits per word
* if t is NULL then reset the values to the default values
*/
static int mpc512x_psc_spi_transfer_setup(struct spi_device *spi,
struct spi_transfer *t)
{
struct mpc512x_psc_spi_cs *cs = spi->controller_state;
cs->speed_hz = (t && t->speed_hz)
? t->speed_hz : spi->max_speed_hz;
cs->bits_per_word = (t && t->bits_per_word)
? t->bits_per_word : spi->bits_per_word;
cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8;
return 0;
}
static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
{
struct mpc512x_psc_spi_cs *cs = spi->controller_state;
struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
u32 sicr;
u32 ccr;
int speed;
u16 bclkdiv;
sicr = in_be32(psc_addr(mps, sicr));
/* Set clock phase and polarity */
if (spi->mode & SPI_CPHA)
sicr |= 0x00001000;
else
sicr &= ~0x00001000;
if (spi->mode & SPI_CPOL)
sicr |= 0x00002000;
else
sicr &= ~0x00002000;
if (spi->mode & SPI_LSB_FIRST)
sicr |= 0x10000000;
else
sicr &= ~0x10000000;
out_be32(psc_addr(mps, sicr), sicr);
ccr = in_be32(psc_addr(mps, ccr));
ccr &= 0xFF000000;
speed = cs->speed_hz;
if (!speed)
speed = 1000000; /* default 1MHz */
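/*
* The bit clock divider is programmed as (mclk_rate / speed) - 1 and
* split across two byte-wide fields of the CCR register below.
*/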
bclkdiv = (mps->mclk_rate / speed) - 1;
ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
out_be32(psc_addr(mps, ccr), ccr);
mps->bits_per_word = cs->bits_per_word;
if (spi_get_csgpiod(spi, 0)) {
/* gpiolib will deal with the inversion */
gpiod_set_value(spi_get_csgpiod(spi, 0), 1);
}
}
static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi)
{
if (spi_get_csgpiod(spi, 0)) {
/* gpiolib will deal with the inversion */
gpiod_set_value(spi_get_csgpiod(spi, 0), 0);
}
}
/* extract and scale size field in txsz or rxsz */
#define MPC512x_PSC_FIFO_SZ(sz) (((sz) & 0x7ff) << 2)
#define EOFBYTE 1
static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
struct spi_transfer *t)
{
struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
size_t tx_len = t->len;
size_t rx_len = t->len;
u8 *tx_buf = (u8 *)t->tx_buf;
u8 *rx_buf = (u8 *)t->rx_buf;
if (!tx_buf && !rx_buf && t->len)
return -EINVAL;
while (rx_len || tx_len) {
size_t txcount;
u8 data;
size_t fifosz;
size_t rxcount;
int rxtries;
/*
* send the TX bytes in as large a chunk as possible
* but neither exceed the TX nor the RX FIFOs
*/
fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->txsz));
txcount = min(fifosz, tx_len);
fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->rxsz));
fifosz -= in_be32(&fifo->rxcnt) + 1;
txcount = min(fifosz, txcount);
if (txcount) {
/* fill the TX FIFO */
while (txcount-- > 0) {
data = tx_buf ? *tx_buf++ : 0;
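/*
* Tag the final byte with EOF when a CS change was requested so that
* the controller releases SS at the end of this transfer (SICR is
* configured with UseEOF, "SS low until EOF").
*/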
if (tx_len == EOFBYTE && t->cs_change)
setbits32(&fifo->txcmd,
MPC512x_PSC_FIFO_EOF);
out_8(&fifo->txdata_8, data);
tx_len--;
}
/* have the ISR trigger when the TX FIFO is empty */
reinit_completion(&mps->txisrdone);
out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY);
wait_for_completion(&mps->txisrdone);
}
/*
* consume as much RX data as the FIFO holds, while we
* iterate over the transfer's TX data length
*
* only insist in draining all the remaining RX bytes
* when the TX bytes were exhausted (that's at the very
* end of this transfer, not when still iterating over
* the transfer's chunks)
*/
rxtries = 50;
do {
/*
* grab whatever was in the FIFO when we started
* looking, don't bother fetching what was added to
* the FIFO while we read from it -- we'll return
* here eventually and prefer sending out remaining
* TX data
*/
fifosz = in_be32(&fifo->rxcnt);
rxcount = min(fifosz, rx_len);
while (rxcount-- > 0) {
data = in_8(&fifo->rxdata_8);
if (rx_buf)
*rx_buf++ = data;
rx_len--;
}
/*
* come back later if there still is TX data to send,
* bail out of the RX drain loop if all of the TX data
* was sent and all of the RX data was received (i.e.
* when the transmission has completed)
*/
if (tx_len)
break;
if (!rx_len)
break;
/*
* TX data transmission has completed while RX data
* is still pending -- that's a transient situation
* which depends on wire speed and specific
* hardware implementation details (buffering) yet
* should resolve very quickly
*
* just yield for a moment to not hog the CPU for
* too long when running SPI at low speed
*
* the timeout range is rather arbitrary and tries
* to balance throughput against system load; the
* chosen values result in a minimal timeout of 50
* times 10us and thus work at speeds as low as
* some 20kbps, while the maximum timeout at the
* transfer's end could be 5ms _if_ nothing else
* ticks in the system _and_ RX data still wasn't
* received, which only occurs in situations that
* are exceptional; removing the unpredictability
* of the timeout either decreases throughput
* (longer timeouts), or puts more load on the
* system (fixed short timeouts) or requires the
* use of a timeout API instead of a counter and an
* unknown inner delay
*/
usleep_range(10, 100);
} while (--rxtries > 0);
if (!tx_len && rx_len && !rxtries) {
/*
* not enough RX bytes even after several retries
* and the resulting rather long timeout?
*/
rxcount = in_be32(&fifo->rxcnt);
dev_warn(&spi->dev,
"short xfer, missing %zd RX bytes, FIFO level %zd\n",
rx_len, rxcount);
}
/*
* drain and drop RX data which "should not be there" in
* the first place, for undisturbed transmission this turns
* into a NOP (except for the FIFO level fetch)
*/
if (!tx_len && !rx_len) {
while (in_be32(&fifo->rxcnt))
in_8(&fifo->rxdata_8);
}
}
return 0;
}
static int mpc512x_psc_spi_msg_xfer(struct spi_master *master,
struct spi_message *m)
{
struct spi_device *spi;
unsigned cs_change;
int status;
struct spi_transfer *t;
spi = m->spi;
cs_change = 1;
status = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
status = mpc512x_psc_spi_transfer_setup(spi, t);
if (status < 0)
break;
if (cs_change)
mpc512x_psc_spi_activate_cs(spi);
cs_change = t->cs_change;
status = mpc512x_psc_spi_transfer_rxtx(spi, t);
if (status)
break;
m->actual_length += t->len;
spi_transfer_delay_exec(t);
if (cs_change)
mpc512x_psc_spi_deactivate_cs(spi);
}
m->status = status;
if (m->complete)
m->complete(m->context);
if (status || !cs_change)
mpc512x_psc_spi_deactivate_cs(spi);
mpc512x_psc_spi_transfer_setup(spi, NULL);
spi_finalize_current_message(master);
return status;
}
static int mpc512x_psc_spi_prep_xfer_hw(struct spi_master *master)
{
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
dev_dbg(&master->dev, "%s()\n", __func__);
/* Zero MR2 */
in_8(psc_addr(mps, mr2));
out_8(psc_addr(mps, mr2), 0x0);
/* enable transmitter/receiver */
out_8(psc_addr(mps, command), MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
return 0;
}
static int mpc512x_psc_spi_unprep_xfer_hw(struct spi_master *master)
{
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
dev_dbg(&master->dev, "%s()\n", __func__);
/* disable transmitter/receiver and fifo interrupt */
out_8(psc_addr(mps, command), MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
out_be32(&fifo->tximr, 0);
return 0;
}
static int mpc512x_psc_spi_setup(struct spi_device *spi)
{
struct mpc512x_psc_spi_cs *cs = spi->controller_state;
if (spi->bits_per_word % 8)
return -EINVAL;
if (!cs) {
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
}
cs->bits_per_word = spi->bits_per_word;
cs->speed_hz = spi->max_speed_hz;
return 0;
}
static void mpc512x_psc_spi_cleanup(struct spi_device *spi)
{
kfree(spi->controller_state);
}
static int mpc512x_psc_spi_port_config(struct spi_master *master,
struct mpc512x_psc_spi *mps)
{
struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
u32 sicr;
u32 ccr;
int speed;
u16 bclkdiv;
/* Reset the PSC into a known state */
out_8(psc_addr(mps, command), MPC52xx_PSC_RST_RX);
out_8(psc_addr(mps, command), MPC52xx_PSC_RST_TX);
out_8(psc_addr(mps, command), MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
/* Disable psc interrupts, all useful interrupts are in the fifo */
out_be16(psc_addr(mps, isr_imr.imr), 0);
/* Disable fifo interrupts, will be enabled later */
out_be32(&fifo->tximr, 0);
out_be32(&fifo->rximr, 0);
/* Setup fifo slice address and size */
/*out_be32(&fifo->txsz, 0x0fe00004);*/
/*out_be32(&fifo->rxsz, 0x0ff00004);*/
sicr = 0x01000000 | /* SIM = 0001 -- 8 bit */
0x00800000 | /* GenClk = 1 -- internal clk */
0x00008000 | /* SPI = 1 */
0x00004000 | /* MSTR = 1 -- SPI master */
0x00000800; /* UseEOF = 1 -- SS low until EOF */
out_be32(psc_addr(mps, sicr), sicr);
ccr = in_be32(psc_addr(mps, ccr));
ccr &= 0xFF000000;
speed = 1000000; /* default 1MHz */
bclkdiv = (mps->mclk_rate / speed) - 1;
ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
out_be32(psc_addr(mps, ccr), ccr);
/* Set 2ms DTL delay */
out_8(psc_addr(mps, ctur), 0x00);
out_8(psc_addr(mps, ctlr), 0x82);
/* we don't use the alarms */
out_be32(&fifo->rxalarm, 0xfff);
out_be32(&fifo->txalarm, 0);
/* Enable FIFO slices for Rx/Tx */
out_be32(&fifo->rxcmd,
MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA);
out_be32(&fifo->txcmd,
MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA);
mps->bits_per_word = 8;
return 0;
}
static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
{
struct mpc512x_psc_spi *mps = (struct mpc512x_psc_spi *)dev_id;
struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
/* clear interrupt and wake up the rx/tx routine */
if (in_be32(&fifo->txisr) &
in_be32(&fifo->tximr) & MPC512x_PSC_FIFO_EMPTY) {
out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
out_be32(&fifo->tximr, 0);
complete(&mps->txisrdone);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int mpc512x_psc_spi_of_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mpc512x_psc_spi *mps;
struct spi_master *master;
int ret;
void *tempp;
struct clk *clk;
master = devm_spi_alloc_master(dev, sizeof(*mps));
if (master == NULL)
return -ENOMEM;
dev_set_drvdata(dev, master);
mps = spi_master_get_devdata(master);
mps->type = (int)device_get_match_data(dev);
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
master->setup = mpc512x_psc_spi_setup;
master->prepare_transfer_hardware = mpc512x_psc_spi_prep_xfer_hw;
master->transfer_one_message = mpc512x_psc_spi_msg_xfer;
master->unprepare_transfer_hardware = mpc512x_psc_spi_unprep_xfer_hw;
master->use_gpio_descriptors = true;
master->cleanup = mpc512x_psc_spi_cleanup;
device_set_node(&master->dev, dev_fwnode(dev));
tempp = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(tempp))
return dev_err_probe(dev, PTR_ERR(tempp), "could not ioremap I/O port range\n");
mps->psc = tempp;
mps->fifo =
(struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc));
mps->irq = platform_get_irq(pdev, 0);
if (mps->irq < 0)
return mps->irq;
ret = devm_request_irq(dev, mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED,
"mpc512x-psc-spi", mps);
if (ret)
return ret;
init_completion(&mps->txisrdone);
clk = devm_clk_get_enabled(dev, "mclk");
if (IS_ERR(clk))
return PTR_ERR(clk);
mps->mclk_rate = clk_get_rate(clk);
clk = devm_clk_get_enabled(dev, "ipg");
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = mpc512x_psc_spi_port_config(master, mps);
if (ret < 0)
return ret;
return devm_spi_register_master(dev, master);
}
static const struct of_device_id mpc512x_psc_spi_of_match[] = {
{ .compatible = "fsl,mpc5121-psc-spi", .data = (void *)TYPE_MPC5121 },
{ .compatible = "fsl,mpc5125-psc-spi", .data = (void *)TYPE_MPC5125 },
{},
};
MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match);
static struct platform_driver mpc512x_psc_spi_of_driver = {
.probe = mpc512x_psc_spi_of_probe,
.driver = {
.name = "mpc512x-psc-spi",
.of_match_table = mpc512x_psc_spi_of_match,
},
};
module_platform_driver(mpc512x_psc_spi_of_driver);
MODULE_AUTHOR("John Rigby");
MODULE_DESCRIPTION("MPC512x PSC SPI Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/spi/spi-mpc512x-psc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PCI glue driver for SPI PXA2xx compatible controllers.
* CE4100's SPI device is more or less the same one as found on PXA.
*
* Copyright (C) 2016, 2021 Intel Corporation
*/
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/dmaengine.h>
#include <linux/platform_data/dma-dw.h>
#define PCI_DEVICE_ID_INTEL_QUARK_X1000 0x0935
#define PCI_DEVICE_ID_INTEL_BYT 0x0f0e
#define PCI_DEVICE_ID_INTEL_MRFLD 0x1194
#define PCI_DEVICE_ID_INTEL_BSW0 0x228e
#define PCI_DEVICE_ID_INTEL_BSW1 0x2290
#define PCI_DEVICE_ID_INTEL_BSW2 0x22ac
#define PCI_DEVICE_ID_INTEL_CE4100 0x2e6a
#define PCI_DEVICE_ID_INTEL_LPT0_0 0x9c65
#define PCI_DEVICE_ID_INTEL_LPT0_1 0x9c66
#define PCI_DEVICE_ID_INTEL_LPT1_0 0x9ce5
#define PCI_DEVICE_ID_INTEL_LPT1_1 0x9ce6
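/* Per-variant setup hook, selected through driver_data in the PCI ID table. */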
struct pxa_spi_info {
int (*setup)(struct pci_dev *pdev, struct pxa2xx_spi_controller *c);
};
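/*
 * DesignWare DMA request lines for the LPSS/Merrifield SSP instances.
 * They are handed to the dmaengine filter below so each port grabs a
 * channel from the shared DMA controller with the right handshake IDs.
 */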
static struct dw_dma_slave byt_tx_param = { .dst_id = 0 };
static struct dw_dma_slave byt_rx_param = { .src_id = 1 };
static struct dw_dma_slave mrfld3_tx_param = { .dst_id = 15 };
static struct dw_dma_slave mrfld3_rx_param = { .src_id = 14 };
static struct dw_dma_slave mrfld5_tx_param = { .dst_id = 13 };
static struct dw_dma_slave mrfld5_rx_param = { .src_id = 12 };
static struct dw_dma_slave mrfld6_tx_param = { .dst_id = 11 };
static struct dw_dma_slave mrfld6_rx_param = { .src_id = 10 };
static struct dw_dma_slave bsw0_tx_param = { .dst_id = 0 };
static struct dw_dma_slave bsw0_rx_param = { .src_id = 1 };
static struct dw_dma_slave bsw1_tx_param = { .dst_id = 6 };
static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 };
static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 };
static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 };
static struct dw_dma_slave lpt1_tx_param = { .dst_id = 0 };
static struct dw_dma_slave lpt1_rx_param = { .src_id = 1 };
static struct dw_dma_slave lpt0_tx_param = { .dst_id = 2 };
static struct dw_dma_slave lpt0_rx_param = { .src_id = 3 };
static void pxa2xx_spi_pci_clk_unregister(void *clk)
{
clk_unregister(clk);
}
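/*
 * Register a fixed-rate reference clock named after the SSP port for use
 * by the SPI controller driver; unregistration is tied to the PCI device
 * lifetime through a devm action.
 */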
static int pxa2xx_spi_pci_clk_register(struct pci_dev *dev, struct ssp_device *ssp,
unsigned long rate)
{
char buf[40];
snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
ssp->clk = clk_register_fixed_rate(&dev->dev, buf, NULL, 0, rate);
if (IS_ERR(ssp->clk))
return PTR_ERR(ssp->clk);
return devm_add_action_or_reset(&dev->dev, pxa2xx_spi_pci_clk_unregister, ssp->clk);
}
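/*
 * dmaengine filter: accept only channels provided by the DMA controller
 * recorded in the slave data, and attach the request-line parameters to
 * the channel.
 */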
static bool lpss_dma_filter(struct dma_chan *chan, void *param)
{
struct dw_dma_slave *dws = param;
if (dws->dma_dev != chan->device->dev)
return false;
chan->private = dws;
return true;
}
static void lpss_dma_put_device(void *dma_dev)
{
pci_dev_put(dma_dev);
}
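/*
 * LPSS (BayTrail/Braswell/Lynxpoint) setup: pick the SSP type, port id
 * and DMA request lines from the PCI device ID, register a 50 MHz
 * reference clock, and point the DMA slave data at function 0 of the
 * same PCI slot, which provides the DMA controller.
 */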
static int lpss_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
{
struct ssp_device *ssp = &c->ssp;
struct dw_dma_slave *tx, *rx;
struct pci_dev *dma_dev;
int ret;
switch (dev->device) {
case PCI_DEVICE_ID_INTEL_BYT:
ssp->type = LPSS_BYT_SSP;
ssp->port_id = 0;
c->tx_param = &byt_tx_param;
c->rx_param = &byt_rx_param;
break;
case PCI_DEVICE_ID_INTEL_BSW0:
ssp->type = LPSS_BSW_SSP;
ssp->port_id = 0;
c->tx_param = &bsw0_tx_param;
c->rx_param = &bsw0_rx_param;
break;
case PCI_DEVICE_ID_INTEL_BSW1:
ssp->type = LPSS_BSW_SSP;
ssp->port_id = 1;
c->tx_param = &bsw1_tx_param;
c->rx_param = &bsw1_rx_param;
break;
case PCI_DEVICE_ID_INTEL_BSW2:
ssp->type = LPSS_BSW_SSP;
ssp->port_id = 2;
c->tx_param = &bsw2_tx_param;
c->rx_param = &bsw2_rx_param;
break;
case PCI_DEVICE_ID_INTEL_LPT0_0:
case PCI_DEVICE_ID_INTEL_LPT1_0:
ssp->type = LPSS_LPT_SSP;
ssp->port_id = 0;
c->tx_param = &lpt0_tx_param;
c->rx_param = &lpt0_rx_param;
break;
case PCI_DEVICE_ID_INTEL_LPT0_1:
case PCI_DEVICE_ID_INTEL_LPT1_1:
ssp->type = LPSS_LPT_SSP;
ssp->port_id = 1;
c->tx_param = &lpt1_tx_param;
c->rx_param = &lpt1_rx_param;
break;
default:
return -ENODEV;
}
c->num_chipselect = 1;
ret = pxa2xx_spi_pci_clk_register(dev, ssp, 50000000);
if (ret)
return ret;
dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev);
if (ret)
return ret;
tx = c->tx_param;
tx->dma_dev = &dma_dev->dev;
tx->m_master = 0;
tx->p_master = 1;
rx = c->rx_param;
rx->dma_dev = &dma_dev->dev;
rx->m_master = 0;
rx->p_master = 1;
c->dma_filter = lpss_dma_filter;
c->dma_burst_size = 1;
c->enable_dma = 1;
return 0;
}
static const struct pxa_spi_info lpss_info_config = {
.setup = lpss_spi_setup,
};
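/* CE4100: plain PXA25x-style SSP, no DMA, 3.6864 MHz reference clock. */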
static int ce4100_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
{
struct ssp_device *ssp = &c->ssp;
ssp->type = PXA25x_SSP;
ssp->port_id = dev->devfn;
c->num_chipselect = dev->devfn;
return pxa2xx_spi_pci_clk_register(dev, ssp, 3686400);
}
static const struct pxa_spi_info ce4100_info_config = {
.setup = ce4100_spi_setup,
};
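/*
 * Intel Merrifield: three SSP ports share one PCI device, distinguished
 * by PCI function; the DMA controller sits at slot 21, function 0.
 */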
static int mrfld_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
{
struct ssp_device *ssp = &c->ssp;
struct dw_dma_slave *tx, *rx;
struct pci_dev *dma_dev;
int ret;
ssp->type = MRFLD_SSP;
switch (PCI_FUNC(dev->devfn)) {
case 0:
ssp->port_id = 3;
c->num_chipselect = 1;
c->tx_param = &mrfld3_tx_param;
c->rx_param = &mrfld3_rx_param;
break;
case 1:
ssp->port_id = 5;
c->num_chipselect = 4;
c->tx_param = &mrfld5_tx_param;
c->rx_param = &mrfld5_rx_param;
break;
case 2:
ssp->port_id = 6;
c->num_chipselect = 1;
c->tx_param = &mrfld6_tx_param;
c->rx_param = &mrfld6_rx_param;
break;
default:
return -ENODEV;
}
ret = pxa2xx_spi_pci_clk_register(dev, ssp, 25000000);
if (ret)
return ret;
dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0));
ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev);
if (ret)
return ret;
tx = c->tx_param;
tx->dma_dev = &dma_dev->dev;
rx = c->rx_param;
rx->dma_dev = &dma_dev->dev;
c->dma_filter = lpss_dma_filter;
c->dma_burst_size = 8;
c->enable_dma = 1;
return 0;
}
static const struct pxa_spi_info mrfld_info_config = {
.setup = mrfld_spi_setup,
};
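/* Quark X1000: single chip select, 50 MHz reference clock, no DMA. */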
static int qrk_spi_setup(struct pci_dev *dev, struct pxa2xx_spi_controller *c)
{
struct ssp_device *ssp = &c->ssp;
ssp->type = QUARK_X1000_SSP;
ssp->port_id = dev->devfn;
c->num_chipselect = 1;
return pxa2xx_spi_pci_clk_register(dev, ssp, 50000000);
}
static const struct pxa_spi_info qrk_info_config = {
.setup = qrk_spi_setup,
};
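/*
 * PCI probe: enable the device, map BAR 0, run the per-variant setup
 * hook, then hand everything to the generic driver by registering a
 * "pxa2xx-spi" platform device whose platform data carries the filled-in
 * pxa2xx_spi_controller/ssp_device description.
 */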
static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
const struct pxa_spi_info *info;
struct platform_device_info pi;
int ret;
struct platform_device *pdev;
struct pxa2xx_spi_controller spi_pdata;
struct ssp_device *ssp;
ret = pcim_enable_device(dev);
if (ret)
return ret;
ret = pcim_iomap_regions(dev, 1 << 0, "PXA2xx SPI");
if (ret)
return ret;
memset(&spi_pdata, 0, sizeof(spi_pdata));
ssp = &spi_pdata.ssp;
ssp->dev = &dev->dev;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = pcim_iomap_table(dev)[0];
info = (struct pxa_spi_info *)ent->driver_data;
ret = info->setup(dev, &spi_pdata);
if (ret)
return ret;
pci_set_master(dev);
ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
ssp->irq = pci_irq_vector(dev, 0);
memset(&pi, 0, sizeof(pi));
pi.fwnode = dev_fwnode(&dev->dev);
pi.parent = &dev->dev;
pi.name = "pxa2xx-spi";
pi.id = ssp->port_id;
pi.data = &spi_pdata;
pi.size_data = sizeof(spi_pdata);
pdev = platform_device_register_full(&pi);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
pci_set_drvdata(dev, pdev);
return 0;
}
static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
{
struct platform_device *pdev = pci_get_drvdata(dev);
platform_device_unregister(pdev);
}
static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
{ PCI_DEVICE_DATA(INTEL, QUARK_X1000, &qrk_info_config) },
{ PCI_DEVICE_DATA(INTEL, BYT, &lpss_info_config) },
{ PCI_DEVICE_DATA(INTEL, MRFLD, &mrfld_info_config) },
{ PCI_DEVICE_DATA(INTEL, BSW0, &lpss_info_config) },
{ PCI_DEVICE_DATA(INTEL, BSW1, &lpss_info_config) },
{ PCI_DEVICE_DATA(INTEL, BSW2, &lpss_info_config) },
{ PCI_DEVICE_DATA(INTEL, CE4100, &ce4100_info_config) },
{ PCI_DEVICE_DATA(INTEL, LPT0_0, &lpss_info_config) },
{ PCI_DEVICE_DATA(INTEL, LPT0_1, &lpss_info_config) },
{ PCI_DEVICE_DATA(INTEL, LPT1_0, &lpss_info_config) },
{ PCI_DEVICE_DATA(INTEL, LPT1_1, &lpss_info_config) },
{ }
};
MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
static struct pci_driver pxa2xx_spi_pci_driver = {
.name = "pxa2xx_spi_pci",
.id_table = pxa2xx_spi_pci_devices,
.probe = pxa2xx_spi_pci_probe,
.remove = pxa2xx_spi_pci_remove,
};
module_pci_driver(pxa2xx_spi_pci_driver);
MODULE_DESCRIPTION("CE4100/LPSS PCI-SPI glue code for PXA's driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sebastian Andrzej Siewior <[email protected]>");
| linux-master | drivers/spi/spi-pxa2xx-pci.c |